/*
 * mini-amd64.c: AMD64 backend for the Mono code generator
 *
 * Based on mini-x86.c.
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Patrik Torstensson
 *
 * (C) 2003 Ximian, Inc.
 */
#include "mini.h"
#include <string.h>
#include <math.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-math.h>

#include "trace.h"
#include "mini-amd64.h"
#include "inssel.h"
#include "cpu-amd64.h"

static gint lmf_tls_offset = -1;
static gint appdomain_tls_offset = -1;
static gint thread_tls_offset = -1;

static gboolean use_sse2 = !MONO_ARCH_USE_FPSTACK;

const char * const amd64_desc [OP_LAST];
static const char*const * ins_spec = amd64_desc;

#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))

#define IS_IMM32(val) ((((guint64)val) >> 32) == 0)

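/*
 * A quick illustration (not part of the build): ALIGN_TO rounds up to a
 * power-of-two boundary and IS_IMM32 tests whether a value fits in the low
 * 32 bits, e.g.:
 *
 *   ALIGN_TO (13, 8)           == 16
 *   ALIGN_TO (16, 8)           == 16
 *   IS_IMM32 (0x7fffffff)      -> TRUE
 *   IS_IMM32 (0x100000000ULL)  -> FALSE
 */
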
#ifdef PLATFORM_WIN32
/* Under Windows, the default pinvoke calling convention is stdcall */
#define CALLCONV_IS_STDCALL(call_conv) (((call_conv) == MONO_CALL_STDCALL) || ((call_conv) == MONO_CALL_DEFAULT))
#else
#define CALLCONV_IS_STDCALL(call_conv) ((call_conv) == MONO_CALL_STDCALL)
#endif

#define ARGS_OFFSET 16
#define GP_SCRATCH_REG AMD64_R11

/*
 * AMD64 register usage:
 * - callee saved registers are used for global register allocation
 * - %r11 is used for materializing 64 bit constants in opcodes
 * - the rest is used for local allocation
 */

/*
 * Floating point comparison results:
 *                  ZF PF CF
 * A > B            0  0  0
 * A < B            0  0  1
 * A = B            1  0  0
 * UNORDERED        1  1  1
 */
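
/*
 * Illustrative sketch (not from the original source): with the flag
 * encoding above, produced by comisd/fcomi style compares, the unsigned
 * branch conditions map directly onto the ordered results:
 *
 *   ja   (ZF == 0 && CF == 0)  ->  fires only for A > B
 *   jb   (CF == 1)             ->  fires for A < B, but also for UNORDERED
 *   jp   (PF == 1)             ->  fires only for UNORDERED
 *
 * so NaN-aware code has to test PF first, or pick conditions knowing that
 * UNORDERED sets all three flags.
 */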

#define NOT_IMPLEMENTED g_assert_not_reached ()

const char*
mono_arch_regname (int reg) {
        switch (reg) {
        case AMD64_RAX: return "%rax";
        case AMD64_RBX: return "%rbx";
        case AMD64_RCX: return "%rcx";
        case AMD64_RDX: return "%rdx";
        case AMD64_RSP: return "%rsp";
        case AMD64_RBP: return "%rbp";
        case AMD64_RDI: return "%rdi";
        case AMD64_RSI: return "%rsi";
        case AMD64_R8: return "%r8";
        case AMD64_R9: return "%r9";
        case AMD64_R10: return "%r10";
        case AMD64_R11: return "%r11";
        case AMD64_R12: return "%r12";
        case AMD64_R13: return "%r13";
        case AMD64_R14: return "%r14";
        case AMD64_R15: return "%r15";
        }
        return "unknown";
}

static const char * xmmregs [] = {
        "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8",
        "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
};

const char*
mono_arch_fregname (int reg)
{
        if (reg < AMD64_XMM_NREG)
                return xmmregs [reg];
        else
                return "unknown";
}

G_GNUC_UNUSED static void
break_count (void)
{
}

G_GNUC_UNUSED static gboolean
debug_count (void)
{
        static int count = 0;
        count ++;

        if (!getenv ("COUNT"))
                return TRUE;

        if (count == atoi (getenv ("COUNT"))) {
                break_count ();
        }

        if (count > atoi (getenv ("COUNT"))) {
                return FALSE;
        }

        return TRUE;
}

static gboolean
debug_omit_fp (void)
{
#if 0
        return debug_count ();
#else
        return TRUE;
#endif
}

static inline void
amd64_patch (unsigned char* code, gpointer target)
{
        /* Skip REX */
        if ((code [0] >= 0x40) && (code [0] <= 0x4f))
                code += 1;

        if ((code [0] & 0xf8) == 0xb8) {
                /* amd64_set_reg_template */
                *(guint64*)(code + 1) = (guint64)target;
        }
        else if (code [0] == 0x8b) {
                /* mov 0(%rip), %dreg */
                *(guint32*)(code + 2) = (guint32)(guint64)target - 7;
        }
        else if ((code [0] == 0xff) && (code [1] == 0x15)) {
                /* call *<OFFSET>(%rip) */
                *(guint32*)(code + 2) = ((guint32)(guint64)target) - 7;
        }
        else if ((code [0] == 0xe8)) {
                /* call <DISP> */
                gint64 disp = (guint8*)target - (guint8*)code;
                g_assert (amd64_is_imm32 (disp));
                x86_patch (code, (unsigned char*)target);
        }
        else
                x86_patch (code, (unsigned char*)target);
}
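
/*
 * Worked example (for illustration only): patching a direct call. A five
 * byte "call rel32" at address 0x1000 (opcode 0xe8) that should land on a
 * target at 0x2000 needs
 *
 *   rel32 = target - (code + 5) = 0x2000 - 0x1005 = 0xffb
 *
 * which is what x86_patch () computes; the g_assert above only checks that
 * the displacement fits in 32 bits, which holds because the code manager
 * allocates all JIT memory within a 32 bit offset.
 */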

typedef enum {
        ArgInIReg,
        ArgInFloatSSEReg,
        ArgInDoubleSSEReg,
        ArgOnStack,
        ArgValuetypeInReg,
        ArgNone /* only in pair_storage */
} ArgStorage;

typedef struct {
        gint16 offset;
        gint8  reg;
        ArgStorage storage;

        /* Only if storage == ArgValuetypeInReg */
        ArgStorage pair_storage [2];
        gint8 pair_regs [2];
} ArgInfo;

typedef struct {
        int nargs;
        guint32 stack_usage;
        guint32 reg_usage;
        guint32 freg_usage;
        gboolean need_stack_align;
        ArgInfo ret;
        ArgInfo sig_cookie;
        ArgInfo args [1];
} CallInfo;

#define DEBUG(a) if (cfg->verbose_level > 1) a

#define NEW_ICONST(cfg,dest,val) do {   \
                (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst));       \
                (dest)->opcode = OP_ICONST;     \
                (dest)->inst_c0 = (val);        \
                (dest)->type = STACK_I4;        \
        } while (0)

#define PARAM_REGS 6

static AMD64_Reg_No param_regs [] = { AMD64_RDI, AMD64_RSI, AMD64_RDX, AMD64_RCX, AMD64_R8, AMD64_R9 };

static AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX };

static inline void
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
{
        ainfo->offset = *stack_size;

        if (*gr >= PARAM_REGS) {
                ainfo->storage = ArgOnStack;
                (*stack_size) += sizeof (gpointer);
        }
        else {
                ainfo->storage = ArgInIReg;
                ainfo->reg = param_regs [*gr];
                (*gr) ++;
        }
}

#define FLOAT_PARAM_REGS 8

static inline void
add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
{
        ainfo->offset = *stack_size;

        if (*gr >= FLOAT_PARAM_REGS) {
                ainfo->storage = ArgOnStack;
                (*stack_size) += sizeof (gpointer);
        }
        else {
                /* A double register */
                if (is_double)
                        ainfo->storage = ArgInDoubleSSEReg;
                else
                        ainfo->storage = ArgInFloatSSEReg;
                ainfo->reg = *gr;
                (*gr) += 1;
        }
}

typedef enum ArgumentClass {
        ARG_CLASS_NO_CLASS,
        ARG_CLASS_MEMORY,
        ARG_CLASS_INTEGER,
        ARG_CLASS_SSE
} ArgumentClass;

static ArgumentClass
merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
{
        ArgumentClass class2 = ARG_CLASS_NO_CLASS;
        MonoType *ptype;

        ptype = mono_type_get_underlying_type (type);
        switch (ptype->type) {
        case MONO_TYPE_BOOLEAN:
        case MONO_TYPE_CHAR:
        case MONO_TYPE_I1:
        case MONO_TYPE_U1:
        case MONO_TYPE_I2:
        case MONO_TYPE_U2:
        case MONO_TYPE_I4:
        case MONO_TYPE_U4:
        case MONO_TYPE_I:
        case MONO_TYPE_U:
        case MONO_TYPE_STRING:
        case MONO_TYPE_OBJECT:
        case MONO_TYPE_CLASS:
        case MONO_TYPE_SZARRAY:
        case MONO_TYPE_PTR:
        case MONO_TYPE_FNPTR:
        case MONO_TYPE_ARRAY:
        case MONO_TYPE_I8:
        case MONO_TYPE_U8:
                class2 = ARG_CLASS_INTEGER;
                break;
        case MONO_TYPE_R4:
        case MONO_TYPE_R8:
                class2 = ARG_CLASS_SSE;
                break;

        case MONO_TYPE_TYPEDBYREF:
                g_assert_not_reached ();

        case MONO_TYPE_GENERICINST:
                if (!mono_type_generic_inst_is_valuetype (ptype)) {
                        class2 = ARG_CLASS_INTEGER;
                        break;
                }
                /* fall through */
        case MONO_TYPE_VALUETYPE: {
                MonoMarshalType *info = mono_marshal_load_type_info (ptype->data.klass);
                int i;

                for (i = 0; i < info->num_fields; ++i) {
                        class2 = class1;
                        class2 = merge_argument_class_from_type (info->fields [i].field->type, class2);
                }
                break;
        }
        default:
                g_assert_not_reached ();
        }

        /* Merge */
        if (class1 == class2)
                ;
        else if (class1 == ARG_CLASS_NO_CLASS)
                class1 = class2;
        else if ((class1 == ARG_CLASS_MEMORY) || (class2 == ARG_CLASS_MEMORY))
                class1 = ARG_CLASS_MEMORY;
        else if ((class1 == ARG_CLASS_INTEGER) || (class2 == ARG_CLASS_INTEGER))
                class1 = ARG_CLASS_INTEGER;
        else
                class1 = ARG_CLASS_SSE;

        return class1;
}
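
/*
 * Worked example (illustration only, type hypothetical): for
 *
 *   struct Pair { long l; double d; };
 *
 * classifying the first eightbyte starts from ARG_CLASS_NO_CLASS and merges
 * in INTEGER for 'l' (NO_CLASS + INTEGER -> INTEGER); the second eightbyte
 * merges in SSE for 'd' (NO_CLASS + SSE -> SSE). Had both fields shared one
 * eightbyte, INTEGER + SSE would merge to INTEGER under the rules above.
 */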

static void
add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
               gboolean is_return,
               guint32 *gr, guint32 *fr, guint32 *stack_size)
{
        guint32 size, quad, nquads, i;
        ArgumentClass args [2];
        MonoMarshalType *info;
        MonoClass *klass;

        klass = mono_class_from_mono_type (type);
        if (sig->pinvoke)
                size = mono_type_native_stack_size (&klass->byval_arg, NULL);
        else
                size = mono_type_stack_size (&klass->byval_arg, NULL);

        if (!sig->pinvoke || (size == 0) || (size > 16)) {
                /* Always pass in memory */
                ainfo->offset = *stack_size;
                *stack_size += ALIGN_TO (size, 8);
                ainfo->storage = ArgOnStack;

                return;
        }

        /* FIXME: Handle structs smaller than 8 bytes */
        //if ((size % 8) != 0)
        //      NOT_IMPLEMENTED;

        if (size > 8)
                nquads = 2;
        else
                nquads = 1;

        /*
         * Implement the algorithm from section 3.2.3 of the X86_64 ABI.
         * The X87 and SSEUP stuff is left out since there are no such types in
         * the CLR.
         */
        info = mono_marshal_load_type_info (klass);
        g_assert (info);
        if (info->native_size > 16) {
                ainfo->offset = *stack_size;
                *stack_size += ALIGN_TO (info->native_size, 8);
                ainfo->storage = ArgOnStack;

                return;
        }

        /* Initialize both quads so the memory check below is well defined
         * even when nquads == 1 */
        args [0] = args [1] = ARG_CLASS_NO_CLASS;

        for (quad = 0; quad < nquads; ++quad) {
                int size, align;
                ArgumentClass class1;

                class1 = ARG_CLASS_NO_CLASS;
                for (i = 0; i < info->num_fields; ++i) {
                        size = mono_marshal_type_size (info->fields [i].field->type,
                                                       info->fields [i].mspec,
                                                       &align, TRUE, klass->unicode);
                        if ((info->fields [i].offset < 8) && (info->fields [i].offset + size) > 8) {
                                /* Unaligned field */
                                NOT_IMPLEMENTED;
                        }

                        /* Skip fields in the other quad */
                        if ((quad == 0) && (info->fields [i].offset >= 8))
                                continue;
                        if ((quad == 1) && (info->fields [i].offset < 8))
                                continue;

                        class1 = merge_argument_class_from_type (info->fields [i].field->type, class1);
                }
                g_assert (class1 != ARG_CLASS_NO_CLASS);
                args [quad] = class1;
        }

        /* Post merger cleanup */
        if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY))
                args [0] = args [1] = ARG_CLASS_MEMORY;

        /* Allocate registers */
        {
                int orig_gr = *gr;
                int orig_fr = *fr;

                ainfo->storage = ArgValuetypeInReg;
                ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
                for (quad = 0; quad < nquads; ++quad) {
                        switch (args [quad]) {
                        case ARG_CLASS_INTEGER:
                                if (*gr >= PARAM_REGS)
                                        args [quad] = ARG_CLASS_MEMORY;
                                else {
                                        ainfo->pair_storage [quad] = ArgInIReg;
                                        if (is_return)
                                                ainfo->pair_regs [quad] = return_regs [*gr];
                                        else
                                                ainfo->pair_regs [quad] = param_regs [*gr];
                                        (*gr) ++;
                                }
                                break;
                        case ARG_CLASS_SSE:
                                if (*fr >= FLOAT_PARAM_REGS)
                                        args [quad] = ARG_CLASS_MEMORY;
                                else {
                                        ainfo->pair_storage [quad] = ArgInDoubleSSEReg;
                                        ainfo->pair_regs [quad] = *fr;
                                        (*fr) ++;
                                }
                                break;
                        case ARG_CLASS_MEMORY:
                                break;
                        default:
                                g_assert_not_reached ();
                        }
                }

                if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY)) {
                        /* Revert possible register assignments */
                        *gr = orig_gr;
                        *fr = orig_fr;

                        ainfo->offset = *stack_size;
                        *stack_size += ALIGN_TO (info->native_size, 8);
                        ainfo->storage = ArgOnStack;
                }
        }
}
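
/*
 * Continuing the struct Pair example from above (illustration only):
 * passing it as the first argument of a pinvoke call, with gr == 0 and
 * fr == 0 on entry, ends with
 *
 *   ainfo->storage          == ArgValuetypeInReg
 *   ainfo->pair_storage [0] == ArgInIReg          (pair_regs [0] = AMD64_RDI)
 *   ainfo->pair_storage [1] == ArgInDoubleSSEReg  (pair_regs [1] = 0, i.e. %xmm0)
 *
 * and leaves gr == 1, fr == 1 for the arguments that follow.
 */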

/*
 * get_call_info:
 *
 *  Obtain information about a call according to the calling convention.
 * For AMD64, see the "System V ABI, x86-64 Architecture Processor Supplement
 * Draft Version 0.23" document for more information.
 */
static CallInfo*
get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
{
        guint32 i, gr, fr;
        MonoType *ret_type;
        int n = sig->hasthis + sig->param_count;
        guint32 stack_size = 0;
        CallInfo *cinfo;

        cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

        gr = 0;
        fr = 0;

        /* return value */
        {
                ret_type = mono_type_get_underlying_type (sig->ret);
                switch (ret_type->type) {
                case MONO_TYPE_BOOLEAN:
                case MONO_TYPE_I1:
                case MONO_TYPE_U1:
                case MONO_TYPE_I2:
                case MONO_TYPE_U2:
                case MONO_TYPE_CHAR:
                case MONO_TYPE_I4:
                case MONO_TYPE_U4:
                case MONO_TYPE_I:
                case MONO_TYPE_U:
                case MONO_TYPE_PTR:
                case MONO_TYPE_FNPTR:
                case MONO_TYPE_CLASS:
                case MONO_TYPE_OBJECT:
                case MONO_TYPE_SZARRAY:
                case MONO_TYPE_ARRAY:
                case MONO_TYPE_STRING:
                        cinfo->ret.storage = ArgInIReg;
                        cinfo->ret.reg = AMD64_RAX;
                        break;
                case MONO_TYPE_U8:
                case MONO_TYPE_I8:
                        cinfo->ret.storage = ArgInIReg;
                        cinfo->ret.reg = AMD64_RAX;
                        break;
                case MONO_TYPE_R4:
                        cinfo->ret.storage = ArgInFloatSSEReg;
                        cinfo->ret.reg = AMD64_XMM0;
                        break;
                case MONO_TYPE_R8:
                        cinfo->ret.storage = ArgInDoubleSSEReg;
                        cinfo->ret.reg = AMD64_XMM0;
                        break;
                case MONO_TYPE_GENERICINST:
                        if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
                                cinfo->ret.storage = ArgInIReg;
                                cinfo->ret.reg = AMD64_RAX;
                                break;
                        }
                        /* fall through */
                case MONO_TYPE_VALUETYPE: {
                        guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;

                        add_valuetype (sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
                        if (cinfo->ret.storage == ArgOnStack)
                                /* The caller passes the address where the value is stored */
                                add_general (&gr, &stack_size, &cinfo->ret);
                        break;
                }
                case MONO_TYPE_TYPEDBYREF:
                        /* Same as a valuetype with size 24 */
                        add_general (&gr, &stack_size, &cinfo->ret);
                        break;
                case MONO_TYPE_VOID:
                        break;
                default:
                        g_error ("Can't handle as return value 0x%x", sig->ret->type);
                }
        }

        /* this */
        if (sig->hasthis)
                add_general (&gr, &stack_size, cinfo->args + 0);

        if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
                gr = PARAM_REGS;
                fr = FLOAT_PARAM_REGS;

                /* Emit the signature cookie just before the implicit arguments */
                add_general (&gr, &stack_size, &cinfo->sig_cookie);
        }

        for (i = 0; i < sig->param_count; ++i) {
                ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
                MonoType *ptype;

                if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
                        /* We always pass the sig cookie on the stack for simplicity */
                        /* 
                         * Prevent implicit arguments + the sig cookie from being passed 
                         * in registers.
                         */
                        gr = PARAM_REGS;
                        fr = FLOAT_PARAM_REGS;

                        /* Emit the signature cookie just before the implicit arguments */
                        add_general (&gr, &stack_size, &cinfo->sig_cookie);
                }

                if (sig->params [i]->byref) {
                        add_general (&gr, &stack_size, ainfo);
                        continue;
                }
                ptype = mono_type_get_underlying_type (sig->params [i]);
                switch (ptype->type) {
                case MONO_TYPE_BOOLEAN:
                case MONO_TYPE_I1:
                case MONO_TYPE_U1:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_I2:
                case MONO_TYPE_U2:
                case MONO_TYPE_CHAR:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_I4:
                case MONO_TYPE_U4:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_I:
                case MONO_TYPE_U:
                case MONO_TYPE_PTR:
                case MONO_TYPE_FNPTR:
                case MONO_TYPE_CLASS:
                case MONO_TYPE_OBJECT:
                case MONO_TYPE_STRING:
                case MONO_TYPE_SZARRAY:
                case MONO_TYPE_ARRAY:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_GENERICINST:
                        if (!mono_type_generic_inst_is_valuetype (ptype)) {
                                add_general (&gr, &stack_size, ainfo);
                                break;
                        }
                        /* fall through */
                case MONO_TYPE_VALUETYPE:
                        add_valuetype (sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
                        break;
                case MONO_TYPE_TYPEDBYREF:
                        stack_size += sizeof (MonoTypedRef);
                        ainfo->storage = ArgOnStack;
                        break;
                case MONO_TYPE_U8:
                case MONO_TYPE_I8:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_R4:
                        add_float (&fr, &stack_size, ainfo, FALSE);
                        break;
                case MONO_TYPE_R8:
                        add_float (&fr, &stack_size, ainfo, TRUE);
                        break;
                default:
                        g_assert_not_reached ();
                }
        }

        if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
                gr = PARAM_REGS;
                fr = FLOAT_PARAM_REGS;

                /* Emit the signature cookie just before the implicit arguments */
                add_general (&gr, &stack_size, &cinfo->sig_cookie);
        }

        if (stack_size & 0x8) {
                /* The AMD64 ABI requires each stack frame to be 16 byte aligned */
                cinfo->need_stack_align = TRUE;
                stack_size += 8;
        }

        cinfo->stack_usage = stack_size;
        cinfo->reg_usage = gr;
        cinfo->freg_usage = fr;
        return cinfo;
}
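
/*
 * Worked example (illustration only; hypothetical signature): for a static
 * managed method
 *
 *   void Foo (int a, double b, long c)
 *
 * get_call_info () fills in
 *
 *   args [0]: ArgInIReg,         reg = AMD64_RDI    (a)
 *   args [1]: ArgInDoubleSSEReg, reg = 0 (%xmm0)    (b)
 *   args [2]: ArgInIReg,         reg = AMD64_RSI    (c)
 *   stack_usage = 0, reg_usage = 2, freg_usage = 1
 *
 * Integer and SSE registers are consumed independently, which is why 'c'
 * takes %rsi even though 'b' precedes it.
 */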

/*
 * mono_arch_get_argument_info:
 * @csig:  a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the argument area on the stack.
 */
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
        int k;
        CallInfo *cinfo = get_call_info (csig, FALSE);
        guint32 args_size = cinfo->stack_usage;

        /* The arguments are saved to a stack area in mono_arch_instrument_prolog */
        if (csig->hasthis) {
                arg_info [0].offset = 0;
        }

        for (k = 0; k < param_count; k++) {
                arg_info [k + 1].offset = ((k + csig->hasthis) * 8);
                /* FIXME: */
                arg_info [k + 1].size = 0;
        }

        g_free (cinfo);

        return args_size;
}

/* FIXME: cpuid is a stub on amd64; it always reports no features, so the
 * feature dependent optimizations below are never enabled. */
static int
cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
{
        return 0;
}

/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
        guint16 fpcw;

        /* spec compliance requires running with double precision */
        __asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
        fpcw &= ~X86_FPCW_PRECC_MASK;
        fpcw |= X86_FPCW_PREC_DOUBLE;
        __asm__  __volatile__ ("fldcw %0\n": : "m" (fpcw));
        __asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
}

/*
 * This function returns the optimizations supported on this cpu.
 * (The misspelling in the name matches the mono_arch_ interface used
 * throughout the JIT.)
 */
guint32
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
        int eax, ebx, ecx, edx;
        guint32 opts = 0;

        /* FIXME: AMD64 */

        *exclude_mask = 0;
        /* Feature Flags function, flags returned in EDX. */
        if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
                if (edx & (1 << 15)) {
                        opts |= MONO_OPT_CMOV;
                        if (edx & 1)
                                opts |= MONO_OPT_FCMOV;
                        else
                                *exclude_mask |= MONO_OPT_FCMOV;
                } else
                        *exclude_mask |= MONO_OPT_CMOV;
        }
        return opts;
}

gboolean
mono_amd64_is_sse2 (void)
{
        return use_sse2;
}

static gboolean
is_regsize_var (MonoType *t) {
        if (t->byref)
                return TRUE;
        t = mono_type_get_underlying_type (t);
        switch (t->type) {
        case MONO_TYPE_I4:
        case MONO_TYPE_U4:
        case MONO_TYPE_I:
        case MONO_TYPE_U:
        case MONO_TYPE_PTR:
        case MONO_TYPE_FNPTR:
                return TRUE;
        case MONO_TYPE_OBJECT:
        case MONO_TYPE_STRING:
        case MONO_TYPE_CLASS:
        case MONO_TYPE_SZARRAY:
        case MONO_TYPE_ARRAY:
                return TRUE;
        case MONO_TYPE_GENERICINST:
                if (!mono_type_generic_inst_is_valuetype (t))
                        return TRUE;
                return FALSE;
        case MONO_TYPE_VALUETYPE:
                return FALSE;
        }
        return FALSE;
}

GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
        GList *vars = NULL;
        int i;

        for (i = 0; i < cfg->num_varinfo; i++) {
                MonoInst *ins = cfg->varinfo [i];
                MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

                /* unused vars */
                if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
                        continue;

                if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
                    (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
                        continue;

                /* We don't allocate I1 variables to registers because there is no
                 * simple way to sign extend 8 bit quantities in caller saved
                 * registers on x86 */
                if (is_regsize_var (ins->inst_vtype) || (ins->inst_vtype->type == MONO_TYPE_BOOLEAN) ||
                    (ins->inst_vtype->type == MONO_TYPE_U1) || (ins->inst_vtype->type == MONO_TYPE_U2) ||
                    (ins->inst_vtype->type == MONO_TYPE_I2) || (ins->inst_vtype->type == MONO_TYPE_CHAR)) {
                        g_assert (MONO_VARINFO (cfg, i)->reg == -1);
                        g_assert (i == vmv->idx);
                        vars = g_list_prepend (vars, vmv);
                }
        }

        vars = mono_varlist_sort (cfg, vars, 0);

        return vars;
}

/**
 * mono_arch_compute_omit_fp:
 *
 *   Determine whether the frame pointer can be eliminated.
 */
static void
mono_arch_compute_omit_fp (MonoCompile *cfg)
{
        MonoMethodSignature *sig;
        MonoMethodHeader *header;
        int i;
        CallInfo *cinfo;

        if (cfg->arch.omit_fp_computed)
                return;

        header = mono_method_get_header (cfg->method);

        sig = mono_method_signature (cfg->method);

        cinfo = get_call_info (sig, FALSE);

        /*
         * FIXME: Remove some of the restrictions.
         */
        cfg->arch.omit_fp = TRUE;
        cfg->arch.omit_fp_computed = TRUE;

        /* Temporarily disable this when running in the debugger until we have support
         * for this in the debugger. */
        if (mono_debug_using_mono_debugger ())
                cfg->arch.omit_fp = FALSE;

        if (!debug_omit_fp ())
                cfg->arch.omit_fp = FALSE;
        /*
        if (cfg->method->save_lmf)
                cfg->arch.omit_fp = FALSE;
        */
        if (cfg->flags & MONO_CFG_HAS_ALLOCA)
                cfg->arch.omit_fp = FALSE;
        if (header->num_clauses)
                cfg->arch.omit_fp = FALSE;
        if (cfg->param_area)
                cfg->arch.omit_fp = FALSE;
        if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
                cfg->arch.omit_fp = FALSE;
        if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
                (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
                cfg->arch.omit_fp = FALSE;
        for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
                ArgInfo *ainfo = &cinfo->args [i];

                if (ainfo->storage == ArgOnStack) {
                        /* 
                         * The stack offset can only be determined when the frame
                         * size is known.
                         */
                        cfg->arch.omit_fp = FALSE;
                }
        }

        if (cfg->num_varinfo > 10000) {
                /* Avoid hitting the stack_alloc_size < (1 << 16) assertion in emit_epilog () */
                cfg->arch.omit_fp = FALSE;
        }

        g_free (cinfo);
}

GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
        GList *regs = NULL;

        mono_arch_compute_omit_fp (cfg);

        if (cfg->arch.omit_fp)
                regs = g_list_prepend (regs, (gpointer)AMD64_RBP);

        /* We use the callee saved registers for global allocation */
        regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
        regs = g_list_prepend (regs, (gpointer)AMD64_R12);
        regs = g_list_prepend (regs, (gpointer)AMD64_R13);
        regs = g_list_prepend (regs, (gpointer)AMD64_R14);
        regs = g_list_prepend (regs, (gpointer)AMD64_R15);

        return regs;
}

/*
 * mono_arch_regalloc_cost:
 *
 *  Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
        MonoInst *ins = cfg->varinfo [vmv->idx];

        if (cfg->method->save_lmf)
                /* The register is already saved */
                /* subtract 1 for the invisible store in the prolog */
                return (ins->opcode == OP_ARG) ? 0 : 1;
        else
                /* push+pop */
                return (ins->opcode == OP_ARG) ? 1 : 2;
}

void
mono_arch_allocate_vars (MonoCompile *cfg)
{
        MonoMethodSignature *sig;
        MonoMethodHeader *header;
        MonoInst *inst;
        int i, offset;
        guint32 locals_stack_size, locals_stack_align;
        gint32 *offsets;
        CallInfo *cinfo;

        header = mono_method_get_header (cfg->method);

        sig = mono_method_signature (cfg->method);

        cinfo = get_call_info (sig, FALSE);

        mono_arch_compute_omit_fp (cfg);

        /*
         * We use the ABI calling conventions for managed code as well.
         * Exception: valuetypes are never passed or returned in registers.
         */

        if (cfg->arch.omit_fp) {
                cfg->flags |= MONO_CFG_HAS_SPILLUP;
                cfg->frame_reg = AMD64_RSP;
                offset = 0;
        } else {
                /* Locals are allocated backwards from %fp */
                cfg->frame_reg = AMD64_RBP;
                offset = 0;
        }

        cfg->arch.reg_save_area_offset = offset;

        /* Reserve space for callee saved registers */
        for (i = 0; i < AMD64_NREG; ++i)
                if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
                        offset += sizeof (gpointer);
                }

        if (cfg->method->save_lmf) {
                /* Reserve stack space for saving LMF + argument regs */
                guint32 size = sizeof (MonoLMF);

                if (lmf_tls_offset == -1)
                        /* Need to save argument regs too */
                        size += (AMD64_NREG * 8) + (8 * 8);

                if (cfg->arch.omit_fp) {
                        cfg->arch.lmf_offset = offset;
                        offset += size;
                }
                else {
                        offset += size;
                        cfg->arch.lmf_offset = -offset;
                }
        }

        if (sig->ret->type != MONO_TYPE_VOID) {
                switch (cinfo->ret.storage) {
                case ArgInIReg:
                case ArgInFloatSSEReg:
                case ArgInDoubleSSEReg:
                        if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
                                /* The register is volatile */
                                cfg->ret->opcode = OP_REGOFFSET;
                                cfg->ret->inst_basereg = cfg->frame_reg;
                                if (cfg->arch.omit_fp) {
                                        cfg->ret->inst_offset = offset;
                                        offset += 8;
                                } else {
                                        offset += 8;
                                        cfg->ret->inst_offset = -offset;
                                }
                        }
                        else {
                                cfg->ret->opcode = OP_REGVAR;
                                cfg->ret->inst_c0 = cinfo->ret.reg;
                        }
                        break;
                case ArgValuetypeInReg:
                        /* Allocate a local to hold the result, the epilog will copy it to the correct place */
                        g_assert (!cfg->arch.omit_fp);
                        offset += 16;
                        cfg->ret->opcode = OP_REGOFFSET;
                        cfg->ret->inst_basereg = cfg->frame_reg;
                        cfg->ret->inst_offset = - offset;
                        break;
                default:
                        g_assert_not_reached ();
                }
                cfg->ret->dreg = cfg->ret->inst_c0;
        }

        /* Allocate locals */
        offsets = mono_allocate_stack_slots_full (cfg, cfg->arch.omit_fp ? FALSE : TRUE, &locals_stack_size, &locals_stack_align);
        if (locals_stack_align) {
                offset += (locals_stack_align - 1);
                offset &= ~(locals_stack_align - 1);
        }
        for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
                if (offsets [i] != -1) {
                        MonoInst *inst = cfg->varinfo [i];
                        inst->opcode = OP_REGOFFSET;
                        inst->inst_basereg = cfg->frame_reg;
                        if (cfg->arch.omit_fp)
                                inst->inst_offset = (offset + offsets [i]);
                        else
                                inst->inst_offset = - (offset + offsets [i]);
                        //printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
                }
        }
        g_free (offsets);
        offset += locals_stack_size;

        if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
                g_assert (!cfg->arch.omit_fp);
                g_assert (cinfo->sig_cookie.storage == ArgOnStack);
                cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
        }

        for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
                inst = cfg->varinfo [i];
                if (inst->opcode != OP_REGVAR) {
                        ArgInfo *ainfo = &cinfo->args [i];
                        gboolean inreg = TRUE;
                        MonoType *arg_type;

                        if (sig->hasthis && (i == 0))
                                arg_type = &mono_defaults.object_class->byval_arg;
                        else
                                arg_type = sig->params [i - sig->hasthis];

                        /* FIXME: Allocate volatile arguments to registers */
                        if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
                                inreg = FALSE;

                        /* 
                         * Under AMD64, all registers used to pass arguments to functions
                         * are volatile across calls.
                         * FIXME: Optimize this.
                         */
                        if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg) || (ainfo->storage == ArgValuetypeInReg))
                                inreg = FALSE;

                        inst->opcode = OP_REGOFFSET;

                        switch (ainfo->storage) {
                        case ArgInIReg:
                        case ArgInFloatSSEReg:
                        case ArgInDoubleSSEReg:
                                inst->opcode = OP_REGVAR;
                                inst->dreg = ainfo->reg;
                                break;
                        case ArgOnStack:
                                g_assert (!cfg->arch.omit_fp);
                                inst->opcode = OP_REGOFFSET;
                                inst->inst_basereg = cfg->frame_reg;
                                inst->inst_offset = ainfo->offset + ARGS_OFFSET;
                                break;
                        case ArgValuetypeInReg:
                                break;
                        default:
                                NOT_IMPLEMENTED;
                        }

                        if (!inreg && (ainfo->storage != ArgOnStack)) {
                                inst->opcode = OP_REGOFFSET;
                                inst->inst_basereg = cfg->frame_reg;
                                /* These arguments are saved to the stack in the prolog */
                                if (cfg->arch.omit_fp) {
                                        inst->inst_offset = offset;
                                        offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof (gpointer) : sizeof (gpointer);
                                } else {
                                        offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof (gpointer) : sizeof (gpointer);
                                        inst->inst_offset = - offset;
                                }
                        }
                }
        }

        cfg->stack_offset = offset;

        g_free (cinfo);
}

void
mono_arch_create_vars (MonoCompile *cfg)
{
        MonoMethodSignature *sig;
        CallInfo *cinfo;

        sig = mono_method_signature (cfg->method);

        cinfo = get_call_info (sig, FALSE);

        if (cinfo->ret.storage == ArgValuetypeInReg)
                cfg->ret_var_is_local = TRUE;

        g_free (cinfo);
}

static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, MonoInst *arg, ArgStorage storage, int reg, MonoInst *tree)
{
        switch (storage) {
        case ArgInIReg:
                arg->opcode = OP_OUTARG_REG;
                arg->inst_left = tree;
                arg->inst_right = (MonoInst*)call;
                arg->unused = reg;
                call->used_iregs |= 1 << reg;
                break;
        case ArgInFloatSSEReg:
                arg->opcode = OP_AMD64_OUTARG_XMMREG_R4;
                arg->inst_left = tree;
                arg->inst_right = (MonoInst*)call;
                arg->unused = reg;
                call->used_fregs |= 1 << reg;
                break;
        case ArgInDoubleSSEReg:
                arg->opcode = OP_AMD64_OUTARG_XMMREG_R8;
                arg->inst_left = tree;
                arg->inst_right = (MonoInst*)call;
                arg->unused = reg;
                call->used_fregs |= 1 << reg;
                break;
        default:
                g_assert_not_reached ();
        }
}

/* FIXME: we need an alignment solution for enter_method and mono_arch_call_opcode;
 * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info.
 */

static int
arg_storage_to_ldind (ArgStorage storage)
{
        switch (storage) {
        case ArgInIReg:
                return CEE_LDIND_I;
        case ArgInDoubleSSEReg:
                return CEE_LDIND_R8;
        case ArgInFloatSSEReg:
                return CEE_LDIND_R4;
        default:
                g_assert_not_reached ();
        }

        return -1;
}

/*
 * Take the arguments and generate the arch-specific instructions to
 * properly call the function in call. This includes pushing, moving
 * arguments to the right register, etc.
 * Issue: who does the spilling if needed, and when?
 */
MonoCallInst*
mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
        MonoInst *arg, *in;
        MonoMethodSignature *sig;
        int i, n, stack_size;
        CallInfo *cinfo;
        ArgInfo *ainfo;

        stack_size = 0;

        sig = call->signature;
        n = sig->param_count + sig->hasthis;

        cinfo = get_call_info (sig, sig->pinvoke);

        for (i = 0; i < n; ++i) {
                ainfo = cinfo->args + i;

                if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
                        MonoMethodSignature *tmp_sig;

                        /* Emit the signature cookie just before the implicit arguments */
                        MonoInst *sig_arg;
                        /* FIXME: Add support for signature tokens to AOT */
                        cfg->disable_aot = TRUE;

                        g_assert (cinfo->sig_cookie.storage == ArgOnStack);

                        /*
                         * mono_ArgIterator_Setup assumes the signature cookie is 
                         * passed first and all the arguments which were before it are
                         * passed on the stack after the signature. So compensate by 
                         * passing a different signature.
                         */
                        tmp_sig = mono_metadata_signature_dup (call->signature);
                        tmp_sig->param_count -= call->signature->sentinelpos;
                        tmp_sig->sentinelpos = 0;
                        memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

                        MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
                        sig_arg->inst_p0 = tmp_sig;

                        MONO_INST_NEW (cfg, arg, OP_OUTARG);
                        arg->inst_left = sig_arg;
                        arg->type = STACK_PTR;

                        /* prepend, so they get reversed */
                        arg->next = call->out_args;
                        call->out_args = arg;
                }

                if (is_virtual && i == 0) {
                        /* the argument will be attached to the call instruction */
                        in = call->args [i];
                } else {
                        MONO_INST_NEW (cfg, arg, OP_OUTARG);
                        in = call->args [i];
                        arg->cil_code = in->cil_code;
                        arg->inst_left = in;
                        arg->type = in->type;
                        /* prepend, so they get reversed */
                        arg->next = call->out_args;
                        call->out_args = arg;

                        if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT (sig->params [i - sig->hasthis]))) {
                                gint align;
                                guint32 size;

                                if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
                                        size = sizeof (MonoTypedRef);
                                        align = sizeof (gpointer);
                                }
                                else if (sig->pinvoke)
                                        size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
                                else {
                                        /* 
                                         * Other backends use mono_type_stack_size (), but that
                                         * aligns the size to 8, which is larger than the size of
                                         * the source, leading to reads of invalid memory if the
                                         * source is at the end of address space.
                                         */
                                        size = mono_class_value_size (in->klass, &align);
                                }
                                if (ainfo->storage == ArgValuetypeInReg) {
                                        if (ainfo->pair_storage [1] == ArgNone) {
                                                MonoInst *load;

                                                /* Simpler case */

                                                MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [0]));
                                                load->inst_left = in;

                                                add_outarg_reg (cfg, call, arg, ainfo->pair_storage [0], ainfo->pair_regs [0], load);
                                        }
                                        else {
                                                /* Trees can't be shared so make a copy */
                                                MonoInst *vtaddr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
                                                MonoInst *load, *load2, *offset_ins;

                                                /* Reg1 */
                                                MONO_INST_NEW (cfg, load, CEE_LDIND_I);
                                                load->ssa_op = MONO_SSA_LOAD;
                                                load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];

                                                NEW_ICONST (cfg, offset_ins, 0);
                                                MONO_INST_NEW (cfg, load2, CEE_ADD);
                                                load2->inst_left = load;
                                                load2->inst_right = offset_ins;

                                                MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [0]));
                                                load->inst_left = load2;

                                                add_outarg_reg (cfg, call, arg, ainfo->pair_storage [0], ainfo->pair_regs [0], load);

                                                /* Reg2 */
                                                MONO_INST_NEW (cfg, load, CEE_LDIND_I);
                                                load->ssa_op = MONO_SSA_LOAD;
                                                load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];

                                                NEW_ICONST (cfg, offset_ins, 8);
                                                MONO_INST_NEW (cfg, load2, CEE_ADD);
                                                load2->inst_left = load;
                                                load2->inst_right = offset_ins;

                                                MONO_INST_NEW (cfg, load, arg_storage_to_ldind (ainfo->pair_storage [1]));
                                                load->inst_left = load2;

                                                MONO_INST_NEW (cfg, arg, OP_OUTARG);
                                                arg->cil_code = in->cil_code;
                                                arg->type = in->type;
                                                /* prepend, so they get reversed */
                                                arg->next = call->out_args;
                                                call->out_args = arg;

                                                add_outarg_reg (cfg, call, arg, ainfo->pair_storage [1], ainfo->pair_regs [1], load);

                                                /* Prepend a copy inst */
                                                MONO_INST_NEW (cfg, arg, CEE_STIND_I);
                                                arg->cil_code = in->cil_code;
                                                arg->ssa_op = MONO_SSA_STORE;
                                                arg->inst_left = vtaddr;
                                                arg->inst_right = in;
                                                arg->type = in->type;

                                                /* prepend, so they get reversed */
                                                arg->next = call->out_args;
                                                call->out_args = arg;
                                        }
                                }
                                else {
                                        arg->opcode = OP_OUTARG_VT;
                                        arg->klass = in->klass;
                                        arg->unused = sig->pinvoke;
                                        arg->inst_imm = size;
                                }
                        }
                        else {
                                switch (ainfo->storage) {
                                case ArgInIReg:
                                        add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
                                        break;
                                case ArgInFloatSSEReg:
                                case ArgInDoubleSSEReg:
                                        add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
                                        break;
                                case ArgOnStack:
                                        arg->opcode = OP_OUTARG;
                                        if (!sig->params [i - sig->hasthis]->byref) {
                                                if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4)
                                                        arg->opcode = OP_OUTARG_R4;
                                                else if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8)
                                                        arg->opcode = OP_OUTARG_R8;
                                        }
                                        break;
                                default:
                                        g_assert_not_reached ();
                                }
                        }
                }
        }

        if (cinfo->need_stack_align) {
                MONO_INST_NEW (cfg, arg, OP_AMD64_OUTARG_ALIGN_STACK);
                /* prepend, so they get reversed */
                arg->next = call->out_args;
                call->out_args = arg;
        }

        call->stack_usage = cinfo->stack_usage;
        cfg->param_area = MAX (cfg->param_area, call->stack_usage);
        cfg->flags |= MONO_CFG_HAS_CALLS;

        g_free (cinfo);

        return call;
}
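
/*
 * Illustration of the signature cookie transform above (hypothetical
 * signature): for a vararg method
 *
 *   void Log (string fmt, __arglist)    with sentinelpos == 1
 *
 * invoked as Log (fmt, 1, 2), the full call signature has param_count == 3,
 * so tmp_sig gets param_count = 3 - 1 = 2 and its params [] array holds
 * just the two implicit arguments; the cookie instruction itself is
 * prepended to out_args at the sentinel position like any other out arg.
 */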

#define EMIT_COND_BRANCH(ins,cond,sign) \
if (ins->flags & MONO_INST_BRLABEL) { \
        if (ins->inst_i0->inst_c0) { \
                x86_branch (code, cond, cfg->native_code + ins->inst_i0->inst_c0, sign); \
        } else { \
                mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
                if ((cfg->opt & MONO_OPT_BRANCH) && \
                    x86_is_imm8 (ins->inst_i0->inst_c1 - cpos)) \
                        x86_branch8 (code, cond, 0, sign); \
                else \
                        x86_branch32 (code, cond, 0, sign); \
        } \
} else { \
        if (ins->inst_true_bb->native_offset) { \
                x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
        } else { \
                mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
                if ((cfg->opt & MONO_OPT_BRANCH) && \
                    x86_is_imm8 (ins->inst_true_bb->max_offset - cpos)) \
                        x86_branch8 (code, cond, 0, sign); \
                else \
                        x86_branch32 (code, cond, 0, sign); \
        } \
}
1403
1404 /* emit an exception if the condition fails */
1405 #define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name)            \
1406         do {                                                        \
1407                 MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
1408                 if (tins == NULL) {                                                                             \
1409                         mono_add_patch_info (cfg, code - cfg->native_code,   \
1410                                         MONO_PATCH_INFO_EXC, exc_name);  \
1411                         x86_branch32 (code, cond, 0, signed);               \
1412                 } else {        \
1413                         EMIT_COND_BRANCH (tins, cond, signed);  \
1414                 }                       \
1415         } while (0)
1416
1417 #define EMIT_FPCOMPARE(code) do { \
1418         amd64_fcompp (code); \
1419         amd64_fnstsw (code); \
1420 } while (0)
1421
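/*
 * Run an x87-only operation on a value held in an xmm register: spill it
 * just below the stack pointer, compute on the fp stack, then reload the
 * result into the destination xmm register.
 */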
1422 #define EMIT_SSE2_FPFUNC(code, op, dreg, sreg1) do { \
1423         amd64_movsd_membase_reg (code, AMD64_RSP, -8, (sreg1)); \
1424         amd64_fld_membase (code, AMD64_RSP, -8, TRUE); \
1425         amd64_ ##op (code); \
1426         amd64_fst_membase (code, AMD64_RSP, -8, TRUE, TRUE); \
1427         amd64_movsd_reg_membase (code, (dreg), AMD64_RSP, -8); \
1428 } while (0)
1429
1430 static guint8*
1431 emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data)
1432 {
1433         mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
1434
1435         if (cfg->compile_aot) {
1436                 amd64_call_membase (code, AMD64_RIP, 0);
1437         }
1438         else {
1439                 gboolean near_call = FALSE;
1440
1441                 /*
1442                  * Indirect calls are expensive, so try to make a near call if possible.
1443                  * The caller's memory is allocated by the code manager, so it is
1444                  * guaranteed to be within a 32 bit offset of other managed code.
1445                  */
1446
1447                 if (patch_type != MONO_PATCH_INFO_ABS) {
1448                         /* The target is in memory allocated using the code manager */
1449                         near_call = TRUE;
1450
1451                         if ((patch_type == MONO_PATCH_INFO_METHOD) || (patch_type == MONO_PATCH_INFO_METHOD_JUMP)) {
1452                                 if (((MonoMethod*)data)->klass->image->assembly->aot_module)
1453                                         /* The callee might be an AOT method */
1454                                         near_call = FALSE;
1455                         }
1456
1457                         if (patch_type == MONO_PATCH_INFO_INTERNAL_METHOD) {
1458                                 /* 
1459                                  * The call might go directly to a native function without
1460                                  * the wrapper.
1461                                  */
1462                                 MonoJitICallInfo *mi = mono_find_jit_icall_by_name (data);
1463                                 if (mi) {
1464                                         gconstpointer target = mono_icall_get_wrapper (mi);
1465                                         if ((((guint64)target) >> 32) != 0)
1466                                                 near_call = FALSE;
1467                                 }
1468                         }
1469                 }
1470                 else {
1471                         if (mono_find_class_init_trampoline_by_addr (data))
1472                                 near_call = TRUE;
1473                         else {
1474                                 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (data);
1475                                 if (info) {
1476                                         if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && 
1477                                                 strstr (cfg->method->name, info->name)) {
1478                                                 /* A call to the wrapped function */
1479                                                 if ((((guint64)data) >> 32) == 0)
1480                                                         near_call = TRUE;
1481                                         }
1482                                         else if (info->func == info->wrapper) {
1483                                                 /* No wrapper */
1484                                                 if ((((guint64)info->func) >> 32) == 0)
1485                                                         near_call = TRUE;
1486                                         }
1487                                         else
1488                                                 near_call = TRUE;
1489                                 }
1490                                 else if ((((guint64)data) >> 32) == 0)
1491                                         near_call = TRUE;
1492                         }
1493                 }
1494
1495                 if (cfg->method->dynamic)
1496                         /* These methods are allocated using malloc */
1497                         near_call = FALSE;
1498
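                /* A near call is a rel32 call which is patched later; the far path
                 * materializes the full 64 bit target in the scratch register. */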
1499                 if (near_call) {
1500                         amd64_call_code (code, 0);
1501                 }
1502                 else {
1503                         amd64_set_reg_template (code, GP_SCRATCH_REG);
1504                         amd64_call_reg (code, GP_SCRATCH_REG);
1505                 }
1506         }
1507
1508         return code;
1509 }
1510
1511 /* FIXME: Add more instructions */
1512 #define INST_IGNORES_CFLAGS(ins) (((ins)->opcode == CEE_BR) || ((ins)->opcode == OP_STORE_MEMBASE_IMM) || ((ins)->opcode == OP_STOREI8_MEMBASE_REG) || ((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_ICONST) || ((ins)->opcode == OP_I8CONST) || ((ins)->opcode == OP_LOAD_MEMBASE))
1513
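/*
 * Basic block local peephole pass: each case below pattern-matches the
 * current instruction (and sometimes the previous one, last_ins) and either
 * rewrites it into a cheaper form or unlinks it from the instruction list.
 */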
1514 static void
1515 peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1516 {
1517         MonoInst *ins, *last_ins = NULL;
1518         ins = bb->code;
1519
1520         while (ins) {
1521
1522                 switch (ins->opcode) {
1523                 case OP_ICONST:
1524                 case OP_I8CONST:
1525                         /* reg = 0 -> XOR (reg, reg) */
1526                         /* XOR sets cflags on x86, so we can't always do it */
1527                         if (ins->inst_c0 == 0 && (ins->next && INST_IGNORES_CFLAGS (ins->next))) {
1528                                 ins->opcode = CEE_XOR;
1529                                 ins->sreg1 = ins->dreg;
1530                                 ins->sreg2 = ins->dreg;
1531                         }
1532                         break;
1533                 case OP_MUL_IMM: 
1534                         /* remove unnecessary multiplication with 1 */
1535                         if (ins->inst_imm == 1) {
1536                                 if (ins->dreg != ins->sreg1) {
1537                                         ins->opcode = OP_MOVE;
1538                                 } else {
1539                                         if (last_ins) last_ins->next = ins->next;
1540                                         ins = ins->next;
1541                                         continue;
1542                                 }
1543                         }
1544                         break;
1545                 case OP_COMPARE_IMM:
1546                         /* OP_COMPARE_IMM (reg, 0) 
1547                          * --> 
1548                          * OP_AMD64_TEST_NULL (reg) 
1549                          */
1550                         if (!ins->inst_imm)
1551                                 ins->opcode = OP_AMD64_TEST_NULL;
1552                         break;
1553                 case OP_ICOMPARE_IMM:
1554                         if (!ins->inst_imm)
1555                                 ins->opcode = OP_X86_TEST_NULL;
1556                         break;
1557                 case OP_AMD64_ICOMPARE_MEMBASE_IMM:
1558                         /* 
1559                          * OP_STORE_MEMBASE_REG reg, offset(basereg)
1560                          * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
1561                          * -->
1562                          * OP_STORE_MEMBASE_REG reg, offset(basereg)
1563                          * OP_COMPARE_IMM reg, imm
1564                          *
1565                          * Note: if imm = 0 then OP_COMPARE_IMM is replaced with OP_X86_TEST_NULL
1566                          */
1567                         if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
1568                             ins->inst_basereg == last_ins->inst_destbasereg &&
1569                             ins->inst_offset == last_ins->inst_offset) {
1570                                         ins->opcode = OP_ICOMPARE_IMM;
1571                                         ins->sreg1 = last_ins->sreg1;
1572
1573                                         /* check if we can remove cmp reg,0 with test null */
1574                                         if (!ins->inst_imm)
1575                                                 ins->opcode = OP_X86_TEST_NULL;
1576                                 }
1577
1578                         break;
1579                 case OP_LOAD_MEMBASE:
1580                 case OP_LOADI4_MEMBASE:
1581                         /* 
1582                          * Note: if reg1 = reg2 the load op is removed
1583                          *
1584                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1585                          * OP_LOAD_MEMBASE offset(basereg), reg2
1586                          * -->
1587                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1588                          * OP_MOVE reg1, reg2
1589                          */
1590                         if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG 
1591                                          || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1592                             ins->inst_basereg == last_ins->inst_destbasereg &&
1593                             ins->inst_offset == last_ins->inst_offset) {
1594                                 if (ins->dreg == last_ins->sreg1) {
1595                                         last_ins->next = ins->next;                             
1596                                         ins = ins->next;                                
1597                                         continue;
1598                                 } else {
1599                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1600                                         ins->opcode = OP_MOVE;
1601                                         ins->sreg1 = last_ins->sreg1;
1602                                 }
1603
1604                         /* 
1605                          * Note: reg1 must be different from the basereg in the second load
1606                          * Note: if reg1 == reg2 the second load is removed
1607                          *
1608                          * OP_LOAD_MEMBASE offset(basereg), reg1
1609                          * OP_LOAD_MEMBASE offset(basereg), reg2
1610                          * -->
1611                          * OP_LOAD_MEMBASE offset(basereg), reg1
1612                          * OP_MOVE reg1, reg2
1613                          */
1614                         } else if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1615                                            || last_ins->opcode == OP_LOAD_MEMBASE) &&
1616                               ins->inst_basereg != last_ins->dreg &&
1617                               ins->inst_basereg == last_ins->inst_basereg &&
1618                               ins->inst_offset == last_ins->inst_offset) {
1619
1620                                 if (ins->dreg == last_ins->dreg) {
1621                                         last_ins->next = ins->next;                             
1622                                         ins = ins->next;                                
1623                                         continue;
1624                                 } else {
1625                                         ins->opcode = OP_MOVE;
1626                                         ins->sreg1 = last_ins->dreg;
1627                                 }
1628
1629                                 //g_assert_not_reached ();
1630
1631 #if 0
1632                         /* 
1633                          * OP_STORE_MEMBASE_IMM imm, offset(basereg) 
1634                          * OP_LOAD_MEMBASE offset(basereg), reg
1635                          * -->
1636                          * OP_STORE_MEMBASE_IMM imm, offset(basereg) 
1637                          * OP_ICONST reg, imm
1638                          */
1639                         } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1640                                                 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1641                                    ins->inst_basereg == last_ins->inst_destbasereg &&
1642                                    ins->inst_offset == last_ins->inst_offset) {
1643                                 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1644                                 ins->opcode = OP_ICONST;
1645                                 ins->inst_c0 = last_ins->inst_imm;
1646                                 g_assert_not_reached (); // check this rule
1647 #endif
1648                         }
1649                         break;
1650                 case OP_LOADU1_MEMBASE:
1651                 case OP_LOADI1_MEMBASE:
1652                         /* 
1653                          * Note: if reg1 = reg2 the load op is removed
1654                          *
1655                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1656                          * OP_LOAD_MEMBASE offset(basereg), reg2
1657                          * -->
1658                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1659                          * OP_MOVE reg1, reg2
1660                          */
1661                         if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1662                                         ins->inst_basereg == last_ins->inst_destbasereg &&
1663                                         ins->inst_offset == last_ins->inst_offset) {
1664                                 if (ins->dreg == last_ins->sreg1) {
1665                                         last_ins->next = ins->next;                             
1666                                         ins = ins->next;                                
1667                                         continue;
1668                                 } else {
1669                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1670                                         ins->opcode = OP_MOVE;
1671                                         ins->sreg1 = last_ins->sreg1;
1672                                 }
1673                         }
1674                         break;
1675                 case OP_LOADU2_MEMBASE:
1676                 case OP_LOADI2_MEMBASE:
1677                         /* 
1678                          * Note: if reg1 = reg2 the load op is removed
1679                          *
1680                          * OP_STORE_MEMBASE_REG reg1, offset(basereg) 
1681                          * OP_LOAD_MEMBASE offset(basereg), reg2
1682                          * -->
1683                          * OP_STORE_MEMBASE_REG reg1, offset(basereg)
1684                          * OP_MOVE reg1, reg2
1685                          */
1686                         if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1687                                         ins->inst_basereg == last_ins->inst_destbasereg &&
1688                                         ins->inst_offset == last_ins->inst_offset) {
1689                                 if (ins->dreg == last_ins->sreg1) {
1690                                         last_ins->next = ins->next;                             
1691                                         ins = ins->next;                                
1692                                         continue;
1693                                 } else {
1694                                         //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1695                                         ins->opcode = OP_MOVE;
1696                                         ins->sreg1 = last_ins->sreg1;
1697                                 }
1698                         }
1699                         break;
1700                 case CEE_CONV_I4:
1701                 case CEE_CONV_U4:
1702                 case OP_MOVE:
1703                         /*
1704                          * Removes:
1705                          *
1706                          * OP_MOVE reg, reg 
1707                          */
1708                         if (ins->dreg == ins->sreg1) {
1709                                 if (last_ins)
1710                                         last_ins->next = ins->next;                             
1711                                 ins = ins->next;
1712                                 continue;
1713                         }
1714                         /* 
1715                          * Removes:
1716                          *
1717                          * OP_MOVE sreg, dreg 
1718                          * OP_MOVE dreg, sreg
1719                          */
1720                         if (last_ins && last_ins->opcode == OP_MOVE &&
1721                             ins->sreg1 == last_ins->dreg &&
1722                             ins->dreg == last_ins->sreg1) {
1723                                 last_ins->next = ins->next;                             
1724                                 ins = ins->next;                                
1725                                 continue;
1726                         }
1727                         break;
1728                 }
1729                 last_ins = ins;
1730                 ins = ins->next;
1731         }
1732         bb->last_ins = last_ins;
1733 }
1734
1735 static void
1736 insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *to_insert)
1737 {
1738         if (ins == NULL) {
1739                 ins = bb->code;
1740                 bb->code = to_insert;
1741                 to_insert->next = ins;
1742         }
1743         else {
1744                 to_insert->next = ins->next;
1745                 ins->next = to_insert;
1746         }
1747 }
1748
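/* Allocate a new instruction from the method mempool and link it in before
 * the instruction currently being lowered (i.e. after last_ins). */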
1749 #define NEW_INS(cfg,dest,op) do {       \
1750                 (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst));       \
1751                 (dest)->opcode = (op);  \
1752         insert_after_ins (bb, last_ins, (dest)); \
1753         } while (0)
1754
1755 /*
1756  * mono_arch_lowering_pass:
1757  *
1758  *  Converts complex opcodes into simpler ones so that each IR instruction
1759  * corresponds to one machine instruction.
1760  */
1761 static void
1762 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1763 {
1764         MonoInst *ins, *temp, *last_ins = NULL;
1765         ins = bb->code;
1766
1767         if (bb->max_ireg > cfg->rs->next_vireg)
1768                 cfg->rs->next_vireg = bb->max_ireg;
1769         if (bb->max_freg > cfg->rs->next_vfreg)
1770                 cfg->rs->next_vfreg = bb->max_freg;
1771
1772         /*
1773          * FIXME: Need to add more instructions, but the current machine 
1774          * description can't model some parts of the composite instructions like
1775          * cdq.
1776          */
1777         while (ins) {
1778                 switch (ins->opcode) {
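                /* div/idiv have no immediate forms, so the constant divisor
                 * has to be materialized into a register first */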
1779                 case OP_DIV_IMM:
1780                 case OP_REM_IMM:
1781                 case OP_IDIV_IMM:
1782                 case OP_IREM_IMM:
1783                         NEW_INS (cfg, temp, OP_ICONST);
1784                         temp->inst_c0 = ins->inst_imm;
1785                         temp->dreg = mono_regstate_next_int (cfg->rs);
1786                         switch (ins->opcode) {
1787                         case OP_DIV_IMM:
1788                                 ins->opcode = OP_LDIV;
1789                                 break;
1790                         case OP_REM_IMM:
1791                                 ins->opcode = OP_LREM;
1792                                 break;
1793                         case OP_IDIV_IMM:
1794                                 ins->opcode = OP_IDIV;
1795                                 break;
1796                         case OP_IREM_IMM:
1797                                 ins->opcode = OP_IREM;
1798                                 break;
1799                         }
1800                         ins->sreg2 = temp->dreg;
1801                         break;
1802                 case OP_COMPARE_IMM:
1803                         if (!amd64_is_imm32 (ins->inst_imm)) {
1804                                 NEW_INS (cfg, temp, OP_I8CONST);
1805                                 temp->inst_c0 = ins->inst_imm;
1806                                 temp->dreg = mono_regstate_next_int (cfg->rs);
1807                                 ins->opcode = OP_COMPARE;
1808                                 ins->sreg2 = temp->dreg;
1809                         }
1810                         break;
1811                 case OP_LOAD_MEMBASE:
1812                 case OP_LOADI8_MEMBASE:
1813                         if (!amd64_is_imm32 (ins->inst_offset)) {
1814                                 NEW_INS (cfg, temp, OP_I8CONST);
1815                                 temp->inst_c0 = ins->inst_offset;
1816                                 temp->dreg = mono_regstate_next_int (cfg->rs);
1817                                 ins->opcode = OP_AMD64_LOADI8_MEMINDEX;
1818                                 ins->inst_indexreg = temp->dreg;
1819                         }
1820                         break;
1821                 case OP_STORE_MEMBASE_IMM:
1822                 case OP_STOREI8_MEMBASE_IMM:
1823                         if (!amd64_is_imm32 (ins->inst_imm)) {
1824                                 NEW_INS (cfg, temp, OP_I8CONST);
1825                                 temp->inst_c0 = ins->inst_imm;
1826                                 temp->dreg = mono_regstate_next_int (cfg->rs);
1827                                 ins->opcode = OP_STOREI8_MEMBASE_REG;
1828                                 ins->sreg1 = temp->dreg;
1829                         }
1830                         break;
1831                 default:
1832                         break;
1833                 }
1834                 last_ins = ins;
1835                 ins = ins->next;
1836         }
1837         bb->last_ins = last_ins;
1838
1839         bb->max_ireg = cfg->rs->next_vireg;
1840         bb->max_freg = cfg->rs->next_vfreg;
1841 }
1842
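/*
 * Condition codes for the conditional branch opcodes; the unsigned variants
 * map to the same cc as the signed ones, the signedness being applied
 * separately when the branch is emitted (see opcode_to_x86_cond below).
 */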
1843 static const int 
1844 branch_cc_table [] = {
1845         X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1846         X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
1847         X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
1848 };
1849
1850 static int
1851 opcode_to_x86_cond (int opcode)
1852 {
1853         switch (opcode) {
1854         case OP_IBEQ:
1855                 return X86_CC_EQ;
1856         case OP_IBNE_UN:
1857                 return X86_CC_NE;
1858         case OP_IBLT:
1859                 return X86_CC_LT;
1860         case OP_IBLT_UN:
1861                 return X86_CC_LT;
1862         case OP_IBGT:
1863                 return X86_CC_GT;
1864         case OP_IBGT_UN:
1865                 return X86_CC_GT;
1866         case OP_IBGE:
1867                 return X86_CC_GE;
1868         case OP_IBGE_UN:
1869                 return X86_CC_GE;
1870         case OP_IBLE:
1871                 return X86_CC_LE;
1872         case OP_IBLE_UN:
1873                 return X86_CC_LE;
1874         case OP_COND_EXC_IOV:
1875                 return X86_CC_O;
1876         case OP_COND_EXC_IC:
1877                 return X86_CC_C;
1878         default:
1879                 g_assert_not_reached ();
1880         }
1881
1882         return -1;
1883 }
1884
1885 /*#include "cprop.c"*/
1886
1887 /*
1888  * Local register allocation.
1889  * We first scan the list of instructions and we save the liveness info of
1890  * each register (when the register is first used, when its value is set, etc.).
1891  * We also reverse the list of instructions (in the InstList list) because assigning
1892  * registers backwards allows for more tricks to be used.
1893  */
1894 void
1895 mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
1896 {
1897         if (!bb->code)
1898                 return;
1899
1900         mono_arch_lowering_pass (cfg, bb);
1901
1902         mono_local_regalloc (cfg, bb);
1903 }
1904
1905 static unsigned char*
1906 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
1907 {
1908         if (use_sse2) {
1909                 amd64_sse_cvttsd2si_reg_reg (code, dreg, sreg);
1910         }
1911         else {
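                /* The x87 default rounding mode is round-to-nearest, but the
                 * conversion must truncate: set the RC bits (0xc00) of the saved
                 * control word to round-toward-zero before the fistp, then restore. */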
1912                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 16);
1913                 x86_fnstcw_membase(code, AMD64_RSP, 0);
1914                 amd64_mov_reg_membase (code, dreg, AMD64_RSP, 0, 2);
1915                 amd64_alu_reg_imm (code, X86_OR, dreg, 0xc00);
1916                 amd64_mov_membase_reg (code, AMD64_RSP, 2, dreg, 2);
1917                 amd64_fldcw_membase (code, AMD64_RSP, 2);
1918                 amd64_push_reg (code, AMD64_RAX); // SP = SP - 8
1919                 amd64_fist_pop_membase (code, AMD64_RSP, 0, size == 8);
1920                 amd64_pop_reg (code, dreg);
1921                 amd64_fldcw_membase (code, AMD64_RSP, 0);
1922                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
1923         }
1924
1925         if (size == 1)
1926                 amd64_widen_reg (code, dreg, dreg, is_signed, FALSE);
1927         else if (size == 2)
1928                 amd64_widen_reg (code, dreg, dreg, is_signed, TRUE);
1929         return code;
1930 }
1931
1932 static unsigned char*
1933 mono_emit_stack_alloc (guchar *code, MonoInst* tree)
1934 {
1935         int sreg = tree->sreg1;
1936         int need_touch = FALSE;
1937
1938 #if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
1939         if (!(tree->flags & MONO_INST_INIT))
1940                 need_touch = TRUE;
1941 #endif
1942
1943         if (need_touch) {
1944                 guint8* br[5];
1945
1946                 /*
1947                  * Under Windows:
1948                  * If requested stack size is larger than one page,
1949                  * perform stack-touch operation
1950                  */
1951                 /*
1952                  * Generate stack probe code.
1953                  * Under Windows, it is necessary to allocate one page at a time,
1954                  * "touching" stack after each successful sub-allocation. This is
1955                  * because of the way stack growth is implemented - there is a
1956                  * guard page before the lowest stack page that is currently committed.
1957                  * Stack normally grows sequentially so OS traps access to the
1958                  * guard page and commits more pages when needed.
1959                  */
1960                 amd64_test_reg_imm (code, sreg, ~0xFFF);
1961                 br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
1962
1963                 br[2] = code; /* loop */
1964                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
1965                 amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
1966                 amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
1967                 amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
1968                 br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
1969                 amd64_patch (br[3], br[2]);
1970                 amd64_test_reg_reg (code, sreg, sreg);
1971                 br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
1972                 amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
1973
1974                 br[1] = code; x86_jump8 (code, 0);
1975
1976                 amd64_patch (br[0], code);
1977                 amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
1978                 amd64_patch (br[1], code);
1979                 amd64_patch (br[4], code);
1980         }
1981         else
1982                 amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1);
1983
1984         if (tree->flags & MONO_INST_INIT) {
1985                 int offset = 0;
1986                 if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) {
1987                         amd64_push_reg (code, AMD64_RAX);
1988                         offset += 8;
1989                 }
1990                 if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) {
1991                         amd64_push_reg (code, AMD64_RCX);
1992                         offset += 8;
1993                 }
1994                 if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) {
1995                         amd64_push_reg (code, AMD64_RDI);
1996                         offset += 8;
1997                 }
1998                 
1999                 amd64_shift_reg_imm (code, X86_SHR, sreg, 4);
2000                 if (sreg != AMD64_RCX)
2001                         amd64_mov_reg_reg (code, AMD64_RCX, sreg, 8);
2002                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
2003                                 
2004                 amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset);
2005                 amd64_cld (code);
2006                 amd64_prefix (code, X86_REP_PREFIX);
2007                 amd64_stosl (code);
2008                 
2009                 if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
2010                         amd64_pop_reg (code, AMD64_RDI);
2011                 if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX)
2012                         amd64_pop_reg (code, AMD64_RCX);
2013                 if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX)
2014                         amd64_pop_reg (code, AMD64_RAX);
2015         }
2016         return code;
2017 }
2018
2019 static guint8*
2020 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
2021 {
2022         CallInfo *cinfo;
2023         guint32 quad;
2024
2025         /* Move return value to the target register */
2026         /* FIXME: do this in the local reg allocator */
2027         switch (ins->opcode) {
2028         case CEE_CALL:
2029         case OP_CALL_REG:
2030         case OP_CALL_MEMBASE:
2031         case OP_LCALL:
2032         case OP_LCALL_REG:
2033         case OP_LCALL_MEMBASE:
2034                 g_assert (ins->dreg == AMD64_RAX);
2035                 break;
2036         case OP_FCALL:
2037         case OP_FCALL_REG:
2038         case OP_FCALL_MEMBASE:
2039                 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
2040                         if (use_sse2)
2041                                 amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, AMD64_XMM0);
2042                         else {
2043                                 /* FIXME: optimize this */
2044                                 amd64_movss_membase_reg (code, AMD64_RSP, -8, AMD64_XMM0);
2045                                 amd64_fld_membase (code, AMD64_RSP, -8, FALSE);
2046                         }
2047                 }
2048                 else {
2049                         if (use_sse2) {
2050                                 if (ins->dreg != AMD64_XMM0)
2051                                         amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM0);
2052                         }
2053                         else {
2054                                 /* FIXME: optimize this */
2055                                 amd64_movsd_membase_reg (code, AMD64_RSP, -8, AMD64_XMM0);
2056                                 amd64_fld_membase (code, AMD64_RSP, -8, TRUE);
2057                         }
2058                 }
2059                 break;
2060         case OP_VCALL:
2061         case OP_VCALL_REG:
2062         case OP_VCALL_MEMBASE:
2063                 cinfo = get_call_info (((MonoCallInst*)ins)->signature, FALSE);
2064                 if (cinfo->ret.storage == ArgValuetypeInReg) {
2065                         /* Pop the destination address from the stack */
2066                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
2067                         amd64_pop_reg (code, AMD64_RCX);
2068                         
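                        /* The returned value type comes back in up to two
                         * registers ("quads"); store each one to the destination */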
2069                         for (quad = 0; quad < 2; quad ++) {
2070                                 switch (cinfo->ret.pair_storage [quad]) {
2071                                 case ArgInIReg:
2072                                         amd64_mov_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad], 8);
2073                                         break;
2074                                 case ArgInFloatSSEReg:
2075                                         amd64_movss_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
2076                                         break;
2077                                 case ArgInDoubleSSEReg:
2078                                         amd64_movsd_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
2079                                         break;
2080                                 case ArgNone:
2081                                         break;
2082                                 default:
2083                                         NOT_IMPLEMENTED;
2084                                 }
2085                         }
2086                 }
2087                 g_free (cinfo);
2088                 break;
2089         }
2090
2091         return code;
2092 }
2093
2094 /*
2095  * emit_load_volatile_arguments:
2096  *
2097  *  Load volatile arguments from the stack to the original input registers.
2098  * Required before a tail call.
2099  */
2100 static guint8*
2101 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2102 {
2103         MonoMethod *method = cfg->method;
2104         MonoMethodSignature *sig;
2105         MonoInst *inst;
2106         CallInfo *cinfo;
2107         guint32 i;
2108
2109         /* FIXME: Generate intermediate code instead */
2110
2111         sig = mono_method_signature (method);
2112
2113         cinfo = get_call_info (sig, FALSE);
2114         
2115         /* This is the opposite of the code in emit_prolog */
2116
2117         if (sig->ret->type != MONO_TYPE_VOID) {
2118                 if ((cinfo->ret.storage == ArgInIReg) && (cfg->ret->opcode != OP_REGVAR)) {
2119                         amd64_mov_reg_membase (code, cinfo->ret.reg, cfg->ret->inst_basereg, cfg->ret->inst_offset, 8);
2120                 }
2121         }
2122
2123         for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2124                 ArgInfo *ainfo = cinfo->args + i;
2125                 MonoType *arg_type;
2126                 inst = cfg->varinfo [i];
2127
2128                 if (sig->hasthis && (i == 0))
2129                         arg_type = &mono_defaults.object_class->byval_arg;
2130                 else
2131                         arg_type = sig->params [i - sig->hasthis];
2132
2133                 if (inst->opcode != OP_REGVAR) {
2134                         switch (ainfo->storage) {
2135                         case ArgInIReg: {
2136                                 guint32 size = 8;
2137
2138                                 /* FIXME: I1 etc */
2139                                 amd64_mov_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset, size);
2140                                 break;
2141                         }
2142                         case ArgInFloatSSEReg:
2143                                 amd64_movss_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2144                                 break;
2145                         case ArgInDoubleSSEReg:
2146                                 amd64_movsd_reg_membase (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2147                                 break;
2148                         default:
2149                                 break;
2150                         }
2151                 }
2152                 else {
2153                         g_assert (ainfo->storage == ArgInIReg);
2154
2155                         amd64_mov_reg_reg (code, ainfo->reg, inst->dreg, 8);
2156                 }
2157         }
2158
2159         g_free (cinfo);
2160
2161         return code;
2162 }
2163
2164 #define REAL_PRINT_REG(text,reg) \
2165 mono_assert (reg >= 0); \
2166 amd64_push_reg (code, AMD64_RAX); \
2167 amd64_push_reg (code, AMD64_RDX); \
2168 amd64_push_reg (code, AMD64_RCX); \
2169 amd64_push_reg (code, reg); \
2170 amd64_push_imm (code, reg); \
2171 amd64_push_imm (code, text " %d %p\n"); \
2172 amd64_mov_reg_imm (code, AMD64_RAX, printf); \
2173 amd64_call_reg (code, AMD64_RAX); \
2174 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 3*8); /* three 8 byte pushes */ \
2175 amd64_pop_reg (code, AMD64_RCX); \
2176 amd64_pop_reg (code, AMD64_RDX); \
2177 amd64_pop_reg (code, AMD64_RAX);
2178
2179 /* benchmark and set based on cpu */
2180 #define LOOP_ALIGNMENT 8
2181 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
2182
2183 void
2184 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2185 {
2186         MonoInst *ins;
2187         MonoCallInst *call;
2188         guint offset;
2189         guint8 *code = cfg->native_code + cfg->code_len;
2190         MonoInst *last_ins = NULL;
2191         guint last_offset = 0;
2192         int max_len, cpos;
2193
2194         if (cfg->opt & MONO_OPT_PEEPHOLE)
2195                 peephole_pass (cfg, bb);
2196
2197         if (cfg->opt & MONO_OPT_LOOP) {
2198                 int pad, align = LOOP_ALIGNMENT;
2199                 /* set alignment depending on cpu */
2200                 if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) {
2201                         pad = align - pad;
2202                         /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
2203                         amd64_padding (code, pad);
2204                         cfg->code_len += pad;
2205                         bb->native_offset = cfg->code_len;
2206                 }
2207         }
2208
2209         if (cfg->verbose_level > 2)
2210                 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2211
2212         cpos = bb->max_offset;
2213
2214         if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2215                 MonoProfileCoverageInfo *cov = cfg->coverage_info;
2216                 g_assert (!cfg->compile_aot);
2217                 cpos += 6;
2218
2219                 cov->data [bb->dfn].cil_code = bb->cil_code;
2220                 /* this is not thread safe, but good enough */
2221                 amd64_inc_mem (code, (guint64)&cov->data [bb->dfn].count); 
2222         }
2223
2224         offset = code - cfg->native_code;
2225
2226         ins = bb->code;
2227         while (ins) {
2228                 offset = code - cfg->native_code;
2229
2230                 max_len = ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
2231
2232                 if (offset > (cfg->code_size - max_len - 16)) {
2233                         cfg->code_size *= 2;
2234                         cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2235                         code = cfg->native_code + offset;
2236                         mono_jit_stats.code_reallocs++;
2237                 }
2238
2239                 mono_debug_record_line_number (cfg, ins, offset);
2240
2241                 switch (ins->opcode) {
2242                 case OP_BIGMUL:
2243                         amd64_mul_reg (code, ins->sreg2, TRUE);
2244                         break;
2245                 case OP_BIGMUL_UN:
2246                         amd64_mul_reg (code, ins->sreg2, FALSE);
2247                         break;
2248                 case OP_X86_SETEQ_MEMBASE:
2249                         amd64_set_membase (code, X86_CC_EQ, ins->inst_basereg, ins->inst_offset, TRUE);
2250                         break;
2251                 case OP_STOREI1_MEMBASE_IMM:
2252                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1);
2253                         break;
2254                 case OP_STOREI2_MEMBASE_IMM:
2255                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2);
2256                         break;
2257                 case OP_STOREI4_MEMBASE_IMM:
2258                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4);
2259                         break;
2260                 case OP_STOREI1_MEMBASE_REG:
2261                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1);
2262                         break;
2263                 case OP_STOREI2_MEMBASE_REG:
2264                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
2265                         break;
2266                 case OP_STORE_MEMBASE_REG:
2267                 case OP_STOREI8_MEMBASE_REG:
2268                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 8);
2269                         break;
2270                 case OP_STOREI4_MEMBASE_REG:
2271                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
2272                         break;
2273                 case OP_STORE_MEMBASE_IMM:
2274                 case OP_STOREI8_MEMBASE_IMM:
2275                         g_assert (amd64_is_imm32 (ins->inst_imm));
2276                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
2277                         break;
2278                 case CEE_LDIND_I:
2279                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, sizeof (gpointer));
2280                         break;
2281                 case CEE_LDIND_I4:
2282                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, 4);
2283                         break;
2284                 case CEE_LDIND_U4:
2285                         amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, 4);
2286                         break;
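                /* Only RAX can be loaded directly from a 64 bit absolute address,
                 * so materialize the address in the destination register first */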
2287                 case OP_LOADU4_MEM:
2288                         amd64_mov_reg_imm (code, ins->dreg, ins->inst_p0);
2289                         amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
2290                         break;
2291                 case OP_LOAD_MEMBASE:
2292                 case OP_LOADI8_MEMBASE:
2293                         g_assert (amd64_is_imm32 (ins->inst_offset));
2294                         amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof (gpointer));
2295                         break;
2296                 case OP_LOADI4_MEMBASE:
2297                         amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2298                         break;
2299                 case OP_LOADU4_MEMBASE:
2300                         amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4);
2301                         break;
2302                 case OP_LOADU1_MEMBASE:
2303                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE);
2304                         break;
2305                 case OP_LOADI1_MEMBASE:
2306                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
2307                         break;
2308                 case OP_LOADU2_MEMBASE:
2309                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE);
2310                         break;
2311                 case OP_LOADI2_MEMBASE:
2312                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
2313                         break;
2314                 case OP_AMD64_LOADI8_MEMINDEX:
2315                         amd64_mov_reg_memindex_size (code, ins->dreg, ins->inst_basereg, 0, ins->inst_indexreg, 0, 8);
2316                         break;
2317                 case CEE_CONV_I1:
2318                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
2319                         break;
2320                 case CEE_CONV_I2:
2321                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
2322                         break;
2323                 case CEE_CONV_U1:
2324                         amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE);
2325                         break;
2326                 case CEE_CONV_U2:
2327                         amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE);
2328                         break;
2329                 case CEE_CONV_U8:
2330                 case CEE_CONV_U:
2331                         /* Clean out the upper word */
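                        /* (a 4 byte register-to-register mov zero-extends to 64 bits) */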
2332                         amd64_mov_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
2333                         break;
2334                 case CEE_CONV_I8:
2335                 case CEE_CONV_I:
2336                         amd64_movsxd_reg_reg (code, ins->dreg, ins->sreg1);
2337                         break;                  
2338                 case OP_COMPARE:
2339                 case OP_LCOMPARE:
2340                         amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
2341                         break;
2342                 case OP_COMPARE_IMM:
2343                         g_assert (amd64_is_imm32 (ins->inst_imm));
2344                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
2345                         break;
2346                 case OP_X86_COMPARE_REG_MEMBASE:
2347                         amd64_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
2348                         break;
2349                 case OP_X86_TEST_NULL:
2350                         amd64_test_reg_reg_size (code, ins->sreg1, ins->sreg1, 4);
2351                         break;
2352                 case OP_AMD64_TEST_NULL:
2353                         amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
2354                         break;
2355                 case OP_X86_ADD_MEMBASE_IMM:
2356                         /* FIXME: Make a 64 version too */
2357                         amd64_alu_membase_imm_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
2358                         break;
2359                 case OP_X86_ADD_MEMBASE:
2360                         amd64_alu_reg_membase_size (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
2361                         break;
2362                 case OP_X86_SUB_MEMBASE_IMM:
2363                         g_assert (amd64_is_imm32 (ins->inst_imm));
2364                         amd64_alu_membase_imm_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
2365                         break;
2366                 case OP_X86_SUB_MEMBASE:
2367                         amd64_alu_reg_membase_size (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
2368                         break;
2369                 case OP_X86_INC_MEMBASE:
2370                         amd64_inc_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
2371                         break;
2372                 case OP_X86_INC_REG:
2373                         amd64_inc_reg_size (code, ins->dreg, 4);
2374                         break;
2375                 case OP_X86_DEC_MEMBASE:
2376                         amd64_dec_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
2377                         break;
2378                 case OP_X86_DEC_REG:
2379                         amd64_dec_reg_size (code, ins->dreg, 4);
2380                         break;
2381                 case OP_X86_MUL_MEMBASE:
2382                         amd64_imul_reg_membase_size (code, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
2383                         break;
2384                 case OP_AMD64_ICOMPARE_MEMBASE_REG:
2385                         amd64_alu_membase_reg_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
2386                         break;
2387                 case OP_AMD64_ICOMPARE_MEMBASE_IMM:
2388                         amd64_alu_membase_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
2389                         break;
2390                 case OP_AMD64_ICOMPARE_REG_MEMBASE:
2391                         amd64_alu_reg_membase_size (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
2392                         break;
2393                 case CEE_BREAK:
2394                         amd64_breakpoint (code);
2395                         break;
2396                 case OP_ADDCC:
2397                 case CEE_ADD:
2398                         amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
2399                         break;
2400                 case OP_ADC:
2401                         amd64_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
2402                         break;
2403                 case OP_ADD_IMM:
2404                         g_assert (amd64_is_imm32 (ins->inst_imm));
2405                         amd64_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
2406                         break;
2407                 case OP_ADC_IMM:
2408                         g_assert (amd64_is_imm32 (ins->inst_imm));
2409                         amd64_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
2410                         break;
2411                 case OP_SUBCC:
2412                 case CEE_SUB:
2413                         amd64_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
2414                         break;
2415                 case OP_SBB:
2416                         amd64_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
2417                         break;
2418                 case OP_SUB_IMM:
2419                         g_assert (amd64_is_imm32 (ins->inst_imm));
2420                         amd64_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
2421                         break;
2422                 case OP_SBB_IMM:
2423                         g_assert (amd64_is_imm32 (ins->inst_imm));
2424                         amd64_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
2425                         break;
2426                 case CEE_AND:
2427                         amd64_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
2428                         break;
2429                 case OP_AND_IMM:
2430                         g_assert (amd64_is_imm32 (ins->inst_imm));
2431                         amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
2432                         break;
2433                 case CEE_MUL:
2434                 case OP_LMUL:
2435                         amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
2436                         break;
2437                 case OP_MUL_IMM:
2438                 case OP_LMUL_IMM:
2439                 case OP_IMUL_IMM: {
2440                         guint32 size = (ins->opcode == OP_IMUL_IMM) ? 4 : 8;
2441                         
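                        /* Strength-reduce multiplies by small constants: LEA computes
                         * base + index * scale for scale 1, 2, 4 or 8, so imm values like
                         * 3, 5 and 9 take a single LEA, and composites chain LEA/ADD/SHL */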
2442                         switch (ins->inst_imm) {
2443                         case 2:
2444                                 /* MOV r1, r2 */
2445                                 /* ADD r1, r1 */
2446                                 if (ins->dreg != ins->sreg1)
2447                                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, size);
2448                                 amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
2449                                 break;
2450                         case 3:
2451                                 /* LEA r1, [r2 + r2*2] */
2452                                 amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
2453                                 break;
2454                         case 5:
2455                                 /* LEA r1, [r2 + r2*4] */
2456                                 amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2457                                 break;
2458                         case 6:
2459                                 /* LEA r1, [r2 + r2*2] */
2460                                 /* ADD r1, r1          */
2461                                 amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
2462                                 amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
2463                                 break;
2464                         case 9:
2465                                 /* LEA r1, [r2 + r2*8] */
2466                                 amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 3);
2467                                 break;
2468                         case 10:
2469                                 /* LEA r1, [r2 + r2*4] */
2470                                 /* ADD r1, r1          */
2471                                 amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2472                                 amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
2473                                 break;
2474                         case 12:
2475                                 /* LEA r1, [r2 + r2*2] */
2476                                 /* SHL r1, 2           */
2477                                 amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
2478                                 amd64_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
2479                                 break;
2480                         case 25:
2481                                 /* LEA r1, [r2 + r2*4] */
2482                                 /* LEA r1, [r1 + r1*4] */
2483                                 amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2484                                 amd64_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
2485                                 break;
2486                         case 100:
2487                                 /* LEA r1, [r2 + r2*4] */
2488                                 /* SHL r1, 2           */
2489                                 /* LEA r1, [r1 + r1*4] */
2490                                 amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
2491                                 amd64_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
2492                                 amd64_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
2493                                 break;
2494                         default:
2495                                 amd64_imul_reg_reg_imm_size (code, ins->dreg, ins->sreg1, ins->inst_imm, size);
2496                                 break;
2497                         }
2498                         break;
2499                 }
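                 /*
                  * Division: idiv/div divide the 128 bit value in %rdx:%rax.
                  * For signed division %rdx is filled by sign-extending %rax
                  * (the cdq below); for unsigned division it is simply zeroed
                  * with a xor.
                  */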
2500                 case CEE_DIV:
2501                 case OP_LDIV:
2502                         amd64_cdq (code);
2503                         amd64_div_reg (code, ins->sreg2, TRUE);
2504                         break;
2505                 case CEE_DIV_UN:
2506                 case OP_LDIV_UN:
2507                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
2508                         amd64_div_reg (code, ins->sreg2, FALSE);
2509                         break;
2510                 case CEE_REM:
2511                 case OP_LREM:
2512                         amd64_cdq (code);
2513                         amd64_div_reg (code, ins->sreg2, TRUE);
2514                         break;
2515                 case CEE_REM_UN:
2516                 case OP_LREM_UN:
2517                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
2518                         amd64_div_reg (code, ins->sreg2, FALSE);
2519                         break;
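                 /*
                  * DIV and REM emit identical code since the divide leaves the
                  * quotient in %rax and the remainder in %rdx; the two opcodes
                  * only differ in which register the result is taken from.
                  */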
2520                 case OP_LMUL_OVF:
2521                         amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
2522                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
2523                         break;
2524                 case CEE_OR:
2525                         amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
2526                         break;
2527                 case OP_OR_IMM:
2528                         g_assert (amd64_is_imm32 (ins->inst_imm));
2529                         amd64_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
2530                         break;
2531                 case CEE_XOR:
2532                         amd64_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
2533                         break;
2534                 case OP_XOR_IMM:
2535                         g_assert (amd64_is_imm32 (ins->inst_imm));
2536                         amd64_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
2537                         break;
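                 /*
                  * Variable shift counts must live in %cl on x86-64, so the
                  * shift cases below assert that the register allocator pinned
                  * sreg2 to %rcx.
                  */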
2538                 case CEE_SHL:
2539                 case OP_LSHL:
2540                         g_assert (ins->sreg2 == AMD64_RCX);
2541                         amd64_shift_reg (code, X86_SHL, ins->dreg);
2542                         break;
2543                 case CEE_SHR:
2544                 case OP_LSHR:
2545                         g_assert (ins->sreg2 == AMD64_RCX);
2546                         amd64_shift_reg (code, X86_SAR, ins->dreg);
2547                         break;
2548                 case OP_SHR_IMM:
2549                         g_assert (amd64_is_imm32 (ins->inst_imm));
2550                         amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
2551                         break;
2552                 case OP_LSHR_IMM:
2553                         g_assert (amd64_is_imm32 (ins->inst_imm));
2554                         amd64_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
2555                         break;
2556                 case OP_SHR_UN_IMM:
2557                         g_assert (amd64_is_imm32 (ins->inst_imm));
2558                         amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
2559                         break;
2560                 case OP_LSHR_UN_IMM:
2561                         g_assert (amd64_is_imm32 (ins->inst_imm));
2562                         amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
2563                         break;
2564                 case CEE_SHR_UN:
2565                         g_assert (ins->sreg2 == AMD64_RCX);
2566                         amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
2567                         break;
2568                 case OP_LSHR_UN:
2569                         g_assert (ins->sreg2 == AMD64_RCX);
2570                         amd64_shift_reg (code, X86_SHR, ins->dreg);
2571                         break;
2572                 case OP_SHL_IMM:
2573                         g_assert (amd64_is_imm32 (ins->inst_imm));
2574                         amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
2575                         break;
2576                 case OP_LSHL_IMM:
2577                         g_assert (amd64_is_imm32 (ins->inst_imm));
2578                         amd64_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
2579                         break;
2580
2581                 case OP_IADDCC:
2582                 case OP_IADD:
2583                         amd64_alu_reg_reg_size (code, X86_ADD, ins->sreg1, ins->sreg2, 4);
2584                         break;
2585                 case OP_IADC:
2586                         amd64_alu_reg_reg_size (code, X86_ADC, ins->sreg1, ins->sreg2, 4);
2587                         break;
2588                 case OP_IADD_IMM:
2589                         amd64_alu_reg_imm_size (code, X86_ADD, ins->dreg, ins->inst_imm, 4);
2590                         break;
2591                 case OP_IADC_IMM:
2592                         amd64_alu_reg_imm_size (code, X86_ADC, ins->dreg, ins->inst_imm, 4);
2593                         break;
2594                 case OP_ISUBCC:
2595                 case OP_ISUB:
2596                         amd64_alu_reg_reg_size (code, X86_SUB, ins->sreg1, ins->sreg2, 4);
2597                         break;
2598                 case OP_ISBB:
2599                         amd64_alu_reg_reg_size (code, X86_SBB, ins->sreg1, ins->sreg2, 4);
2600                         break;
2601                 case OP_ISUB_IMM:
2602                         amd64_alu_reg_imm_size (code, X86_SUB, ins->dreg, ins->inst_imm, 4);
2603                         break;
2604                 case OP_ISBB_IMM:
2605                         amd64_alu_reg_imm_size (code, X86_SBB, ins->dreg, ins->inst_imm, 4);
2606                         break;
2607                 case OP_IAND:
2608                         amd64_alu_reg_reg_size (code, X86_AND, ins->sreg1, ins->sreg2, 4);
2609                         break;
2610                 case OP_IAND_IMM:
2611                         amd64_alu_reg_imm_size (code, X86_AND, ins->sreg1, ins->inst_imm, 4);
2612                         break;
2613                 case OP_IOR:
2614                         amd64_alu_reg_reg_size (code, X86_OR, ins->sreg1, ins->sreg2, 4);
2615                         break;
2616                 case OP_IOR_IMM:
2617                         amd64_alu_reg_imm_size (code, X86_OR, ins->sreg1, ins->inst_imm, 4);
2618                         break;
2619                 case OP_IXOR:
2620                         amd64_alu_reg_reg_size (code, X86_XOR, ins->sreg1, ins->sreg2, 4);
2621                         break;
2622                 case OP_IXOR_IMM:
2623                         amd64_alu_reg_imm_size (code, X86_XOR, ins->sreg1, ins->inst_imm, 4);
2624                         break;
2625                 case OP_INEG:
2626                         amd64_neg_reg_size (code, ins->sreg1, 4);
2627                         break;
2628                 case OP_INOT:
2629                         amd64_not_reg_size (code, ins->sreg1, 4);
2630                         break;
2631                 case OP_ISHL:
2632                         g_assert (ins->sreg2 == AMD64_RCX);
2633                         amd64_shift_reg_size (code, X86_SHL, ins->dreg, 4);
2634                         break;
2635                 case OP_ISHR:
2636                         g_assert (ins->sreg2 == AMD64_RCX);
2637                         amd64_shift_reg_size (code, X86_SAR, ins->dreg, 4);
2638                         break;
2639                 case OP_ISHR_IMM:
2640                         amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
2641                         break;
2642                 case OP_ISHR_UN_IMM:
2643                         amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
2644                         break;
2645                 case OP_ISHR_UN:
2646                         g_assert (ins->sreg2 == AMD64_RCX);
2647                         amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
2648                         break;
2649                 case OP_ISHL_IMM:
2650                         amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
2651                         break;
2652                 case OP_IMUL:
2653                         amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
2654                         break;
2655                 case OP_IMUL_OVF:
2656                         amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
2657                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
2658                         break;
2659                 case OP_IMUL_OVF_UN:
2660                 case OP_LMUL_OVF_UN: {
2661                         /* the mul operation and the exception check should most likely be split */
2662                         int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
2663                         int size = (ins->opcode == OP_IMUL_OVF_UN) ? 4 : 8;
2664                         /*g_assert (ins->sreg2 == X86_EAX);
2665                         g_assert (ins->dreg == X86_EAX);*/
2666                         if (ins->sreg2 == X86_EAX) {
2667                                 non_eax_reg = ins->sreg1;
2668                         } else if (ins->sreg1 == X86_EAX) {
2669                                 non_eax_reg = ins->sreg2;
2670                         } else {
2671                                 /* no need to save since we're going to store to it anyway */
2672                                 if (ins->dreg != X86_EAX) {
2673                                         saved_eax = TRUE;
2674                                         amd64_push_reg (code, X86_EAX);
2675                                 }
2676                                 amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, size);
2677                                 non_eax_reg = ins->sreg2;
2678                         }
2679                         if (ins->dreg == X86_EDX) {
2680                                 if (!saved_eax) {
2681                                         saved_eax = TRUE;
2682                                         amd64_push_reg (code, X86_EAX);
2683                                 }
2684                         } else {
2685                                 saved_edx = TRUE;
2686                                 amd64_push_reg (code, X86_EDX);
2687                         }
2688                         amd64_mul_reg_size (code, non_eax_reg, FALSE, size);
2689                         /* save before the check since pop and mov don't change the flags */
2690                         if (ins->dreg != X86_EAX)
2691                                 amd64_mov_reg_reg (code, ins->dreg, X86_EAX, size);
2692                         if (saved_edx)
2693                                 amd64_pop_reg (code, X86_EDX);
2694                         if (saved_eax)
2695                                 amd64_pop_reg (code, X86_EAX);
2696                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
2697                         break;
2698                 }
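                 /*
                  * The dance above is needed because the single operand mul
                  * always multiplies into %rdx:%rax: both registers are saved
                  * unless they already hold an input or the result, and the
                  * overflow flag is only tested after they are restored, which
                  * is safe since pop and mov do not change the flags.
                  */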
2699                 case OP_IDIV:
2700                         amd64_cdq_size (code, 4);
2701                         amd64_div_reg_size (code, ins->sreg2, TRUE, 4);
2702                         break;
2703                 case OP_IDIV_UN:
2704                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
2705                         amd64_div_reg_size (code, ins->sreg2, FALSE, 4);
2706                         break;
2707                 case OP_IREM:
2708                         amd64_cdq_size (code, 4);
2709                         amd64_div_reg_size (code, ins->sreg2, TRUE, 4);
2710                         break;
2711                 case OP_IREM_UN:
2712                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
2713                         amd64_div_reg_size (code, ins->sreg2, FALSE, 4);
2714                         break;
2715                 case OP_ICOMPARE:
2716                         amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
2717                         break;
2718                 case OP_ICOMPARE_IMM:
2719                         amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
2720                         break;
2721                 case OP_IBEQ:
2722                 case OP_IBLT:
2723                 case OP_IBGT:
2724                 case OP_IBGE:
2725                 case OP_IBLE:
2726                         EMIT_COND_BRANCH (ins, opcode_to_x86_cond (ins->opcode), TRUE);
2727                         break;
2728                 case OP_IBNE_UN:
2729                 case OP_IBLT_UN:
2730                 case OP_IBGT_UN:
2731                 case OP_IBGE_UN:
2732                 case OP_IBLE_UN:
2733                         EMIT_COND_BRANCH (ins, opcode_to_x86_cond (ins->opcode), FALSE);
2734                         break;
2735                 case OP_COND_EXC_IOV:
2736                         EMIT_COND_SYSTEM_EXCEPTION (opcode_to_x86_cond (ins->opcode),
2737                                                                                 TRUE, ins->inst_p1);
2738                         break;
2739                 case OP_COND_EXC_IC:
2740                         EMIT_COND_SYSTEM_EXCEPTION (opcode_to_x86_cond (ins->opcode),
2741                                                                                 FALSE, ins->inst_p1);
2742                         break;
2743                 case CEE_NOT:
2744                         amd64_not_reg (code, ins->sreg1);
2745                         break;
2746                 case CEE_NEG:
2747                         amd64_neg_reg (code, ins->sreg1);
2748                         break;
2749                 case OP_SEXT_I1:
2750                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
2751                         break;
2752                 case OP_SEXT_I2:
2753                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
2754                         break;
2755                 case OP_SEXT_I4:
2756                         amd64_movsxd_reg_reg (code, ins->dreg, ins->sreg1);
2757                         break;
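                 /*
                  * A constant that fits in 32 bits is loaded with a 4 byte
                  * mov, which zero-extends into the full 64 bit register and
                  * has a much shorter encoding than the 10 byte movabs form.
                  */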
2758                 case OP_ICONST:
2759                 case OP_I8CONST:
2760                         if ((((guint64)ins->inst_c0) >> 32) == 0)
2761                                 amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 4);
2762                         else
2763                                 amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8);
2764                         break;
2765                 case OP_AOTCONST:
2766                         mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2767                         amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, 8);
2768                         break;
2769                 case CEE_CONV_I4:
2770                 case CEE_CONV_U4:
2771                 case OP_MOVE:
2772                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (gpointer));
2773                         break;
2774                 case OP_AMD64_SET_XMMREG_R4: {
2775                         if (use_sse2) {
2776                                 amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
2777                         }
2778                         else {
2779                                 amd64_fst_membase (code, AMD64_RSP, -8, FALSE, TRUE);
2780                                 /* ins->dreg is set to -1 by the reg allocator */
2781                                 amd64_movss_reg_membase (code, ins->unused, AMD64_RSP, -8);
2782                         }
2783                         break;
2784                 }
2785                 case OP_AMD64_SET_XMMREG_R8: {
2786                         if (use_sse2) {
2787                                 if (ins->dreg != ins->sreg1)
2788                                         amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
2789                         }
2790                         else {
2791                                 amd64_fst_membase (code, AMD64_RSP, -8, TRUE, TRUE);
2792                                 /* ins->dreg is set to -1 by the reg allocator */
2793                                 amd64_movsd_reg_membase (code, ins->unused, AMD64_RSP, -8);
2794                         }
2795                         break;
2796                 }
2797                 case CEE_JMP: {
2798                         /*
2799                          * Note: this 'frame destruction' logic is useful for tail calls, too.
2800                          * Keep in sync with the code in emit_epilog.
2801                          */
2802                         int pos = 0, i;
2803
2804                         /* FIXME: no tracing support... */
2805                         if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
2806                                 code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
2807
2808                         g_assert (!cfg->method->save_lmf);
2809
2810                         code = emit_load_volatile_arguments (cfg, code);
2811
2812                         if (cfg->arch.omit_fp) {
2813                                 guint32 save_offset = 0;
2814                                 /* Pop callee-saved registers */
2815                                 for (i = 0; i < AMD64_NREG; ++i)
2816                                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
2817                                                 amd64_mov_reg_membase (code, i, AMD64_RSP, save_offset, 8);
2818                                                 save_offset += 8;
2819                                         }
2820                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size);
2821                         }
2822                         else {
2823                                 for (i = 0; i < AMD64_NREG; ++i)
2824                                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
2825                                                 pos -= sizeof (gpointer);
2826                         
2827                                 if (pos)
2828                                         amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
2829
2830                                 /* Pop registers in reverse order */
2831                                 for (i = AMD64_NREG - 1; i > 0; --i)
2832                                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
2833                                                 amd64_pop_reg (code, i);
2834                                         }
2835
2836                                 amd64_leave (code);
2837                         }
2838
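                         /*
                          * With the frame torn down, materialize the target
                          * method address into %r11 (backpatched through the
                          * MONO_PATCH_INFO_METHOD_JUMP info added below) and
                          * jump to it, so the callee returns directly to our
                          * caller.
                          */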
2839                         offset = code - cfg->native_code;
2840                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2841                         if (cfg->compile_aot)
2842                                 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
2843                         else
2844                                 amd64_set_reg_template (code, AMD64_R11);
2845                         amd64_jump_reg (code, AMD64_R11);
2846                         break;
2847                 }
2848                 case OP_CHECK_THIS:
2849                         /* ensure ins->sreg1 is not NULL */
2850                         amd64_alu_membase_imm (code, X86_CMP, ins->sreg1, 0, 0);
2851                         break;
2852                 case OP_ARGLIST: {
2853                         amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, cfg->sig_cookie);
2854                         amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, 8);
2855                         break;
2856                 }
2857                 case OP_FCALL:
2858                 case OP_LCALL:
2859                 case OP_VCALL:
2860                 case OP_VOIDCALL:
2861                 case CEE_CALL:
2862                         call = (MonoCallInst*)ins;
2863                         /*
2864                          * The AMD64 ABI forces callers to know about varargs.
2865                          */
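                         /*
                          * Concretely, the ABI requires %al to hold an upper
                          * bound on the number of SSE registers used for the
                          * variable arguments, which is what the moves into
                          * %rax below provide.
                          */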
2866                         if ((call->signature->call_convention == MONO_CALL_VARARG) && (call->signature->pinvoke))
2867                                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
2868                         else if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (cfg->method->klass->image != mono_defaults.corlib)) {
2869                                 /* 
2870                                  * Since the unmanaged calling convention doesn't contain a 
2871                                  * 'vararg' entry, we have to treat every pinvoke call as a
2872                                  * potential vararg call.
2873                                  */
2874                                 guint32 nregs, i;
2875                                 nregs = 0;
2876                                 for (i = 0; i < AMD64_XMM_NREG; ++i)
2877                                         if (call->used_fregs & (1 << i))
2878                                                 nregs ++;
2879                                 if (!nregs)
2880                                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
2881                                 else
2882                                         amd64_mov_reg_imm (code, AMD64_RAX, nregs);
2883                         }
2884
2885                         if (ins->flags & MONO_INST_HAS_METHOD)
2886                                 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2887                         else
2888                                 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2889                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
2890                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
2891                         code = emit_move_return_value (cfg, ins, code);
2892                         break;
2893                 case OP_FCALL_REG:
2894                 case OP_LCALL_REG:
2895                 case OP_VCALL_REG:
2896                 case OP_VOIDCALL_REG:
2897                 case OP_CALL_REG:
2898                         call = (MonoCallInst*)ins;
2899
2900                         if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
2901                                 amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8);
2902                                 ins->sreg1 = AMD64_R11;
2903                         }
2904
2905                         /*
2906                          * The AMD64 ABI forces callers to know about varargs.
2907                          */
2908                         if ((call->signature->call_convention == MONO_CALL_VARARG) && (call->signature->pinvoke)) {
2909                                 if (ins->sreg1 == AMD64_RAX) {
2910                                         amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
2911                                         ins->sreg1 = AMD64_R11;
2912                                 }
2913                                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
2914                         }
2915                         amd64_call_reg (code, ins->sreg1);
2916                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
2917                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
2918                         code = emit_move_return_value (cfg, ins, code);
2919                         break;
2920                 case OP_FCALL_MEMBASE:
2921                 case OP_LCALL_MEMBASE:
2922                 case OP_VCALL_MEMBASE:
2923                 case OP_VOIDCALL_MEMBASE:
2924                 case OP_CALL_MEMBASE:
2925                         call = (MonoCallInst*)ins;
2926
2927                         if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
2928                                 /* 
2929                                  * Can't use R11 because it is clobbered by the trampoline 
2930                                  * code, and the reg value is needed by get_vcall_slot_addr.
2931                                  */
2932                                 amd64_mov_reg_reg (code, AMD64_RAX, ins->sreg1, 8);
2933                                 ins->sreg1 = AMD64_RAX;
2934                         }
2935
2936                         amd64_call_membase (code, ins->sreg1, ins->inst_offset);
2937                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
2938                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
2939                         code = emit_move_return_value (cfg, ins, code);
2940                         break;
2941                 case OP_OUTARG:
2942                 case OP_X86_PUSH:
2943                         amd64_push_reg (code, ins->sreg1);
2944                         break;
2945                 case OP_X86_PUSH_IMM:
2946                         g_assert (amd64_is_imm32 (ins->inst_imm));
2947                         amd64_push_imm (code, ins->inst_imm);
2948                         break;
2949                 case OP_X86_PUSH_MEMBASE:
2950                         amd64_push_membase (code, ins->inst_basereg, ins->inst_offset);
2951                         break;
2952                 case OP_X86_PUSH_OBJ: 
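                         /*
                          * Push a valuetype: reserve inst_imm bytes on the
                          * stack and copy the object there with a rep movs,
                          * saving %rdi/%rsi/%rcx first since the string move
                          * uses all three; the count is in 8 byte words.
                          */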
2953                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ins->inst_imm);
2954                         amd64_push_reg (code, AMD64_RDI);
2955                         amd64_push_reg (code, AMD64_RSI);
2956                         amd64_push_reg (code, AMD64_RCX);
2957                         if (ins->inst_offset)
2958                                 amd64_lea_membase (code, AMD64_RSI, ins->inst_basereg, ins->inst_offset);
2959                         else
2960                                 amd64_mov_reg_reg (code, AMD64_RSI, ins->inst_basereg, 8);
2961                         amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, 3 * 8);
2962                         amd64_mov_reg_imm (code, AMD64_RCX, (ins->inst_imm >> 3));
2963                         amd64_cld (code);
2964                         amd64_prefix (code, X86_REP_PREFIX);
2965                         amd64_movsd (code);
2966                         amd64_pop_reg (code, AMD64_RCX);
2967                         amd64_pop_reg (code, AMD64_RSI);
2968                         amd64_pop_reg (code, AMD64_RDI);
2969                         break;
2970                 case OP_X86_LEA:
2971                         amd64_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->unused);
2972                         break;
2973                 case OP_X86_LEA_MEMBASE:
2974                         amd64_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
2975                         break;
2976                 case OP_X86_XCHG:
2977                         amd64_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4);
2978                         break;
2979                 case OP_LOCALLOC:
2980                         /* keep alignment */
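                         /*
                          * This is the usual (size + align - 1) & ~(align - 1)
                          * rounding; e.g. with a 16 byte frame alignment a
                          * request for 24 bytes is rounded up to 32.
                          */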
2981                         amd64_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1);
2982                         amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
2983                         code = mono_emit_stack_alloc (code, ins);
2984                         amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
2985                         break;
2986                 case CEE_RET:
2987                         amd64_ret (code);
2988                         break;
2989                 case CEE_THROW: {
2990                         amd64_mov_reg_reg (code, AMD64_RDI, ins->sreg1, 8);
2991                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
2992                                              (gpointer)"mono_arch_throw_exception");
2993                         break;
2994                 }
2995                 case OP_RETHROW: {
2996                         amd64_mov_reg_reg (code, AMD64_RDI, ins->sreg1, 8);
2997                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
2998                                              (gpointer)"mono_arch_rethrow_exception");
2999                         break;
3000                 }
3001                 case OP_CALL_HANDLER: 
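                         /*
                          * The handler is entered with a call so it can return
                          * with a ret; the 8 byte sub plus the 8 byte return
                          * address pushed by the call adjust %rsp by a multiple
                          * of 16, preserving stack alignment in the handler.
                          */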
3002                         /* Align stack */
3003                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
3004                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3005                         amd64_call_imm (code, 0);
3006                         /* Restore stack alignment */
3007                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
3008                         break;
3009                 case OP_LABEL:
3010                         ins->inst_c0 = code - cfg->native_code;
3011                         break;
3012                 case CEE_BR:
3013                         //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3014                         //if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3015                         //break;
3016                         if (ins->flags & MONO_INST_BRLABEL) {
3017                                 if (ins->inst_i0->inst_c0) {
3018                                         amd64_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
3019                                 } else {
3020                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3021                                         if ((cfg->opt & MONO_OPT_BRANCH) &&
3022                                             x86_is_imm8 (ins->inst_i0->inst_c1 - cpos))
3023                                                 x86_jump8 (code, 0);
3024                                         else 
3025                                                 x86_jump32 (code, 0);
3026                                 }
3027                         } else {
3028                                 if (ins->inst_target_bb->native_offset) {
3029                                         amd64_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset); 
3030                                 } else {
3031                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3032                                         if ((cfg->opt & MONO_OPT_BRANCH) &&
3033                                             x86_is_imm8 (ins->inst_target_bb->max_offset - cpos))
3034                                                 x86_jump8 (code, 0);
3035                                         else 
3036                                                 x86_jump32 (code, 0);
3037                                 } 
3038                         }
3039                         break;
3040                 case OP_BR_REG:
3041                         amd64_jump_reg (code, ins->sreg1);
3042                         break;
3043                 case OP_CEQ:
3044                 case OP_ICEQ:
3045                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3046                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3047                         break;
3048                 case OP_CLT:
3049                 case OP_ICLT:
3050                         amd64_set_reg (code, X86_CC_LT, ins->dreg, TRUE);
3051                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3052                         break;
3053                 case OP_CLT_UN:
3054                 case OP_ICLT_UN:
3055                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
3056                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3057                         break;
3058                 case OP_CGT:
3059                 case OP_ICGT:
3060                         amd64_set_reg (code, X86_CC_GT, ins->dreg, TRUE);
3061                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3062                         break;
3063                 case OP_CGT_UN:
3064                 case OP_ICGT_UN:
3065                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
3066                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3067                         break;
3068                 case OP_COND_EXC_EQ:
3069                 case OP_COND_EXC_NE_UN:
3070                 case OP_COND_EXC_LT:
3071                 case OP_COND_EXC_LT_UN:
3072                 case OP_COND_EXC_GT:
3073                 case OP_COND_EXC_GT_UN:
3074                 case OP_COND_EXC_GE:
3075                 case OP_COND_EXC_GE_UN:
3076                 case OP_COND_EXC_LE:
3077                 case OP_COND_EXC_LE_UN:
3078                 case OP_COND_EXC_OV:
3079                 case OP_COND_EXC_NO:
3080                 case OP_COND_EXC_C:
3081                 case OP_COND_EXC_NC:
3082                         EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], 
3083                                                     (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
3084                         break;
3085                 case CEE_BEQ:
3086                 case CEE_BNE_UN:
3087                 case CEE_BLT:
3088                 case CEE_BLT_UN:
3089                 case CEE_BGT:
3090                 case CEE_BGT_UN:
3091                 case CEE_BGE:
3092                 case CEE_BGE_UN:
3093                 case CEE_BLE:
3094                 case CEE_BLE_UN:
3095                         EMIT_COND_BRANCH (ins, branch_cc_table [ins->opcode - CEE_BEQ], (ins->opcode < CEE_BNE_UN));
3096                         break;
3097
3098                 /* floating point opcodes */
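                 /*
                  * Note that the fast paths for zero in the constant loads
                  * below (xorpd/fldz) are only taken for +0.0: mono_signbit
                  * is checked so that -0.0, which compares equal to 0.0 but
                  * has its sign bit set, still goes through the memory load.
                  */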
3099                 case OP_R8CONST: {
3100                         double d = *(double *)ins->inst_p0;
3101
3102                         if (use_sse2) {
3103                                 if ((d == 0.0) && (mono_signbit (d) == 0)) {
3104                                         amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
3105                                 }
3106                                 else {
3107                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3108                                         amd64_sse_movsd_reg_membase (code, ins->dreg, AMD64_RIP, 0);
3109                                 }
3110                         }
3111                         else if ((d == 0.0) && (mono_signbit (d) == 0)) {
3112                                 amd64_fldz (code);
3113                         } else if (d == 1.0) {
3114                                 x86_fld1 (code);
3115                         } else {
3116                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3117                                 amd64_fld_membase (code, AMD64_RIP, 0, TRUE);
3118                         }
3119                         break;
3120                 }
3121                 case OP_R4CONST: {
3122                         float f = *(float *)ins->inst_p0;
3123
3124                         if (use_sse2) {
3125                                 if ((f == 0.0) && (mono_signbit (f) == 0)) {
3126                                         amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
3127                                 }
3128                                 else {
3129                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3130                                         amd64_sse_movss_reg_membase (code, ins->dreg, AMD64_RIP, 0);
3131                                         amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
3132                                 }
3133                         }
3134                         else if ((f == 0.0) && (mono_signbit (f) == 0)) {
3135                                 amd64_fldz (code);
3136                         } else if (f == 1.0) {
3137                                 x86_fld1 (code);
3138                         } else {
3139                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3140                                 amd64_fld_membase (code, AMD64_RIP, 0, FALSE);
3141                         }
3142                         break;
3143                 }
3144                 case OP_STORER8_MEMBASE_REG:
3145                         if (use_sse2)
3146                                 amd64_sse_movsd_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1);
3147                         else
3148                                 amd64_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, TRUE, TRUE);
3149                         break;
3150                 case OP_LOADR8_SPILL_MEMBASE:
3151                         if (use_sse2)
3152                                 g_assert_not_reached ();
3153                         amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
3154                         amd64_fxch (code, 1);
3155                         break;
3156                 case OP_LOADR8_MEMBASE:
3157                         if (use_sse2)
3158                                 amd64_sse_movsd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3159                         else
3160                                 amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
3161                         break;
3162                 case OP_STORER4_MEMBASE_REG:
3163                         if (use_sse2) {
3164                                 /* This requires a double->single conversion */
3165                                 amd64_sse_cvtsd2ss_reg_reg (code, AMD64_XMM15, ins->sreg1);
3166                                 amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, AMD64_XMM15);
3167                         }
3168                         else
3169                                 amd64_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, FALSE, TRUE);
3170                         break;
3171                 case OP_LOADR4_MEMBASE:
3172                         if (use_sse2) {
3173                                 amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3174                                 amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
3175                         }
3176                         else
3177                                 amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
3178                         break;
3179                 case CEE_CONV_R4: /* FIXME: change precision */
3180                 case CEE_CONV_R8:
3181                         if (use_sse2)
3182                                 amd64_sse_cvtsi2sd_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
3183                         else {
3184                                 amd64_push_reg (code, ins->sreg1);
3185                                 amd64_fild_membase (code, AMD64_RSP, 0, FALSE);
3186                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
3187                         }
3188                         break;
3189                 case CEE_CONV_R_UN:
3190                         /* Emulated */
3191                         g_assert_not_reached ();
3192                         break;
3193                 case OP_LCONV_TO_R4: /* FIXME: change precision */
3194                 case OP_LCONV_TO_R8:
3195                         if (use_sse2)
3196                                 amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
3197                         else {
3198                                 amd64_push_reg (code, ins->sreg1);
3199                                 amd64_fild_membase (code, AMD64_RSP, 0, TRUE);
3200                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
3201                         }
3202                         break;
3203                 case OP_X86_FP_LOAD_I8:
3204                         if (use_sse2)
3205                                 g_assert_not_reached ();
3206                         amd64_fild_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
3207                         break;
3208                 case OP_X86_FP_LOAD_I4:
3209                         if (use_sse2)
3210                                 g_assert_not_reached ();
3211                         amd64_fild_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
3212                         break;
3213                 case OP_FCONV_TO_I1:
3214                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
3215                         break;
3216                 case OP_FCONV_TO_U1:
3217                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
3218                         break;
3219                 case OP_FCONV_TO_I2:
3220                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
3221                         break;
3222                 case OP_FCONV_TO_U2:
3223                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
3224                         break;
3225                 case OP_FCONV_TO_I4:
3226                 case OP_FCONV_TO_I:
3227                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
3228                         break;
3229                 case OP_FCONV_TO_I8:
3230                         code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE);
3231                         break;
3232                 case OP_LCONV_TO_R_UN: { 
3233                         static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
3234                         guint8 *br;
3235
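                         /*
                          * mn encodes 2^64 in 80 bit extended precision
                          * (exponent 0x403f, significand 0x8000000000000000):
                          * when the operand is negative as a signed 64 bit
                          * integer, adding 2^64 yields the correct unsigned
                          * value.
                          */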
3236                         if (use_sse2)
3237                                 g_assert_not_reached ();
3238
3239                         /* load 64bit integer to FP stack */
3240                         amd64_push_imm (code, 0);
3241                         amd64_push_reg (code, ins->sreg2);
3242                         amd64_push_reg (code, ins->sreg1);
3243                         amd64_fild_membase (code, AMD64_RSP, 0, TRUE);
3244                         /* store as 80bit FP value */
3245                         x86_fst80_membase (code, AMD64_RSP, 0);
3246                         
3247                         /* test if lreg is negative */
3248                         amd64_test_reg_reg (code, ins->sreg2, ins->sreg2);
3249                         br = code; x86_branch8 (code, X86_CC_GEZ, 0, TRUE);
3250         
3251                         /* add correction constant mn */
3252                         x86_fld80_mem (code, mn);
3253                         x86_fld80_membase (code, AMD64_RSP, 0);
3254                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
3255                         x86_fst80_membase (code, AMD64_RSP, 0);
3256
3257                         amd64_patch (br, code);
3258
3259                         x86_fld80_membase (code, AMD64_RSP, 0);
3260                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 12);
3261
3262                         break;
3263                 }
3264                 case CEE_CONV_OVF_U4:
3265                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0);
3266                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, TRUE, "OverflowException");
3267                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
3268                         break;
3269                 case CEE_CONV_OVF_I4_UN:
3270                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0x7fffffff);
3271                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, FALSE, "OverflowException");
3272                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
3273                         break;
3274                 case OP_FMOVE:
3275                         if (use_sse2 && (ins->dreg != ins->sreg1))
3276                                 amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
3277                         break;
3278                 case OP_FADD:
3279                         if (use_sse2)
3280                                 amd64_sse_addsd_reg_reg (code, ins->dreg, ins->sreg2);
3281                         else
3282                                 amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
3283                         break;
3284                 case OP_FSUB:
3285                         if (use_sse2)
3286                                 amd64_sse_subsd_reg_reg (code, ins->dreg, ins->sreg2);
3287                         else
3288                                 amd64_fp_op_reg (code, X86_FSUB, 1, TRUE);
3289                         break;          
3290                 case OP_FMUL:
3291                         if (use_sse2)
3292                                 amd64_sse_mulsd_reg_reg (code, ins->dreg, ins->sreg2);
3293                         else
3294                                 amd64_fp_op_reg (code, X86_FMUL, 1, TRUE);
3295                         break;          
3296                 case OP_FDIV:
3297                         if (use_sse2)
3298                                 amd64_sse_divsd_reg_reg (code, ins->dreg, ins->sreg2);
3299                         else
3300                                 amd64_fp_op_reg (code, X86_FDIV, 1, TRUE);
3301                         break;          
3302                 case OP_FNEG:
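                         /*
                          * SSE has no float negation, so the sign bit is
                          * flipped by xorpd-ing with a 0x8000000000000000
                          * mask; it is pushed twice so the full 16 byte xmm
                          * operand read from the stack is covered.
                          */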
3303                         if (use_sse2) {
3304                                 amd64_mov_reg_imm_size (code, AMD64_R11, 0x8000000000000000, 8);
3305                                 amd64_push_reg (code, AMD64_R11);
3306                                 amd64_push_reg (code, AMD64_R11);
3307                                 amd64_sse_xorpd_reg_membase (code, ins->dreg, AMD64_RSP, 0);
3308                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
3309                         }
3310                         else
3311                                 amd64_fchs (code);
3312                         break;          
3313                 case OP_SIN:
3314                         if (use_sse2) {
3315                                 EMIT_SSE2_FPFUNC (code, fsin, ins->dreg, ins->sreg1);
3316                         }
3317                         else {
3318                                 amd64_fsin (code);
3319                                 amd64_fldz (code);
3320                                 amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
3321                         }
3322                         break;          
3323                 case OP_COS:
3324                         if (use_sse2) {
3325                                 EMIT_SSE2_FPFUNC (code, fcos, ins->dreg, ins->sreg1);
3326                         }
3327                         else {
3328                                 amd64_fcos (code);
3329                                 amd64_fldz (code);
3330                                 amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
3331                         }
3332                         break;          
3333                 case OP_ABS:
3334                         if (use_sse2) {
3335                                 EMIT_SSE2_FPFUNC (code, fabs, ins->dreg, ins->sreg1);
3336                         }
3337                         else
3338                                 amd64_fabs (code);
3339                         break;          
3340                 case OP_TAN: {
3341                         /* 
3342                          * it really doesn't make sense to inline all this code;
3343                          * it's here just to show that things may not be as simple 
3344                          * as they appear.
3345                          */
3346                         guchar *check_pos, *end_tan, *pop_jump;
3347                         if (use_sse2)
3348                                 g_assert_not_reached ();
3349                         amd64_push_reg (code, AMD64_RAX);
3350                         amd64_fptan (code);
3351                         amd64_fnstsw (code);
3352                         amd64_test_reg_imm (code, AMD64_RAX, X86_FP_C2);
3353                         check_pos = code;
3354                         x86_branch8 (code, X86_CC_NE, 0, FALSE);
3355                         amd64_fstp (code, 0); /* pop the 1.0 */
3356                         end_tan = code;
3357                         x86_jump8 (code, 0);
3358                         amd64_fldpi (code);
3359                         amd64_fp_op (code, X86_FADD, 0);
3360                         amd64_fxch (code, 1);
3361                         x86_fprem1 (code);
3362                         amd64_fstsw (code);
3363                         amd64_test_reg_imm (code, AMD64_RAX, X86_FP_C2);
3364                         pop_jump = code;
3365                         x86_branch8 (code, X86_CC_NE, 0, FALSE);
3366                         amd64_fstp (code, 1);
3367                         amd64_fptan (code);
3368                         amd64_patch (pop_jump, code);
3369                         amd64_fstp (code, 0); /* pop the 1.0 */
3370                         amd64_patch (check_pos, code);
3371                         amd64_patch (end_tan, code);
3372                         amd64_fldz (code);
3373                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
3374                         amd64_pop_reg (code, AMD64_RAX);
3375                         break;
3376                 }
3377                 case OP_ATAN:
3378                         if (use_sse2)
3379                                 g_assert_not_reached ();
3380                         x86_fld1 (code);
3381                         amd64_fpatan (code);
3382                         amd64_fldz (code);
3383                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
3384                         break;          
3385                 case OP_SQRT:
3386                         if (use_sse2) {
3387                                 EMIT_SSE2_FPFUNC (code, fsqrt, ins->dreg, ins->sreg1);
3388                         }
3389                         else
3390                                 amd64_fsqrt (code);
3391                         break;          
3392                 case OP_X86_FPOP:
3393                         if (!use_sse2)
3394                                 amd64_fstp (code, 0);
3395                         break;          
3396                 case OP_FREM: {
3397                         guint8 *l1, *l2;
3398
3399                         if (use_sse2)
3400                                 g_assert_not_reached ();
3401                         amd64_push_reg (code, AMD64_RAX);
3402                         /* we need to exchange ST(0) with ST(1) */
3403                         amd64_fxch (code, 1);
3404
3405                         /* this requires a loop, because fprem sometimes 
3406                          * returns a partial remainder */
3407                         l1 = code;
3408                         /* looks like MS is using fprem instead of the IEEE-compatible fprem1 */
3409                         /* x86_fprem1 (code); */
3410                         amd64_fprem (code);
3411                         amd64_fnstsw (code);
3412                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_C2);
3413                         l2 = code + 2;
3414                         x86_branch8 (code, X86_CC_NE, l1 - l2, FALSE);
3415
3416                         /* pop result */
3417                         amd64_fstp (code, 1);
3418
3419                         amd64_pop_reg (code, AMD64_RAX);
3420                         break;
3421                 }
3422                 case OP_FCOMPARE:
3423                         if (use_sse2) {
3424                                 /* 
3425                                  * The two arguments are swapped because the fbranch instructions
3426                                  * depend on this for the non-sse case to work.
3427                                  */
3428                                 amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
3429                                 break;
3430                         }
3431                         if (cfg->opt & MONO_OPT_FCMOV) {
3432                                 amd64_fcomip (code, 1);
3433                                 amd64_fstp (code, 0);
3434                                 break;
3435                         }
3436                         /* this overwrites EAX */
3437                         EMIT_FPCOMPARE(code);
3438                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
3439                         break;
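                 /*
                  * Both comisd and fcomip map the comparison result onto
                  * ZF/PF/CF, with an unordered (NaN) result setting PF; this
                  * is why the compare opcodes below test X86_CC_P before
                  * looking at the other flags.
                  */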
3440                 case OP_FCEQ:
3441                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
3442                                 /* zeroing the register at the start results in 
3443                                  * shorter and faster code (we can also remove the widening op)
3444                                  */
3445                                 guchar *unordered_check;
3446                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3447                                 
3448                                 if (use_sse2)
3449                                         amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
3450                                 else {
3451                                         amd64_fcomip (code, 1);
3452                                         amd64_fstp (code, 0);
3453                                 }
3454                                 unordered_check = code;
3455                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
3456                                 amd64_set_reg (code, X86_CC_EQ, ins->dreg, FALSE);
3457                                 amd64_patch (unordered_check, code);
3458                                 break;
3459                         }
3460                         if (ins->dreg != AMD64_RAX) 
3461                                 amd64_push_reg (code, AMD64_RAX);
3462
3463                         EMIT_FPCOMPARE(code);
3464                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
3465                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0x4000);
3466                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3467                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3468
3469                         if (ins->dreg != AMD64_RAX) 
3470                                 amd64_pop_reg (code, AMD64_RAX);
3471                         break;
3472                 case OP_FCLT:
3473                 case OP_FCLT_UN:
3474                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
3475                                 /* zeroing the register at the start results in 
3476                                  * shorter and faster code (we can also remove the widening op)
3477                                  */
3478                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3479                                 if (use_sse2)
3480                                         amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
3481                                 else {
3482                                         amd64_fcomip (code, 1);
3483                                         amd64_fstp (code, 0);
3484                                 }
3485                                 if (ins->opcode == OP_FCLT_UN) {
3486                                         guchar *unordered_check = code;
3487                                         guchar *jump_to_end;
3488                                         x86_branch8 (code, X86_CC_P, 0, FALSE);
3489                                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
3490                                         jump_to_end = code;
3491                                         x86_jump8 (code, 0);
3492                                         amd64_patch (unordered_check, code);
3493                                         amd64_inc_reg (code, ins->dreg);
3494                                         amd64_patch (jump_to_end, code);
3495                                 } else {
3496                                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
3497                                 }
3498                                 break;
3499                         }
3500                         if (ins->dreg != AMD64_RAX) 
3501                                 amd64_push_reg (code, AMD64_RAX);
3502
3503                         EMIT_FPCOMPARE(code);
3504                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
3505                         if (ins->opcode == OP_FCLT_UN) {
3506                                 guchar *is_not_zero_check, *end_jump;
3507                                 is_not_zero_check = code;
3508                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3509                                 end_jump = code;
3510                                 x86_jump8 (code, 0);
3511                                 amd64_patch (is_not_zero_check, code);
3512                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
3513
3514                                 amd64_patch (end_jump, code);
3515                         }
3516                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3517                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3518
3519                         if (ins->dreg != AMD64_RAX) 
3520                                 amd64_pop_reg (code, AMD64_RAX);
3521                         break;
3522                 case OP_FCGT:
3523                 case OP_FCGT_UN:
3524                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
3525                                 /* zeroing the register at the start results in 
3526                                  * shorter and faster code (we can also remove the widening op)
3527                                  */
3528                                 guchar *unordered_check;
3529                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3530                                 if (use_sse2)
3531                                         amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
3532                                 else {
3533                                         amd64_fcomip (code, 1);
3534                                         amd64_fstp (code, 0);
3535                                 }
3536                                 if (ins->opcode == OP_FCGT) {
3537                                         unordered_check = code;
3538                                         x86_branch8 (code, X86_CC_P, 0, FALSE);
3539                                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
3540                                         amd64_patch (unordered_check, code);
3541                                 } else {
3542                                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
3543                                 }
3544                                 break;
3545                         }
3546                         if (ins->dreg != AMD64_RAX) 
3547                                 amd64_push_reg (code, AMD64_RAX);
3548
3549                         EMIT_FPCOMPARE(code);
3550                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
3551                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
3552                         if (ins->opcode == OP_FCGT_UN) {
3553                                 guchar *is_not_zero_check, *end_jump;
3554                                 is_not_zero_check = code;
3555                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3556                                 end_jump = code;
3557                                 x86_jump8 (code, 0);
3558                                 amd64_patch (is_not_zero_check, code);
3559                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
3560
3561                                 amd64_patch (end_jump, code);
3562                         }
3563                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3564                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3565
3566                         if (ins->dreg != AMD64_RAX) 
3567                                 amd64_pop_reg (code, AMD64_RAX);
3568                         break;
3569                 case OP_FCLT_MEMBASE:
3570                 case OP_FCGT_MEMBASE:
3571                 case OP_FCLT_UN_MEMBASE:
3572                 case OP_FCGT_UN_MEMBASE:
3573                 case OP_FCEQ_MEMBASE: {
3574                         guchar *unordered_check, *jump_to_end;
3575                         int x86_cond;
3576                         g_assert (use_sse2);
3577
3578                         amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3579                         amd64_sse_comisd_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
3580
3581                         switch (ins->opcode) {
3582                         case OP_FCEQ_MEMBASE:
3583                                 x86_cond = X86_CC_EQ;
3584                                 break;
3585                         case OP_FCLT_MEMBASE:
3586                         case OP_FCLT_UN_MEMBASE:
3587                                 x86_cond = X86_CC_LT;
3588                                 break;
3589                         case OP_FCGT_MEMBASE:
3590                         case OP_FCGT_UN_MEMBASE:
3591                                 x86_cond = X86_CC_GT;
3592                                 break;
3593                         default:
3594                                 g_assert_not_reached ();
3595                         }
3596
3597                         unordered_check = code;
3598                         x86_branch8 (code, X86_CC_P, 0, FALSE);
3599                         amd64_set_reg (code, x86_cond, ins->dreg, FALSE);
3600
3601                         switch (ins->opcode) {
3602                         case OP_FCEQ_MEMBASE:
3603                         case OP_FCLT_MEMBASE:
3604                         case OP_FCGT_MEMBASE:
3605                                 amd64_patch (unordered_check, code);
3606                                 break;
3607                         case OP_FCLT_UN_MEMBASE:
3608                         case OP_FCGT_UN_MEMBASE:
3609                                 jump_to_end = code;
3610                                 x86_jump8 (code, 0);
3611                                 amd64_patch (unordered_check, code);
3612                                 amd64_inc_reg (code, ins->dreg);
3613                                 amd64_patch (jump_to_end, code);
3614                                 break;
3615                         default:
3616                                 break;
3617                         }
3618                         break;
3619                 }
3620                 case OP_FBEQ:
3621                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
3622                                 guchar *jump = code;
3623                                 x86_branch8 (code, X86_CC_P, 0, TRUE);
3624                                 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3625                                 amd64_patch (jump, code);
3626                                 break;
3627                         }
3628                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0x4000);
3629                         EMIT_COND_BRANCH (ins, X86_CC_EQ, TRUE);
3630                         break;
3631                 case OP_FBNE_UN:
3632                         /* Branch if C013 != 100 */
3633                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
3634                                 /* branch if !ZF or (PF|CF) */
3635                                 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3636                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
3637                                 EMIT_COND_BRANCH (ins, X86_CC_B, FALSE);
3638                                 break;
3639                         }
3640                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
3641                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3642                         break;
3643                 case OP_FBLT:
3644                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
3645                                 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
3646                                 break;
3647                         }
                        amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
3648                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3649                         break;
3650                 case OP_FBLT_UN:
3651                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
3652                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
3653                                 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
3654                                 break;
3655                         }
                        amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
3656                         if (ins->opcode == OP_FBLT_UN) {
3657                                 guchar *is_not_zero_check, *end_jump;
3658                                 is_not_zero_check = code;
3659                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3660                                 end_jump = code;
3661                                 x86_jump8 (code, 0);
3662                                 amd64_patch (is_not_zero_check, code);
3663                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
3664
3665                                 amd64_patch (end_jump, code);
3666                         }
3667                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3668                         break;
3669                 case OP_FBGT:
3670                 case OP_FBGT_UN:
3671                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
3672                                 EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
3673                                 break;
3674                         }
3675                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
3676                         if (ins->opcode == OP_FBGT_UN) {
3677                                 guchar *is_not_zero_check, *end_jump;
3678                                 is_not_zero_check = code;
3679                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3680                                 end_jump = code;
3681                                 x86_jump8 (code, 0);
3682                                 amd64_patch (is_not_zero_check, code);
3683                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
3684
3685                                 amd64_patch (end_jump, code);
3686                         }
3687                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3688                         break;
3689                 case OP_FBGE:
3690                         /* Branch if C013 == 100 or 001 */
3691                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
3692                                 guchar *br1;
3693
3694                                 /* skip branch if C1=1 */
3695                                 br1 = code;
3696                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
3697                                 /* branch if (C0 | C3) = 1 */
3698                                 EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
3699                                 amd64_patch (br1, code);
3700                                 break;
3701                         }
3702                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
3703                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3704                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
3705                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3706                         break;
3707                 case OP_FBGE_UN:
3708                         /* Branch if C013 == 000 */
3709                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
3710                                 EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
3711                                 break;
3712                         }
3713                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3714                         break;
3715                 case OP_FBLE:
3716                         /* Branch if C013=000 or 100 */
3717                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
3718                                 guchar *br1;
3719
3720                                 /* skip branch if C1=1 */
3721                                 br1 = code;
3722                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
3723                                 /* branch if C0=0 */
3724                                 EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
3725                                 amd64_patch (br1, code);
3726                                 break;
3727                         }
3728                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, (X86_FP_C0|X86_FP_C1));
3729                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
3730                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3731                         break;
3732                 case OP_FBLE_UN:
3733                         /* Branch if C013 != 001 */
3734                         if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
3735                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
3736                                 EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
3737                                 break;
3738                         }
3739                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
3740                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3741                         break;
3742                 case CEE_CKFINITE: {
3743                         if (use_sse2) {
3744                                 /* Transfer value to the fp stack */
3745                                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 16);
3746                                 amd64_movsd_membase_reg (code, AMD64_RSP, 0, ins->sreg1);
3747                                 amd64_fld_membase (code, AMD64_RSP, 0, TRUE);
3748                         }
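                        /*
                         * fxam classifies st(0); after fnstsw, the C3/C2/C0
                         * condition bits land in bits 14/10/8 of %ax. Masking
                         * with 0x4100 (C3|C0) and comparing against C0 sets
                         * ZF exactly for NaN and infinity, the two non-finite
                         * classes CKFINITE must reject.
                         */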
3749                         amd64_push_reg (code, AMD64_RAX);
3750                         amd64_fxam (code);
3751                         amd64_fnstsw (code);
3752                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0x4100);
3753                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
3754                         amd64_pop_reg (code, AMD64_RAX);
3755                         if (use_sse2) {
3756                                 amd64_fstp (code, 0);
3757                         }                               
3758                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");
3759                         if (use_sse2)
3760                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
3761                         break;
3762                 }
3763                 case OP_TLS_GET: {
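                        /*
                         * ins->inst_offset is the offset of the desired slot
                         * inside the thread local storage block; on amd64
                         * linux the %fs segment base points at that block, so
                         * a single fs-prefixed move reads the slot directly.
                         */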
3764                         x86_prefix (code, X86_FS_PREFIX);
3765                         amd64_mov_reg_mem (code, ins->dreg, ins->inst_offset, 8);
3766                         break;
3767                 }
3768                 case OP_MEMORY_BARRIER: {
3769                         /* Not needed on amd64 */
3770                         break;
3771                 }
3772                 case OP_ATOMIC_ADD_I4:
3773                 case OP_ATOMIC_ADD_I8: {
3774                         int dreg = ins->dreg;
3775                         guint32 size = (ins->opcode == OP_ATOMIC_ADD_I4) ? 4 : 8;
3776
3777                         if (dreg == ins->inst_basereg)
3778                                 dreg = AMD64_R11;
3779                         
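                        /*
                         * lock xadd adds sreg2 into the memory operand and
                         * leaves the memory's previous value in dreg, which is
                         * exactly the old value OP_ATOMIC_ADD must return.
                         */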
3780                         if (dreg != ins->sreg2)
3781                                 amd64_mov_reg_reg (code, dreg, ins->sreg2, size);
3782
3783                         x86_prefix (code, X86_LOCK_PREFIX);
3784                         amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
3785
3786                         if (dreg != ins->dreg)
3787                                 amd64_mov_reg_reg (code, ins->dreg, dreg, size);
3788
3789                         break;
3790                 }
3791                 case OP_ATOMIC_ADD_NEW_I4:
3792                 case OP_ATOMIC_ADD_NEW_I8: {
3793                         int dreg = ins->dreg;
3794                         guint32 size = (ins->opcode == OP_ATOMIC_ADD_NEW_I4) ? 4 : 8;
3795
3796                         if ((dreg == ins->sreg2) || (dreg == ins->inst_basereg))
3797                                 dreg = AMD64_R11;
3798
3799                         amd64_mov_reg_reg (code, dreg, ins->sreg2, size);
3800                         amd64_prefix (code, X86_LOCK_PREFIX);
3801                         amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
3802                         /* dreg contains the old value, add with sreg2 value */
3803                         amd64_alu_reg_reg_size (code, X86_ADD, dreg, ins->sreg2, size);
3804                         
3805                         if (ins->dreg != dreg)
3806                                 amd64_mov_reg_reg (code, ins->dreg, dreg, size);
3807
3808                         break;
3809                 }
3810                 case OP_ATOMIC_EXCHANGE_I4:
3811                 case OP_ATOMIC_EXCHANGE_I8: {
3812                         guchar *br[2];
3813                         int sreg2 = ins->sreg2;
3814                         int breg = ins->inst_basereg;
3815                         guint32 size = (ins->opcode == OP_ATOMIC_EXCHANGE_I4) ? 4 : 8;
3816
3817                         /* 
3818                          * See http://msdn.microsoft.com/msdnmag/issues/0700/Win32/ for
3819                          * an explanation of how this works.
3820                          */
3821
3822                         /* cmpxchg uses eax as its comparand, so we need to make sure we can use it:
3823                          * hack to overcome limits in x86 reg allocator 
3824                          * (req: dreg == eax and sreg2 != eax and breg != eax) 
3825                          */
3826                         if (ins->dreg != AMD64_RAX)
3827                                 amd64_push_reg (code, AMD64_RAX);
3828                         
3829                         /* We need the EAX reg for the cmpxchg */
3830                         if (ins->sreg2 == AMD64_RAX) {
3831                                 amd64_push_reg (code, AMD64_RDX);
3832                                 amd64_mov_reg_reg (code, AMD64_RDX, AMD64_RAX, size);
3833                                 sreg2 = AMD64_RDX;
3834                         }
3835
3836                         if (breg == AMD64_RAX) {
3837                                 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, size);
3838                                 breg = AMD64_R11;
3839                         }
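                        /*
                         * The emitted sequence is a standard compare-exchange
                         * loop (sketch):
                         *
                         *   mov  rax, [breg + offset]
                         * retry:
                         *   lock cmpxchg [breg + offset], sreg2
                         *   jne  retry
                         *
                         * On failure cmpxchg itself reloads rax with the
                         * current memory value, so nothing has to be re-read;
                         * on exit rax holds the old value, which is the
                         * result of the opcode.
                         */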
3840
3841                         amd64_mov_reg_membase (code, AMD64_RAX, breg, ins->inst_offset, size);
3842
3843                         br [0] = code; amd64_prefix (code, X86_LOCK_PREFIX);
3844                         amd64_cmpxchg_membase_reg_size (code, breg, ins->inst_offset, sreg2, size);
3845                         br [1] = code; amd64_branch8 (code, X86_CC_NE, -1, FALSE);
3846                         amd64_patch (br [1], br [0]);
3847
3848                         if (ins->dreg != AMD64_RAX) {
3849                                 amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, size);
3850                                 amd64_pop_reg (code, AMD64_RAX);
3851                         }
3852
3853                         if (ins->sreg2 != sreg2)
3854                                 amd64_pop_reg (code, AMD64_RDX);
3855
3856                         break;
3857                 }
3858                 default:
3859                         g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3860                         g_assert_not_reached ();
3861                 }
3862
3863                 if ((code - cfg->native_code - offset) > max_len) {
3864                         g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
3865                                    mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
3866                         g_assert_not_reached ();
3867                 }
3868                
3869                 cpos += max_len;
3870
3871                 last_ins = ins;
3872                 last_offset = offset;
3873                 
3874                 ins = ins->next;
3875         }
3876
3877         cfg->code_len = code - cfg->native_code;
3878 }
3879
3880 void
3881 mono_arch_register_lowlevel_calls (void)
3882 {
3883 }
3884
3885 void
3886 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
3887 {
3888         MonoJumpInfo *patch_info;
3889         gboolean compile_aot = !run_cctors;
3890
3891         for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3892                 unsigned char *ip = patch_info->ip.i + code;
3893                 const unsigned char *target;
3894
3895                 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3896
3897                 if (compile_aot) {
3898                         switch (patch_info->type) {
3899                         case MONO_PATCH_INFO_BB:
3900                         case MONO_PATCH_INFO_LABEL:
3901                                 break;
3902                         default:
3903                                 /* No need to patch these */
3904                                 continue;
3905                         }
3906                 }
3907
3908                 switch (patch_info->type) {
3909                 case MONO_PATCH_INFO_NONE:
3910                         continue;
3911                 case MONO_PATCH_INFO_CLASS_INIT: {
3912                         /* Might already have been changed to a nop */
3913                         guint8* ip2 = ip;
3914                         amd64_call_code (ip2, 0);
3915                         break;
3916                 }
3917                 case MONO_PATCH_INFO_METHOD_REL:
3918                 case MONO_PATCH_INFO_R8:
3919                 case MONO_PATCH_INFO_R4:
3920                         g_assert_not_reached ();
3921                         continue;
3922                 case MONO_PATCH_INFO_BB:
3923                         break;
3924                 default:
3925                         break;
3926                 }
3927                 amd64_patch (ip, (gpointer)target);
3928         }
3929 }
3930
3931 guint8 *
3932 mono_arch_emit_prolog (MonoCompile *cfg)
3933 {
3934         MonoMethod *method = cfg->method;
3935         MonoBasicBlock *bb;
3936         MonoMethodSignature *sig;
3937         MonoInst *inst;
3938         int alloc_size, pos, max_offset, i, quad;
3939         guint8 *code;
3940         CallInfo *cinfo;
3941         gint32 lmf_offset = cfg->arch.lmf_offset;
3942
3943         cfg->code_size =  MAX (((MonoMethodNormal *)method)->header->code_size * 4, 512);
3944         code = cfg->native_code = g_malloc (cfg->code_size);
3945
3946         /* Amount of stack space allocated by register saving code */
3947         pos = 0;
3948
3949         /* 
3950          * The prolog consists of the following parts:
3951          * FP present:
3952          * - push rbp, mov rbp, rsp
3953          * - save callee saved regs using pushes
3954          * - allocate frame
3955          * - save lmf if needed
3956          * FP not present:
3957          * - allocate frame
3958          * - save lmf if needed
3959          * - save callee saved regs using moves
3960          */
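
        /*
         * For example, with a frame pointer and one callee saved register in
         * use the emitted prolog is roughly (sketch; the actual registers
         * depend on cfg->used_int_regs):
         *
         *   push %rbp
         *   mov  %rsp, %rbp
         *   push %r12
         *   sub  $alloc_size, %rsp
         */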
3961
3962         if (!cfg->arch.omit_fp) {
3963                 amd64_push_reg (code, AMD64_RBP);
3964                 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (gpointer));
3965         }
3966
3967         /* Save callee saved registers */
3968         if (!cfg->arch.omit_fp && !method->save_lmf) {
3969                 for (i = 0; i < AMD64_NREG; ++i)
3970                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
3971                                 amd64_push_reg (code, i);
3972                                 pos += sizeof (gpointer);
3973                         }
3974         }
3975
3976         alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
3977
3978         alloc_size -= pos;
3979
3980         if (cfg->arch.omit_fp)
3981                 /* 
3982                  * On enter, the stack is misaligned by the push of the return
3983                  * address. It is either made aligned by the push of %rbp, or by
3984                  * this.
3985                  */
3986                 alloc_size += 8;
3987
3988         cfg->arch.stack_alloc_size = alloc_size;
3989
3990         /* Allocate stack frame */
3991         if (alloc_size) {
3992                 /* See mono_emit_stack_alloc */
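                /*
                 * Commit the frame one page at a time so every page is
                 * touched in order: a single large sub could skip past the
                 * guard page that grows the stack (or, with
                 * MONO_ARCH_SIGSEGV_ON_ALTSTACK, past the page that raises
                 * the overflow signal).
                 */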
3993 #if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
3994                 guint32 remaining_size = alloc_size;
3995                 while (remaining_size >= 0x1000) {
3996                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
3997                         amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
3998                         remaining_size -= 0x1000;
3999                 }
4000                 if (remaining_size)
4001                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, remaining_size);
4002 #else
4003                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size);
4004 #endif
4005         }
4006
4007         /* Stack alignment check */
4008 #if 0
4009         {
4010                 amd64_mov_reg_reg (code, AMD64_RAX, AMD64_RSP, 8);
4011                 amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0xf);
4012                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
4013                 x86_branch8 (code, X86_CC_EQ, 2, FALSE);
4014                 amd64_breakpoint (code);
4015         }
4016 #endif
4017
4018         /* Save LMF */
4019         if (method->save_lmf) {
4020                 /* Save ip */
4021                 amd64_lea_membase (code, AMD64_R11, AMD64_RIP, 0);
4022                 amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
4023                 /* Save fp */
4024                 amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), AMD64_RBP, 8);
4025                 /* Save sp */
4026                 amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
4027                 /* Save method */
4028                 /* FIXME: add a relocation for this */
4029                 if (IS_IMM32 (cfg->method))
4030                         amd64_mov_membase_imm (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), (guint64)cfg->method, 8);
4031                 else {
4032                         amd64_mov_reg_imm (code, AMD64_R11, cfg->method);
4033                         amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
4034                 }
4035                 /* Save callee saved regs */
4036                 amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
4037                 amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
4038                 amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
4039                 amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
4040                 amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);
4041         }
4042
4043         /* Save callee saved registers */
4044         if (cfg->arch.omit_fp && !method->save_lmf) {
4045                 gint32 save_area_offset = 0;
4046
4047                 /* Save callee saved registers after sp is adjusted */
4048                 /* The registers are saved at the bottom of the frame */
4049                 /* FIXME: Optimize this so the regs are saved at the end of the frame in increasing order */
4050                 for (i = 0; i < AMD64_NREG; ++i)
4051                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
4052                                 amd64_mov_membase_reg (code, AMD64_RSP, save_area_offset, i, 8);
4053                                 save_area_offset += 8;
4054                         }
4055         }
4056
4057         /* compute max_offset in order to use short forward jumps */
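        /*
         * The estimate sums a worst case length for every instruction (the
         * MONO_INST_LEN entry of the ins_spec machine description), so a
         * forward branch whose target is known to be within 127 bytes can
         * safely use the short 8 bit form.
         */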
4058         max_offset = 0;
4059         if (cfg->opt & MONO_OPT_BRANCH) {
4060                 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4061                         MonoInst *ins = bb->code;
4062                         bb->max_offset = max_offset;
4063
4064                         if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4065                                 max_offset += 6;
4066                         /* max alignment for loops */
4067                         if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
4068                                 max_offset += LOOP_ALIGNMENT;
4069
4070                         while (ins) {
4071                                 if (ins->opcode == OP_LABEL)
4072                                         ins->inst_c1 = max_offset;
4073                                 
4074                                 max_offset += ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
4075                                 ins = ins->next;
4076                         }
4077                 }
4078         }
4079
4080         sig = mono_method_signature (method);
4081         pos = 0;
4082
4083         cinfo = get_call_info (sig, FALSE);
4084
4085         if (sig->ret->type != MONO_TYPE_VOID) {
4086                 if ((cinfo->ret.storage == ArgInIReg) && (cfg->ret->opcode != OP_REGVAR)) {
4087                         /* Save volatile arguments to the stack */
4088                         amd64_mov_membase_reg (code, cfg->ret->inst_basereg, cfg->ret->inst_offset, cinfo->ret.reg, 8);
4089                 }
4090         }
4091
4092         /* Keep this in sync with emit_load_volatile_arguments */
4093         for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4094                 ArgInfo *ainfo = cinfo->args + i;
4095                 gint32 stack_offset;
4096                 MonoType *arg_type;
4097                 inst = cfg->varinfo [i];
4098
4099                 if (sig->hasthis && (i == 0))
4100                         arg_type = &mono_defaults.object_class->byval_arg;
4101                 else
4102                         arg_type = sig->params [i - sig->hasthis];
4103
4104                 stack_offset = ainfo->offset + ARGS_OFFSET;
4105
4106                 /* Save volatile arguments to the stack */
4107                 if (inst->opcode != OP_REGVAR) {
4108                         switch (ainfo->storage) {
4109                         case ArgInIReg: {
4110                                 guint32 size = 8;
4111
4112                                 /* FIXME: I1 etc */
4113                                 /*
4114                                 if (stack_offset & 0x1)
4115                                         size = 1;
4116                                 else if (stack_offset & 0x2)
4117                                         size = 2;
4118                                 else if (stack_offset & 0x4)
4119                                         size = 4;
4120                                 else
4121                                         size = 8;
4122                                 */
4123                                 amd64_mov_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg, size);
4124                                 break;
4125                         }
4126                         case ArgInFloatSSEReg:
4127                                 amd64_movss_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg);
4128                                 break;
4129                         case ArgInDoubleSSEReg:
4130                                 amd64_movsd_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg);
4131                                 break;
4132                         case ArgValuetypeInReg:
4133                                 for (quad = 0; quad < 2; quad ++) {
4134                                         switch (ainfo->pair_storage [quad]) {
4135                                         case ArgInIReg:
4136                                                 amd64_mov_membase_reg (code, inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad], sizeof (gpointer));
4137                                                 break;
4138                                         case ArgInFloatSSEReg:
4139                                                 amd64_movss_membase_reg (code, inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
4140                                                 break;
4141                                         case ArgInDoubleSSEReg:
4142                                                 amd64_movsd_membase_reg (code, inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
4143                                                 break;
4144                                         case ArgNone:
4145                                                 break;
4146                                         default:
4147                                                 g_assert_not_reached ();
4148                                         }
4149                                 }
4150                                 break;
4151                         default:
4152                                 break;
4153                         }
4154                 }
4155
4156                 if (inst->opcode == OP_REGVAR) {
4157                         /* Argument allocated to (non-volatile) register */
4158                         switch (ainfo->storage) {
4159                         case ArgInIReg:
4160                                 amd64_mov_reg_reg (code, inst->dreg, ainfo->reg, 8);
4161                                 break;
4162                         case ArgOnStack:
4163                                 amd64_mov_reg_membase (code, inst->dreg, AMD64_RBP, ARGS_OFFSET + ainfo->offset, 8);
4164                                 break;
4165                         default:
4166                                 g_assert_not_reached ();
4167                         }
4168                 }
4169         }
4170
4171         if (method->save_lmf) {
4172                 if (lmf_tls_offset != -1) {
4173                         /* Load lmf quickly using the FS register */
4174                         x86_prefix (code, X86_FS_PREFIX);
4175                         amd64_mov_reg_mem (code, AMD64_RAX, lmf_tls_offset, 8);
4176                 }
4177                 else {
4178                         /* 
4179                          * The call might clobber argument registers, but they are already
4180                          * saved to the stack/global regs.
4181                          */
4182
4183                         code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
4184                                                                  (gpointer)"mono_get_lmf_addr");                
4185                 }
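                /*
                 * Either way %rax now holds lmf_addr, the head pointer of
                 * this thread's LMF list; the stores below push the frame's
                 * LMF onto that list so stack walks can locate the managed
                 * frames.
                 */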
4186
4187                 /* Save lmf_addr */
4188                 amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
4189                 /* Save previous_lmf */
4190                 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
4191                 amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
4192                 /* Set new lmf */
4193                 amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset);
4194                 amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);
4195         }
4196
4197
4198         g_free (cinfo);
4199
4200         if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4201                 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4202
4203         cfg->code_len = code - cfg->native_code;
4204
4205         g_assert (cfg->code_len < cfg->code_size);
4206
4207         return code;
4208 }
4209
4210 void
4211 mono_arch_emit_epilog (MonoCompile *cfg)
4212 {
4213         MonoMethod *method = cfg->method;
4214         int quad, pos, i;
4215         guint8 *code;
4216         int max_epilog_size = 16;
4217         CallInfo *cinfo;
4218         gint32 lmf_offset = cfg->arch.lmf_offset;
4219         
4220         if (cfg->method->save_lmf)
4221                 max_epilog_size += 256;
4222         
4223         if (mono_jit_trace_calls != NULL)
4224                 max_epilog_size += 50;
4225
4226         if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4227                 max_epilog_size += 50;
4228
4229         max_epilog_size += (AMD64_NREG * 2);
4230
4231         while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4232                 cfg->code_size *= 2;
4233                 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4234                 mono_jit_stats.code_reallocs++;
4235         }
4236
4237         code = cfg->native_code + cfg->code_len;
4238
4239         if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4240                 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4241
4242         /* the code restoring the registers must be kept in sync with CEE_JMP */
4243         pos = 0;
4244         
4245         if (method->save_lmf) {
4246                 /* Restore previous lmf */
4247                 amd64_mov_reg_membase (code, AMD64_RCX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
4248                 amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
4249                 amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);
4250
4251                 /* Restore caller saved regs */
4252                 if (cfg->used_int_regs & (1 << AMD64_RBP)) {
4253                         amd64_mov_reg_membase (code, AMD64_RBP, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), 8);
4254                 }
4255                 if (cfg->used_int_regs & (1 << AMD64_RBX)) {
4256                         amd64_mov_reg_membase (code, AMD64_RBX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), 8);
4257                 }
4258                 if (cfg->used_int_regs & (1 << AMD64_R12)) {
4259                         amd64_mov_reg_membase (code, AMD64_R12, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), 8);
4260                 }
4261                 if (cfg->used_int_regs & (1 << AMD64_R13)) {
4262                         amd64_mov_reg_membase (code, AMD64_R13, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), 8);
4263                 }
4264                 if (cfg->used_int_regs & (1 << AMD64_R14)) {
4265                         amd64_mov_reg_membase (code, AMD64_R14, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), 8);
4266                 }
4267                 if (cfg->used_int_regs & (1 << AMD64_R15)) {
4268                         amd64_mov_reg_membase (code, AMD64_R15, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), 8);
4269                 }
4270         } else {
4271
4272                 if (cfg->arch.omit_fp) {
4273                         gint32 save_area_offset = 0;
4274
4275                         for (i = 0; i < AMD64_NREG; ++i)
4276                                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
4277                                         amd64_mov_reg_membase (code, i, AMD64_RSP, save_area_offset, 8);
4278                                         save_area_offset += 8;
4279                                 }
4280                 }
4281                 else {
4282                         for (i = 0; i < AMD64_NREG; ++i)
4283                                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
4284                                         pos -= sizeof (gpointer);
4285
4286                         if (pos) {
4287                                 if (pos == - sizeof (gpointer)) {
4288                                         /* Only one register, so avoid lea */
4289                                         for (i = AMD64_NREG - 1; i > 0; --i)
4290                                                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
4291                                                         amd64_mov_reg_membase (code, i, AMD64_RBP, pos, 8);
4292                                                 }
4293                                 }
4294                                 else {
4295                                         amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
4296
4297                                         /* Pop registers in reverse order */
4298                                         for (i = AMD64_NREG - 1; i > 0; --i)
4299                                                 if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
4300                                                         amd64_pop_reg (code, i);
4301                                                 }
4302                                 }
4303                         }
4304                 }
4305         }
4306
4307         /* Load returned vtypes into registers if needed */
4308         cinfo = get_call_info (mono_method_signature (method), FALSE);
4309         if (cinfo->ret.storage == ArgValuetypeInReg) {
4310                 ArgInfo *ainfo = &cinfo->ret;
4311                 MonoInst *inst = cfg->ret;
4312
4313                 for (quad = 0; quad < 2; quad ++) {
4314                         switch (ainfo->pair_storage [quad]) {
4315                         case ArgInIReg:
4316                                 amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), sizeof (gpointer));
4317                                 break;
4318                         case ArgInFloatSSEReg:
4319                                 amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
4320                                 break;
4321                         case ArgInDoubleSSEReg:
4322                                 amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
4323                                 break;
4324                         case ArgNone:
4325                                 break;
4326                         default:
4327                                 g_assert_not_reached ();
4328                         }
4329                 }
4330         }
4331         g_free (cinfo);
4332
4333         if (cfg->arch.omit_fp) {
4334                 if (cfg->arch.stack_alloc_size)
4335                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size);
4336         } else {
4337                 amd64_leave (code);
4338         }
4339         amd64_ret (code);
4340
4341         cfg->code_len = code - cfg->native_code;
4342
4343         g_assert (cfg->code_len < cfg->code_size);
4344
4345         if (cfg->arch.omit_fp) {
4346                 /* 
4347                  * Encode the stack size into used_int_regs so the exception handler
4348                  * can access it.
4349                  */
4350                 g_assert (cfg->arch.stack_alloc_size < (1 << 16));
4351                 cfg->used_int_regs |= (1 << 31) | (cfg->arch.stack_alloc_size << 16);
4352         }
4353 }
4354
4355 void
4356 mono_arch_emit_exceptions (MonoCompile *cfg)
4357 {
4358         MonoJumpInfo *patch_info;
4359         int nthrows, i;
4360         guint8 *code;
4361         MonoClass *exc_classes [16];
4362         guint8 *exc_throw_start [16], *exc_throw_end [16];
4363         guint32 code_size = 0;
4364
4365         /* Compute needed space */
4366         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4367                 if (patch_info->type == MONO_PATCH_INFO_EXC)
4368                         code_size += 40;
4369                 if (patch_info->type == MONO_PATCH_INFO_R8)
4370                         code_size += 8 + 7; /* sizeof (double) + alignment */
4371                 if (patch_info->type == MONO_PATCH_INFO_R4)
4372                         code_size += 4 + 7; /* sizeof (float) + alignment */
4373         }
4374
4375         while (cfg->code_len + code_size > (cfg->code_size - 16)) {
4376                 cfg->code_size *= 2;
4377                 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4378                 mono_jit_stats.code_reallocs++;
4379         }
4380
4381         code = cfg->native_code + cfg->code_len;
4382
4383         /* add code to raise exceptions */
4384         nthrows = 0;
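        /*
         * Throw sequences for the same exception class are shared: each throw
         * site loads into %rsi the distance from its own IP to the end of the
         * shared sequence, then jumps there; the sequence loads the class
         * token into %rdi and calls mono_arch_throw_corlib_exception, which
         * can recover the original throw IP from those two values.
         */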
4385         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4386                 switch (patch_info->type) {
4387                 case MONO_PATCH_INFO_EXC: {
4388                         MonoClass *exc_class;
4389                         guint8 *buf, *buf2;
4390                         guint32 throw_ip;
4391
4392                         amd64_patch (patch_info->ip.i + cfg->native_code, code);
4393
4394                         exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4395                         g_assert (exc_class);
4396                         throw_ip = patch_info->ip.i;
4397
4398                         //x86_breakpoint (code);
4399                         /* Find a throw sequence for the same exception class */
4400                         for (i = 0; i < nthrows; ++i)
4401                                 if (exc_classes [i] == exc_class)
4402                                         break;
4403                         if (i < nthrows) {
4404                                 amd64_mov_reg_imm (code, AMD64_RSI, (exc_throw_end [i] - cfg->native_code) - throw_ip);
4405                                 x86_jump_code (code, exc_throw_start [i]);
4406                                 patch_info->type = MONO_PATCH_INFO_NONE;
4407                         }
4408                         else {
4409                                 buf = code;
4410                                 amd64_mov_reg_imm_size (code, AMD64_RSI, 0xf0f0f0f0, 4);
4411                                 buf2 = code;
4412
4413                                 if (nthrows < 16) {
4414                                         exc_classes [nthrows] = exc_class;
4415                                         exc_throw_start [nthrows] = code;
4416                                 }
4417
4418                                 amd64_mov_reg_imm (code, AMD64_RDI, exc_class->type_token);
4419                                 patch_info->data.name = "mono_arch_throw_corlib_exception";
4420                                 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4421                                 patch_info->ip.i = code - cfg->native_code;
4422
4423                                 if (cfg->compile_aot) {
4424                                         amd64_mov_reg_membase (code, GP_SCRATCH_REG, AMD64_RIP, 0, 8);
4425                                         amd64_call_reg (code, GP_SCRATCH_REG);
4426                                 } else {
4427                                         /* The callee is in memory allocated using the code manager */
4428                                         amd64_call_code (code, 0);
4429                                 }
4430
4431                                 amd64_mov_reg_imm (buf, AMD64_RSI, (code - cfg->native_code) - throw_ip);
4432                                 while (buf < buf2)
4433                                         x86_nop (buf);
4434
4435                                 if (nthrows < 16) {
4436                                         exc_throw_end [nthrows] = code;
4437                                         nthrows ++;
4438                                 }
4439                         }
4440                         break;
4441                 }
4442                 default:
4443                         /* do nothing */
4444                         break;
4445                 }
4446         }
4447
4448         /* Handle relocations with RIP relative addressing */
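        /*
         * Floating point constants are emitted after the method body, and the
         * 32 bit RIP relative displacement of the instruction that references
         * each constant is back-patched here; the displacement field starts
         * 4 bytes into the SSE encoding and 3 bytes into the x87 one, hence
         * pos + 4 versus pos + 3 below.
         */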
4449         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4450                 gboolean remove = FALSE;
4451
4452                 switch (patch_info->type) {
4453                 case MONO_PATCH_INFO_R8: {
4454                         guint8 *pos;
4455
4456                         code = (guint8*)ALIGN_TO (code, 8);
4457
4458                         pos = cfg->native_code + patch_info->ip.i;
4459
4460                         *(double*)code = *(double*)patch_info->data.target;
4461
4462                         if (use_sse2)
4463                                 *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
4464                         else
4465                                 *(guint32*)(pos + 3) = (guint8*)code - pos - 7;
4466                         code += 8;
4467
4468                         remove = TRUE;
4469                         break;
4470                 }
4471                 case MONO_PATCH_INFO_R4: {
4472                         guint8 *pos;
4473
4474                         code = (guint8*)ALIGN_TO (code, 8);
4475
4476                         pos = cfg->native_code + patch_info->ip.i;
4477
4478                         *(float*)code = *(float*)patch_info->data.target;
4479
4480                         if (use_sse2)
4481                                 *(guint32*)(pos + 4) = (guint8*)code - pos - 8;
4482                         else
4483                                 *(guint32*)(pos + 3) = (guint8*)code - pos - 7;
4484                         code += 4;
4485
4486                         remove = TRUE;
4487                         break;
4488                 }
4489                 default:
4490                         break;
4491                 }
4492
4493                 if (remove) {
4494                         if (patch_info == cfg->patch_info)
4495                                 cfg->patch_info = patch_info->next;
4496                         else {
4497                                 MonoJumpInfo *tmp;
4498
4499                                 for (tmp = cfg->patch_info; tmp->next != patch_info; tmp = tmp->next)
4500                                         ;
4501                                 tmp->next = patch_info->next;
4502                         }
4503                 }
4504         }
4505
4506         cfg->code_len = code - cfg->native_code;
4507
4508         g_assert (cfg->code_len < cfg->code_size);
4509
4510 }
4511
4512 void*
4513 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
4514 {
4515         guchar *code = p;
4516         CallInfo *cinfo = NULL;
4517         MonoMethodSignature *sig;
4518         MonoInst *inst;
4519         int i, n, stack_area = 0;
4520
4521         /* Keep this in sync with mono_arch_get_argument_info */
4522
4523         if (enable_arguments) {
4524                 /* Allocate a new area on the stack and save arguments there */
4525                 sig = mono_method_signature (cfg->method);
4526
4527                 cinfo = get_call_info (sig, FALSE);
4528
4529                 n = sig->param_count + sig->hasthis;
4530
4531                 stack_area = ALIGN_TO (n * 8, 16);
4532
4533                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_area);
4534
4535                 for (i = 0; i < n; ++i) {
4536                         inst = cfg->varinfo [i];
4537
4538                         if (inst->opcode == OP_REGVAR)
4539                                 amd64_mov_membase_reg (code, AMD64_RSP, (i * 8), inst->dreg, 8);
4540                         else {
4541                                 amd64_mov_reg_membase (code, AMD64_R11, inst->inst_basereg, inst->inst_offset, 8);
4542                                 amd64_mov_membase_reg (code, AMD64_RSP, (i * 8), AMD64_R11, 8);
4543                         }
4544                 }
4545         }
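
        /*
         * The tracing function receives the method (materialized into %rdi by
         * the patch below) and, in %rsi, a pointer to the area the arguments
         * were just saved to.
         */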
4546
4547         mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, cfg->method);
4548         amd64_set_reg_template (code, AMD64_RDI);
4549         amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RSP, 8);
4550         code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
4551
4552         if (enable_arguments) {
4553                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, stack_area);
4554
4555                 g_free (cinfo);
4556         }
4557
4558         return code;
4559 }
4560
4561 enum {
4562         SAVE_NONE,
4563         SAVE_STRUCT,
4564         SAVE_EAX,
4565         SAVE_EAX_EDX,
4566         SAVE_XMM
4567 };
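
/*
 * The values above select how the return value is preserved across the
 * leave-method tracing call: SAVE_EAX keeps %rax and SAVE_XMM keeps %xmm0
 * on amd64; SAVE_EAX_EDX is never assigned here.
 */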
4568
4569 void*
4570 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
4571 {
4572         guchar *code = p;
4573         int save_mode = SAVE_NONE;
4574         MonoMethod *method = cfg->method;
4575         int rtype = mono_type_get_underlying_type (mono_method_signature (method)->ret)->type;
4576         
4577         switch (rtype) {
4578         case MONO_TYPE_VOID:
4579                 /* special case string .ctor icall */
4580                 if (strcmp (".ctor", method->name) == 0 && method->klass == mono_defaults.string_class)
4581                         save_mode = SAVE_EAX;
4582                 else
4583                         save_mode = SAVE_NONE;
4584                 break;
4585         case MONO_TYPE_I8:
4586         case MONO_TYPE_U8:
4587                 save_mode = SAVE_EAX;
4588                 break;
4589         case MONO_TYPE_R4:
4590         case MONO_TYPE_R8:
4591                 save_mode = SAVE_XMM;
4592                 break;
4593         case MONO_TYPE_GENERICINST:
4594                 if (mono_type_generic_inst_is_valuetype (mono_method_signature (method)->ret)) {
4595                         save_mode = SAVE_EAX;
4596                         break;
4597                 }
4598                 /* Fall through */
4599         case MONO_TYPE_VALUETYPE:
4600                 save_mode = SAVE_STRUCT;
4601                 break;
4602         default:
4603                 save_mode = SAVE_EAX;
4604                 break;
4605         }
4606
4607         /* Save the result and copy it into the proper argument register */
4608         switch (save_mode) {
4609         case SAVE_EAX:
4610                 amd64_push_reg (code, AMD64_RAX);
4611                 /* Align stack */
4612                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
4613                 if (enable_arguments)
4614                         amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RAX, 8);
4615                 break;
4616         case SAVE_STRUCT:
4617                 /* FIXME: */
4618                 if (enable_arguments)
4619                         amd64_mov_reg_imm (code, AMD64_RSI, 0);
4620                 break;
4621         case SAVE_XMM:
4622                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
4623                 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
4624                 /* Align stack */
4625                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
4626                 /* 
4627                  * The result is already in the proper argument register so no copying
4628                  * needed.
4629                  */
4630                 break;
4631         case SAVE_NONE:
4632                 break;
4633         default:
4634                 g_assert_not_reached ();
4635         }
4636
        /* Set %al to the number of vector registers used, as the varargs ABI requires */
4638         if (save_mode == SAVE_XMM)
4639                 amd64_mov_reg_imm (code, AMD64_RAX, 1);
4640         else
4641                 amd64_mov_reg_imm (code, AMD64_RAX, 0);
4642
        mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHODCONST, method);
4644         amd64_set_reg_template (code, AMD64_RDI);
4645         code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
4646
4647         /* Restore result */
4648         switch (save_mode) {
4649         case SAVE_EAX:
4650                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
4651                 amd64_pop_reg (code, AMD64_RAX);
4652                 break;
4653         case SAVE_STRUCT:
4654                 /* FIXME: */
4655                 break;
4656         case SAVE_XMM:
4657                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
4658                 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
4659                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
4660                 break;
4661         case SAVE_NONE:
4662                 break;
4663         default:
4664                 g_assert_not_reached ();
4665         }
4666
4667         return code;
4668 }
4669
4670 void
4671 mono_arch_flush_icache (guint8 *code, gint size)
4672 {
        /* Not needed on amd64: the instruction cache is kept coherent with code stores */
4674 }
4675
4676 void
4677 mono_arch_flush_register_windows (void)
4678 {
4679 }
4680
4681 gboolean 
4682 mono_arch_is_inst_imm (gint64 imm)
4683 {
4684         return amd64_is_imm32 (imm);
4685 }
4686
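/* REX prefixes occupy the 0x40-0x4f byte range in 64 bit mode */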
4687 #define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
4688
4689 static int reg_to_ucontext_reg [] = {
4690         REG_RAX, REG_RCX, REG_RDX, REG_RBX, REG_RSP, REG_RBP, REG_RSI, REG_RDI,
4691         REG_R8, REG_R9, REG_R10, REG_R11, REG_R12, REG_R13, REG_R14, REG_R15,
4692         REG_RIP
4693 };
4694
4695 /*
 * Determine whether the trap whose info is in SIGINFO is caused by
4697  * integer overflow.
4698  */
4699 gboolean
4700 mono_arch_is_int_overflow (void *sigctx, void *info)
4701 {
4702         ucontext_t *ctx = (ucontext_t*)sigctx;
4703         guint8* rip;
4704         int reg;
4705
4706         rip = (guint8*)ctx->uc_mcontext.gregs [REG_RIP];
4707
4708         if (IS_REX (rip [0])) {
4709                 reg = amd64_rex_b (rip [0]);
4710                 rip ++;
4711         }
4712         else
4713                 reg = 0;
4714
4715         if ((rip [0] == 0xf7) && (x86_modrm_mod (rip [1]) == 0x3) && (x86_modrm_reg (rip [1]) == 0x7)) {
4716                 /* idiv REG */
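                /* 0xf7 with mod == 3 and reg == 7 is "idiv reg". The only case that
                 * overflows is INT64_MIN / -1, so checking the divisor for -1
                 * distinguishes overflow from division by zero. */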
4717                 reg += x86_modrm_rm (rip [1]);
4718
4719                 if (ctx->uc_mcontext.gregs [reg_to_ucontext_reg [reg]] == -1)
4720                         return TRUE;
4721         }
4722
4723         return FALSE;
4724 }
4725
4726 guint32
4727 mono_arch_get_patch_offset (guint8 *code)
4728 {
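        /* the immediate to patch starts after REX + opcode + ModRM, i.e. 3 bytes in */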
4729         return 3;
4730 }
4731
4732 gpointer*
4733 mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
4734 {
4735         guint32 reg;
4736         guint32 disp;
4737         guint8 rex = 0;
4738
4739         /* go to the start of the call instruction
4740          *
4741          * address_byte = (m << 6) | (o << 3) | reg
4742          * call opcode: 0xff address_byte displacement
4743          * 0xff m=1,o=2 imm8
4744          * 0xff m=2,o=2 imm32
4745          */
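        /* e.g. "41 ff 15 <imm32>" ("call *OFFSET(%rip)" with a REX prefix, the
         * first case below) is 7 bytes long, hence the 7 byte backup. */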
4746         code -= 7;
4747
4748         /* 
         * A given byte sequence can match more than one case here, so we have to be
4750          * really careful about the ordering of the cases. Longer sequences
4751          * come first.
4752          */
4753         if ((code [0] == 0x41) && (code [1] == 0xff) && (code [2] == 0x15)) {
4754                 /* call OFFSET(%rip) */
4755                 disp = *(guint32*)(code + 3);
4756                 return (gpointer*)(code + disp + 7);
4757         }
4758         else if ((code [1] == 0xff) && (amd64_modrm_reg (code [2]) == 0x2) && (amd64_modrm_mod (code [2]) == 0x2)) {
4759                 /* call *[reg+disp32] */
4760                 if (IS_REX (code [0]))
4761                         rex = code [0];
4762                 reg = amd64_modrm_rm (code [2]);
4763                 disp = *(guint32*)(code + 3);
4764                 //printf ("B: [%%r%d+0x%x]\n", reg, disp);
4765         }
4766         else if (code [2] == 0xe8) {
4767                 /* call <ADDR> */
4768                 return NULL;
4769         }
4770         else if (IS_REX (code [4]) && (code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x3)) {
4771                 /* call *%reg */
4772                 return NULL;
4773         }
4774         else if ((code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x1)) {
4775                 /* call *[reg+disp8] */
4776                 if (IS_REX (code [3]))
4777                         rex = code [3];
4778                 reg = amd64_modrm_rm (code [5]);
4779                 disp = *(guint8*)(code + 6);
4780                 //printf ("B: [%%r%d+0x%x]\n", reg, disp);
4781         }
4782         else if ((code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x0)) {
4783                         /*
                         * This is an interface call: check that none of the cases above can match it, e.g.:
4785                          * 8b 40 30   mov    0x30(%eax),%eax
4786                          * ff 10      call   *(%eax)
4787                          */
4788                 if (IS_REX (code [4]))
4789                         rex = code [4];
4790                 reg = amd64_modrm_rm (code [6]);
4791                 disp = 0;
4792         }
4793         else
4794                 g_assert_not_reached ();
4795
4796         reg += amd64_rex_b (rex);
4797
4798         /* R11 is clobbered by the trampoline code */
4799         g_assert (reg != AMD64_R11);
4800
        return (gpointer*)(((guint64)(regs [reg])) + disp);
4802 }
4803
4804 gpointer*
4805 mono_arch_get_delegate_method_ptr_addr (guint8* code, gpointer *regs)
4806 {
4807         guint32 reg;
4808         guint32 disp;
4809
4810         code -= 10;
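        /* the expected sequence is 10 bytes long: 3 (mov REG, %rax) +
         * 4 (mov <disp8>(%rax), %rax) + 3 (call *%rax) */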
4811
4812         if (IS_REX (code [0]) && (code [1] == 0x8b) && (code [3] == 0x48) && (code [4] == 0x8b) && (code [5] == 0x40) && (code [7] == 0x48) && (code [8] == 0xff) && (code [9] == 0xd0)) {
4813                 /* mov REG, %rax; mov <OFFSET>(%rax), %rax; call *%rax */
4814                 reg = amd64_rex_b (code [0]) + amd64_modrm_rm (code [2]);
4815                 disp = code [6];
4816
4817                 if (reg == AMD64_RAX)
4818                         return NULL;
4819                 else
4820                         return (gpointer*)(((guint64)(regs [reg])) + disp);
4821         }
4822
4823         return NULL;
4824 }
4825
4826 /*
4827  * Support for fast access to the thread-local lmf structure using the GS
4828  * segment register on NPTL + kernel 2.6.x.
4829  */
4830
4831 static gboolean tls_offset_inited = FALSE;
4832
4833 void
4834 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4835 {
4836         if (!tls_offset_inited) {
4837                 tls_offset_inited = TRUE;
4838
4839                 appdomain_tls_offset = mono_domain_get_tls_offset ();
4840                 lmf_tls_offset = mono_get_lmf_tls_offset ();
4841                 thread_tls_offset = mono_thread_get_tls_offset ();
4842         }               
4843 }
4844
4845 void
4846 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
4847 {
4848 }
4849
4850 void
4851 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
4852 {
4853         MonoCallInst *call = (MonoCallInst*)inst;
4854         CallInfo * cinfo = get_call_info (inst->signature, FALSE);
4855
4856         if (vt_reg != -1) {
4857                 MonoInst *vtarg;
4858
4859                 if (cinfo->ret.storage == ArgValuetypeInReg) {
4860                         /*
                         * The valuetype is in RAX:RDX after the call and needs to be copied
                         * to the stack. Push the address here so the call instruction can
4863                          * access it.
4864                          */
4865                         MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
4866                         vtarg->sreg1 = vt_reg;
4867                         mono_bblock_add_inst (cfg->cbb, vtarg);
4868
4869                         /* Align stack */
4870                         MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
4871                 }
4872                 else {
4873                         MONO_INST_NEW (cfg, vtarg, OP_MOVE);
4874                         vtarg->sreg1 = vt_reg;
4875                         vtarg->dreg = mono_regstate_next_int (cfg->rs);
4876                         mono_bblock_add_inst (cfg->cbb, vtarg);
4877
4878                         mono_call_inst_add_outarg_reg (call, vtarg->dreg, cinfo->ret.reg, FALSE);
4879                 }
4880         }
4881
4882         /* add the this argument */
4883         if (this_reg != -1) {
4884                 MonoInst *this;
4885                 MONO_INST_NEW (cfg, this, OP_MOVE);
4886                 this->type = this_type;
4887                 this->sreg1 = this_reg;
4888                 this->dreg = mono_regstate_next_int (cfg->rs);
4889                 mono_bblock_add_inst (cfg->cbb, this);
4890
4891                 mono_call_inst_add_outarg_reg (call, this->dreg, cinfo->args [0].reg, FALSE);
4892         }
4893
4894         g_free (cinfo);
4895 }
4896
4897 MonoInst*
4898 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4899 {
4900         MonoInst *ins = NULL;
4901
4902         if (cmethod->klass == mono_defaults.math_class) {
4903                 if (strcmp (cmethod->name, "Sin") == 0) {
4904                         MONO_INST_NEW (cfg, ins, OP_SIN);
4905                         ins->inst_i0 = args [0];
4906                 } else if (strcmp (cmethod->name, "Cos") == 0) {
4907                         MONO_INST_NEW (cfg, ins, OP_COS);
4908                         ins->inst_i0 = args [0];
4909                 } else if (strcmp (cmethod->name, "Tan") == 0) {
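                        /* there is no SSE2 tan instruction, so with SSE2 enabled ins
                         * stays NULL and no intrinsic is emitted (likewise for Atan) */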
4910                         if (use_sse2)
4911                                 return ins;
4912                         MONO_INST_NEW (cfg, ins, OP_TAN);
4913                         ins->inst_i0 = args [0];
4914                 } else if (strcmp (cmethod->name, "Atan") == 0) {
4915                         if (use_sse2)
4916                                 return ins;
4917                         MONO_INST_NEW (cfg, ins, OP_ATAN);
4918                         ins->inst_i0 = args [0];
4919                 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
4920                         MONO_INST_NEW (cfg, ins, OP_SQRT);
4921                         ins->inst_i0 = args [0];
4922                 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
4923                         MONO_INST_NEW (cfg, ins, OP_ABS);
4924                         ins->inst_i0 = args [0];
4925                 }
4926 #if 0
4927                 /* OP_FREM is not IEEE compatible */
4928                 else if (strcmp (cmethod->name, "IEEERemainder") == 0) {
4929                         MONO_INST_NEW (cfg, ins, OP_FREM);
4930                         ins->inst_i0 = args [0];
4931                         ins->inst_i1 = args [1];
4932                 }
4933 #endif
4934         } else if (cmethod->klass == mono_defaults.thread_class &&
4935                            strcmp (cmethod->name, "MemoryBarrier") == 0) {
4936                 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
        } else if (cmethod->klass->image == mono_defaults.corlib &&
4938                            (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4939                            (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4940
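                /* Increment/Decrement become atomic adds of the constants +1/-1; the
                 * _NEW opcodes return the new value of the location, matching the
                 * managed semantics. */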
4941                 if (strcmp (cmethod->name, "Increment") == 0) {
4942                         MonoInst *ins_iconst;
4943                         guint32 opcode;
4944
4945                         if (fsig->params [0]->type == MONO_TYPE_I4)
4946                                 opcode = OP_ATOMIC_ADD_NEW_I4;
4947                         else if (fsig->params [0]->type == MONO_TYPE_I8)
4948                                 opcode = OP_ATOMIC_ADD_NEW_I8;
4949                         else
4950                                 g_assert_not_reached ();
4951                         MONO_INST_NEW (cfg, ins, opcode);
4952                         MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4953                         ins_iconst->inst_c0 = 1;
4954
4955                         ins->inst_i0 = args [0];
4956                         ins->inst_i1 = ins_iconst;
4957                 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4958                         MonoInst *ins_iconst;
4959                         guint32 opcode;
4960
4961                         if (fsig->params [0]->type == MONO_TYPE_I4)
4962                                 opcode = OP_ATOMIC_ADD_NEW_I4;
4963                         else if (fsig->params [0]->type == MONO_TYPE_I8)
4964                                 opcode = OP_ATOMIC_ADD_NEW_I8;
4965                         else
4966                                 g_assert_not_reached ();
4967                         MONO_INST_NEW (cfg, ins, opcode);
4968                         MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4969                         ins_iconst->inst_c0 = -1;
4970
4971                         ins->inst_i0 = args [0];
4972                         ins->inst_i1 = ins_iconst;
4973                 } else if (strcmp (cmethod->name, "Add") == 0) {
4974                         guint32 opcode;
4975
4976                         if (fsig->params [0]->type == MONO_TYPE_I4)
4977                                 opcode = OP_ATOMIC_ADD_I4;
4978                         else if (fsig->params [0]->type == MONO_TYPE_I8)
4979                                 opcode = OP_ATOMIC_ADD_I8;
4980                         else
4981                                 g_assert_not_reached ();
4982                         
4983                         MONO_INST_NEW (cfg, ins, opcode);
4984
4985                         ins->inst_i0 = args [0];
4986                         ins->inst_i1 = args [1];
4987                 } else if (strcmp (cmethod->name, "Exchange") == 0) {
4988                         guint32 opcode;
4989
4990                         if (fsig->params [0]->type == MONO_TYPE_I4)
4991                                 opcode = OP_ATOMIC_EXCHANGE_I4;
4992                         else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
4993                                          (fsig->params [0]->type == MONO_TYPE_I) ||
4994                                          (fsig->params [0]->type == MONO_TYPE_OBJECT))
4995                                 opcode = OP_ATOMIC_EXCHANGE_I8;
4996                         else
4997                                 return NULL;
4998
4999                         MONO_INST_NEW (cfg, ins, opcode);
5000
5001                         ins->inst_i0 = args [0];
5002                         ins->inst_i1 = args [1];
5003                 } else if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
                        /* aligned 64 bit reads are already atomic on amd64 */
5005                         MONO_INST_NEW (cfg, ins, CEE_LDIND_I8);
5006                         ins->inst_i0 = args [0];
5007                 }
5008
5009                 /* 
5010                  * Can't implement CompareExchange methods this way since they have
5011                  * three arguments.
5012                  */
5013         }
5014
5015         return ins;
5016 }
5017
5018 gboolean
5019 mono_arch_print_tree (MonoInst *tree, int arity)
5020 {
        return FALSE;
5022 }
5023
MonoInst*
mono_arch_get_domain_intrinsic (MonoCompile *cfg)
5025 {
5026         MonoInst* ins;
5027         
5028         if (appdomain_tls_offset == -1)
5029                 return NULL;
5030         
5031         MONO_INST_NEW (cfg, ins, OP_TLS_GET);
5032         ins->inst_offset = appdomain_tls_offset;
5033         return ins;
5034 }
5035
MonoInst*
mono_arch_get_thread_intrinsic (MonoCompile *cfg)
5037 {
5038         MonoInst* ins;
5039         
5040         if (thread_tls_offset == -1)
5041                 return NULL;
5042         
5043         MONO_INST_NEW (cfg, ins, OP_TLS_GET);
5044         ins->inst_offset = thread_tls_offset;
5045         return ins;
5046 }