/*
 * mini-amd64.c: AMD64 backend for the Mono code generator
 *
 * Based on mini-x86.c.
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Patrik Torstensson
 *
 * (C) 2003 Ximian, Inc.
 */
#include "mini.h"
#include <string.h>
#include <math.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/profiler-private.h>
#include <mono/utils/mono-math.h>

#include "trace.h"
#include "mini-amd64.h"
#include "inssel.h"
#include "cpu-amd64.h"

static gint lmf_tls_offset = -1;

#ifdef PLATFORM_WIN32
/* Under windows, the default pinvoke calling convention is stdcall */
#define CALLCONV_IS_STDCALL(call_conv) (((call_conv) == MONO_CALL_STDCALL) || ((call_conv) == MONO_CALL_DEFAULT))
#else
#define CALLCONV_IS_STDCALL(call_conv) ((call_conv) == MONO_CALL_STDCALL)
#endif

#define SIGNAL_STACK_SIZE (64 * 1024)

#define ARGS_OFFSET 16
#define GP_SCRATCH_REG AMD64_R11

/*
 * AMD64 register usage:
 * - callee saved registers are used for global register allocation
 * - %r11 is used for materializing 64 bit constants in opcodes
 * - the rest is used for local allocation
 */
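/*
 * For example, a call through an absolute 64 bit address can be emitted as
 * (a sketch using the emitter macros from amd64-codegen.h):
 *
 *   amd64_mov_reg_imm (code, GP_SCRATCH_REG, target);  // movabs $target, %r11
 *   amd64_call_reg (code, GP_SCRATCH_REG);             // call *%r11
 */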

#define NOT_IMPLEMENTED g_assert_not_reached ()

static int
mono_spillvar_offset_float (MonoCompile *cfg, int spillvar);

const char*
mono_arch_regname (int reg) {
        switch (reg) {
        case AMD64_RAX: return "%rax";
        case AMD64_RBX: return "%rbx";
        case AMD64_RCX: return "%rcx";
        case AMD64_RDX: return "%rdx";
        case AMD64_RSP: return "%rsp";
        case AMD64_RBP: return "%rbp";
        case AMD64_RDI: return "%rdi";
        case AMD64_RSI: return "%rsi";
        case AMD64_R8: return "%r8";
        case AMD64_R9: return "%r9";
        case AMD64_R10: return "%r10";
        case AMD64_R11: return "%r11";
        case AMD64_R12: return "%r12";
        case AMD64_R13: return "%r13";
        case AMD64_R14: return "%r14";
        case AMD64_R15: return "%r15";
        }
        return "unknown";
}

static inline void
amd64_patch (unsigned char* code, gpointer target)
{
        /* Skip REX */
        if ((code [0] >= 0x40) && (code [0] <= 0x4f))
                code += 1;

        x86_patch (code, (unsigned char*)target);
}
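/*
 * Example: a direct call emitted as "e8 rel32" carries no REX prefix, so the
 * check above is a no-op; an instruction such as "48 b8 imm64" (movabs
 * $imm64, %rax) starts with the REX byte 0x48, which is skipped so that
 * x86_patch sees the opcode byte it expects.
 */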

typedef enum {
        ArgInIReg,
        ArgInIRegPair,
        ArgInSSEReg,
        ArgOnStack,
} ArgStorage;

typedef struct {
        gint16 offset;
        gint8  reg;
        ArgStorage storage;
} ArgInfo;

typedef struct {
        int nargs;
        guint32 stack_usage;
        guint32 reg_usage;
        ArgInfo ret;
        ArgInfo sig_cookie;
        ArgInfo args [1];
} CallInfo;

#define DEBUG(a) if (cfg->verbose_level > 1) a

#define PARAM_REGS 6

static AMD64_Reg_No param_regs [] = { AMD64_RDI, AMD64_RSI, AMD64_RDX, AMD64_RCX, AMD64_R8, AMD64_R9 };

static inline void
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
{
        ainfo->offset = *stack_size;

        if (*gr >= PARAM_REGS) {
                ainfo->storage = ArgOnStack;
                (*stack_size) += sizeof (gpointer);
        }
        else {
                ainfo->storage = ArgInIReg;
                ainfo->reg = param_regs [*gr];
                (*gr) ++;
        }
}

#define FLOAT_PARAM_REGS 8

static inline void
add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
{
        ainfo->offset = *stack_size;

        if (*gr >= FLOAT_PARAM_REGS) {
                ainfo->storage = ArgOnStack;
                (*stack_size) += sizeof (gpointer);
        }
        else {
                /* A double register */
                ainfo->storage = ArgInSSEReg;
                ainfo->reg = *gr;
                (*gr) += 1;
        }
}
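/*
 * Example (a sketch of the scheme implemented by the two helpers above):
 * integer arguments are assigned to %rdi, %rsi, %rdx, %rcx, %r8, %r9 in
 * order and floating point arguments to %xmm0-%xmm7; once a register file
 * is exhausted, further arguments of that class go on the stack and bump
 * stack_size by 8 bytes each.
 */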

/*
 * get_call_info:
 *
 *  Obtain information about a call according to the calling convention.
 * For AMD64, see the "System V ABI, x86-64 Architecture Processor Supplement
 * Draft Version 0.23" document for more information.
 */
static CallInfo*
get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
{
        guint32 i, gr, fr, simpletype;
        int n = sig->hasthis + sig->param_count;
        guint32 stack_size = 0;
        CallInfo *cinfo;

        cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

        gr = 0;
        fr = 0;

        if (((sig->ret->type == MONO_TYPE_VALUETYPE) && !sig->ret->data.klass->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
                add_general (&gr, &stack_size, &cinfo->ret);
        }

        /* this */
        if (sig->hasthis)
                add_general (&gr, &stack_size, cinfo->args + 0);

        for (i = 0; i < sig->param_count; ++i) {
                ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];

                if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
                        /* Emit the signature cookie just before the implicit arguments */
                        add_general (&gr, &stack_size, &cinfo->sig_cookie);
                        /* Prevent implicit arguments from being passed in registers */
                        gr = PARAM_REGS;
                }

                if (sig->params [i]->byref) {
                        add_general (&gr, &stack_size, ainfo);
                        continue;
                }
                simpletype = sig->params [i]->type;
        enum_calc_size:
                switch (simpletype) {
                case MONO_TYPE_BOOLEAN:
                case MONO_TYPE_I1:
                case MONO_TYPE_U1:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_I2:
                case MONO_TYPE_U2:
                case MONO_TYPE_CHAR:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_I4:
                case MONO_TYPE_U4:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_I:
                case MONO_TYPE_U:
                case MONO_TYPE_PTR:
                case MONO_TYPE_CLASS:
                case MONO_TYPE_OBJECT:
                case MONO_TYPE_STRING:
                case MONO_TYPE_SZARRAY:
                case MONO_TYPE_ARRAY:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_VALUETYPE:
                        if (sig->params [i]->data.klass->enumtype) {
                                simpletype = sig->params [i]->data.klass->enum_basetype->type;
                                goto enum_calc_size;
                        }

                        if (sig->pinvoke)
                                NOT_IMPLEMENTED;
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_TYPEDBYREF:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_U8:
                case MONO_TYPE_I8:
                        add_general (&gr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_R4:
                        add_float (&fr, &stack_size, ainfo);
                        break;
                case MONO_TYPE_R8:
                        add_float (&fr, &stack_size, ainfo);
                        break;
                default:
                        g_assert_not_reached ();
                }
        }

        /* return value */
        {
                simpletype = sig->ret->type;
enum_retvalue:
                switch (simpletype) {
                case MONO_TYPE_BOOLEAN:
                case MONO_TYPE_I1:
                case MONO_TYPE_U1:
                case MONO_TYPE_I2:
                case MONO_TYPE_U2:
                case MONO_TYPE_CHAR:
                case MONO_TYPE_I4:
                case MONO_TYPE_U4:
                case MONO_TYPE_I:
                case MONO_TYPE_U:
                case MONO_TYPE_PTR:
                case MONO_TYPE_CLASS:
                case MONO_TYPE_OBJECT:
                case MONO_TYPE_SZARRAY:
                case MONO_TYPE_ARRAY:
                case MONO_TYPE_STRING:
                        cinfo->ret.storage = ArgInIReg;
                        cinfo->ret.reg = AMD64_RAX;
                        break;
                case MONO_TYPE_U8:
                case MONO_TYPE_I8:
                        cinfo->ret.storage = ArgInIReg;
                        cinfo->ret.reg = AMD64_RAX;
                        break;
                case MONO_TYPE_R4:
                case MONO_TYPE_R8:
                        cinfo->ret.storage = ArgInSSEReg;
                        cinfo->ret.reg = AMD64_XMM0;
                        break;
                case MONO_TYPE_VALUETYPE:
                        if (sig->ret->data.klass->enumtype) {
                                simpletype = sig->ret->data.klass->enum_basetype->type;
                                goto enum_retvalue;
                        }
                        if (sig->pinvoke)
                                NOT_IMPLEMENTED;
                        else
                                /* Already done */
                                ;
                        break;
                case MONO_TYPE_TYPEDBYREF:
                        if (sig->pinvoke)
                                /* Same as a valuetype with size 24 */
                                NOT_IMPLEMENTED;
                        else
                                /* Already done */
                                ;
                        break;
                case MONO_TYPE_VOID:
                        break;
                default:
                        g_error ("Can't handle 0x%x as a return value", sig->ret->type);
                }
        }

        cinfo->stack_usage = stack_size;
        cinfo->reg_usage = gr;
        return cinfo;
}
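/*
 * Example (a sketch of the classification above): for a signature like
 *
 *      int Foo (object this, int a, double b, long c)
 *
 * the assignment is  this -> %rdi, a -> %rsi, b -> %xmm0, c -> %rdx,
 * the return value goes in %rax, and stack_usage stays 0 because every
 * argument fits in a register.
 */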

/*
 * mono_arch_get_argument_info:
 * @csig:  a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the activation frame.
 */
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
        int k, frame_size = 0;
        int size, align, pad;
        int offset = 8;

        NOT_IMPLEMENTED;

        if (MONO_TYPE_ISSTRUCT (csig->ret)) {
                frame_size += sizeof (gpointer);
                offset += 4;
        }

        arg_info [0].offset = offset;

        if (csig->hasthis) {
                frame_size += sizeof (gpointer);
                offset += 4;
        }

        arg_info [0].size = frame_size;

        for (k = 0; k < param_count; k++) {

                if (csig->pinvoke)
                        size = mono_type_native_stack_size (csig->params [k], &align);
                else
                        size = mono_type_stack_size (csig->params [k], &align);

                /* ignore alignment for now */
                align = 1;

                frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
                arg_info [k].pad = pad;
                frame_size += size;
                arg_info [k + 1].pad = 0;
                arg_info [k + 1].size = size;
                offset += pad;
                arg_info [k + 1].offset = offset;
                offset += size;
        }

        align = MONO_ARCH_FRAME_ALIGNMENT;
        frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
        arg_info [k].pad = pad;

        return frame_size;
}
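/*
 * Example (a sketch; the routine is still x86-oriented and disabled by the
 * NOT_IMPLEMENTED above): for  int Bar (int a, long b)  arg_info [0]
 * describes the header (offset 8 here), arg_info [1] and arg_info [2] hold
 * size/offset/padding for a and b, and the return value is the frame size
 * rounded up to MONO_ARCH_FRAME_ALIGNMENT.
 */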

static int
cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
{
        return 0;
}
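/*
 * This is still a stub: it reports no CPU features, so the CMOV/FCMOV
 * detection below never fires. A real implementation would be roughly
 * (a sketch, untested):
 *
 *      __asm__ __volatile__ ("cpuid"
 *              : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
 *              : "a" (id));
 *      return 1;
 */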

/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
        guint16 fpcw;

        /* spec compliance requires running with double precision */
        __asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
        fpcw &= ~X86_FPCW_PRECC_MASK;
        fpcw |= X86_FPCW_PREC_DOUBLE;
        __asm__  __volatile__ ("fldcw %0\n": : "m" (fpcw));
        __asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
}

/*
 * This function returns the optimizations supported on this cpu.
 */
guint32
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
        int eax, ebx, ecx, edx;
        guint32 opts = 0;

        /* FIXME: AMD64 */

        *exclude_mask = 0;
        /* Feature Flags function, flags returned in EDX. */
        if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
                if (edx & (1 << 15)) {
                        opts |= MONO_OPT_CMOV;
                        if (edx & 1)
                                opts |= MONO_OPT_FCMOV;
                        else
                                *exclude_mask |= MONO_OPT_FCMOV;
                } else
                        *exclude_mask |= MONO_OPT_CMOV;
        }
        return opts;
}

static gboolean
is_regsize_var (MonoType *t) {
        if (t->byref)
                return TRUE;
        switch (t->type) {
        case MONO_TYPE_I4:
        case MONO_TYPE_U4:
        case MONO_TYPE_I:
        case MONO_TYPE_U:
        case MONO_TYPE_PTR:
                return TRUE;
        case MONO_TYPE_OBJECT:
        case MONO_TYPE_STRING:
        case MONO_TYPE_CLASS:
        case MONO_TYPE_SZARRAY:
        case MONO_TYPE_ARRAY:
                return TRUE;
        case MONO_TYPE_VALUETYPE:
                if (t->data.klass->enumtype)
                        return is_regsize_var (t->data.klass->enum_basetype);
                return FALSE;
        }
        return FALSE;
}

GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
        GList *vars = NULL;
        int i;

        for (i = 0; i < cfg->num_varinfo; i++) {
                MonoInst *ins = cfg->varinfo [i];
                MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

                /* unused vars */
                if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
                        continue;

                if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
                    (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
                        continue;

                /* FIXME: */
                if (ins->opcode == OP_ARG)
                        continue;
                /* we don't allocate I1 to registers because there is no simple way to
                 * sign extend 8bit quantities in caller saved registers on x86 */
                if (is_regsize_var (ins->inst_vtype) || (ins->inst_vtype->type == MONO_TYPE_BOOLEAN) ||
                    (ins->inst_vtype->type == MONO_TYPE_U1) || (ins->inst_vtype->type == MONO_TYPE_U2) ||
                    (ins->inst_vtype->type == MONO_TYPE_I2) || (ins->inst_vtype->type == MONO_TYPE_CHAR)) {
                        g_assert (MONO_VARINFO (cfg, i)->reg == -1);
                        g_assert (i == vmv->idx);
                        vars = g_list_prepend (vars, vmv);
                }
        }

        vars = mono_varlist_sort (cfg, vars, 0);

        return vars;
}

GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
        GList *regs = NULL;

        /* We use the callee saved registers for global allocation */
        regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
        regs = g_list_prepend (regs, (gpointer)AMD64_R12);
        regs = g_list_prepend (regs, (gpointer)AMD64_R13);
        regs = g_list_prepend (regs, (gpointer)AMD64_R14);
        regs = g_list_prepend (regs, (gpointer)AMD64_R15);

        return regs;
}

/*
 * mono_arch_regalloc_cost:
 *
 *  Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
        MonoInst *ins = cfg->varinfo [vmv->idx];

        if (cfg->method->save_lmf)
                /* The register is already saved */
                return (ins->opcode == OP_ARG) ? 1 : 0;
        else
                /* push+pop+possible load if it is an argument */
                return (ins->opcode == OP_ARG) ? 3 : 2;
}
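/*
 * Example: with save_lmf set, a local costs 0 memory references when
 * globally allocated (the callee saved register is stored anyway), while
 * without save_lmf an argument costs 3: the push and pop in the prolog and
 * epilog plus the load of the incoming value into the register.
 */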

void
mono_arch_allocate_vars (MonoCompile *m)
{
        MonoMethodSignature *sig;
        MonoMethodHeader *header;
        MonoInst *inst;
        int i, offset, size, align, curinst;
        CallInfo *cinfo;

        header = ((MonoMethodNormal *)m->method)->header;

        sig = m->method->signature;

        cinfo = get_call_info (sig, FALSE);

        if (sig->ret->type != MONO_TYPE_VOID) {
                switch (cinfo->ret.storage) {
                case ArgInIReg:
                case ArgInSSEReg:
                case ArgInIRegPair:
                        m->ret->opcode = OP_REGVAR;
                        m->ret->inst_c0 = cinfo->ret.reg;
                        break;
                default:
                        g_assert_not_reached ();
                }
                m->ret->dreg = m->ret->inst_c0;
        }

        /*
         * We use the ABI calling conventions for managed code as well.
         * FIXME: Exception: valuetypes are never passed or returned in registers.
         */

        /* Locals are allocated backwards from %fp */
        m->frame_reg = AMD64_RBP;
        offset = 0;

        /*
         * Reserve a stack slot for holding information used during exception
         * handling.
         */
        if (header->num_clauses)
                offset += sizeof (gpointer) * 2;

        if (m->method->save_lmf) {
                offset += sizeof (MonoLMF);
                m->arch.lmf_offset = offset;
        }

        curinst = m->locals_start;
        for (i = curinst; i < m->num_varinfo; ++i) {
                inst = m->varinfo [i];

                if (inst->opcode == OP_REGVAR) {
                        //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
                        continue;
                }

                /* inst->unused indicates native sized value types; this is used by the
                 * pinvoke wrappers when they call functions returning structures */
                if (inst->unused && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
                        size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
                else
                        size = mono_type_stack_size (inst->inst_vtype, &align);

                /*
                 * variables are accessed as negative offsets from %fp, so increase
                 * the offset before assigning it to a variable
                 */
                offset += size;

                offset += align - 1;
                offset &= ~(align - 1);
                inst->opcode = OP_REGOFFSET;
                inst->inst_basereg = AMD64_RBP;
                inst->inst_offset = - offset;

                //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
        }

        if (sig->call_convention == MONO_CALL_VARARG) {
                NOT_IMPLEMENTED;
                m->sig_cookie = cinfo->sig_cookie.offset;
        }

        for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
                inst = m->varinfo [i];
                if (inst->opcode != OP_REGVAR) {
                        ArgInfo *ainfo = &cinfo->args [i];
                        gboolean inreg = TRUE;
                        MonoType *arg_type;

                        if (sig->hasthis && (i == 0))
                                arg_type = &mono_defaults.object_class->byval_arg;
                        else
                                arg_type = sig->params [i - sig->hasthis];

                        /* FIXME: Allocate volatile arguments to registers */
                        if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
                                inreg = FALSE;

                        /*
                         * Under AMD64, all registers used to pass arguments to functions
                         * are volatile across calls.
                         * FIXME: Optimize this.
                         */
                        if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInSSEReg))
                                inreg = FALSE;

                        if (MONO_TYPE_ISSTRUCT (arg_type))
                                /* FIXME: this isn't needed */
                                inreg = FALSE;

                        inst->opcode = OP_REGOFFSET;

                        switch (ainfo->storage) {
                        case ArgInIReg:
                        case ArgInSSEReg:
                                inst->opcode = OP_REGVAR;
                                inst->dreg = ainfo->reg;
                                break;
                        case ArgOnStack:
                                inst->opcode = OP_REGOFFSET;
                                inst->inst_basereg = AMD64_RBP;
                                inst->inst_offset = ainfo->offset + ARGS_OFFSET;
                                break;
                        default:
                                NOT_IMPLEMENTED;
                        }

                        if (!inreg) {
                                inst->opcode = OP_REGOFFSET;
                                inst->inst_basereg = AMD64_RBP;
                                /* These arguments are saved to the stack in the prolog */
                                offset += 8;
                                inst->inst_offset = - offset;
                        }

                        if (MONO_TYPE_ISSTRUCT (arg_type)) {
                                /* Add a level of indirection */
                                /*
                                 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
                                 * are destructively modified in a lot of places in inssel.brg.
                                 */
                                NOT_IMPLEMENTED;
                        }
                }
        }

        m->stack_offset = offset;

        /* Add a properly aligned dword for use by int<->float conversion opcodes */
        m->spill_count ++;
        mono_spillvar_offset_float (m, 0);

        g_free (cinfo);
}

/* FIXME: we need an alignment solution for enter_method and mono_arch_call_opcode;
 * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
 */

/*
 * take the arguments and generate the arch-specific
 * instructions to properly call the function in call.
 * This includes pushing, moving arguments to the right register
 * etc.
 * Issue: who does the spilling if needed, and when?
 */
MonoCallInst*
mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
        MonoInst *arg, *in;
        MonoMethodSignature *sig;
        int i, n, stack_size;
        CallInfo *cinfo;
        ArgInfo *ainfo;

        stack_size = 0;
        /* add the vararg cookie before the non-implicit args */
        if (call->signature->call_convention == MONO_CALL_VARARG) {
                MonoInst *sig_arg;

                NOT_IMPLEMENTED;
                /* FIXME: Add support for signature tokens to AOT */
                cfg->disable_aot = TRUE;
                MONO_INST_NEW (cfg, arg, OP_OUTARG);
                MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
                sig_arg->inst_p0 = call->signature;
                arg->inst_left = sig_arg;
                arg->type = STACK_PTR;
                /* prepend, so they get reversed */
                arg->next = call->out_args;
                call->out_args = arg;
                stack_size += sizeof (gpointer);
        }
        sig = call->signature;
        n = sig->param_count + sig->hasthis;

        cinfo = get_call_info (sig, sig->pinvoke);

        if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret))
                stack_size += sizeof (gpointer);

        for (i = 0; i < n; ++i) {
                ainfo = cinfo->args + i;

                if (is_virtual && i == 0) {
                        /* the argument will be attached to the call instruction */
                        in = call->args [i];
                } else {
                        MONO_INST_NEW (cfg, arg, OP_OUTARG);
                        in = call->args [i];
                        arg->cil_code = in->cil_code;
                        arg->inst_left = in;
                        arg->type = in->type;
                        /* prepend, so they get reversed */
                        arg->next = call->out_args;
                        call->out_args = arg;

                        switch (ainfo->storage) {
                        case ArgInIReg:
                                arg->opcode = OP_OUTARG_REG;
                                arg->unused = ainfo->reg;
                                call->used_iregs |= 1 << ainfo->reg;
                                break;
                        case ArgInSSEReg:
                                arg->opcode = OP_AMD64_OUTARG_XMMREG;
                                arg->unused = ainfo->reg;
                                /* FIXME: set call->used_... */
                                break;
                        case ArgOnStack:
                                arg->opcode = OP_OUTARG;
                                break;
                        default:
                                g_assert_not_reached ();
                        }
                }
        }

        call->stack_usage = cinfo->stack_usage;
        cfg->param_area = MAX (cfg->param_area, call->stack_usage);
        cfg->flags |= MONO_CFG_HAS_CALLS;

        g_free (cinfo);

        return call;
}
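/*
 * Example (a sketch): for a one argument call with the argument classified
 * as ArgInIReg, the loop above prepends an OP_OUTARG_REG whose inst_left is
 * the argument tree and whose 'unused' field holds %rdi, the first entry in
 * param_regs []; the register is also recorded in call->used_iregs.
 */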

/*
 * Allow tracing to work with this interface (with an optional argument)
 */

/*
 * This may be needed on some archs or for debugging support.
 */
void
mono_arch_instrument_mem_needs (MonoMethod *method, int *stack, int *code)
{
        /* no stack room needed now (may be needed for FASTCALL-trace support) */
        *stack = 0;
        /* split prolog-epilog requirements? */
        *code = 50; /* max bytes needed: check this number */
}

void*
mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
{
        guchar *code = p;

        /* if some args are passed in registers, we need to save them here */
        amd64_push_reg (code, AMD64_RBP);
        mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, cfg->method);
        amd64_push_imm (code, (guint64)(cfg->method));
        mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_ABS, (gpointer)func);
        amd64_call_code (code, 0);
        amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

        return code;
}

enum {
        SAVE_NONE,
        SAVE_STRUCT,
        SAVE_EAX,
        SAVE_EAX_EDX,
        SAVE_FP
};

void*
mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
{
        guchar *code = p;
        int arg_size = 0, save_mode = SAVE_NONE;
        MonoMethod *method = cfg->method;
        int rtype = method->signature->ret->type;

        NOT_IMPLEMENTED;

handle_enum:
        switch (rtype) {
        case MONO_TYPE_VOID:
                /* special case string .ctor icall */
                if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
                        save_mode = SAVE_EAX;
                else
                        save_mode = SAVE_NONE;
                break;
        case MONO_TYPE_I8:
        case MONO_TYPE_U8:
                save_mode = SAVE_EAX_EDX;
                break;
        case MONO_TYPE_R4:
        case MONO_TYPE_R8:
                save_mode = SAVE_FP;
                break;
        case MONO_TYPE_VALUETYPE:
                if (method->signature->ret->data.klass->enumtype) {
                        rtype = method->signature->ret->data.klass->enum_basetype->type;
                        goto handle_enum;
                }
                save_mode = SAVE_STRUCT;
                break;
        default:
                save_mode = SAVE_EAX;
                break;
        }

        switch (save_mode) {
        case SAVE_EAX_EDX:
                amd64_push_reg (code, AMD64_RDX);
                amd64_push_reg (code, AMD64_RAX);
                if (enable_arguments) {
                        amd64_push_reg (code, AMD64_RDX);
                        amd64_push_reg (code, AMD64_RAX);
                        arg_size = 8;
                }
                break;
        case SAVE_EAX:
                amd64_push_reg (code, AMD64_RAX);
                if (enable_arguments) {
                        amd64_push_reg (code, AMD64_RAX);
                        arg_size = 4;
                }
                break;
        case SAVE_FP:
                amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
                amd64_fst_membase (code, AMD64_RSP, 0, TRUE, TRUE);
                if (enable_arguments) {
                        amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
                        amd64_fst_membase (code, AMD64_RSP, 0, TRUE, TRUE);
                        arg_size = 8;
                }
                break;
        case SAVE_STRUCT:
                if (enable_arguments) {
                        amd64_push_membase (code, AMD64_RBP, 8);
                        arg_size = 4;
                }
                break;
        case SAVE_NONE:
        default:
                break;
        }

        mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, method);
        amd64_push_imm (code, (guint64)method);
        mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_ABS, (gpointer)func);
        amd64_call_code (code, 0);
        amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, arg_size + 4);

        switch (save_mode) {
        case SAVE_EAX_EDX:
                amd64_pop_reg (code, AMD64_RAX);
                amd64_pop_reg (code, AMD64_RDX);
                break;
        case SAVE_EAX:
                amd64_pop_reg (code, AMD64_RAX);
                break;
        case SAVE_FP:
                amd64_fld_membase (code, AMD64_RSP, 0, TRUE);
                amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
                break;
        case SAVE_NONE:
        default:
                break;
        }

        return code;
}

#define EMIT_COND_BRANCH(ins,cond,sign) \
if (ins->flags & MONO_INST_BRLABEL) { \
        if (ins->inst_i0->inst_c0) { \
                x86_branch (code, cond, cfg->native_code + ins->inst_i0->inst_c0, sign); \
        } else { \
                mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
                if ((cfg->opt & MONO_OPT_BRANCH) && \
                    x86_is_imm8 (ins->inst_i0->inst_c1 - cpos)) \
                        x86_branch8 (code, cond, 0, sign); \
                else \
                        x86_branch32 (code, cond, 0, sign); \
        } \
} else { \
        if (ins->inst_true_bb->native_offset) { \
                x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
        } else { \
                mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
                if ((cfg->opt & MONO_OPT_BRANCH) && \
                    x86_is_imm8 (ins->inst_true_bb->max_offset - cpos)) \
                        x86_branch8 (code, cond, 0, sign); \
                else \
                        x86_branch32 (code, cond, 0, sign); \
        } \
}

/* emit an exception if the condition fails */
#define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name)            \
        do {                                                        \
                mono_add_patch_info (cfg, code - cfg->native_code,   \
                                    MONO_PATCH_INFO_EXC, exc_name);  \
                x86_branch32 (code, cond, 0, signed);               \
        } while (0)

#define EMIT_FPCOMPARE(code) do { \
        amd64_fcompp (code); \
        amd64_fnstsw (code); \
} while (0)

#define EMIT_CALL() do { \
        amd64_set_reg_template (code, GP_SCRATCH_REG); \
        amd64_call_reg (code, GP_SCRATCH_REG); \
} while (0)
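/*
 * Typical use in the code emitter (a sketch; the emitter macros are from
 * amd64-codegen.h): after a compare that sets the flags,
 *
 *      amd64_alu_reg_reg (code, X86_CMP, sreg2, sreg1);
 *      EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LE, FALSE, "IndexOutOfRangeException");
 *
 * emits a forward branch whose target is filled in later when the
 * MONO_PATCH_INFO_EXC entry is resolved.
 */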

/* FIXME: Add more instructions */
#define INST_IGNORES_CFLAGS(ins) (((ins)->opcode == CEE_BR) || ((ins)->opcode == OP_STORE_MEMBASE_IMM))

static void
peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
        MonoInst *ins, *last_ins = NULL;
        ins = bb->code;

        while (ins) {

                switch (ins->opcode) {
                case OP_ICONST:
                        /* reg = 0 -> XOR (reg, reg) */
                        /* XOR sets cflags on x86, so we can't always do it */
                        if (ins->inst_c0 == 0 && ins->next && INST_IGNORES_CFLAGS (ins->next)) {
                                ins->opcode = CEE_XOR;
                                ins->sreg1 = ins->dreg;
                                ins->sreg2 = ins->dreg;
                        }
                        break;
                case OP_MUL_IMM:
                        /* remove unnecessary multiplication with 1 */
                        if (ins->inst_imm == 1) {
                                if (ins->dreg != ins->sreg1) {
                                        ins->opcode = OP_MOVE;
                                } else {
                                        last_ins->next = ins->next;
                                        ins = ins->next;
                                        continue;
                                }
                        }
                        break;
                case OP_COMPARE_IMM:
                        /* OP_COMPARE_IMM (reg, 0)
                         * -->
                         * OP_AMD64_TEST_NULL (reg)
                         */
                        if (ins->inst_imm == 0 && ins->next &&
                            (ins->next->opcode == CEE_BEQ || ins->next->opcode == CEE_BNE_UN ||
                             ins->next->opcode == OP_CEQ)) {
                                ins->opcode = OP_X86_TEST_NULL;
                        }
                        break;
                case OP_X86_COMPARE_MEMBASE_IMM:
                        /*
                         * OP_STORE_MEMBASE_REG reg, offset(basereg)
                         * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
                         * -->
                         * OP_STORE_MEMBASE_REG reg, offset(basereg)
                         * OP_COMPARE_IMM reg, imm
                         *
                         * Note: if imm = 0 then OP_COMPARE_IMM is replaced with OP_X86_TEST_NULL
                         */
                        if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
                            ins->inst_basereg == last_ins->inst_destbasereg &&
                            ins->inst_offset == last_ins->inst_offset) {
                                        ins->opcode = OP_COMPARE_IMM;
                                        ins->sreg1 = last_ins->sreg1;

                                        /* check if we can remove cmp reg,0 with test null */
                                        if (ins->inst_imm == 0 && ins->next &&
                                                (ins->next->opcode == CEE_BEQ || ins->next->opcode == CEE_BNE_UN ||
                                                ins->next->opcode == OP_CEQ)) {
                                                ins->opcode = OP_X86_TEST_NULL;
                                        }
                                }

                        break;
                case OP_LOAD_MEMBASE:
                case OP_LOADI4_MEMBASE:
                        /*
                         * Note: if reg1 = reg2 the load op is removed
                         *
                         * OP_STORE_MEMBASE_REG reg1, offset(basereg)
                         * OP_LOAD_MEMBASE offset(basereg), reg2
                         * -->
                         * OP_STORE_MEMBASE_REG reg1, offset(basereg)
                         * OP_MOVE reg1, reg2
                         */
                        if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
                                         || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
                            ins->inst_basereg == last_ins->inst_destbasereg &&
                            ins->inst_offset == last_ins->inst_offset) {
                                if (ins->dreg == last_ins->sreg1) {
                                        last_ins->next = ins->next;
                                        ins = ins->next;
                                        continue;
                                } else {
                                        //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
                                        ins->opcode = OP_MOVE;
                                        ins->sreg1 = last_ins->sreg1;
                                }

                        /*
                         * Note: reg1 must be different from the basereg in the second load
                         * Note: if reg1 = reg2 then the second load is removed
                         *
                         * OP_LOAD_MEMBASE offset(basereg), reg1
                         * OP_LOAD_MEMBASE offset(basereg), reg2
                         * -->
                         * OP_LOAD_MEMBASE offset(basereg), reg1
                         * OP_MOVE reg1, reg2
                         */
                        } else if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
                                           || last_ins->opcode == OP_LOAD_MEMBASE) &&
                              ins->inst_basereg != last_ins->dreg &&
                              ins->inst_basereg == last_ins->inst_basereg &&
                              ins->inst_offset == last_ins->inst_offset) {

                                if (ins->dreg == last_ins->dreg) {
                                        last_ins->next = ins->next;
                                        ins = ins->next;
                                        continue;
                                } else {
                                        ins->opcode = OP_MOVE;
                                        ins->sreg1 = last_ins->dreg;
                                }

                                //g_assert_not_reached ();

#if 0
                        /*
                         * OP_STORE_MEMBASE_IMM imm, offset(basereg)
                         * OP_LOAD_MEMBASE offset(basereg), reg
                         * -->
                         * OP_STORE_MEMBASE_IMM imm, offset(basereg)
                         * OP_ICONST reg, imm
                         */
                        } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
                                                || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
                                   ins->inst_basereg == last_ins->inst_destbasereg &&
                                   ins->inst_offset == last_ins->inst_offset) {
                                //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
                                ins->opcode = OP_ICONST;
                                ins->inst_c0 = last_ins->inst_imm;
                                g_assert_not_reached (); // check this rule
#endif
                        }
                        break;
                case OP_LOADU1_MEMBASE:
                case OP_LOADI1_MEMBASE:
                        /*
                         * Note: if reg1 = reg2 the load op is removed
                         *
                         * OP_STORE_MEMBASE_REG reg1, offset(basereg)
                         * OP_LOAD_MEMBASE offset(basereg), reg2
                         * -->
                         * OP_STORE_MEMBASE_REG reg1, offset(basereg)
                         * OP_MOVE reg1, reg2
                         */
                        if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
                                        ins->inst_basereg == last_ins->inst_destbasereg &&
                                        ins->inst_offset == last_ins->inst_offset) {
                                if (ins->dreg == last_ins->sreg1) {
                                        last_ins->next = ins->next;
                                        ins = ins->next;
                                        continue;
                                } else {
                                        //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
                                        ins->opcode = OP_MOVE;
                                        ins->sreg1 = last_ins->sreg1;
                                }
                        }
                        break;
                case OP_LOADU2_MEMBASE:
                case OP_LOADI2_MEMBASE:
                        /*
                         * Note: if reg1 = reg2 the load op is removed
                         *
                         * OP_STORE_MEMBASE_REG reg1, offset(basereg)
                         * OP_LOAD_MEMBASE offset(basereg), reg2
                         * -->
                         * OP_STORE_MEMBASE_REG reg1, offset(basereg)
                         * OP_MOVE reg1, reg2
                         */
                        if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
                                        ins->inst_basereg == last_ins->inst_destbasereg &&
                                        ins->inst_offset == last_ins->inst_offset) {
                                if (ins->dreg == last_ins->sreg1) {
                                        last_ins->next = ins->next;
                                        ins = ins->next;
                                        continue;
                                } else {
                                        //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
                                        ins->opcode = OP_MOVE;
                                        ins->sreg1 = last_ins->sreg1;
                                }
                        }
                        break;
                case CEE_CONV_I4:
                case CEE_CONV_U4:
                case OP_MOVE:
                        /*
                         * Removes:
                         *
                         * OP_MOVE reg, reg
                         */
                        if (ins->dreg == ins->sreg1) {
                                if (last_ins)
                                        last_ins->next = ins->next;
                                ins = ins->next;
                                continue;
                        }
                        /*
                         * Removes:
                         *
                         * OP_MOVE sreg, dreg
                         * OP_MOVE dreg, sreg
                         */
                        if (last_ins && last_ins->opcode == OP_MOVE &&
                            ins->sreg1 == last_ins->dreg &&
                            ins->dreg == last_ins->sreg1) {
                                last_ins->next = ins->next;
                                ins = ins->next;
                                continue;
                        }
                        break;
                }
                last_ins = ins;
                ins = ins->next;
        }
        bb->last_ins = last_ins;
}

static const int
branch_cc_table [] = {
        X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
        X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
        X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
};

static int
opcode_to_x86_cond (int opcode)
{
        switch (opcode) {
        case OP_IBEQ:
                return X86_CC_EQ;
        case OP_IBNE_UN:
                return X86_CC_NE;
        case OP_IBLT:
                return X86_CC_LT;
        case OP_IBLT_UN:
                return X86_CC_LT;
        case OP_IBGT:
                return X86_CC_GT;
        case OP_IBGT_UN:
                return X86_CC_GT;
        case OP_IBGE:
                return X86_CC_GE;
        case OP_IBGE_UN:
                return X86_CC_GE;
        case OP_IBLE:
                return X86_CC_LE;
        case OP_IBLE_UN:
                return X86_CC_LE;
        default:
                g_assert_not_reached ();
        }

        return -1;
}

/*
 * returns the offset used by spillvar. It allocates a new
 * spill variable if necessary.
 */
static int
mono_spillvar_offset (MonoCompile *cfg, int spillvar)
{
        MonoSpillInfo **si, *info;
        int i = 0;

        si = &cfg->spill_info;

        while (i <= spillvar) {

                if (!*si) {
                        *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
                        info->next = NULL;
                        cfg->stack_offset += sizeof (gpointer);
                        info->offset = - cfg->stack_offset;
                }

                if (i == spillvar)
                        return (*si)->offset;

                i++;
                si = &(*si)->next;
        }

        g_assert_not_reached ();
        return 0;
}
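/*
 * Example (a sketch): with an empty spill_info list and stack_offset at 0,
 * mono_spillvar_offset (cfg, 0) reserves the first slot and returns -8;
 * a subsequent mono_spillvar_offset (cfg, 1) chains a second node and
 * returns -16. Repeated queries for the same spillvar return the cached
 * offset without growing the frame.
 */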

/*
 * returns the offset used by spillvar. It allocates a new
 * spill float variable if necessary.
 * (same as mono_spillvar_offset but for float)
 */
static int
mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
{
        MonoSpillInfo **si, *info;
        int i = 0;

        si = &cfg->spill_info_float;

        while (i <= spillvar) {

                if (!*si) {
                        *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
                        info->next = NULL;
                        cfg->stack_offset += sizeof (double);
                        info->offset = - cfg->stack_offset;
                }

                if (i == spillvar)
                        return (*si)->offset;

                i++;
                si = &(*si)->next;
        }

        g_assert_not_reached ();
        return 0;
}

/*
 * Creates a store for spilled floating point items
 */
static MonoInst*
create_spilled_store_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
{
        MonoInst *store;
        MONO_INST_NEW (cfg, store, OP_STORER8_MEMBASE_REG);
        store->sreg1 = reg;
        store->inst_destbasereg = AMD64_RBP;
        store->inst_offset = mono_spillvar_offset_float (cfg, spill);

        DEBUG (g_print ("SPILLED FLOAT STORE (%d at 0x%08lx(%%rbp)) (from %d)\n", spill, (long)store->inst_offset, reg));
        return store;
}

/*
 * Creates a load for spilled floating point items
 */
static MonoInst*
create_spilled_load_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
{
        MonoInst *load;
        MONO_INST_NEW (cfg, load, OP_LOADR8_SPILL_MEMBASE);
        load->dreg = reg;
        load->inst_basereg = AMD64_RBP;
        load->inst_offset = mono_spillvar_offset_float (cfg, spill);

        DEBUG (g_print ("SPILLED FLOAT LOAD (%d at 0x%08lx(%%rbp)) (from %d)\n", spill, (long)load->inst_offset, reg));
        return load;
}

#define reg_is_freeable(r) ((r) >= 0 && (r) <= 7 && AMD64_IS_CALLEE_REG ((r)))

typedef struct {
        int born_in;
        int killed_in;
        int last_use;
        int prev_use;
        int flags;              /* used to track fp spill/load */
} RegTrack;

static const char*const * ins_spec = amd64_desc;

static void
print_ins (int i, MonoInst *ins)
{
        const char *spec = ins_spec [ins->opcode];
        g_print ("\t%-2d %s", i, mono_inst_name (ins->opcode));
        if (!spec)
                g_error ("Unknown opcode: %s\n", mono_inst_name (ins->opcode));
        if (spec [MONO_INST_DEST]) {
                if (ins->dreg >= MONO_MAX_IREGS)
                        g_print (" R%d <-", ins->dreg);
                else
                        g_print (" %s <-", mono_arch_regname (ins->dreg));
        }
        if (spec [MONO_INST_SRC1]) {
                if (ins->sreg1 >= MONO_MAX_IREGS)
                        g_print (" R%d", ins->sreg1);
                else
                        g_print (" %s", mono_arch_regname (ins->sreg1));
        }
        if (spec [MONO_INST_SRC2]) {
                if (ins->sreg2 >= MONO_MAX_IREGS)
                        g_print (" R%d", ins->sreg2);
                else
                        g_print (" %s", mono_arch_regname (ins->sreg2));
        }
        if (spec [MONO_INST_CLOB])
                g_print (" clobbers: %c", spec [MONO_INST_CLOB]);
        g_print ("\n");
}

static void
print_regtrack (RegTrack *t, int num)
{
        int i;
        char buf [32];
        const char *r;

        for (i = 0; i < num; ++i) {
                if (!t [i].born_in)
                        continue;
                if (i >= MONO_MAX_IREGS) {
                        g_snprintf (buf, sizeof (buf), "R%d", i);
                        r = buf;
                } else
                        r = mono_arch_regname (i);
                g_print ("liveness: %s [%d - %d]\n", r, t [i].born_in, t [i].last_use);
        }
}

typedef struct InstList InstList;

struct InstList {
        InstList *prev;
        InstList *next;
        MonoInst *data;
};

static inline InstList*
inst_list_prepend (MonoMemPool *pool, InstList *list, MonoInst *data)
{
        InstList *item = mono_mempool_alloc (pool, sizeof (InstList));
        item->data = data;
        item->prev = NULL;
        item->next = list;
        if (list)
                list->prev = item;
        return item;
}

/*
 * Force the spilling of the variable in the symbolic register 'reg'.
 */
static int
get_register_force_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, int reg)
{
        MonoInst *load;
        int i, sel, spill;

        sel = cfg->rs->iassign [reg];
        /*i = cfg->rs->isymbolic [sel];
        g_assert (i == reg);*/
        i = reg;
        spill = ++cfg->spill_count;
        cfg->rs->iassign [i] = -spill - 1;
        mono_regstate_free_int (cfg->rs, sel);
        /* we need to create a spill var and insert a load to sel after the current instruction */
        MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
        load->dreg = sel;
        load->inst_basereg = AMD64_RBP;
        load->inst_offset = mono_spillvar_offset (cfg, spill);
        if (item->prev) {
                while (ins->next != item->prev->data)
                        ins = ins->next;
        }
        load->next = ins->next;
        ins->next = load;
1423         DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%rbp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_arch_regname (sel)));
1424         i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1425         g_assert (i == sel);
1426
1427         return sel;
1428 }
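/*
 * Note on the encoding above: iassign [] stores the spill slot biased by
 * -1, i.e. slot n is written as -n - 1 (the third spill becomes -4), so a
 * plain -1 still means "unassigned" and readers recover the slot with
 * spill = -val - 1.
 */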
1429
1430 static int
1431 get_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg)
1432 {
1433         MonoInst *load;
1434         int i, sel, spill;
1435
1436         DEBUG (g_print ("\tstart regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
1437         /* exclude the registers in the current instruction */
1438         if (reg != ins->sreg1 && (reg_is_freeable (ins->sreg1) || (ins->sreg1 >= MONO_MAX_IREGS && cfg->rs->iassign [ins->sreg1] >= 0))) {
1439                 if (ins->sreg1 >= MONO_MAX_IREGS)
1440                         regmask &= ~ (1 << cfg->rs->iassign [ins->sreg1]);
1441                 else
1442                         regmask &= ~ (1 << ins->sreg1);
1443                 DEBUG (g_print ("\t\texcluding sreg1 %s\n", mono_arch_regname (ins->sreg1)));
1444         }
1445         if (reg != ins->sreg2 && (reg_is_freeable (ins->sreg2) || (ins->sreg2 >= MONO_MAX_IREGS && cfg->rs->iassign [ins->sreg2] >= 0))) {
1446                 if (ins->sreg2 >= MONO_MAX_IREGS)
1447                         regmask &= ~ (1 << cfg->rs->iassign [ins->sreg2]);
1448                 else
1449                         regmask &= ~ (1 << ins->sreg2);
1450                 DEBUG (g_print ("\t\texcluding sreg2 %s %d\n", mono_arch_regname (ins->sreg2), ins->sreg2));
1451         }
1452         if (reg != ins->dreg && reg_is_freeable (ins->dreg)) {
1453                 regmask &= ~ (1 << ins->dreg);
1454                 DEBUG (g_print ("\t\texcluding dreg %s\n", mono_arch_regname (ins->dreg)));
1455         }
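        /*
         * Example: if sreg1 is symbolic and currently assigned %rsi while
         * dreg sits in %rdi, both bits were cleared from regmask above, so
         * the scan below can only settle on a register that is safe to free.
         */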
1456
1457         DEBUG (g_print ("\t\tavailable regmask: 0x%08x\n", regmask));
1458         g_assert (regmask); /* need at least a register we can free */
1459         sel = -1;
1460         /* we should track prev_use and spill the register whose next use is farthest away */
1461         for (i = 0; i < MONO_MAX_IREGS; ++i) {
1462                 if (regmask & (1 << i)) {
1463                         sel = i;
1464                         DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->iassign [sel]));
1465                         break;
1466                 }
1467         }
1468         i = cfg->rs->isymbolic [sel];
1469         spill = ++cfg->spill_count;
1470         cfg->rs->iassign [i] = -spill - 1;
1471         mono_regstate_free_int (cfg->rs, sel);
1472         /* we need to create a spill var and insert a load to sel after the current instruction */
1473         MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
1474         load->dreg = sel;
1475         load->inst_basereg = AMD64_RBP;
1476         load->inst_offset = mono_spillvar_offset (cfg, spill);
1477         if (item->prev) {
1478                 while (ins->next != item->prev->data)
1479                         ins = ins->next;
1480         }
1481         load->next = ins->next;
1482         ins->next = load;
1483         DEBUG (g_print ("\tSPILLED LOAD (%d at 0x%08lx(%%rbp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_arch_regname (sel)));
1484         i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1485         g_assert (i == sel);
1486         
1487         return sel;
1488 }
1489
1490 static MonoInst*
1491 create_copy_ins (MonoCompile *cfg, int dest, int src, MonoInst *ins)
1492 {
1493         MonoInst *copy;
1494         MONO_INST_NEW (cfg, copy, OP_MOVE);
1495         copy->dreg = dest;
1496         copy->sreg1 = src;
1497         if (ins) {
1498                 copy->next = ins->next;
1499                 ins->next = copy;
1500         }
1501         DEBUG (g_print ("\tforced copy from %s to %s\n", mono_arch_regname (src), mono_arch_regname (dest)));
1502         return copy;
1503 }
1504
1505 static MonoInst*
1506 create_spilled_store (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins)
1507 {
1508         MonoInst *store;
1509         MONO_INST_NEW (cfg, store, OP_STORE_MEMBASE_REG);
1510         store->sreg1 = reg;
1511         store->inst_destbasereg = AMD64_RBP;
1512         store->inst_offset = mono_spillvar_offset (cfg, spill);
1513         if (ins) {
1514                 store->next = ins->next;
1515                 ins->next = store;
1516         }
1517         DEBUG (g_print ("\tSPILLED STORE (%d at 0x%08lx(%%rbp)) R%d (from %s)\n", spill, (long)store->inst_offset, prev_reg, mono_arch_regname (reg)));
1518         return store;
1519 }
1520
1521 static void
1522 insert_before_ins (MonoInst *ins, InstList *item, MonoInst* to_insert)
1523 {
1524         MonoInst *prev;
1525         if (item->next) {
1526                 prev = item->next->data;
1527
1528                 while (prev->next != ins)
1529                         prev = prev->next;
1530                 to_insert->next = ins;
1531                 prev->next = to_insert;
1532         } else {
1533                 to_insert->next = ins;
1534         }
1535         /* 
1536          * needed, otherwise an ins appended to the end while processing the
1537          * next instruction would end up past this instruction.
1538          */
1539         item->data = to_insert; 
1540 }
1541
1542
1543 #if 0
1544 static int
1545 alloc_int_reg (MonoCompile *cfg, InstList *curinst, MonoInst *ins, int sym_reg, guint32 allow_mask)
1546 {
1547         int val = cfg->rs->iassign [sym_reg];
1548         if (val < 0) {
1549                 int spill = 0;
1550                 if (val < -1) {
1551                         /* the register gets spilled after this inst */
1552                         spill = -val -1;
1553                 }
1554                 val = mono_regstate_alloc_int (cfg->rs, allow_mask);
1555                 if (val < 0)
1556                         val = get_register_spilling (cfg, curinst, ins, allow_mask, sym_reg);
1557                 cfg->rs->iassign [sym_reg] = val;
1558                 /* add option to store before the instruction for src registers */
1559                 if (spill)
1560                         create_spilled_store (cfg, spill, val, sym_reg, ins);
1561         }
1562         cfg->rs->isymbolic [val] = sym_reg;
1563         return val;
1564 }
1565 #endif
1566
1567 /* flags used in reginfo->flags */
1568 enum {
1569         MONO_X86_FP_NEEDS_LOAD_SPILL    = 1 << 0,
1570         MONO_X86_FP_NEEDS_SPILL                 = 1 << 1,
1571         MONO_X86_FP_NEEDS_LOAD                  = 1 << 2,
1572         MONO_X86_REG_NOT_ECX                    = 1 << 3,
1573         MONO_X86_REG_EAX                                = 1 << 4,
1574         MONO_X86_REG_EDX                                = 1 << 5,
1575         MONO_X86_REG_ECX                                = 1 << 6
1576 };
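/*
 * These flags are hints consumed by mono_amd64_alloc_int_reg () below: the
 * forward liveness pass sets e.g. MONO_X86_REG_ECX on the count operand of
 * a shift and MONO_X86_REG_EAX/MONO_X86_REG_EDX on the two halves of a long,
 * so the backward pass tries the matching hard register first.  The names
 * were kept from the x86 port; here they map to RCX/RAX/RDX.
 */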
1577
1578 static int
1579 mono_amd64_alloc_int_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg, int flags)
1580 {
1581         int val;
1582         int test_mask = dest_mask;
1583
1584         if (flags & MONO_X86_REG_EAX)
1585                 test_mask &= (1 << AMD64_RAX);
1586         else if (flags & MONO_X86_REG_EDX)
1587                 test_mask &= (1 << AMD64_RDX);
1588         else if (flags & MONO_X86_REG_ECX)
1589                 test_mask &= (1 << AMD64_RCX);
1590         else if (flags & MONO_X86_REG_NOT_ECX)
1591                 test_mask &= ~ (1 << AMD64_RCX);
1592
1593         val = mono_regstate_alloc_int (cfg->rs, test_mask);
1594         if (val >= 0 && test_mask != dest_mask)
1595                 DEBUG(g_print ("\tUsed flag to allocate reg %s for R%u\n", mono_arch_regname (val), sym_reg));
1596
1597         if (val < 0 && (flags & MONO_X86_REG_NOT_ECX)) {
1598                 DEBUG(g_print ("\tFailed to allocate with the flag-suggested mask (%u); retrying excluding ECX\n", test_mask));
1599                 val = mono_regstate_alloc_int (cfg->rs, (dest_mask & ~(1 << AMD64_RCX)));
1600         }
1601
1602         if (val < 0) {
1603                 val = mono_regstate_alloc_int (cfg->rs, dest_mask);
1604                 if (val < 0)
1605                         val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg);
1606         }
1607
1608         return val;
1609 }
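/*
 * Example of the flag handling above: with dest_mask offering RAX, RCX and
 * RDX and MONO_X86_REG_EDX set in flags, test_mask narrows to just RDX; if
 * RDX is already taken, the fallbacks retry with the full dest_mask and,
 * as a last resort, spill a register.
 */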
1610
1611
1612 /*#include "cprop.c"*/
1613
1614 /*
1615  * Local register allocation.
1616  * We first scan the list of instructions and we save the liveness info of
1617  * each register (when the register is first used, when its value is set, etc.).
1618  * We also reverse the list of instructions (in the InstList list) because assigning
1619  * registers backwards allows for more tricks to be used.
1620  */
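/*
 * Example, assuming a block that ends with R33 = R31 + R32 (symbolic regs):
 * the forward pass records born_in/last_use for R31, R32 and R33 and builds
 * the reversed InstList; the backward pass walks that list, assigns a hard
 * register to each symbolic register the first time it is seen (i.e. at its
 * last use in program order) and frees it again once it walks past the
 * instruction recorded in born_in.
 */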
1621 void
1622 mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
1623 {
1624         MonoInst *ins;
1625         MonoRegState *rs = cfg->rs;
1626         int i, val, fpcount;
1627         RegTrack *reginfo, *reginfof;
1628         RegTrack *reginfo1, *reginfo2, *reginfod;
1629         InstList *tmp, *reversed = NULL;
1630         const char *spec;
1631         guint32 src1_mask, src2_mask, dest_mask;
1632         GList *fspill_list = NULL;
1633         int fspill = 0;
1634
1635         if (!bb->code)
1636                 return;
1637         rs->next_vireg = bb->max_ireg;
1638         rs->next_vfreg = bb->max_freg;
1639         mono_regstate_assign (rs);
1640         reginfo = g_malloc0 (sizeof (RegTrack) * rs->next_vireg);
1641         reginfof = g_malloc0 (sizeof (RegTrack) * rs->next_vfreg);
1642         rs->ifree_mask = AMD64_CALLEE_REGS;
1643
1644         ins = bb->code;
1645
1646         /*if (cfg->opt & MONO_OPT_COPYPROP)
1647                 local_copy_prop (cfg, ins);*/
1648
1649         i = 1;
1650         fpcount = 0;
1651         DEBUG (g_print ("LOCAL regalloc: basic block: %d\n", bb->block_num));
1652         /* forward pass on the instructions to collect register liveness info */
1653         while (ins) {
1654                 spec = ins_spec [ins->opcode];
1655                 
1656                 DEBUG (print_ins (i, ins));
1657
1658                 if (spec [MONO_INST_SRC1]) {
1659                         if (spec [MONO_INST_SRC1] == 'f') {
1660                                 GList *spill;
1661                                 reginfo1 = reginfof;
1662
1663                                 spill = g_list_first (fspill_list);
1664                                 if (spill && fpcount < MONO_MAX_FREGS) {
1665                                         reginfo1 [ins->sreg1].flags |= MONO_X86_FP_NEEDS_LOAD;
1666                                         fspill_list = g_list_remove (fspill_list, spill->data);
1667                                 } else
1668                                         fpcount--;
1669                         }
1670                         else
1671                                 reginfo1 = reginfo;
1672                         reginfo1 [ins->sreg1].prev_use = reginfo1 [ins->sreg1].last_use;
1673                         reginfo1 [ins->sreg1].last_use = i;
1674                         if (spec [MONO_INST_SRC1] == 'L') {
1675                                 /* The virtual register is allocated sequentially */
1676                                 reginfo1 [ins->sreg1 + 1].prev_use = reginfo1 [ins->sreg1 + 1].last_use;
1677                                 reginfo1 [ins->sreg1 + 1].last_use = i;
1678                                 if (reginfo1 [ins->sreg1 + 1].born_in == 0 || reginfo1 [ins->sreg1 + 1].born_in > i)
1679                                         reginfo1 [ins->sreg1 + 1].born_in = i;
1680
1681                                 reginfo1 [ins->sreg1].flags |= MONO_X86_REG_EAX;
1682                                 reginfo1 [ins->sreg1 + 1].flags |= MONO_X86_REG_EDX;
1683                         }
1684                 } else {
1685                         ins->sreg1 = -1;
1686                 }
1687                 if (spec [MONO_INST_SRC2]) {
1688                         if (spec [MONO_INST_SRC2] == 'f') {
1689                                 GList *spill;
1690                                 reginfo2 = reginfof;
1691                                 spill = g_list_first (fspill_list);
1692                                 if (spill) {
1693                                         reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD;
1694                                         fspill_list = g_list_remove (fspill_list, spill->data);
1695                                         if (fpcount >= MONO_MAX_FREGS) {
1696                                                 fspill++;
1697                                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
1698                                                 reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD_SPILL;
1699                                         }
1700                                 } else
1701                                         fpcount--;
1702                         }
1703                         else
1704                                 reginfo2 = reginfo;
1705                         reginfo2 [ins->sreg2].prev_use = reginfo2 [ins->sreg2].last_use;
1706                         reginfo2 [ins->sreg2].last_use = i;
1707                         if (spec [MONO_INST_SRC2] == 'L') {
1708                                 /* The virtual register is allocated sequentially */
1709                                 reginfo2 [ins->sreg2 + 1].prev_use = reginfo2 [ins->sreg2 + 1].last_use;
1710                                 reginfo2 [ins->sreg2 + 1].last_use = i;
1711                                 if (reginfo2 [ins->sreg2 + 1].born_in == 0 || reginfo2 [ins->sreg2 + 1].born_in > i)
1712                                         reginfo2 [ins->sreg2 + 1].born_in = i;
1713                         }
1714                         if (spec [MONO_INST_CLOB] == 's') {
1715                                 reginfo2 [ins->sreg1].flags |= MONO_X86_REG_NOT_ECX;
1716                                 reginfo2 [ins->sreg2].flags |= MONO_X86_REG_ECX;
1717                         }
1718                 } else {
1719                         ins->sreg2 = -1;
1720                 }
1721                 if (spec [MONO_INST_DEST]) {
1722                         if (spec [MONO_INST_DEST] == 'f') {
1723                                 reginfod = reginfof;
1724                                 if (fpcount >= MONO_MAX_FREGS) {
1725                                         reginfod [ins->dreg].flags |= MONO_X86_FP_NEEDS_SPILL;
1726                                         fspill++;
1727                                         fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
1728                                         fpcount--;
1729                                 }
1730                                 fpcount++;
1731                         }
1732                         else
1733                                 reginfod = reginfo;
1734                         if (spec [MONO_INST_DEST] != 'b') /* it's not just a base register */
1735                                 reginfod [ins->dreg].killed_in = i;
1736                         reginfod [ins->dreg].prev_use = reginfod [ins->dreg].last_use;
1737                         reginfod [ins->dreg].last_use = i;
1738                         if (reginfod [ins->dreg].born_in == 0 || reginfod [ins->dreg].born_in > i)
1739                                 reginfod [ins->dreg].born_in = i;
1740                         if (spec [MONO_INST_DEST] == 'l' || spec [MONO_INST_DEST] == 'L') {
1741                                 /* The virtual register is allocated sequentially */
1742                                 reginfod [ins->dreg + 1].prev_use = reginfod [ins->dreg + 1].last_use;
1743                                 reginfod [ins->dreg + 1].last_use = i;
1744                                 if (reginfod [ins->dreg + 1].born_in == 0 || reginfod [ins->dreg + 1].born_in > i)
1745                                         reginfod [ins->dreg + 1].born_in = i;
1746
1747                                 reginfod [ins->dreg].flags |= MONO_X86_REG_EAX;
1748                                 reginfod [ins->dreg + 1].flags |= MONO_X86_REG_EDX;
1749                         }
1750                 } else {
1751                         ins->dreg = -1;
1752                 }
1753
1754                 reversed = inst_list_prepend (cfg->mempool, reversed, ins);
1755                 ++i;
1756                 ins = ins->next;
1757         }
1758
1759         // todo: check if we have anything left on fp stack, in verify mode?
1760         fspill = 0;
1761
1762         DEBUG (print_regtrack (reginfo, rs->next_vireg));
1763         DEBUG (print_regtrack (reginfof, rs->next_vfreg));
1764         tmp = reversed;
1765         while (tmp) {
1766                 int prev_dreg, prev_sreg1, prev_sreg2, clob_dreg;
1767                 dest_mask = src1_mask = src2_mask = AMD64_CALLEE_REGS;
1768                 --i;
1769                 ins = tmp->data;
1770                 spec = ins_spec [ins->opcode];
1771                 prev_dreg = -1;
1772                 clob_dreg = -1;
1773                 DEBUG (g_print ("processing:"));
1774                 DEBUG (print_ins (i, ins));
1775                 if (spec [MONO_INST_CLOB] == 's') {
1776                         if (rs->ifree_mask & (1 << AMD64_RCX)) {
1777                                 DEBUG (g_print ("\tshortcut assignment of R%d to ECX\n", ins->sreg2));
1778                                 rs->iassign [ins->sreg2] = AMD64_RCX;
1779                                 rs->isymbolic [AMD64_RCX] = ins->sreg2;
1780                                 ins->sreg2 = AMD64_RCX;
1781                                 rs->ifree_mask &= ~ (1 << AMD64_RCX);
1782                         } else {
1783                                 int need_ecx_spill = TRUE;
1784                                 /* 
1785                                  * we first check if src1/dreg is already assigned a register
1786                                  * and then we force a spill of the var assigned to ECX.
1787                                  */
1788                                 /* the destination register can't be ECX */
1789                                 dest_mask &= ~ (1 << AMD64_RCX);
1790                                 src1_mask &= ~ (1 << AMD64_RCX);
1791                                 val = rs->iassign [ins->dreg];
1792                                 /* 
1793                                  * the destination register is already assigned to ECX:
1794                                  * we need to allocate another register for it and then
1795                                  * copy from this to ECX.
1796                                  */
1797                                 if (val == AMD64_RCX && ins->dreg != ins->sreg2) {
1798                                         int new_dest;
1799                                         new_dest = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
1800                                         g_assert (new_dest >= 0);
1801                                         DEBUG (g_print ("\tclob:s changing dreg R%d to %s from ECX\n", ins->dreg, mono_arch_regname (new_dest)));
1802
1803                                         rs->isymbolic [new_dest] = ins->dreg;
1804                                         rs->iassign [ins->dreg] = new_dest;
1805                                         clob_dreg = ins->dreg;
1806                                         ins->dreg = new_dest;
1807                                         create_copy_ins (cfg, AMD64_RCX, new_dest, ins);
1808                                         need_ecx_spill = FALSE;
1809                                         /*DEBUG (g_print ("\tforced spill of R%d\n", ins->dreg));
1810                                         val = get_register_force_spilling (cfg, tmp, ins, ins->dreg);
1811                                         rs->iassign [ins->dreg] = val;
1812                                         rs->isymbolic [val] = prev_dreg;
1813                                         ins->dreg = val;*/
1814                                 }
1815                                 val = rs->iassign [ins->sreg1];
1816                                 if (val == AMD64_RCX) {
1817                                         g_assert_not_reached ();
1818                                 } else if (val >= 0) {
1819                                         /* 
1820                                          * the first src reg was already assigned to a register,
1821                                          * we need to copy it to the dest register because the 
1822                                          * shift instruction clobbers the first operand.
1823                                          */
1824                                         MonoInst *copy = create_copy_ins (cfg, ins->dreg, val, NULL);
1825                                         DEBUG (g_print ("\tclob:s moved sreg1 from R%d to R%d\n", val, ins->dreg));
1826                                         insert_before_ins (ins, tmp, copy);
1827                                 }
1828                                 val = rs->iassign [ins->sreg2];
1829                                 if (val >= 0 && val != AMD64_RCX) {
1830                                         MonoInst *move = create_copy_ins (cfg, AMD64_RCX, val, NULL);
1831                                         DEBUG (g_print ("\tmoved arg from R%d (%d) to ECX\n", val, ins->sreg2));
1832                                         move->next = ins;
1833                                         g_assert_not_reached ();
1834                                         /* FIXME: where is move connected to the instruction list? */
1835                                         //tmp->prev->data->next = move;
1836                                 }
1837                                 if (need_ecx_spill && !(rs->ifree_mask & (1 << AMD64_RCX))) {
1838                                         DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RCX]));
1839                                         get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RCX]);
1840                                         mono_regstate_free_int (rs, AMD64_RCX);
1841                                 }
1842                                 /* force-set sreg2 */
1843                                 rs->iassign [ins->sreg2] = AMD64_RCX;
1844                                 rs->isymbolic [AMD64_RCX] = ins->sreg2;
1845                                 ins->sreg2 = AMD64_RCX;
1846                                 rs->ifree_mask &= ~ (1 << AMD64_RCX);
1847                         }
1848                 } else if (spec [MONO_INST_CLOB] == 'd') { /* division */
1849                         int dest_reg = AMD64_RAX;
1850                         int clob_reg = AMD64_RDX;
1851                         if (spec [MONO_INST_DEST] == 'd') {
1852                 dest_reg = AMD64_RDX; /* remainder */
1853                                 clob_reg = AMD64_RAX;
1854                         }
1855                         val = rs->iassign [ins->dreg];
1856                         if (0 && val >= 0 && val != dest_reg && !(rs->ifree_mask & (1 << dest_reg))) {
1857                                 DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
1858                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg]);
1859                                 mono_regstate_free_int (rs, dest_reg);
1860                         }
1861                         if (val < 0) {
1862                                 if (val < -1) {
1863                                         /* the register gets spilled after this inst */
1864                                         int spill = -val -1;
1865                                         dest_mask = 1 << clob_reg;
1866                                         prev_dreg = ins->dreg;
1867                                         val = mono_regstate_alloc_int (rs, dest_mask);
1868                                         if (val < 0)
1869                                                 val = get_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg);
1870                                         rs->iassign [ins->dreg] = val;
1871                                         if (spill)
1872                                                 create_spilled_store (cfg, spill, val, prev_dreg, ins);
1873                                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
1874                                         rs->isymbolic [val] = prev_dreg;
1875                                         ins->dreg = val;
1876                                         if (val != dest_reg) { /* force a copy */
1877                                                 create_copy_ins (cfg, val, dest_reg, ins);
1878                                         }
1879                                 } else {
1880                                         DEBUG (g_print ("\tshortcut assignment of R%d to %s\n", ins->dreg, mono_arch_regname (dest_reg)));
1881                                         prev_dreg = ins->dreg;
1882                                         rs->iassign [ins->dreg] = dest_reg;
1883                                         rs->isymbolic [dest_reg] = ins->dreg;
1884                                         ins->dreg = dest_reg;
1885                                         rs->ifree_mask &= ~ (1 << dest_reg);
1886                                 }
1887                         } else {
1888                                 //DEBUG (g_print ("dest reg in div assigned: %s\n", mono_arch_regname (val)));
1889                                 if (val != dest_reg) { /* force a copy */
1890                                         create_copy_ins (cfg, val, dest_reg, ins);
1891                                         if (!(rs->ifree_mask & (1 << dest_reg)) && rs->isymbolic [dest_reg] >= MONO_MAX_IREGS) {
1892                                                 DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
1893                                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg]);
1894                                                 mono_regstate_free_int (rs, dest_reg);
1895                                         }
1896                                 }
1897                         }
1898                         if (!(rs->ifree_mask & (1 << clob_reg)) && (clob_reg != val) && (rs->isymbolic [clob_reg] >= MONO_MAX_IREGS)) {
1899                                 DEBUG (g_print ("\tforced spill of clobbered reg R%d\n", rs->isymbolic [clob_reg]));
1900                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [clob_reg]);
1901                                 mono_regstate_free_int (rs, clob_reg);
1902                         }
1903                         src1_mask = 1 << AMD64_RAX;
1904                         src2_mask = 1 << AMD64_RCX;
1905                 }
1906                 if (spec [MONO_INST_DEST] == 'l') {
1907                         int hreg;
1908                         val = rs->iassign [ins->dreg];
1909                         /* check special case when dreg has been moved from ECX (clob shift) */
1910                         if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
1911                                 hreg = clob_dreg + 1;
1912                         else
1913                                 hreg = ins->dreg + 1;
1914
1915                         /* base prev_dreg on fixed hreg, handle clob case */
1916                         val = hreg - 1;
1917
1918                         if (val != rs->isymbolic [AMD64_RAX] && !(rs->ifree_mask & (1 << AMD64_RAX))) {
1919                                 DEBUG (g_print ("\t(long-low) forced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
1920                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX]);
1921                                 mono_regstate_free_int (rs, AMD64_RAX);
1922                         }
1923                         if (hreg != rs->isymbolic [AMD64_RDX] && !(rs->ifree_mask & (1 << AMD64_RDX))) {
1924                                 DEBUG (g_print ("\t(long-high) forced spill of R%d\n", rs->isymbolic [AMD64_RDX]));
1925                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RDX]);
1926                                 mono_regstate_free_int (rs, AMD64_RDX);
1927                         }
1928                 }
1929
1930                 /* Track dreg */
1931                 if (spec [MONO_INST_DEST] == 'f') {
1932                         if (reginfof [ins->dreg].flags & MONO_X86_FP_NEEDS_SPILL) {
1933                                 GList *spill_node;
1934                                 MonoInst *store;
1935                                 spill_node = g_list_first (fspill_list);
1936                                 g_assert (spill_node);
1937
1938                                 store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->dreg, ins);
1939                                 insert_before_ins (ins, tmp, store);
1940                                 fspill_list = g_list_remove (fspill_list, spill_node->data);
1941                                 fspill--;
1942                         }
1943                 } else if (spec [MONO_INST_DEST] == 'L') {
1944                         int hreg;
1945                         val = rs->iassign [ins->dreg];
1946                         /* check special case when dreg has been moved from ECX (clob shift) */
1947                         if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
1948                                 hreg = clob_dreg + 1;
1949                         else
1950                                 hreg = ins->dreg + 1;
1951
1952                         /* base prev_dreg on fixed hreg, handle clob case */
1953                         prev_dreg = hreg - 1;
1954
1955                         if (val < 0) {
1956                                 int spill = 0;
1957                                 if (val < -1) {
1958                                         /* the register gets spilled after this inst */
1959                                         spill = -val -1;
1960                                 }
1961                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
1962                                 rs->iassign [ins->dreg] = val;
1963                                 if (spill)
1964                                         create_spilled_store (cfg, spill, val, prev_dreg, ins);
1965                         }
1966
1967                         DEBUG (g_print ("\tassigned dreg (long) %s to dest R%d\n", mono_arch_regname (val), hreg - 1));
1968  
1969                         rs->isymbolic [val] = hreg - 1;
1970                         ins->dreg = val;
1971                         
1972                         val = rs->iassign [hreg];
1973                         if (val < 0) {
1974                                 int spill = 0;
1975                                 if (val < -1) {
1976                                         /* the register gets spilled after this inst */
1977                                         spill = -val -1;
1978                                 }
1979                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
1980                                 rs->iassign [hreg] = val;
1981                                 if (spill)
1982                                         create_spilled_store (cfg, spill, val, hreg, ins);
1983                         }
1984
1985                         DEBUG (g_print ("\tassigned hreg (long-high) %s to dest R%d\n", mono_arch_regname (val), hreg));
1986                         rs->isymbolic [val] = hreg;
1987                         /* save the allocated reg into ins->unused */
1988                         ins->unused = val;
1989
1990                         /* check if we can free our long reg */
1991                         if (reg_is_freeable (val) && hreg >= 0 && reginfo [hreg].born_in >= i) {
1992                                 DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (val), hreg, reginfo [hreg].born_in));
1993                                 mono_regstate_free_int (rs, val);
1994                         }
1995                 }
1996                 else if (ins->dreg >= MONO_MAX_IREGS) {
1997                         int hreg;
1998                         val = rs->iassign [ins->dreg];
1999                         if (spec [MONO_INST_DEST] == 'l') {
2000                                 /* check special case when dreg has been moved from ECX (clob shift) */
2001                                 if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
2002                                         hreg = clob_dreg + 1;
2003                                 else
2004                                         hreg = ins->dreg + 1;
2005
2006                                 /* base prev_dreg on fixed hreg, handle clob case */
2007                                 prev_dreg = hreg - 1;
2008                         } else
2009                                 prev_dreg = ins->dreg;
2010
2011                         if (val < 0) {
2012                                 int spill = 0;
2013                                 if (val < -1) {
2014                                         /* the register gets spilled after this inst */
2015                                         spill = -val -1;
2016                                 }
2017                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
2018                                 rs->iassign [ins->dreg] = val;
2019                                 if (spill)
2020                                         create_spilled_store (cfg, spill, val, prev_dreg, ins);
2021                         }
2022                         DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2023                         rs->isymbolic [val] = prev_dreg;
2024                         ins->dreg = val;
2025                         /* handle cases where lreg needs to be eax:edx */
2026                         if (spec [MONO_INST_DEST] == 'l') {
2027                                 /* check special case when dreg has been moved from ECX (clob shift) */
2028                                 int hreg = prev_dreg + 1;
2029                                 val = rs->iassign [hreg];
2030                                 if (val < 0) {
2031                                         int spill = 0;
2032                                         if (val < -1) {
2033                                                 /* the register gets spilled after this inst */
2034                                                 spill = -val -1;
2035                                         }
2036                                         val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
2037                                         rs->iassign [hreg] = val;
2038                                         if (spill)
2039                                                 create_spilled_store (cfg, spill, val, hreg, ins);
2040                                 }
2041                                 DEBUG (g_print ("\tassigned hreg %s to dest R%d\n", mono_arch_regname (val), hreg));
2042                                 rs->isymbolic [val] = hreg;
2043                                 if (ins->dreg == AMD64_RAX) {
2044                                         if (val != AMD64_RDX)
2045                                                 create_copy_ins (cfg, val, AMD64_RDX, ins);
2046                                 } else if (ins->dreg == AMD64_RDX) {
2047                                         if (val == AMD64_RAX) {
2048                                                 /* swap */
2049                                                 g_assert_not_reached ();
2050                                         } else {
2051                                                 /* two forced copies */
2052                                                 create_copy_ins (cfg, val, AMD64_RDX, ins);
2053                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins);
2054                                         }
2055                                 } else {
2056                                         if (val == AMD64_RDX) {
2057                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins);
2058                                         } else {
2059                                                 /* two forced copies */
2060                                                 create_copy_ins (cfg, val, AMD64_RDX, ins);
2061                                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins);
2062                                         }
2063                                 }
2064                                 if (reg_is_freeable (val) && hreg >= 0 && reginfo [hreg].born_in >= i) {
2065                                         DEBUG (g_print ("\tfreeable %s (R%d)\n", mono_arch_regname (val), hreg));
2066                                         mono_regstate_free_int (rs, val);
2067                                 }
2068                         } else if (spec [MONO_INST_DEST] == 'a' && ins->dreg != AMD64_RAX && spec [MONO_INST_CLOB] != 'd') {
2069                                 /* this instruction only outputs to EAX, need to copy */
2070                                 create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins);
2071                         } else if (spec [MONO_INST_DEST] == 'd' && ins->dreg != AMD64_RDX && spec [MONO_INST_CLOB] != 'd') {
2072                                 create_copy_ins (cfg, ins->dreg, AMD64_RDX, ins);
2073                         }
2074                 }
2075                 if (spec [MONO_INST_DEST] != 'f' && reg_is_freeable (ins->dreg) && prev_dreg >= 0 && reginfo [prev_dreg].born_in >= i) {
2076                         DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
2077                         mono_regstate_free_int (rs, ins->dreg);
2078                 }
2079                 /* put src1 in EAX if it needs to be */
2080                 if (spec [MONO_INST_SRC1] == 'a') {
2081                         if (!(rs->ifree_mask & (1 << AMD64_RAX))) {
2082                                 DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
2083                                 get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX]);
2084                                 mono_regstate_free_int (rs, AMD64_RAX);
2085                         }
2086                         /* force-set sreg1 */
2087                         rs->iassign [ins->sreg1] = AMD64_RAX;
2088                         rs->isymbolic [AMD64_RAX] = ins->sreg1;
2089                         ins->sreg1 = AMD64_RAX;
2090                         rs->ifree_mask &= ~ (1 << AMD64_RAX);
2091                 }
2092
2093                 /* Track sreg1 */
2094                 if (spec [MONO_INST_SRC1] == 'f') {
2095                         if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD) {
2096                                 MonoInst *load;
2097                                 MonoInst *store = NULL;
2098
2099                                 if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
2100                                         GList *spill_node;
2101                                         spill_node = g_list_first (fspill_list);
2102                                         g_assert (spill_node);
2103
2104                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg1, ins);          
2105                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2106                                 }
2107
2108                                 fspill++;
2109                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2110                                 load = create_spilled_load_float (cfg, fspill, ins->sreg1, ins);
2111                                 insert_before_ins (ins, tmp, load);
2112                                 if (store) 
2113                                         insert_before_ins (load, tmp, store);
2114                         }
2115                 } else if ((spec [MONO_INST_DEST] == 'L') && (spec [MONO_INST_SRC1] == 'L')) {
2116                         /* force source to be same as dest */
2117                         rs->iassign [ins->sreg1] = ins->dreg;
2118                         rs->iassign [ins->sreg1 + 1] = ins->unused;
2119
2120                         DEBUG (g_print ("\tassigned sreg1 (long) %s to sreg1 R%d\n", mono_arch_regname (ins->dreg), ins->sreg1));
2121                         DEBUG (g_print ("\tassigned sreg1 (long-high) %s to sreg1 R%d\n", mono_arch_regname (ins->unused), ins->sreg1 + 1));
2122
2123                         ins->sreg1 = ins->dreg;
2124                         /* 
2125                          * No need to save the reg, we know that src1 == dest in this case:
2126                          * ins->inst_c0 = ins->unused;
2127                          */
2128
2129                         /* make sure that we remove them from free mask */
2130                         rs->ifree_mask &= ~ (1 << ins->dreg);
2131                         rs->ifree_mask &= ~ (1 << ins->unused);
2132                 }
2133                 else if (ins->sreg1 >= MONO_MAX_IREGS) {
2134                         val = rs->iassign [ins->sreg1];
2135                         prev_sreg1 = ins->sreg1;
2136                         if (val < 0) {
2137                                 int spill = 0;
2138                                 if (val < -1) {
2139                                         /* the register gets spilled after this inst */
2140                                         spill = -val -1;
2141                                 }
2142                                 if (0 && ins->opcode == OP_MOVE) {
2143                                         /* 
2144                                          * small optimization: the dest register is already allocated
2145                                          * but the src one is not: we can simply assign the same register
2146                                          * here and peephole will get rid of the instruction later.
2147                                          * This optimization may interfere with the clobbering handling:
2148                                          * it removes a mov operation that will be added again to handle clobbering.
2149                                          * There are also some other issues that show up when running make testjit.
2150                                          */
2151                                         mono_regstate_alloc_int (rs, 1 << ins->dreg);
2152                                         val = rs->iassign [ins->sreg1] = ins->dreg;
2153                                         //g_assert (val >= 0);
2154                                         DEBUG (g_print ("\tfast assigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2155                                 } else {
2156                                         //g_assert (val == -1); /* source cannot be spilled */
2157                                         val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src1_mask, ins->sreg1, reginfo [ins->sreg1].flags);
2158                                         rs->iassign [ins->sreg1] = val;
2159                                         DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2160                                 }
2161                                 if (spill) {
2162                                         MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL);
2163                                         insert_before_ins (ins, tmp, store);
2164                                 }
2165                         }
2166                         rs->isymbolic [val] = prev_sreg1;
2167                         ins->sreg1 = val;
2168                 } else {
2169                         prev_sreg1 = -1;
2170                 }
2171                 /* handle clobbering of sreg1 */
2172                 if ((spec [MONO_INST_CLOB] == '1' || spec [MONO_INST_CLOB] == 's') && ins->dreg != ins->sreg1) {
2173                         MonoInst *copy = create_copy_ins (cfg, ins->dreg, ins->sreg1, NULL);
2174                         DEBUG (g_print ("\tneed to copy sreg1 %s to dreg %s\n", mono_arch_regname (ins->sreg1), mono_arch_regname (ins->dreg)));
2175                         if (ins->sreg2 == -1 || spec [MONO_INST_CLOB] == 's') {
2176                                 /* note: the copy is inserted before the current instruction! */
2177                                 insert_before_ins (ins, tmp, copy);
2178                                 /* we set sreg1 to dest as well */
2179                                 prev_sreg1 = ins->sreg1 = ins->dreg;
2180                         } else {
2181                                 /* inserted after the operation */
2182                                 copy->next = ins->next;
2183                                 ins->next = copy;
2184                         }
2185                 }
2186                 /* track sreg2 */
2187                 if (spec [MONO_INST_SRC2] == 'f') {
2188                         if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD) {
2189                                 MonoInst *load;
2190                                 MonoInst *store = NULL;
2191
2192                                 if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
2193                                         GList *spill_node;
2194
2195                                         spill_node = g_list_first (fspill_list);
2196                                         g_assert (spill_node);
2197                                         if (spec [MONO_INST_SRC1] == 'f' && (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL))
2198                                                 spill_node = g_list_next (spill_node);
2199         
2200                                         store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg2, ins);
2201                                         fspill_list = g_list_remove (fspill_list, spill_node->data);
2202                                 } 
2203                                 
2204                                 fspill++;
2205                                 fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
2206                                 load = create_spilled_load_float (cfg, fspill, ins->sreg2, ins);
2207                                 insert_before_ins (ins, tmp, load);
2208                                 if (store) 
2209                                         insert_before_ins (load, tmp, store);
2210                         }
2211                 } 
2212                 else if (ins->sreg2 >= MONO_MAX_IREGS) {
2213                         val = rs->iassign [ins->sreg2];
2214                         prev_sreg2 = ins->sreg2;
2215                         if (val < 0) {
2216                                 int spill = 0;
2217                                 if (val < -1) {
2218                                         /* the register gets spilled after this inst */
2219                                         spill = -val -1;
2220                                 }
2221                                 val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src2_mask, ins->sreg2, reginfo [ins->sreg2].flags);
2222                                 rs->iassign [ins->sreg2] = val;
2223                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
2224                                 if (spill)
2225                                         create_spilled_store (cfg, spill, val, prev_sreg2, ins);
2226                         }
2227                         rs->isymbolic [val] = prev_sreg2;
2228                         ins->sreg2 = val;
2229                         if (spec [MONO_INST_CLOB] == 's' && ins->sreg2 != AMD64_RCX) {
2230                                 DEBUG (g_print ("\tassigned sreg2 %s to R%d, but ECX is needed (R%d)\n", mono_arch_regname (val), ins->sreg2, rs->iassign [AMD64_RCX]));
2231                         }
2232                 } else {
2233                         prev_sreg2 = -1;
2234                 }
2235
2236                 if (spec [MONO_INST_CLOB] == 'c') {
2237                         int j, s;
2238                         guint32 clob_mask = AMD64_CALLEE_REGS;
2239                         for (j = 0; j < MONO_MAX_IREGS; ++j) {
2240                                 s = 1 << j;
2241                                 if ((clob_mask & s) && !(rs->ifree_mask & s) && j != ins->sreg1) {
2242                                         //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
2243                                 }
2244                         }
2245                 }
2246                 /*if (reg_is_freeable (ins->sreg1) && prev_sreg1 >= 0 && reginfo [prev_sreg1].born_in >= i) {
2247                         DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg1)));
2248                         mono_regstate_free_int (rs, ins->sreg1);
2249                 }
2250                 if (reg_is_freeable (ins->sreg2) && prev_sreg2 >= 0 && reginfo [prev_sreg2].born_in >= i) {
2251                         DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg2)));
2252                         mono_regstate_free_int (rs, ins->sreg2);
2253                 }*/
2254         
2255                 //DEBUG (print_ins (i, ins));
2256                 /* this may result from an insert_before call */
2257                 if (!tmp->next)
2258                         bb->code = tmp->data;
2259                 tmp = tmp->next;
2260         }
2261
2262         g_free (reginfo);
2263         g_free (reginfof);
2264         g_list_free (fspill_list);
2265 }
2266
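/*
 * Convert the value on top of the x87 stack to an integer in dreg: save the
 * FPU control word to the stack, force the rounding-control bits to truncate
 * (the 0xc00 mask), store the value with fistp, then restore the original
 * control word.
 */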
2267 static unsigned char*
2268 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int size, gboolean is_signed)
2269 {
2270         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
2271         x86_fnstcw_membase(code, AMD64_RSP, 0);
2272         amd64_mov_reg_membase (code, dreg, AMD64_RSP, 0, 2);
2273         amd64_alu_reg_imm (code, X86_OR, dreg, 0xc00);
2274         amd64_mov_membase_reg (code, AMD64_RSP, 2, dreg, 2);
2275         amd64_fldcw_membase (code, AMD64_RSP, 2);
2276         amd64_push_reg (code, AMD64_RAX); // SP = SP - 8
2277         amd64_fist_pop_membase (code, AMD64_RSP, 0, size == 8);
2278         amd64_pop_reg (code, dreg);
2279         amd64_fldcw_membase (code, AMD64_RSP, 0);
2280         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
2281
2282         if (size == 1)
2283                 amd64_widen_reg (code, dreg, dreg, is_signed, FALSE);
2284         else if (size == 2)
2285                 amd64_widen_reg (code, dreg, dreg, is_signed, TRUE);
2286         return code;
2287 }
2288
2289 static unsigned char*
2290 mono_emit_stack_alloc (guchar *code, MonoInst* tree)
2291 {
2292         int sreg = tree->sreg1;
2293 #ifdef PLATFORM_WIN32
2294         guint8* br[5];
2295
2296         /*
2297          * Under Windows:
2298          * if the requested stack size is larger than one page,
2299          * perform a stack-touch operation.
2300          */
2301         /*
2302          * Generate stack probe code.
2303          * Under Windows, it is necessary to allocate one page at a time,
2304          * "touching" stack after each successful sub-allocation. This is
2305          * because of the way stack growth is implemented - there is a
2306          * guard page before the lowest stack page that is currently commited.
2307          * guard page before the lowest stack page that is currently committed.
2308          * Stack normally grows sequentially, so the OS traps access to the
2309          * guard page and commits more pages when needed.
         */
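        /*
         * Example: a request of 0x2800 bytes runs the loop below twice (two
         * 0x1000-byte pages, each touched through the test against (%rsp))
         * and then subtracts the remaining 0x800 in one step.
         */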
2310         amd64_test_reg_imm (code, sreg, ~0xFFF);
2311         br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
2312
2313         br[2] = code; /* loop */
2314         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
2315         amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
2316         amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
2317         amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
2318         br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
2319         amd64_patch (br[3], br[2]);
2320         amd64_test_reg_reg (code, sreg, sreg);
2321         br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
2322         amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
2323
2324         br[1] = code; x86_jump8 (code, 0);
2325
2326         amd64_patch (br[0], code);
2327         amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
2328         amd64_patch (br[1], code);
2329         amd64_patch (br[4], code);
2330 #else /* PLATFORM_WIN32 */
2331         amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1);
2332 #endif
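        /*
         * When MONO_INST_INIT is set, zero the freshly allocated area with
         * rep stosl: RAX supplies the zero pattern, RCX the count of 4-byte
         * words (size >> 2) and RDI the start of the area; RAX, RCX and RDI
         * are saved around it unless they already hold the result or the size.
         */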
2333         if (tree->flags & MONO_INST_INIT) {
2334                 int offset = 0;
2335                 if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) {
2336                         amd64_push_reg (code, AMD64_RAX);
2337                         offset += 8; /* push moves the stack by 8 bytes on amd64 */
2338                 }
2339                 if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) {
2340                         amd64_push_reg (code, AMD64_RCX);
2341                         offset += 8;
2342                 }
2343                 if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) {
2344                         amd64_push_reg (code, AMD64_RDI);
2345                         offset += 8;
2346                 }
2347                 
2348                 amd64_shift_reg_imm (code, X86_SHR, sreg, 2);
2349                 if (sreg != AMD64_RCX)
2350                         amd64_mov_reg_reg (code, AMD64_RCX, sreg, 4);
2351                 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
2352                                 
2353                 amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset);
2354                 amd64_cld (code);
2355                 amd64_prefix (code, X86_REP_PREFIX);
2356                 amd64_stosl (code);
2357                 
2358                 if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
2359                         amd64_pop_reg (code, AMD64_RDI);
2360                 if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX)
2361                         amd64_pop_reg (code, AMD64_RCX);
2362                 if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX)
2363                         amd64_pop_reg (code, AMD64_RAX);
2364         }
2365         return code;
2366 }
2367
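/*
 * Fix up the return value after a call: for float calls the result arrives
 * in XMM0, but the rest of this backend works with the x87 stack, so the
 * value is bounced through a spill slot (movsd to memory, fld back).
 */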
2368 static unsigned char*
2369 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guchar *code)
2370 {
2371         guint32 offset;
2372
2373         /* Move return value to the target register */
2374         /* FIXME: do this in the local reg allocator */
2375         switch (ins->opcode) {
2376         case OP_FCALL:
2377         case OP_FCALL_REG:
2378         case OP_FCALL_MEMBASE:
2379                 /* FIXME: optimize this */
2380                 offset = mono_spillvar_offset_float (cfg, 0);
2381                 amd64_movsd_membase_reg (code, AMD64_RBP, offset, AMD64_XMM0);
2382                 amd64_fld_membase (code, AMD64_RBP, offset, TRUE);
2383                 break;
2384         }
2385
2386         return code;
2387 }
2388
2389 #define REAL_PRINT_REG(text,reg) \
2390 mono_assert (reg >= 0); \
2391 amd64_push_reg (code, AMD64_RAX); \
2392 amd64_push_reg (code, AMD64_RDX); \
2393 amd64_push_reg (code, AMD64_RCX); \
2394 amd64_push_reg (code, reg); \
2395 amd64_push_imm (code, reg); \
2396 amd64_push_imm (code, text " %d %p\n"); \
2397 amd64_mov_reg_imm (code, AMD64_RAX, printf); \
2398 amd64_call_reg (code, AMD64_RAX); \
2399 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 3*4); \
2400 amd64_pop_reg (code, AMD64_RCX); \
2401 amd64_pop_reg (code, AMD64_RDX); \
2402 amd64_pop_reg (code, AMD64_RAX);
2403
2404 /* benchmark and set based on cpu */
2405 #define LOOP_ALIGNMENT 8
2406 #define bb_is_loop_start(bb) ((bb)->nesting && ((bb)->in_count == 1))
2407
2408 void
2409 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2410 {
2411         MonoInst *ins;
2412         MonoCallInst *call;
2413         guint offset;
2414         guint8 *code = cfg->native_code + cfg->code_len;
2415         MonoInst *last_ins = NULL;
2416         guint last_offset = 0;
2417         int max_len, cpos;
2418
2419         if (cfg->opt & MONO_OPT_PEEPHOLE)
2420                 peephole_pass (cfg, bb);
2421
2422         if (cfg->opt & MONO_OPT_LOOP) {
2423                 int pad, align = LOOP_ALIGNMENT;
2424                 /* set alignment depending on cpu */
2425                 if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) {
2426                         pad = align - pad;
2427                         /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
2428                         amd64_padding (code, pad);
2429                         cfg->code_len += pad;
2430                         bb->native_offset = cfg->code_len;
2431                 }
2432         }
2433
2434         if (cfg->verbose_level > 2)
2435                 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2436
2437         cpos = bb->max_offset;
2438
2439         if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2440                 MonoProfileCoverageInfo *cov = cfg->coverage_info;
2441                 g_assert (!mono_compile_aot);
2442                 cpos += 6;
2443
2444                 cov->data [bb->dfn].cil_code = bb->cil_code;
2445                 /* this is not thread safe, but good enough */
2446                 amd64_inc_mem (code, (guint64)&cov->data [bb->dfn].count); 
2447         }
2448
2449         offset = code - cfg->native_code;
2450
2451         ins = bb->code;
2452         while (ins) {
2453                 offset = code - cfg->native_code;
2454
2455                 max_len = ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
2456
2457                 if (offset > (cfg->code_size - max_len - 16)) {
2458                         cfg->code_size *= 2;
2459                         cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2460                         code = cfg->native_code + offset;
2461                         mono_jit_stats.code_reallocs++;
2462                 }
2463
2464                 mono_debug_record_line_number (cfg, ins, offset);
2465
2466                 switch (ins->opcode) {
2467                 case OP_BIGMUL:
2468                         amd64_mul_reg (code, ins->sreg2, TRUE);
2469                         break;
2470                 case OP_BIGMUL_UN:
2471                         amd64_mul_reg (code, ins->sreg2, FALSE);
2472                         break;
2473                 case OP_X86_SETEQ_MEMBASE:
2474                         amd64_set_membase (code, X86_CC_EQ, ins->inst_basereg, ins->inst_offset, TRUE);
2475                         break;
2476                 case OP_STOREI1_MEMBASE_IMM:
2477                         g_assert (amd64_is_imm32 (ins->inst_imm));
2478                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1);
2479                         break;
2480                 case OP_STOREI2_MEMBASE_IMM:
2481                         g_assert (amd64_is_imm32 (ins->inst_imm));
2482                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2);
2483                         break;
2484                 case OP_STOREI4_MEMBASE_IMM:
2485                         g_assert (amd64_is_imm32 (ins->inst_imm));
2486                         amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4);
2487                         break;
2488                 case OP_STOREI1_MEMBASE_REG:
2489                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1);
2490                         break;
2491                 case OP_STOREI2_MEMBASE_REG:
2492                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
2493                         break;
2494                 case OP_STORE_MEMBASE_REG:
2495                 case OP_STOREI8_MEMBASE_REG:
2496                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 8);
2497                         break;
2498                 case OP_STOREI4_MEMBASE_REG:
2499                         amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
2500                         break;
2501                 case OP_STORE_MEMBASE_IMM:
2502                 case OP_STOREI8_MEMBASE_IMM:
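                             /*
                              * mov to memory only accepts a sign extended
                              * 32 bit immediate, so larger constants are
                              * materialized in the scratch register (%r11)
                              * and stored from there.
                              */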
2503                         if (amd64_is_imm32 (ins->inst_imm))
2504                                 amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
2505                         else {
2506                                 amd64_mov_reg_imm (code, GP_SCRATCH_REG, ins->inst_imm);
2507                                 amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, GP_SCRATCH_REG, 8);
2508                         }
2509                         break;
2510                 case CEE_LDIND_I:
2511                         amd64_mov_reg_mem (code, ins->dreg, ins->inst_p0, sizeof (gpointer));
2512                         break;
2513                 case CEE_LDIND_I4:
2514                         amd64_mov_reg_mem (code, ins->dreg, ins->inst_p0, 4);
2515                         break;
2516                 case CEE_LDIND_U4:
2517                         amd64_mov_reg_mem (code, ins->dreg, ins->inst_p0, 4);
2518                         break;
2519                 case OP_LOADU4_MEM:
2520                         amd64_mov_reg_imm (code, ins->dreg, ins->inst_p0);
2521                         amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
2522                         break;
2523                 case OP_LOAD_MEMBASE:
2524                 case OP_LOADI8_MEMBASE:
2525                         if (amd64_is_imm32 (ins->inst_offset)) {
2526                                 amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof (gpointer));
2527                         }
2528                         else {
2529                                 amd64_mov_reg_imm_size (code, GP_SCRATCH_REG, ins->inst_offset, 8);
2530                                 amd64_mov_reg_memindex_size (code, ins->dreg, ins->inst_basereg, 0, GP_SCRATCH_REG, 0, 8);
2531                         }
2532                         break;
2533                 case OP_LOADI4_MEMBASE:
2534                         amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2535                         break;
2536                 case OP_LOADU4_MEMBASE:
2537                         amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4);
2538                         break;
2539                 case OP_LOADU1_MEMBASE:
2540                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE);
2541                         break;
2542                 case OP_LOADI1_MEMBASE:
2543                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
2544                         break;
2545                 case OP_LOADU2_MEMBASE:
2546                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE);
2547                         break;
2548                 case OP_LOADI2_MEMBASE:
2549                         amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
2550                         break;
2551                 case CEE_CONV_I1:
2552                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
2553                         break;
2554                 case CEE_CONV_I2:
2555                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
2556                         break;
2557                 case CEE_CONV_U1:
2558                         amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE);
2559                         break;
2560                 case CEE_CONV_U2:
2561                         amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE);
2562                         break;
2563                 case CEE_CONV_U8:
2564                         /* Clean out the upper word */
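                             /* (a 32 bit mov implicitly zero extends to 64 bits on amd64) */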
2565                         amd64_mov_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
2566                         break;
2567                 case CEE_CONV_I8:
2568                         amd64_movsxd_reg_reg (code, ins->dreg, ins->sreg1);
2569                         break;                  
2570                 case OP_COMPARE:
2571                 case OP_LCOMPARE:
2572                         amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
2573                         break;
2574                 case OP_COMPARE_IMM:
2575                         g_assert (amd64_is_imm32 (ins->inst_imm));
2576                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
2577                         break;
2578                 case OP_X86_COMPARE_MEMBASE_REG:
2579                         amd64_alu_membase_reg (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2);
2580                         break;
2581                 case OP_X86_COMPARE_MEMBASE_IMM:
2582                         g_assert (amd64_is_imm32 (ins->inst_imm));
2583                         amd64_alu_membase_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2584                         break;
2585                 case OP_X86_COMPARE_REG_MEMBASE:
2586                         amd64_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
2587                         break;
2588                 case OP_X86_TEST_NULL:
2589                         amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
2590                         break;
2591                 case OP_X86_ADD_MEMBASE_IMM:
2592                         amd64_alu_membase_imm (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2593                         break;
2594                 case OP_X86_ADD_MEMBASE:
2595                         amd64_alu_reg_membase (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset);
2596                         break;
2597                 case OP_X86_SUB_MEMBASE_IMM:
2598                         g_assert (amd64_is_imm32 (ins->inst_imm));
2599                         amd64_alu_membase_imm (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
2600                         break;
2601                 case OP_X86_SUB_MEMBASE:
2602                         amd64_alu_reg_membase (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset);
2603                         break;
2604                 case OP_X86_INC_MEMBASE:
2605                         amd64_inc_membase (code, ins->inst_basereg, ins->inst_offset);
2606                         break;
2607                 case OP_X86_INC_REG:
2608                         amd64_inc_reg (code, ins->dreg);
2609                         break;
2610                 case OP_X86_DEC_MEMBASE:
2611                         amd64_dec_membase (code, ins->inst_basereg, ins->inst_offset);
2612                         break;
2613                 case OP_X86_DEC_REG:
2614                         amd64_dec_reg (code, ins->dreg);
2615                         break;
2616                 case OP_X86_MUL_MEMBASE:
2617                         amd64_imul_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
2618                         break;
2619                 case OP_AMD64_ICOMPARE_MEMBASE_REG:
2620                         amd64_alu_membase_reg_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
2621                         break;
2622                 case OP_AMD64_ICOMPARE_MEMBASE_IMM:
2623                         amd64_alu_membase_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
2624                         break;
2625                 case OP_AMD64_ICOMPARE_REG_MEMBASE:
2626                         amd64_alu_reg_membase_size (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
2627                         break;
2628                 case CEE_BREAK:
2629                         amd64_breakpoint (code);
2630                         break;
2631
2632                 case OP_ADDCC:
2633                 case CEE_ADD:
2634                         amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
2635                         break;
2636                 case OP_ADC:
2637                         amd64_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
2638                         break;
2639                 case OP_ADD_IMM:
2640                         g_assert (amd64_is_imm32 (ins->inst_imm));
2641                         amd64_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
2642                         break;
2643                 case OP_ADC_IMM:
2644                         g_assert (amd64_is_imm32 (ins->inst_imm));
2645                         amd64_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
2646                         break;
2647                 case OP_SUBCC:
2648                 case CEE_SUB:
2649                         amd64_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
2650                         break;
2651                 case OP_SBB:
2652                         amd64_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
2653                         break;
2654                 case OP_SUB_IMM:
2655                         g_assert (amd64_is_imm32 (ins->inst_imm));
2656                         amd64_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
2657                         break;
2658                 case OP_SBB_IMM:
2659                         g_assert (amd64_is_imm32 (ins->inst_imm));
2660                         amd64_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
2661                         break;
2662                 case CEE_AND:
2663                         amd64_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
2664                         break;
2665                 case OP_AND_IMM:
2666                         g_assert (amd64_is_imm32 (ins->inst_imm));
2667                         amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
2668                         break;
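                     /*
                      * div/idiv take the dividend in %rdx:%rax and leave the
                      * quotient in %rax and the remainder in %rdx: the signed
                      * cases sign extend %rax into %rdx first (cdq), the
                      * unsigned cases zero %rdx instead.
                      */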
2669                 case CEE_DIV:
2670                         amd64_cdq (code);
2671                         amd64_div_reg (code, ins->sreg2, TRUE);
2672                         break;
2673                 case CEE_DIV_UN:
2674                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
2675                         amd64_div_reg (code, ins->sreg2, FALSE);
2676                         break;
2677                 case OP_DIV_IMM:
2678                         g_assert (amd64_is_imm32 (ins->inst_imm));
2679                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
2680                         amd64_cdq (code);
2681                         amd64_div_reg (code, ins->sreg2, TRUE);
2682                         break;
2683                 case CEE_REM:
2684                         amd64_cdq (code);
2685                         amd64_div_reg (code, ins->sreg2, TRUE);
2686                         break;
2687                 case CEE_REM_UN:
2688                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
2689                         amd64_div_reg (code, ins->sreg2, FALSE);
2690                         break;
2691                 case OP_REM_IMM:
2692                         g_assert (amd64_is_imm32 (ins->inst_imm));
2693                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
2694                         amd64_cdq (code);
2695                         amd64_div_reg (code, ins->sreg2, TRUE);
2696                         break;
2697                 case CEE_OR:
2698                         amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
2699                         break;
2700                 case OP_OR_IMM:
2701                         g_assert (amd64_is_imm32 (ins->inst_imm));
2702                         amd64_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
2703                         break;
2704                 case CEE_XOR:
2705                         amd64_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
2706                         break;
2707                 case OP_XOR_IMM:
2708                         g_assert (amd64_is_imm32 (ins->inst_imm));
2709                         amd64_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
2710                         break;
2711                 case CEE_SHL:
2712                 case OP_LSHL:
2713                         g_assert (ins->sreg2 == AMD64_RCX);
2714                         amd64_shift_reg (code, X86_SHL, ins->dreg);
2715                         break;
2716                 case CEE_SHR:
2717                 case OP_LSHR:
2718                         g_assert (ins->sreg2 == AMD64_RCX);
2719                         amd64_shift_reg (code, X86_SAR, ins->dreg);
2720                         break;
2721                 case OP_SHR_IMM:
2722                         g_assert (amd64_is_imm32 (ins->inst_imm));
2723                         amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
2724                         break;
2725                 case OP_LSHR_IMM:
2726                         g_assert (amd64_is_imm32 (ins->inst_imm));
2727                         amd64_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
2728                         break;
2729                 case OP_SHR_UN_IMM:
2730                         g_assert (amd64_is_imm32 (ins->inst_imm));
2731                         amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
2732                         break;
2733                 case OP_LSHR_UN_IMM:
2734                         g_assert (amd64_is_imm32 (ins->inst_imm));
2735                         amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
2736                         break;
2737                 case CEE_SHR_UN:
2738                         g_assert (ins->sreg2 == AMD64_RCX);
2739                         amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
2740                         break;
2741                 case OP_LSHR_UN:
2742                         g_assert (ins->sreg2 == AMD64_RCX);
2743                         amd64_shift_reg (code, X86_SHR, ins->dreg);
2744                         break;
2745                 case OP_SHL_IMM:
2746                         g_assert (amd64_is_imm32 (ins->inst_imm));
2747                         amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
2748                         break;
2749                 case OP_LSHL_IMM:
2750                         g_assert (amd64_is_imm32 (ins->inst_imm));
2751                         amd64_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
2752                         break;
2753
2754                 case OP_IADDCC:
2755                 case OP_IADD:
2756                         amd64_alu_reg_reg_size (code, X86_ADD, ins->sreg1, ins->sreg2, 4);
2757                         break;
2758                 case OP_IADC:
2759                         amd64_alu_reg_reg_size (code, X86_ADC, ins->sreg1, ins->sreg2, 4);
2760                         break;
2761                 case OP_IADD_IMM:
2762                         amd64_alu_reg_imm_size (code, X86_ADD, ins->dreg, ins->inst_imm, 4);
2763                         break;
2764                 case OP_IADC_IMM:
2765                         amd64_alu_reg_imm_size (code, X86_ADC, ins->dreg, ins->inst_imm, 4);
2766                         break;
2767                 case OP_ISUBCC:
2768                 case OP_ISUB:
2769                         amd64_alu_reg_reg_size (code, X86_SUB, ins->sreg1, ins->sreg2, 4);
2770                         break;
2771                 case OP_ISBB:
2772                         amd64_alu_reg_reg_size (code, X86_SBB, ins->sreg1, ins->sreg2, 4);
2773                         break;
2774                 case OP_ISUB_IMM:
2775                         amd64_alu_reg_imm_size (code, X86_SUB, ins->dreg, ins->inst_imm, 4);
2776                         break;
2777                 case OP_ISBB_IMM:
2778                         amd64_alu_reg_imm_size (code, X86_SBB, ins->dreg, ins->inst_imm, 4);
2779                         break;
2780                 case OP_IAND:
2781                         amd64_alu_reg_reg_size (code, X86_AND, ins->sreg1, ins->sreg2, 4);
2782                         break;
2783                 case OP_IAND_IMM:
2784                         amd64_alu_reg_imm_size (code, X86_AND, ins->sreg1, ins->inst_imm, 4);
2785                         break;
2786                 case OP_IOR:
2787                         amd64_alu_reg_reg_size (code, X86_OR, ins->sreg1, ins->sreg2, 4);
2788                         break;
2789                 case OP_IOR_IMM:
2790                         amd64_alu_reg_imm_size (code, X86_OR, ins->sreg1, ins->inst_imm, 4);
2791                         break;
2792                 case OP_IXOR:
2793                         amd64_alu_reg_reg_size (code, X86_XOR, ins->sreg1, ins->sreg2, 4);
2794                         break;
2795                 case OP_IXOR_IMM:
2796                         amd64_alu_reg_imm_size (code, X86_XOR, ins->sreg1, ins->inst_imm, 4);
2797                         break;
2798                 case OP_INEG:
2799                         amd64_neg_reg_size (code, ins->sreg1, 4);
2800                         break;
2801                 case OP_INOT:
2802                         amd64_not_reg_size (code, ins->sreg1, 4);
2803                         break;
2804                 case OP_ISHL:
2805                         g_assert (ins->sreg2 == AMD64_RCX);
2806                         amd64_shift_reg_size (code, X86_SHL, ins->dreg, 4);
2807                         break;
2808                 case OP_ISHR:
2809                         g_assert (ins->sreg2 == AMD64_RCX);
2810                         amd64_shift_reg_size (code, X86_SAR, ins->dreg, 4);
2811                         break;
2812                 case OP_ISHR_IMM:
2813                         amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
2814                         break;
2815                 case OP_ISHR_UN_IMM:
2816                         amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
2817                         break;
2818                 case OP_ISHR_UN:
2819                         g_assert (ins->sreg2 == AMD64_RCX);
2820                         amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
2821                         break;
2822                 case OP_ISHL_IMM:
2823                         amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
2824                         break;
2825                 case OP_IMUL:
2826                         amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
2827                         break;
2828                 case OP_IMUL_IMM:
2829                         amd64_imul_reg_reg_imm_size (code, ins->dreg, ins->sreg1, ins->inst_imm, 4);
2830                         break;
2831                 case OP_IMUL_OVF:
2832                         amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
2833                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
2834                         break;
2835                 case OP_IMUL_OVF_UN: {
2836                         /* the mul operation and the exception check should most likely be split */
2837                         int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
2838                         /*g_assert (ins->sreg2 == X86_EAX);
2839                         g_assert (ins->dreg == X86_EAX);*/
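                             /*
                              * mul needs one operand in %eax, leaves the
                              * result in %edx:%eax and sets OF/CF when the
                              * high half is non zero, so the operands are
                              * shuffled into %eax and %eax/%edx are saved
                              * unless the result overwrites them.
                              */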
2840                         if (ins->sreg2 == X86_EAX) {
2841                                 non_eax_reg = ins->sreg1;
2842                         } else if (ins->sreg1 == X86_EAX) {
2843                                 non_eax_reg = ins->sreg2;
2844                         } else {
2845                                 /* no need to save since we're going to store to it anyway */
2846                                 if (ins->dreg != X86_EAX) {
2847                                         saved_eax = TRUE;
2848                                         amd64_push_reg (code, X86_EAX);
2849                                 }
2850                                 amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, 4);
2851                                 non_eax_reg = ins->sreg2;
2852                         }
2853                         if (ins->dreg == X86_EDX) {
2854                                 if (!saved_eax) {
2855                                         saved_eax = TRUE;
2856                                         amd64_push_reg (code, X86_EAX);
2857                                 }
2858                         } else if (ins->dreg != X86_EAX) {
2859                                 saved_edx = TRUE;
2860                                 amd64_push_reg (code, X86_EDX);
2861                         }
2862                         amd64_mul_reg_size (code, non_eax_reg, FALSE, 4);
2863                         /* save before the check since pop and mov don't change the flags */
2864                         if (ins->dreg != X86_EAX)
2865                                 amd64_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
2866                         if (saved_edx)
2867                                 amd64_pop_reg (code, X86_EDX);
2868                         if (saved_eax)
2869                                 amd64_pop_reg (code, X86_EAX);
2870                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
2871                         break;
2872                 }
2873                 case OP_IDIV:
2874                         amd64_cdq_size (code, 4);
2875                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
2876                         break;
2877                 case OP_IDIV_UN:
2878                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
2879                         amd64_div_reg_size (code, ins->sreg2, 4, FALSE);
2880                         break;
2881                 case OP_IDIV_IMM:
2882                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
2883                         amd64_cdq_size (code, 4);
2884                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
2885                         break;
2886                 case OP_IREM:
2887                         amd64_cdq_size (code, 4);
2888                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
2889                         break;
2890                 case OP_IREM_UN:
2891                         amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
2892                         amd64_div_reg_size (code, ins->sreg2, 4, FALSE);
2893                         break;
2894                 case OP_IREM_IMM:
2895                         amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
2896                         amd64_cdq_size (code, 4);
2897                         amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
2898                         break;
2899
2900                 case OP_ICOMPARE:
2901                         amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
2902                         break;
2903                 case OP_ICOMPARE_IMM:
2904                         amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
2905                         break;
2906
2907                 case OP_IBEQ:
2908                 case OP_IBLT:
2909                 case OP_IBGT:
2910                 case OP_IBGE:
2911                 case OP_IBLE:
2912                         EMIT_COND_BRANCH (ins, opcode_to_x86_cond (ins->opcode), TRUE);
2913                         break;
2914                 case OP_IBNE_UN:
2915                 case OP_IBLT_UN:
2916                 case OP_IBGT_UN:
2917                 case OP_IBGE_UN:
2918                 case OP_IBLE_UN:
2919                         EMIT_COND_BRANCH (ins, opcode_to_x86_cond (ins->opcode), FALSE);
2920                         break;
2921                 case CEE_NOT:
2922                         amd64_not_reg (code, ins->sreg1);
2923                         break;
2924                 case CEE_NEG:
2925                         amd64_neg_reg (code, ins->sreg1);
2926                         break;
2927                 case OP_SEXT_I1:
2928                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
2929                         break;
2930                 case OP_SEXT_I2:
2931                         amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
2932                         break;
2933                 case OP_ICONST:
2934                 case OP_I8CONST:
2935                         /* FIXME: optimize this */
2936                         amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8);
2937                         break;
2938                 case OP_AOTCONST:
2939                         mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2940                         amd64_set_reg_template (code, ins->dreg);
2941                         break;
2942                 case CEE_CONV_I4:
2943                 case OP_MOVE:
2944                 case OP_SETREG:
2945                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (gpointer));
2946                         break;
2947                 case OP_AMD64_SET_XMMREG: {
2948                         /* FIXME: optimize this */
2949                         guint32 offset = mono_spillvar_offset_float (cfg, 0);
2950                         amd64_fst_membase (code, AMD64_RBP, offset, TRUE, TRUE);
2951                         /* ins->dreg is set to -1 by the reg allocator */
2952                         amd64_movsd_reg_membase (code, ins->unused, AMD64_RBP, offset);
2953                         break;
2954                 }
2955                 case CEE_CONV_U4:
2956                         g_assert_not_reached ();
2957                 case CEE_JMP: {
2958                         /*
2959                          * Note: this 'frame destruction' logic is useful for tail calls, too.
2960                          * Keep in sync with the code in emit_epilog.
2961                          */
2962                         int pos = 0;
2963
2964                         /* FIXME: no tracing support... */
2965                         if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
2966                                 code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
2967                         /* reset offset to make max_len work */
2968                         offset = code - cfg->native_code;
2969
2970                         g_assert (!cfg->method->save_lmf);
2971
2972                         if (cfg->used_int_regs & (1 << AMD64_RBX))
2973                                 pos -= 4;
2974                         if (cfg->used_int_regs & (1 << AMD64_RDI))
2975                                 pos -= 4;
2976                         if (cfg->used_int_regs & (1 << AMD64_RSI))
2977                                 pos -= 4;
2978                         if (pos)
2979                                 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
2980         
2981                         if (cfg->used_int_regs & (1 << AMD64_RSI))
2982                                 amd64_pop_reg (code, AMD64_RSI);
2983                         if (cfg->used_int_regs & (1 << AMD64_RDI))
2984                                 amd64_pop_reg (code, AMD64_RDI);
2985                         if (cfg->used_int_regs & (1 << AMD64_RBX))
2986                                 amd64_pop_reg (code, AMD64_RBX);
2987         
2988                         /* restore ESP/EBP */
2989                         amd64_leave (code);
2990                         offset = code - cfg->native_code;
2991                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2992                         x86_jump32 (code, 0);
2993                         break;
2994                 }
2995                 case OP_CHECK_THIS:
2996                         /* ensure ins->sreg1 is not NULL */
2997                         amd64_alu_membase_imm (code, X86_CMP, ins->sreg1, 0, 0);
2998                         break;
2999                 case OP_ARGLIST: {
3000                         int hreg = ins->sreg1 == AMD64_RAX? AMD64_RCX: AMD64_RAX;
3001                         amd64_push_reg (code, hreg);
3002                         amd64_lea_membase (code, hreg, AMD64_RBP, cfg->sig_cookie);
3003                         amd64_mov_membase_reg (code, ins->sreg1, 0, hreg, 8);
3004                         amd64_pop_reg (code, hreg);
3005                         break;
3006                 }
3007                 case OP_FCALL:
3008                 case OP_LCALL:
3009                 case OP_VCALL:
3010                 case OP_VOIDCALL:
3011                 case CEE_CALL:
3012                         call = (MonoCallInst*)ins;
3013                         if (ins->flags & MONO_INST_HAS_METHOD)
3014                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3015                         else {
3016                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3017                         }
3018                         EMIT_CALL ();
3019                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3020                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3021                         code = emit_move_return_value (cfg, ins, code);
3022                         break;
3023                 case OP_FCALL_REG:
3024                 case OP_LCALL_REG:
3025                 case OP_VCALL_REG:
3026                 case OP_VOIDCALL_REG:
3027                 case OP_CALL_REG:
3028                         call = (MonoCallInst*)ins;
3029                         amd64_call_reg (code, ins->sreg1);
3030                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3031                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3032                         code = emit_move_return_value (cfg, ins, code);
3033                         break;
3034                 case OP_FCALL_MEMBASE:
3035                 case OP_LCALL_MEMBASE:
3036                 case OP_VCALL_MEMBASE:
3037                 case OP_VOIDCALL_MEMBASE:
3038                 case OP_CALL_MEMBASE:
3039                         call = (MonoCallInst*)ins;
3040                         amd64_call_membase (code, ins->sreg1, ins->inst_offset);
3041                         if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
3042                                 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
3043                         code = emit_move_return_value (cfg, ins, code);
3044                         break;
3045                 case OP_OUTARG:
3046                 case OP_X86_PUSH:
3047                         amd64_push_reg (code, ins->sreg1);
3048                         break;
3049                 case OP_X86_PUSH_IMM:
3050                         g_assert (amd64_is_imm32 (ins->inst_imm));
3051                         amd64_push_imm (code, ins->inst_imm);
3052                         break;
3053                 case OP_X86_PUSH_MEMBASE:
3054                         amd64_push_membase (code, ins->inst_basereg, ins->inst_offset);
3055                         break;
3056                 case OP_X86_PUSH_OBJ: 
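                             /*
                              * Push a value type: make room on the stack, then
                              * copy it 4 bytes at a time with rep movsd, saving
                              * %rdi/%rsi/%rcx around the copy.
                              */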
3057                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ins->inst_imm);
3058                         amd64_push_reg (code, AMD64_RDI);
3059                         amd64_push_reg (code, AMD64_RSI);
3060                         amd64_push_reg (code, AMD64_RCX);
3061                         if (ins->inst_offset)
3062                                 amd64_lea_membase (code, AMD64_RSI, ins->inst_basereg, ins->inst_offset);
3063                         else
3064                                 amd64_mov_reg_reg (code, AMD64_RSI, ins->inst_basereg, 8);
3065                         amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, 24); /* skip the three regs pushed above */
3066                         amd64_mov_reg_imm (code, AMD64_RCX, (ins->inst_imm >> 2));
3067                         amd64_cld (code);
3068                         amd64_prefix (code, X86_REP_PREFIX);
3069                         amd64_movsd (code);
3070                         amd64_pop_reg (code, AMD64_RCX);
3071                         amd64_pop_reg (code, AMD64_RSI);
3072                         amd64_pop_reg (code, AMD64_RDI);
3073                         break;
3074                 case OP_X86_LEA:
3075                         amd64_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->unused);
3076                         break;
3077                 case OP_X86_LEA_MEMBASE:
3078                         amd64_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
3079                         break;
3080                 case OP_X86_XCHG:
3081                         amd64_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4);
3082                         break;
3083                 case OP_LOCALLOC:
3084                         /* keep alignment */
3085                         amd64_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1);
3086                         amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
3087                         code = mono_emit_stack_alloc (code, ins);
3088                         amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
3089                         break;
3090                 case CEE_RET:
3091                         amd64_ret (code);
3092                         break;
3093                 case CEE_THROW: {
3094                         amd64_push_reg (code, ins->sreg1);
3095                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, 
3096                                              (gpointer)"mono_arch_throw_exception");
3097                         EMIT_CALL ();
3098                         break;
3099                 }
3100                 case OP_CALL_HANDLER: 
3101                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3102                         amd64_call_imm (code, 0);
3103                         break;
3104                 case OP_LABEL:
3105                         ins->inst_c0 = code - cfg->native_code;
3106                         break;
3107                 case CEE_BR:
3108                         //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3109                         //if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3110                         //break;
3111                         if (ins->flags & MONO_INST_BRLABEL) {
3112                                 if (ins->inst_i0->inst_c0) {
3113                                         amd64_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
3114                                 } else {
3115                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3116                                         if ((cfg->opt & MONO_OPT_BRANCH) &&
3117                                             x86_is_imm8 (ins->inst_i0->inst_c1 - cpos))
3118                                                 x86_jump8 (code, 0);
3119                                         else 
3120                                                 x86_jump32 (code, 0);
3121                                 }
3122                         } else {
3123                                 if (ins->inst_target_bb->native_offset) {
3124                                         amd64_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset); 
3125                                 } else {
3126                                         mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3127                                         if ((cfg->opt & MONO_OPT_BRANCH) &&
3128                                             x86_is_imm8 (ins->inst_target_bb->max_offset - cpos))
3129                                                 x86_jump8 (code, 0);
3130                                         else 
3131                                                 x86_jump32 (code, 0);
3132                                 } 
3133                         }
3134                         break;
3135                 case OP_BR_REG:
3136                         amd64_jump_reg (code, ins->sreg1);
3137                         break;
3138                 case OP_CEQ:
3139                 case OP_ICEQ:
3140                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3141                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3142                         break;
3143                 case OP_CLT:
3144                 case OP_ICLT:
3145                         amd64_set_reg (code, X86_CC_LT, ins->dreg, TRUE);
3146                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3147                         break;
3148                 case OP_CLT_UN:
3149                 case OP_ICLT_UN:
3150                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
3151                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3152                         break;
3153                 case OP_CGT:
3154                 case OP_ICGT:
3155                         amd64_set_reg (code, X86_CC_GT, ins->dreg, TRUE);
3156                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3157                         break;
3158                 case OP_CGT_UN:
3159                 case OP_ICGT_UN:
3160                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
3161                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3162                         break;
3163                 case OP_COND_EXC_EQ:
3164                 case OP_COND_EXC_NE_UN:
3165                 case OP_COND_EXC_LT:
3166                 case OP_COND_EXC_LT_UN:
3167                 case OP_COND_EXC_GT:
3168                 case OP_COND_EXC_GT_UN:
3169                 case OP_COND_EXC_GE:
3170                 case OP_COND_EXC_GE_UN:
3171                 case OP_COND_EXC_LE:
3172                 case OP_COND_EXC_LE_UN:
3173                 case OP_COND_EXC_OV:
3174                 case OP_COND_EXC_NO:
3175                 case OP_COND_EXC_C:
3176                 case OP_COND_EXC_NC:
3177                         EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], 
3178                                                     (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
3179                         break;
3180                 case CEE_BEQ:
3181                 case CEE_BNE_UN:
3182                 case CEE_BLT:
3183                 case CEE_BLT_UN:
3184                 case CEE_BGT:
3185                 case CEE_BGT_UN:
3186                 case CEE_BGE:
3187                 case CEE_BGE_UN:
3188                 case CEE_BLE:
3189                 case CEE_BLE_UN:
3190                         EMIT_COND_BRANCH (ins, branch_cc_table [ins->opcode - CEE_BEQ], (ins->opcode < CEE_BNE_UN));
3191                         break;
3192
3193                 /* floating point opcodes */
3194                 case OP_R8CONST: {
3195                         double d = *(double *)ins->inst_p0;
3196
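                             /*
                              * 0.0 and 1.0 get fldz/fld1; other constants
                              * live in memory, and since their address is
                              * not known yet a mov reg, imm64 template is
                              * emitted here and patched with the real
                              * address later.
                              */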
3197                         if ((d == 0.0) && (mono_signbit (d) == 0)) {
3198                                 amd64_fldz (code);
3199                         } else if (d == 1.0) {
3200                                 x86_fld1 (code);
3201                         } else {
3202                                 /* FIXME: Use RIP relative addressing */
3203                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3204                                 amd64_set_reg_template (code, GP_SCRATCH_REG);
3205                                 amd64_fld_membase (code, GP_SCRATCH_REG, 0, TRUE);
3206                         }
3207                         break;
3208                 }
3209                 case OP_R4CONST: {
3210                         float f = *(float *)ins->inst_p0;
3211
3212                         if ((f == 0.0) && (mono_signbit (f) == 0)) {
3213                                 amd64_fldz (code);
3214                         } else if (f == 1.0) {
3215                                 x86_fld1 (code);
3216                         } else {
3217                                 /* FIXME: Use RIP relative addressing */
3218                                 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3219                                 amd64_set_reg_template (code, GP_SCRATCH_REG);
3220                                 amd64_fld_membase (code, GP_SCRATCH_REG, 0, FALSE);
3221                         }
3222                         break;
3223                 }
3224                 case OP_STORER8_MEMBASE_REG:
3225                         amd64_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, TRUE, TRUE);
3226                         break;
3227                 case OP_LOADR8_SPILL_MEMBASE:
3228                         amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
3229                         amd64_fxch (code, 1);
3230                         break;
3231                 case OP_LOADR8_MEMBASE:
3232                         amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
3233                         break;
3234                 case OP_STORER4_MEMBASE_REG:
3235                         amd64_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, FALSE, TRUE);
3236                         break;
3237                 case OP_LOADR4_MEMBASE:
3238                         amd64_fld_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
3239                         break;
3240                 case CEE_CONV_R4: /* FIXME: change precision */
3241                 case CEE_CONV_R8:
3242                         amd64_push_reg (code, ins->sreg1);
3243                         amd64_fild_membase (code, AMD64_RSP, 0, FALSE);
3244                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
3245                         break;
3246                 case OP_LCONV_TO_R4: /* FIXME: change precision */
3247                 case OP_LCONV_TO_R8:
3248                         amd64_push_reg (code, ins->sreg1);
3249                         amd64_fild_membase (code, AMD64_RSP, 0, TRUE);
3250                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
3251                         break;
3252                 case OP_X86_FP_LOAD_I8:
3253                         amd64_fild_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
3254                         break;
3255                 case OP_X86_FP_LOAD_I4:
3256                         amd64_fild_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
3257                         break;
3258                 case OP_FCONV_TO_I1:
3259                         code = emit_float_to_int (cfg, code, ins->dreg, 1, TRUE);
3260                         break;
3261                 case OP_FCONV_TO_U1:
3262                         code = emit_float_to_int (cfg, code, ins->dreg, 1, FALSE);
3263                         break;
3264                 case OP_FCONV_TO_I2:
3265                         code = emit_float_to_int (cfg, code, ins->dreg, 2, TRUE);
3266                         break;
3267                 case OP_FCONV_TO_U2:
3268                         code = emit_float_to_int (cfg, code, ins->dreg, 2, FALSE);
3269                         break;
3270                 case OP_FCONV_TO_I4:
3271                 case OP_FCONV_TO_I:
3272                         code = emit_float_to_int (cfg, code, ins->dreg, 4, TRUE);
3273                         break;
3274                 case OP_FCONV_TO_I8:
3275                         code = emit_float_to_int (cfg, code, ins->dreg, 8, TRUE);
3276                         break;
3277                 case OP_LCONV_TO_R_UN: { 
3278                         static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
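                             /*
                              * mn is 2^64 in 80 bit extended format
                              * (significand 0x8000000000000000, exponent
                              * 0x403f); adding it when the value is negative
                              * as a signed 64 bit integer yields the unsigned
                              * interpretation.
                              */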
3279                         guint8 *br;
3280
3281                         /* load 64bit integer to FP stack */
3282                         amd64_push_imm (code, 0);
3283                         amd64_push_reg (code, ins->sreg2);
3284                         amd64_push_reg (code, ins->sreg1);
3285                         amd64_fild_membase (code, AMD64_RSP, 0, TRUE);
3286                         /* store as 80bit FP value */
3287                         x86_fst80_membase (code, AMD64_RSP, 0);
3288                         
3289                         /* test if lreg is negative */
3290                         amd64_test_reg_reg (code, ins->sreg2, ins->sreg2);
3291                         br = code; x86_branch8 (code, X86_CC_GEZ, 0, TRUE);
3292         
3293                         /* add correction constant mn */
3294                         x86_fld80_mem (code, mn);
3295                         x86_fld80_membase (code, AMD64_RSP, 0);
3296                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
3297                         x86_fst80_membase (code, AMD64_RSP, 0);
3298
3299                         amd64_patch (br, code);
3300
3301                         x86_fld80_membase (code, AMD64_RSP, 0);
3302                         amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 24); /* three 8 byte pushes */
3303
3304                         break;
3305                 }
3306                 case OP_LCONV_TO_OVF_I: {
3307                         guint8 *br [3], *label [1];
3308
3309                         /* 
3310                          * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
3311                          */
3312                         amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
3313
3314                         /* If the low word top bit is set, see if we are negative */
3315                         br [0] = code; x86_branch8 (code, X86_CC_LT, 0, TRUE);
3316                         /* We are not negative (no top bit set), so check that our top word is zero */
3317                         amd64_test_reg_reg (code, ins->sreg2, ins->sreg2);
3318                         br [1] = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
3319                         label [0] = code;
3320
3321                         /* throw exception */
3322                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
3323                         x86_jump32 (code, 0);
3324         
3325                         amd64_patch (br [0], code);
3326                         /* our top bit is set, check that top word is 0xffffffff */
3327                         amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0xffffffff);
3328                 
3329                         amd64_patch (br [1], code);
3330                         /* nope, emit exception */
3331                         br [2] = code; x86_branch8 (code, X86_CC_NE, 0, TRUE);
3332                         amd64_patch (br [2], label [0]);
3333
3334                         if (ins->dreg != ins->sreg1)
3335                                 amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
3336                         break;
3337                 }
3338                 case CEE_CONV_OVF_U4:
3339                         /* FIXME: */
3340                         amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
3341                         break;
3342                 case OP_FADD:
3343                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
3344                         break;
3345                 case OP_FSUB:
3346                         amd64_fp_op_reg (code, X86_FSUB, 1, TRUE);
3347                         break;          
3348                 case OP_FMUL:
3349                         amd64_fp_op_reg (code, X86_FMUL, 1, TRUE);
3350                         break;          
3351                 case OP_FDIV:
3352                         amd64_fp_op_reg (code, X86_FDIV, 1, TRUE);
3353                         break;          
3354                 case OP_FNEG:
3355                         amd64_fchs (code);
3356                         break;          
3357                 case OP_SIN:
3358                         amd64_fsin (code);
3359                         amd64_fldz (code);
3360                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
3361                         break;          
3362                 case OP_COS:
3363                         amd64_fcos (code);
3364                         amd64_fldz (code);
3365                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
3366                         break;          
3367                 case OP_ABS:
3368                         amd64_fabs (code);
3369                         break;          
3370                 case OP_TAN: {
3371                         /* 
3372                          * it really doesn't make sense to inline all this code,
3373                          * it's here just to show that things may not be as simple 
3374                          * as they appear.
3375                          */
3376                         guchar *check_pos, *end_tan, *pop_jump;
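                             /*
                              * fptan sets C2 in the status word when the
                              * operand is out of range (|x| >= 2^63); in that
                              * case the argument is reduced modulo 2*pi
                              * (fldpi doubled by fadd, then a fprem1 loop)
                              * and fptan is retried.
                              */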
3377                         amd64_push_reg (code, AMD64_RAX);
3378                         amd64_fptan (code);
3379                         amd64_fnstsw (code);
3380                         amd64_test_reg_imm (code, AMD64_RAX, X86_FP_C2);
3381                         check_pos = code;
3382                         x86_branch8 (code, X86_CC_NE, 0, FALSE);
3383                         amd64_fstp (code, 0); /* pop the 1.0 */
3384                         end_tan = code;
3385                         x86_jump8 (code, 0);
3386                         amd64_fldpi (code);
3387                         amd64_fp_op (code, X86_FADD, 0);
3388                         amd64_fxch (code, 1);
3389                         x86_fprem1 (code);
3390                         amd64_fstsw (code);
3391                         amd64_test_reg_imm (code, AMD64_RAX, X86_FP_C2);
3392                         pop_jump = code;
3393                         x86_branch8 (code, X86_CC_NE, 0, FALSE);
3394                         amd64_fstp (code, 1);
3395                         amd64_fptan (code);
3396                         amd64_patch (pop_jump, code);
3397                         amd64_fstp (code, 0); /* pop the 1.0 */
3398                         amd64_patch (check_pos, code);
3399                         amd64_patch (end_tan, code);
3400                         amd64_fldz (code);
3401                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
3402                         amd64_pop_reg (code, AMD64_RAX);
3403                         break;
3404                 }
3405                 case OP_ATAN:
3406                         x86_fld1 (code);
3407                         amd64_fpatan (code);
3408                         amd64_fldz (code);
3409                         amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
3410                         break;          
3411                 case OP_SQRT:
3412                         amd64_fsqrt (code);
3413                         break;          
3414                 case OP_X86_FPOP:
3415                         amd64_fstp (code, 0);
3416                         break;          
3417                 case OP_FREM: {
3418                         guint8 *l1, *l2;
3419
3420                         amd64_push_reg (code, AMD64_RAX);
3421                         /* we need to exchange ST(0) with ST(1) */
3422                         amd64_fxch (code, 1);
3423
3424                         /* this requires a loop, because fprem sometimes 
3425                          * returns a partial remainder */
3426                         l1 = code;
3427                         /* looks like MS is using fprem instead of the IEEE compatible fprem1 */
3428                         /* x86_fprem1 (code); */
3429                         amd64_fprem (code);
3430                         amd64_fnstsw (code);
3431                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_C2);
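                        /* the jcc displacement is relative to the end of the 2-byte instruction, hence l2 = code + 2 */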
3432                         l2 = code + 2;
3433                         x86_branch8 (code, X86_CC_NE, l1 - l2, FALSE);
3434
3435                         /* pop result */
3436                         amd64_fstp (code, 1);
3437
3438                         amd64_pop_reg (code, AMD64_RAX);
3439                         break;
3440                 }
3441                 case OP_FCOMPARE:
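                        /* fcomip compares ST(0) with ST(1), sets ZF/PF/CF directly and pops once (PF means unordered); the following fstp pops the remaining operand */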
3442                         if (cfg->opt & MONO_OPT_FCMOV) {
3443                                 amd64_fcomip (code, 1);
3444                                 amd64_fstp (code, 0);
3445                                 break;
3446                         }
3447                         /* this overwrites %rax */
3448                         EMIT_FPCOMPARE(code);
3449                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
3450                         break;
3451                 case OP_FCEQ:
3452                         if (cfg->opt & MONO_OPT_FCMOV) {
3453                                 /* zeroing the register at the start results in 
3454                                  * shorter and faster code (we can also remove the widening op)
3455                                  */
3456                                 guchar *unordered_check;
3457                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3458                                 amd64_fcomip (code, 1);
3459                                 amd64_fstp (code, 0);
3460                                 unordered_check = code;
3461                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
3462                                 amd64_set_reg (code, X86_CC_EQ, ins->dreg, FALSE);
3463                                 amd64_patch (unordered_check, code);
3464                                 break;
3465                         }
3466                         if (ins->dreg != AMD64_RAX) 
3467                                 amd64_push_reg (code, AMD64_RAX);
3468
3469                         EMIT_FPCOMPARE(code);
3470                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
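                        /* after masking, X86_FP_C3 alone is set iff the operands compared equal (unordered sets C2 and C0 as well) */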
3471                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
3472                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3473                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3474
3475                         if (ins->dreg != AMD64_RAX) 
3476                                 amd64_pop_reg (code, AMD64_RAX);
3477                         break;
3478                 case OP_FCLT:
3479                 case OP_FCLT_UN:
3480                         if (cfg->opt & MONO_OPT_FCMOV) {
3481                                 /* zeroing the register at the start results in 
3482                                  * shorter and faster code (we can also remove the widening op)
3483                                  */
3484                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3485                                 amd64_fcomip (code, 1);
3486                                 amd64_fstp (code, 0);
3487                                 if (ins->opcode == OP_FCLT_UN) {
3488                                         guchar *unordered_check = code;
3489                                         guchar *jump_to_end;
3490                                         x86_branch8 (code, X86_CC_P, 0, FALSE);
3491                                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
3492                                         jump_to_end = code;
3493                                         x86_jump8 (code, 0);
3494                                         amd64_patch (unordered_check, code);
3495                                         amd64_inc_reg (code, ins->dreg);
3496                                         amd64_patch (jump_to_end, code);
3497                                 } else {
3498                                         amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
3499                                 }
3500                                 break;
3501                         }
3502                         if (ins->dreg != AMD64_RAX) 
3503                                 amd64_push_reg (code, AMD64_RAX);
3504
3505                         EMIT_FPCOMPARE(code);
3506                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
3507                         if (ins->opcode == OP_FCLT_UN) {
3508                                 guchar *is_not_zero_check, *end_jump;
3509                                 is_not_zero_check = code;
3510                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3511                                 end_jump = code;
3512                                 x86_jump8 (code, 0);
3513                                 amd64_patch (is_not_zero_check, code);
3514                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
3515
3516                                 amd64_patch (end_jump, code);
3517                         }
3518                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3519                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3520
3521                         if (ins->dreg != AMD64_RAX) 
3522                                 amd64_pop_reg (code, AMD64_RAX);
3523                         break;
3524                 case OP_FCGT:
3525                 case OP_FCGT_UN:
3526                         if (cfg->opt & MONO_OPT_FCMOV) {
3527                                 /* zeroing the register at the start results in 
3528                                  * shorter and faster code (we can also remove the widening op)
3529                                  */
3530                                 guchar *unordered_check;
3531                                 amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
3532                                 amd64_fcomip (code, 1);
3533                                 amd64_fstp (code, 0);
3534                                 if (ins->opcode == OP_FCGT) {
3535                                         unordered_check = code;
3536                                         x86_branch8 (code, X86_CC_P, 0, FALSE);
3537                                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
3538                                         amd64_patch (unordered_check, code);
3539                                 } else {
3540                                         amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
3541                                 }
3542                                 break;
3543                         }
3544                         if (ins->dreg != AMD64_RAX) 
3545                                 amd64_push_reg (code, AMD64_RAX);
3546
3547                         EMIT_FPCOMPARE(code);
3548                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, X86_FP_CC_MASK);
3549                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
3550                         if (ins->opcode == OP_FCGT_UN) {
3551                                 guchar *is_not_zero_check, *end_jump;
3552                                 is_not_zero_check = code;
3553                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3554                                 end_jump = code;
3555                                 x86_jump8 (code, 0);
3556                                 amd64_patch (is_not_zero_check, code);
3557                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
3558
3559                                 amd64_patch (end_jump, code);
3560                         }
3561                         amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
3562                         amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
3563
3564                         if (ins->dreg != AMD64_RAX) 
3565                                 amd64_pop_reg (code, AMD64_RAX);
3566                         break;
3567                 case OP_FBEQ:
3568                         if (cfg->opt & MONO_OPT_FCMOV) {
3569                                 guchar *jump = code;
3570                                 x86_branch8 (code, X86_CC_P, 0, TRUE);
3571                                 EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3572                                 amd64_patch (jump, code);
3573                                 break;
3574                         }
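                        /* X86_FP_C3 alone is set in the saved status word when the compare was equal */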
3575                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
3576                         EMIT_COND_BRANCH (ins, X86_CC_EQ, TRUE);
3577                         break;
3578                 case OP_FBNE_UN:
3579                         /* Branch if C013 != 100 */
3580                         if (cfg->opt & MONO_OPT_FCMOV) {
3581                                 /* branch if !ZF or (PF|CF) */
3582                                 EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3583                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
3584                                 EMIT_COND_BRANCH (ins, X86_CC_B, FALSE);
3585                                 break;
3586                         }
3587                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
3588                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3589                         break;
3590                 case OP_FBLT:
3591                         if (cfg->opt & MONO_OPT_FCMOV) {
3592                                 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
3593                                 break;
3594                         }
3595                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3596                         break;
3597                 case OP_FBLT_UN:
3598                         if (cfg->opt & MONO_OPT_FCMOV) {
3599                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
3600                                 EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
3601                                 break;
3602                         }
3603                         if (ins->opcode == OP_FBLT_UN) {
3604                                 guchar *is_not_zero_check, *end_jump;
3605                                 is_not_zero_check = code;
3606                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3607                                 end_jump = code;
3608                                 x86_jump8 (code, 0);
3609                                 amd64_patch (is_not_zero_check, code);
3610                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
3611
3612                                 amd64_patch (end_jump, code);
3613                         }
3614                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3615                         break;
3616                 case OP_FBGT:
3617                 case OP_FBGT_UN:
3618                         if (cfg->opt & MONO_OPT_FCMOV) {
3619                                 EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
3620                                 break;
3621                         }
3622                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
3623                         if (ins->opcode == OP_FBGT_UN) {
3624                                 guchar *is_not_zero_check, *end_jump;
3625                                 is_not_zero_check = code;
3626                                 x86_branch8 (code, X86_CC_NZ, 0, TRUE);
3627                                 end_jump = code;
3628                                 x86_jump8 (code, 0);
3629                                 amd64_patch (is_not_zero_check, code);
3630                                 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_CC_MASK);
3631
3632                                 amd64_patch (end_jump, code);
3633                         }
3634                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3635                         break;
3636                 case OP_FBGE:
3637                         /* Branch if C013 == 100 or 001 */
3638                         if (cfg->opt & MONO_OPT_FCMOV) {
3639                                 guchar *br1;
3640
3641                                 /* skip branch if C1=1 */
3642                                 br1 = code;
3643                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
3644                                 /* branch if (C0 | C3) = 1 */
3645                                 EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
3646                                 amd64_patch (br1, code);
3647                                 break;
3648                         }
3649                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
3650                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3651                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C3);
3652                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3653                         break;
3654                 case OP_FBGE_UN:
3655                         /* Branch if C013 == 000 */
3656                         if (cfg->opt & MONO_OPT_FCMOV) {
3657                                 EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
3658                                 break;
3659                         }
3660                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3661                         break;
3662                 case OP_FBLE:
3663                         /* Branch if C013=000 or 100 */
3664                         if (cfg->opt & MONO_OPT_FCMOV) {
3665                                 guchar *br1;
3666
3667                                 /* skip branch if C1=1 */
3668                                 br1 = code;
3669                                 x86_branch8 (code, X86_CC_P, 0, FALSE);
3670                                 /* branch if C0=0 */
3671                                 EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
3672                                 amd64_patch (br1, code);
3673                                 break;
3674                         }
3675                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, (X86_FP_C0|X86_FP_C1));
3676                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
3677                         EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
3678                         break;
3679                 case OP_FBLE_UN:
3680                         /* Branch if C013 != 001 */
3681                         if (cfg->opt & MONO_OPT_FCMOV) {
3682                                 EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
3683                                 EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
3684                                 break;
3685                         }
3686                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
3687                         EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
3688                         break;
3689                 case CEE_CKFINITE: {
3690                         amd64_push_reg (code, AMD64_RAX);
3691                         amd64_fxam (code);
3692                         amd64_fnstsw (code);
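                        /* fxam classified ST(0); after masking C3 and C0, C0 alone means NaN or infinity */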
3693                         amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, (X86_FP_C3|X86_FP_C0));
3694                         amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
3695                         amd64_pop_reg (code, AMD64_RAX);
3696                         EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");
3697                         break;
3698                 }
3699                 default:
3700                         g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3701                         g_assert_not_reached ();
3702                 }
3703
3704                 if ((code - cfg->native_code - offset) > max_len) {
3705                         g_warning ("wrong maximal instruction length for instruction %s (expected %d, got %ld)",
3706                                    mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
3707                         g_assert_not_reached ();
3708                 }
3709                
3710                 cpos += max_len;
3711
3712                 last_ins = ins;
3713                 last_offset = offset;
3714                 
3715                 ins = ins->next;
3716         }
3717
3718         cfg->code_len = code - cfg->native_code;
3719 }
3720
3721 void
3722 mono_arch_register_lowlevel_calls (void)
3723 {
3724 }
3725
3726 void
3727 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
3728 {
3729         MonoJumpInfo *patch_info;
3730
3731         for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3732                 unsigned char *ip = patch_info->ip.i + code;
3733                 const unsigned char *target;
3734
3735                 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3736
3737                 switch (patch_info->type) {
3738                 case MONO_PATCH_INFO_METHOD_REL:
3739                         *((gconstpointer *)(ip)) = target;
3740                         continue;
3741                 case MONO_PATCH_INFO_SWITCH: {
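                        /* ip + 2 presumably skips the REX.W prefix and opcode of a mov reg, imm64, so the store lands on its 8-byte immediate */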
3742                         *((gconstpointer *)(ip + 2)) = target;
3743                         continue;
3744                 }
3745                 case MONO_PATCH_INFO_IID:
3746                         *((guint32 *)(ip + 1)) = (guint32)target;
3747                         continue;                       
3748                 case MONO_PATCH_INFO_CLASS_INIT: {
3749                         guint8 *code = ip;
3750                         /* Might already have been changed to a nop */
3751                         amd64_call_imm (code, 0);
3752                         break;
3753                 }
3754                 case MONO_PATCH_INFO_R4:
3755                 case MONO_PATCH_INFO_R8:
3756                         *((gconstpointer *)(ip + 2)) = target;
3757                         continue;
3758                 case MONO_PATCH_INFO_METHODCONST:
3759                 case MONO_PATCH_INFO_CLASS:
3760                 case MONO_PATCH_INFO_IMAGE:
3761                 case MONO_PATCH_INFO_FIELD:
3762                 case MONO_PATCH_INFO_VTABLE:
3763                 case MONO_PATCH_INFO_SFLDA:
3764                 case MONO_PATCH_INFO_EXC_NAME:
3765                 case MONO_PATCH_INFO_LDSTR:
3766                 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
3767                 case MONO_PATCH_INFO_LDTOKEN:
3768                 case MONO_PATCH_INFO_IP:
3769                         *((gconstpointer *)(ip + 2)) = target;
3770                         continue;
3771                 case MONO_PATCH_INFO_METHOD:
3772                 case MONO_PATCH_INFO_ABS:
3773                 case MONO_PATCH_INFO_INTERNAL_METHOD:
3774                         *((gconstpointer *)(ip + 2)) = target;
3775                         continue;
3776                 default:
3777                         break;
3778                 }
3779                 amd64_patch (ip, (gpointer)target);
3780         }
3781 }
3782
3783 int
3784 mono_arch_max_epilog_size (MonoCompile *cfg)
3785 {
3786         int exc_count = 0, max_epilog_size = 16;
3787         MonoJumpInfo *patch_info;
3788         
3789         if (cfg->method->save_lmf)
3790                 max_epilog_size += 128;
3791         
3792         if (mono_jit_trace_calls != NULL)
3793                 max_epilog_size += 50;
3794
3795         if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3796                 max_epilog_size += 50;
3797
3798         max_epilog_size += (AMD64_NREG * 2);
3799
3800         /* count the number of exception infos */
3801      
3802         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
3803                 if (patch_info->type == MONO_PATCH_INFO_EXC)
3804                         exc_count++;
3805         }
3806
3807         /* 
3808          * make sure we have enough space for exceptions;
3809          * 16 is the size of two push_imm instructions and a call
3810          */
3811         max_epilog_size += exc_count*16;
3812
3813         return max_epilog_size;
3814 }
3815
3816 guint8 *
3817 mono_arch_emit_prolog (MonoCompile *cfg)
3818 {
3819         MonoMethod *method = cfg->method;
3820         MonoBasicBlock *bb;
3821         MonoMethodSignature *sig;
3822         MonoInst *inst;
3823         int alloc_size, pos, max_offset, i;
3824         guint8 *code;
3825         CallInfo *cinfo;
3826
3827         cfg->code_size =  MAX (((MonoMethodNormal *)method)->header->code_size * 4, 256);
3828         code = cfg->native_code = g_malloc (cfg->code_size);
3829
3830         amd64_push_reg (code, AMD64_RBP);
3831         amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (gpointer));
3832
3833         alloc_size = cfg->stack_offset;
3834         pos = 0;
3835
3836         if (method->save_lmf) {
3837                 /* FIXME: */
3838 #if 0
3839                 pos += sizeof (MonoLMF);
3840
3841                 /* save the current IP */
3842                 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
3843                 amd64_set_reg_template (code, GP_SCRATCH_REG);
3844                 amd64_push_reg (code, GP_SCRATCH_REG);
3845
3846                 /* save all callee-saved regs */
3847                 amd64_push_reg (code, AMD64_RBP);
3848                 amd64_push_reg (code, AMD64_RBX);
3849                 amd64_push_reg (code, AMD64_R12);
3850                 amd64_push_reg (code, AMD64_R13);
3851                 amd64_push_reg (code, AMD64_R14);
3852                 amd64_push_reg (code, AMD64_R15);
3853
3854                 /* save method info */
3855                 amd64_mov_reg_imm (code, GP_SCRATCH_REG, method);
3856                 amd64_push_reg (code, GP_SCRATCH_REG);
3857
3858                 /* get the address of lmf for the current thread */
3859                 /* 
3860                  * This is performance critical so we try to use some tricks to make
3861                  * it fast.
3862                  */
3863                 if (lmf_tls_offset != -1) {
3864                         NOT_IMPLEMENTED;
3865
3866                         /* Load lmf quickly using the GS register */
3867                         amd64_prefix (code, X86_GS_PREFIX);
3868                         amd64_mov_reg_mem (code, AMD64_RAX, 0, 4);
3869                         amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, lmf_tls_offset, 4);
3870                 }
3871                 else {
3872                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, 
3873                                                                  (gpointer)"mono_get_lmf_addr");
3874                         EMIT_CALL ();
3875                 }
3876
3877                 /* push lmf */
3878                 amd64_push_reg (code, AMD64_RAX); 
3879                 /* push *lmf (previous_lmf) */
3880                 amd64_push_membase (code, AMD64_RAX, 0);
3881                 /* *(lmf) = RSP */
3882                 amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_RSP, sizeof (gpointer));
3883 #endif
3884         } else {
3885
3886                 for (i = 0; i < AMD64_NREG; ++i)
3887                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
3888                                 amd64_push_reg (code, i);
3889                                 pos += sizeof (gpointer);
3890                         }
3891         }
3892
3893         alloc_size -= pos;
3894
3895         if (alloc_size) {
3896                 /* See mono_emit_stack_alloc */
3897 #ifdef PLATFORM_WIN32
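                /* probe the stack one page at a time so the OS grows the guard page as we go */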
3898                 guint32 remaining_size = alloc_size;
3899                 while (remaining_size >= 0x1000) {
3900                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
3901                         amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
3902                         remaining_size -= 0x1000;
3903                 }
3904                 if (remaining_size)
3905                         amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, remaining_size);
3906 #else
3907                 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size);
3908 #endif
3909         }
3910
3911         /* compute max_offset in order to use short forward jumps */
3912         max_offset = 0;
3913         if (cfg->opt & MONO_OPT_BRANCH) {
3914                 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3915                         MonoInst *ins = bb->code;
3916                         bb->max_offset = max_offset;
3917
3918                         if (cfg->prof_options & MONO_PROFILE_COVERAGE)
3919                                 max_offset += 6;
3920                         /* max alignment for loops */
3921                         if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
3922                                 max_offset += LOOP_ALIGNMENT;
3923
3924                         while (ins) {
3925                                 if (ins->opcode == OP_LABEL)
3926                                         ins->inst_c1 = max_offset;
3927                                 
3928                                 max_offset += ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
3929                                 ins = ins->next;
3930                         }
3931                 }
3932         }
3933
3934         if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3935                 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
3936
3937         sig = method->signature;
3938         pos = 0;
3939
3940         cinfo = get_call_info (sig, FALSE);
3941
3942         for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3943                 ArgInfo *ainfo = cinfo->args + i;
3944                 gint32 stack_offset;
3945                 MonoType *arg_type;
3946                 inst = cfg->varinfo [i];
3947
3948                 if (sig->hasthis && (i == 0))
3949                         arg_type = &mono_defaults.object_class->byval_arg;
3950                 else
3951                         arg_type = sig->params [i - sig->hasthis];
3952
3953                 stack_offset = ainfo->offset + ARGS_OFFSET;
3954
3955                 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
3956                         /* Argument in register, but needs to be saved to the stack */
3957                         amd64_mov_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg, sizeof (gpointer));
3958                 }
3959                 if ((ainfo->storage == ArgInSSEReg) && (inst->opcode != OP_REGVAR)) {
3960                         /* Argument in register, but needs to be saved to the stack */
3961                         amd64_movsd_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg);
3962                 }
3963
3964                 if (inst->opcode == OP_REGVAR) {
3965                         /* Argument allocated to (non-volatile) register */
3966                         NOT_IMPLEMENTED;
3967                 }
3968         }
3969
3970         g_free (cinfo);
3971
3972         cfg->code_len = code - cfg->native_code;
3973
3974         return code;
3975 }
3976
3977 void
3978 mono_arch_emit_epilog (MonoCompile *cfg)
3979 {
3980         MonoJumpInfo *patch_info;
3981         MonoMethod *method = cfg->method;
3982         MonoMethodSignature *sig = method->signature;
3983         int pos, i;
3984         guint32 stack_to_pop;
3985         guint8 *code;
3986
3987         code = cfg->native_code + cfg->code_len;
3988
3989         if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3990                 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
3991
3992         /* the code restoring the registers must be kept in sync with CEE_JMP */
3993         pos = 0;
3994         
3995         if (method->save_lmf) {
3996                 /* FIXME: */
3997 #if 0
3998                 gint32 prev_lmf_reg;
3999
4000                 /* Find a spare register */
4001                 prev_lmf_reg = GP_SCRATCH_REG;
4002
4003                 /* reg = previous_lmf */
4004                 amd64_mov_reg_membase (code, prev_lmf_reg, AMD64_RBP, -32, sizeof (gpointer));
4005
4006                 /* rcx = lmf */
4007                 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, -28, sizeof (gpointer));
4008
4009                 /* *(lmf) = previous_lmf */
4010                 amd64_mov_membase_reg (code, AMD64_RCX, 0, prev_lmf_reg, sizeof (gpointer));
4011
4012                 /* restore callee-saved regs */
4013                 if (cfg->used_int_regs & (1 << AMD64_RBX)) {
4014                         amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RBP, -20, sizeof (gpointer));
4015                 }
4016
4017                 if (cfg->used_int_regs & (1 << AMD64_RDI)) {
4018                         amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RBP, -16, sizeof (gpointer));
4019                 }
4020                 if (cfg->used_int_regs & (1 << AMD64_RSI)) {
4021                         amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RBP, -12, sizeof (gpointer));
4022                 }
4023 #endif
4024
4025                 /* RBP is restored by LEAVE */
4026         } else {
4027
4028                 for (i = 0; i < AMD64_NREG; ++i)
4029                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
4030                                 pos -= sizeof (gpointer);
4031
4032                 if (pos)
4033                         amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
4034
4035                 /* Pop registers in reverse order */
4036                 for (i = AMD64_NREG - 1; i > 0; --i)
4037                         if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
4038                                 amd64_pop_reg (code, i);
4039                         }
4040         }
4041
4042         amd64_leave (code);
4043
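        /* FIXME: the popping logic below is inherited from mini-x86.c; on amd64 a stack slot is 8 bytes, so stack_to_pop = 4 looks wrong */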
4044         if (CALLCONV_IS_STDCALL (sig->call_convention)) {
4045                 MonoJitArgumentInfo *arg_info = alloca (sizeof (MonoJitArgumentInfo) * (sig->param_count + 1));
4046
4047                 stack_to_pop = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
4048         } else if (MONO_TYPE_ISSTRUCT (cfg->method->signature->ret))
4049                 stack_to_pop = 4;
4050         else
4051                 stack_to_pop = 0;
4052
4053         if (stack_to_pop)
4054                 amd64_ret_imm (code, stack_to_pop);
4055         else
4056                 amd64_ret (code);
4057
4058         /* add code to raise exceptions */
4059         for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4060                 switch (patch_info->type) {
4061                 case MONO_PATCH_INFO_EXC:
4062                         amd64_patch (patch_info->ip.i + cfg->native_code, code);
4063                         mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);
4064                         amd64_push_imm (code, patch_info->data.target);
4065                         mono_add_patch_info (cfg, code + 1 - cfg->native_code, MONO_PATCH_INFO_METHOD_REL, (gpointer)patch_info->ip.i);
4066                         amd64_push_imm (code, patch_info->ip.i + cfg->native_code);
4067                         patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4068                         patch_info->data.name = "mono_arch_throw_exception_by_name";
4069                         patch_info->ip.i = code - cfg->native_code;
4070                         amd64_jump_code (code, 0);
4071                         break;
4072                 default:
4073                         /* do nothing */
4074                         break;
4075                 }
4076         }
4077
4078         cfg->code_len = code - cfg->native_code;
4079
4080         g_assert (cfg->code_len < cfg->code_size);
4081
4082 }
4083
4084 void
4085 mono_arch_flush_icache (guint8 *code, gint size)
4086 {
4087         /* not needed */
4088 }
4089
4090 void
4091 mono_arch_flush_register_windows (void)
4092 {
4093 }
4094
4095 gboolean 
4096 mono_arch_is_inst_imm (gint64 imm)
4097 {
4098         return amd64_is_imm32 (imm);
4099 }
4100
4101 /*
4102  * Support for fast access to the thread-local lmf structure using the GS
4103  * segment register on NPTL + kernel 2.6.x.
4104  */
4105
4106 static gboolean tls_offset_inited = FALSE;
4107
4108 void
4109 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4110 {
4111 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4112         pthread_t self = pthread_self();
4113         pthread_attr_t attr;
4114         void *staddr = NULL;
4115         size_t stsize = 0;
4116         struct sigaltstack sa;
4117 #endif
4118
4119         if (!tls_offset_inited) {
4120                 guint8 *code;
4121
4122                 tls_offset_inited = TRUE;
4123
4124                 code = (guint8*)mono_get_lmf_addr;
4125
4126                 if (getenv ("MONO_NPTL")) {
4127                         /* 
4128                          * Determine the offset of mono_get_lmf_addr inside the TLS structures
4129                          * by disassembling the function above.
4130                          */
4131
4132                         /* This is generated by gcc 3.3.2 */
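                        /* the pattern decodes as the ia32 sequence: push %ebp; mov %esp,%ebp; mov %gs:0x0,%eax; mov off(%eax),%eax */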
4133                         if ((code [0] == 0x55) && (code [1] == 0x89) && (code [2] == 0xe5) &&
4134                                 (code [3] == 0x65) && (code [4] == 0xa1) && (code [5] == 0x00) &&
4135                                 (code [6] == 0x00) && (code [7] == 0x00) && (code [8] == 0x00) &&
4136                                 (code [9] == 0x8b) && (code [10] == 0x80)) {
4137                                 lmf_tls_offset = *(int*)&(code [11]);
4138                         }
4139                         else
4140                                 /* This is generated by gcc-3.4 */
4141                                 if ((code [0] == 0x55) && (code [1] == 0x89) && (code [2] == 0xe5) &&
4142                                         (code [3] == 0x65) && (code [4] == 0xa1)) {
4143                                         lmf_tls_offset = *(int*)&(code [5]);
4144                                 }
4145                 }
4146         }               
4147
4148 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4149
4150         /* Determine stack boundaries */
4151         if (!mono_running_on_valgrind ()) {
4152 #ifdef HAVE_PTHREAD_GETATTR_NP
4153                 pthread_getattr_np( self, &attr );
4154 #else
4155 #ifdef HAVE_PTHREAD_ATTR_GET_NP
4156                 pthread_attr_get_np( self, &attr );
4157 #elif defined(sun)
4158                 pthread_attr_init( &attr );
4159                 pthread_attr_getstacksize( &attr, &stsize );
4160 #else
4161 #error "Not implemented"
4162 #endif
4163 #endif
4164 #ifndef sun
4165                 pthread_attr_getstack( &attr, &staddr, &stsize );
4166 #endif
4167         }
4168
4169         /* 
4170          * staddr seems to be wrong for the main thread, so we keep the value in
4171          * tls->end_of_stack
4172          */
4173         tls->stack_size = stsize;
4174
4175         /* Setup an alternate signal stack */
4176         tls->signal_stack = g_malloc (SIGNAL_STACK_SIZE);
4177         tls->signal_stack_size = SIGNAL_STACK_SIZE;
4178
4179         sa.ss_sp = tls->signal_stack;
4180         sa.ss_size = SIGNAL_STACK_SIZE;
4181         sa.ss_flags = SS_ONSTACK;
4182         sigaltstack (&sa, NULL);
4183 #endif
4184 }
4185
4186 void
4187 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
4188 {
4189 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4190         struct sigaltstack sa;
4191
4192         sa.ss_sp = tls->signal_stack;
4193         sa.ss_size = SIGNAL_STACK_SIZE;
4194         sa.ss_flags = SS_DISABLE;
4195         sigaltstack  (&sa, NULL);
4196
4197         if (tls->signal_stack)
4198                 g_free (tls->signal_stack);
4199 #endif
4200 }
4201
4202 void
4203 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
4204 {
4205
4206         /* add the this argument */
4207         if (this_reg != -1) {
4208                 MonoInst *this;
4209                 MONO_INST_NEW (cfg, this, OP_OUTARG);
4210                 this->type = this_type;
4211                 this->sreg1 = this_reg;
4212                 mono_bblock_add_inst (cfg->cbb, this);
4213         }
4214
4215         if (vt_reg != -1) {
4216                 MonoInst *vtarg;
4217                 MONO_INST_NEW (cfg, vtarg, OP_OUTARG);
4218                 vtarg->type = STACK_MP;
4219                 vtarg->sreg1 = vt_reg;
4220                 mono_bblock_add_inst (cfg->cbb, vtarg);
4221         }
4222 }
4223
4224
4225 gint
4226 mono_arch_get_opcode_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4227 {
4228         if (cmethod->klass == mono_defaults.math_class) {
4229                 if (strcmp (cmethod->name, "Sin") == 0)
4230                         return OP_SIN;
4231                 else if (strcmp (cmethod->name, "Cos") == 0)
4232                         return OP_COS;
4233                 else if (strcmp (cmethod->name, "Tan") == 0)
4234                         return OP_TAN;
4235                 else if (strcmp (cmethod->name, "Atan") == 0)
4236                         return OP_ATAN;
4237                 else if (strcmp (cmethod->name, "Sqrt") == 0)
4238                         return OP_SQRT;
4239                 else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8)
4240                         return OP_ABS;
4241 #if 0
4242                 /* OP_FREM is not IEEE compatible */
4243                 else if (strcmp (cmethod->name, "IEEERemainder") == 0)
4244                         return OP_FREM;
4245 #endif
4246                 else
4247                         return -1;
4248         }
4249
4250         return -1;
4252 }
4253
4254
4255 gboolean
4256 mono_arch_print_tree (MonoInst *tree, int arity)
4257 {
4258         return 0;
4259 }