2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
20 #ifdef HAVE_SYS_TIME_H
24 #ifdef HAVE_VALGRIND_MEMCHECK_H
25 #include <valgrind/memcheck.h>
28 #include <mono/metadata/assembly.h>
29 #include <mono/metadata/loader.h>
30 #include <mono/metadata/tabledefs.h>
31 #include <mono/metadata/class.h>
32 #include <mono/metadata/object.h>
33 #include <mono/metadata/exception.h>
34 #include <mono/metadata/opcodes.h>
35 #include <mono/metadata/mono-endian.h>
36 #include <mono/metadata/tokentype.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/threads.h>
39 #include <mono/metadata/marshal.h>
40 #include <mono/metadata/socket-io.h>
41 #include <mono/metadata/appdomain.h>
42 #include <mono/metadata/debug-helpers.h>
43 #include <mono/io-layer/io-layer.h>
44 #include "mono/metadata/profiler.h"
45 #include <mono/metadata/profiler-private.h>
46 #include <mono/metadata/mono-config.h>
47 #include <mono/metadata/environment.h>
48 #include <mono/metadata/mono-debug.h>
49 #include <mono/metadata/mono-debug-debugger.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/rawbuffer.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/utils/mono-math.h>
57 #include <mono/utils/mono-compiler.h>
58 #include <mono/os/gc_wrapper.h>
68 #include "jit-icalls.h"
72 #define BRANCH_COST 100
73 #define INLINE_LENGTH_LIMIT 20
74 #define INLINE_FAILURE do {\
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
78 #define CHECK_CFG_EXCEPTION do {\
79 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
82 #define METHOD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
85 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
86 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (cil_method_fname); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE do { \
92 char *method_fname = mono_method_full_name (method, TRUE); \
93 char *field_fname = mono_field_full_name (field); \
94 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
95 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
96 g_free (method_fname); \
97 g_free (field_fname); \
98 goto exception_exit; \
100 #define GENERIC_SHARING_FAILURE(opcode) do { \
101 if (cfg->generic_sharing_context) { \
102 if (cfg->verbose_level > -1) \
103 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
104 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
105 goto exception_exit; \
108 #define GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(opcode) do { \
109 if (method->klass->valuetype) \
110 GENERIC_SHARING_FAILURE ((opcode)); \
113 /* Determine whenever 'ins' represents a load of the 'this' argument */
114 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Opcode translation helpers: map CIL ldind/stind opcodes to the
 * corresponding *_MEMBASE load/store IR opcodes. */
116 static int ldind_to_load_membase (int opcode);
117 static int stind_to_store_membase (int opcode);
/* Map an opcode to its immediate-operand form; the _noemul variant
 * presumably excludes opcodes that are software-emulated — TODO confirm
 * at the definitions. */
119 int mono_op_to_op_imm (int opcode);
120 int mono_op_to_op_imm_noemul (int opcode);
/* Emission helpers shared with other JIT translation units. */
122 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
123 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
124 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
/* Main CIL -> IR conversion entry point (new-IR variant); also invoked
 * recursively when inlining (see the inline_* parameters). */
126 int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
127 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
128 guint inline_offset, gboolean is_virtual_call);
130 /* helper methods signature */
/* Cached trampoline signatures; initialized elsewhere (not in this view). */
131 MonoMethodSignature *helper_sig_class_init_trampoline;
132 MonoMethodSignature *helper_sig_domain_get;
133 MonoMethodSignature *helper_sig_generic_class_init_trampoline;
134 MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
137 * Instruction metadata
142 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
147 #if SIZEOF_VOID_P == 8
152 /* keep in sync with the enum in mini.h */
155 #include "mini-ops.h"
159 extern GHashTable *jit_icall_name_hash;
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
168 mono_alloc_ireg (MonoCompile *cfg)
/* Thin public wrappers over the static register-allocation helpers
 * (alloc_ireg/freg/preg/dreg), exported for use by other files. */
170 return alloc_ireg (cfg);
174 mono_alloc_freg (MonoCompile *cfg)
176 return alloc_freg (cfg);
180 mono_alloc_preg (MonoCompile *cfg)
182 return alloc_preg (cfg);
186 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
/* Allocates a destination vreg appropriate for the given stack type. */
188 return alloc_dreg (cfg, stack_type);
192 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
/* NOTE(review): presumably returns the OP_*MOVE opcode used to copy a
 * value of @type between registers — confirm; several switch cases and
 * the return statements are not visible in this view. */
198 switch (type->type) {
201 case MONO_TYPE_BOOLEAN:
213 case MONO_TYPE_FNPTR:
215 case MONO_TYPE_CLASS:
216 case MONO_TYPE_STRING:
217 case MONO_TYPE_OBJECT:
218 case MONO_TYPE_SZARRAY:
219 case MONO_TYPE_ARRAY:
223 #if SIZEOF_VOID_P == 8
232 case MONO_TYPE_VALUETYPE:
/* Enums are classified by their underlying integral type. */
233 if (type->data.klass->enumtype) {
234 type = type->data.klass->enum_basetype;
238 case MONO_TYPE_TYPEDBYREF:
240 case MONO_TYPE_GENERICINST:
/* Generic instances are classified via their generic type definition. */
241 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only legal here under generic sharing. */
245 g_assert (cfg->generic_sharing_context);
248 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
254 mono_print_bb (MonoBasicBlock *bb, const char *msg)
/* Debug helper: dumps a basic block's predecessor edges, successor
 * edges and instruction list to stdout, prefixed with @msg. */
259 printf ("\n%s %d: [IN: ", msg, bb->block_num);
/* predecessor edges */
260 for (i = 0; i < bb->in_count; ++i)
261 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
/* successor edges */
263 for (i = 0; i < bb->out_count; ++i)
264 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* instruction list */
266 for (tree = bb->code; tree; tree = tree->next)
267 mono_print_ins_index (-1, tree);
270 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
272 #define GET_BBLOCK(cfg,tblock,ip) do { \
273 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
275 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
276 NEW_BBLOCK (cfg, (tblock)); \
277 (tblock)->cil_code = (ip); \
278 ADD_BBLOCK (cfg, (tblock)); \
282 #define CHECK_BBLOCK(target,ip,tblock) do { \
283 if ((target) < (ip) && !(tblock)->code) { \
284 bb_recheck = g_list_prepend (bb_recheck, (tblock)); \
285 if (cfg->verbose_level > 2) printf ("queued block %d for check at IL%04x from IL%04x\n", (tblock)->block_num, (int)((target) - header->code), (int)((ip) - header->code)); \
289 #ifndef MONO_ARCH_EMIT_BOUNDS_CHECK
290 #define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
291 int _length_reg = alloc_ireg (cfg); \
292 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, _length_reg, array_reg, offset); \
293 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, _length_reg, index_reg); \
294 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
298 #define MONO_EMIT_BOUNDS_CHECK(cfg, array_reg, array_type, array_length_field, index_reg) do { \
299 if (!(cfg->opt & MONO_OPT_ABCREM)) { \
300 MONO_ARCH_EMIT_BOUNDS_CHECK ((cfg), (array_reg), G_STRUCT_OFFSET (array_type, array_length_field), (index_reg)); \
303 MONO_INST_NEW ((cfg), ins, OP_BOUNDS_CHECK); \
304 ins->sreg1 = array_reg; \
305 ins->sreg2 = index_reg; \
306 ins->inst_imm = G_STRUCT_OFFSET (array_type, array_length_field); \
307 MONO_ADD_INS ((cfg)->cbb, ins); \
308 (cfg)->flags |= MONO_CFG_HAS_ARRAY_ACCESS; \
309 (cfg)->cbb->has_array_access = TRUE; \
313 #if defined(__i386__) || defined(__x86_64__)
314 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
315 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
316 (dest)->dreg = alloc_preg ((cfg)); \
317 (dest)->sreg1 = (sr1); \
318 (dest)->sreg2 = (sr2); \
319 (dest)->inst_imm = (imm); \
320 (dest)->backend.shift_amount = (shift); \
321 MONO_ADD_INS ((cfg)->cbb, (dest)); \
325 #if SIZEOF_VOID_P == 8
326 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
327 /* FIXME: Need to add many more cases */ \
328 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
330 int dr = alloc_preg (cfg); \
331 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
332 (ins)->sreg2 = widen->dreg; \
336 #define ADD_WIDEN_OP(arg1, arg2)
339 #define ADD_BINOP(op) do { \
340 MONO_INST_NEW (cfg, ins, (op)); \
342 ins->sreg1 = sp [0]->dreg; \
343 ins->sreg2 = sp [1]->dreg; \
344 type_from_op (ins, sp [0], sp [1]); \
346 /* Have to insert a widening op */ \
347 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
348 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
349 MONO_ADD_INS ((cfg)->cbb, (ins)); \
351 mono_decompose_opcode ((cfg), (ins)); \
354 #define ADD_UNOP(op) do { \
355 MONO_INST_NEW (cfg, ins, (op)); \
357 ins->sreg1 = sp [0]->dreg; \
358 type_from_op (ins, sp [0], NULL); \
360 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
361 MONO_ADD_INS ((cfg)->cbb, (ins)); \
363 mono_decompose_opcode (cfg, ins); \
366 #define ADD_BINCOND(next_block) do { \
369 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
370 cmp->sreg1 = sp [0]->dreg; \
371 cmp->sreg2 = sp [1]->dreg; \
372 type_from_op (cmp, sp [0], sp [1]); \
374 type_from_op (ins, sp [0], sp [1]); \
375 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
376 GET_BBLOCK (cfg, tblock, target); \
377 link_bblock (cfg, bblock, tblock); \
378 ins->inst_true_bb = tblock; \
379 CHECK_BBLOCK (target, ip, tblock); \
380 if ((next_block)) { \
381 link_bblock (cfg, bblock, (next_block)); \
382 ins->inst_false_bb = (next_block); \
383 start_new_bblock = 1; \
385 GET_BBLOCK (cfg, tblock, ip); \
386 link_bblock (cfg, bblock, tblock); \
387 ins->inst_false_bb = tblock; \
388 start_new_bblock = 2; \
390 if (sp != stack_start) { \
391 handle_stack_args (cfg, stack_start, sp - stack_start); \
392 CHECK_UNVERIFIABLE (cfg); \
394 MONO_ADD_INS (bblock, cmp); \
395 MONO_ADD_INS (bblock, ins); \
399 * link_bblock: Links two basic blocks
401 * links two basic blocks in the control flow graph, the 'from'
402 * argument is the starting block and the 'to' argument is the block
403 * the control flow ends to after 'from'.
406 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
408 MonoBasicBlock **newa;
/* Optional tracing of the CFG edge being added. */
412 if (from->cil_code) {
414 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
416 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
419 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
421 printf ("edge from entry to exit\n");
/* If the edge already exists in from's out list, nothing to do. */
426 for (i = 0; i < from->out_count; ++i) {
427 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one slot; mempool arrays cannot be resized
 * in place, so copy the old entries over. */
433 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
434 for (i = 0; i < from->out_count; ++i) {
435 newa [i] = from->out_bb [i];
/* Same duplicate check and growth for to's in list. */
443 for (i = 0; i < to->in_count; ++i) {
444 if (from == to->in_bb [i]) {
450 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
451 for (i = 0; i < to->in_count; ++i) {
452 newa [i] = to->in_bb [i];
461 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
/* Public wrapper around the static link_bblock (). */
463 link_bblock (cfg, from, to);
467 * mono_find_block_region:
469 * We mark each basic block with a region ID. We use that to avoid BB
470 * optimizations when blocks are in different regions.
473 * A region token that encodes where this region is, and information
474 * about the clause owner for this block.
476 * The region encodes the try/catch/filter clause that owns this block
477 * as well as the type. -1 is a special value that represents a block
478 * that is in none of try/catch/filter.
481 mono_find_block_region (MonoCompile *cfg, int offset)
483 MonoMethod *method = cfg->method;
484 MonoMethodHeader *header = mono_method_get_header (method);
485 MonoExceptionClause *clause;
488 /* first search for handlers and filters */
489 for (i = 0; i < header->num_clauses; ++i) {
490 clause = &header->clauses [i];
/* A filter region runs from filter_offset up to handler_offset. */
491 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
492 (offset < (clause->handler_offset)))
493 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* The region token is (clause index + 1) << 8, tagged with the
 * region kind and the clause flags. */
495 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
496 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
497 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
498 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
499 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
501 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
505 /* search the try blocks */
506 for (i = 0; i < header->num_clauses; ++i) {
507 clause = &header->clauses [i];
508 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
509 return ((i + 1) << 8) | clause->flags;
516 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
518 MonoMethod *method = cfg->method;
519 MonoMethodHeader *header = mono_method_get_header (method);
520 MonoExceptionClause *clause;
521 MonoBasicBlock *handler;
/* Collect the handler bblocks of every clause of kind @type whose try
 * range contains @ip but not @target, i.e. the clauses that a branch
 * from @ip to @target would exit. */
525 for (i = 0; i < header->num_clauses; ++i) {
526 clause = &header->clauses [i];
527 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
528 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
529 if (clause->flags == type) {
530 handler = cfg->cil_offset_to_bb [clause->handler_offset];
532 res = g_list_append (res, handler);
540 mono_create_spvar_for_region (MonoCompile *cfg, int region)
/* Returns (creating on first use) the stack-pointer spill variable for
 * the given exception region, cached in cfg->spvars keyed by region. */
544 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
548 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
549 /* prevent it from being register allocated */
550 var->flags |= MONO_INST_INDIRECT;
552 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
556 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
/* Looks up the exception variable for @offset; NULL when none exists. */
558 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
562 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
/* Returns (creating on first use) the exception-object variable for the
 * handler starting at @offset, cached in cfg->exvars. */
566 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
570 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
571 /* prevent it from being register allocated */
572 var->flags |= MONO_INST_INDIRECT;
574 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
579 static MonoBasicBlock*
580 find_previous (MonoBasicBlock **bblocks, guint32 n_bblocks, MonoBasicBlock *start, const guchar *code)
/* Finds the bblock whose cil_code is closest to, but still before,
 * @code; @start is the fallback when no better candidate is found. */
582 MonoBasicBlock *best = start;
585 for (i = 0; i < n_bblocks; ++i) {
587 MonoBasicBlock *bb = bblocks [i];
589 if (bb->cil_code && bb->cil_code < code && bb->cil_code > best->cil_code)
598 split_bblock (MonoCompile *cfg, MonoBasicBlock *first, MonoBasicBlock *second) {
607 * FIXME: take into account all the details:
608 * second may have been the target of more than one bblock
/* second inherits all of first's outgoing edges ... */
610 second->out_count = first->out_count;
611 second->out_bb = first->out_bb;
/* ... so retarget the in-edges of first's successors to second. */
613 for (i = 0; i < first->out_count; ++i) {
614 bb = first->out_bb [i];
615 for (j = 0; j < bb->in_count; ++j) {
616 if (bb->in_bb [j] == first)
617 bb->in_bb [j] = second;
/* first now flows only into second (fallthrough edge). */
621 first->out_count = 0;
622 first->out_bb = NULL;
623 link_bblock (cfg, first, second);
625 second->last_ins = first->last_ins;
/* Walk the instruction list to find where second's code begins,
 * splitting the list at the first instruction whose cil_code falls
 * at or past second->cil_code. */
627 /*printf ("start search at %p for %p\n", first->cil_code, second->cil_code);*/
628 for (inst = first->code; inst && inst->next; inst = inst->next) {
629 /*char *code = mono_disasm_code_one (NULL, cfg->method, inst->next->cil_code, NULL);
630 printf ("found %p: %s", inst->next->cil_code, code);
632 if (inst->cil_code < second->cil_code && inst->next->cil_code >= second->cil_code) {
633 second->code = inst->next;
635 first->last_ins = inst;
636 second->next_bb = first->next_bb;
637 first->next_bb = second;
642 g_warning ("bblock split failed in %s::%s\n", cfg->method->klass->name, cfg->method->name);
648 * Returns the type used in the eval stack when @type is loaded.
649 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
652 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
/* Sets inst->type (STACK_*) and inst->klass from the MonoType being
 * loaded onto the evaluation stack.  Byref types become STACK_MP. */
657 inst->type = STACK_MP;
658 inst->klass = mono_defaults.object_class;
662 inst->klass = klass = mono_class_from_mono_type (type);
665 switch (type->type) {
667 inst->type = STACK_INV;
671 case MONO_TYPE_BOOLEAN:
677 inst->type = STACK_I4;
682 case MONO_TYPE_FNPTR:
683 inst->type = STACK_PTR;
685 case MONO_TYPE_CLASS:
686 case MONO_TYPE_STRING:
687 case MONO_TYPE_OBJECT:
688 case MONO_TYPE_SZARRAY:
689 case MONO_TYPE_ARRAY:
690 inst->type = STACK_OBJ;
694 inst->type = STACK_I8;
698 inst->type = STACK_R8;
700 case MONO_TYPE_VALUETYPE:
/* Enums are classified by their underlying integral type. */
701 if (type->data.klass->enumtype) {
702 type = type->data.klass->enum_basetype;
706 inst->type = STACK_VTYPE;
709 case MONO_TYPE_TYPEDBYREF:
710 inst->klass = mono_defaults.typed_reference_class;
711 inst->type = STACK_VTYPE;
/* Generic instances are classified via their generic type definition. */
713 case MONO_TYPE_GENERICINST:
714 type = &type->data.generic_class->container_class->byval_arg;
717 case MONO_TYPE_MVAR :
718 /* FIXME: all the arguments must be references for now,
719 * later look inside cfg and see if the arg num is
722 g_assert (cfg->generic_sharing_context);
723 inst->type = STACK_OBJ;
726 g_error ("unknown type 0x%02x in eval stack type", type->type);
731 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of numeric binops, indexed [src1->type][src2->type];
 * STACK_INV marks invalid IL operand pairs. */
734 bin_num_table [STACK_MAX] [STACK_MAX] = {
735 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
737 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
738 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* NOTE(review): row below presumably initializes neg_table (result type
 * of CEE_NEG per operand type — used in type_from_op); declaration line
 * not visible here. */
747 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
750 /* reduce the size of this table */
/* Result type of integer-only binops (no float/ref rows are valid). */
752 bin_int_table [STACK_MAX] [STACK_MAX] = {
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
756 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
757 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
758 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison compatibility matrix: 0 = invalid pair; the low bit marks
 * fully-verifiable comparisons (type_from_op masks with '& 1' for the
 * strict branch opcodes). */
764 bin_comp_table [STACK_MAX] [STACK_MAX] = {
765 /* Inv i L p F & O vt */
767 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
768 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
769 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
770 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
771 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
772 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
773 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
776 /* reduce the size of this table */
/* Result type of shift ops; the shift count must be I4 or native int. */
778 shift_table [STACK_MAX] [STACK_MAX] = {
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
782 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
783 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
784 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
790 * Tables to map from the non-specific opcode to the matching
791 * type-specific opcode.
/* Each entry is the delta added to the generic CIL opcode to obtain the
 * type-specific IR opcode, indexed by operand stack type. */
793 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
795 binops_op_map [STACK_MAX] = {
796 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
799 /* handles from CEE_NEG to CEE_CONV_U8 */
801 unops_op_map [STACK_MAX] = {
802 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
805 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
807 ovfops_op_map [STACK_MAX] = {
808 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
811 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
813 ovf2ops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
817 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
819 ovf3ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
823 /* handles from CEE_BEQ to CEE_BLT_UN */
825 beqops_op_map [STACK_MAX] = {
826 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
829 /* handles from CEE_CEQ to CEE_CLT_UN */
831 ceqops_op_map [STACK_MAX] = {
832 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
836 * Sets ins->type (the type on the eval stack) according to the
837 * type of the opcode and the arguments to it.
838 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
840 * FIXME: this function sets ins->type unconditionally in some cases, but
841 * it should set it to invalid for some types (a conv.x on an object)
844 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
/* Sets ins->type from the opcode and operand types (STACK_INV marks
 * invalid IL) and specializes the generic opcode via the *_op_map
 * delta tables defined above. */
846 switch (ins->opcode) {
853 /* FIXME: check unverifiable args for STACK_MP */
854 ins->type = bin_num_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
862 ins->type = bin_int_table [src1->type] [src2->type];
863 ins->opcode += binops_op_map [ins->type];
868 ins->type = shift_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
/* Compares: pick the long/float/int compare based on src1; 64-bit
 * pointers and references use the long compare. */
874 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
875 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
876 ins->opcode = OP_LCOMPARE;
877 else if (src1->type == STACK_R8)
878 ins->opcode = OP_FCOMPARE;
880 ins->opcode = OP_ICOMPARE;
/* Immediate compare: src2 is presumably an immediate (hence the table
 * is indexed by src1 twice) — TODO confirm at call sites. */
882 case OP_ICOMPARE_IMM:
883 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
884 if ((src1->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
885 ins->opcode = OP_LCOMPARE_IMM;
897 ins->opcode += beqops_op_map [src1->type];
900 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
901 ins->opcode += ceqops_op_map [src1->type];
/* Strict (fully verifiable) comparisons require the table's low bit. */
907 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
908 ins->opcode += ceqops_op_map [src1->type];
912 ins->type = neg_table [src1->type];
913 ins->opcode += unops_op_map [ins->type];
/* Bitwise not: only valid on integer/pointer-sized operands. */
916 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
917 ins->type = src1->type;
919 ins->type = STACK_INV;
920 ins->opcode += unops_op_map [ins->type];
926 ins->type = STACK_I4;
927 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int/long to floating point. */
930 ins->type = STACK_R8;
931 switch (src1->type) {
934 ins->opcode = OP_ICONV_TO_R_UN;
937 ins->opcode = OP_LCONV_TO_R_UN;
941 case CEE_CONV_OVF_I1:
942 case CEE_CONV_OVF_U1:
943 case CEE_CONV_OVF_I2:
944 case CEE_CONV_OVF_U2:
945 case CEE_CONV_OVF_I4:
946 case CEE_CONV_OVF_U4:
947 ins->type = STACK_I4;
948 ins->opcode += ovf3ops_op_map [src1->type];
950 case CEE_CONV_OVF_I_UN:
951 case CEE_CONV_OVF_U_UN:
952 ins->type = STACK_PTR;
953 ins->opcode += ovf2ops_op_map [src1->type];
955 case CEE_CONV_OVF_I1_UN:
956 case CEE_CONV_OVF_I2_UN:
957 case CEE_CONV_OVF_I4_UN:
958 case CEE_CONV_OVF_U1_UN:
959 case CEE_CONV_OVF_U2_UN:
960 case CEE_CONV_OVF_U4_UN:
961 ins->type = STACK_I4;
962 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: conversion to native unsigned int. */
965 ins->type = STACK_PTR;
966 switch (src1->type) {
968 ins->opcode = OP_MOVE;
972 #if SIZEOF_VOID_P == 8
973 ins->opcode = OP_LCONV_TO_U;
975 ins->opcode = OP_MOVE;
979 ins->opcode = OP_LCONV_TO_U;
982 ins->opcode = OP_FCONV_TO_U;
988 ins->type = STACK_I8;
989 ins->opcode += unops_op_map [src1->type];
991 case CEE_CONV_OVF_I8:
992 case CEE_CONV_OVF_U8:
993 ins->type = STACK_I8;
994 ins->opcode += ovf3ops_op_map [src1->type];
996 case CEE_CONV_OVF_U8_UN:
997 case CEE_CONV_OVF_I8_UN:
998 ins->type = STACK_I8;
999 ins->opcode += ovf2ops_op_map [src1->type];
1003 ins->type = STACK_R8;
1004 ins->opcode += unops_op_map [src1->type];
1007 ins->type = STACK_R8;
1011 ins->type = STACK_I4;
1012 ins->opcode += ovfops_op_map [src1->type];
1015 case CEE_CONV_OVF_I:
1016 case CEE_CONV_OVF_U:
1017 ins->type = STACK_PTR;
1018 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats have no overflow form. */
1021 case CEE_ADD_OVF_UN:
1023 case CEE_MUL_OVF_UN:
1025 case CEE_SUB_OVF_UN:
1026 ins->type = bin_num_table [src1->type] [src2->type];
1027 ins->opcode += ovfops_op_map [src1->type];
1028 if (ins->type == STACK_R8)
1029 ins->type = STACK_INV;
/* Memory loads: result type follows the load width. */
1031 case OP_LOAD_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI1_MEMBASE:
1035 case OP_LOADU1_MEMBASE:
1036 case OP_LOADI2_MEMBASE:
1037 case OP_LOADU2_MEMBASE:
1038 case OP_LOADI4_MEMBASE:
1039 case OP_LOADU4_MEMBASE:
1040 ins->type = STACK_PTR;
1042 case OP_LOADI8_MEMBASE:
1043 ins->type = STACK_I8;
1045 case OP_LOADR4_MEMBASE:
1046 case OP_LOADR8_MEMBASE:
1047 ins->type = STACK_R8;
1050 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers without a more precise class default to object. */
1054 if (ins->type == STACK_MP)
1055 ins->klass = mono_defaults.object_class;
1060 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1066 param_table [STACK_MAX] [STACK_MAX] = {
1071 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
/* NOTE(review): presumably validates that the stack types of @args are
 * compatible with @sig's parameter types — much of the body (including
 * return statements) is not visible in this view; confirm before
 * relying on specifics. */
1075 switch (args->type) {
/* Per-parameter check: byref-ness and type class must agree. */
1085 for (i = 0; i < sig->param_count; ++i) {
1086 switch (args [i].type) {
1090 if (!sig->params [i]->byref)
1094 if (sig->params [i]->byref)
1096 switch (sig->params [i]->type) {
1097 case MONO_TYPE_CLASS:
1098 case MONO_TYPE_STRING:
1099 case MONO_TYPE_OBJECT:
1100 case MONO_TYPE_SZARRAY:
1101 case MONO_TYPE_ARRAY:
/* Float stack entries must map to R4/R8 parameters. */
1108 if (sig->params [i]->byref)
1110 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1119 /*if (!param_table [args [i].type] [sig->params [i]->type])
1127 * When we need a pointer to the current domain many times in a method, we
1128 * call mono_domain_get() once and we store the result in a local variable.
1129 * This function returns the variable that represents the MonoDomain*.
1131 inline static MonoInst *
1132 mono_get_domainvar (MonoCompile *cfg)
/* Lazily creates the local variable that caches the MonoDomain*
 * returned by mono_domain_get (). */
1134 if (!cfg->domainvar)
1135 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1136 return cfg->domainvar;
1140 * The got_var contains the address of the Global Offset Table when AOT
1143 inline static MonoInst *
1144 mono_get_got_var (MonoCompile *cfg)
/* Lazily creates the variable holding the GOT address; only relevant
 * on architectures defining MONO_ARCH_NEED_GOT_VAR and when compiling
 * ahead-of-time. */
1146 #ifdef MONO_ARCH_NEED_GOT_VAR
1147 if (!cfg->compile_aot)
1149 if (!cfg->got_var) {
1150 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1152 return cfg->got_var;
1159 mono_get_vtable_var (MonoCompile *cfg)
/* Returns the variable holding the runtime generic context (rgctx),
 * creating it on first use; only valid under generic sharing. */
1161 g_assert (cfg->generic_sharing_context);
1163 if (!cfg->rgctx_var) {
1164 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1165 /* force the var to be stack allocated */
1166 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1169 return cfg->rgctx_var;
1173 type_from_stack_type (MonoInst *ins) {
/* Maps an evaluation-stack type back to a representative MonoType
 * (used e.g. when creating spill variables for stack slots). */
1174 switch (ins->type) {
1175 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1176 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1177 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1178 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: use the byref form of the pointed-to class. */
1180 return &ins->klass->this_arg;
1181 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1182 case STACK_VTYPE: return &ins->klass->byval_arg;
1184 g_error ("stack type %d to montype not handled\n", ins->type);
1189 static G_GNUC_UNUSED int
1190 type_to_stack_type (MonoType *t)
/* Maps a MonoType to its evaluation-stack type (STACK_*); enums are
 * resolved to their underlying type first. */
1192 switch (mono_type_get_underlying_type (t)->type) {
1195 case MONO_TYPE_BOOLEAN:
1198 case MONO_TYPE_CHAR:
1205 case MONO_TYPE_FNPTR:
1207 case MONO_TYPE_CLASS:
1208 case MONO_TYPE_STRING:
1209 case MONO_TYPE_OBJECT:
1210 case MONO_TYPE_SZARRAY:
1211 case MONO_TYPE_ARRAY:
1219 case MONO_TYPE_VALUETYPE:
1220 case MONO_TYPE_TYPEDBYREF:
/* Generic instances may still be value types. */
1222 case MONO_TYPE_GENERICINST:
1223 if (mono_type_generic_inst_is_valuetype (t))
1229 g_assert_not_reached ();
1236 array_access_to_klass (int opcode)
/* Maps a ldelem/stelem CIL opcode to the MonoClass of the element
 * being accessed (the case labels for most entries fall between the
 * visible lines). */
1240 return mono_defaults.byte_class;
1242 return mono_defaults.uint16_class;
1245 return mono_defaults.int_class;
1248 return mono_defaults.sbyte_class;
1251 return mono_defaults.int16_class;
1254 return mono_defaults.int32_class;
1256 return mono_defaults.uint32_class;
1259 return mono_defaults.int64_class;
1262 return mono_defaults.single_class;
1265 return mono_defaults.double_class;
1266 case CEE_LDELEM_REF:
1267 case CEE_STELEM_REF:
1268 return mono_defaults.object_class;
1270 g_assert_not_reached ();
1276 * We try to share variables when possible
1279 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
/* Returns a local variable to hold the value in stack @slot of type
 * ins->type, sharing previously created variables where possible via
 * the cfg->intvars cache. */
1284 /* inlining can result in deeper stacks */
1285 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1286 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key combines the stack slot and the stack type. */
1288 pos = ins->type - 1 + slot * STACK_MAX;
1290 switch (ins->type) {
1297 if ((vnum = cfg->intvars [pos]))
1298 return cfg->varinfo [vnum];
1299 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1300 cfg->intvars [pos] = res->inst_c0;
1303 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1309 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
/* When compiling AOT, records the (image, token) pair that @key
 * originated from in cfg->token_info_hash; a no-op for JIT compiles. */
1311 if (cfg->compile_aot) {
1312 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1313 jump_info_token->image = image;
1314 jump_info_token->token = token;
1315 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1320 * This function is called to handle items that are left on the evaluation stack
1321 * at basic block boundaries. What happens is that we save the values to local variables
1322 * and we reload them later when first entering the target basic block (with the
1323 * handle_loaded_temps () function).
1324 * A single joint point will use the same variables (stored in the array bb->out_stack or
1325 * bb->in_stack, if the basic block is before or after the joint point).
1327 * This function needs to be called _before_ emitting the last instruction of
1328 * the bb (i.e. before emitting a branch).
1329 * If the stack merge fails at a join point, cfg->unverifiable is set.
1332 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1335 MonoBasicBlock *bb = cfg->cbb;
1336 MonoBasicBlock *outb;
1337 MonoInst *inst, **locals;
1342 if (cfg->verbose_level > 3)
1343 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: decide which locals carry the stack values. */
1344 if (!bb->out_scount) {
1345 bb->out_scount = count;
1346 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing the in_stack of a successor that already has one. */
1348 for (i = 0; i < bb->out_count; ++i) {
1349 outb = bb->out_bb [i];
1350 /* exception handlers are linked, but they should not be considered for stack args */
1351 if (outb->flags & BB_EXCEPTION_HANDLER)
1353 //printf (" %d", outb->block_num);
1354 if (outb->in_stack) {
1356 bb->out_stack = outb->in_stack;
/* No successor had an in_stack yet: allocate a fresh out_stack array. */
1362 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1363 for (i = 0; i < count; ++i) {
1365 * try to reuse temps already allocated for this purpouse, if they occupy the same
1366 * stack slot and if they are of the same type.
1367 * This won't cause conflicts since if 'local' is used to
1368 * store one of the values in the in_stack of a bblock, then
1369 * the same variable will be used for the same outgoing stack
1371 * This doesn't work when inlining methods, since the bblocks
1372 * in the inlined methods do not inherit their in_stack from
1373 * the bblock they are inlined to. See bug #58863 for an
1376 if (cfg->inlined_method)
1377 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1379 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen variables to every (non-handler) successor. */
1384 for (i = 0; i < bb->out_count; ++i) {
1385 outb = bb->out_bb [i];
1386 /* exception handlers are linked, but they should not be considered for stack args */
1387 if (outb->flags & BB_EXCEPTION_HANDLER)
1389 if (outb->in_scount) {
/* Depth mismatch at a join point: mark the method unverifiable. */
1390 if (outb->in_scount != bb->out_scount) {
1391 cfg->unverifiable = TRUE;
1394 continue; /* check they are the same locals */
1396 outb->in_scount = count;
1397 outb->in_stack = bb->out_stack;
1400 locals = bb->out_stack;
/* Spill the current stack values into the shared locals. */
1402 for (i = 0; i < count; ++i) {
1403 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1404 inst->cil_code = sp [i]->cil_code;
1405 sp [i] = locals [i];
1406 if (cfg->verbose_level > 3)
1407 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1411 * It is possible that the out bblocks already have in_stack assigned, and
1412 * the in_stacks differ. In this case, we will store to all the different
1419 /* Find a bblock which has a different in_stack */
1421 while (bindex < bb->out_count) {
1422 outb = bb->out_bb [bindex];
1423 /* exception handlers are linked, but they should not be considered for stack args */
1424 if (outb->flags & BB_EXCEPTION_HANDLER) {
1428 if (outb->in_stack != locals) {
/* Store the values again, into this successor's distinct in_stack vars. */
1429 for (i = 0; i < count; ++i) {
1430 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1431 inst->cil_code = sp [i]->cil_code;
1432 sp [i] = locals [i];
1433 if (cfg->verbose_level > 3)
1434 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1436 locals = outb->in_stack;
1445 /* Emit code which loads interface_offsets [klass->interface_id]
1446 * The array is stored in memory before vtable.
/* AOT: the interface id is unknown at compile time, so it is materialized via
 * a MONO_PATCH_INFO_ADJUSTED_IID patch and added to the vtable pointer.
 * JIT: the id is constant, so a single negative-offset load suffices. */
1449 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1451 if (cfg->compile_aot) {
1452 int ioffset_reg = alloc_preg (cfg);
1453 int iid_reg = alloc_preg (cfg);
1455 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1456 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* slot -(interface_id + 1) words before the vtable start */
1460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1465 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1466 * stored in "klass_reg" implements the interface "klass".
/* Tests bit number interface_id of klass->interface_bitmap.  Under AOT the id
 * is a patch, so byte index (iid >> 3) and bit mask (1 << (iid & 7)) are
 * computed at runtime; the JIT folds both into immediates. */
1469 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1471 int ibitmap_reg = alloc_preg (cfg);
1472 int ibitmap_byte_reg = alloc_preg (cfg);
1474 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1476 if (cfg->compile_aot) {
1477 int iid_reg = alloc_preg (cfg);
1478 int shifted_iid_reg = alloc_preg (cfg);
1479 int ibitmap_byte_address_reg = alloc_preg (cfg);
1480 int masked_iid_reg = alloc_preg (cfg);
1481 int iid_one_bit_reg = alloc_preg (cfg);
1482 int iid_bit_reg = alloc_preg (cfg);
1483 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* ibitmap_byte = interface_bitmap [iid >> 3] */
1484 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1485 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1486 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
/* intf_bit = ibitmap_byte & (1 << (iid & 7)) */
1487 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1488 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1489 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1490 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT path: byte offset and mask are compile-time constants */
1492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1493 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1498 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1499 * stored in "vtable_reg" implements the interface "klass".
/* Same bitmap test as mini_emit_load_intf_bit_reg_class, but reading the
 * interface_bitmap field of the MonoVTable instead of the MonoClass. */
1502 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1504 int ibitmap_reg = alloc_preg (cfg);
1505 int ibitmap_byte_reg = alloc_preg (cfg);
1507 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1509 if (cfg->compile_aot) {
1510 int iid_reg = alloc_preg (cfg);
1511 int shifted_iid_reg = alloc_preg (cfg);
1512 int ibitmap_byte_address_reg = alloc_preg (cfg);
1513 int masked_iid_reg = alloc_preg (cfg);
1514 int iid_one_bit_reg = alloc_preg (cfg);
1515 int iid_bit_reg = alloc_preg (cfg);
1516 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* ibitmap_byte = interface_bitmap [iid >> 3] */
1517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1518 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1519 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
/* intf_bit = ibitmap_byte & (1 << (iid & 7)) */
1520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1521 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1522 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1523 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT path: byte offset and mask are compile-time constants */
1525 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1531 * Emit code which checks whenever the interface id of @klass is smaller than
1532 * than the value given by max_iid_reg.
/* NOTE(review): the two failure actions below (branch to false_target vs.
 * throw InvalidCastException) appear to be alternatives selected by an elided
 * 'if (false_target)' — confirm against the full source. */
1535 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1536 MonoBasicBlock *false_target)
1538 if (cfg->compile_aot) {
/* interface id comes from an AOT patch instead of an immediate */
1539 int iid_reg = alloc_preg (cfg);
1540 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1541 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1548 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1551 /* Same as above, but obtains max_iid from a vtable */
1553 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1554 MonoBasicBlock *false_target)
1556 int max_iid_reg = alloc_preg (cfg);
/* load vtable->max_interface_id (unsigned 16-bit) and delegate the check */
1558 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1559 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1562 /* Same as above, but obtains max_iid from a klass */
1564 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1565 MonoBasicBlock *false_target)
1567 int max_iid_reg = alloc_preg (cfg);
/* load klass->max_interface_id (unsigned 16-bit) and delegate the check */
1569 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1570 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 *   Emit an "is instance of non-interface class" test: reads the supertypes
 *   table of the class in klass_reg and compares the entry at depth
 *   klass->idepth - 1 against klass, branching to true_target on a match.
 */
1574 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1576 int idepth_reg = alloc_preg (cfg);
1577 int stypes_reg = alloc_preg (cfg);
1578 int stype = alloc_preg (cfg);
/* The default supertable only covers depths up to MONO_DEFAULT_SUPERTABLE_SIZE;
 * deeper targets need an explicit runtime depth check first. */
1580 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1581 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1582 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1583 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* compare against klass: via a class-const patch under AOT, immediate otherwise */
1587 if (cfg->compile_aot) {
1588 int const_reg = alloc_preg (cfg);
1589 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1590 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast check against the vtable in vtable_reg: first a
 *   max-interface-id range check, then the interface-bitmap bit test.
 *   NOTE(review): the branch vs. exception lines below look like alternatives
 *   under an elided 'if (true_target)' — confirm against the full source.
 */
1598 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1600 int intf_reg = alloc_preg (cfg);
1602 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1603 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
/* nonzero bit => the interface is implemented */
1604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1606 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1608 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1612 * Variant of the above that takes a register to the class, not the vtable.
/* NOTE(review): as in mini_emit_iface_cast, the branch vs. exception lines
 * below appear to be alternatives under an elided conditional. */
1615 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1617 int intf_bit_reg = alloc_preg (cfg);
1619 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1620 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
/* nonzero bit => the interface is implemented */
1621 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1623 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1625 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check:
 *   Emit an exact-class identity check: throw InvalidCastException unless the
 *   class pointer in klass_reg equals klass.
 */
1629 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1631 if (cfg->compile_aot) {
/* klass pointer comes from a class-const patch under AOT */
1632 int const_reg = alloc_preg (cfg);
1633 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1634 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1636 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1638 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * mini_emit_class_check_branch:
 *   Like mini_emit_class_check, but instead of throwing, branch to @target
 *   using the caller-supplied comparison branch opcode @branch_op.
 */
1642 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1644 if (cfg->compile_aot) {
1645 int const_reg = alloc_preg (cfg);
1646 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1647 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1649 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1651 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 *   Emit a castclass check for the object in obj_reg whose class pointer is
 *   in klass_reg; throws InvalidCastException on failure.  Array targets are
 *   handled by checking rank and then recursing on the element (cast) class;
 *   other targets use the supertypes-table check.  (Several structural lines
 *   are elided in this view.)
 */
1655 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1658 int rank_reg = alloc_preg (cfg);
1659 int eclass_reg = alloc_preg (cfg);
/* the object's rank must match the target's rank exactly */
1661 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1663 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1664 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1665 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* special cases for object/enum element types; see the matching isinst logic */
1666 if (klass->cast_class == mono_defaults.object_class) {
1667 int parent_reg = alloc_preg (cfg);
1668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1669 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1670 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1671 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1672 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1673 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1674 } else if (klass->cast_class == mono_defaults.enum_class) {
1675 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1676 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1677 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
/* general case: recurse on the element class */
1679 mini_emit_castclass (cfg, obj_reg, eclass_reg, klass->cast_class, object_is_null);
1682 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
1683 /* Check that the object is a vector too */
/* SZARRAY (vector) has a NULL bounds pointer; multi-dim arrays do not */
1684 int bounds_reg = alloc_preg (cfg);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1687 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array target: supertypes-table check, throwing on mismatch */
1690 int idepth_reg = alloc_preg (cfg);
1691 int stypes_reg = alloc_preg (cfg);
1692 int stype = alloc_preg (cfg);
1694 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1695 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1696 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1697 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1700 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1701 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 *   Emit inline code to zero 'size' bytes at destreg+offset.  Only val == 0
 *   is supported (asserted below).  Small aligned sizes use immediate stores;
 *   larger sizes store from a zeroed register in descending granularity
 *   (8/4/2/1 bytes).  Loop/size bookkeeping lines are elided in this view.
 */
1706 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1710 g_assert (val == 0);
/* small, sufficiently aligned sizes: a single immediate store per case */
1712 if ((size <= 4) && (size <= align)) {
1715 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1721 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1723 #if SIZEOF_VOID_P == 8
1725 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general case: materialize val in a register sized for the pointer width */
1731 val_reg = alloc_preg (cfg);
1733 if (sizeof (gpointer) == 8)
1734 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1736 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* unaligned prefix: byte stores until aligned */
1739 /* This could be optimized further if neccesary */
1741 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1748 #if !NO_UNALIGNED_ACCESS
1749 if (sizeof (gpointer) == 8) {
/* 4-byte store presumably handles a misaligned head before the 8-byte runs
 * — TODO confirm against the elided alignment checks */
1751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* remaining tail: 4-, 2-, then 1-byte stores */
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1769 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy2:
 *   Emit inline code copying 'size' bytes from srcreg+soffset to
 *   destreg+doffset via load/store pairs, using the widest granularity the
 *   alignment allows (8/4/2/1 bytes).  Loop/offset bookkeeping lines are
 *   elided in this view.
 */
1781 mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* unaligned prefix: byte copies until aligned */
1786 /* This could be optimized further if neccesary */
1788 cur_reg = alloc_preg (cfg);
1789 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1790 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1797 #if !NO_UNALIGNED_ACCESS
/* 64-bit targets: copy in 8-byte chunks */
1798 if (sizeof (gpointer) == 8) {
1800 cur_reg = alloc_preg (cfg);
1801 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1802 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* remaining tail: 4-, 2-, then 1-byte copies */
1811 cur_reg = alloc_preg (cfg);
1812 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1813 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1819 cur_reg = alloc_preg (cfg);
1820 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1821 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1827 cur_reg = alloc_preg (cfg);
1828 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1829 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * mini_emit_check_array_type:
 *   Emit a store-type check for array covariance: throw
 *   ArrayTypeMismatchException unless the object's array type matches
 *   array_class.  With MONO_OPT_SHARED the MonoClass is compared (the vtable
 *   is per-domain); otherwise the vtable pointer itself is compared.
 */
1837 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
1839 int vtable_reg = alloc_preg (cfg);
1841 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
1843 if (cfg->opt & MONO_OPT_SHARED) {
1844 int class_reg = alloc_preg (cfg);
1845 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1846 if (cfg->compile_aot) {
1847 int klass_reg = alloc_preg (cfg);
1848 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
1849 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
1851 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* non-shared code: the vtable is constant, compare it directly */
1854 if (cfg->compile_aot) {
1855 int vt_reg = alloc_preg (cfg);
1856 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
1857 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
1859 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
1863 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * ret_type_to_call_opcode:
 *   Map a method's return type to the matching call opcode family
 *   (integer/long/float/void/valuetype), selecting the _REG variant for
 *   indirect calls (calli) or the VIRT variant for virtual calls.  Enum and
 *   generic-instance types are unwrapped and re-dispatched (case-label lines
 *   are elided in this view).
 */
1867 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized: ordinary CALL family */
1870 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1873 type = mini_get_basic_type_from_generic (gsctx, type);
1874 switch (type->type) {
1875 case MONO_TYPE_VOID:
1876 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1879 case MONO_TYPE_BOOLEAN:
1882 case MONO_TYPE_CHAR:
1885 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1889 case MONO_TYPE_FNPTR:
1890 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1891 case MONO_TYPE_CLASS:
1892 case MONO_TYPE_STRING:
1893 case MONO_TYPE_OBJECT:
1894 case MONO_TYPE_SZARRAY:
1895 case MONO_TYPE_ARRAY:
1896 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1899 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1902 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1903 case MONO_TYPE_VALUETYPE:
/* enums collapse to their underlying type and re-enter the switch */
1904 if (type->data.klass->enumtype) {
1905 type = type->data.klass->enum_basetype;
1908 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1909 case MONO_TYPE_TYPEDBYREF:
1910 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1911 case MONO_TYPE_GENERICINST:
1912 type = &type->data.generic_class->container_class->byval_arg;
1915 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1921 * target_type_is_incompatible:
1922 * @cfg: MonoCompile context
1924 * Check that the item @arg on the evaluation stack can be stored
1925 * in the target type (can be a local, or field, etc).
1926 * The cfg arg can be used to check if we need verification or just
1929 * Returns: non-0 value if arg can't be stored on a target.
1932 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1934 MonoType *simple_type;
1936 if (target->byref) {
1937 /* oops */
1938 /* FIXME: check that the pointed to types match */
1939 if (arg->type == STACK_MP)
1940 return arg->klass != mono_class_from_mono_type (target);
1941 if (arg->type == STACK_PTR)
/* strip enum wrappers etc. before dispatching on the basic type */
1946 simple_type = mono_type_get_underlying_type (target);
1947 switch (simple_type->type) {
1948 case MONO_TYPE_VOID:
1952 case MONO_TYPE_BOOLEAN:
1955 case MONO_TYPE_CHAR:
1958 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1962 /* STACK_MP is needed when setting pinned locals */
1963 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1968 case MONO_TYPE_FNPTR:
1969 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1972 case MONO_TYPE_CLASS:
1973 case MONO_TYPE_STRING:
1974 case MONO_TYPE_OBJECT:
1975 case MONO_TYPE_SZARRAY:
1976 case MONO_TYPE_ARRAY:
1977 if (arg->type != STACK_OBJ)
1979 /* FIXME: check type compatibility */
1983 if (arg->type != STACK_I8)
1988 if (arg->type != STACK_R8)
1991 case MONO_TYPE_VALUETYPE:
/* value types must match both stack kind and exact class */
1992 if (arg->type != STACK_VTYPE)
1994 klass = mono_class_from_mono_type (simple_type);
1995 if (klass != arg->klass)
1998 case MONO_TYPE_TYPEDBYREF:
1999 if (arg->type != STACK_VTYPE)
2001 klass = mono_class_from_mono_type (simple_type);
2002 if (klass != arg->klass)
2005 case MONO_TYPE_GENERICINST:
/* generic instances split into valuetype vs. reference handling */
2006 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2007 if (arg->type != STACK_VTYPE)
2009 klass = mono_class_from_mono_type (simple_type);
2010 if (klass != arg->klass)
2014 if (arg->type != STACK_OBJ)
2016 /* FIXME: check type compatibility */
2020 case MONO_TYPE_MVAR:
2021 /* FIXME: all the arguments must be references for now,
2022 * later look inside cfg and see if the arg num is
2023 * really a reference
2025 g_assert (cfg->generic_sharing_context);
2026 if (arg->type != STACK_OBJ)
2030 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2036 * Prepare arguments for passing to a function call.
2037 * Return a non-zero value if the arguments can't be passed to the given
2039 * The type checks are not yet complete and some conversions may need
2040 * casts on 32 or 64 bit architectures.
2042 * FIXME: implement this using target_type_is_incompatible ()
2045 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2047 MonoType *simple_type;
/* implicit 'this' (when present) must be a reference or pointer */
2051 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2055 for (i = 0; i < sig->param_count; ++i) {
2056 if (sig->params [i]->byref) {
2057 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2061 simple_type = sig->params [i];
/* resolve shared-generic type vars to their basic type first */
2062 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2064 switch (simple_type->type) {
2065 case MONO_TYPE_VOID:
2070 case MONO_TYPE_BOOLEAN:
2073 case MONO_TYPE_CHAR:
2076 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2082 case MONO_TYPE_FNPTR:
2083 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2086 case MONO_TYPE_CLASS:
2087 case MONO_TYPE_STRING:
2088 case MONO_TYPE_OBJECT:
2089 case MONO_TYPE_SZARRAY:
2090 case MONO_TYPE_ARRAY:
2091 if (args [i]->type != STACK_OBJ)
2096 if (args [i]->type != STACK_I8)
2101 if (args [i]->type != STACK_R8)
2104 case MONO_TYPE_VALUETYPE:
/* enums collapse to their underlying type and re-enter the switch */
2105 if (simple_type->data.klass->enumtype) {
2106 simple_type = simple_type->data.klass->enum_basetype;
2109 if (args [i]->type != STACK_VTYPE)
2112 case MONO_TYPE_TYPEDBYREF:
2113 if (args [i]->type != STACK_VTYPE)
2116 case MONO_TYPE_GENERICINST:
2117 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2121 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map a CALLVIRT-family opcode to the corresponding direct CALL opcode
 *   (used when a virtual call can be statically dispatched).  Most case
 *   labels are elided in this view.
 */
2129 callvirt_to_call (int opcode)
2134 case OP_VOIDCALLVIRT:
/* unknown opcode: programming error */
2143 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *   Map a CALLVIRT-family opcode to the corresponding CALL_MEMBASE opcode
 *   (call through a memory slot, e.g. a vtable or IMT entry).
 */
2150 callvirt_to_call_membase (int opcode)
2154 return OP_CALL_MEMBASE;
2155 case OP_VOIDCALLVIRT:
2156 return OP_VOIDCALL_MEMBASE;
2158 return OP_FCALL_MEMBASE;
2160 return OP_LCALL_MEMBASE;
2162 return OP_VCALL_MEMBASE;
/* unknown opcode: programming error */
2164 g_assert_not_reached ();
2170 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *   Load the IMT "method" argument (the target MonoMethod, or imt_arg when
 *   supplied) into the architecture's dedicated IMT register before an
 *   interface call.  Falls back to an arch-specific hook when there is no
 *   MONO_ARCH_IMT_REG.
 */
2172 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2174 #ifdef MONO_ARCH_IMT_REG
2175 int method_reg = alloc_preg (cfg);
2177 if (cfg->compile_aot) {
/* AOT: the method pointer comes from a METHODCONST patch */
2178 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2179 } else if (imt_arg) {
2180 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2183 MONO_INST_NEW (cfg, ins, OP_PCONST);
2184 ins->inst_p0 = call->method;
2185 ins->dreg = method_reg;
2186 MONO_ADD_INS (cfg->cbb, ins);
/* pin the loaded value to the arch's IMT register for the call */
2189 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2191 mono_arch_emit_imt_argument (cfg, call);
/* Forward declaration: mono_emit_call_args (below) needs this for the
 * soft-float r8->r4 argument conversion icall. */
2196 inline static MonoInst*
2197 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for a call with the given signature and argument
 *   instructions.  Handles valuetype returns (via a hidden return-address
 *   variable and OP_OUTARG_VTRETADDR) and, on soft-float targets, converts
 *   R4 arguments before the call sequence.  The caller adds the returned
 *   instruction to a bblock.
 */
2199 inline static MonoCallInst *
2200 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2201 MonoInst **args, int calli, int virtual)
2204 #ifdef MONO_ARCH_SOFT_FLOAT
2208 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2211 call->signature = sig;
2213 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* valuetype return: allocate a temp to receive the value by address */
2215 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2216 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2219 temp->backend.is_pinvoke = sig->pinvoke;
2222 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2223 * address of return value to increase optimization opportunities.
2224 * Before vtype decomposition, the dreg of the call ins itself represents the
2225 * fact the call modifies the return value. After decomposition, the call will
2226 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2227 * will be transformed into an LDADDR.
2229 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2230 loada->dreg = alloc_preg (cfg);
2231 loada->inst_p0 = temp;
2232 /* We reference the call too since call->dreg could change during optimization */
2233 loada->inst_p1 = call;
2234 MONO_ADD_INS (cfg->cbb, loada);
2236 call->inst.dreg = temp->dreg;
2238 call->vret_var = loada;
2239 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2240 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2242 #ifdef MONO_ARCH_SOFT_FLOAT
2244 * If the call has a float argument, we would need to do an r8->r4 conversion using
2245 * an icall, but that cannot be done during the call sequence since it would clobber
2246 * the call registers + the stack. So we do it before emitting the call.
2248 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2250 MonoInst *in = call->args [i];
/* index 0 is 'this' when hasthis; it is always pointer-typed */
2252 if (i >= sig->hasthis)
2253 t = sig->params [i - sig->hasthis];
2255 t = &mono_defaults.int_class->byval_arg;
2256 t = mono_type_get_underlying_type (t);
2258 if (!t->byref && t->type == MONO_TYPE_R4) {
2259 MonoInst *iargs [1];
2263 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2265 /* The result will be in an int vreg */
2266 call->args [i] = conv;
/* let the backend lower the argument passing and account for stack use */
2271 mono_arch_emit_call (cfg, call);
2273 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2274 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_emit_calli:
 *   Emit an indirect call through the function pointer in addr->dreg.
 */
2279 inline static MonoInst*
2280 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2282 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
/* sreg1 carries the call target for the _REG call opcodes */
2284 call->inst.sreg1 = addr->dreg;
2286 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2288 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *   Like mono_emit_calli, but also passes the runtime-generic-context
 *   argument in the architecture's dedicated RGCTX register.  Unreachable on
 *   architectures without MONO_ARCH_RGCTX_REG.
 */
2291 inline static MonoInst*
2292 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2294 #ifdef MONO_ARCH_RGCTX_REG
2296 int rgctx_reg = mono_alloc_preg (cfg);
2298 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2299 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2300 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2301 return (MonoInst*)call;
2303 g_assert_not_reached ();
/*
 * mono_emit_imt_method_call:
 *   Emit a managed call to @method.  Chooses among: direct call (non-virtual,
 *   or virtual-but-final/sealed, with an explicit null check on 'this'),
 *   delegate Invoke through delegate->invoke_impl, interface dispatch via the
 *   IMT or the interface-offsets table, or a plain vtable-slot call.  Also
 *   wraps remoting-transparent targets (marshalbyref / object) with the
 *   remoting check wrapper.  'this == NULL' means a non-virtual call.
 */
2309 mono_emit_imt_method_call (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2310 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2312 gboolean virtual = this != NULL;
2313 gboolean enable_for_aot = TRUE;
2316 if (method->string_ctor) {
2317 /* Create the real signature */
2318 /* FIXME: Cache these */
/* string ctors actually return the string, unlike their metadata signature */
2319 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup (sig);
2320 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2325 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* remoting: route through the invoke-with-check wrapper when 'this' may be
 * a transparent proxy and the call is not dispatched virtually */
2327 if (this && sig->hasthis &&
2328 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2329 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2330 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2332 call->method = method;
2334 call->inst.flags |= MONO_INST_HAS_METHOD;
2335 call->inst.inst_left = this;
2338 int vtable_reg, slot_reg, this_reg;
2340 this_reg = this->dreg;
2342 if ((!cfg->compile_aot || enable_for_aot) &&
2343 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2344 ((method->flags & METHOD_ATTRIBUTE_FINAL) &&
2345 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2347 * the method is not virtual, we just need to ensure this is not null
2348 * and then we can call the method directly.
2350 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2351 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2354 if (!method->string_ctor) {
2355 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2356 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2357 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
/* devirtualize: downgrade CALLVIRT to a direct CALL */
2360 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2362 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2364 return (MonoInst*)call;
2367 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2368 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2369 /* Make a call to delegate->invoke_impl */
2370 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2371 call->inst.inst_basereg = this_reg;
2372 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2373 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2375 return (MonoInst*)call;
2379 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
2380 ((method->flags & METHOD_ATTRIBUTE_FINAL) ||
2381 (method->klass && method->klass->flags & TYPE_ATTRIBUTE_SEALED))) {
2383 * the method is virtual, but we can statically dispatch since either
2384 * it's class or the method itself are sealed.
2385 * But first we need to ensure it's not a null reference.
2387 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2388 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2389 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2391 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2392 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2394 return (MonoInst*)call;
/* true virtual dispatch: call through a memory slot */
2397 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2399 /* Initialize method->slot */
2400 mono_class_setup_vtable (method->klass);
2402 vtable_reg = alloc_preg (cfg);
2403 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2404 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2406 #ifdef MONO_ARCH_HAVE_IMT
/* IMT dispatch: slot lives at a negative offset from the vtable */
2408 guint32 imt_slot = mono_method_get_imt_slot (method);
2409 emit_imt_argument (cfg, call, imt_arg);
2410 slot_reg = vtable_reg;
2411 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* no IMT: resolve via the per-interface offsets table */
2414 if (slot_reg == -1) {
2415 slot_reg = alloc_preg (cfg);
2416 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2417 call->inst.inst_offset = method->slot * SIZEOF_VOID_P;
/* ordinary class: index straight into the vtable's method table */
2420 slot_reg = vtable_reg;
2421 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) + (method->slot * SIZEOF_VOID_P);
2424 call->inst.sreg1 = slot_reg;
2425 call->virtual = TRUE;
2428 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2430 return (MonoInst*)call;
/* Convenience wrapper: method call without an explicit IMT argument. */
2433 static inline MonoInst*
2434 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2435 MonoInst **args, MonoInst *this)
2437 return mono_emit_imt_method_call (cfg, method, sig, args, this, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with managed signature
 *   SIG.  Builds the call instruction via mono_emit_call_args (non-virtual,
 *   non-tail), appends it to the current bblock and returns it.
 */
2441 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2448 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2451 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2453 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to the JIT icall identified by its native address FUNC.
 *   Looks up the registered icall info and calls through the icall's
 *   wrapper so the usual managed-to-native transition is performed.
 */
2456 inline static MonoInst*
2457 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2459 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2463 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * get_memcpy_method:
 *   Return the (cached) corlib String.memcpy(dest, src, n) helper method.
 *   The 3-argument memcpy lives on the string class in this corlib version;
 *   its absence means the installed corlib is too old, which is fatal.
 */
2467 get_memcpy_method (void)
2469 static MonoMethod *memcpy_method = NULL;
2470 if (!memcpy_method) {
2471 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2473 g_error ("Old corlib found. Install a new one");
2475 return memcpy_method;
2479 * Emit code to copy a valuetype of type @klass whose address is stored in
2480 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * mini_emit_stobj:
 *   Emit code to copy a valuetype of type KLASS from the address in
 *   SRC->dreg to the address in DEST->dreg.  NATIVE selects the native
 *   (marshalled) size instead of the managed value size.  Small copies
 *   (<= 5 pointers, with MONO_OPT_INTRINS) are inlined; larger ones call
 *   the corlib memcpy helper.
 */
2483 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2485 MonoInst *iargs [3];
2488 MonoMethod *memcpy_method;
2492 * This check breaks with spilled vars... need to handle it during verification anyway.
2493 * g_assert (klass && klass == src->klass && klass == dest->klass);
2497 n = mono_class_native_size (klass, &align);
2499 n = mono_class_value_size (klass, &align);
2501 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2502 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2503 mini_emit_memcpy2 (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* Fallback: memcpy (dest, src, n) through the managed helper */
2507 EMIT_NEW_ICONST (cfg, iargs [2], n);
2509 memcpy_method = get_memcpy_method ();
2510 mono_emit_method_call (cfg, memcpy_method, memcpy_method->signature, iargs, NULL);
/*
 * get_memset_method:
 *   Return the (cached) corlib String.memset(ptr, value, n) helper method.
 *   Aborts if the installed corlib is too old to contain it.
 */
2515 get_memset_method (void)
2517 static MonoMethod *memset_method = NULL;
2518 if (!memset_method) {
2519 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2521 g_error ("Old corlib found. Install a new one");
2523 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a valuetype of type KLASS at the address
 *   in DEST->dreg (CIL 'initobj').  Small types (<= 5 pointers) are zeroed
 *   inline; larger ones call the corlib memset helper with value 0.
 */
2527 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2529 MonoInst *iargs [3];
2532 MonoMethod *memset_method;
2534 /* FIXME: Optimize this for the case when dest is an LDADDR */
2536 mono_class_init (klass);
2537 n = mono_class_value_size (klass, &align);
2539 if (n <= sizeof (gpointer) * 5) {
2540 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
/* Fallback: memset (dest, 0, n) through the managed helper */
2543 memset_method = get_memset_method ();
2545 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2546 EMIT_NEW_ICONST (cfg, iargs [2], n);
2547 mono_emit_method_call (cfg, memset_method, memset_method->signature, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit code that loads the runtime generic context for METHOD.
 *   Three cases, selected by CONTEXT_USED and the method's flags:
 *   - method context used: load the MRGCTX from the (reused) vtable var;
 *   - static method: load the vtable var, and if the method is inflated
 *     with a method context, indirect through
 *     MonoMethodRuntimeGenericContext.class_vtable;
 *   - instance method: load the vtable out of THIS's MonoObject header.
 *   Valuetype methods are not supported (asserted).
 */
2552 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used, MonoInst *this)
2554 g_assert (!method->klass->valuetype);
2556 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2557 MonoInst *mrgctx_loc, *mrgctx_var;
2560 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2562 mrgctx_loc = mono_get_vtable_var (cfg);
2563 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2566 } else if (method->flags & METHOD_ATTRIBUTE_STATIC) {
2567 MonoInst *vtable_loc, *vtable_var;
2571 vtable_loc = mono_get_vtable_var (cfg);
2572 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2574 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The var actually holds an MRGCTX; fetch the class vtable out of it. */
2575 MonoInst *mrgctx_var = vtable_var;
2578 vtable_reg = alloc_preg (cfg);
2579 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2580 vtable_var->type = STACK_PTR;
/* Instance method: the context is THIS object's vtable. */
2586 int vtable_reg, res_reg;
2588 vtable_reg = alloc_preg (cfg);
2589 res_reg = alloc_preg (cfg);
2590 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * EMIT_GET_RGCTX(rgctx, context_used):
 *   Convenience macro used by the IL-decoding loop: loads 'this' (arg 0)
 *   when an instance receiver is needed for the context lookup, then
 *   stores the runtime generic context instruction into RGCTX.
 *   Bails out on valuetype methods via GENERIC_SHARING_FAILURE_....
 */
2595 #define EMIT_GET_RGCTX(rgctx, context_used) do { \
2596 MonoInst *this = NULL; \
2597 GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(*ip); \
2598 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) && !((context_used) & MONO_GENERIC_CONTEXT_USED_METHOD)) \
2599 EMIT_NEW_ARGLOAD (cfg, this, 0); \
2600 (rgctx) = emit_get_rgctx (cfg, method, (context_used), this); \
/*
 * emit_get_rgctx_other_table_ptr:
 *   Emit a call to the per-slot lazy-fetch trampoline which resolves SLOT
 *   of the runtime generic context RGC_PTR, filling it on first use.
 *   Returns the instruction producing the fetched pointer.
 */
2604 emit_get_rgctx_other_table_ptr (MonoCompile *cfg, MonoInst *rgc_ptr, int slot)
2606 MonoMethodSignature *sig = helper_sig_rgctx_lazy_fetch_trampoline;
2607 guint8 *tramp = mini_create_rgctx_lazy_fetch_trampoline (slot);
2609 return mono_emit_native_call (cfg, tramp, sig, &rgc_ptr);
/*
 * emit_get_rgctx_klass:
 *   Register KLASS (as RGCTX_TYPE info) in the current method's runtime
 *   generic context and emit code fetching the corresponding slot.
 */
2613 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2614 MonoInst *rgctx, MonoClass *klass, int rgctx_type)
2616 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2617 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, &klass->byval_arg, rgctx_type, cfg->generic_context);
2619 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
/*
 * emit_get_rgctx_method:
 *   Register CMETHOD (as RGCTX_TYPE info) in the runtime generic context
 *   and emit code fetching the corresponding slot.
 */
2623 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2624 MonoInst *rgctx, MonoMethod *cmethod, int rgctx_type)
2626 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2627 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, cmethod, rgctx_type, cfg->generic_context);
2629 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
/*
 * emit_get_rgctx_field:
 *   Register FIELD (as RGCTX_TYPE info) in the runtime generic context
 *   and emit code fetching the corresponding slot.
 */
2633 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2634 MonoInst *rgctx, MonoClassField *field, int rgctx_type)
2636 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2637 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, field, rgctx_type, cfg->generic_context);
2639 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
/*
 * emit_get_rgctx_method_rgctx:
 *   Register RGCTX_METHOD's own method-RGCTX in the runtime generic
 *   context and emit code fetching the corresponding slot.
 */
2643 emit_get_rgctx_method_rgctx (MonoCompile *cfg, int context_used,
2644 MonoInst *rgctx, MonoMethod *rgctx_method)
2646 guint32 slot = mono_method_lookup_or_register_other_info (cfg->current_method,
2647 context_used & MONO_GENERIC_CONTEXT_USED_METHOD, rgctx_method,
2648 MONO_RGCTX_INFO_METHOD_RGCTX, cfg->generic_context);
2650 return emit_get_rgctx_other_table_ptr (cfg, rgctx, slot);
2654 * Handles unbox of a Nullable<T>. If a rgctx is passed, then shared generic code
/*
 * handle_unbox_nullable:
 *   Emit the unboxing of a Nullable<T> VAL by calling the generated
 *   Nullable Unbox method.  If CONTEXT_USED is set (shared generic code),
 *   the method address is fetched from the RGCTX and called indirectly;
 *   otherwise a direct call is emitted.  Disables AOT since a method ref
 *   cannot be encoded here.
 */
2658 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used, MonoInst *rgctx)
2660 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2661 // Can't encode method ref
2662 cfg->disable_aot = TRUE;
2665 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, rgctx, method,
2666 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2668 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2670 return mono_emit_method_call (cfg, method, mono_method_signature (method), &val, NULL);
/*
 * handle_unbox:
 *   Emit the CIL 'unbox' of SP[0] to valuetype KLASS: checks that the
 *   object is not an array (rank 0), verifies its element class matches
 *   KLASS (through the RGCTX when sharing generic code), throwing
 *   InvalidCastException otherwise, then returns an instruction computing
 *   the address of the boxed payload (obj + sizeof (MonoObject)).
 */
2675 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used, MonoInst *rgctx)
2679 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2680 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2681 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2682 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2684 obj_reg = sp [0]->dreg;
2685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2686 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2688 /* FIXME: generics */
2689 g_assert (klass->rank == 0);
/* The object must not be an array: rank must be 0. */
2692 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2693 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2695 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2696 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the element class from the RGCTX. */
2699 MonoInst *element_class;
2701 /* This assertion is from the unboxcast insn */
2702 g_assert (klass->rank == 0);
2704 element_class = emit_get_rgctx_klass (cfg, context_used, rgctx,
2705 klass->element_class, MONO_RGCTX_INFO_KLASS);
2707 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2708 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2710 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
/* Result: address of the unboxed payload just past the object header. */
2713 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2714 MONO_ADD_INS (cfg->cbb, add);
2715 add->type = STACK_MP;
/*
 * handle_alloc:
 *   Emit the allocation of a new object of type KLASS (FOR_BOX selects the
 *   boxing-specialized allocator).  Strategy, in order:
 *   - MONO_OPT_SHARED: call mono_object_new (domain, klass);
 *   - AOT out-of-line corlib types: call a specialized mscorlib helper
 *     keyed by token index to avoid relocations;
 *   - a GC managed allocator, if available for this vtable;
 *   - otherwise the generic allocation function returned by
 *     mono_class_get_allocation_ftn (optionally passing the instance
 *     size in pointer-words when pass_lw is set).
 */
2722 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2724 MonoInst *iargs [2];
2727 if (cfg->opt & MONO_OPT_SHARED) {
2728 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2729 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2731 alloc_ftn = mono_object_new;
2732 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib) {
2733 /* This happens often in argument checking code, eg. throw new FooException... */
2734 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2735 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2736 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2738 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2739 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2742 if (managed_alloc) {
2743 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2744 return mono_emit_method_call (cfg, managed_alloc, mono_method_signature (managed_alloc), iargs, NULL);
2746 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: allocator wants the instance size rounded up to pointer words. */
2748 guint32 lw = vtable->klass->instance_size;
2749 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2750 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2751 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2754 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2758 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *   Like handle_alloc, but the vtable/class is not known at compile time:
 *   DATA_INST is an instruction producing it at run time (shared generic
 *   code).  With MONO_OPT_SHARED calls mono_object_new (domain, klass);
 *   otherwise calls mono_object_new_specific (vtable).  Not usable
 *   under AOT (asserted).
 */
2762 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2765 MonoInst *iargs [2];
2766 MonoMethod *managed_alloc = NULL;
2770 FIXME: we cannot get managed_alloc here because we can't get
2771 the class's vtable (because it's not a closed class)
2773 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2774 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2777 if (cfg->opt & MONO_OPT_SHARED) {
2778 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2779 iargs [1] = data_inst;
2780 alloc_ftn = mono_object_new;
2782 g_assert (!cfg->compile_aot);
/* managed_alloc is always NULL here today (see FIXME above); branch kept for
 * when a managed allocator can be obtained for open classes. */
2784 if (managed_alloc) {
2785 iargs [0] = data_inst;
2786 return mono_emit_method_call (cfg, managed_alloc,
2787 mono_method_signature (managed_alloc), iargs, NULL);
2790 iargs [0] = data_inst;
2791 alloc_ftn = mono_object_new_specific;
2794 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *   Emit the boxing of valuetype VAL into a new object of type KLASS.
 *   Nullable<T> delegates to the generated Box method (disables AOT since
 *   the method ref cannot be encoded); otherwise allocates the object and
 *   stores the value just past the MonoObject header.
 */
2798 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2800 MonoInst *alloc, *ins;
2802 if (mono_class_is_nullable (klass)) {
2803 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2804 // Can't encode method ref
2805 cfg->disable_aot = TRUE;
2806 return mono_emit_method_call (cfg, method, mono_method_signature (method), &val, NULL);
2809 alloc = handle_alloc (cfg, klass, TRUE);
2811 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *   Boxing variant for shared generic code: the vtable comes from the
 *   run-time instruction DATA_INST instead of a compile-time class.
 *   Nullable types are not supported here (asserted).
 */
2817 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, MonoInst *data_inst)
2819 MonoInst *alloc, *ins;
2821 g_assert (!mono_class_is_nullable (klass));
2823 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2825 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *   Emit the CIL 'castclass' of SRC to KLASS.  NULL references pass
 *   through unchecked (branch to is_null_bb).  Interfaces use the
 *   interface-cast helper; sealed non-array classes are checked with a
 *   single vtable/class pointer comparison; everything else goes through
 *   mini_emit_castclass.  When --debug=casts is enabled, the from/to
 *   classes are stashed in the JIT TLS for better exception messages
 *   and reset again on success.
 */
2831 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2833 MonoBasicBlock *is_null_bb;
2834 int obj_reg = src->dreg;
2835 int vtable_reg = alloc_preg (cfg);
2837 NEW_BBLOCK (cfg, is_null_bb);
/* null always casts successfully */
2839 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2840 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2842 if (mini_get_debug_options ()->better_cast_details) {
2843 int to_klass_reg = alloc_preg (cfg);
2844 int klass_reg = alloc_preg (cfg);
2845 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): stray '.' after the '\n' in this message — looks like a typo */
2848 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* Record the source and destination classes in TLS for the exception text. */
2852 MONO_ADD_INS (cfg->cbb, tls_get);
2853 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2854 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2857 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2858 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2861 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2862 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2863 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2865 int klass_reg = alloc_preg (cfg);
2867 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2869 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2870 /* the remoting code is broken, access the class for now */
2872 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2873 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2875 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2876 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2878 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2880 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2881 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2885 MONO_START_BB (cfg, is_null_bb);
2887 /* Reset the variables holding the cast details */
2888 if (mini_get_debug_options ()->better_cast_details) {
2889 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2891 MONO_ADD_INS (cfg->cbb, tls_get);
2892 /* It is enough to reset the from field */
2893 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * handle_isinst:
 *   Emit the CIL 'isinst' of SRC against KLASS.  Produces an instruction
 *   that yields the object itself on success (is_null_bb path copies
 *   obj_reg into res_reg) and 0 on failure (false_bb).  NULL input yields
 *   NULL.  Special cases: interfaces, arrays (rank + cast_class checks,
 *   with enum/object/SZARRAY refinements), Nullable<T>, sealed classes
 *   (single class-pointer compare), and the generic isinst helper for
 *   everything else.
 */
2900 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2903 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2904 int obj_reg = src->dreg;
2905 int vtable_reg = alloc_preg (cfg);
2906 int res_reg = alloc_preg (cfg);
2908 NEW_BBLOCK (cfg, is_null_bb);
2909 NEW_BBLOCK (cfg, false_bb);
2910 NEW_BBLOCK (cfg, end_bb);
/* null input: result is null (the is_null_bb path) */
2912 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2913 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2915 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2916 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2917 /* the is_null_bb target simply copies the input register to the output */
2918 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2920 int klass_reg = alloc_preg (cfg);
2922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: rank must match, then the element (cast) class is checked. */
2925 int rank_reg = alloc_preg (cfg);
2926 int eclass_reg = alloc_preg (cfg);
2928 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2929 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2930 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2931 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2932 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
2933 if (klass->cast_class == mono_defaults.object_class) {
2934 int parent_reg = alloc_preg (cfg);
2935 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2936 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2937 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2938 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2939 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2940 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2941 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2942 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2943 } else if (klass->cast_class == mono_defaults.enum_class) {
2944 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2945 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2946 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2947 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2949 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2950 /* Check that the object is a vector too */
2951 int bounds_reg = alloc_preg (cfg);
2952 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2953 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2954 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2957 /* the is_null_bb target simply copies the input register to the output */
2958 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2960 } else if (mono_class_is_nullable (klass)) {
2961 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2962 /* the is_null_bb target simply copies the input register to the output */
2963 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
2965 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2966 /* the remoting code is broken, access the class for now */
2968 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2969 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2971 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2972 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2974 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2975 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
2977 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2978 /* the is_null_bb target simply copies the input register to the output */
2979 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* false: result register gets 0 */
2984 MONO_START_BB (cfg, false_bb);
2986 MONO_EMIT_NEW_ICONST (cfg, res_reg, 0);
2987 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* success (or null input): result is the object itself */
2989 MONO_START_BB (cfg, is_null_bb);
2991 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2992 ins->type = STACK_OBJ;
2995 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *   Emit the remoting-aware isinst used with transparent proxies.
 *   Produces an I4 result: 0 = instance of KLASS, 1 = not an instance,
 *   2 = the object is a proxy whose type cannot be determined (so the
 *   check must be redone at run time with full type information).
 */
3001 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3003 /* This opcode takes as input an object reference and a class, and returns:
3004 0) if the object is an instance of the class,
3005 1) if the object is not instance of the class,
3006 2) if the object is a proxy whose type cannot be determined */
3009 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3010 int obj_reg = src->dreg;
3011 int dreg = alloc_ireg (cfg);
3013 int klass_reg = alloc_preg (cfg);
3015 NEW_BBLOCK (cfg, true_bb);
3016 NEW_BBLOCK (cfg, false_bb);
3017 NEW_BBLOCK (cfg, false2_bb);
3018 NEW_BBLOCK (cfg, end_bb);
3019 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1) */
3021 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3022 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3024 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3025 NEW_BBLOCK (cfg, interface_fail_bb);
3027 tmp_reg = alloc_preg (cfg);
3028 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3029 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: decide between "no" and "proxy, unknown". */
3030 MONO_START_BB (cfg, interface_fail_bb);
3031 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3033 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3035 tmp_reg = alloc_preg (cfg);
3036 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3037 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3038 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface: look through a transparent proxy to its remote class. */
3040 tmp_reg = alloc_preg (cfg);
3041 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3042 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3044 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3045 tmp_reg = alloc_preg (cfg);
3046 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3047 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3049 tmp_reg = alloc_preg (cfg);
3050 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3051 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3052 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3054 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3055 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3057 MONO_START_BB (cfg, no_proxy_bb);
3059 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the three possible results into dreg. */
3062 MONO_START_BB (cfg, false_bb);
3064 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3065 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3067 MONO_START_BB (cfg, false2_bb);
3069 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3070 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3072 MONO_START_BB (cfg, true_bb);
3074 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3076 MONO_START_BB (cfg, end_bb);
/* Dummy OP_ICONST gives the caller an instruction carrying dreg/I4 type. */
3079 MONO_INST_NEW (cfg, ins, OP_ICONST);
3081 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *   Emit the remoting-aware castclass used with transparent proxies.
 *   Produces an I4 result: 0 = instance of KLASS (or null), 1 = proxy
 *   whose type cannot be determined; anything else throws
 *   InvalidCastException inline.
 */
3087 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3089 /* This opcode takes as input an object reference and a class, and returns:
3090 0) if the object is an instance of the class,
3091 1) if the object is a proxy whose type cannot be determined
3092 an InvalidCastException exception is thrown otherwhise*/
3095 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3096 int obj_reg = src->dreg;
3097 int dreg = alloc_ireg (cfg);
3098 int tmp_reg = alloc_preg (cfg);
3099 int klass_reg = alloc_preg (cfg);
3101 NEW_BBLOCK (cfg, end_bb);
3102 NEW_BBLOCK (cfg, ok_result_bb);
/* null casts successfully (result 0) */
3104 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3105 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3107 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3108 NEW_BBLOCK (cfg, interface_fail_bb);
3110 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3111 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a proxy with custom type info may proceed
 * (result 1); anything else throws. */
3112 MONO_START_BB (cfg, interface_fail_bb);
3113 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3115 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3117 tmp_reg = alloc_preg (cfg);
3118 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3119 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3120 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3122 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3123 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface: look through a transparent proxy to its remote class. */
3126 NEW_BBLOCK (cfg, no_proxy_bb);
3128 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3129 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3130 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3132 tmp_reg = alloc_preg (cfg);
3133 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3134 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3136 tmp_reg = alloc_preg (cfg);
3137 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3138 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3139 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3141 NEW_BBLOCK (cfg, fail_1_bb);
3143 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3145 MONO_START_BB (cfg, fail_1_bb);
3147 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3148 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3150 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: regular castclass, which throws on mismatch. */
3152 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3155 MONO_START_BB (cfg, ok_result_bb);
3157 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3159 MONO_START_BB (cfg, end_bb);
/* Dummy OP_ICONST gives the caller an instruction carrying dreg/I4 type. */
3162 MONO_INST_NEW (cfg, ins, OP_ICONST);
3164 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *   Inline the construction of a delegate of type KLASS bound to METHOD
 *   with receiver TARGET: allocates the delegate object, stores its
 *   target (skipping a store of a constant NULL), method and invoke_impl
 *   fields.  The remaining checks done by mono_delegate_ctor() happen in
 *   the delegate trampoline instead.
 */
3169 static G_GNUC_UNUSED MonoInst*
3170 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3172 gpointer *trampoline;
3173 MonoInst *obj, *method_ins, *tramp_ins;
3175 obj = handle_alloc (cfg, klass, FALSE);
3177 /* Inline the contents of mono_delegate_ctor */
3179 /* Set target field */
3180 /* Optimize away setting of NULL target */
3181 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3182 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3184 /* Set method field */
3185 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3186 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3188 /* Set invoke_impl field */
3189 trampoline = mono_create_delegate_trampoline (klass);
3190 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_ABS, trampoline);
3191 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3193 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emit a multi-dimensional array allocation (CIL 'newobj' on an array
 *   ctor) with RANK dimensions taken from SP.  Goes through the vararg
 *   array-new icall wrapper; marks the method as using varargs.
 */
3199 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3201 MonoJitICallInfo *info;
3203 /* Need to register the icall so it gets an icall wrapper */
3204 info = mono_get_array_new_va_icall (rank);
3206 cfg->flags |= MONO_CFG_HAS_VARARGS;
3208 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3209 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *   Emit the OP_LOAD_GOTADDR instruction that initializes the GOT
 *   variable, inserting it at the very start of the entry bblock (before
 *   any code that might need it).  No-op if there is no GOT var or it was
 *   already allocated.  Also emits a dummy use in the exit bblock so the
 *   variable stays live across the whole method.
 */
3213 mono_emit_load_got_addr (MonoCompile *cfg)
3215 MonoInst *getaddr, *dummy_use;
3217 if (!cfg->got_var || cfg->got_var_allocated)
3220 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3221 getaddr->dreg = cfg->got_var->dreg;
3223 /* Add it to the start of the first bblock */
3224 if (cfg->bb_entry->code) {
3225 getaddr->next = cfg->bb_entry->code;
3226 cfg->bb_entry->code = getaddr;
3229 MONO_ADD_INS (cfg->bb_entry, getaddr);
3231 cfg->got_var_allocated = TRUE;
3234 * Add a dummy use to keep the got_var alive, since real uses might
3235 * only be generated by the back ends.
3236 * Add it to end_bblock, so the variable's lifetime covers the whole
3238 * It would be better to make the usage of the got var explicit in all
3239 * cases when the backend needs it (i.e. calls, throw etc.), so this
3240 * wouldn't be needed.
3242 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3243 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* True if the CIL opcode at IP is any store-local (stloc.0-3 or stloc.s). */
3246 #define CODE_IS_STLOC(ip) (((ip) [0] >= CEE_STLOC_0 && (ip) [0] <= CEE_STLOC_3) || ((ip) [0] == CEE_STLOC_S))
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD is eligible for inlining into the method being
 *   compiled.  Rejects: generic-sharing compilations; runtime/internal/
 *   noinline/synchronized/pinvoke methods; marshal-by-ref classes;
 *   methods with no header or with exception clauses; bodies larger than
 *   INLINE_LENGTH_LIMIT (overridable via MONO_INLINELIMIT); classes whose
 *   cctor has not yet run (unless it can be triggered here safely);
 *   methods with declarative security; and, under soft-float, any R4
 *   parameter or return.
 */
3249 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3251 MonoMethodHeader *header = mono_method_get_header (method);
3253 #ifdef MONO_ARCH_SOFT_FLOAT
3254 MonoMethodSignature *sig = mono_method_signature (method);
3258 if (cfg->generic_sharing_context)
3261 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): this branch uses 'signature->ret' while the visible local is
 * 'sig' — confirm 'signature' is declared in the elided lines. */
3262 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3263 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3264 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3268 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3269 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3270 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3271 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3272 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3273 (method->klass->marshalbyref) ||
3274 !header || header->num_clauses)
3277 /* also consider num_locals? */
3278 /* Do the size check early to avoid creating vtables */
3279 if (getenv ("MONO_INLINELIMIT")) {
3280 if (header->code_size >= atoi (getenv ("MONO_INLINELIMIT"))) {
3283 } else if (header->code_size >= INLINE_LENGTH_LIMIT)
3287 * if we can initialize the class of the method right away, we do,
3288 * otherwise we don't allow inlining if the class needs initialization,
3289 * since it would mean inserting a call to mono_runtime_class_init()
3290 * inside the inlined code
3292 if (!(cfg->opt & MONO_OPT_SHARED)) {
3293 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3294 if (cfg->run_cctors && method->klass->has_cctor) {
3295 if (!method->klass->runtime_info)
3296 /* No vtable created yet */
3298 vtable = mono_class_vtable (cfg->domain, method->klass);
3301 /* This makes so that inline cannot trigger */
3302 /* .cctors: too many apps depend on them */
3303 /* running with a specific order... */
3304 if (! vtable->initialized)
3306 mono_runtime_class_init (vtable);
3308 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3309 if (!method->klass->runtime_info)
3310 /* No vtable created yet */
3312 vtable = mono_class_vtable (cfg->domain, method->klass);
3315 if (!vtable->initialized)
3320 * If we're compiling for shared code
3321 * the cctor will need to be run at aot method load time, for example,
3322 * or at the end of the compilation of the inlining method.
3324 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3329 * CAS - do not inline methods with declarative security
3330 * Note: this has to be before any possible return TRUE;
3332 if (mono_method_has_declsec (method))
3335 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: R4 values need helper calls, so don't inline them. */
3337 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3339 for (i = 0; i < sig->param_count; ++i)
3340 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *   Return whether a static-field access in METHOD requires emitting a
 *   class-init call for VTABLE's class.  Not needed when the class is
 *   already initialized (JIT only), marked BeforeFieldInit, has no cctor
 *   to run, or the access is from an instance method of the same class
 *   (the cctor already ran before the instance existed).
 */
3348 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3350 if (vtable->initialized && !cfg->compile_aot)
3353 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3356 if (!mono_class_needs_cctor_run (vtable->klass, method))
3359 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3360 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit the address computation for element INDEX of one-dimensional
 *   array ARR with element type KLASS (CIL 'ldelema'), including the
 *   bounds check.  On 64-bit, the 32-bit index is sign-extended first.
 *   On x86/amd64 a single LEA is used for power-of-two element sizes;
 *   otherwise the address is computed as arr + index*size + vector offset.
 */
3367 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3371 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3373 mono_class_init (klass);
3374 size = mono_class_array_element_size (klass);
3376 mult_reg = alloc_preg (cfg);
3377 array_reg = arr->dreg;
3378 index_reg = index->dreg;
3380 #if SIZEOF_VOID_P == 8
3381 /* The array reg is 64 bits but the index reg is only 32 */
3382 index2_reg = alloc_preg (cfg);
3383 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3385 index2_reg = index_reg;
3388 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3390 #if defined(__i386__) || defined(__x86_64__)
3391 if (size == 1 || size == 2 || size == 4 || size == 8) {
3392 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3394 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3395 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector) */
3401 add_reg = alloc_preg (cfg);
3403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3405 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3406 ins->type = STACK_PTR;
3407 MONO_ADD_INS (cfg->cbb, ins);
3412 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emit IR computing the element address for a two-dimensional array access
 *   arr [index1, index2], performing lower-bound adjustment and a bounds
 *   check on each dimension via the MonoArray bounds table.
 */
3414 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3416 int bounds_reg = alloc_preg (cfg);
3417 int add_reg = alloc_preg (cfg);
3418 int mult_reg = alloc_preg (cfg);
3419 int mult2_reg = alloc_preg (cfg);
3420 int low1_reg = alloc_preg (cfg);
3421 int low2_reg = alloc_preg (cfg);
3422 int high1_reg = alloc_preg (cfg);
3423 int high2_reg = alloc_preg (cfg);
3424 int realidx1_reg = alloc_preg (cfg);
3425 int realidx2_reg = alloc_preg (cfg);
3426 int sum_reg = alloc_preg (cfg);
3431 mono_class_init (klass);
3432 size = mono_class_array_element_size (klass);
3434 index1 = index_ins1->dreg;
3435 index2 = index_ins2->dreg;
3437 /* range checking */
3438 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3439 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound, then unsigned-compare against
 * length (LE_UN also catches negative realidx via wraparound). */
3441 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3442 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3443 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3444 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3445 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3446 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3447 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: second MonoArrayBounds entry, offset by sizeof (MonoArrayBounds). */
3449 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3450 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3451 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3452 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3453 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3454 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3455 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * dim2_length + realidx2) * size + vector offset. */
3457 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3458 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3460 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3461 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3463 ins->type = STACK_MP;
3465 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Dispatch for array Address/Get/Set intrinsics: use the fast inline paths
 *   for rank 1 (and rank 2 where OP_LMUL is available), otherwise fall back
 *   to a call to the marshal-generated Address wrapper.
 *   is_set: TRUE for the Set accessor, whose last parameter is the value.
 */
3472 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3476 MonoMethod *addr_method;
3479 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3482 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3484 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3485 /* emit_ldelema_2 depends on OP_LMUL */
3486 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3487 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* Generic rank: call the runtime-generated Array.Address helper. */
3491 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3492 addr_method = mono_marshal_get_array_address (rank, element_size);
3493 addr = mono_emit_method_call (cfg, addr_method, addr_method->signature, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *   Try to replace a call to CMETHOD with an inline IR sequence (an
 *   intrinsic).  Returns the resulting instruction, or falls through to the
 *   arch-specific intrinsics at the end.  Covers String accessors,
 *   Object.GetType/GetHashCode, Array getters, RuntimeHelpers,
 *   Thread, Interlocked, Debugger.Break and Environment intrinsics.
 */
3499 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3501 MonoInst *ins = NULL;
3503 static MonoClass *runtime_helpers_class = NULL;
3504 if (! runtime_helpers_class)
3505 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3506 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
3508 if (cmethod->klass == mono_defaults.string_class) {
3509 if (strcmp (cmethod->name, "get_Chars") == 0) {
3510 int dreg = alloc_ireg (cfg);
3511 int index_reg = alloc_preg (cfg);
3512 int mult_reg = alloc_preg (cfg);
3513 int add_reg = alloc_preg (cfg);
3515 #if SIZEOF_VOID_P == 8
3516 /* The array reg is 64 bits but the index reg is only 32 */
3517 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3519 index_reg = args [1]->dreg;
3521 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3523 #if defined(__i386__) || defined(__x86_64__)
3524 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3525 add_reg = ins->dreg;
3526 /* Avoid a warning */
3528 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3531 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3532 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3533 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3534 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3536 type_from_op (ins, NULL, NULL);
3538 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3539 int dreg = alloc_ireg (cfg);
3540 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3541 args [0]->dreg, G_STRUCT_OFFSET (MonoString, length));
3542 type_from_op (ins, NULL, NULL);
3545 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3546 int mult_reg = alloc_preg (cfg);
3547 int add_reg = alloc_preg (cfg);
3549 /* The corlib functions check for oob already. */
3550 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3551 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3552 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* --- System.Object intrinsics --- */
3555 } else if (cmethod->klass == mono_defaults.object_class) {
3557 if (strcmp (cmethod->name, "GetType") == 0) {
3558 int dreg = alloc_preg (cfg);
3559 int vt_reg = alloc_preg (cfg);
3560 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3561 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3562 type_from_op (ins, NULL, NULL);
/* Hashing the object address is only valid with a non-moving collector. */
3565 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3566 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3567 int dreg = alloc_ireg (cfg);
3568 int t1 = alloc_ireg (cfg);
3570 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3571 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3572 ins->type = STACK_I4;
3576 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3577 MONO_INST_NEW (cfg, ins, OP_NOP);
3578 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array getters --- */
3582 } else if (cmethod->klass == mono_defaults.array_class) {
3583 if (cmethod->name [0] != 'g')
3586 if (strcmp (cmethod->name, "get_Rank") == 0) {
3587 int dreg = alloc_ireg (cfg);
3588 int vtable_reg = alloc_preg (cfg);
3589 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3590 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3591 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3592 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3593 type_from_op (ins, NULL, NULL);
3596 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3597 int dreg = alloc_ireg (cfg);
3599 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3600 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3601 type_from_op (ins, NULL, NULL);
3606 } else if (cmethod->klass == runtime_helpers_class) {
3608 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3609 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3613 } else if (cmethod->klass == mono_defaults.thread_class) {
3614 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3615 ins->dreg = alloc_preg (cfg);
3616 ins->type = STACK_OBJ;
3617 MONO_ADD_INS (cfg->cbb, ins);
3619 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3620 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3621 MONO_ADD_INS (cfg->cbb, ins);
3624 } else if (mini_class_is_system_array (cmethod->klass) &&
3625 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3626 MonoInst *addr, *store, *load;
3627 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3629 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3630 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3631 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* --- System.Threading.Interlocked intrinsics --- */
3633 } else if (cmethod->klass->image == mono_defaults.corlib &&
3634 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3635 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3638 #if SIZEOF_VOID_P == 8
3639 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3640 /* 64 bit reads are already atomic */
3641 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3642 ins->dreg = mono_alloc_preg (cfg);
3643 ins->inst_basereg = args [0]->dreg;
3644 ins->inst_offset = 0;
3645 MONO_ADD_INS (cfg->cbb, ins);
3649 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3650 if (strcmp (cmethod->name, "Increment") == 0) {
3651 MonoInst *ins_iconst;
3654 if (fsig->params [0]->type == MONO_TYPE_I4)
3655 opcode = OP_ATOMIC_ADD_NEW_I4;
3656 #if SIZEOF_VOID_P == 8
3657 else if (fsig->params [0]->type == MONO_TYPE_I8)
3658 opcode = OP_ATOMIC_ADD_NEW_I8;
3661 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3662 ins_iconst->inst_c0 = 1;
3663 ins_iconst->dreg = mono_alloc_ireg (cfg);
3664 MONO_ADD_INS (cfg->cbb, ins_iconst);
3666 MONO_INST_NEW (cfg, ins, opcode);
3667 ins->dreg = mono_alloc_ireg (cfg);
3668 ins->inst_basereg = args [0]->dreg;
3669 ins->inst_offset = 0;
3670 ins->sreg2 = ins_iconst->dreg;
3671 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3672 MONO_ADD_INS (cfg->cbb, ins);
3674 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3675 MonoInst *ins_iconst;
3678 if (fsig->params [0]->type == MONO_TYPE_I4)
3679 opcode = OP_ATOMIC_ADD_NEW_I4;
3680 #if SIZEOF_VOID_P == 8
3681 else if (fsig->params [0]->type == MONO_TYPE_I8)
3682 opcode = OP_ATOMIC_ADD_NEW_I8;
3685 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3686 ins_iconst->inst_c0 = -1;
3687 ins_iconst->dreg = mono_alloc_ireg (cfg);
3688 MONO_ADD_INS (cfg->cbb, ins_iconst);
3690 MONO_INST_NEW (cfg, ins, opcode);
3691 ins->dreg = mono_alloc_ireg (cfg);
3692 ins->inst_basereg = args [0]->dreg;
3693 ins->inst_offset = 0;
3694 ins->sreg2 = ins_iconst->dreg;
3695 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3696 MONO_ADD_INS (cfg->cbb, ins);
3698 } else if (strcmp (cmethod->name, "Add") == 0) {
3701 if (fsig->params [0]->type == MONO_TYPE_I4)
3702 opcode = OP_ATOMIC_ADD_NEW_I4;
3703 #if SIZEOF_VOID_P == 8
3704 else if (fsig->params [0]->type == MONO_TYPE_I8)
3705 opcode = OP_ATOMIC_ADD_NEW_I8;
3709 MONO_INST_NEW (cfg, ins, opcode);
3710 ins->dreg = mono_alloc_ireg (cfg);
3711 ins->inst_basereg = args [0]->dreg;
3712 ins->inst_offset = 0;
3713 ins->sreg2 = args [1]->dreg;
/* FIX: opcode is assigned OP_ATOMIC_ADD_NEW_I4/I8 above, but this line
 * compared against OP_ATOMIC_ADD_I4 (no _NEW_), so the I4 case wrongly got
 * STACK_I8.  Compare the same constant the Increment/Decrement branches use. */
3714 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3715 MONO_ADD_INS (cfg->cbb, ins);
3718 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3720 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3721 if (strcmp (cmethod->name, "Exchange") == 0) {
3724 if (fsig->params [0]->type == MONO_TYPE_I4)
3725 opcode = OP_ATOMIC_EXCHANGE_I4;
3726 #if SIZEOF_VOID_P == 8
3727 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3728 (fsig->params [0]->type == MONO_TYPE_I) ||
3729 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3730 opcode = OP_ATOMIC_EXCHANGE_I8;
3732 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3733 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3734 opcode = OP_ATOMIC_EXCHANGE_I4;
3739 MONO_INST_NEW (cfg, ins, opcode);
3740 ins->dreg = mono_alloc_ireg (cfg);
3741 ins->inst_basereg = args [0]->dreg;
3742 ins->inst_offset = 0;
3743 ins->sreg2 = args [1]->dreg;
3744 MONO_ADD_INS (cfg->cbb, ins);
3746 switch (fsig->params [0]->type) {
3748 ins->type = STACK_I4;
3752 ins->type = STACK_I8;
3754 case MONO_TYPE_OBJECT:
3755 ins->type = STACK_OBJ;
3758 g_assert_not_reached ();
3761 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3763 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3765 * Can't implement CompareExchange methods this way since they have
3766 * three arguments. We can implement one of the common cases, where the new
3767 * value is a constant.
3769 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3770 if (fsig->params [1]->type == MONO_TYPE_I4 && args [2]->opcode == OP_ICONST) {
3771 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3772 ins->dreg = alloc_ireg (cfg);
3773 ins->sreg1 = args [0]->dreg;
3774 ins->sreg2 = args [1]->dreg;
3775 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3776 ins->type = STACK_I4;
3777 MONO_ADD_INS (cfg->cbb, ins);
3779 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3781 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
/* --- other corlib intrinsics --- */
3785 } else if (cmethod->klass->image == mono_defaults.corlib) {
3786 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3787 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3788 MONO_INST_NEW (cfg, ins, OP_BREAK);
3789 MONO_ADD_INS (cfg->cbb, ins);
3792 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3793 && strcmp (cmethod->klass->name, "Environment") == 0) {
3794 #ifdef PLATFORM_WIN32
3795 EMIT_NEW_ICONST (cfg, ins, 1);
3797 EMIT_NEW_ICONST (cfg, ins, 0);
3801 } else if (cmethod->klass == mono_defaults.math_class) {
3803 * There is general branches code for Min/Max, but it does not work for
3805 * http://everything2.com/?node_id=1051618
/* Fall back to arch-specific intrinsics (Math.*, etc.). */
3809 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3813 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect selected calls to alternative implementations.  Currently only
 *   String.InternalAllocateStr is redirected to the GC's managed allocator
 *   (when one is available); returns the call instruction or falls through.
 */
3816 inline static MonoInst*
3817 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3818 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3820 if (method->klass == mono_defaults.string_class) {
3821 /* managed string allocation support */
3822 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3823 MonoInst *iargs [2];
3824 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3825 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* iargs: [0] = string vtable, [1] = requested length (original arg 0). */
3828 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3829 iargs [1] = args [0];
3830 return mono_emit_method_call (cfg, managed_alloc, mono_method_signature (managed_alloc), iargs, this);
/*
 * mono_save_args:
 *   Save the call arguments in SP into freshly created local variables so an
 *   inlined method body can refer to them as locals; fills ARGS with the vars.
 */
3837 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp, MonoInst **args)
3839 MonoInst *store, *temp;
3842 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is the implicit 'this' when hasthis; its static type is taken from
 * the stack entry, the rest come from the signature (index shifted by hasthis). */
3843 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3846 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3847 * would be different than the MonoInst's used to represent arguments, and
3848 * the ldelema implementation can't deal with that.
3849 * Solution: When ldelema is used on an inline argument, create a var for
3850 * it, emit ldelema on that var, and emit the saving code below in
3851 * inline_method () if needed.
3853 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3855 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, *sp);
3856 store->cil_code = sp [0]->cil_code;
/* Debug knobs: restrict inlining to methods whose names match the
 * MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT environment variables.
 * NOTE(review): both are enabled (1) here; upstream typically ships these as
 * 0 since the checks cost a mono_method_full_name per inline candidate —
 * confirm this is intentional. */
3861 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3862 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3864 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Returns TRUE when CALLED_METHOD's full name starts with the prefix from
 *   MONO_INLINE_CALLED_METHOD_NAME_LIMIT.  The limit string is cached on
 *   first use; unset env var means an empty prefix, i.e. everything matches.
 */
3866 mono_inline_called_method_name_limit = NULL;
3867 static gboolean check_inline_called_method_name_limit (MonoMethod *called_method) {
3868 char *called_method_name = mono_method_full_name (called_method, TRUE);
3871 if (mono_inline_called_method_name_limit == NULL) {
3872 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
3873 if (limit_string != NULL) {
3874 mono_inline_called_method_name_limit = limit_string;
3876 mono_inline_called_method_name_limit = (char *) "";
3880 strncmp_result = strncmp (called_method_name, mono_inline_called_method_name_limit, strlen (mono_inline_called_method_name_limit));
3881 g_free (called_method_name);
3883 //return (strncmp_result <= 0);
3884 return (strncmp_result == 0);
3888 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Mirror of check_inline_called_method_name_limit for the CALLER side,
 *   driven by MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 */
3890 mono_inline_caller_method_name_limit = NULL;
3891 static gboolean check_inline_caller_method_name_limit (MonoMethod *caller_method) {
3892 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
3895 if (mono_inline_caller_method_name_limit == NULL) {
3896 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
3897 if (limit_string != NULL) {
3898 mono_inline_caller_method_name_limit = limit_string;
3900 mono_inline_caller_method_name_limit = (char *) "";
3904 strncmp_result = strncmp (caller_method_name, mono_inline_caller_method_name_limit, strlen (mono_inline_caller_method_name_limit));
3905 g_free (caller_method_name);
3907 //return (strncmp_result <= 0);
3908 return (strncmp_result == 0);
/*
 * inline_method:
 *   Try to inline CMETHOD at the current emission point.  Saves the parts of
 *   CFG state that method-to-IR conversion clobbers, converts the callee via
 *   mono_method_to_ir2 into fresh start/end bblocks, then either splices the
 *   result into the caller's CFG (on success) or rolls everything back.
 *   Returns nonzero on success (the elided tail presumably returns the cost /
 *   rvar — TODO confirm against full source).
 */
3913 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
3914 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
3916 MonoInst *ins, *rvar = NULL;
3917 MonoMethodHeader *cheader;
3918 MonoBasicBlock *ebblock, *sbblock;
3920 MonoMethod *prev_inlined_method;
3921 MonoInst **prev_locals, **prev_args;
3922 guint prev_real_offset;
3923 GHashTable *prev_cbb_hash;
3924 MonoBasicBlock **prev_cil_offset_to_bb;
3925 MonoBasicBlock *prev_cbb;
3926 unsigned char* prev_cil_start;
3927 guint32 prev_cil_offset_to_bb_len;
3928 MonoMethod *prev_current_method;
3929 MonoGenericContext *prev_generic_context;
3931 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var-driven name filters (debug only). */
3933 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
3934 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
3937 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
3938 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
3942 if (cfg->verbose_level > 2)
3943 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
3945 if (!cmethod->inline_info) {
3946 mono_jit_stats.inlineable_methods++;
3947 cmethod->inline_info = 1;
3949 /* allocate space to store the return value */
3950 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3951 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
3954 /* allocate local variables */
3955 cheader = mono_method_get_header (cmethod);
3956 prev_locals = cfg->locals;
3957 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
3958 for (i = 0; i < cheader->num_locals; ++i)
3959 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
3961 prev_args = cfg->args;
3963 /* allocate start and end blocks */
3964 /* This is needed so if the inline is aborted, we can clean up */
3965 NEW_BBLOCK (cfg, sbblock);
3966 sbblock->real_offset = real_offset;
3968 NEW_BBLOCK (cfg, ebblock);
3969 ebblock->block_num = cfg->num_bblocks++;
3970 ebblock->real_offset = real_offset;
/* Save every cfg field that mono_method_to_ir2 will repoint at the callee. */
3972 prev_inlined_method = cfg->inlined_method;
3973 cfg->inlined_method = cmethod;
3974 cfg->ret_var_set = FALSE;
3975 prev_real_offset = cfg->real_offset;
3976 prev_cbb_hash = cfg->cbb_hash;
3977 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
3978 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
3979 prev_cil_start = cfg->cil_start;
3980 prev_cbb = cfg->cbb;
3981 prev_current_method = cfg->current_method;
3982 prev_generic_context = cfg->generic_context;
3984 costs = mono_method_to_ir2 (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
/* Restore caller state regardless of success. */
3986 cfg->inlined_method = prev_inlined_method;
3987 cfg->real_offset = prev_real_offset;
3988 cfg->cbb_hash = prev_cbb_hash;
3989 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
3990 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
3991 cfg->cil_start = prev_cil_start;
3992 cfg->locals = prev_locals;
3993 cfg->args = prev_args;
3994 cfg->current_method = prev_current_method;
3995 cfg->generic_context = prev_generic_context;
/* Accept the inline when conversion succeeded at acceptable cost (< 60),
 * or unconditionally when the caller forces it. */
3997 if ((costs >= 0 && costs < 60) || inline_allways) {
3998 if (cfg->verbose_level > 2)
3999 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4001 mono_jit_stats.inlined_methods++;
4003 /* always add some code to avoid block split failures */
4004 MONO_INST_NEW (cfg, ins, OP_NOP);
4005 MONO_ADD_INS (prev_cbb, ins);
4007 prev_cbb->next_bb = sbblock;
4008 link_bblock (cfg, prev_cbb, sbblock);
4011 * Get rid of the begin and end bblocks if possible to aid local
4014 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4016 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4017 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4019 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4020 MonoBasicBlock *prev = ebblock->in_bb [0];
4021 mono_merge_basic_blocks (cfg, prev, ebblock);
4029 * If the inlined method contains only a throw, then the ret var is not
4030 * set, so set it to a dummy value.
4032 if (!cfg->ret_var_set) {
4033 static double r8_0 = 0.0;
4035 switch (rvar->type) {
4037 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4040 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4045 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4048 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4049 ins->type = STACK_R8;
4050 ins->inst_p0 = (void*)&r8_0;
4051 ins->dreg = rvar->dreg;
4052 MONO_ADD_INS (cfg->cbb, ins);
4055 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4058 g_assert_not_reached ();
4062 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Failure path: discard the partial conversion and clear any error state. */
4067 if (cfg->verbose_level > 2)
4068 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4069 cfg->exception_type = MONO_EXCEPTION_NONE;
4070 mono_loader_clear_error ();
4072 /* This gets rid of the newly added bblocks */
4073 cfg->cbb = prev_cbb;
4079 * Some of these comments may well be out-of-date.
4080 * Design decisions: we do a single pass over the IL code (and we do bblock
4081 * splitting/merging in the few cases when it's required: a back jump to an IL
4082 * address that was not already seen as bblock starting point).
4083 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4084 * Complex operations are decomposed in simpler ones right away. We need to let the
4085 * arch-specific code peek and poke inside this process somehow (except when the
4086 * optimizations can take advantage of the full semantic info of coarse opcodes).
4087 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4088 * MonoInst->opcode initially is the IL opcode or some simplification of that
4089 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4090 * opcode with value bigger than OP_LAST.
4091 * At this point the IR can be handed over to an interpreter, a dumb code generator
4092 * or to the optimizing code generator that will translate it to SSA form.
4094 * Profiling directed optimizations.
4095 * We may compile by default with few or no optimizations and instrument the code
4096 * or the user may indicate what methods to optimize the most either in a config file
4097 * or through repeated runs where the compiler applies offline the optimizations to
4098 * each method and then decides if it was worth it.
/* IL-verification helper macros used by the main conversion loop; each jumps
 * to the UNVERIFIED handler on failure. */
4101 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4102 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4103 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4104 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4105 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4106 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4107 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
/* NOTE(review): unlike the others this expands to a bare if {...} block, not
 * a do { } while (0) — using it as the body of an if/else invites a
 * dangling-else; callers must not follow it with an else. */
4108 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4110 /* offset from br.s -> br like opcodes */
4111 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   TRUE if the IL address IP does not start a different basic block than BB,
 *   i.e. it is safe to keep decoding into the current block.
 */
4114 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4116 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4118 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL in [START, END): create a basic block at every
 *   branch target and at the instruction following each branch, so the main
 *   conversion pass can split blocks correctly.  Marks throw-only blocks as
 *   out-of-line.
 */
4122 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4124 unsigned char *ip = start;
4125 unsigned char *target;
4128 MonoBasicBlock *bblock;
4129 const MonoOpcode *opcode;
4132 cli_addr = ip - start;
4133 i = mono_opcode_value ((const guint8 **)&ip, end);
4136 opcode = &mono_opcodes [i];
/* Advance over the operand; only branch operands create bblocks. */
4137 switch (opcode->argument) {
4138 case MonoInlineNone:
4141 case MonoInlineString:
4142 case MonoInlineType:
4143 case MonoInlineField:
4144 case MonoInlineMethod:
4147 case MonoShortInlineR:
4154 case MonoShortInlineVar:
4155 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement relative to the next instruction. */
4158 case MonoShortInlineBrTarget:
4159 target = start + cli_addr + 2 + (signed char)ip [1];
4160 GET_BBLOCK (cfg, bblock, target);
4163 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte displacement. */
4165 case MonoInlineBrTarget:
4166 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4167 GET_BBLOCK (cfg, bblock, target);
4170 GET_BBLOCK (cfg, bblock, ip);
/* switch: N 4-byte targets, all relative to the end of the instruction. */
4172 case MonoInlineSwitch: {
4173 guint32 n = read32 (ip + 1);
4176 cli_addr += 5 + 4 * n;
4177 target = start + cli_addr;
4178 GET_BBLOCK (cfg, bblock, target);
4180 for (j = 0; j < n; ++j) {
4181 target = start + cli_addr + (gint32)read32 (ip);
4182 GET_BBLOCK (cfg, bblock, target);
4192 g_assert_not_reached ();
4195 if (i == CEE_THROW) {
4196 unsigned char *bb_start = ip - 1;
4198 /* Find the start of the bblock containing the throw */
4200 while ((bb_start >= start) && !bblock) {
4201 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4205 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of M, allowing open
 *   constructed types.  Wrapper methods store resolved data directly, so
 *   fetch it from the wrapper instead of the metadata tables.
 */
4214 static inline MonoMethod *
4215 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4219 if (m->wrapper_type != MONO_WRAPPER_NONE)
4220 return mono_method_get_wrapper_data (m, token);
4222 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with generic
 *   sharing, a method on an open constructed type is rejected (the elided
 *   branch presumably nulls/faults it — TODO confirm against full source).
 */
4227 static inline MonoMethod *
4228 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4230 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4232 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in METHOD's context (wrapper data for
 *   wrappers, metadata lookup otherwise) and initialize it.
 */
4238 static inline MonoClass*
4239 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4243 if (method->wrapper_type != MONO_WRAPPER_NONE)
4244 klass = mono_method_get_wrapper_data (method, token);
4246 klass = mono_class_get_full (method->klass->image, token, context);
4248 mono_class_init (klass);
4253 * Returns TRUE if the JIT should abort inlining because "callee"
4254 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link demands for CALLER -> CALLEE.  ECMA link demands emit
 *   a call that throws a SecurityException at the call site; other failures
 *   are recorded on the cfg as a pending MONO_EXCEPTION_SECURITY_LINKDEMAND.
 */
4257 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4261 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4265 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4266 if (result == MONO_JIT_SECURITY_OK)
4269 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4270 /* Generate code to throw a SecurityException before the actual call/link */
4271 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4274 NEW_ICONST (cfg, args [0], 4);
4275 NEW_METHODCONST (cfg, args [1], caller);
4276 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, mono_method_signature (secman->linkdemandsecurityexception), args, NULL);
4277 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4278 /* don't hide previous results */
4279 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4280 cfg->exception_data = result;
/*
 * method_access_exception:
 *   Lazily look up and cache SecurityManager.MethodAccessException (the
 *   2-argument overload) used to raise method-access violations.
 */
4288 method_access_exception (void)
4290 static MonoMethod *method = NULL;
4293 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4294 method = mono_class_get_method_from_name (secman->securitymanager,
4295 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 *   Emit a call that throws MethodAccessException (CALLER, CALLEE) at the
 *   current emission point.
 */
4302 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4303 MonoBasicBlock *bblock, unsigned char *ip)
4305 MonoMethod *thrower = method_access_exception ();
4308 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4309 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4310 mono_emit_method_call (cfg, thrower, mono_method_signature (thrower), args, NULL);
/*
 * verification_exception:
 *   Lazily look up and cache SecurityManager.VerificationException (no-arg
 *   overload) used to raise verification failures at runtime.
 */
4314 verification_exception (void)
4316 static MonoMethod *method = NULL;
4319 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4320 method = mono_class_get_method_from_name (secman->securitymanager,
4321 "VerificationException", 0);
/*
 * emit_throw_verification_exception:
 *   Emit a call that throws VerificationException at the current point.
 */
4328 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4330 MonoMethod *thrower = verification_exception ();
4332 mono_emit_method_call (cfg, thrower, mono_method_signature (thrower), NULL, NULL);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security: a transparent caller may not call a critical callee
 *   unless either side is SafeCritical; on violation, emit a throwing call.
 */
4336 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4337 MonoBasicBlock *bblock, unsigned char *ip)
4339 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4340 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4341 gboolean is_safe = TRUE;
/* Allowed when caller is at least as privileged, or either is SafeCritical. */
4343 if (!(caller_level >= callee_level ||
4344 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4345 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4350 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
 * method_is_safe:
 *   Test hook: methods literally named "unsafeMethod" are flagged (the
 *   surrounding logic is elided here — presumably returns FALSE for them,
 *   TRUE otherwise; TODO confirm against full source).
 */
4354 method_is_safe (MonoMethod *method)
4357 if (strcmp (method->name, "unsafeMethod") == 0)
4364 * Check that the IL instructions at ip are the array initialization
4365 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the "dup; ldtoken <field>; call RuntimeHelpers.
 *   InitializeArray" idiom following a newarr, and if the element type and
 *   byte order allow a raw copy, return a pointer to the field's RVA data
 *   (or the RVA itself for AOT) with *out_size set.  Returns NULL when the
 *   pattern does not match or the data cannot be copied verbatim.
 */
4368 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size)
4371 * newarr[System.Int32]
4373 * ldtoken field valuetype ...
4374 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token's table byte (a Field token). */
4376 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4377 guint32 token = read32 (ip + 7);
4378 guint32 field_token = read32 (ip + 2);
4379 guint32 field_index = field_token & 0xffffff;
4381 const char *data_ptr;
4383 MonoMethod *cmethod;
4384 MonoClass *dummy_class;
4385 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4391 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The callee must really be RuntimeHelpers.InitializeArray from corlib. */
4394 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4396 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4397 case MONO_TYPE_BOOLEAN:
4401 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4402 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4403 case MONO_TYPE_CHAR:
4413 return NULL; /* stupid ARM FP swapped format */
/* The static field must be large enough to hold len elements. */
4423 if (size > mono_type_size (field->type, &dummy_align))
4426 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4427 field_index = read32 (ip + 2) & 0xffffff;
4428 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4429 data_ptr = mono_image_rva_map (method->klass->image, rva);
4430 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4431 /* for aot code we do the lookup on load */
4432 if (aot && data_ptr)
4433 return GUINT_TO_POINTER (rva);
/*
 * set_exception_type_from_invalid_il:
 *   Record a pending InvalidProgramException on CFG, including the method
 *   name and a disassembly of the offending IL instruction at IP.
 */
4440 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4442 char *method_fname = mono_method_full_name (method, TRUE);
4445 if (mono_method_get_header (method)->code_size == 0)
4446 method_code = g_strdup ("method body is empty.");
4448 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4449 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4450 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4451 g_free (method_fname);
4452 g_free (method_code);
/*
 * set_exception_object:
 *
 * Store a pre-built managed exception on CFG to be raised instead of
 * completing the compilation. The exception pointer is registered as a GC
 * root *before* being stored so the collector never sees an untracked
 * reference to the managed object.
 */
4456 set_exception_object (MonoCompile *cfg, MonoException *exception)
4458 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4459 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4460 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 * Return TRUE if KLASS is a reference type, resolving generic type
 * parameters through the generic sharing context first when one is active
 * (a shared type variable may stand for either a reference or a value type,
 * so the raw byval_arg cannot be trusted in that case).
 */
4464 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4468 if (cfg->generic_sharing_context)
4469 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4471 type = &klass->byval_arg;
4472 return MONO_TYPE_IS_REFERENCE (type);
4476 * mono_decompose_array_access_opts:
4478 * Decompose array access opcodes.
/*
 * Lowers high-level array opcodes (array length load, bounds check, newarr)
 * into concrete loads, arch bounds-check sequences, or JIT icalls.
 * NOTE(review): the case labels for the length-load and newarr opcodes, plus
 * the per-case `break`s, are not visible in this chunk — comments below stick
 * to the visible statements.
 */
4481 mono_decompose_array_access_opts (MonoCompile *cfg)
4483 MonoBasicBlock *bb, *first_bb;
4486 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4487 * can be executed anytime. It should be run before decompose_long
4491 * Create a dummy bblock and emit code into it so we can use the normal
4492 * code generation macros.
4494 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4495 first_bb = cfg->cbb;
4497 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4499 MonoInst *prev = NULL;
4501 MonoInst *iargs [3];
/* Skip bblocks with no array opcodes — cheap flag set during IR generation. */
4504 if (!bb->has_array_access)
4507 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4509 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4515 for (ins = bb->code; ins; ins = ins->next) {
4516 switch (ins->opcode) {
/* Array length is stored at a fixed offset in the MonoArray header. */
4518 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4519 G_STRUCT_OFFSET (MonoArray, max_length));
4520 MONO_ADD_INS (cfg->cbb, dest);
4522 case OP_BOUNDS_CHECK:
4523 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* Shared (appdomain-independent) code cannot bake in a vtable pointer, so
 * it calls mono_array_new with an explicit domain argument instead. */
4526 if (cfg->opt & MONO_OPT_SHARED) {
4527 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4528 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4529 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4530 iargs [2]->dreg = ins->sreg1;
4532 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4533 dest->dreg = ins->dreg;
/* Non-shared path: resolve the 1-dim array vtable now and use the faster
 * mono_array_new_specific icall. */
4535 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4538 NEW_VTABLECONST (cfg, iargs [0], vtable);
4539 MONO_ADD_INS (cfg->cbb, iargs [0]);
4540 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4541 iargs [1]->dreg = ins->sreg1;
4543 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4544 dest->dreg = ins->dreg;
4551 g_assert (cfg->cbb == first_bb);
/* If the case above emitted anything into the scratch bblock, splice it in
 * place of the original instruction, then reset the scratch block for reuse. */
4553 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4554 /* Replace the original instruction with the new code sequence */
4556 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4557 first_bb->code = first_bb->last_ins = NULL;
4558 first_bb->in_count = first_bb->out_count = 0;
4559 cfg->cbb = first_bb;
4566 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4576 #ifdef MONO_ARCH_SOFT_FLOAT
4579 * mono_handle_soft_float:
4581 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4582 * similar to long support on 32 bit platforms. 32 bit float values require special
4583 * handling when used as locals, arguments, and in calls.
4584 * One big problem with soft-float is that there are few r4 test cases in our test suite.
/*
 * NOTE(review): many case labels and break statements of the big switch below
 * are not visible in this chunk; comments describe only the visible statements.
 * The general scheme: rewrite every fp opcode into integer/long opcodes or
 * icalls so no hardware FP instruction is ever emitted.
 */
4587 mono_handle_soft_float (MonoCompile *cfg)
4589 MonoBasicBlock *bb, *first_bb;
4592 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4596 * Create a dummy bblock and emit code into it so we can use the normal
4597 * code generation macros.
4599 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4600 first_bb = cfg->cbb;
4602 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4604 MonoInst *prev = NULL;
4607 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4609 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4615 for (ins = bb->code; ins; ins = ins->next) {
4616 const char *spec = INS_INFO (ins->opcode);
4618 /* Most fp operations are handled automatically by opcode emulation */
4620 switch (ins->opcode) {
/* r8 constant: reinterpret the double's bit pattern as an i8 constant
 * (union access via d, whose declaration is outside this view). */
4623 d.vald = *(double*)ins->inst_p0;
4624 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4629 /* We load the r8 value */
4630 d.vald = *(float*)ins->inst_p0;
4631 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register move becomes a 64-bit integer pair move. */
4635 ins->opcode = OP_LMOVE;
/* Extract one half of the vreg pair: sreg+1 is the low word, sreg+2 the
 * high word — presumably OP_FGETLOW32/OP_FGETHIGH32; case labels not
 * visible, TODO confirm. */
4638 ins->opcode = OP_MOVE;
4639 ins->sreg1 = ins->sreg1 + 1;
4642 ins->opcode = OP_MOVE;
4643 ins->sreg1 = ins->sreg1 + 2;
/* fp return value: set both halves of the long return register pair. */
4646 int reg = ins->sreg1;
4648 ins->opcode = OP_SETLRET;
4650 ins->sreg1 = reg + 1;
4651 ins->sreg2 = reg + 2;
/* r8 memory accesses map 1:1 onto i8 memory accesses. */
4654 case OP_LOADR8_MEMBASE:
4655 ins->opcode = OP_LOADI8_MEMBASE;
4657 case OP_STORER8_MEMBASE_REG:
4658 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 store needs an r8->r4 narrowing, done by the mono_fstore_r4 icall. */
4660 case OP_STORER4_MEMBASE_REG: {
4661 MonoInst *iargs [2];
4664 /* Arg 1 is the double value */
4665 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4666 iargs [0]->dreg = ins->sreg1;
4668 /* Arg 2 is the address to store to */
4669 addr_reg = mono_alloc_preg (cfg);
4670 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4671 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 load widens to r8 via the mono_fload_r4 icall. */
4675 case OP_LOADR4_MEMBASE: {
4676 MonoInst *iargs [1];
4680 addr_reg = mono_alloc_preg (cfg);
4681 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4682 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4683 conv->dreg = ins->dreg;
4688 case OP_FCALL_MEMBASE: {
4689 MonoCallInst *call = (MonoCallInst*)ins;
/* An r4-returning call: the raw result comes back in an int register, so
 * clone the call as an integer call and widen the bits to r8 afterwards. */
4690 if (call->signature->ret->type == MONO_TYPE_R4) {
4691 MonoCallInst *call2;
4692 MonoInst *iargs [1];
4695 /* Convert the call into a call returning an int */
4696 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4697 memcpy (call2, call, sizeof (MonoCallInst));
4698 switch (ins->opcode) {
4700 call2->inst.opcode = OP_CALL;
4703 call2->inst.opcode = OP_CALL_REG;
4705 case OP_FCALL_MEMBASE:
4706 call2->inst.opcode = OP_CALL_MEMBASE;
4709 g_assert_not_reached ();
4711 call2->inst.dreg = mono_alloc_ireg (cfg);
4712 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4714 /* FIXME: Optimize this */
4716 /* Emit an r4->r8 conversion */
4717 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4718 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4719 conv->dreg = ins->dreg;
/* r8-returning calls: just retag the call opcode as its LCALL variant so the
 * result is treated as a 64-bit integer pair. */
4721 switch (ins->opcode) {
4723 ins->opcode = OP_LCALL;
4726 ins->opcode = OP_LCALL_REG;
4728 case OP_FCALL_MEMBASE:
4729 ins->opcode = OP_LCALL_MEMBASE;
4732 g_assert_not_reached ();
4738 MonoJitICallInfo *info;
4739 MonoInst *iargs [2];
4740 MonoInst *call, *cmp, *br;
4742 /* Convert fcompare+fbcc to icall+icompare+beq */
/* The emulation icall is looked up by the *branch* opcode following the
 * compare; both instructions are consumed as a pair. */
4744 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4747 /* Create dummy MonoInst's for the arguments */
4748 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4749 iargs [0]->dreg = ins->sreg1;
4750 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4751 iargs [1]->dreg = ins->sreg2;
4753 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4755 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4756 cmp->sreg1 = call->dreg;
4758 MONO_ADD_INS (cfg->cbb, cmp);
/* Branch when the icall result says the fp condition held, reusing the
 * original branch's targets. */
4760 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4761 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4762 br->inst_true_bb = ins->next->inst_true_bb;
4763 br->inst_false_bb = ins->next->inst_false_bb;
4764 MONO_ADD_INS (cfg->cbb, br);
4766 /* The call sequence might include fp ins */
4769 /* Skip fbcc or fccc */
4770 NULLIFY_INS (ins->next);
4778 MonoJitICallInfo *info;
4779 MonoInst *iargs [2];
4782 /* Convert fccc to icall+icompare+iceq */
4784 info = mono_find_jit_opcode_emulation (ins->opcode);
4787 /* Create dummy MonoInst's for the arguments */
4788 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4789 iargs [0]->dreg = ins->sreg1;
4790 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4791 iargs [1]->dreg = ins->sreg2;
4793 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
/* Materialize the boolean: result == 1 ? 1 : 0 into the original dreg. */
4795 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4796 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4798 /* The call sequence might include fp ins */
/* Safety net: after rewriting, no instruction handled by the default path
 * may still reference an fp vreg. */
4803 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4804 mono_print_ins (ins);
4805 g_assert_not_reached ();
4810 g_assert (cfg->cbb == first_bb);
/* Splice any newly emitted sequence over the original ins, then reset the
 * scratch bblock — same mechanism as mono_decompose_array_access_opts. */
4812 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4813 /* Replace the original instruction with the new code sequence */
4815 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4816 first_bb->code = first_bb->last_ins = NULL;
4817 first_bb->in_count = first_bb->out_count = 0;
4818 cfg->cbb = first_bb;
4825 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass produced long opcodes, so decompose them now. */
4828 mono_decompose_long_opts (cfg);
4834 * mono_method_to_ir: translates IL into basic blocks containing trees
4837 mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
4838 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
4839 guint inline_offset, gboolean is_virtual_call)
4841 MonoInst *ins, **sp, **stack_start;
4842 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
4843 MonoMethod *cmethod, *method_definition;
4844 MonoInst **arg_array;
4845 MonoMethodHeader *header;
4847 guint32 token, ins_flag;
4849 MonoClass *constrained_call = NULL;
4850 unsigned char *ip, *end, *target, *err_pos;
4851 static double r8_0 = 0.0;
4852 MonoMethodSignature *sig;
4853 MonoGenericContext *generic_context = NULL;
4854 MonoGenericContainer *generic_container = NULL;
4855 MonoType **param_types;
4856 GList *bb_recheck = NULL, *tmp;
4857 int i, n, start_new_bblock, dreg;
4858 int num_calls = 0, inline_costs = 0;
4859 int breakpoint_id = 0;
4861 MonoBoolean security, pinvoke;
4862 MonoSecurityManager* secman = NULL;
4863 MonoDeclSecurityActions actions;
4864 GSList *class_inits = NULL;
4865 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
4868 /* serialization and xdomain stuff may need access to private fields and methods */
4869 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
4870 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
4871 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
4872 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
4873 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
4874 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
4876 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
4878 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
4879 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
4880 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
4881 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
4883 image = method->klass->image;
4884 header = mono_method_get_header (method);
4885 generic_container = mono_method_get_generic_container (method);
4886 sig = mono_method_signature (method);
4887 num_args = sig->hasthis + sig->param_count;
4888 ip = (unsigned char*)header->code;
4889 cfg->cil_start = ip;
4890 end = ip + header->code_size;
4891 mono_jit_stats.cil_code_size += header->code_size;
4893 method_definition = method;
4894 while (method_definition->is_inflated) {
4895 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
4896 method_definition = imethod->declaring;
4899 /* SkipVerification is not allowed if core-clr is enabled */
4900 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
4902 dont_verify_stloc = TRUE;
4905 if (!dont_verify && mini_method_verify (cfg, method_definition))
4906 goto exception_exit;
4908 if (sig->is_inflated)
4909 generic_context = mono_method_get_context (method);
4910 else if (generic_container)
4911 generic_context = &generic_container->context;
4912 cfg->generic_context = generic_context;
4914 if (!cfg->generic_sharing_context)
4915 g_assert (!sig->has_type_parameters);
4917 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
4918 g_assert (method->is_inflated);
4919 g_assert (mono_method_get_context (method)->method_inst);
4921 if (method->is_inflated && mono_method_get_context (method)->method_inst)
4922 g_assert (sig->generic_param_count);
4924 if (cfg->method == method) {
4925 cfg->real_offset = 0;
4927 cfg->real_offset = inline_offset;
4930 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
4931 cfg->cil_offset_to_bb_len = header->code_size;
4933 cfg->current_method = method;
4935 if (cfg->verbose_level > 2)
4936 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
4938 dont_inline = g_list_prepend (dont_inline, method);
4939 if (cfg->method == method) {
4941 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
4942 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
4945 NEW_BBLOCK (cfg, start_bblock);
4946 cfg->bb_entry = start_bblock;
4947 start_bblock->cil_code = NULL;
4948 start_bblock->cil_length = 0;
4951 NEW_BBLOCK (cfg, end_bblock);
4952 cfg->bb_exit = end_bblock;
4953 end_bblock->cil_code = NULL;
4954 end_bblock->cil_length = 0;
4955 g_assert (cfg->num_bblocks == 2);
4957 arg_array = alloca (sizeof (MonoInst *) * num_args);
4958 for (i = num_args - 1; i >= 0; i--)
4959 arg_array [i] = cfg->args [i];
4961 if (header->num_clauses) {
4962 cfg->spvars = g_hash_table_new (NULL, NULL);
4963 cfg->exvars = g_hash_table_new (NULL, NULL);
4965 /* handle exception clauses */
4966 for (i = 0; i < header->num_clauses; ++i) {
4967 MonoBasicBlock *try_bb;
4968 MonoExceptionClause *clause = &header->clauses [i];
4969 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
4970 try_bb->real_offset = clause->try_offset;
4971 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
4972 tblock->real_offset = clause->handler_offset;
4973 tblock->flags |= BB_EXCEPTION_HANDLER;
4975 link_bblock (cfg, try_bb, tblock);
4977 if (*(ip + clause->handler_offset) == CEE_POP)
4978 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
4980 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
4981 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
4982 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
4983 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
4984 MONO_ADD_INS (tblock, ins);
4986 /* todo: is a fault block unsafe to optimize? */
4987 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
4988 tblock->flags |= BB_EXCEPTION_UNSAFE;
4992 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
4994 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
4996 /* catch and filter blocks get the exception object on the stack */
4997 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
4998 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
4999 MonoInst *dummy_use;
5001 /* mostly like handle_stack_args (), but just sets the input args */
5002 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5003 tblock->in_scount = 1;
5004 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5005 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5008 * Add a dummy use for the exvar so its liveness info will be
5012 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5014 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5015 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5016 tblock->real_offset = clause->data.filter_offset;
5017 tblock->in_scount = 1;
5018 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5019 /* The filter block shares the exvar with the handler block */
5020 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5021 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5022 MONO_ADD_INS (tblock, ins);
5026 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5027 clause->data.catch_class &&
5028 cfg->generic_sharing_context &&
5029 mono_class_check_context_used (clause->data.catch_class)) {
5030 if (mono_method_get_context (method)->method_inst)
5031 GENERIC_SHARING_FAILURE (CEE_NOP);
5034 * In shared generic code with catch
5035 * clauses containing type variables
5036 * the exception handling code has to
5037 * be able to get to the rgctx.
5038 * Therefore we have to make sure that
5039 * the vtable/mrgctx argument (for
5040 * static or generic methods) or the
5041 * "this" argument (for non-static
5042 * methods) are live.
5044 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5045 mini_method_get_context (method)->method_inst) {
5046 mono_get_vtable_var (cfg);
5048 MonoInst *dummy_use;
5050 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5055 arg_array = alloca (sizeof (MonoInst *) * num_args);
5056 cfg->cbb = start_bblock;
5057 mono_save_args (cfg, sig, inline_args, arg_array);
5060 /* FIRST CODE BLOCK */
5061 NEW_BBLOCK (cfg, bblock);
5062 bblock->cil_code = ip;
5066 ADD_BBLOCK (cfg, bblock);
5068 if (cfg->method == method) {
5069 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5070 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5071 MONO_INST_NEW (cfg, ins, OP_BREAK);
5072 MONO_ADD_INS (bblock, ins);
5076 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5077 secman = mono_security_manager_get_methods ();
5079 security = (secman && mono_method_has_declsec (method));
5080 /* at this point having security doesn't mean we have any code to generate */
5081 if (security && (cfg->method == method)) {
5082 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5083 * And we do not want to enter the next section (with allocation) if we
5084 * have nothing to generate */
5085 security = mono_declsec_get_demands (method, &actions);
5088 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5089 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5091 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5092 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5093 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5095 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5096 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5100 mono_custom_attrs_free (custom);
5103 custom = mono_custom_attrs_from_class (wrapped->klass);
5104 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5108 mono_custom_attrs_free (custom);
5111 /* not a P/Invoke after all */
5116 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5117 /* we use a separate basic block for the initialization code */
5118 NEW_BBLOCK (cfg, init_localsbb);
5119 cfg->bb_init = init_localsbb;
5120 init_localsbb->real_offset = cfg->real_offset;
5121 start_bblock->next_bb = init_localsbb;
5122 init_localsbb->next_bb = bblock;
5123 link_bblock (cfg, start_bblock, init_localsbb);
5124 link_bblock (cfg, init_localsbb, bblock);
5126 cfg->cbb = init_localsbb;
5128 start_bblock->next_bb = bblock;
5129 link_bblock (cfg, start_bblock, bblock);
5132 /* at this point we know, if security is TRUE, that some code needs to be generated */
5133 if (security && (cfg->method == method)) {
5136 mono_jit_stats.cas_demand_generation++;
5138 if (actions.demand.blob) {
5139 /* Add code for SecurityAction.Demand */
5140 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5141 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5142 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5143 mono_emit_method_call (cfg, secman->demand, mono_method_signature (secman->demand), args, NULL);
5145 if (actions.noncasdemand.blob) {
5146 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5147 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5148 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5149 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5150 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5151 mono_emit_method_call (cfg, secman->demand, mono_method_signature (secman->demand), args, NULL);
5153 if (actions.demandchoice.blob) {
5154 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5155 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5156 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5157 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5158 mono_emit_method_call (cfg, secman->demandchoice, mono_method_signature (secman->demandchoice), args, NULL);
5162 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5164 mono_emit_method_call (cfg, secman->demandunmanaged, mono_method_signature (secman->demandunmanaged), NULL, NULL);
5167 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5168 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5169 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5170 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5171 if (!(method->klass && method->klass->image &&
5172 mono_security_core_clr_is_platform_image (method->klass->image))) {
5173 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5177 if (!method_is_safe (method))
5178 emit_throw_verification_exception (cfg, bblock, ip);
5181 if (header->code_size == 0)
5184 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5189 if (cfg->method == method)
5190 mono_debug_init_method (cfg, bblock, breakpoint_id);
5192 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5194 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5195 for (n = 0; n < sig->param_count; ++n)
5196 param_types [n + sig->hasthis] = sig->params [n];
5197 for (n = 0; n < header->num_locals; ++n) {
5198 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5203 /* add a check for this != NULL to inlined methods */
5204 if (is_virtual_call) {
5207 NEW_ARGLOAD (cfg, arg_ins, 0);
5208 MONO_ADD_INS (cfg->cbb, arg_ins);
5209 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5210 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5211 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5214 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5215 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5218 start_new_bblock = 0;
5222 if (cfg->method == method)
5223 cfg->real_offset = ip - header->code;
5225 cfg->real_offset = inline_offset;
5230 if (start_new_bblock) {
5231 bblock->cil_length = ip - bblock->cil_code;
5232 if (start_new_bblock == 2) {
5233 g_assert (ip == tblock->cil_code);
5235 GET_BBLOCK (cfg, tblock, ip);
5237 bblock->next_bb = tblock;
5240 start_new_bblock = 0;
5241 for (i = 0; i < bblock->in_scount; ++i) {
5242 if (cfg->verbose_level > 3)
5243 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5244 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5248 g_slist_free (class_inits);
5251 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5252 link_bblock (cfg, bblock, tblock);
5253 if (sp != stack_start) {
5254 handle_stack_args (cfg, stack_start, sp - stack_start);
5256 CHECK_UNVERIFIABLE (cfg);
5258 bblock->next_bb = tblock;
5261 for (i = 0; i < bblock->in_scount; ++i) {
5262 if (cfg->verbose_level > 3)
5263 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5264 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5267 g_slist_free (class_inits);
5272 bblock->real_offset = cfg->real_offset;
5274 if ((cfg->method == method) && cfg->coverage_info) {
5275 guint32 cil_offset = ip - header->code;
5276 cfg->coverage_info->data [cil_offset].cil_code = ip;
5278 /* TODO: Use an increment here */
5279 #if defined(__i386__)
5280 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5281 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5283 MONO_ADD_INS (cfg->cbb, ins);
5285 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5286 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5290 if (cfg->verbose_level > 3)
5291 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5296 MONO_INST_NEW (cfg, ins, (*ip) == CEE_NOP ? OP_NOP : OP_BREAK);
5298 MONO_ADD_INS (bblock, ins);
5304 CHECK_STACK_OVF (1);
5305 n = (*ip)-CEE_LDARG_0;
5307 EMIT_NEW_ARGLOAD (cfg, ins, n);
5315 CHECK_STACK_OVF (1);
5316 n = (*ip)-CEE_LDLOC_0;
5318 EMIT_NEW_LOCLOAD (cfg, ins, n);
5329 n = (*ip)-CEE_STLOC_0;
5332 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5335 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5336 if ((opcode == OP_MOVE) && ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5337 /* Optimize reg-reg moves away */
5339 * Can't optimize other opcodes, since sp[0] might point to
5340 * the last ins of a decomposed opcode.
5342 sp [0]->dreg = (cfg)->locals [n]->dreg;
5344 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5352 CHECK_STACK_OVF (1);
5355 EMIT_NEW_ARGLOAD (cfg, ins, n);
5361 CHECK_STACK_OVF (1);
5364 NEW_ARGLOADA (cfg, ins, n);
5365 MONO_ADD_INS (cfg->cbb, ins);
5375 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5377 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5382 CHECK_STACK_OVF (1);
5385 EMIT_NEW_LOCLOAD (cfg, ins, n);
5391 CHECK_STACK_OVF (1);
5392 CHECK_LOCAL (ip [1]);
5395 * ldloca inhibits many optimizations so try to get rid of it in common
5398 if (ip + 8 < end && (ip [2] == CEE_PREFIX1) && (ip [3] == CEE_INITOBJ) && ip_in_bb (cfg, bblock, ip + 3)) {
5399 gboolean skip = FALSE;
5401 /* From the INITOBJ case */
5402 token = read32 (ip + 4);
5403 klass = mini_get_class (method, token, generic_context);
5404 CHECK_TYPELOAD (klass);
5405 if (cfg->generic_sharing_context && mono_class_check_context_used (klass))
5406 GENERIC_SHARING_FAILURE (CEE_INITOBJ);
5408 if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5409 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [ip [1]]->dreg, NULL);
5410 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5411 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [ip [1]]->dreg, klass);
5423 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5431 CHECK_LOCAL (ip [1]);
5432 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5434 EMIT_NEW_LOCSTORE (cfg, ins, ip [1], *sp);
5439 CHECK_STACK_OVF (1);
5440 EMIT_NEW_PCONST (cfg, ins, NULL);
5441 ins->type = STACK_OBJ;
5446 CHECK_STACK_OVF (1);
5447 EMIT_NEW_ICONST (cfg, ins, -1);
5460 CHECK_STACK_OVF (1);
5461 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5467 CHECK_STACK_OVF (1);
5469 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5475 CHECK_STACK_OVF (1);
5476 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5482 CHECK_STACK_OVF (1);
5483 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5484 ins->type = STACK_I8;
5485 ins->dreg = alloc_dreg (cfg, STACK_I8);
5487 ins->inst_l = (gint64)read64 (ip);
5488 MONO_ADD_INS (bblock, ins);
5494 /* FIXME: we should really allocate this only late in the compilation process */
5495 mono_domain_lock (cfg->domain);
5496 f = mono_mempool_alloc (cfg->domain->mp, sizeof (float));
5497 mono_domain_unlock (cfg->domain);
5499 CHECK_STACK_OVF (1);
5500 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5501 ins->type = STACK_R8;
5502 ins->dreg = alloc_dreg (cfg, STACK_R8);
5506 MONO_ADD_INS (bblock, ins);
5514 /* FIXME: we should really allocate this only late in the compilation process */
5515 mono_domain_lock (cfg->domain);
5516 d = mono_mempool_alloc (cfg->domain->mp, sizeof (double));
5517 mono_domain_unlock (cfg->domain);
5519 CHECK_STACK_OVF (1);
5520 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5521 ins->type = STACK_R8;
5522 ins->dreg = alloc_dreg (cfg, STACK_R8);
5526 MONO_ADD_INS (bblock, ins);
5533 MonoInst *temp, *store;
5535 CHECK_STACK_OVF (1);
5539 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5540 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5542 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5545 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5558 if (sp [0]->type == STACK_R8)
5559 /* we need to pop the value from the x86 FP stack */
5560 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5567 if (stack_start != sp)
5569 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5570 ins = (MonoInst*)call;
5571 token = read32 (ip + 1);
5572 /* FIXME: check the signature matches */
5573 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5578 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5579 GENERIC_SHARING_FAILURE (CEE_JMP);
5581 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5582 if (check_linkdemand (cfg, method, cmethod))
5584 CHECK_CFG_EXCEPTION;
5587 ins->inst_p0 = cmethod;
5588 MONO_ADD_INS (bblock, ins);
5590 start_new_bblock = 1;
5593 cfg->disable_aot = 1;
5598 case CEE_CALLVIRT: {
5599 MonoInst *addr = NULL;
5600 MonoMethodSignature *fsig = NULL;
5602 int virtual = *ip == CEE_CALLVIRT;
5603 int calli = *ip == CEE_CALLI;
5604 gboolean pass_imt_from_rgctx = FALSE;
5605 MonoInst *imt_arg = NULL;
5606 gboolean pass_vtable = FALSE;
5607 gboolean pass_mrgctx = FALSE;
5608 MonoInst *vtable_arg = NULL;
5609 gboolean check_this = FALSE;
5612 token = read32 (ip + 1);
5619 if (method->wrapper_type != MONO_WRAPPER_NONE)
5620 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5622 fsig = mono_metadata_parse_signature (image, token);
5624 n = fsig->param_count + fsig->hasthis;
5626 MonoMethod *cil_method;
5628 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5629 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5630 cil_method = cmethod;
5631 } else if (constrained_call) {
5632 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5634 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5635 cil_method = cmethod;
5640 if (!dont_verify && !cfg->skip_visibility) {
5641 MonoMethod *target_method = cil_method;
5642 if (method->is_inflated) {
5643 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5645 if (!mono_method_can_access_method (method_definition, target_method) &&
5646 !mono_method_can_access_method (method, cil_method))
5647 METHOD_ACCESS_FAILURE;
5650 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5651 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5653 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5654 /* MS.NET seems to silently convert this to a callvirt */
5657 if (!cmethod->klass->inited)
5658 if (!mono_class_init (cmethod->klass))
5661 if (mono_method_signature (cmethod)->pinvoke) {
5662 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc);
5663 fsig = mono_method_signature (wrapper);
5664 } else if (constrained_call) {
5665 fsig = mono_method_signature (cmethod);
5667 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5670 mono_save_token_info (cfg, image, token, cmethod);
5672 n = fsig->param_count + fsig->hasthis;
5674 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5675 if (check_linkdemand (cfg, method, cmethod))
5677 CHECK_CFG_EXCEPTION;
5680 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5681 mini_class_is_system_array (cmethod->klass)) {
5682 array_rank = cmethod->klass->rank;
5685 if (cmethod->string_ctor)
5686 g_assert_not_reached ();
5689 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5692 if (!cfg->generic_sharing_context && cmethod)
5693 g_assert (!mono_method_check_context_used (cmethod));
5697 //g_assert (!virtual || fsig->hasthis);
5701 if (constrained_call) {
5703 * We have the `constrained.' prefix opcode.
5705 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5709 * The type parameter is instantiated as a valuetype,
5710 * but that type doesn't override the method we're
5711 * calling, so we need to box `this'.
5713 dreg = alloc_dreg (cfg, STACK_VTYPE);
5714 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
5715 ins->klass = constrained_call;
5716 sp [0] = handle_box (cfg, ins, constrained_call);
5717 } else if (!constrained_call->valuetype) {
5718 int dreg = alloc_preg (cfg);
5721 * The type parameter is instantiated as a reference
5722 * type. We have a managed pointer on the stack, so
5723 * we need to dereference it here.
5725 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5726 ins->type = STACK_OBJ;
5728 } else if (cmethod->klass->valuetype)
5730 constrained_call = NULL;
5733 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
5737 if (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
5738 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
5739 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5740 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
5741 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5744 * Pass vtable iff target method might
5745 * be shared, which means that sharing
5746 * is enabled for its class and its
5747 * context is sharable (and it's not a
5750 if (sharing_enabled && context_sharable &&
5751 !mini_method_get_context (cmethod)->method_inst)
5755 if (cmethod && mini_method_get_context (cmethod) &&
5756 mini_method_get_context (cmethod)->method_inst) {
5757 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
5758 MonoGenericContext *context = mini_method_get_context (cmethod);
5759 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
5761 g_assert (!pass_vtable);
5763 if (sharing_enabled && context_sharable)
5767 if (cfg->generic_sharing_context && cmethod) {
5768 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
5770 context_used = mono_method_check_context_used (cmethod);
5772 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
5773 /* Generic method interface
5774 calls are resolved via a
5775 helper function and don't
5777 if (!cmethod_context || !cmethod_context->method_inst)
5778 pass_imt_from_rgctx = TRUE;
5782 * If a shared method calls another
5783 * shared method then the caller must
5784 * have a generic sharing context
5785 * because the magic trampoline
5786 * requires it. FIXME: We shouldn't
5787 * have to force the vtable/mrgctx
5788 * variable here. Instead there
5789 * should be a flag in the cfg to
5790 * request a generic sharing context.
5792 if (context_used && method->flags & METHOD_ATTRIBUTE_STATIC)
5793 mono_get_vtable_var (cfg);
5798 GENERIC_SHARING_FAILURE (*ip);
5804 EMIT_GET_RGCTX (rgctx, context_used);
5805 vtable_arg = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5807 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
5809 CHECK_TYPELOAD (cmethod->klass);
5810 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5815 g_assert (!vtable_arg);
5820 EMIT_GET_RGCTX (rgctx, context_used);
5821 vtable_arg = emit_get_rgctx_method_rgctx (cfg, context_used, rgctx, cmethod);
5823 MonoMethodRuntimeGenericContext *mrgctx;
5825 mrgctx = mono_method_lookup_rgctx (mono_class_vtable (cfg->domain, cmethod->klass),
5826 mini_method_get_context (cmethod)->method_inst);
5828 EMIT_NEW_PCONST (cfg, vtable_arg, mrgctx);
5831 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
5832 (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
5839 if (pass_imt_from_rgctx) {
5842 g_assert (!pass_vtable);
5845 EMIT_GET_RGCTX (rgctx, context_used);
5846 imt_arg = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod,
5847 MONO_RGCTX_INFO_METHOD);
5853 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
5854 check->sreg1 = sp [0]->dreg;
5855 MONO_ADD_INS (cfg->cbb, check);
5858 /* Calling virtual generic methods */
5859 if (cmethod && virtual &&
5860 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
5861 !((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
5862 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
5863 mono_method_signature (cmethod)->generic_param_count) {
5864 MonoInst *this_temp, *this_arg_temp, *store;
5865 MonoInst *iargs [4];
5867 g_assert (mono_method_signature (cmethod)->is_inflated);
5869 /* Prevent inlining of methods that contain indirect calls */
5872 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
5873 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
5874 MONO_ADD_INS (bblock, store);
5876 /* FIXME: This should be a managed pointer */
5877 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
5879 /* Because of the PCONST below */
5880 cfg->disable_aot = TRUE;
5881 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
5885 EMIT_GET_RGCTX (rgctx, context_used);
5886 iargs [1] = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_METHOD);
5887 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
5888 addr = mono_emit_jit_icall (cfg,
5889 mono_helper_compile_generic_method_wo_context, iargs);
5891 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
5892 EMIT_NEW_PCONST (cfg, iargs [2], mono_method_get_context (cmethod));
5893 EMIT_NEW_TEMPLOADA (cfg, iargs [3], this_arg_temp->inst_c0);
5894 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
5897 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
5899 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
5900 if (!MONO_TYPE_IS_VOID (fsig->ret))
5909 /* FIXME: runtime generic context pointer for jumps? */
5910 /* FIXME: handle this for generic sharing eventually */
5911 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
5912 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
5915 /* FIXME: runtime generic context pointer for jumps? */
5916 GENERIC_SHARING_FAILURE (*ip);
5918 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
5921 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5922 call->tail_call = TRUE;
5923 call->method = cmethod;
5924 call->signature = mono_method_signature (cmethod);
5927 /* Handle tail calls similarly to calls */
5928 call->inst.opcode = OP_TAILCALL;
5930 mono_arch_emit_call (cfg, call);
5933 * We implement tail calls by storing the actual arguments into the
5934 * argument variables, then emitting a CEE_JMP.
5936 for (i = 0; i < n; ++i) {
5937 /* Prevent argument from being register allocated */
5938 arg_array [i]->flags |= MONO_INST_VOLATILE;
5939 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
5944 cfg->disable_aot = 1;
5946 ins = (MonoInst*)call;
5947 ins->inst_p0 = cmethod;
5948 ins->inst_p1 = arg_array [0];
5949 MONO_ADD_INS (bblock, ins);
5950 link_bblock (cfg, bblock, end_bblock);
5951 start_new_bblock = 1;
5952 /* skip CEE_RET as well */
5958 /* Conversion to a JIT intrinsic */
5959 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
5960 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5961 type_to_eval_stack_type ((cfg), fsig->ret, ins);
5972 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
5973 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
5974 mono_method_check_inlining (cfg, cmethod) &&
5975 !g_list_find (dont_inline, cmethod)) {
5977 gboolean allways = FALSE;
5979 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5980 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5981 /* Prevent inlining of methods that call wrappers */
5983 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc);
5987 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
5989 cfg->real_offset += 5;
5992 if (!MONO_TYPE_IS_VOID (fsig->ret))
5993 /* *sp is already set by inline_method */
5996 inline_costs += costs;
6002 inline_costs += 10 * num_calls++;
6004 /* Tail recursion elimination */
6005 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6006 gboolean has_vtargs = FALSE;
6009 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6012 /* keep it simple */
6013 for (i = fsig->param_count - 1; i >= 0; i--) {
6014 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6019 for (i = 0; i < n; ++i)
6020 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6021 MONO_INST_NEW (cfg, ins, OP_BR);
6022 MONO_ADD_INS (bblock, ins);
6023 tblock = start_bblock->out_bb [0];
6024 link_bblock (cfg, bblock, tblock);
6025 ins->inst_target_bb = tblock;
6026 start_new_bblock = 1;
6028 /* skip the CEE_RET, too */
6029 if (ip_in_bb (cfg, bblock, ip + 5))
6039 /* Generic sharing */
6040 /* FIXME: only do this for generic methods if
6041 they are not shared! */
6043 (cmethod->klass->valuetype ||
6044 (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst && !pass_mrgctx) ||
6045 ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
6046 mono_class_generic_sharing_enabled (cmethod->klass)) ||
6047 (!imt_arg && !mono_method_is_generic_sharable_impl (cmethod, TRUE) &&
6048 (!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6049 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))))) {
6054 g_assert (cfg->generic_sharing_context && cmethod);
6058 * We are compiling a call to a
6059 * generic method from shared code,
6060 * which means that we have to look up
6061 * the method in the rgctx and do an
6065 EMIT_GET_RGCTX (rgctx, context_used);
6066 addr = emit_get_rgctx_method (cfg, context_used, rgctx, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6069 /* Indirect calls */
6071 g_assert (!imt_arg);
6073 if (*ip == CEE_CALL)
6074 g_assert (context_used);
6075 else if (*ip == CEE_CALLI)
6076 g_assert (!vtable_arg);
6078 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6079 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6081 /* Prevent inlining of methods with indirect calls */
6085 #ifdef MONO_ARCH_RGCTX_REG
6087 int rgctx_reg = mono_alloc_preg (cfg);
6089 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6090 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6091 call = (MonoCallInst*)ins;
6092 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6094 GENERIC_SHARING_FAILURE (*ip);
6097 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6099 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6100 if (fsig->pinvoke && !fsig->ret->byref) {
6104 * Native code might return non register sized integers
6105 * without initializing the upper bits.
6107 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6108 case OP_LOADI1_MEMBASE:
6109 widen_op = OP_ICONV_TO_I1;
6111 case OP_LOADU1_MEMBASE:
6112 widen_op = OP_ICONV_TO_U1;
6114 case OP_LOADI2_MEMBASE:
6115 widen_op = OP_ICONV_TO_I2;
6117 case OP_LOADU2_MEMBASE:
6118 widen_op = OP_ICONV_TO_U2;
6124 if (widen_op != -1) {
6125 int dreg = alloc_preg (cfg);
6128 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6129 widen->type = ins->type;
6146 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6147 if (sp [fsig->param_count]->type == STACK_OBJ) {
6148 MonoInst *iargs [2];
6151 iargs [1] = sp [fsig->param_count];
6153 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6156 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6157 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6158 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6159 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6161 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6164 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6165 if (!cmethod->klass->element_class->valuetype && !readonly)
6166 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6169 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6172 g_assert_not_reached ();
6180 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6182 if (!MONO_TYPE_IS_VOID (fsig->ret))
6193 #ifdef MONO_ARCH_RGCTX_REG
6195 int rgctx_reg = mono_alloc_preg (cfg);
6197 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6198 ins = (MonoInst*)mono_emit_method_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6199 call = (MonoCallInst*)ins;
6200 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6202 GENERIC_SHARING_FAILURE (*ip);
6204 } else if (imt_arg) {
6205 ins = (MonoInst*)mono_emit_imt_method_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6207 ins = (MonoInst*)mono_emit_method_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6210 if (!MONO_TYPE_IS_VOID (fsig->ret))
6218 if (cfg->method != method) {
6219 /* return from inlined method */
6224 //g_assert (returnvar != -1);
6225 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6226 cfg->ret_var_set = TRUE;
6230 MonoType *ret_type = mono_method_signature (method)->ret;
6232 g_assert (!return_var);
6235 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6238 if (!cfg->vret_addr) {
6241 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6243 EMIT_NEW_RETLOADA (cfg, ret_addr);
6245 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6246 ins->klass = mono_class_from_mono_type (ret_type);
6249 #ifdef MONO_ARCH_SOFT_FLOAT
6250 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6251 MonoInst *iargs [1];
6255 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6256 mono_arch_emit_setret (cfg, method, conv);
6258 mono_arch_emit_setret (cfg, method, *sp);
6261 mono_arch_emit_setret (cfg, method, *sp);
6266 if (sp != stack_start)
6268 MONO_INST_NEW (cfg, ins, OP_BR);
6270 ins->inst_target_bb = end_bblock;
6271 MONO_ADD_INS (bblock, ins);
6272 link_bblock (cfg, bblock, end_bblock);
6273 start_new_bblock = 1;
6277 MONO_INST_NEW (cfg, ins, OP_BR);
6279 target = ip + 1 + (signed char)(*ip);
6281 GET_BBLOCK (cfg, tblock, target);
6282 link_bblock (cfg, bblock, tblock);
6283 CHECK_BBLOCK (target, ip, tblock);
6284 ins->inst_target_bb = tblock;
6285 if (sp != stack_start) {
6286 handle_stack_args (cfg, stack_start, sp - stack_start);
6288 CHECK_UNVERIFIABLE (cfg);
6290 MONO_ADD_INS (bblock, ins);
6291 start_new_bblock = 1;
6292 inline_costs += BRANCH_COST;
6306 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6308 target = ip + 1 + *(signed char*)ip;
6314 inline_costs += BRANCH_COST;
6318 MONO_INST_NEW (cfg, ins, OP_BR);
6321 target = ip + 4 + (gint32)read32(ip);
6323 GET_BBLOCK (cfg, tblock, target);
6324 link_bblock (cfg, bblock, tblock);
6325 CHECK_BBLOCK (target, ip, tblock);
6326 ins->inst_target_bb = tblock;
6327 if (sp != stack_start) {
6328 handle_stack_args (cfg, stack_start, sp - stack_start);
6330 CHECK_UNVERIFIABLE (cfg);
6333 MONO_ADD_INS (bblock, ins);
6335 start_new_bblock = 1;
6336 inline_costs += BRANCH_COST;
6343 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6344 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6345 guint32 opsize = is_short ? 1 : 4;
6347 CHECK_OPSIZE (opsize);
6349 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6352 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6357 GET_BBLOCK (cfg, tblock, target);
6358 link_bblock (cfg, bblock, tblock);
6359 CHECK_BBLOCK (target, ip, tblock);
6360 GET_BBLOCK (cfg, tblock, ip);
6361 link_bblock (cfg, bblock, tblock);
6363 if (sp != stack_start) {
6364 handle_stack_args (cfg, stack_start, sp - stack_start);
6365 CHECK_UNVERIFIABLE (cfg);
6368 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6369 cmp->sreg1 = sp [0]->dreg;
6370 type_from_op (cmp, sp [0], NULL);
6373 #if SIZEOF_VOID_P == 4
6374 if (cmp->opcode == OP_LCOMPARE_IMM) {
6375 /* Convert it to OP_LCOMPARE */
6376 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6377 ins->type = STACK_I8;
6378 ins->dreg = alloc_dreg (cfg, STACK_I8);
6380 MONO_ADD_INS (bblock, ins);
6381 cmp->opcode = OP_LCOMPARE;
6382 cmp->sreg2 = ins->dreg;
6385 MONO_ADD_INS (bblock, cmp);
6387 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6388 type_from_op (ins, sp [0], NULL);
6389 MONO_ADD_INS (bblock, ins);
6390 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6391 GET_BBLOCK (cfg, tblock, target);
6392 ins->inst_true_bb = tblock;
6393 GET_BBLOCK (cfg, tblock, ip);
6394 ins->inst_false_bb = tblock;
6395 start_new_bblock = 2;
6398 inline_costs += BRANCH_COST;
6413 MONO_INST_NEW (cfg, ins, *ip);
6415 target = ip + 4 + (gint32)read32(ip);
6421 inline_costs += BRANCH_COST;
6425 MonoBasicBlock **targets;
6426 MonoBasicBlock *default_bblock;
6427 MonoJumpInfoBBTable *table;
6428 int offset_reg = alloc_preg (cfg);
6429 int target_reg = alloc_preg (cfg);
6430 int table_reg = alloc_preg (cfg);
6431 int sum_reg = alloc_preg (cfg);
6435 n = read32 (ip + 1);
6438 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6442 CHECK_OPSIZE (n * sizeof (guint32));
6443 target = ip + n * sizeof (guint32);
6445 GET_BBLOCK (cfg, default_bblock, target);
6447 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6448 for (i = 0; i < n; ++i) {
6449 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6450 targets [i] = tblock;
6454 if (sp != stack_start) {
6456 * Link the current bb with the targets as well, so handle_stack_args
6457 * will set their in_stack correctly.
6459 link_bblock (cfg, bblock, default_bblock);
6460 for (i = 0; i < n; ++i)
6461 link_bblock (cfg, bblock, targets [i]);
6463 handle_stack_args (cfg, stack_start, sp - stack_start);
6465 CHECK_UNVERIFIABLE (cfg);
6468 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6469 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6472 for (i = 0; i < n; ++i)
6473 link_bblock (cfg, bblock, targets [i]);
6475 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6476 table->table = targets;
6477 table->table_size = n;
6480 /* ARM implements SWITCH statements differently */
6481 /* FIXME: Make it use the generic implementation */
6482 /* the backend code will deal with aot vs normal case */
6483 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6484 ins->sreg1 = src1->dreg;
6485 ins->inst_p0 = table;
6486 ins->inst_many_bb = targets;
6487 ins->klass = GUINT_TO_POINTER (n);
6488 MONO_ADD_INS (cfg->cbb, ins);
6490 if (sizeof (gpointer) == 8)
6491 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6493 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6495 #if SIZEOF_VOID_P == 8
6496 /* The upper word might not be zero, and we add it to a 64 bit address later */
6497 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6500 if (cfg->compile_aot) {
6501 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6503 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6504 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6505 ins->inst_p0 = table;
6506 ins->dreg = table_reg;
6507 MONO_ADD_INS (cfg->cbb, ins);
6510 /* FIXME: Use load_memindex */
6511 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6512 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6513 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6515 start_new_bblock = 1;
6516 inline_costs += (BRANCH_COST * 2);
6536 dreg = alloc_freg (cfg);
6539 dreg = alloc_lreg (cfg);
6542 dreg = alloc_preg (cfg);
6545 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6546 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6547 ins->flags |= ins_flag;
6549 MONO_ADD_INS (bblock, ins);
6564 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6565 ins->flags |= ins_flag;
6567 MONO_ADD_INS (bblock, ins);
6575 MONO_INST_NEW (cfg, ins, (*ip));
6577 ins->sreg1 = sp [0]->dreg;
6578 ins->sreg2 = sp [1]->dreg;
6579 type_from_op (ins, sp [0], sp [1]);
6581 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6583 /* Use the immediate opcodes if possible */
6584 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6585 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6586 if (imm_opcode != -1) {
6587 ins->opcode = imm_opcode;
6588 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6591 sp [1]->opcode = OP_NOP;
6595 MONO_ADD_INS ((cfg)->cbb, (ins));
6598 mono_decompose_opcode (cfg, ins);
6615 MONO_INST_NEW (cfg, ins, (*ip));
6617 ins->sreg1 = sp [0]->dreg;
6618 ins->sreg2 = sp [1]->dreg;
6619 type_from_op (ins, sp [0], sp [1]);
6621 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6622 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6624 /* FIXME: Pass opcode to is_inst_imm */
6626 /* Use the immediate opcodes if possible */
6627 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6630 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6631 if (imm_opcode != -1) {
6632 ins->opcode = imm_opcode;
6633 if (sp [1]->opcode == OP_I8CONST) {
6634 #if SIZEOF_VOID_P == 8
6635 ins->inst_imm = sp [1]->inst_l;
6637 ins->inst_ls_word = sp [1]->inst_ls_word;
6638 ins->inst_ms_word = sp [1]->inst_ms_word;
6642 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6645 sp [1]->opcode = OP_NOP;
6648 MONO_ADD_INS ((cfg)->cbb, (ins));
6651 mono_decompose_opcode (cfg, ins);
6664 case CEE_CONV_OVF_I8:
6665 case CEE_CONV_OVF_U8:
6669 /* Special case this earlier so we have long constants in the IR */
6670 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6671 int data = sp [-1]->inst_c0;
6672 sp [-1]->opcode = OP_I8CONST;
6673 sp [-1]->type = STACK_I8;
6674 #if SIZEOF_VOID_P == 8
6675 if ((*ip) == CEE_CONV_U8)
6676 sp [-1]->inst_c0 = (guint32)data;
6678 sp [-1]->inst_c0 = data;
6680 sp [-1]->inst_ls_word = data;
6681 if ((*ip) == CEE_CONV_U8)
6682 sp [-1]->inst_ms_word = 0;
6684 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6686 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6693 case CEE_CONV_OVF_I4:
6694 case CEE_CONV_OVF_I1:
6695 case CEE_CONV_OVF_I2:
6696 case CEE_CONV_OVF_I:
6697 case CEE_CONV_OVF_U:
6700 if (sp [-1]->type == STACK_R8) {
6701 ADD_UNOP (CEE_CONV_OVF_I8);
6708 case CEE_CONV_OVF_U1:
6709 case CEE_CONV_OVF_U2:
6710 case CEE_CONV_OVF_U4:
6713 if (sp [-1]->type == STACK_R8) {
6714 ADD_UNOP (CEE_CONV_OVF_U8);
6721 case CEE_CONV_OVF_I1_UN:
6722 case CEE_CONV_OVF_I2_UN:
6723 case CEE_CONV_OVF_I4_UN:
6724 case CEE_CONV_OVF_I8_UN:
6725 case CEE_CONV_OVF_U1_UN:
6726 case CEE_CONV_OVF_U2_UN:
6727 case CEE_CONV_OVF_U4_UN:
6728 case CEE_CONV_OVF_U8_UN:
6729 case CEE_CONV_OVF_I_UN:
6730 case CEE_CONV_OVF_U_UN:
6740 case CEE_ADD_OVF_UN:
6742 case CEE_MUL_OVF_UN:
6744 case CEE_SUB_OVF_UN:
6752 token = read32 (ip + 1);
6753 klass = mini_get_class (method, token, generic_context);
6754 CHECK_TYPELOAD (klass);
6756 if (generic_class_is_reference_type (cfg, klass)) {
6757 MonoInst *store, *load;
6758 int dreg = alloc_preg (cfg);
6760 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
6761 load->flags |= ins_flag;
6762 MONO_ADD_INS (cfg->cbb, load);
6764 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
6765 store->flags |= ins_flag;
6766 MONO_ADD_INS (cfg->cbb, store);
6768 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6780 token = read32 (ip + 1);
6781 klass = mini_get_class (method, token, generic_context);
6782 CHECK_TYPELOAD (klass);
6784 /* Optimize the common ldobj+stloc combination */
6794 loc_index = ip [5] - CEE_STLOC_0;
6801 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
6802 CHECK_LOCAL (loc_index);
6804 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6805 ins->dreg = cfg->locals [loc_index]->dreg;
6811 /* Optimize the ldobj+stobj combination */
6812 /* The reference case ends up being a load+store anyway */
6813 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
6818 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
6825 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
6834 CHECK_STACK_OVF (1);
6836 n = read32 (ip + 1);
6838 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
6839 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
6840 ins->type = STACK_OBJ;
6843 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
6844 MonoInst *iargs [1];
6846 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
6847 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
6849 if (cfg->opt & MONO_OPT_SHARED) {
6850 MonoInst *iargs [3];
6852 if (cfg->compile_aot) {
6853 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
6855 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
6856 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
6857 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
6858 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
6859 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6861 if (bblock->out_of_line) {
6862 MonoInst *iargs [2];
6864 if (cfg->method->klass->image == mono_defaults.corlib) {
6866 * Avoid relocations in AOT and save some space by using a
6867 * version of helper_ldstr specialized to mscorlib.
6869 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
6870 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
6872 /* Avoid creating the string object */
6873 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
6874 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
6875 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
6879 if (cfg->compile_aot) {
6880 NEW_LDSTRCONST (cfg, ins, image, n);
6882 MONO_ADD_INS (bblock, ins);
6885 NEW_PCONST (cfg, ins, NULL);
6886 ins->type = STACK_OBJ;
6887 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
6889 MONO_ADD_INS (bblock, ins);
6898 MonoInst *iargs [2];
6899 MonoMethodSignature *fsig;
6904 token = read32 (ip + 1);
6905 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6908 fsig = mono_method_get_signature (cmethod, image, token);
6910 mono_save_token_info (cfg, image, token, cmethod);
6912 if (!mono_class_init (cmethod->klass))
6915 if (cfg->generic_sharing_context)
6916 context_used = mono_method_check_context_used (cmethod);
6918 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6919 if (check_linkdemand (cfg, method, cmethod))
6921 CHECK_CFG_EXCEPTION;
6922 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6923 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
6926 n = fsig->param_count;
6930 * Generate smaller code for the common newobj <exception> instruction in
6931 * argument checking code.
6933 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib && n <= 2 &&
6934 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
6935 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
6936 MonoInst *iargs [3];
6940 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
6943 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
6947 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
6952 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
6955 g_assert_not_reached ();
6963 /* move the args to allow room for 'this' in the first position */
6969 /* check_call_signature () requires sp[0] to be set */
6970 this_ins.type = STACK_OBJ;
6972 if (check_call_signature (cfg, fsig, sp))
6977 if (mini_class_is_system_array (cmethod->klass)) {
6978 g_assert (!context_used);
6979 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
6980 if (fsig->param_count == 2)
6981 /* Avoid varargs in the common case */
6982 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
6984 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
6985 } else if (cmethod->string_ctor) {
6986 g_assert (!context_used);
6987 /* we simply pass a null pointer */
6988 EMIT_NEW_PCONST (cfg, *sp, NULL);
6989 /* now call the string ctor */
6990 alloc = mono_emit_method_call (cfg, cmethod, fsig, sp, NULL);
6992 MonoInst* callvirt_this_arg = NULL;
6994 if (cmethod->klass->valuetype) {
6995 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
6996 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
6997 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7002 * The code generated by mini_emit_virtual_call () expects
7003 * iargs [0] to be a boxed instance, but luckily the vcall
7004 * will be transformed into a normal call there.
7006 } else if (context_used) {
7007 MonoInst *rgctx, *data;
7010 EMIT_GET_RGCTX (rgctx, context_used);
7011 if (cfg->opt & MONO_OPT_SHARED)
7012 rgctx_info = MONO_RGCTX_INFO_KLASS;
7014 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7015 data = emit_get_rgctx_klass (cfg, context_used, rgctx, cmethod->klass, rgctx_info);
7017 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7020 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7022 CHECK_TYPELOAD (cmethod->klass);
7025 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7026 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7027 * As a workaround, we call class cctors before allocating objects.
7029 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7030 guint8 *tramp = mono_create_class_init_trampoline (vtable);
7031 mono_emit_native_call (cfg, tramp,
7032 helper_sig_class_init_trampoline,
7034 if (cfg->verbose_level > 2)
7035 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7036 class_inits = g_slist_prepend (class_inits, vtable);
7039 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7044 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7046 /* Now call the actual ctor */
7047 /* Avoid virtual calls to ctors if possible */
7048 if (cmethod->klass->marshalbyref)
7049 callvirt_this_arg = sp [0];
7051 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used &&
7052 mono_method_check_inlining (cfg, cmethod) &&
7053 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7054 !g_list_find (dont_inline, cmethod)) {
7057 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7058 cfg->real_offset += 5;
7061 inline_costs += costs - 5;
7064 mono_emit_method_call (cfg, cmethod, fsig, sp, callvirt_this_arg);
7066 } else if (context_used &&
7067 (cmethod->klass->valuetype ||
7068 !mono_method_is_generic_sharable_impl (cmethod, TRUE))) {
7069 MonoInst *rgctx, *cmethod_addr;
7071 g_assert (!callvirt_this_arg);
7073 EMIT_GET_RGCTX (rgctx, context_used);
7074 cmethod_addr = emit_get_rgctx_method (cfg, context_used, rgctx,
7075 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7077 mono_emit_calli (cfg, fsig, sp, cmethod_addr);
7080 mono_emit_method_call (cfg, cmethod, fsig, sp, callvirt_this_arg);
7084 if (alloc == NULL) {
7086 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7087 ins->type = STACK_VTYPE;
7088 ins->klass = ins->klass;
7102 token = read32 (ip + 1);
7103 klass = mini_get_class (method, token, generic_context);
7104 CHECK_TYPELOAD (klass);
7105 if (sp [0]->type != STACK_OBJ)
7108 if (cfg->generic_sharing_context)
7109 context_used = mono_class_check_context_used (klass);
7112 MonoInst *rgctx, *args [2];
7114 g_assert (!method->klass->valuetype);
7120 EMIT_GET_RGCTX (rgctx, context_used);
7121 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass,
7122 MONO_RGCTX_INFO_KLASS);
7124 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7128 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7129 MonoMethod *mono_castclass;
7130 MonoInst *iargs [1];
7133 mono_castclass = mono_marshal_get_castclass (klass);
7136 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7137 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7138 g_assert (costs > 0);
7141 cfg->real_offset += 5;
7146 inline_costs += costs;
7149 ins = handle_castclass (cfg, klass, *sp);
7159 token = read32 (ip + 1);
7160 klass = mini_get_class (method, token, generic_context);
7161 CHECK_TYPELOAD (klass);
7162 if (sp [0]->type != STACK_OBJ)
7165 if (cfg->generic_sharing_context && mono_class_check_context_used (klass))
7166 GENERIC_SHARING_FAILURE (CEE_ISINST);
7168 if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7170 MonoMethod *mono_isinst;
7171 MonoInst *iargs [1];
7174 mono_isinst = mono_marshal_get_isinst (klass);
7177 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7178 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7179 g_assert (costs > 0);
7182 cfg->real_offset += 5;
7187 inline_costs += costs;
7190 ins = handle_isinst (cfg, klass, *sp);
7196 case CEE_UNBOX_ANY: {
7197 MonoInst *rgctx = NULL;
7202 token = read32 (ip + 1);
7203 klass = mini_get_class (method, token, generic_context);
7204 CHECK_TYPELOAD (klass);
7206 if (cfg->generic_sharing_context)
7207 context_used = mono_class_check_context_used (klass);
7209 if (generic_class_is_reference_type (cfg, klass)) {
7212 MonoInst *iargs [2];
7214 g_assert (!method->klass->valuetype);
7219 EMIT_GET_RGCTX (rgctx, context_used);
7220 iargs [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7221 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7225 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7226 MonoMethod *mono_castclass;
7227 MonoInst *iargs [1];
7230 mono_castclass = mono_marshal_get_castclass (klass);
7233 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7234 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7236 g_assert (costs > 0);
7239 cfg->real_offset += 5;
7243 inline_costs += costs;
7245 ins = handle_castclass (cfg, klass, *sp);
7254 EMIT_GET_RGCTX (rgctx, context_used);
7256 if (mono_class_is_nullable (klass)) {
7257 ins = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7264 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7270 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7278 int context_used = 0;
7284 token = read32 (ip + 1);
7285 klass = mini_get_class (method, token, generic_context);
7286 CHECK_TYPELOAD (klass);
7288 if (cfg->generic_sharing_context) {
7289 context_used = mono_class_check_context_used (klass);
7291 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
7292 GENERIC_SHARING_FAILURE (*ip);
7295 if (generic_class_is_reference_type (cfg, klass)) {
7301 if (klass == mono_defaults.void_class)
7303 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7305 /* frequent check in generic code: box (struct), brtrue */
7306 if (!mono_class_is_nullable (klass) &&
7307 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7308 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7310 MONO_INST_NEW (cfg, ins, OP_BR);
7311 if (*ip == CEE_BRTRUE_S) {
7314 target = ip + 1 + (signed char)(*ip);
7319 target = ip + 4 + (gint)(read32 (ip));
7322 GET_BBLOCK (cfg, tblock, target);
7323 link_bblock (cfg, bblock, tblock);
7324 CHECK_BBLOCK (target, ip, tblock);
7325 ins->inst_target_bb = tblock;
7326 GET_BBLOCK (cfg, tblock, ip);
7327 link_bblock (cfg, bblock, tblock);
7328 if (sp != stack_start) {
7329 handle_stack_args (cfg, stack_start, sp - stack_start);
7331 CHECK_UNVERIFIABLE (cfg);
7333 MONO_ADD_INS (bblock, ins);
7334 start_new_bblock = 1;
7341 if (mono_class_is_nullable (klass)) {
7342 GENERIC_SHARING_FAILURE (CEE_BOX);
7347 EMIT_GET_RGCTX (rgctx, context_used);
7348 if (cfg->opt & MONO_OPT_SHARED)
7349 rgctx_info = MONO_RGCTX_INFO_KLASS;
7351 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7352 data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, rgctx_info);
7353 *sp++ = handle_box_from_inst (cfg, val, klass, data);
7356 *sp++ = handle_box (cfg, val, klass);
7364 MonoInst *rgctx = NULL;
7369 token = read32 (ip + 1);
7370 klass = mini_get_class (method, token, generic_context);
7371 CHECK_TYPELOAD (klass);
7373 if (cfg->generic_sharing_context)
7374 context_used = mono_class_check_context_used (klass);
7377 EMIT_GET_RGCTX (rgctx, context_used);
7379 if (mono_class_is_nullable (klass)) {
7382 val = handle_unbox_nullable (cfg, *sp, klass, context_used, rgctx);
7383 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7387 ins = handle_unbox (cfg, klass, sp, context_used, rgctx);
7397 MonoClassField *field;
7401 if (*ip == CEE_STFLD) {
7408 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7410 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7413 token = read32 (ip + 1);
7414 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7415 field = mono_method_get_wrapper_data (method, token);
7416 klass = field->parent;
7419 field = mono_field_from_token (image, token, &klass, generic_context);
7423 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7424 FIELD_ACCESS_FAILURE;
7425 mono_class_init (klass);
7427 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7428 if (*ip == CEE_STFLD) {
7429 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7431 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7432 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7433 MonoInst *iargs [5];
7436 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7437 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7438 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7442 if (cfg->opt & MONO_OPT_INLINE) {
7443 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7444 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7445 g_assert (costs > 0);
7448 cfg->real_offset += 5;
7451 inline_costs += costs;
7454 mono_emit_method_call (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper), iargs, NULL);
7459 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7461 store->flags |= ins_flag;
7468 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7469 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7470 MonoInst *iargs [4];
7473 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7474 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7475 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7476 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7477 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7478 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7480 g_assert (costs > 0);
7483 cfg->real_offset += 5;
7487 inline_costs += costs;
7490 ins = mono_emit_method_call (cfg, wrapper, mono_method_signature (wrapper), iargs, NULL);
7494 if (sp [0]->type == STACK_VTYPE) {
7497 /* Have to compute the address of the variable */
7499 var = cfg->vreg_to_inst [sp [0]->dreg];
7500 if (!var && sp [0]->opcode == OP_VMOVE)
7501 var = cfg->vreg_to_inst [sp [0]->sreg1];
7504 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7508 if (*ip == CEE_LDFLDA) {
7509 dreg = alloc_preg (cfg);
7511 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7512 ins->klass = mono_class_from_mono_type (field->type);
7513 ins->type = STACK_MP;
7518 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7519 load->flags |= ins_flag;
7530 MonoClassField *field;
7531 gpointer addr = NULL;
7532 gboolean is_special_static;
7535 token = read32 (ip + 1);
7537 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7538 field = mono_method_get_wrapper_data (method, token);
7539 klass = field->parent;
7542 field = mono_field_from_token (image, token, &klass, generic_context);
7545 mono_class_init (klass);
7546 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7547 FIELD_ACCESS_FAILURE;
7550 * We can only support shared generic static
7551 * field access on architectures where the
7552 * trampoline code has been extended to handle
7553 * the generic class init.
7555 #ifndef MONO_ARCH_VTABLE_REG
7556 GENERIC_SHARING_FAILURE (*ip);
7559 if (cfg->generic_sharing_context)
7560 context_used = mono_class_check_context_used (klass);
7562 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7564 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7565 * to be called here.
7567 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7568 mono_class_vtable (cfg->domain, klass);
7569 CHECK_TYPELOAD (klass);
7571 mono_domain_lock (cfg->domain);
7572 if (cfg->domain->special_static_fields)
7573 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7574 mono_domain_unlock (cfg->domain);
7576 is_special_static = mono_class_field_is_special_static (field);
7578 /* Generate IR to compute the field address */
7580 if ((cfg->opt & MONO_OPT_SHARED) ||
7581 (cfg->compile_aot && is_special_static) ||
7582 (context_used && is_special_static)) {
7583 MonoInst *iargs [2];
7585 g_assert (field->parent);
7586 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7590 EMIT_GET_RGCTX (rgctx, context_used);
7591 iargs [1] = emit_get_rgctx_field (cfg, context_used, rgctx, field, MONO_RGCTX_INFO_CLASS_FIELD);
7593 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7595 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7596 } else if (context_used) {
7597 MonoInst *rgctx, *static_data;
7600 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7601 method->klass->name_space, method->klass->name, method->name,
7602 depth, field->offset);
7605 if (mono_class_needs_cctor_run (klass, method)) {
7607 MonoInst *vtable, *rgctx;
7609 EMIT_GET_RGCTX (rgctx, context_used);
7610 vtable = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_VTABLE);
7612 // FIXME: This doesn't work since it tries to pass the argument
7613 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7615 * The vtable pointer is always passed in a register regardless of
7616 * the calling convention, so assign it manually, and make a call
7617 * using a signature without parameters.
7619 call = (MonoCallInst*)mono_emit_native_call (cfg, mono_get_trampoline_code (MONO_TRAMPOLINE_GENERIC_CLASS_INIT), helper_sig_generic_class_init_trampoline, &vtable);
7620 #ifdef MONO_ARCH_VTABLE_REG
7621 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7628 * The pointer we're computing here is
7630 * super_info.static_data + field->offset
7632 EMIT_GET_RGCTX (rgctx, context_used);
7633 static_data = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_STATIC_DATA);
7635 if (field->offset == 0) {
7638 int addr_reg = mono_alloc_preg (cfg);
7639 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7641 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7642 MonoInst *iargs [2];
7644 g_assert (field->parent);
7645 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7646 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7647 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7649 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7651 CHECK_TYPELOAD (klass);
7653 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7654 guint8 *tramp = mono_create_class_init_trampoline (vtable);
7655 mono_emit_native_call (cfg, tramp,
7656 helper_sig_class_init_trampoline,
7658 if (cfg->verbose_level > 2)
7659 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, field->name);
7660 class_inits = g_slist_prepend (class_inits, vtable);
7662 if (cfg->run_cctors) {
7664 /* This makes it so that inlining cannot trigger */
7665 /* .cctors: too many apps depend on them */
7666 /* running with a specific order... */
7667 if (! vtable->initialized)
7669 ex = mono_runtime_class_init_full (vtable, FALSE);
7671 set_exception_object (cfg, ex);
7672 goto exception_exit;
7676 addr = (char*)vtable->data + field->offset;
7678 if (cfg->compile_aot)
7679 EMIT_NEW_SFLDACONST (cfg, ins, field);
7681 EMIT_NEW_PCONST (cfg, ins, addr);
7684 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
7685 * This could be later optimized to do just a couple of
7686 * memory dereferences with constant offsets.
7688 MonoInst *iargs [1];
7689 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
7690 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
7694 /* Generate IR to do the actual load/store operation */
7696 if (*ip == CEE_LDSFLDA) {
7697 ins->klass = mono_class_from_mono_type (field->type);
7699 } else if (*ip == CEE_STSFLD) {
7704 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
7705 store->flags |= ins_flag;
7708 gboolean is_const = FALSE;
7709 MonoVTable *vtable = NULL;
7711 if (!context_used) {
7712 vtable = mono_class_vtable (cfg->domain, klass);
7713 CHECK_TYPELOAD (klass);
7715 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
7716 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
7717 gpointer addr = (char*)vtable->data + field->offset;
7718 int ro_type = field->type->type;
7719 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
7720 ro_type = field->type->data.klass->enum_basetype->type;
7722 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, field->name);*/
7725 case MONO_TYPE_BOOLEAN:
7727 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
7731 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
7734 case MONO_TYPE_CHAR:
7736 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
7740 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
7745 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
7749 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
7754 case MONO_TYPE_STRING:
7755 case MONO_TYPE_OBJECT:
7756 case MONO_TYPE_CLASS:
7757 case MONO_TYPE_SZARRAY:
7759 case MONO_TYPE_FNPTR:
7760 case MONO_TYPE_ARRAY:
7761 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
7762 type_to_eval_stack_type ((cfg), field->type, *sp);
7767 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
7772 case MONO_TYPE_VALUETYPE:
7782 CHECK_STACK_OVF (1);
7784 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
7785 load->flags |= ins_flag;
7789 /* FIXME: don't see why this does not work */
7790 //cfg->disable_aot = TRUE;
7800 token = read32 (ip + 1);
7801 klass = mini_get_class (method, token, generic_context);
7802 CHECK_TYPELOAD (klass);
7803 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
7804 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
7815 const char *data_ptr;
7817 gboolean shared_access = FALSE;
7823 token = read32 (ip + 1);
7825 klass = mini_get_class (method, token, generic_context);
7826 CHECK_TYPELOAD (klass);
7828 if (cfg->generic_sharing_context) {
7829 int context_used = mono_class_check_context_used (klass);
7831 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
7832 GENERIC_SHARING_FAILURE (CEE_NEWARR);
7835 shared_access = TRUE;
7838 if (shared_access) {
7842 /* FIXME: Decompose later to help abcrem */
7845 EMIT_NEW_DOMAINCONST (cfg, args [0]);
7848 EMIT_GET_RGCTX (rgctx, context_used);
7849 args [1] = emit_get_rgctx_klass (cfg, context_used, rgctx, klass, MONO_RGCTX_INFO_KLASS);
7854 ins = mono_emit_jit_icall (cfg, mono_array_new, args);
7856 if (cfg->opt & MONO_OPT_SHARED) {
7857 /* Decompose now to avoid problems with references to the domainvar */
7858 MonoInst *iargs [3];
7860 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7861 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7864 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
7866 /* Decompose later since it is needed by abcrem */
7867 MONO_INST_NEW (cfg, ins, OP_NEWARR);
7868 ins->dreg = alloc_preg (cfg);
7869 ins->sreg1 = sp [0]->dreg;
7870 ins->inst_newa_class = klass;
7871 ins->type = STACK_OBJ;
7873 MONO_ADD_INS (cfg->cbb, ins);
7874 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7875 cfg->cbb->has_array_access = TRUE;
7885 * we inline/optimize the initialization sequence if possible.
7886 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
7887 * for small sizes open code the memcpy
7888 * ensure the rva field is big enough
7890 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size))) {
7891 MonoMethod *memcpy_method = get_memcpy_method ();
7892 MonoInst *iargs [3];
7893 int add_reg = alloc_preg (cfg);
7895 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
7896 if (cfg->compile_aot) {
7897 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(data_ptr), STACK_PTR, NULL);
7899 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
7901 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
7902 mono_emit_method_call (cfg, memcpy_method, memcpy_method->signature, iargs, NULL);
7911 if (sp [0]->type != STACK_OBJ)
7914 dreg = alloc_preg (cfg);
7915 MONO_INST_NEW (cfg, ins, OP_LDLEN);
7916 ins->dreg = alloc_preg (cfg);
7917 ins->sreg1 = sp [0]->dreg;
7918 ins->type = STACK_I4;
7919 MONO_ADD_INS (cfg->cbb, ins);
7920 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
7921 cfg->cbb->has_array_access = TRUE;
7929 if (sp [0]->type != STACK_OBJ)
7932 cfg->flags |= MONO_CFG_HAS_LDELEMA;
7934 klass = mini_get_class (method, read32 (ip + 1), generic_context);
7935 CHECK_TYPELOAD (klass);
7936 /* we need to make sure that this array is exactly the type it needs
7937 * to be for correctness. the wrappers are lax with their usage
7938 * so we need to ignore them here
7940 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
7941 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
7944 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
7948 case CEE_LDELEM_ANY:
7959 case CEE_LDELEM_REF: {
7965 if (*ip == CEE_LDELEM_ANY) {
7967 token = read32 (ip + 1);
7968 klass = mini_get_class (method, token, generic_context);
7969 CHECK_TYPELOAD (klass);
7970 mono_class_init (klass);
7973 klass = array_access_to_klass (*ip);
7975 if (sp [0]->type != STACK_OBJ)
7978 cfg->flags |= MONO_CFG_HAS_LDELEMA;
7980 if (sp [1]->opcode == OP_ICONST) {
7981 int array_reg = sp [0]->dreg;
7982 int index_reg = sp [1]->dreg;
7983 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
7985 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
7986 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
7988 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
7989 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
7992 if (*ip == CEE_LDELEM_ANY)
8005 case CEE_STELEM_REF:
8006 case CEE_STELEM_ANY: {
8012 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8014 if (*ip == CEE_STELEM_ANY) {
8016 token = read32 (ip + 1);
8017 klass = mini_get_class (method, token, generic_context);
8018 CHECK_TYPELOAD (klass);
8019 mono_class_init (klass);
8022 klass = array_access_to_klass (*ip);
8024 if (sp [0]->type != STACK_OBJ)
8027 /* storing a NULL doesn't need any of the complex checks in stelemref */
8028 if (generic_class_is_reference_type (cfg, klass) &&
8029 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8030 MonoMethod* helper = mono_marshal_get_stelemref ();
8031 MonoInst *iargs [3];
8033 if (sp [0]->type != STACK_OBJ)
8035 if (sp [2]->type != STACK_OBJ)
8042 mono_emit_method_call (cfg, helper, mono_method_signature (helper), iargs, NULL);
8044 if (sp [1]->opcode == OP_ICONST) {
8045 int array_reg = sp [0]->dreg;
8046 int index_reg = sp [1]->dreg;
8047 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8049 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8050 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8052 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8053 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8057 if (*ip == CEE_STELEM_ANY)
8064 case CEE_CKFINITE: {
8068 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8069 ins->sreg1 = sp [0]->dreg;
8070 ins->dreg = alloc_freg (cfg);
8071 ins->type = STACK_R8;
8072 MONO_ADD_INS (bblock, ins);
8078 case CEE_REFANYVAL: {
8079 MonoInst *src_var, *src;
8080 int context_used = 0;
8082 int klass_reg = alloc_preg (cfg);
8083 int dreg = alloc_preg (cfg);
8086 MONO_INST_NEW (cfg, ins, *ip);
8089 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8090 CHECK_TYPELOAD (klass);
8091 mono_class_init (klass);
8093 if (cfg->generic_sharing_context) {
8094 context_used = mono_class_check_context_used (klass);
8095 if (context_used && cfg->compile_aot)
8096 GENERIC_SHARING_FAILURE (*ip);
8101 GENERIC_SHARING_FAILURE (*ip);
8104 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8106 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8107 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8108 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8109 mini_emit_class_check (cfg, klass_reg, klass);
8110 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8112 ins->type = STACK_MP;
8117 case CEE_MKREFANY: {
8118 MonoInst *loc, *addr;
8119 int context_used = 0;
8122 MONO_INST_NEW (cfg, ins, *ip);
8125 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8126 CHECK_TYPELOAD (klass);
8127 mono_class_init (klass);
8129 if (cfg->generic_sharing_context) {
8130 context_used = mono_class_check_context_used (klass);
8131 if (context_used && cfg->compile_aot)
8132 GENERIC_SHARING_FAILURE (CEE_MKREFANY);
8135 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8136 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8139 GENERIC_SHARING_FAILURE (CEE_MKREFANY);
8140 } else if (cfg->compile_aot) {
8141 int const_reg = alloc_preg (cfg);
8142 int type_reg = alloc_preg (cfg);
8144 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8145 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8146 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8147 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8149 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8150 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8152 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8154 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8155 ins->type = STACK_VTYPE;
8156 ins->klass = mono_defaults.typed_reference_class;
8163 MonoClass *handle_class;
8164 int context_used = 0;
8166 CHECK_STACK_OVF (1);
8169 n = read32 (ip + 1);
8171 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8172 handle = mono_method_get_wrapper_data (method, n);
8173 handle_class = mono_method_get_wrapper_data (method, n + 1);
8174 if (handle_class == mono_defaults.typehandle_class)
8175 handle = &((MonoClass*)handle)->byval_arg;
8178 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8182 mono_class_init (handle_class);
8183 if (cfg->generic_sharing_context) {
8184 if (handle_class == mono_defaults.typehandle_class) {
8185 /* If we get a MONO_TYPE_CLASS
8186 then we need to provide the
8188 instantiation of it. */
8189 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8192 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8193 } else if (handle_class == mono_defaults.fieldhandle_class)
8194 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8195 else if (handle_class == mono_defaults.methodhandle_class)
8196 context_used = mono_method_check_context_used (handle);
8198 g_assert_not_reached ();
8201 if (cfg->opt & MONO_OPT_SHARED) {
8202 MonoInst *addr, *vtvar, *iargs [3];
8203 int method_context_used;
8205 if (cfg->generic_sharing_context)
8206 method_context_used = mono_method_check_context_used (method);
8208 method_context_used = 0;
8210 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8212 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8213 EMIT_NEW_ICONST (cfg, iargs [1], n);
8214 if (method_context_used) {
8217 EMIT_GET_RGCTX (rgctx, method_context_used);
8218 iargs [2] = emit_get_rgctx_method (cfg, method_context_used, rgctx, method, MONO_RGCTX_INFO_METHOD);
8219 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8221 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8222 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8224 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8226 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8228 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8230 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8231 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8232 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8233 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8234 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8235 MonoClass *tclass = mono_class_from_mono_type (handle);
8237 mono_class_init (tclass);
8241 g_assert (!cfg->compile_aot);
8242 EMIT_GET_RGCTX (rgctx, context_used);
8243 ins = emit_get_rgctx_klass (cfg, context_used, rgctx, tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8244 } else if (cfg->compile_aot) {
8246 * FIXME: We would have to include the context into the
8247 * aot constant too (tests/generic-array-type.2.exe).
8249 if (generic_context)
8250 cfg->disable_aot = TRUE;
8251 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n);
8253 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8255 ins->type = STACK_OBJ;
8256 ins->klass = cmethod->klass;
8259 MonoInst *addr, *vtvar;
8261 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8266 g_assert (!cfg->compile_aot);
8268 EMIT_GET_RGCTX (rgctx, context_used);
8269 if (handle_class == mono_defaults.typehandle_class) {
8270 ins = emit_get_rgctx_klass (cfg, context_used, rgctx,
8271 mono_class_from_mono_type (handle),
8272 MONO_RGCTX_INFO_TYPE);
8273 } else if (handle_class == mono_defaults.methodhandle_class) {
8274 ins = emit_get_rgctx_method (cfg, context_used, rgctx,
8275 handle, MONO_RGCTX_INFO_METHOD);
8276 } else if (handle_class == mono_defaults.fieldhandle_class) {
8277 ins = emit_get_rgctx_field (cfg, context_used, rgctx,
8278 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8280 g_assert_not_reached ();
8282 } else if (cfg->compile_aot) {
8283 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8285 EMIT_NEW_PCONST (cfg, ins, handle);
8287 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8288 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8289 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8299 MONO_INST_NEW (cfg, ins, OP_THROW);
8301 ins->sreg1 = sp [0]->dreg;
8303 bblock->out_of_line = TRUE;
8304 MONO_ADD_INS (bblock, ins);
8305 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8306 MONO_ADD_INS (bblock, ins);
8309 link_bblock (cfg, bblock, end_bblock);
8310 start_new_bblock = 1;
8312 case CEE_ENDFINALLY:
8313 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8314 MONO_ADD_INS (bblock, ins);
8316 start_new_bblock = 1;
8319 * Control will leave the method so empty the stack, otherwise
8320 * the next basic block will start with a nonempty stack.
8322 while (sp != stack_start) {
8330 if (*ip == CEE_LEAVE) {
8332 target = ip + 5 + (gint32)read32(ip + 1);
8335 target = ip + 2 + (signed char)(ip [1]);
8338 /* empty the stack */
8339 while (sp != stack_start) {
8344 * If this leave statement is in a catch block, check for a
8345 * pending exception, and rethrow it if necessary.
8347 for (i = 0; i < header->num_clauses; ++i) {
8348 MonoExceptionClause *clause = &header->clauses [i];
8351 * Use <= in the final comparison to handle clauses with multiple
8352 * leave statements, like in bug #78024.
8353 * The ordering of the exception clauses guarantees that we find the
8356 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8358 MonoBasicBlock *dont_throw;
8363 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8366 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8368 NEW_BBLOCK (cfg, dont_throw);
8371 * Currently, we always rethrow the abort exception, despite the
8372 * fact that this is not correct. See thread6.cs for an example.
8373 * But propagating the abort exception is more important than
8374 * getting the semantics right.
8376 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8377 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8378 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8380 MONO_START_BB (cfg, dont_throw);
8385 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8387 for (tmp = handlers; tmp; tmp = tmp->next) {
8389 link_bblock (cfg, bblock, tblock);
8390 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8391 ins->inst_target_bb = tblock;
8392 MONO_ADD_INS (bblock, ins);
8394 g_list_free (handlers);
8397 MONO_INST_NEW (cfg, ins, OP_BR);
8398 MONO_ADD_INS (bblock, ins);
8399 GET_BBLOCK (cfg, tblock, target);
8400 link_bblock (cfg, bblock, tblock);
8401 CHECK_BBLOCK (target, ip, tblock);
8402 ins->inst_target_bb = tblock;
8403 start_new_bblock = 1;
8405 if (*ip == CEE_LEAVE)
8414 * Mono specific opcodes
8416 case MONO_CUSTOM_PREFIX: {
8418 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8422 case CEE_MONO_ICALL: {
8424 MonoJitICallInfo *info;
8426 token = read32 (ip + 2);
8427 func = mono_method_get_wrapper_data (method, token);
8428 info = mono_find_jit_icall_by_addr (func);
8431 CHECK_STACK (info->sig->param_count);
8432 sp -= info->sig->param_count;
8434 ins = mono_emit_jit_icall (cfg, info->func, sp);
8435 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8439 inline_costs += 10 * num_calls++;
8443 case CEE_MONO_LDPTR: {
8446 CHECK_STACK_OVF (1);
8448 token = read32 (ip + 2);
8450 ptr = mono_method_get_wrapper_data (method, token);
8451 if (cfg->compile_aot && (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || cfg->method->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE)) {
8452 MonoMethod *wrapped = mono_marshal_method_from_wrapper (cfg->method);
8454 if (wrapped && ptr != NULL && mono_lookup_internal_call (wrapped) == ptr) {
8455 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, wrapped);
8461 if ((method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8462 MonoJitICallInfo *callinfo;
8463 const char *icall_name;
8465 icall_name = method->name + strlen ("__icall_wrapper_");
8466 g_assert (icall_name);
8467 callinfo = mono_find_jit_icall_by_name (icall_name);
8468 g_assert (callinfo);
8470 if (ptr == callinfo->func) {
8471 /* Will be transformed into an AOTCONST later */
8472 EMIT_NEW_PCONST (cfg, ins, ptr);
8479 /* FIXME: Generalize this */
8480 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8481 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8486 EMIT_NEW_PCONST (cfg, ins, ptr);
8489 inline_costs += 10 * num_calls++;
8490 /* Can't embed random pointers into AOT code */
8491 cfg->disable_aot = 1;
8494 case CEE_MONO_VTADDR: {
8495 MonoInst *src_var, *src;
8501 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8502 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8507 case CEE_MONO_NEWOBJ: {
8508 MonoInst *iargs [2];
8510 CHECK_STACK_OVF (1);
8512 token = read32 (ip + 2);
8513 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8514 mono_class_init (klass);
8515 NEW_DOMAINCONST (cfg, iargs [0]);
8516 MONO_ADD_INS (cfg->cbb, iargs [0]);
8517 NEW_CLASSCONST (cfg, iargs [1], klass);
8518 MONO_ADD_INS (cfg->cbb, iargs [1]);
8519 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8521 inline_costs += 10 * num_calls++;
8524 case CEE_MONO_OBJADDR:
8527 MONO_INST_NEW (cfg, ins, OP_MOVE);
8528 ins->dreg = alloc_preg (cfg);
8529 ins->sreg1 = sp [0]->dreg;
8530 ins->type = STACK_MP;
8531 MONO_ADD_INS (cfg->cbb, ins);
8535 case CEE_MONO_LDNATIVEOBJ:
8537 * Similar to LDOBJ, but instead load the unmanaged
8538 * representation of the vtype to the stack.
8543 token = read32 (ip + 2);
8544 klass = mono_method_get_wrapper_data (method, token);
8545 g_assert (klass->valuetype);
8546 mono_class_init (klass);
8549 MonoInst *src, *dest, *temp;
8552 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8553 temp->backend.is_pinvoke = 1;
8554 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8555 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8557 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8558 dest->type = STACK_VTYPE;
8559 dest->klass = klass;
8565 case CEE_MONO_RETOBJ: {
8567 * Same as RET, but return the native representation of a vtype
8570 g_assert (cfg->ret);
8571 g_assert (mono_method_signature (method)->pinvoke);
8576 token = read32 (ip + 2);
8577 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8579 EMIT_NEW_RETLOADA (cfg, ins);
8580 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8582 if (sp != stack_start)
8585 MONO_INST_NEW (cfg, ins, OP_BR);
8586 ins->inst_target_bb = end_bblock;
8587 MONO_ADD_INS (bblock, ins);
8588 link_bblock (cfg, bblock, end_bblock);
8589 start_new_bblock = 1;
8593 case CEE_MONO_CISINST:
8594 case CEE_MONO_CCASTCLASS: {
8599 token = read32 (ip + 2);
8600 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8601 if (ip [1] == CEE_MONO_CISINST)
8602 ins = handle_cisinst (cfg, klass, sp [0]);
8604 ins = handle_ccastclass (cfg, klass, sp [0]);
8610 case CEE_MONO_SAVE_LMF:
8611 case CEE_MONO_RESTORE_LMF:
8612 #ifdef MONO_ARCH_HAVE_LMF_OPS
8613 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8614 MONO_ADD_INS (bblock, ins);
8615 cfg->need_lmf_area = TRUE;
8619 case CEE_MONO_CLASSCONST:
8620 CHECK_STACK_OVF (1);
8622 token = read32 (ip + 2);
8623 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8626 inline_costs += 10 * num_calls++;
8628 case CEE_MONO_NOT_TAKEN:
8629 bblock->out_of_line = TRUE;
8633 CHECK_STACK_OVF (1);
8635 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8636 ins->dreg = alloc_preg (cfg);
8637 ins->inst_offset = (gint32)read32 (ip + 2);
8638 ins->type = STACK_PTR;
8639 MONO_ADD_INS (bblock, ins);
8644 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8654 /* somewhat similar to LDTOKEN */
8655 MonoInst *addr, *vtvar;
8656 CHECK_STACK_OVF (1);
8657 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8659 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8660 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8662 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8663 ins->type = STACK_VTYPE;
8664 ins->klass = mono_defaults.argumenthandle_class;
8677 * The following transforms:
8678 * CEE_CEQ into OP_CEQ
8679 * CEE_CGT into OP_CGT
8680 * CEE_CGT_UN into OP_CGT_UN
8681 * CEE_CLT into OP_CLT
8682 * CEE_CLT_UN into OP_CLT_UN
8684 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
8686 MONO_INST_NEW (cfg, ins, cmp->opcode);
8688 cmp->sreg1 = sp [0]->dreg;
8689 cmp->sreg2 = sp [1]->dreg;
8690 type_from_op (cmp, sp [0], sp [1]);
8692 if ((sp [0]->type == STACK_I8) || ((sizeof (gpointer) == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
8693 cmp->opcode = OP_LCOMPARE;
8694 else if (sp [0]->type == STACK_R8)
8695 cmp->opcode = OP_FCOMPARE;
8697 cmp->opcode = OP_ICOMPARE;
8698 MONO_ADD_INS (bblock, cmp);
8699 ins->type = STACK_I4;
8700 ins->dreg = alloc_dreg (cfg, ins->type);
8701 type_from_op (ins, sp [0], sp [1]);
8703 if (cmp->opcode == OP_FCOMPARE) {
8705 * The backends expect the fceq opcodes to do the
8708 cmp->opcode = OP_NOP;
8709 ins->sreg1 = cmp->sreg1;
8710 ins->sreg2 = cmp->sreg2;
8712 MONO_ADD_INS (bblock, ins);
8719 MonoMethod *cil_method, *ctor_method;
8722 CHECK_STACK_OVF (1);
8724 n = read32 (ip + 2);
8725 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8728 mono_class_init (cmethod->klass);
8730 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8731 GENERIC_SHARING_FAILURE (CEE_LDFTN);
8733 is_shared = (cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
8734 (cmethod->klass->generic_class || cmethod->klass->generic_container) &&
8735 mono_class_generic_sharing_enabled (cmethod->klass);
8737 cil_method = cmethod;
8738 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
8739 METHOD_ACCESS_FAILURE;
8741 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8742 if (check_linkdemand (cfg, method, cmethod))
8744 CHECK_CFG_EXCEPTION;
8745 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8746 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8750 * Optimize the common case of ldftn+delegate creation
8752 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
8753 /* FIXME: SGEN support */
8754 if (!is_shared && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
8755 MonoInst *target_ins;
8758 if (cfg->verbose_level > 3)
8759 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8760 target_ins = sp [-1];
8762 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
8770 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
8772 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
8773 if (method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED)
8774 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
8776 ins = mono_emit_jit_icall (cfg, mono_ldftn_nosync, &argconst);
8780 inline_costs += 10 * num_calls++;
8783 case CEE_LDVIRTFTN: {
8788 n = read32 (ip + 2);
8789 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
8792 mono_class_init (cmethod->klass);
8794 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8795 GENERIC_SHARING_FAILURE (CEE_LDVIRTFTN);
8797 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8798 if (check_linkdemand (cfg, method, cmethod))
8800 CHECK_CFG_EXCEPTION;
8801 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8802 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8807 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8808 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
8811 inline_costs += 10 * num_calls++;
8815 CHECK_STACK_OVF (1);
8817 n = read16 (ip + 2);
8819 EMIT_NEW_ARGLOAD (cfg, ins, n);
8824 CHECK_STACK_OVF (1);
8826 n = read16 (ip + 2);
8828 NEW_ARGLOADA (cfg, ins, n);
8829 MONO_ADD_INS (cfg->cbb, ins);
8837 n = read16 (ip + 2);
8839 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
8841 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8845 CHECK_STACK_OVF (1);
8847 n = read16 (ip + 2);
8849 EMIT_NEW_LOCLOAD (cfg, ins, n);
8854 CHECK_STACK_OVF (1);
8856 n = read16 (ip + 2);
8858 EMIT_NEW_LOCLOADA (cfg, ins, n);
8866 n = read16 (ip + 2);
8868 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8870 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
8877 if (sp != stack_start)
8879 if (cfg->method != method)
8881 * Inlining this into a loop in a parent could lead to
8882 * stack overflows which is different behavior than the
8883 * non-inlined case, thus disable inlining in this case.
8885 goto inline_failure;
8887 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8888 ins->dreg = alloc_preg (cfg);
8889 ins->sreg1 = sp [0]->dreg;
8890 ins->type = STACK_PTR;
8891 MONO_ADD_INS (cfg->cbb, ins);
8893 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8894 if (header->init_locals)
8895 ins->flags |= MONO_INST_INIT;
8900 case CEE_ENDFILTER: {
8901 MonoExceptionClause *clause, *nearest;
8902 int cc, nearest_num;
8906 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
8908 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
8909 ins->sreg1 = (*sp)->dreg;
8910 MONO_ADD_INS (bblock, ins);
8911 start_new_bblock = 1;
8916 for (cc = 0; cc < header->num_clauses; ++cc) {
8917 clause = &header->clauses [cc];
8918 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
8919 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
8920 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
8926 if ((ip - header->code) != nearest->handler_offset)
8931 case CEE_UNALIGNED_:
8932 ins_flag |= MONO_INST_UNALIGNED;
8933 /* FIXME: record alignment? we can assume 1 for now */
8938 ins_flag |= MONO_INST_VOLATILE;
8942 ins_flag |= MONO_INST_TAILCALL;
8943 cfg->flags |= MONO_CFG_HAS_TAIL;
8944 /* Can't inline tail calls at this time */
8945 inline_costs += 100000;
8952 token = read32 (ip + 2);
8953 klass = mini_get_class (method, token, generic_context);
8954 CHECK_TYPELOAD (klass);
8955 if (generic_class_is_reference_type (cfg, klass)) {
8956 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
8958 GENERIC_SHARING_FAILURE (CEE_INITOBJ);
8959 mini_emit_initobj (cfg, *sp, NULL, klass);
8964 case CEE_CONSTRAINED_:
8966 token = read32 (ip + 2);
8967 constrained_call = mono_class_get_full (image, token, generic_context);
8968 CHECK_TYPELOAD (constrained_call);
8973 MonoInst *iargs [3];
8977 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
8978 mini_emit_memcpy2 (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
8979 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
8980 /* emit_memset only works when val == 0 */
8981 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
8986 if (ip [1] == CEE_CPBLK) {
8987 MonoMethod *memcpy_method = get_memcpy_method ();
8988 mono_emit_method_call (cfg, memcpy_method, memcpy_method->signature, iargs, NULL);
8990 MonoMethod *memset_method = get_memset_method ();
8991 mono_emit_method_call (cfg, memset_method, memset_method->signature, iargs, NULL);
9001 ins_flag |= MONO_INST_NOTYPECHECK;
9003 ins_flag |= MONO_INST_NORANGECHECK;
9004 /* we ignore the no-nullcheck for now since we
9005 * really do it explicitly only when doing callvirt->call
9011 int handler_offset = -1;
9013 for (i = 0; i < header->num_clauses; ++i) {
9014 MonoExceptionClause *clause = &header->clauses [i];
9015 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY))
9016 handler_offset = clause->handler_offset;
9019 bblock->flags |= BB_EXCEPTION_UNSAFE;
9021 g_assert (handler_offset != -1);
9023 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9024 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9025 ins->sreg1 = load->dreg;
9026 MONO_ADD_INS (bblock, ins);
9028 link_bblock (cfg, bblock, end_bblock);
9029 start_new_bblock = 1;
9037 GENERIC_SHARING_FAILURE (CEE_SIZEOF);
9039 CHECK_STACK_OVF (1);
9041 token = read32 (ip + 2);
9042 /* FIXXME: handle generics. */
9043 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9044 MonoType *type = mono_type_create_from_typespec (image, token);
9045 token = mono_type_size (type, &ialign);
9047 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9048 CHECK_TYPELOAD (klass);
9049 mono_class_init (klass);
9050 token = mono_class_value_size (klass, &align);
9052 EMIT_NEW_ICONST (cfg, ins, token);
9057 case CEE_REFANYTYPE: {
9058 MonoInst *src_var, *src;
9064 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9066 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9067 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9068 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typed_reference_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9078 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9083 g_error ("opcode 0x%02x not handled", *ip);
9086 if (start_new_bblock != 1)
9089 bblock->cil_length = ip - bblock->cil_code;
9090 bblock->next_bb = end_bblock;
9092 if (cfg->method == method && cfg->domainvar) {
9094 MonoInst *get_domain;
9096 cfg->cbb = init_localsbb;
9098 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9099 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9102 get_domain->dreg = alloc_preg (cfg);
9103 MONO_ADD_INS (cfg->cbb, get_domain);
9105 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9106 MONO_ADD_INS (cfg->cbb, store);
9109 if (cfg->method == method && cfg->got_var)
9110 mono_emit_load_got_addr (cfg);
9112 if (header->init_locals) {
9115 cfg->cbb = init_localsbb;
9116 cfg->ip = header->code;
9117 for (i = 0; i < header->num_locals; ++i) {
9118 MonoType *ptype = header->locals [i];
9119 int t = ptype->type;
9120 dreg = cfg->locals [i]->dreg;
9122 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9123 t = ptype->data.klass->enum_basetype->type;
9125 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9126 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9127 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9128 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9129 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9130 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9131 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9132 ins->type = STACK_R8;
9133 ins->inst_p0 = (void*)&r8_0;
9134 ins->dreg = alloc_dreg (cfg, STACK_R8);
9135 MONO_ADD_INS (init_localsbb, ins);
9136 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9137 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9138 + ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9139 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9141 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9148 /* resolve backward branches in the middle of an existing basic block */
9149 for (tmp = bb_recheck; tmp; tmp = tmp->next) {
9151 /*printf ("need recheck in %s at IL_%04x\n", method->name, bblock->cil_code - header->code);*/
9152 tblock = find_previous (cfg->cil_offset_to_bb, header->code_size, start_bblock, bblock->cil_code);
9153 if (tblock != start_bblock) {
9155 split_bblock (cfg, tblock, bblock);
9156 l = bblock->cil_code - header->code;
9157 bblock->cil_length = tblock->cil_length - l;
9158 tblock->cil_length = l;
9160 printf ("recheck failed.\n");
9164 if (cfg->method == method) {
9166 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9167 bb->region = mono_find_block_region (cfg, bb->real_offset);
9169 mono_create_spvar_for_region (cfg, bb->region);
9170 if (cfg->verbose_level > 2)
9171 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9175 g_slist_free (class_inits);
9176 dont_inline = g_list_remove (dont_inline, method);
9178 if (inline_costs < 0) {
9181 /* Method is too large */
9182 mname = mono_method_full_name (method, TRUE);
9183 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9184 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9189 if ((cfg->verbose_level > 1) && (cfg->method == method))
9190 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9192 return inline_costs;
9195 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9196 g_slist_free (class_inits);
9197 dont_inline = g_list_remove (dont_inline, method);
9201 g_slist_free (class_inits);
9202 dont_inline = g_list_remove (dont_inline, method);
9206 g_slist_free (class_inits);
9207 dont_inline = g_list_remove (dont_inline, method);
9208 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9212 g_slist_free (class_inits);
9213 dont_inline = g_list_remove (dont_inline, method);
9214 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map an OP_*_MEMBASE_REG store opcode to its OP_*_MEMBASE_IMM
 * counterpart, so a register store whose source is a known constant can
 * be turned into an immediate store.  Aborts (g_assert_not_reached) for
 * store opcodes that have no immediate form.
 * NOTE(review): this listing is elided — the switch header and closing
 * braces are not visible between the numbered lines.
 */
9219 store_membase_reg_to_store_membase_imm (int opcode)
9222 case OP_STORE_MEMBASE_REG:
9223 return OP_STORE_MEMBASE_IMM;
9224 case OP_STOREI1_MEMBASE_REG:
9225 return OP_STOREI1_MEMBASE_IMM;
9226 case OP_STOREI2_MEMBASE_REG:
9227 return OP_STOREI2_MEMBASE_IMM;
9228 case OP_STOREI4_MEMBASE_REG:
9229 return OP_STOREI4_MEMBASE_IMM;
9230 case OP_STOREI8_MEMBASE_REG:
9231 return OP_STOREI8_MEMBASE_IMM;
/* No immediate variant exists for the remaining store opcodes. */
9233 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a register-register opcode to its register-immediate variant
 * (e.g. integer div/rem/shift, compares, membase stores, and — on
 * x86/amd64 — push/compare/call forms), used when one operand is a
 * constant.  NOTE(review): this listing is elided; most 'case' labels
 * matching the visible 'return' lines are missing, so the exact
 * opcode->opcode pairs for the bare returns cannot be confirmed here.
 */
9240 mono_op_to_op_imm (int opcode)
9250 return OP_IDIV_UN_IMM;
9254 return OP_IREM_UN_IMM;
9268 return OP_ISHR_UN_IMM;
9285 return OP_LSHR_UN_IMM;
9288 return OP_COMPARE_IMM;
9290 return OP_ICOMPARE_IMM;
9292 return OP_LCOMPARE_IMM;
9294 case OP_STORE_MEMBASE_REG:
9295 return OP_STORE_MEMBASE_IMM;
9296 case OP_STOREI1_MEMBASE_REG:
9297 return OP_STOREI1_MEMBASE_IMM;
9298 case OP_STOREI2_MEMBASE_REG:
9299 return OP_STOREI2_MEMBASE_IMM;
9300 case OP_STOREI4_MEMBASE_REG:
9301 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific opcodes with immediate forms. */
9303 #if defined(__i386__) || defined (__x86_64__)
9305 return OP_X86_PUSH_IMM;
9306 case OP_X86_COMPARE_MEMBASE_REG:
9307 return OP_X86_COMPARE_MEMBASE_IMM;
9309 #if defined(__x86_64__)
9310 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9311 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9313 case OP_VOIDCALL_REG:
9322 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode to the corresponding OP_LOAD*_MEMBASE
 * IR opcode (the 'case CEE_LDIND_*' labels are elided from this listing).
 * Two entries return the pointer-sized OP_LOAD_MEMBASE — presumably
 * LDIND_I and LDIND_REF; verify against the full source.
 * Aborts for opcodes outside the LDIND range.
 */
9329 ldind_to_load_membase (int opcode)
9333 return OP_LOADI1_MEMBASE;
9335 return OP_LOADU1_MEMBASE;
9337 return OP_LOADI2_MEMBASE;
9339 return OP_LOADU2_MEMBASE;
9341 return OP_LOADI4_MEMBASE;
9343 return OP_LOADU4_MEMBASE;
9345 return OP_LOAD_MEMBASE;
9347 return OP_LOAD_MEMBASE;
9349 return OP_LOADI8_MEMBASE;
9351 return OP_LOADR4_MEMBASE;
9353 return OP_LOADR8_MEMBASE;
9355 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode to the corresponding
 * OP_STORE*_MEMBASE_REG IR opcode (the 'case CEE_STIND_*' labels are
 * elided from this listing).  Aborts for opcodes outside the STIND range.
 */
9362 stind_to_store_membase (int opcode)
9366 return OP_STOREI1_MEMBASE_REG;
9368 return OP_STOREI2_MEMBASE_REG;
9370 return OP_STOREI4_MEMBASE_REG;
/* Pointer-sized store — presumably STIND_I/STIND_REF; verify in full source. */
9373 return OP_STORE_MEMBASE_REG;
9375 return OP_STOREI8_MEMBASE_REG;
9377 return OP_STORER4_MEMBASE_REG;
9379 return OP_STORER8_MEMBASE_REG;
9381 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address
 * load variant (OP_LOAD*_MEM), available only on x86/amd64.  64-bit
 * loads are only mapped when pointers are 8 bytes wide.
 * NOTE(review): the return for the OP_LOAD_MEMBASE case and the
 * fallback path are elided from this listing.
 */
9388 mono_load_membase_to_load_mem (int opcode)
9390 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9391 #if defined(__i386__) || defined(__x86_64__)
9393 case OP_LOAD_MEMBASE:
9395 case OP_LOADU1_MEMBASE:
9396 return OP_LOADU1_MEM;
9397 case OP_LOADU2_MEMBASE:
9398 return OP_LOADU2_MEM;
9399 case OP_LOADI4_MEMBASE:
9400 return OP_LOADI4_MEM;
9401 case OP_LOADU4_MEMBASE:
9402 return OP_LOADU4_MEM;
9403 #if SIZEOF_VOID_P == 8
9404 case OP_LOADI8_MEMBASE:
9405 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   When an ALU result is immediately stored back to memory, fold the
 * ALU op + store into a single read-modify-write memory opcode
 * (x86/amd64 only).  STORE_OPCODE is the following store; only
 * pointer-sized / I4 (and on amd64 also I8) stores are eligible.
 * NOTE(review): the 'case OP_*' labels matching the visible returns are
 * elided; the pairing below is inferred from the returned opcode names.
 */
9414 op_to_op_dest_membase (int store_opcode, int opcode)
9416 #if defined(__i386__)
/* Only word-sized destinations can be folded on x86. */
9417 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9422 return OP_X86_ADD_MEMBASE_REG;
9424 return OP_X86_SUB_MEMBASE_REG;
9426 return OP_X86_AND_MEMBASE_REG;
9428 return OP_X86_OR_MEMBASE_REG;
9430 return OP_X86_XOR_MEMBASE_REG;
9433 return OP_X86_ADD_MEMBASE_IMM;
9436 return OP_X86_SUB_MEMBASE_IMM;
9439 return OP_X86_AND_MEMBASE_IMM;
9442 return OP_X86_OR_MEMBASE_IMM;
9445 return OP_X86_XOR_MEMBASE_IMM;
9451 #if defined(__x86_64__)
/* amd64 additionally allows 8-byte destinations. */
9452 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9457 return OP_X86_ADD_MEMBASE_REG;
9459 return OP_X86_SUB_MEMBASE_REG;
9461 return OP_X86_AND_MEMBASE_REG;
9463 return OP_X86_OR_MEMBASE_REG;
9465 return OP_X86_XOR_MEMBASE_REG;
9467 return OP_X86_ADD_MEMBASE_IMM;
9469 return OP_X86_SUB_MEMBASE_IMM;
9471 return OP_X86_AND_MEMBASE_IMM;
9473 return OP_X86_OR_MEMBASE_IMM;
9475 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants. */
9477 return OP_AMD64_ADD_MEMBASE_REG;
9479 return OP_AMD64_SUB_MEMBASE_REG;
9481 return OP_AMD64_AND_MEMBASE_REG;
9483 return OP_AMD64_OR_MEMBASE_REG;
9485 return OP_AMD64_XOR_MEMBASE_REG;
9488 return OP_AMD64_ADD_MEMBASE_IMM;
9491 return OP_AMD64_SUB_MEMBASE_IMM;
9494 return OP_AMD64_AND_MEMBASE_IMM;
9497 return OP_AMD64_OR_MEMBASE_IMM;
9500 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result opcode followed by a 1-byte store into a
 * single setcc-to-memory opcode (x86/amd64 only).  The 'case' labels
 * for the compare opcodes are elided from this listing — presumably
 * CEQ -> SETEQ and a not-equal form -> SETNE; verify in full source.
 */
9510 op_to_op_store_membase (int store_opcode, int opcode)
9512 #if defined(__i386__) || defined(__x86_64__)
9515 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9516 return OP_X86_SETEQ_MEMBASE;
9518 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9519 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load feeding OPCODE's *first* source register into a
 * memory-operand form of OPCODE (x86/amd64 only), eliminating the
 * separate load.  LOAD_OPCODE is the load producing sreg1; only
 * word-sized loads (and, for byte compares, LOADU1) are eligible.
 */
9527 op_to_op_src1_membase (int load_opcode, int opcode)
9530 /* FIXME: This has sign extension issues */
9532 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9533 return OP_X86_COMPARE_MEMBASE8_IMM;
9536 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9541 return OP_X86_PUSH_MEMBASE;
9542 case OP_COMPARE_IMM:
9543 case OP_ICOMPARE_IMM:
9544 return OP_X86_COMPARE_MEMBASE_IMM;
9547 return OP_X86_COMPARE_MEMBASE_REG;
/* amd64 variant below; 32-bit and 64-bit loads map to different opcodes. */
9552 /* FIXME: This has sign extension issues */
9554 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9555 return OP_X86_COMPARE_MEMBASE8_IMM;
9560 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9561 return OP_X86_PUSH_MEMBASE;
/* Disabled: immediate compares only work for 32-bit immediates on amd64. */
9563 /* FIXME: This only works for 32 bit immediates
9564 case OP_COMPARE_IMM:
9565 case OP_LCOMPARE_IMM:
9566 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9567 return OP_AMD64_COMPARE_MEMBASE_IMM;
9569 case OP_ICOMPARE_IMM:
9570 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9571 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9575 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9576 return OP_AMD64_COMPARE_MEMBASE_REG;
9579 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9580 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load feeding OPCODE's *second* source register into a
 * reg-memory form of OPCODE (x86/amd64 only).  LOAD_OPCODE is the load
 * producing sreg2; 32-bit loads map to the X86_* opcodes and 64-bit
 * loads (amd64) to the AMD64_* opcodes.
 * NOTE(review): most 'case OP_*' labels are elided from this listing.
 */
9589 op_to_op_src2_membase (int load_opcode, int opcode)
9592 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9598 return OP_X86_COMPARE_REG_MEMBASE;
9600 return OP_X86_ADD_REG_MEMBASE;
9602 return OP_X86_SUB_REG_MEMBASE;
9604 return OP_X86_AND_REG_MEMBASE;
9606 return OP_X86_OR_REG_MEMBASE;
9608 return OP_X86_XOR_REG_MEMBASE;
/* amd64: pick the 32-bit or 64-bit memory form based on the load width. */
9615 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9616 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9620 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9621 return OP_AMD64_COMPARE_REG_MEMBASE;
9624 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9625 return OP_X86_ADD_REG_MEMBASE;
9627 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9628 return OP_X86_SUB_REG_MEMBASE;
9630 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9631 return OP_X86_AND_REG_MEMBASE;
9633 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9634 return OP_X86_OR_REG_MEMBASE;
9636 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9637 return OP_X86_XOR_REG_MEMBASE;
9639 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9640 return OP_AMD64_ADD_REG_MEMBASE;
9642 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9643 return OP_AMD64_SUB_REG_MEMBASE;
9645 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9646 return OP_AMD64_AND_REG_MEMBASE;
9648 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9649 return OP_AMD64_OR_REG_MEMBASE;
9651 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9652 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses the conversion for opcodes
 * that are emulated in software on this architecture (long shifts on
 * 32-bit targets, mul/div where MONO_ARCH_EMULATE_* is defined) — the
 * excluded 'case' labels are elided from this listing.
 */
9660 mono_op_to_op_imm_noemul (int opcode)
9663 #if SIZEOF_VOID_P == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPTS)
9668 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* Everything else falls through to the generic immediate conversion. */
9676 return mono_op_to_op_imm (opcode);
9681 * mono_handle_global_vregs:
9683 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Pass 1: make vregs referenced in more than one bblock 'global' by
 * allocating a variable for them (longs on 32-bit targets are always
 * made global, and their component vregs marked volatile).
 *   Pass 2: convert variables used in only one bblock back into local
 * vregs (AOT only checks that no call splits the def/use interval,
 * which would force a spill), then compress the varinfo/vars tables.
 * NOTE(review): this listing is elided — lines between the embedded
 * original line numbers are missing; comments describe only what is
 * visible here.
 */
9687 mono_handle_global_vregs (MonoCompile *cfg)
/* NOTE(review): element size uses sizeof (gint32*) though the array holds
 * gint32 values — over-allocates on 64-bit but is harmless; confirm intent. */
9693 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
9695 /* Find local vregs used in more than one bb */
9696 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9697 MonoInst *ins = bb->code;
9698 int block_num = bb->block_num;
9700 if (cfg->verbose_level > 1)
9701 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
9704 for (; ins; ins = ins->next) {
9705 const char *spec = INS_INFO (ins->opcode);
9706 int regtype, regindex;
9709 if (G_UNLIKELY (cfg->verbose_level > 1))
9710 mono_print_ins (ins);
9712 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dest, src1, src2 of each instruction. */
9714 for (regindex = 0; regindex < 3; regindex ++) {
9717 if (regindex == 0) {
9718 regtype = spec [MONO_INST_DEST];
9722 } else if (regindex == 1) {
9723 regtype = spec [MONO_INST_SRC1];
9728 regtype = spec [MONO_INST_SRC2];
9734 #if SIZEOF_VOID_P == 4
9735 if (regtype == 'l') {
9737 * Since some instructions reference the original long vreg,
9738 * and some reference the two component vregs, it is quite hard
9739 * to determine when it needs to be global. So be conservative.
9741 if (!get_vreg_to_inst (cfg, vreg)) {
9742 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
9744 if (cfg->verbose_level > 1)
9745 printf ("LONG VREG R%d made global.\n", vreg);
9748 * Make the component vregs volatile since the optimizations can
9749 * get confused otherwise.
9751 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
9752 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
9757 g_assert (vreg != -1);
9759 prev_bb = vreg_to_bb [vreg];
9761 /* 0 is a valid block num */
9762 vreg_to_bb [vreg] = block_num + 1;
9763 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers never need a variable. */
9764 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
9767 if (!get_vreg_to_inst (cfg, vreg)) {
9768 if (G_UNLIKELY (cfg->verbose_level > 1))
9769 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
9773 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
9776 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
9779 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
9782 g_assert_not_reached ();
9786 /* Flag as having been used in more than one bb */
9787 vreg_to_bb [vreg] = -1;
9793 /* If a variable is used in only one bblock, convert it into a local vreg */
9794 for (i = 0; i < cfg->num_varinfo; i++) {
9795 MonoInst *var = cfg->varinfo [i];
9796 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
9798 switch (var->type) {
9804 #if SIZEOF_VOID_P == 8
9807 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
9808 /* Enabling this screws up the fp stack on x86 */
9811 /* Arguments are implicitly global */
9812 /* Putting R4 vars into registers doesn't work currently */
9813 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4)) {
9815 * Make that the variable's liveness interval doesn't contain a call, since
9816 * that would cause the lvreg to be spilled, making the whole optimization
9819 /* This is too slow for JIT compilation */
9821 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
9823 int def_index, call_index, ins_index;
9824 gboolean spilled = FALSE;
9829 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
9830 const char *spec = INS_INFO (ins->opcode);
9832 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
9833 def_index = ins_index;
/* FIX: the second disjunct previously re-tested SRC1/sreg1, so a use of
 * the variable as the *second* source operand was never seen here and an
 * intervening call could be missed.  Test SRC2/sreg2 instead. */
9835 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
9836 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
9837 if (call_index > def_index) {
9843 if (MONO_IS_CALL (ins))
9844 call_index = ins_index;
9854 if (G_UNLIKELY (cfg->verbose_level > 2))
9855 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
9856 var->flags |= MONO_INST_IS_DEAD;
9857 cfg->vreg_to_inst [var->dreg] = NULL;
9864 * Compress the varinfo and vars tables so the liveness computation is faster and
9865 * takes up less space.
9868 for (i = 0; i < cfg->num_varinfo; ++i) {
9869 MonoInst *var = cfg->varinfo [i];
9870 if (pos < i && cfg->locals_start == i)
9871 cfg->locals_start = pos;
9872 if (!(var->flags & MONO_INST_IS_DEAD)) {
9874 cfg->varinfo [pos] = cfg->varinfo [i];
9875 cfg->varinfo [pos]->inst_c0 = pos;
9876 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
9877 cfg->vars [pos].idx = pos;
9878 #if SIZEOF_VOID_P == 4
9879 if (cfg->varinfo [pos]->type == STACK_I8) {
9880 /* Modify the two component vars too */
9883 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
9884 var1->inst_c0 = pos;
9885 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
9886 var1->inst_c0 = pos;
9893 cfg->num_varinfo = pos;
9894 if (cfg->locals_start > cfg->num_varinfo)
9895 cfg->locals_start = cfg->num_varinfo;
/*
 * mono_spill_global_vars:
 *
 * Generate spill code for variables which are not allocated to registers,
 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
 * code is generated which could be optimized by the local optimization passes.
 */
mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
	guint32 *vreg_to_lvreg;   /* vreg -> lvreg currently caching its value (0 = none) */
	guint32 i, lvregs_len;
	gboolean dest_has_lvreg = FALSE;
	guint32 stacktypes [128]; /* maps an ins-spec char ('i'/'l'/'f') to a STACK_ type */

	*need_local_opts = FALSE;

	memset (spec2, 0, sizeof (spec2));

	/* FIXME: Move this function to mini.c */
	stacktypes ['i'] = STACK_PTR;
	stacktypes ['l'] = STACK_I8;
	stacktypes ['f'] = STACK_R8;

#if SIZEOF_VOID_P == 4
	/* Create MonoInsts for longs */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];

		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
			switch (ins->type) {
#ifdef MONO_ARCH_SOFT_FLOAT
				g_assert (ins->opcode == OP_REGOFFSET);

				/* The two component vars share the stack slot of the 64 bit var */
				tree = get_vreg_to_inst (cfg, ins->dreg + 1);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;

				tree = get_vreg_to_inst (cfg, ins->dreg + 2);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;

	/* FIXME: widening and truncation */

	/*
	 * As an optimization, when a variable allocated to the stack is first loaded into
	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
	 * the variable again.
	 */
	orig_next_vreg = cfg->next_vreg;
	vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
	lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);

	/* Add spill loads/stores */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		if (cfg->verbose_level > 1)
			printf ("\nSPILL BLOCK %d:\n", bb->block_num);

		/* Clear vreg_to_lvreg array: cached lvregs are only valid within one bblock */
		for (i = 0; i < lvregs_len; i++)
			vreg_to_lvreg [lvregs [i]] = 0;

		MONO_BB_FOR_EACH_INS (bb, ins) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype, srcindex, sreg, tmp_reg, prev_dreg;
			gboolean store, no_lvreg;

			if (G_UNLIKELY (cfg->verbose_level > 1))
				mono_print_ins (ins);

			if (ins->opcode == OP_NOP)

			/*
			 * We handle LDADDR here as well, since it can only be decomposed
			 * when variable addresses are known.
			 */
			if (ins->opcode == OP_LDADDR) {
				MonoInst *var = ins->inst_p0;

				if (var->opcode == OP_VTARG_ADDR) {
					/* Happens on SPARC/S390 where vtypes are passed by reference */
					MonoInst *vtaddr = var->inst_left;
					if (vtaddr->opcode == OP_REGVAR) {
						/* The address itself is in a register: a plain move suffices */
						ins->opcode = OP_MOVE;
						ins->sreg1 = vtaddr->dreg;
					else if (var->inst_left->opcode == OP_REGOFFSET) {
						/* NOTE(review): var->inst_left == vtaddr here (see assignment above) */
						ins->opcode = OP_LOAD_MEMBASE;
						ins->inst_basereg = vtaddr->inst_basereg;
						ins->inst_offset = vtaddr->inst_offset;
					g_assert (var->opcode == OP_REGOFFSET);

					/* Materialize the address as basereg + offset */
					ins->opcode = OP_ADD_IMM;
					ins->sreg1 = var->inst_basereg;
					ins->inst_imm = var->inst_offset;

				*need_local_opts = TRUE;
				spec = INS_INFO (ins->opcode);

			/* High level (CEE_) opcodes must have been lowered before this pass */
			if (ins->opcode < MONO_CEE_LAST) {
				mono_print_ins (ins);
				g_assert_not_reached ();

			/*
			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
			 */
			if (MONO_IS_STORE_MEMBASE (ins)) {
				/* Swap dreg/sreg2 so the rest of this pass sees the stored value in dreg */
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;

				spec2 [MONO_INST_DEST] = ' ';
				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
			} else if (MONO_IS_STORE_MEMINDEX (ins))
				g_assert_not_reached ();

			if (G_UNLIKELY (cfg->verbose_level > 1))
				printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);

			/* DREG: rewrite the destination if it is a global var */
			regtype = spec [MONO_INST_DEST];
			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));

			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
				MonoInst *store_ins;

				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);

				if (var->opcode == OP_REGVAR) {
					/* The var got a hard register: just redirect the dreg */
					ins->dreg = var->dreg;
				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
					/*
					 * Instead of emitting a load+store, use a _membase opcode.
					 */
					g_assert (var->opcode == OP_REGOFFSET);
					if (ins->opcode == OP_MOVE) {
						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
						ins->inst_basereg = var->inst_basereg;
						ins->inst_offset = var->inst_offset;
					spec = INS_INFO (ins->opcode);
					g_assert (var->opcode == OP_REGOFFSET);

					prev_dreg = ins->dreg;

					/* Invalidate any previous lvreg for this vreg */
					vreg_to_lvreg [ins->dreg] = 0;

#ifdef MONO_ARCH_SOFT_FLOAT
					if (store_opcode == OP_STORER8_MEMBASE_REG) {
						/* Soft-float: the R8 value actually lives in an integer pair */
						store_opcode = OP_STOREI8_MEMBASE_REG;

					/* Redirect the dreg to a fresh lvreg; a store is emitted below */
					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);

					if (regtype == 'l') {
						/* 32 bit: store the long as two I4 stores (LS and MS words) */
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						g_assert (store_opcode != OP_STOREV_MEMBASE);

						/* Try to fuse the store into the instruction itself */
						/* FIXME: Add more instructions */
						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
							ins->inst_imm = ins->inst_c0;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
							ins->opcode = store_opcode;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;

							/* Swap back dreg/sreg2 (see the swap for store opcodes above) */
							tmp_reg = ins->dreg;
							ins->dreg = ins->sreg2;
							ins->sreg2 = tmp_reg;

							spec2 [MONO_INST_DEST] = ' ';
							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
							// FIXME: The backends expect the base reg to be in inst_basereg
							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
							ins->inst_basereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
							/* printf ("INS: "); mono_print_ins (ins); */
							/* Create a store instruction */
							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);

							/* Insert it after the instruction */
							mono_bblock_insert_after_ins (bb, ins, store_ins);

							/*
							 * We can't assign ins->dreg to var->dreg here, since the
							 * sregs could use it. So set a flag, and do it after
							 */
							if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
								dest_has_lvreg = TRUE;

			/* SREGS: rewrite each source operand that is a global var */
			for (srcindex = 0; srcindex < 2; ++srcindex) {
				regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
				sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;

				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
					MonoInst *var = get_vreg_to_inst (cfg, sreg);
					MonoInst *load_ins;
					guint32 load_opcode;

					if (var->opcode == OP_REGVAR) {
						/* The var got a hard register: just redirect the sreg */
						ins->sreg1 = var->dreg;
						ins->sreg2 = var->dreg;
					g_assert (var->opcode == OP_REGOFFSET);

					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);

					g_assert (load_opcode != OP_LOADV_MEMBASE);

					if (vreg_to_lvreg [sreg]) {
						/* The variable is already loaded to an lvreg */
						if (G_UNLIKELY (cfg->verbose_level > 1))
							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
						ins->sreg1 = vreg_to_lvreg [sreg];
						ins->sreg2 = vreg_to_lvreg [sreg];
						/* Try to fuse the load into the instruction */
						if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
							ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
							ins->inst_basereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
						} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
							ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
							ins->sreg2 = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							if ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE)) {
								/* A move of the lvreg onto itself can be dropped */
								ins->opcode = OP_NOP;
							//printf ("%d ", srcindex); mono_print_ins (ins);

							sreg = alloc_dreg (cfg, stacktypes [regtype]);

							if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
								if (var->dreg == prev_dreg) {
									/*
									 * sreg refers to the value loaded by the load
									 * emitted below, but we need to use ins->dreg
									 * since it refers to the store emitted earlier.
									 */
								/* Cache the lvreg for later uses of the var in this bblock */
								vreg_to_lvreg [var->dreg] = sreg;
								g_assert (lvregs_len < 1024);
								lvregs [lvregs_len ++] = var->dreg;

							if (regtype == 'l') {
								/* 32 bit: load the long as two I4 loads (MS word first) */
								NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
								mono_bblock_insert_before_ins (bb, ins, load_ins);
								NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
								mono_bblock_insert_before_ins (bb, ins, load_ins);
#if SIZEOF_VOID_P == 4
								g_assert (load_opcode != OP_LOADI8_MEMBASE);
								NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
								mono_bblock_insert_before_ins (bb, ins, load_ins);

			if (dest_has_lvreg) {
				/* Now that the sregs are processed, it is safe to cache the dreg */
				vreg_to_lvreg [prev_dreg] = ins->dreg;
				g_assert (lvregs_len < 1024);
				lvregs [lvregs_len ++] = prev_dreg;
				dest_has_lvreg = FALSE;

				/* Undo the dreg/sreg2 swap done for store opcodes */
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;

			if (MONO_IS_CALL (ins)) {
				/* Clear vreg_to_lvreg array */
				/* A call clobbers the lvregs, so the cached values are no longer valid */
				for (i = 0; i < lvregs_len; i++)
					vreg_to_lvreg [lvregs [i]] = 0;

			if (cfg->verbose_level > 1)
				mono_print_ins_index (1, ins);
10289 * - use 'iadd' instead of 'int_add'
10290 * - handling ovf opcodes: decompose in method_to_ir.
10291 * - unify iregs/fregs
10292 * -> partly done, the missing parts are:
10293 * - a more complete unification would involve unifying the hregs as well, so
10294 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10295 * would no longer map to the machine hregs, so the code generators would need to
10296 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10297 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10298 * fp/non-fp branches speeds it up by about 15%.
10299 * - use sext/zext opcodes instead of shifts
10301 * - get rid of TEMPLOADs if possible and use vregs instead
10302 * - clean up usage of OP_P/OP_ opcodes
10303 * - cleanup usage of DUMMY_USE
10304 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10306 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10307 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10308 * - make sure handle_stack_args () is called before the branch is emitted
10309 * - when the new IR is done, get rid of all unused stuff
10310 * - COMPARE/BEQ as separate instructions or unify them ?
10311 * - keeping them separate allows specialized compare instructions like
10312 * compare_imm, compare_membase
10313 * - most back ends unify fp compare+branch, fp compare+ceq
10314 * - integrate handle_stack_args into inline_method
 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10316 * - Things to backport to the old JIT:
10317 * - op_atomic_exchange fix for amd64
10318 * - localloc fix for amd64
10319 * - x86 type_token change
10321 * - long eq/ne optimizations
10322 * - handle long shift opts on 32 bit platforms somehow: they require
10323 * 3 sregs (2 for arg1 and 1 for arg2)
10324 * - make byref a 'normal' type.
10325 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10326 * variable if needed.
10327 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10328 * like inline_method.
10329 * - remove inlining restrictions
10330 * - remove mono_save_args.
10331 * - add 'introduce a new optimization to simplify some range checks'
10332 * - fix LNEG and enable cfold of INEG
10333 * - generalize x86 optimizations like ldelema as a peephole optimization
10334 * - add store_mem_imm for amd64
10335 * - optimize the loading of the interruption flag in the managed->native wrappers
10336 * - avoid special handling of OP_NOP in passes
10337 * - move code inserting instructions into one function/macro.
10338 * - cleanup the code replacement in decompose_long_opts ()
10339 * - try a coalescing phase after liveness analysis
10340 * - add float -> vreg conversion + local optimizations on !x86
10341 * - figure out how to handle decomposed branches during optimizations, ie.
10342 * compare+branch, op_jump_table+op_br etc.
10343 * - promote RuntimeXHandles to vregs
10344 * - vtype cleanups:
10345 * - add a NEW_VARLOADA_VREG macro
10346 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10347 * accessing vtype fields.
10348 * - get rid of I8CONST on 64 bit platforms
10349 * - dealing with the increase in code size due to branches created during opcode
10351 * - use extended basic blocks
10352 * - all parts of the JIT
10353 * - handle_global_vregs () && local regalloc
10354 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10355 * - sources of increase in code size:
10358 * - isinst and castclass
10359 * - lvregs not allocated to global registers even if used multiple times
10360 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10362 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10363 * - add all micro optimizations from the old JIT
10364 * - put tree optimizations into the deadce pass
10365 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10366 * specific function.
10367 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10368 * fcompare + branchCC.
10369 * - sig->ret->byref seems to be set for some calls made from ldfld wrappers when
10370 * running generics.exe.
10371 * - create a helper function for allocating a stack slot, taking into account
10372 * MONO_CFG_HAS_SPILLUP.
10373 * - merge new GC changes in mini.c.
10375 * - merge the ia64 switch changes.
10376 * - merge the mips conditional changes.
10377 * - remove unused opcodes from mini-ops.h, remove "op_" from the opcode names,
10378 * remove the op_ opcodes from the cpu-..md files, clean up the cpu-..md files.
10379 * - make the cpu_ tables smaller when the usage of the cee_ opcodes is removed.
10380 * - optimize mono_regstate2_alloc_int/float.
10381 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10382 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10383 * parts of the tree could be separated by other instructions, killing the tree
10384 * arguments, or stores killing loads etc. Also, should we fold loads into other
10385 * instructions if the result of the load is used multiple times ?
10386 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10387 * - LAST MERGE: 108395.
10388 * - when returning vtypes in registers, generate IR and append it to the end of the
10389 * last bb instead of doing it in the epilog.
10390 * - when the new JIT is done, use the ins emission macros in ir-emit.h instead of the
10391 * ones in inssel.h.
10392 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10400 - When to decompose opcodes:
10401 - earlier: this makes some optimizations hard to implement, since the low level IR
     no longer contains the necessary information. But it is easier to do.
10403 - later: harder to implement, enables more optimizations.
10404 - Branches inside bblocks:
10405 - created when decomposing complex opcodes.
10406 - branches to another bblock: harmless, but not tracked by the branch
10407 optimizations, so need to branch to a label at the start of the bblock.
10408 - branches to inside the same bblock: very problematic, trips up the local
     reg allocator. Can be fixed by splitting the current bblock, but that is a
10410 complex operation, since some local vregs can become global vregs etc.
10411 - Local/global vregs:
10412 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10413 local register allocator.
10414 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10415 structure, created by mono_create_var (). Assigned to hregs or the stack by
10416 the global register allocator.
10417 - When to do optimizations like alu->alu_imm:
10418 - earlier -> saves work later on since the IR will be smaller/simpler
10419 - later -> can work on more instructions
10420 - Handling of valuetypes:
   - When a vtype is pushed on the stack, a new temporary is created, an
10422 instruction computing its address (LDADDR) is emitted and pushed on
10423 the stack. Need to optimize cases when the vtype is used immediately as in
10424 argument passing, stloc etc.
10425 - Instead of the to_end stuff in the old JIT, simply call the function handling
10426 the values on the stack before emitting the last instruction of the bb.