+/*
+ * create_jit_info:
+ *
+ *   Allocate and fill in the MonoJitInfo descriptor for the method just
+ * compiled in CFG: the native code bounds, the exception clause table
+ * translated from IL offsets to native offsets, optional generic sharing
+ * information, an optional try-block hole table, and cached unwind info.
+ * The descriptor is mempool-allocated from the domain, except for dynamic
+ * methods, where it is malloc'ed so it can be freed with the method.
+ */
+static MonoJitInfo*
+create_jit_info (MonoCompile *cfg, MonoMethod *method_to_compile)
+{
+ GSList *tmp;
+ MonoMethodHeader *header;
+ MonoJitInfo *jinfo;
+ int num_clauses;
+ int generic_info_size;
+ int holes_size = 0, num_holes = 0;
+
+ g_assert (method_to_compile == cfg->method);
+ header = cfg->header;
+
+ /* Generic sharing needs extra trailing space for a MonoGenericJitInfo. */
+ if (cfg->generic_sharing_context)
+ generic_info_size = sizeof (MonoGenericJitInfo);
+ else
+ generic_info_size = 0;
+
+ /*
+ * First pass over the hole list: count how many holes actually need an
+ * entry in the hole table, so the right amount of trailing space can be
+ * allocated below.
+ */
+ if (cfg->try_block_holes) {
+ for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
+ TryBlockHole *hole = tmp->data;
+ MonoExceptionClause *ec = hole->clause;
+ int hole_end = hole->basic_block->native_offset + hole->basic_block->native_length;
+ MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
+ g_assert (clause_last_bb);
+
+ /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
+ if (clause_last_bb->native_offset != hole_end)
+ ++num_holes;
+ }
+ if (num_holes)
+ holes_size = sizeof (MonoTryBlockHoleTableJitInfo) + num_holes * sizeof (MonoTryBlockHoleJitInfo);
+ if (G_UNLIKELY (cfg->verbose_level >= 4))
+ printf ("Number of try block holes %d\n", num_holes);
+ }
+
+ /* The LLVM backend computes its own exception clause table. */
+ if (COMPILE_LLVM (cfg))
+ num_clauses = cfg->llvm_ex_info_len;
+ else
+ num_clauses = header->num_clauses;
+
+ /*
+ * The clause array, the generic info and the hole table are all stored
+ * inline after the fixed-size part of the MonoJitInfo.
+ */
+ if (cfg->method->dynamic) {
+ jinfo = g_malloc0 (MONO_SIZEOF_JIT_INFO + (num_clauses * sizeof (MonoJitExceptionInfo)) +
+ generic_info_size + holes_size);
+ } else {
+ jinfo = mono_domain_alloc0 (cfg->domain, MONO_SIZEOF_JIT_INFO +
+ (num_clauses * sizeof (MonoJitExceptionInfo)) +
+ generic_info_size + holes_size);
+ }
+
+ jinfo->method = cfg->method_to_register;
+ jinfo->code_start = cfg->native_code;
+ jinfo->code_size = cfg->code_len;
+ jinfo->used_regs = cfg->used_int_regs;
+ jinfo->domain_neutral = (cfg->opt & MONO_OPT_SHARED) != 0;
+ jinfo->cas_inited = FALSE; /* initialization delayed at the first stack walk using this method */
+ jinfo->num_clauses = num_clauses;
+ if (COMPILE_LLVM (cfg))
+ jinfo->from_llvm = TRUE;
+
+ if (cfg->generic_sharing_context) {
+ MonoInst *inst;
+ MonoGenericJitInfo *gi;
+
+ jinfo->has_generic_jit_info = 1;
+
+ gi = mono_jit_info_get_generic_jit_info (jinfo);
+ g_assert (gi);
+
+ gi->generic_sharing_context = cfg->generic_sharing_context;
+
+ /*
+ * Static methods, methods with a method instantiation and valuetype
+ * methods get the runtime generic context from the rgctx variable
+ * rather than from the 'this' argument (see the branch below).
+ */
+ if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
+ mini_method_get_context (method_to_compile)->method_inst ||
+ method_to_compile->klass->valuetype) {
+ g_assert (cfg->rgctx_var);
+ }
+
+ gi->has_this = 1;
+
+ if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
+ mini_method_get_context (method_to_compile)->method_inst ||
+ method_to_compile->klass->valuetype) {
+ inst = cfg->rgctx_var;
+ g_assert (inst->opcode == OP_REGOFFSET);
+ } else {
+ inst = cfg->args [0];
+ }
+
+ /* Record whether the value lives in a register or in a stack slot. */
+ if (inst->opcode == OP_REGVAR) {
+ gi->this_in_reg = 1;
+ gi->this_reg = inst->dreg;
+ } else {
+ g_assert (inst->opcode == OP_REGOFFSET);
+#ifdef TARGET_X86
+ g_assert (inst->inst_basereg == X86_EBP);
+#elif defined(TARGET_AMD64)
+ g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP);
+#endif
+ g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32);
+
+ gi->this_in_reg = 0;
+ gi->this_reg = inst->inst_basereg;
+ gi->this_offset = inst->inst_offset;
+ }
+ }
+
+ /*
+ * Second pass over the hole list: fill in the hole table, skipping
+ * exactly the same end-of-try holes the counting pass above skipped.
+ */
+ if (num_holes) {
+ MonoTryBlockHoleTableJitInfo *table;
+ int i;
+
+ jinfo->has_try_block_holes = 1;
+ table = mono_jit_info_get_try_block_hole_table_info (jinfo);
+ table->num_holes = (guint16)num_holes;
+ i = 0;
+ for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
+ guint32 start_bb_offset;
+ MonoTryBlockHoleJitInfo *hole;
+ TryBlockHole *hole_data = tmp->data;
+ MonoExceptionClause *ec = hole_data->clause;
+ int hole_end = hole_data->basic_block->native_offset + hole_data->basic_block->native_length;
+ MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
+ g_assert (clause_last_bb);
+
+ /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
+ if (clause_last_bb->native_offset == hole_end)
+ continue;
+
+ start_bb_offset = hole_data->start_offset - hole_data->basic_block->native_offset;
+ hole = &table->holes [i++];
+ /* The clause is stored as an index into the header's clause array. */
+ hole->clause = hole_data->clause - &header->clauses [0];
+ hole->offset = (guint32)hole_data->start_offset;
+ hole->length = (guint16)(hole_data->basic_block->native_length - start_bb_offset);
+
+ if (G_UNLIKELY (cfg->verbose_level >= 4))
+ printf ("\tTry block hole at eh clause %d offset %x length %x\n", hole->clause, hole->offset, hole->length);
+ }
+ g_assert (i == num_holes);
+ }
+
+ if (COMPILE_LLVM (cfg)) {
+ /* The LLVM backend already produced native-offset clauses; copy them over. */
+ if (num_clauses)
+ memcpy (&jinfo->clauses [0], &cfg->llvm_ex_info [0], num_clauses * sizeof (MonoJitExceptionInfo));
+ } else if (header->num_clauses) {
+ int i;
+
+ /* Translate each IL-level exception clause into native code offsets. */
+ for (i = 0; i < header->num_clauses; i++) {
+ MonoExceptionClause *ec = &header->clauses [i];
+ MonoJitExceptionInfo *ei = &jinfo->clauses [i];
+ MonoBasicBlock *tblock;
+ MonoInst *exvar;
+
+ ei->flags = ec->flags;
+
+ /* Stack slot where the exception object is stored for this handler, if any. */
+ exvar = mono_find_exvar_for_offset (cfg, ec->handler_offset);
+ ei->exvar_offset = exvar ? exvar->inst_offset : 0;
+
+ if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
+ tblock = cfg->cil_offset_to_bb [ec->data.filter_offset];
+ g_assert (tblock);
+ ei->data.filter = cfg->native_code + tblock->native_offset;
+ } else {
+ ei->data.catch_class = ec->data.catch_class;
+ }
+
+ /* Map the try region's IL start/end and the handler start to native addresses. */
+ tblock = cfg->cil_offset_to_bb [ec->try_offset];
+ g_assert (tblock);
+ ei->try_start = cfg->native_code + tblock->native_offset;
+ g_assert (tblock->native_offset);
+ tblock = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
+ g_assert (tblock);
+ ei->try_end = cfg->native_code + tblock->native_offset;
+ g_assert (tblock->native_offset);
+ tblock = cfg->cil_offset_to_bb [ec->handler_offset];
+ g_assert (tblock);
+ ei->handler_start = cfg->native_code + tblock->native_offset;
+
+ /*
+ * A hole reaching the end of this try region is represented by
+ * shrinking the region instead of emitting a hole table entry
+ * (matches the skip condition in the counting pass above).
+ */
+ for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
+ TryBlockHole *hole = tmp->data;
+ gpointer hole_end = cfg->native_code + (hole->basic_block->native_offset + hole->basic_block->native_length);
+ if (hole->clause == ec && hole_end == ei->try_end) {
+ if (G_UNLIKELY (cfg->verbose_level >= 4))
+ printf ("\tShortening try block %d from %x to %x\n", i, (int)((guint8*)ei->try_end - cfg->native_code), hole->start_offset);
+
+ ei->try_end = cfg->native_code + hole->start_offset;
+ break;
+ }
+ }
+
+ /*
+ * For finally clauses, record where the handler ends: the native
+ * offset of the first block after it, or the epilog when the
+ * handler extends to the end of the method body.
+ */
+ if (ec->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
+ int end_offset;
+ if (ec->handler_offset + ec->handler_len < header->code_size) {
+ tblock = cfg->cil_offset_to_bb [ec->handler_offset + ec->handler_len];
+ g_assert (tblock);
+ end_offset = tblock->native_offset;
+ } else {
+ end_offset = cfg->epilog_begin;
+ }
+ ei->data.handler_end = cfg->native_code + end_offset;
+ }
+ }
+
+ if (G_UNLIKELY (cfg->verbose_level >= 4)) {
+ for (i = 0; i < jinfo->num_clauses; i++) {
+ MonoJitExceptionInfo *ei = &jinfo->clauses [i];
+ int start = (guint8*)ei->try_start - cfg->native_code;
+ int end = (guint8*)ei->try_end - cfg->native_code;
+ int handler = (guint8*)ei->handler_start - cfg->native_code;
+
+ printf ("JitInfo EH clause %d flags %x try %x-%x handler %x\n", i, ei->flags, start, end, handler);
+ }
+ }
+
+ }
+
+ /*
+ * Its possible to generate dwarf unwind info for xdebug etc, but not actually
+ * using it during runtime, hence the define.
+ */
+#ifdef MONO_ARCH_HAVE_XP_UNWIND
+ if (cfg->encoded_unwind_ops) {
+ jinfo->used_regs = mono_cache_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len);
+ g_free (cfg->encoded_unwind_ops);
+ } else if (cfg->unwind_ops) {
+ guint32 info_len;
+ guint8 *unwind_info = mono_unwind_ops_encode (cfg->unwind_ops, &info_len);
+
+ jinfo->used_regs = mono_cache_unwind_info (unwind_info, info_len);
+ g_free (unwind_info);
+ }
+#endif
+
+ return jinfo;
+}
+#endif
+
+/*
+ * share_type_args:
+ *
+ *   Build a generic instantiation from INST where every reference type
+ * argument and every type variable (VAR/MVAR) is replaced by the
+ * corresponding argument of SHARED, while the remaining (value type)
+ * arguments are kept as-is.
+ */
+static MonoGenericInst*
+share_type_args (MonoGenericInst *inst, MonoGenericInst *shared)
+{
+ MonoType **argv;
+ MonoGenericInst *res;
+ int i;
+
+ argv = g_new0 (MonoType*, inst->type_argc);
+ for (i = 0; i < inst->type_argc; ++i) {
+ MonoType *t = inst->type_argv [i];
+
+ if (MONO_TYPE_IS_REFERENCE (t) || t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR)
+ argv [i] = shared->type_argv [i];
+ else
+ argv [i] = t;
+ }
+ res = mono_metadata_get_generic_inst (inst->type_argc, argv);
+ g_free (argv);
+ return res;
+}
+
+/*
+ * mini_get_shared_method:
+ *
+ *   Return the method which is actually compiled/registered when doing generic sharing.
+ */
+MonoMethod*
+mini_get_shared_method (MonoMethod *method)
+{
+ MonoGenericContext shared_context;
+ MonoMethod *declaring, *inflated;
+ gboolean is_partial = FALSE;
+
+ /* Find the generic method definition METHOD was inflated from. */
+ if (method->is_generic || method->klass->generic_container)
+ declaring = method;
+ else
+ declaring = mono_method_get_declaring_generic_method (method);
+
+ /* Start out from the open context of the definition itself. */
+ if (declaring->is_generic)
+ shared_context = mono_method_get_generic_container (declaring)->context;
+ else
+ shared_context = declaring->klass->generic_container->context;
+
+ /*
+ * Handle partial sharing: create the shared context by replacing the
+ * ref type arguments with type parameters, and keeping the rest.
+ */
+ if (method != declaring && method->is_inflated && !mono_method_is_generic_sharable_impl_full (method, FALSE, FALSE)) {
+ MonoGenericContext *context = mono_method_get_context (method);
+
+ is_partial = TRUE;
+ if (context->class_inst)
+ shared_context.class_inst = share_type_args (context->class_inst, shared_context.class_inst);
+ if (context->method_inst)
+ shared_context.method_inst = share_type_args (context->method_inst, shared_context.method_inst);
+ }
+
+ inflated = mono_class_inflate_generic_method (declaring, &shared_context);
+ if (!is_partial) {
+ /* The result should be an inflated method whose parent is not inflated */
+ g_assert (!inflated->klass->is_inflated);
+ }
+ return inflated;
+}
+
+#ifndef DISABLE_JIT