}
#endif
+/*
+ * count_fields_nested:
+ *
+ *   Return the number of scalar (non-struct) fields in KLASS, descending
+ * recursively into fields which are themselves value types, so the result
+ * matches the number of entries collect_field_info_nested () will produce.
+ */
+static int
+count_fields_nested (MonoClass *klass)
+{
+	MonoMarshalType *info;
+	int i, count;
+
+	info = mono_marshal_load_type_info (klass);
+	g_assert (info);
+	count = 0;
+	for (i = 0; i < info->num_fields; ++i) {
+		if (MONO_TYPE_ISSTRUCT (info->fields [i].field->type))
+			/* Recurse: a nested struct contributes its flattened field count */
+			count += count_fields_nested (mono_class_from_mono_type (info->fields [i].field->type));
+		else
+			count ++;
+	}
+	return count;
+}
+
+/*
+ * collect_field_info_nested:
+ *
+ *   Flatten the fields of KLASS into the FIELDS array, recursing into
+ * fields which are themselves value types. INDEX is the next free slot in
+ * FIELDS; OFFSET is the offset of KLASS inside the outermost struct and is
+ * added to each copied field's offset so offsets are absolute.
+ * Returns the updated next free slot.
+ */
+static int
+collect_field_info_nested (MonoClass *klass, MonoMarshalField *fields, int index, int offset)
+{
+	MonoMarshalType *info;
+	int i;
+
+	info = mono_marshal_load_type_info (klass);
+	g_assert (info);
+	for (i = 0; i < info->num_fields; ++i) {
+		if (MONO_TYPE_ISSTRUCT (info->fields [i].field->type)) {
+			/*
+			 * Accumulate the enclosing offset so structs nested more
+			 * than one level deep keep their absolute position within
+			 * the outermost struct; passing only the local field
+			 * offset would drop OFFSET and misplace those fields.
+			 */
+			index = collect_field_info_nested (mono_class_from_mono_type (info->fields [i].field->type), fields, index, offset + info->fields [i].offset);
+		} else {
+			memcpy (&fields [index], &info->fields [i], sizeof (MonoMarshalField));
+			fields [index].offset += offset;
+			index ++;
+		}
+	}
+	return index;
+}
+
static void
add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
gboolean is_return,
guint32 *gr, guint32 *fr, guint32 *stack_size)
{
- guint32 size, quad, nquads, i;
+ guint32 size, quad, nquads, i, nfields;
/* Keep track of the size used in each quad so we can */
/* use the right size when copying args/return vars. */
guint32 quadsize [2] = {8, 8};
ArgumentClass args [2];
MonoMarshalType *info = NULL;
+ MonoMarshalField *fields = NULL;
MonoClass *klass;
MonoGenericSharingContext tmp_gsctx;
gboolean pass_on_stack = FALSE;
guint32 field_size;
info = mono_marshal_load_type_info (klass);
- g_assert(info);
- for (i = 0; i < info->num_fields; ++i) {
- field_size = mono_marshal_type_size (info->fields [i].field->type,
- info->fields [i].mspec,
+ g_assert (info);
+
+ /*
+ * Collect field information recursively to be able to
+ * handle nested structures.
+ */
+ nfields = count_fields_nested (klass);
+ fields = g_new0 (MonoMarshalField, nfields);
+ collect_field_info_nested (klass, fields, 0, 0);
+
+ for (i = 0; i < nfields; ++i) {
+ field_size = mono_marshal_type_size (fields [i].field->type,
+ fields [i].mspec,
&align, TRUE, klass->unicode);
- if ((info->fields [i].offset < 8) && (info->fields [i].offset + field_size) > 8) {
+ if ((fields [i].offset < 8) && (fields [i].offset + field_size) > 8) {
pass_on_stack = TRUE;
break;
}
*stack_size += ALIGN_TO (size, 8);
ainfo->storage = ArgOnStack;
+ g_free (fields);
return;
}
* The X87 and SSEUP stuff is left out since there are no such types in
* the CLR.
*/
- info = mono_marshal_load_type_info (klass);
g_assert (info);
+ g_assert (fields);
#ifndef HOST_WIN32
if (info->native_size > 16) {
*stack_size += ALIGN_TO (info->native_size, 8);
ainfo->storage = ArgOnStack;
+ g_free (fields);
return;
}
#else
}
}
+ g_free (fields);
return;
}
#endif
guint32 align;
ArgumentClass class1;
- if (info->num_fields == 0)
+ if (nfields == 0)
class1 = ARG_CLASS_MEMORY;
else
class1 = ARG_CLASS_NO_CLASS;
- for (i = 0; i < info->num_fields; ++i) {
- size = mono_marshal_type_size (info->fields [i].field->type,
- info->fields [i].mspec,
+ for (i = 0; i < nfields; ++i) {
+ size = mono_marshal_type_size (fields [i].field->type,
+ fields [i].mspec,
&align, TRUE, klass->unicode);
- if ((info->fields [i].offset < 8) && (info->fields [i].offset + size) > 8) {
+ if ((fields [i].offset < 8) && (fields [i].offset + size) > 8) {
/* Unaligned field */
NOT_IMPLEMENTED;
}
/* Skip fields in other quad */
- if ((quad == 0) && (info->fields [i].offset >= 8))
+ if ((quad == 0) && (fields [i].offset >= 8))
continue;
- if ((quad == 1) && (info->fields [i].offset < 8))
+ if ((quad == 1) && (fields [i].offset < 8))
continue;
/* How far into this quad this data extends.*/
/* (8 is size of quad) */
- quadsize [quad] = info->fields [i].offset + size - (quad * 8);
+ quadsize [quad] = fields [i].offset + size - (quad * 8);
- class1 = merge_argument_class_from_type (gsctx, info->fields [i].field->type, class1);
+ class1 = merge_argument_class_from_type (gsctx, fields [i].field->type, class1);
}
g_assert (class1 != ARG_CLASS_NO_CLASS);
args [quad] = class1;
}
}
+ g_free (fields);
+
/* Post merger cleanup */
if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY))
args [0] = args [1] = ARG_CLASS_MEMORY;
}
}
- if (cfg->gen_seq_points) {
+ if (cfg->gen_seq_points_debug_data) {
MonoInst *ins;
if (cfg->compile_aot) {
case OP_NOT_REACHED:
case OP_NOT_NULL:
break;
+ case OP_IL_SEQ_POINT:
+ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
+ break;
case OP_SEQ_POINT: {
int i;
for (i = 0; next && i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
gboolean match = FALSE;
+
+ while (next && next->opcode == OP_IL_SEQ_POINT)
+ next = next->next;
+ if (!next)
+ break;
ins = cfg->args [i];
if (ins->opcode != OP_REGVAR) {
}
}
- if (cfg->gen_seq_points) {
+ if (cfg->gen_seq_points_debug_data) {
MonoInst *info_var = cfg->arch.seq_point_info_var;
/* Initialize seq_point_info_var */
if (!has_target)
g_free (buff);
}
+ mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL);
return start;
}
/* Load the vtable */
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoObject, vtable), 8);
amd64_jump_membase (code, AMD64_RAX, offset);
+ mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL);
return start;
}
g_assert (code - start <= size);
nacl_domain_code_validate(domain, &start, size, &code);
+ mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);
return start;
}