HttpListenerRequest Uri is now unescaped.
-Subproject commit d47982f6131bf8411aef9221dba1c4a42d9d5a41
+Subproject commit abe1cdfb1f941a03c477546534b33693a2ed1b40
astruct = new AStruct ();
}
rs = "A";
+ List<int> alist = new List<int> () { 12 };
}
StackFrame frame = e.Thread.GetFrames () [0];
var locals = frame.Method.GetLocals ();
- Assert.AreEqual (8, locals.Length);
- for (int i = 0; i < 8; ++i) {
+ Assert.AreEqual (9, locals.Length);
+ for (int i = 0; i < 9; ++i) {
if (locals [i].Name == "args") {
Assert.IsTrue (locals [i].IsArg);
Assert.AreEqual ("String[]", locals [i].Type.Name);
Assert.IsTrue (locals [i].IsArg);
Assert.AreEqual ("String", locals [i].Type.Name);
} else if (locals [i].Name == "astruct") {
+ } else if (locals [i].Name == "alist") {
} else {
Assert.Fail ();
}
AssertValue ("AB", vals [i]);
if (locals [i].Name == "t")
AssertValue ("ABC", vals [i]);
+ if (locals [i].Name == "alist")
+ ;
}
// Argument checking
type = constrType;
cVal = val;
tabIx = MDTable.CustomAttribute;
+
+ var bac = val as ByteArrConst;
+ if (bac != null)
+ byteVal = bac.val;
}
internal CustomAttribute(MetaDataElement paren, Method constrType,
}
public class ByteArrConst : DataConstant {
- byte[] val;
+ internal byte[] val;
public ByteArrConst(byte[] val)
{
Type type;
Class cmodType;
+ PrimitiveTypeRef cmodPrimType;
/// <summary>
/// Create a new custom modifier for a type
this.cmodType = cmodType;
}
+ public CustomModifiedType(Type type, CustomModifier cmod, PrimitiveTypeRef cmodType)
+ : base((byte)cmod)
+ {
+ this.type = type;
+ this.cmodPrimType = cmodType;
+ }
+
internal sealed override void TypeSig(MemoryStream str)
{
str.WriteByte(typeIndex);
- MetaData.CompressNum(cmodType.TypeDefOrRefToken(),str);
+
+ if (cmodType != null) {
+ MetaData.CompressNum(cmodType.TypeDefOrRefToken(),str);
+ } else {
+ MetaData.CompressNum(cmodPrimType.TypeDefOrRefToken(),str);
+ }
+
type.TypeSig(str);
}
}
+ public class PrimitiveTypeRef : Type
+ {
+ PrimitiveType type;
+ MetaData metaData;
+
+ internal PrimitiveTypeRef(PrimitiveType type, MetaData md)
+ : base (0)
+ {
+ this.type = type;
+ this.metaData = md;
+ }
+
+ internal uint TypeDefOrRefToken()
+ {
+ uint cIx = type.GetTypeSpec (metaData).Row;
+ cIx = (cIx << 2) | 0x2;
+ return cIx;
+ }
+ }
+
/**************************************************************************/
/// <summary>
/// Descriptor for an pointer (type * or type &)
return file;
}
+ public PrimitiveTypeRef AddPrimitiveType (PrimitiveType type)
+ {
+ return new PrimitiveTypeRef (type, metaData);
+ }
+
/// <summary>
/// Add a manifest resource to this PEFile NOT YET IMPLEMENTED
/// </summary>
namespace System.Runtime.Serialization
{
- public class BitFlagsGenerator
+ class BitFlagsGenerator
{
int bitCount;
byte [] locals;
namespace System.Runtime.Serialization
{
- public class CodeInterpreter
+ static class CodeInterpreter
{
-
internal static object ConvertValue(object arg, Type source, Type target)
{
return InternalConvert(arg, source, target, false);
../../build/common/Consts.cs
-../../build/common/Locale.cs
-../../build/common/MonoTODOAttribute.cs
../../build/common/SR.cs
Assembly/AssemblyInfo.cs
public override string ToString ()
{
+ ResolveArguments ();
+
StringBuilder sb = new StringBuilder ();
sb.Append ("[" + ctorInfo.DeclaringType.FullName + "(");
DateTime ttime = pair.Key;
TimeType ttype = pair.Value;
+ if (ttime.Year > year)
+ continue;
+ if (ttime.Year < year)
+ break;
+
if (ttype.IsDst) {
// DaylightTime.Delta is relative to the current BaseUtcOffset.
- var d = new TimeSpan (0, 0, ttype.Offset) - BaseUtcOffset;
- // Handle DST gradients
- if (start != DateTime.MinValue && delta != d)
- end = start;
-
+ delta = new TimeSpan (0, 0, ttype.Offset) - BaseUtcOffset;
start = ttime;
- delta = d;
-
- if (ttime.Year <= year)
- break;
} else {
- if (ttime.Year < year)
- break;
-
end = ttime;
- start = DateTime.MinValue;
}
}
start += BaseUtcOffset;
// DaylightTime.End is relative to the DST time.
- if (end != DateTime.MaxValue)
+ if (end != DateTime.MinValue)
end += BaseUtcOffset + delta;
} else {
- AdjustmentRule rule = null;
- foreach (var r in GetAdjustmentRules ()) {
- if (r.DateEnd.Year < year)
+ AdjustmentRule first = null, last = null;
+
+ foreach (var rule in GetAdjustmentRules ()) {
+ if (rule.DateStart.Year != year && rule.DateEnd.Year != year)
continue;
- if (r.DateStart.Year > year)
- break;
- rule = r;
- }
- if (rule != null) {
- start = TransitionPoint (rule.DaylightTransitionStart, year);
- end = TransitionPoint (rule.DaylightTransitionEnd, year);
- delta = rule.DaylightDelta;
+ if (rule.DateStart.Year == year)
+ first = rule;
+ if (rule.DateEnd.Year == year)
+ last = rule;
}
+
+ if (first == null || last == null)
+ return new DaylightTime (new DateTime (), new DateTime (), new TimeSpan ());
+
+ start = TransitionPoint (first.DaylightTransitionStart, year);
+ end = TransitionPoint (last.DaylightTransitionEnd, year);
+ delta = first.DaylightDelta;
}
if (start == DateTime.MinValue || end == DateTime.MinValue)
using System.IO;
using System.Runtime.Serialization.Formatters.Binary;
using System.Collections;
+using System.Reflection;
+using System.Globalization;
using NUnit.Framework;
namespace MonoTests.System
Assert.AreEqual(baseUtcOffset, cairo.GetUtcOffset (d.Add (new TimeSpan(0,0,0, 1))));
}
}
+
+ [TestFixture]
+ public class GetDaylightChanges
+ {
+ MethodInfo getChanges;
+
+ [SetUp]
+ public void Setup ()
+ {
+ var flags = BindingFlags.Instance | BindingFlags.NonPublic;
+ getChanges = typeof (TimeZoneInfo).GetMethod ("GetDaylightChanges", flags);
+ }
+
+ [Test]
+ public void TestSydneyDaylightChanges ()
+ {
+ TimeZoneInfo tz;
+ if (Environment.OSVersion.Platform == PlatformID.Unix)
+ tz = TimeZoneInfo.FindSystemTimeZoneById ("Australia/Sydney");
+ else
+ tz = TimeZoneInfo.FindSystemTimeZoneById ("W. Australia Standard Time");
+
+ var changes = (DaylightTime) getChanges.Invoke (tz, new object [] {2014});
+
+ Assert.AreEqual (new TimeSpan (1, 0, 0), changes.Delta);
+ Assert.AreEqual (new DateTime (2014, 10, 5, 2, 0, 0), changes.Start);
+ Assert.AreEqual (new DateTime (2014, 4, 6, 3, 0, 0), changes.End);
+ }
+ }
}
}
}
public void MakeCustomModified (CodeGen code_gen, PEAPI.CustomModifier modifier,
- BaseClassRef klass)
+ BaseTypeRef klass)
{
use_type_spec = true;
conversion_list.Add (ConversionMethod.MakeCustomModified);
break;
case ConversionMethod.MakeCustomModified:
peapi_type.MakeCustomModified (code_gen, (PEAPI.CustomModifier) conversion_list[++i],
- (BaseClassRef) conversion_list[++i]);
+ (BaseTypeRef) conversion_list[++i]);
break;
}
}
public void MakeCustomModified (CodeGen code_gen, PEAPI.CustomModifier modifier,
- BaseClassRef klass)
+ BaseTypeRef klass)
{
PEAPI.Type type;
Pair p = new Pair (peapi_type, modifier.ToString ());
type = type_table [p] as PEAPI.Type;
if (type == null) {
- klass.Resolve (code_gen);
- type = new PEAPI.CustomModifiedType (peapi_type,
- modifier, klass.PeapiClass);
+ type = GetType (code_gen, modifier, klass);
type_table [p] = type;
}
peapi_type = type;
}
+ PEAPI.Type GetType (CodeGen code_gen, PEAPI.CustomModifier modifier, BaseTypeRef klass)
+ {
+ klass.Resolve (code_gen);
+ var bcr = klass as BaseClassRef;
+ if (bcr != null)
+ return new PEAPI.CustomModifiedType (peapi_type, modifier, bcr.PeapiClass);
+
+ var pt = klass as PrimitiveTypeRef;
+ return new PEAPI.CustomModifiedType (peapi_type, modifier, code_gen.PEFile.AddPrimitiveType ((PEAPI.PrimitiveType) pt.PeapiType));
+
+ throw new NotSupportedException (klass.GetType ().ToString ());
+ }
+
public void MakePinned ()
{
use_type_spec = true;
base_type.MakePinned ();\r
$$ = base_type;\r
}\r
- | type K_MODREQ OPEN_PARENS class_ref CLOSE_PARENS\r
+ | type K_MODREQ OPEN_PARENS custom_modifier_type CLOSE_PARENS\r
{\r
BaseTypeRef base_type = GetTypeRef ((BaseTypeRef) $1);\r
- BaseClassRef class_ref = (BaseClassRef) $4;\r
+ BaseTypeRef class_ref = (BaseTypeRef) $4;\r
base_type.MakeCustomModified (codegen,\r
CustomModifier.modreq, class_ref);\r
$$ = base_type;\r
}\r
- | type K_MODOPT OPEN_PARENS class_ref CLOSE_PARENS\r
+ | type K_MODOPT OPEN_PARENS custom_modifier_type CLOSE_PARENS\r
{\r
BaseTypeRef base_type = GetTypeRef ((BaseTypeRef) $1);\r
- BaseClassRef class_ref = (BaseClassRef) $4;\r
+ BaseTypeRef class_ref = (BaseTypeRef) $4;\r
base_type.MakeCustomModified (codegen,\r
CustomModifier.modopt, class_ref);\r
$$ = base_type;\r
| K_CLSID\r
;\r
\r
+custom_modifier_type\r
+ : primitive_type\r
+ | class_ref\r
+ ;\r
+\r
field_decl : D_FIELD repeat_opt field_attr type id at_opt init_opt\r
{\r
FieldDef field_def = new FieldDef((FieldAttr) $3, \r
--- /dev/null
+.assembly extern mscorlib
+{
+}
+
+.assembly 'test-custom-mod-2'
+{
+}
+
+.class interface public abstract auto ansi I
+{
+ .custom instance void [mscorlib]System.Reflection.DefaultMemberAttribute::.ctor(string)
+ = {string('Item')}
+ .method public hidebysig newslot specialname abstract virtual instance char
+ get_P() cil managed
+ {
+ }
+
+ .method public hidebysig newslot specialname abstract virtual instance void
+ set_P(char 'value') cil managed
+ {
+ }
+
+ .method public hidebysig newslot specialname abstract virtual instance int32
+ get_Item(bool x) cil managed
+ {
+ }
+
+ .method public hidebysig newslot specialname abstract virtual instance void
+ set_Item(bool x,
+ int32 'value') cil managed
+ {
+ }
+
+ .property instance char modopt(int8) P()
+ {
+ .get instance char I::get_P()
+ .set instance void I::set_P(char)
+ }
+
+ .property instance int32 modopt(int8) Item(bool modopt(int16))
+ {
+ .get instance int32 I::get_Item(bool)
+ .set instance void I::set_Item(bool,
+ int32)
+ }
+}
\ No newline at end of file
DECL_OFFSET(MonoLMF, ebp)
DECL_OFFSET(MonoLMF, eip)
#elif defined(TARGET_AMD64)
-DECL_OFFSET(MonoContext, rax)
-DECL_OFFSET(MonoContext, rcx)
-DECL_OFFSET(MonoContext, rdx)
-DECL_OFFSET(MonoContext, rbx)
-DECL_OFFSET(MonoContext, rbp)
-DECL_OFFSET(MonoContext, rsi)
-DECL_OFFSET(MonoContext, rdi)
-DECL_OFFSET(MonoContext, rsp)
-DECL_OFFSET(MonoContext, r8)
-DECL_OFFSET(MonoContext, r9)
-DECL_OFFSET(MonoContext, r10)
-DECL_OFFSET(MonoContext, r11)
-DECL_OFFSET(MonoContext, r12)
-DECL_OFFSET(MonoContext, r13)
-DECL_OFFSET(MonoContext, r14)
-DECL_OFFSET(MonoContext, r15)
-DECL_OFFSET(MonoContext, rip)
+DECL_OFFSET(MonoContext, gregs)
#ifdef TARGET_WIN32
DECL_OFFSET(MonoLMF, lmf_addr)
{
}
+static inline void
+dynamic_image_lock (MonoDynamicImage *image)
+{
+ mono_image_lock ((MonoImage*)image);
+}
+
+static inline void
+dynamic_image_unlock (MonoDynamicImage *image)
+{
+ mono_image_unlock ((MonoImage*)image);
+}
+
+static void
+register_dyn_token (MonoDynamicImage *assembly, guint32 token, MonoObject *obj)
+{
+ dynamic_image_lock (assembly);
+ mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), obj);
+ dynamic_image_unlock (assembly);
+}
+
+static MonoObject*
+lookup_dyn_token (MonoDynamicImage *assembly, guint32 token)
+{
+ MonoObject *obj;
+
+ dynamic_image_lock (assembly);
+ obj = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token));
+ dynamic_image_unlock (assembly);
+
+ return obj;
+}
+
static void
sigbuffer_init (SigBuffer *buf, int size)
{
(type->type != MONO_TYPE_MVAR)) {
MonoReflectionTypeBuilder *tb = mono_class_get_ref_info (klass);
token = MONO_TYPEDEFORREF_TYPEDEF | (tb->table_idx << MONO_TYPEDEFORREF_BITS);
- mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), mono_class_get_ref_info (klass));
+ register_dyn_token (assembly, token, mono_class_get_ref_info (klass));
return token;
}
token = MONO_TYPEDEFORREF_TYPEREF | (table->next_idx << MONO_TYPEDEFORREF_BITS); /* typeref */
g_hash_table_insert (assembly->typeref, type, GUINT_TO_POINTER(token));
table->next_idx ++;
- mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), mono_class_get_ref_info (klass));
+ register_dyn_token (assembly, token, mono_class_get_ref_info (klass));
return token;
}
idx = assembly->us.index ++;
}
- mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (MONO_TOKEN_STRING | idx), str);
+ register_dyn_token (assembly, MONO_TOKEN_STRING | idx, (MonoObject*)str);
return MONO_TOKEN_STRING | idx;
}
}
g_hash_table_insert (assembly->vararg_aux_hash, GUINT_TO_POINTER (token), sig);
- mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), obj);
+ register_dyn_token (assembly, token, obj);
return token;
}
void
mono_image_register_token (MonoDynamicImage *assembly, guint32 token, MonoObject *obj)
{
- MonoObject *prev = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token));
+ MonoObject *prev;
+
+ dynamic_image_lock (assembly);
+ prev = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token));
if (prev) {
/* There could be multiple MethodInfo objects with the same token */
//g_assert (prev == obj);
} else {
mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), obj);
}
+ dynamic_image_unlock (assembly);
}
static MonoDynamicImage*
if (!image)
image = mono_defaults.corlib;
+ if (!rootimage)
+ rootimage = mono_defaults.corlib;
+
if (ignorecase) {
MonoError error;
klass = mono_class_from_name_case_checked (image, info->name_space, info->name, &error);
g_assert (mono_class_get_ref_info (klass) == tb);
}
- mono_g_hash_table_insert (tb->module->dynamic_image->tokens,
- GUINT_TO_POINTER (MONO_TOKEN_TYPE_DEF | tb->table_idx), tb);
+ register_dyn_token (tb->module->dynamic_image, MONO_TOKEN_TYPE_DEF | tb->table_idx, (MonoObject*)tb);
if (parent != NULL) {
mono_class_setup_parent (klass, parent);
gboolean
mono_reflection_is_valid_dynamic_token (MonoDynamicImage *image, guint32 token)
{
- return mono_g_hash_table_lookup (image->tokens, GUINT_TO_POINTER (token)) != NULL;
+ return lookup_dyn_token (image, token) != NULL;
}
MonoMethodSignature *
MonoObject *obj;
MonoClass *klass;
- mono_loader_lock ();
- obj = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token));
- mono_loader_unlock ();
+ obj = lookup_dyn_token (assembly, token);
if (!obj) {
if (valid_token)
g_error ("Could not find required dynamic token 0x%08x", token);
if test -d $(top_srcdir)/.git; then \
(cd $(top_srcdir); \
LANG=C; export LANG; \
- branch=`git branch | grep '^\*' | cut -d ' ' -f 2`; \
+ branch=`git branch | grep '^\*' | sed 's/(detached from .*/explicit/' | cut -d ' ' -f 2`; \
version=`git log --no-color --first-parent -n1 --pretty=format:%h`; \
echo "#define FULL_VERSION \"$$branch/$$version\""; \
); \
case MONO_PATCH_INFO_ICALL_ADDR:
case MONO_PATCH_INFO_CLASS_INIT:
case MONO_PATCH_INFO_RGCTX_FETCH:
- case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
case MONO_PATCH_INFO_MONITOR_ENTER:
case MONO_PATCH_INFO_MONITOR_ENTER_V4:
case MONO_PATCH_INFO_MONITOR_EXIT:
encode_patch (acfg, entry->data, p, &p);
break;
}
- case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
case MONO_PATCH_INFO_MONITOR_ENTER:
case MONO_PATCH_INFO_MONITOR_ENTER_V4:
case MONO_PATCH_INFO_MONITOR_EXIT:
case MONO_PATCH_INFO_JIT_ICALL_ADDR:
debug_sym = g_strdup_printf ("%s_jit_icall_native_%s", prefix, ji->data.name);
break;
- case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
- debug_sym = g_strdup_printf ("%s_generic_class_init", prefix);
- break;
default:
break;
}
emit_trampoline (acfg, acfg->got_offset, info);
#endif
-#ifndef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
- mono_arch_create_generic_class_init_trampoline (&info, TRUE);
- emit_trampoline (acfg, acfg->got_offset, info);
-#endif
-
/* Emit the exception related code pieces */
mono_arch_get_restore_context (&info, TRUE);
emit_trampoline (acfg, acfg->got_offset, info);
ji->data.offset = decode_value (p, &p);
break;
case MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG:
- case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
case MONO_PATCH_INFO_MONITOR_ENTER:
case MONO_PATCH_INFO_MONITOR_ENTER_V4:
case MONO_PATCH_INFO_MONITOR_EXIT:
gc_spill_slot_liveness_def: len:0
gc_param_slot_liveness_def: len:0
-generic_class_init: src1:A len:32
+generic_class_init: src1:A len:32 clob:c
if (pos < 0) {
pos = - pos - 1;
+ DEBUG_PRINTF (4, "[dbg] send arg %d.\n", pos);
+
g_assert (pos >= 0 && pos < jit->num_params);
add_var (buf, jit, sig->params [pos], &jit->params [pos], &frame->ctx, frame->domain, FALSE);
} else {
+ MonoDebugLocalsInfo *locals;
+
+ locals = mono_debug_lookup_locals (frame->method);
+ if (locals) {
+ g_assert (pos < locals->num_locals);
+ pos = locals->locals [pos].index;
+ mono_debug_free_locals (locals);
+ }
g_assert (pos >= 0 && pos < jit->num_locals);
+ DEBUG_PRINTF (4, "[dbg] send local %d.\n", pos);
+
add_var (buf, jit, header->locals [pos], &jit->locals [pos], &frame->ctx, frame->domain, FALSE);
}
}
t = sig->params [pos];
var = &jit->params [pos];
} else {
+ MonoDebugLocalsInfo *locals;
+
+ locals = mono_debug_lookup_locals (frame->method);
+ if (locals) {
+ g_assert (pos < locals->num_locals);
+ pos = locals->locals [pos].index;
+ mono_debug_free_locals (locals);
+ }
g_assert (pos >= 0 && pos < jit->num_locals);
t = header->locals [pos];
guint8 *code;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
+ int i, gregs_offset;
/* restore_contect (MonoContext *ctx) */
amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
/* Restore all registers except %rip and %r11 */
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rax), 8);
- amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rcx), 8);
- amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rdx), 8);
- amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rbx), 8);
- amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rbp), 8);
- amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rsi), 8);
- amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rdi), 8);
- //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, r8), 8);
- //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, r9), 8);
- //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, r10), 8);
- amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, r12), 8);
- amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, r13), 8);
- amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, r14), 8);
-#if !defined(__native_client_codegen__)
- amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, r15), 8);
+ gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
+ for (i = 0; i < AMD64_NREG; ++i) {
+#if defined(__native_client_codegen__)
+ if (i == AMD64_R15)
+ continue;
#endif
+ if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
+ amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
+ }
/*
* The context resides on the stack, in the stack frame of the
* size. Hence the stack pointer can be restored only after
* we have finished loading everything from the context.
*/
- amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rsp), 8);
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rip), 8);
+ amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
/* jump to the saved IP */
amd64_jump_reg (code, AMD64_R11);
- nacl_global_codeman_validate(&start, 256, &code);
+ nacl_global_codeman_validate (&start, 256, &code);
mono_arch_flush_icache (start, code - start);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
guint8 *start;
- int i;
+ int i, gregs_offset;
guint8 *code;
guint32 pos;
MonoJumpInfo *ji = NULL;
if (! (pos & 8))
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
+ gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
+
/* set new EBP */
- amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, rbp), 8);
+ amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
/* load callee saved regs */
- amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, rbx), 8);
- amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, r12), 8);
- amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, r13), 8);
- amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, r14), 8);
-#if !defined(__native_client_codegen__)
- amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, r15), 8);
-#endif
-#ifdef TARGET_WIN32
- amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, rdi), 8);
- amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, rsi), 8);
+ for (i = 0; i < AMD64_NREG; ++i) {
+#if defined(__native_client_codegen__)
+ if (i == AMD64_R15)
+ continue;
#endif
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
+ amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
+ }
/* load exc register */
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, rax), 8);
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);
/* call the handler */
amd64_call_reg (code, AMD64_ARG_REG2);
void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
guint64 dummy5, guint64 dummy6,
- mgreg_t *regs, mgreg_t rip,
- MonoObject *exc, gboolean rethrow)
+ MonoContext *mctx, MonoObject *exc, gboolean rethrow)
{
MonoContext ctx;
- ctx.rsp = regs [AMD64_RSP];
- ctx.rip = rip;
- ctx.rbx = regs [AMD64_RBX];
- ctx.rbp = regs [AMD64_RBP];
- ctx.r12 = regs [AMD64_R12];
- ctx.r13 = regs [AMD64_R13];
- ctx.r14 = regs [AMD64_R14];
- ctx.r15 = regs [AMD64_R15];
- ctx.rdi = regs [AMD64_RDI];
- ctx.rsi = regs [AMD64_RSI];
- ctx.rax = regs [AMD64_RAX];
- ctx.rcx = regs [AMD64_RCX];
- ctx.rdx = regs [AMD64_RDX];
+ /* mctx is on the caller's stack */
+ memcpy (&ctx, mctx, sizeof (MonoContext));
if (mono_object_isinst (exc, mono_defaults.exception_class)) {
MonoException *mono_ex = (MonoException*)exc;
}
/* adjust eip so that it point into the call instruction */
- ctx.rip -= 1;
+ ctx.gregs [AMD64_RIP] --;
mono_handle_exception (&ctx, exc);
mono_restore_context (&ctx);
void
mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
guint64 dummy5, guint64 dummy6,
- mgreg_t *regs, mgreg_t rip,
- guint32 ex_token_index, gint64 pc_offset)
+ MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
{
guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
MonoException *ex;
ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);
- rip -= pc_offset;
+ mctx->gregs [AMD64_RIP] -= pc_offset;
/* Negate the ip adjustment done in mono_amd64_throw_exception () */
- rip += 1;
+ mctx->gregs [AMD64_RIP] += 1;
- mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, regs, rip, (MonoObject*)ex, FALSE);
+ mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE);
}
static void
mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
guint64 dummy5, guint64 dummy6,
- mgreg_t *regs, mgreg_t rip,
- guint32 dummy7, gint64 dummy8)
+ MonoContext *mctx, guint32 dummy7, gint64 dummy8)
{
/* Only the register parameters are valid */
MonoContext ctx;
- ctx.rsp = regs [AMD64_RSP];
- ctx.rip = rip;
- ctx.rbx = regs [AMD64_RBX];
- ctx.rbp = regs [AMD64_RBP];
- ctx.r12 = regs [AMD64_R12];
- ctx.r13 = regs [AMD64_R13];
- ctx.r14 = regs [AMD64_R14];
- ctx.r15 = regs [AMD64_R15];
- ctx.rdi = regs [AMD64_RDI];
- ctx.rsi = regs [AMD64_RSI];
- ctx.rax = regs [AMD64_RAX];
- ctx.rcx = regs [AMD64_RCX];
- ctx.rdx = regs [AMD64_RDX];
+ /* mctx is on the caller's stack */
+ memcpy (&ctx, mctx, sizeof (MonoContext));
mono_resume_unwind (&ctx);
}
guint8 *code;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
- int i, stack_size, arg_offsets [16], regs_offset, dummy_stack_space;
+ int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
const guint kMaxCodeSize = NACL_SIZE (256, 512);
#ifdef TARGET_WIN32
start = code = mono_global_codeman_reserve (kMaxCodeSize);
/* The stack is unaligned on entry */
- stack_size = 192 + 8 + dummy_stack_space;
+ stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;
code = start;
arg_offsets [0] = dummy_stack_space + 0;
arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
- arg_offsets [3] = dummy_stack_space + sizeof(mgreg_t) * 3;
- regs_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
+ ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
+ regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
/* Save registers */
for (i = 0; i < AMD64_NREG; ++i)
/* Save RSP */
amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
- /* Set arg1 == regs */
- amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, regs_offset);
- amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
- /* Set arg2 == eip */
+ /* Save IP */
if (llvm_abs)
amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
else
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_RAX, sizeof(mgreg_t));
- /* Set arg3 == exc/ex_token_index */
+ amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
+ /* Set arg1 == ctx */
+ amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
+ amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
+ /* Set arg2 == exc/ex_token_index */
if (resume_unwind)
- amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
+ amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
else
- amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG1, sizeof(mgreg_t));
- /* Set arg4 == rethrow/pc offset */
+ amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
+ /* Set arg3 == rethrow/pc offset */
if (resume_unwind) {
- amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], 0, sizeof(mgreg_t));
+ amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
} else if (corlib) {
- amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [3], AMD64_ARG_REG2, sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
if (llvm_abs)
/*
* The caller is LLVM code which passes the absolute address not a pc offset,
* so compensate by passing 0 as 'rip' and passing the negated abs address as
* the pc offset.
*/
- amd64_neg_membase (code, AMD64_RSP, arg_offsets [3]);
+ amd64_neg_membase (code, AMD64_RSP, arg_offsets [2]);
} else {
- amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], rethrow, sizeof(mgreg_t));
+ amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
}
if (aot) {
StackFrameInfo *frame)
{
gpointer ip = MONO_CONTEXT_GET_IP (ctx);
+ int i;
memset (frame, 0, sizeof (StackFrameInfo));
frame->ji = ji;
if (ji->has_arch_eh_info)
epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);
- regs [AMD64_RAX] = new_ctx->rax;
- regs [AMD64_RBX] = new_ctx->rbx;
- regs [AMD64_RCX] = new_ctx->rcx;
- regs [AMD64_RDX] = new_ctx->rdx;
- regs [AMD64_RBP] = new_ctx->rbp;
- regs [AMD64_RSP] = new_ctx->rsp;
- regs [AMD64_RSI] = new_ctx->rsi;
- regs [AMD64_RDI] = new_ctx->rdi;
- regs [AMD64_RIP] = new_ctx->rip;
- regs [AMD64_R12] = new_ctx->r12;
- regs [AMD64_R13] = new_ctx->r13;
- regs [AMD64_R14] = new_ctx->r14;
- regs [AMD64_R15] = new_ctx->r15;
+ for (i = 0; i < AMD64_NREG; ++i)
+ regs [i] = new_ctx->gregs [i];
mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
(guint8*)ji->code_start + ji->code_size,
ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
save_locations, MONO_MAX_IREGS, &cfa);
- new_ctx->rax = regs [AMD64_RAX];
- new_ctx->rbx = regs [AMD64_RBX];
- new_ctx->rcx = regs [AMD64_RCX];
- new_ctx->rdx = regs [AMD64_RDX];
- new_ctx->rbp = regs [AMD64_RBP];
- new_ctx->rsp = regs [AMD64_RSP];
- new_ctx->rsi = regs [AMD64_RSI];
- new_ctx->rdi = regs [AMD64_RDI];
- new_ctx->rip = regs [AMD64_RIP];
- new_ctx->r12 = regs [AMD64_R12];
- new_ctx->r13 = regs [AMD64_R13];
- new_ctx->r14 = regs [AMD64_R14];
- new_ctx->r15 = regs [AMD64_R15];
+ for (i = 0; i < AMD64_NREG; ++i)
+ new_ctx->gregs [i] = regs [i];
/* The CFA becomes the new SP value */
- new_ctx->rsp = (mgreg_t)cfa;
+ new_ctx->gregs [AMD64_RSP] = (mgreg_t)cfa;
/* Adjust IP */
- new_ctx->rip --;
+ new_ctx->gregs [AMD64_RIP] --;
return TRUE;
} else if (*lmf) {
frame->ji = ji;
frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
- new_ctx->rip = rip;
- new_ctx->rbp = (*lmf)->rbp;
- new_ctx->rsp = (*lmf)->rsp;
+ new_ctx->gregs [AMD64_RIP] = rip;
+ new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
+ new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
if (((guint64)(*lmf)->previous_lmf) & 4) {
MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
/* Trampoline frame */
- new_ctx->rbx = ext->regs [AMD64_RBX];
- new_ctx->r12 = ext->regs [AMD64_R12];
- new_ctx->r13 = ext->regs [AMD64_R13];
- new_ctx->r14 = ext->regs [AMD64_R14];
- new_ctx->r15 = ext->regs [AMD64_R15];
-#ifdef TARGET_WIN32
- new_ctx->rdi = ext->regs [AMD64_RDI];
- new_ctx->rsi = ext->regs [AMD64_RSI];
-#endif
+ for (i = 0; i < AMD64_NREG; ++i) {
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
+ new_ctx->gregs [i] = ext->ctx->gregs [i];
+ }
} else {
/*
* The registers saved in the LMF will be restored using the normal unwind info,
* when the wrapper frame is processed.
*/
- new_ctx->rbx = 0;
- new_ctx->r12 = 0;
- new_ctx->r13 = 0;
- new_ctx->r14 = 0;
- new_ctx->r15 = 0;
-#ifdef TARGET_WIN32
- new_ctx->rdi = 0;
- new_ctx->rsi = 0;
-#endif
+ for (i = 0; i < AMD64_NREG; ++i) {
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
+ new_ctx->gregs [i] = 0;
+ }
}
*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~7);
void
mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
{
- guint64 sp = ctx->rsp;
+ guint64 sp = ctx->gregs [AMD64_RSP];
- ctx->rdi = (guint64)user_data;
+ ctx->gregs [AMD64_RDI] = (guint64)user_data;
/* Allocate a stack frame below the red zone */
sp -= 128;
sp -= 8;
#ifdef __linux__
/* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
- *(guint64*)sp = ctx->rip;
+ *(guint64*)sp = ctx->gregs [AMD64_RIP];
#endif
- ctx->rsp = sp;
- ctx->rip = (guint64)async_cb;
+ ctx->gregs [AMD64_RSP] = sp;
+ ctx->gregs [AMD64_RIP] = (guint64)async_cb;
}
/**
prepare_for_guard_pages (MonoContext *mctx)
{
gpointer *sp;
- sp = (gpointer)(mctx->rsp);
+ sp = (gpointer)(mctx->gregs [AMD64_RSP]);
sp -= 1;
/* the return addr */
- sp [0] = (gpointer)(mctx->rip);
- mctx->rip = (guint64)restore_soft_guard_pages;
- mctx->rsp = (guint64)sp;
+ sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
+ mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
+ mctx->gregs [AMD64_RSP] = (guint64)sp;
}
static void
else
mono_gc_wbarrier_generic_store (dest, *(MonoObject**)src);
}
+
+void
+mono_generic_class_init (MonoVTable *vtable)
+{
+ mono_runtime_class_init (vtable);
+}
MonoObject*
mono_object_castclass_with_cache (MonoObject *obj, MonoClass *klass, gpointer *cache);
+void
+mono_generic_class_init (MonoVTable *vtable);
+
MonoObject*
mono_gsharedvt_constrained_call (gpointer mp, MonoMethod *cmethod, MonoClass *klass, gboolean deref_arg, gpointer *args);
/* helper methods signatures */
static MonoMethodSignature *helper_sig_class_init_trampoline;
static MonoMethodSignature *helper_sig_domain_get;
-static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
-static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
{
helper_sig_domain_get = mono_create_icall_signature ("ptr");
helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
- helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
- helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
* On return the caller must check @klass for load errors.
*/
static void
-emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
+emit_generic_class_init (MonoCompile *cfg, MonoClass *klass, MonoBasicBlock **out_bblock)
{
MonoInst *vtable_arg;
int context_used;
+ gboolean use_op_generic_class_init = FALSE;
+
+ *out_bblock = cfg->cbb;
context_used = mini_class_check_context_used (cfg, klass);
}
#ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
- MonoInst *ins;
+ if (!COMPILE_LLVM (cfg))
+ use_op_generic_class_init = TRUE;
+#endif
- /*
- * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
- * so this doesn't have to clobber any regs.
- */
- /*
- * For LLVM, this requires that the code in the generic trampoline obtain the vtable argument according to
- * the normal calling convention of the platform.
- */
- MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
- ins->sreg1 = vtable_arg->dreg;
- MONO_ADD_INS (cfg->cbb, ins);
-#else
- MonoCallInst *call;
+ if (use_op_generic_class_init) {
+ MonoInst *ins;
- if (COMPILE_LLVM (cfg))
- call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
- else
- call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
- mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
- cfg->uses_vtable_reg = TRUE;
-#endif
+ /*
+ * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
+ * so this doesn't have to clobber any regs and it doesn't break basic blocks.
+ */
+ MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
+ ins->sreg1 = vtable_arg->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ } else {
+ static int byte_offset = -1;
+ static guint8 bitmask;
+ int bits_reg, inited_reg;
+ MonoBasicBlock *inited_bb;
+ MonoInst *args [16];
+
+ if (byte_offset < 0)
+ mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
+
+ bits_reg = alloc_ireg (cfg);
+ inited_reg = alloc_ireg (cfg);
+
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
+
+ NEW_BBLOCK (cfg, inited_bb);
+
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
+
+ args [0] = vtable_arg;
+ mono_emit_jit_icall (cfg, mono_generic_class_init, args);
+
+ MONO_START_BB (cfg, inited_bb);
+ *out_bblock = inited_bb;
+ }
+}
+
+
+static void
+emit_class_init (MonoCompile *cfg, MonoClass *klass, MonoBasicBlock **out_bblock)
+{
+ /* This could be used as a fallback if needed */
+ //emit_generic_class_init (cfg, klass, out_bblock);
+
+ *out_bblock = cfg->cbb;
+
+ mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
}
static void
* might not get called after the call was patched.
*/
if (cfg->generic_sharing_context && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
- emit_generic_class_init (cfg, cmethod->klass);
+ emit_generic_class_init (cfg, cmethod->klass, &bblock);
CHECK_TYPELOAD (cmethod->klass);
}
ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
- emit_generic_class_init (cfg, cmethod->klass);
+ emit_generic_class_init (cfg, cmethod->klass, &bblock);
CHECK_TYPELOAD (cmethod->klass);
}
* As a workaround, we call class cctors before allocating objects.
*/
if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
- mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
+ emit_class_init (cfg, cmethod->klass, &bblock);
if (cfg->verbose_level > 2)
printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
class_inits = g_slist_prepend (class_inits, cmethod->klass);
*/
if (mono_class_needs_cctor_run (klass, method))
- emit_generic_class_init (cfg, klass);
+ emit_generic_class_init (cfg, klass, &bblock);
/*
* The pointer we're computing here is
if (!addr) {
if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
if (!(g_slist_find (class_inits, klass))) {
- mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
+ emit_class_init (cfg, klass, &bblock);
if (cfg->verbose_level > 2)
printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
class_inits = g_slist_prepend (class_inits, klass);
mono_sigctx_to_monoctx (sigctx, &ctx);
- rip = (guint8*)ctx.rip;
+ rip = (guint8*)ctx.gregs [AMD64_RIP];
if (IS_REX (rip [0])) {
reg = amd64_rex_b (rip [0]);
/* idiv REG */
reg += x86_modrm_rm (rip [1]);
- switch (reg) {
- case AMD64_RAX:
- value = ctx.rax;
- break;
- case AMD64_RBX:
- value = ctx.rbx;
- break;
- case AMD64_RCX:
- value = ctx.rcx;
- break;
- case AMD64_RDX:
- value = ctx.rdx;
- break;
- case AMD64_RBP:
- value = ctx.rbp;
- break;
- case AMD64_RSP:
- value = ctx.rsp;
- break;
- case AMD64_RSI:
- value = ctx.rsi;
- break;
- case AMD64_RDI:
- value = ctx.rdi;
- break;
- case AMD64_R12:
- value = ctx.r12;
- break;
- case AMD64_R13:
- value = ctx.r13;
- break;
- case AMD64_R14:
- value = ctx.r14;
- break;
- case AMD64_R15:
- value = ctx.r15;
- break;
- default:
- g_assert_not_reached ();
- reg = -1;
- }
+ value = ctx.gregs [reg];
if (value == -1)
return TRUE;
return 0;
}
-#define _CTX_REG(ctx,fld,i) ((&ctx->fld)[i])
-
mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
- switch (reg) {
- case AMD64_RCX: return ctx->rcx;
- case AMD64_RDX: return ctx->rdx;
- case AMD64_RBX: return ctx->rbx;
- case AMD64_RBP: return ctx->rbp;
- case AMD64_RSP: return ctx->rsp;
- default:
- return _CTX_REG (ctx, rax, reg);
- }
+ return ctx->gregs [reg];
}
void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
{
- switch (reg) {
- case AMD64_RCX:
- ctx->rcx = val;
- break;
- case AMD64_RDX:
- ctx->rdx = val;
- break;
- case AMD64_RBX:
- ctx->rbx = val;
- break;
- case AMD64_RBP:
- ctx->rbp = val;
- break;
- case AMD64_RSP:
- ctx->rsp = val;
- break;
- default:
- _CTX_REG (ctx, rax, reg) = val;
- }
+ ctx->gregs [reg] = val;
}
gpointer
/* LMF structure used by the JIT trampolines */
typedef struct {
struct MonoLMF lmf;
- guint64 *regs;
+ MonoContext *ctx;
gpointer lmf_addr;
} MonoLMFTramp;
guint8 *ret;
} DynCallArgs;
-#define MONO_CONTEXT_SET_LLVM_EXC_REG(ctx, exc) do { (ctx)->rax = (gsize)exc; } while (0)
-#define MONO_CONTEXT_SET_LLVM_EH_SELECTOR_REG(ctx, sel) do { (ctx)->rdx = (gsize)(sel); } while (0)
+#define MONO_CONTEXT_SET_LLVM_EXC_REG(ctx, exc) do { (ctx)->gregs [AMD64_RAX] = (gsize)exc; } while (0)
+#define MONO_CONTEXT_SET_LLVM_EH_SELECTOR_REG(ctx, sel) do { (ctx)->gregs [AMD64_RDX] = (gsize)(sel); } while (0)
#define MONO_ARCH_INIT_TOP_LMF_ENTRY(lmf)
void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
guint64 dummy5, guint64 dummy6,
- mgreg_t *regs, mgreg_t rip,
- MonoObject *exc, gboolean rethrow);
+ MonoContext *mctx, MonoObject *exc, gboolean rethrow);
void
mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
guint64 dummy5, guint64 dummy6,
- mgreg_t *regs, mgreg_t rip,
- guint32 ex_token_index, gint64 pc_offset);
+ MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset);
guint64
mono_amd64_get_original_ip (void);
*/
#ifndef TARGET_AMD64
if (abs_ji->type == MONO_PATCH_INFO_MONITOR_ENTER || abs_ji->type == MONO_PATCH_INFO_MONITOR_ENTER_V4 ||
- abs_ji->type == MONO_PATCH_INFO_MONITOR_EXIT || abs_ji->type == MONO_PATCH_INFO_GENERIC_CLASS_INIT)
+ abs_ji->type == MONO_PATCH_INFO_MONITOR_EXIT)
LLVM_FAILURE (ctx, "trampoline with own cconv");
#endif
target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, abs_ji, FALSE);
CHECK_FAILURE (ctx);
break;
}
- case OP_GENERIC_CLASS_INIT: {
- static int byte_offset = -1;
- static guint8 bitmask;
- LLVMValueRef flags_load, cmp;
- MonoMethodSignature *sig;
- const char *icall_name;
- LLVMValueRef callee;
- LLVMBasicBlockRef init_bb, noinit_bb;
- LLVMValueRef args [16];
-
- if (byte_offset < 0)
- mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
-
- flags_load = emit_load (ctx, bb, &builder, 1, convert (ctx, lhs, LLVMPointerType (LLVMInt8Type(), 0)), "", FALSE);
- set_metadata_flag (flags_load, "mono.nofail.load");
- cmp = LLVMBuildICmp (builder, LLVMIntEQ, LLVMBuildAnd (builder, flags_load, LLVMConstInt (LLVMInt8Type (), bitmask, 0), ""), LLVMConstInt (LLVMInt8Type (), 1, FALSE), "");
-
- callee = ctx->lmodule->generic_class_init_tramp;
- if (!callee) {
- icall_name = "specific_trampoline_generic_class_init";
- sig = mono_metadata_signature_alloc (mono_get_corlib (), 1);
- sig->ret = &mono_get_void_class ()->byval_arg;
- sig->params [0] = &mono_get_intptr_class ()->byval_arg;
- if (cfg->compile_aot) {
- callee = get_plt_entry (ctx, sig_to_llvm_sig (ctx, sig), MONO_PATCH_INFO_INTERNAL_METHOD, icall_name);
- } else {
- callee = LLVMAddFunction (module, icall_name, sig_to_llvm_sig (ctx, sig));
- LLVMAddGlobalMapping (ctx->lmodule->ee, callee, resolve_patch (cfg, MONO_PATCH_INFO_INTERNAL_METHOD, icall_name));
- }
- mono_memory_barrier ();
- ctx->lmodule->generic_class_init_tramp = callee;
- }
-
- init_bb = gen_bb (ctx, "INIT_BB");
- noinit_bb = gen_bb (ctx, "NOINIT_BB");
-
- LLVMBuildCondBr (ctx->builder, cmp, noinit_bb, init_bb);
-
- builder = create_builder (ctx);
- ctx->builder = builder;
- LLVMPositionBuilderAtEnd (builder, init_bb);
- args [0] = convert (ctx, lhs, IntPtrType ());
- emit_call (ctx, bb, &builder, callee, args, 1);
- LLVMBuildBr (builder, noinit_bb);
-
- builder = create_builder (ctx);
- ctx->builder = builder;
- LLVMPositionBuilderAtEnd (builder, noinit_bb);
-
- ctx->bblocks [bb->block_num].end_bblock = noinit_bb;
- break;
- }
case OP_AOTCONST: {
guint32 got_offset;
LLVMValueRef indexes [2];
case MONO_PATCH_INFO_IID:
case MONO_PATCH_INFO_ADJUSTED_IID:
case MONO_PATCH_INFO_CLASS_INIT:
- case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
case MONO_PATCH_INFO_METHODCONST:
case MONO_PATCH_INFO_METHOD:
case MONO_PATCH_INFO_METHOD_JUMP:
target = mono_create_rgctx_lazy_fetch_trampoline (slot);
break;
}
- case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
- target = mono_create_generic_class_init_trampoline ();
- break;
case MONO_PATCH_INFO_MONITOR_ENTER:
target = mono_create_monitor_enter_trampoline ();
break;
register_icall (mono_object_castclass_with_cache, "mono_object_castclass_with_cache", "object object ptr ptr", FALSE);
register_icall (mono_object_isinst_with_cache, "mono_object_isinst_with_cache", "object object ptr ptr", FALSE);
+ register_icall (mono_generic_class_init, "mono_generic_class_init", "void ptr", FALSE);
register_icall (mono_debugger_agent_user_break, "mono_debugger_agent_user_break", "void", FALSE);
register_dyn_icall (mono_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), NULL),
case MONO_PATCH_INFO_INTERNAL_METHOD:
case MONO_PATCH_INFO_JIT_ICALL_ADDR:
case MONO_PATCH_INFO_CLASS_INIT:
- case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
case MONO_PATCH_INFO_RGCTX_FETCH:
case MONO_PATCH_INFO_MONITOR_ENTER:
case MONO_PATCH_INFO_MONITOR_ENTER_V4:
return ptr;
}
-gpointer
-mono_create_generic_class_init_trampoline (void)
-{
-#ifndef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
- static gpointer code;
- MonoTrampInfo *info;
-
- mono_trampolines_lock ();
-
- if (!code) {
- if (mono_aot_only)
- /* get_named_code () might return an ftnptr, but our caller expects a direct pointer */
- code = mono_get_addr_from_ftnptr (mono_aot_get_trampoline ("generic_class_init_trampoline"));
- else {
- code = mono_arch_create_generic_class_init_trampoline (&info, FALSE);
- mono_tramp_info_register (info);
- }
- }
-
- mono_trampolines_unlock ();
-
- return code;
-#else
- /* Not used */
- g_assert_not_reached ();
- return NULL;
-#endif
-}
-
gpointer
mono_create_jump_trampoline (MonoDomain *domain, MonoMethod *method, gboolean add_sync_wrapper)
{
case MONO_PATCH_INFO_BB:
case MONO_PATCH_INFO_LABEL:
case MONO_PATCH_INFO_RGCTX_FETCH:
- case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
case MONO_PATCH_INFO_MONITOR_ENTER:
case MONO_PATCH_INFO_MONITOR_ENTER_V4:
case MONO_PATCH_INFO_MONITOR_EXIT:
#endif
/* Version number of the AOT file format */
-#define MONO_AOT_FILE_VERSION 115
+#define MONO_AOT_FILE_VERSION 116
//TODO: This is x86/amd64 specific.
#define mono_simd_shuffle_mask(a,b,c,d) ((a) | ((b) << 2) | ((c) << 4) | ((d) << 6))
MonoMethod *method,
gboolean add_sync_wrapper);
gpointer mono_create_class_init_trampoline (MonoVTable *vtable);
-gpointer mono_create_generic_class_init_trampoline (void);
gpointer mono_create_jit_trampoline (MonoMethod *method);
gpointer mono_create_jit_trampoline_from_token (MonoImage *image, guint32 token);
gpointer mono_create_jit_trampoline_in_domain (MonoDomain *domain, MonoMethod *method) MONO_LLVM_INTERNAL;
guchar* mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot);
-gpointer mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_nullified_class_init_trampoline (MonoTrampInfo **info);
guint8* mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_create_monitor_enter_trampoline (MonoTrampInfo **info, gboolean is_v4, gboolean aot);
PATCH_INFO(INTERRUPTION_REQUEST_FLAG, "interruption_request_flag")
PATCH_INFO(METHOD_RGCTX, "method_rgctx")
PATCH_INFO(RGCTX_FETCH, "rgctx_fetch")
-PATCH_INFO(GENERIC_CLASS_INIT, "generic_class_init")
PATCH_INFO(MONITOR_ENTER, "monitor_enter")
PATCH_INFO(MONITOR_ENTER_V4, "monitor_enter_v4")
PATCH_INFO(MONITOR_EXIT, "monitor_exit")
{
char *tramp_name;
guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
- int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, tramp_offset, saved_regs_offset;
+ int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, tramp_offset, ctx_offset, saved_regs_offset;
int saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
gboolean has_caller;
GSList *unwind_ops = NULL;
code = buf = mono_global_codeman_reserve (kMaxCodeSize);
- framesize = kMaxCodeSize + sizeof (MonoLMFTramp);
- framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
+ /* Compute stack frame size and offsets */
+ offset = 0;
+ rbp_offset = -offset;
+
+ offset += sizeof(mgreg_t);
+ rax_offset = -offset;
+
+ offset += sizeof(mgreg_t);
+ tramp_offset = -offset;
+
+ offset += sizeof(gpointer);
+ arg_offset = -offset;
+
+ offset += sizeof(mgreg_t);
+ res_offset = -offset;
+
+ offset += sizeof (MonoContext);
+ ctx_offset = -offset;
+ saved_regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
+
+ offset += 8 * sizeof(mgreg_t);
+ saved_fpregs_offset = -offset;
+
+ offset += sizeof (MonoLMFTramp);
+ lmf_offset = -offset;
+
+ framesize = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
orig_rsp_to_rbp_offset = 0;
r11_save_code = code;
mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
- offset = 0;
- rbp_offset = - offset;
-
- offset += sizeof(mgreg_t);
- rax_offset = - offset;
-
- offset += sizeof(mgreg_t);
- tramp_offset = - offset;
-
- offset += sizeof(gpointer);
- arg_offset = - offset;
-
/* Compute the trampoline address from the return address */
if (aot) {
#if defined(__default_codegen__)
}
amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, sizeof(gpointer));
- offset += sizeof(mgreg_t);
- res_offset = - offset;
-
/* Save all registers */
-
- offset += AMD64_NREG * sizeof(mgreg_t);
- saved_regs_offset = - offset;
for (i = 0; i < AMD64_NREG; ++i) {
if (i == AMD64_RBP) {
/* RAX is already saved */
g_assert (r11_save_code == after_r11_save_code);
}
}
- offset += 8 * sizeof(mgreg_t);
- saved_fpregs_offset = - offset;
for (i = 0; i < 8; ++i)
amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)), i);
/* Save LMF begin */
- offset += sizeof (MonoLMFTramp);
- lmf_offset = - offset;
-
/* Save ip */
if (has_caller)
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof(gpointer));
amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, sizeof(mgreg_t));
/* Save pointer to registers */
- amd64_lea_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset);
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, regs), AMD64_R11, sizeof(mgreg_t));
+ amd64_lea_membase (code, AMD64_R11, AMD64_RBP, ctx_offset);
+ amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, ctx), AMD64_R11, sizeof(mgreg_t));
if (aot) {
code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
return buf;
}
-gpointer
-mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
-{
- /* Not used on amd64 */
- g_assert_not_reached ();
- return NULL;
-}
-
#ifdef MONO_ARCH_MONITOR_OBJECT_REG
gpointer
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
int tramp_size = 256;
- int framesize, ctx_offset, cfa_offset;
+ int i, framesize, ctx_offset, cfa_offset, gregs_offset;
guint8 *code, *buf;
GSList *unwind_ops = NULL;
MonoJumpInfo *ji = NULL;
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
ctx_offset = 0;
+ gregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
/* Initialize a MonoContext structure on the stack */
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rax), AMD64_RAX, sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rbx), AMD64_RBX, sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rcx), AMD64_RCX, sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rdx), AMD64_RDX, sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rsi), AMD64_RSI, sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rdi), AMD64_RDI, sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r8), AMD64_R8, sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r9), AMD64_R9, sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r10), AMD64_R10, sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r11), AMD64_R11, sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r12), AMD64_R12, sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r13), AMD64_R13, sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r14), AMD64_R14, sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r15), AMD64_R15, sizeof (mgreg_t));
-
+ for (i = 0; i < AMD64_NREG; ++i) {
+ if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
+ amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (i * sizeof (mgreg_t)), i, sizeof (mgreg_t));
+ }
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 0, sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rbp), AMD64_R11, sizeof (mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
amd64_lea_membase (code, AMD64_R11, AMD64_RBP, 2 * sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rsp), AMD64_R11, sizeof (mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RSP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, sizeof (mgreg_t), sizeof (mgreg_t));
- amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rip), AMD64_R11, sizeof (mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
/* Call the single step/breakpoint function in sdb */
amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RSP, ctx_offset);
}
/* Restore registers from ctx */
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rax), sizeof (mgreg_t));
- amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rbx), sizeof (mgreg_t));
- amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rcx), sizeof (mgreg_t));
- amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rdx), sizeof (mgreg_t));
- amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rsi), sizeof (mgreg_t));
- amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rdi), sizeof (mgreg_t));
- amd64_mov_reg_membase (code, AMD64_R8, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r8), sizeof (mgreg_t));
- amd64_mov_reg_membase (code, AMD64_R9, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r9), sizeof (mgreg_t));
- amd64_mov_reg_membase (code, AMD64_R10, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r10), sizeof (mgreg_t));
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r11), sizeof (mgreg_t));
- amd64_mov_reg_membase (code, AMD64_R12, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r12), sizeof (mgreg_t));
- amd64_mov_reg_membase (code, AMD64_R13, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r13), sizeof (mgreg_t));
- amd64_mov_reg_membase (code, AMD64_R14, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r14), sizeof (mgreg_t));
- amd64_mov_reg_membase (code, AMD64_R15, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r15), sizeof (mgreg_t));
-
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rbp), sizeof (mgreg_t));
+ for (i = 0; i < AMD64_NREG; ++i) {
+ if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
+ amd64_mov_reg_membase (code, AMD64_RSP, i, gregs_offset + (i * sizeof (mgreg_t)), sizeof (mgreg_t));
+ }
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (mgreg_t)), sizeof (mgreg_t));
amd64_mov_membase_reg (code, AMD64_RBP, 0, AMD64_R11, sizeof (mgreg_t));
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rip), sizeof (mgreg_t));
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (mgreg_t)), sizeof (mgreg_t));
amd64_mov_membase_reg (code, AMD64_RBP, sizeof (mgreg_t), AMD64_R11, sizeof (mgreg_t));
amd64_leave (code);
return buf;
}
-#define arm_is_imm8(v) ((v) > -256 && (v) < 256)
-
-gpointer
-mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
-{
- /* Not used */
- g_assert_not_reached ();
- return NULL;
-}
-
static gpointer
handler_block_trampoline_helper (gpointer *ptr)
{
return NULL;
}
-gpointer
-mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
-{
- g_assert_not_reached ();
- return NULL;
-}
-
gpointer
mono_arch_get_nullified_class_init_trampoline (MonoTrampInfo **info)
{
return buf;
}
-
-gpointer
-mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
-{
- guint8 *tramp;
- guint8 *code, *buf;
- static int byte_offset = -1;
- static guint8 bitmask;
- guint8 *jump;
- int tramp_size;
- guint32 code_len;
- GSList *unwind_ops = NULL;
- MonoJumpInfo *ji = NULL;
-
- tramp_size = 64;
-
- code = buf = mono_global_codeman_reserve (tramp_size);
-
- if (byte_offset < 0)
- mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
-
- /* if (!(vtable->initialized)) */
- mips_lbu (code, mips_at, MONO_ARCH_VTABLE_REG, byte_offset);
- g_assert (!(bitmask & 0xffff0000));
- mips_andi (code, mips_at, mips_at, bitmask);
- jump = code;
- mips_beq (code, mips_at, mips_zero, 0);
- mips_nop (code);
- /* Initialized case */
- mips_jr (code, mips_ra);
- mips_nop (code);
-
- /* Uninitialized case */
- mips_patch ((guint32*)jump, (guint32)code);
-
- if (aot) {
- ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init");
- mips_load (code, mips_at, 0);
- mips_jr (code, mips_at);
- mips_nop (code);
- } else {
- tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), &code_len);
- mips_load (code, mips_at, tramp);
- mips_jr (code, mips_at);
- mips_nop (code);
- }
-
- mono_arch_flush_icache (buf, code - buf);
-
- g_assert (code - buf <= tramp_size);
-
- if (info)
- *info = mono_tramp_info_create ("generic_class_init_trampoline", buf, code - buf, ji, unwind_ops);
-
- return buf;
-}
return buf;
}
-gpointer
-mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
-{
- guint8 *tramp;
- guint8 *code, *buf;
- static int byte_offset = -1;
- static guint8 bitmask;
- guint8 *jump;
- int tramp_size;
- GSList *unwind_ops = NULL;
- MonoJumpInfo *ji = NULL;
-
- tramp_size = MONO_PPC_32_64_CASE (32, 44);
- if (aot)
- tramp_size += 32;
-
- code = buf = mono_global_codeman_reserve (tramp_size);
-
- if (byte_offset < 0)
- mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
-
- ppc_lbz (code, ppc_r4, byte_offset, MONO_ARCH_VTABLE_REG);
- ppc_andid (code, ppc_r4, ppc_r4, bitmask);
- jump = code;
- ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0);
-
- ppc_blr (code);
-
- ppc_patch (jump, code);
-
- if (aot) {
- code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init");
- /* Branch to the trampoline */
-#ifdef PPC_USES_FUNCTION_DESCRIPTOR
- ppc_ldptr (code, ppc_r12, 0, ppc_r12);
-#endif
- ppc_mtctr (code, ppc_r12);
- ppc_bcctr (code, PPC_BR_ALWAYS, 0);
- } else {
- tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT,
- mono_get_root_domain (), NULL);
-
- /* jump to the actual trampoline */
- code = emit_trampoline_jump (code, tramp);
- }
-
- mono_arch_flush_icache (buf, code - buf);
-
- g_assert (code - buf <= tramp_size);
-
- *info = mono_tramp_info_create ("generic_class_init_trampoline", buf, code - buf, ji, unwind_ops);
-
- return buf;
-}
-
gpointer
mono_arch_get_nullified_class_init_trampoline (MonoTrampInfo **info)
{
/*========================= End of Function ========================*/
-/*------------------------------------------------------------------*/
-/* */
-/* Name - mono_arch_create_generic_class_init_trampoline */
-/* */
-/* Function - */
-/* */
-/*------------------------------------------------------------------*/
-
-gpointer
-mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
-{
- guint8 *tramp;
- guint8 *code, *buf;
- static int byte_offset = -1;
- static guint8 bitmask;
- gint32 displace;
- int tramp_size;
- GSList *unwind_ops = NULL;
- MonoJumpInfo *ji = NULL;
-
- tramp_size = 48;
-
- code = buf = mono_global_codeman_reserve (tramp_size);
-
- unwind_ops = mono_arch_get_cie_program ();
-
- if (byte_offset < 0)
- mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
-
- s390_llgc(code, s390_r0, 0, MONO_ARCH_VTABLE_REG, byte_offset);
- s390_nill(code, s390_r0, bitmask);
- s390_bnzr(code, s390_r14);
-
- tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT,
- mono_get_root_domain (), NULL);
-
- /* jump to the actual trampoline */
- displace = (tramp - code) / 2;
- s390_jg (code, displace);
-
- mono_arch_flush_icache (buf, code - buf);
-
- g_assert (code - buf <= tramp_size);
-
- *info = mono_tramp_info_create ("generic_class_init_trampoline", buf, code - buf, ji, unwind_ops);
-
- return(buf);
-}
-
-/*========================= End of Function ========================*/
-
#ifdef MONO_ARCH_MONITOR_OBJECT_REG
/*------------------------------------------------------------------*/
/* */
return buf;
}
-gpointer
-mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
-{
- guint8 *tramp;
- guint8 *code, *buf;
- static int byte_offset = -1;
- static guint8 bitmask;
- guint8 *jump;
- int tramp_size;
- GSList *unwind_ops = NULL;
- MonoJumpInfo *ji = NULL;
-
- tramp_size = 64;
-
- code = buf = mono_global_codeman_reserve (tramp_size);
-
- unwind_ops = mono_arch_get_cie_program ();
-
- if (byte_offset < 0)
- mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
-
- x86_test_membase_imm (code, MONO_ARCH_VTABLE_REG, byte_offset, bitmask);
- jump = code;
- x86_branch8 (code, X86_CC_Z, -1, 1);
-
- x86_ret (code);
-
- x86_patch (jump, code);
-
- /* Push the vtable so the stack is the same as in a specific trampoline */
- x86_push_reg (code, MONO_ARCH_VTABLE_REG);
-
- if (aot) {
- code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "generic_trampoline_generic_class_init");
- x86_jump_reg (code, X86_EAX);
- } else {
- tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_GENERIC_CLASS_INIT);
-
- /* jump to the actual trampoline */
- x86_jump_code (code, tramp);
- }
-
- mono_arch_flush_icache (code, code - buf);
-
- g_assert (code - buf <= tramp_size);
-#ifdef __native_client_codegen__
- g_assert (code - buf <= kNaClAlignment);
-#endif
-
- nacl_global_codeman_validate (&buf, tramp_size, &code);
- mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
-
- *info = mono_tramp_info_create ("generic_class_init_trampoline", buf, code - buf, ji, unwind_ops);
-
- return buf;
-}
-
#ifdef MONO_ARCH_MONITOR_OBJECT_REG
/*
* The code produced by this trampoline is equivalent to this:
thread_static_gc_layout.cs \
sleep.cs \
bug-27147.cs \
+ bug-30085.cs \
bug-17537.cs
TEST_CS_SRC_DIST= \
--- /dev/null
+using System;
+
+class Program
+{
+
+ static void MissingImage ()
+ {
+ Type good = System.Type.GetType("System.Nullable`1[[System.Int32, mscorlib]]");
+ Type bad = System.Type.GetType("System.Nullable`1[[System.Int32, mscorlibBAD]]");
+
+ if (good.Assembly.FullName.Split (',') [0] != "mscorlib")
+ throw new Exception ("Wrong assembly name");
+
+ if (bad != null)
+ throw new Exception ("Should not have loaded type");
+ }
+
+ static void ProbeCorlib ()
+ {
+ Type good = System.Type.GetType("System.Nullable`1[[System.Int32, mscorlib]]");
+ Type bad = System.Type.GetType("System.Nullable`1[[System.IO.Pipes.PipeOptions, System.Core, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089]]");
+
+ if (good.Assembly.FullName.Split (',') [0] != "mscorlib")
+ throw new Exception ("Wrong assembly name");
+
+ if (good == null || bad == null)
+ throw new Exception ("Missing image did not probe corlib");
+ }
+
+ static void Main()
+ {
+ MissingImage ();
+ ProbeCorlib ();
+ }
+}
#if defined(MONO_SIGNAL_USE_SIGACTION)
ucontext_t *ctx = (ucontext_t*)sigctx;
- mctx->rax = UCONTEXT_REG_RAX (ctx);
- mctx->rbx = UCONTEXT_REG_RBX (ctx);
- mctx->rcx = UCONTEXT_REG_RCX (ctx);
- mctx->rdx = UCONTEXT_REG_RDX (ctx);
- mctx->rbp = UCONTEXT_REG_RBP (ctx);
- mctx->rsp = UCONTEXT_REG_RSP (ctx);
- mctx->rsi = UCONTEXT_REG_RSI (ctx);
- mctx->rdi = UCONTEXT_REG_RDI (ctx);
- mctx->r8 = UCONTEXT_REG_R8 (ctx);
- mctx->r9 = UCONTEXT_REG_R9 (ctx);
- mctx->r10 = UCONTEXT_REG_R10 (ctx);
- mctx->r11 = UCONTEXT_REG_R11 (ctx);
- mctx->r12 = UCONTEXT_REG_R12 (ctx);
- mctx->r13 = UCONTEXT_REG_R13 (ctx);
- mctx->r14 = UCONTEXT_REG_R14 (ctx);
- mctx->r15 = UCONTEXT_REG_R15 (ctx);
- mctx->rip = UCONTEXT_REG_RIP (ctx);
+ mctx->gregs [AMD64_RAX] = UCONTEXT_REG_RAX (ctx);
+ mctx->gregs [AMD64_RBX] = UCONTEXT_REG_RBX (ctx);
+ mctx->gregs [AMD64_RCX] = UCONTEXT_REG_RCX (ctx);
+ mctx->gregs [AMD64_RDX] = UCONTEXT_REG_RDX (ctx);
+ mctx->gregs [AMD64_RBP] = UCONTEXT_REG_RBP (ctx);
+ mctx->gregs [AMD64_RSP] = UCONTEXT_REG_RSP (ctx);
+ mctx->gregs [AMD64_RSI] = UCONTEXT_REG_RSI (ctx);
+ mctx->gregs [AMD64_RDI] = UCONTEXT_REG_RDI (ctx);
+ mctx->gregs [AMD64_R8] = UCONTEXT_REG_R8 (ctx);
+ mctx->gregs [AMD64_R9] = UCONTEXT_REG_R9 (ctx);
+ mctx->gregs [AMD64_R10] = UCONTEXT_REG_R10 (ctx);
+ mctx->gregs [AMD64_R11] = UCONTEXT_REG_R11 (ctx);
+ mctx->gregs [AMD64_R12] = UCONTEXT_REG_R12 (ctx);
+ mctx->gregs [AMD64_R13] = UCONTEXT_REG_R13 (ctx);
+ mctx->gregs [AMD64_R14] = UCONTEXT_REG_R14 (ctx);
+ mctx->gregs [AMD64_R15] = UCONTEXT_REG_R15 (ctx);
+ mctx->gregs [AMD64_RIP] = UCONTEXT_REG_RIP (ctx);
#elif defined(HOST_WIN32)
CONTEXT *context = (CONTEXT*)sigctx;
- mctx->rip = context->Rip;
- mctx->rax = context->Rax;
- mctx->rcx = context->Rcx;
- mctx->rdx = context->Rdx;
- mctx->rbx = context->Rbx;
- mctx->rsp = context->Rsp;
- mctx->rbp = context->Rbp;
- mctx->rsi = context->Rsi;
- mctx->rdi = context->Rdi;
- mctx->r8 = context->R8;
- mctx->r9 = context->R9;
- mctx->r10 = context->R10;
- mctx->r11 = context->R11;
- mctx->r12 = context->R12;
- mctx->r13 = context->R13;
- mctx->r14 = context->R14;
- mctx->r15 = context->R15;
+	mctx->gregs [AMD64_RIP] = context->Rip;
+	mctx->gregs [AMD64_RAX] = context->Rax;
+	mctx->gregs [AMD64_RCX] = context->Rcx;
+	mctx->gregs [AMD64_RDX] = context->Rdx;
+	mctx->gregs [AMD64_RBX] = context->Rbx;
+	mctx->gregs [AMD64_RSP] = context->Rsp;
+	mctx->gregs [AMD64_RBP] = context->Rbp;
+	mctx->gregs [AMD64_RSI] = context->Rsi;
+	mctx->gregs [AMD64_RDI] = context->Rdi;
+	mctx->gregs [AMD64_R8] = context->R8;
+	mctx->gregs [AMD64_R9] = context->R9;
+	mctx->gregs [AMD64_R10] = context->R10;
+	mctx->gregs [AMD64_R11] = context->R11;
+	mctx->gregs [AMD64_R12] = context->R12;
+	mctx->gregs [AMD64_R13] = context->R13;
+	mctx->gregs [AMD64_R14] = context->R14;
+	mctx->gregs [AMD64_R15] = context->R15;
#else
MonoContext *ctx = (MonoContext *)sigctx;
- mctx->rax = ctx->rax;
- mctx->rbx = ctx->rbx;
- mctx->rcx = ctx->rcx;
- mctx->rdx = ctx->rdx;
- mctx->rbp = ctx->rbp;
- mctx->rsp = ctx->rsp;
- mctx->rsi = ctx->rsi;
- mctx->rdi = ctx->rdi;
- mctx->r8 = ctx->r8;
- mctx->r9 = ctx->r9;
- mctx->r10 = ctx->r10;
- mctx->r11 = ctx->r11;
- mctx->r12 = ctx->r12;
- mctx->r13 = ctx->r13;
- mctx->r14 = ctx->r14;
- mctx->r15 = ctx->r15;
- mctx->rip = ctx->rip;
+ mctx->gregs [AMD64_RAX] = ctx->gregs [AMD64_RAX];
+ mctx->gregs [AMD64_RBX] = ctx->gregs [AMD64_RBX];
+ mctx->gregs [AMD64_RCX] = ctx->gregs [AMD64_RCX];
+ mctx->gregs [AMD64_RDX] = ctx->gregs [AMD64_RDX];
+ mctx->gregs [AMD64_RBP] = ctx->gregs [AMD64_RBP];
+ mctx->gregs [AMD64_RSP] = ctx->gregs [AMD64_RSP];
+ mctx->gregs [AMD64_RSI] = ctx->gregs [AMD64_RSI];
+ mctx->gregs [AMD64_RDI] = ctx->gregs [AMD64_RDI];
+ mctx->gregs [AMD64_R8] = ctx->gregs [AMD64_R8];
+ mctx->gregs [AMD64_R9] = ctx->gregs [AMD64_R9];
+ mctx->gregs [AMD64_R10] = ctx->gregs [AMD64_R10];
+ mctx->gregs [AMD64_R11] = ctx->gregs [AMD64_R11];
+ mctx->gregs [AMD64_R12] = ctx->gregs [AMD64_R12];
+ mctx->gregs [AMD64_R13] = ctx->gregs [AMD64_R13];
+ mctx->gregs [AMD64_R14] = ctx->gregs [AMD64_R14];
+ mctx->gregs [AMD64_R15] = ctx->gregs [AMD64_R15];
+ mctx->gregs [AMD64_RIP] = ctx->gregs [AMD64_RIP];
#endif
}
#if defined(MONO_SIGNAL_USE_SIGACTION)
ucontext_t *ctx = (ucontext_t*)sigctx;
- UCONTEXT_REG_RAX (ctx) = mctx->rax;
- UCONTEXT_REG_RBX (ctx) = mctx->rbx;
- UCONTEXT_REG_RCX (ctx) = mctx->rcx;
- UCONTEXT_REG_RDX (ctx) = mctx->rdx;
- UCONTEXT_REG_RBP (ctx) = mctx->rbp;
- UCONTEXT_REG_RSP (ctx) = mctx->rsp;
- UCONTEXT_REG_RSI (ctx) = mctx->rsi;
- UCONTEXT_REG_RDI (ctx) = mctx->rdi;
- UCONTEXT_REG_R8 (ctx) = mctx->r8;
- UCONTEXT_REG_R9 (ctx) = mctx->r9;
- UCONTEXT_REG_R10 (ctx) = mctx->r10;
- UCONTEXT_REG_R11 (ctx) = mctx->r11;
- UCONTEXT_REG_R12 (ctx) = mctx->r12;
- UCONTEXT_REG_R13 (ctx) = mctx->r13;
- UCONTEXT_REG_R14 (ctx) = mctx->r14;
- UCONTEXT_REG_R15 (ctx) = mctx->r15;
- UCONTEXT_REG_RIP (ctx) = mctx->rip;
+ UCONTEXT_REG_RAX (ctx) = mctx->gregs [AMD64_RAX];
+ UCONTEXT_REG_RBX (ctx) = mctx->gregs [AMD64_RBX];
+ UCONTEXT_REG_RCX (ctx) = mctx->gregs [AMD64_RCX];
+ UCONTEXT_REG_RDX (ctx) = mctx->gregs [AMD64_RDX];
+ UCONTEXT_REG_RBP (ctx) = mctx->gregs [AMD64_RBP];
+ UCONTEXT_REG_RSP (ctx) = mctx->gregs [AMD64_RSP];
+ UCONTEXT_REG_RSI (ctx) = mctx->gregs [AMD64_RSI];
+ UCONTEXT_REG_RDI (ctx) = mctx->gregs [AMD64_RDI];
+ UCONTEXT_REG_R8 (ctx) = mctx->gregs [AMD64_R8];
+ UCONTEXT_REG_R9 (ctx) = mctx->gregs [AMD64_R9];
+ UCONTEXT_REG_R10 (ctx) = mctx->gregs [AMD64_R10];
+ UCONTEXT_REG_R11 (ctx) = mctx->gregs [AMD64_R11];
+ UCONTEXT_REG_R12 (ctx) = mctx->gregs [AMD64_R12];
+ UCONTEXT_REG_R13 (ctx) = mctx->gregs [AMD64_R13];
+ UCONTEXT_REG_R14 (ctx) = mctx->gregs [AMD64_R14];
+ UCONTEXT_REG_R15 (ctx) = mctx->gregs [AMD64_R15];
+ UCONTEXT_REG_RIP (ctx) = mctx->gregs [AMD64_RIP];
#elif defined(HOST_WIN32)
CONTEXT *context = (CONTEXT*)sigctx;
- context->Rip = mctx->rip;
- context->Rax = mctx->rax;
- context->Rcx = mctx->rcx;
- context->Rdx = mctx->rdx;
- context->Rbx = mctx->rbx;
- context->Rsp = mctx->rsp;
- context->Rbp = mctx->rbp;
- context->Rsi = mctx->rsi;
- context->Rdi = mctx->rdi;
- context->R8 = mctx->r8;
- context->R9 = mctx->r9;
- context->R10 = mctx->r10;
- context->R11 = mctx->r11;
- context->R12 = mctx->r12;
- context->R13 = mctx->r13;
- context->R14 = mctx->r14;
- context->R15 = mctx->r15;
+	context->Rip = mctx->gregs [AMD64_RIP];
+	context->Rax = mctx->gregs [AMD64_RAX];
+	context->Rcx = mctx->gregs [AMD64_RCX];
+	context->Rdx = mctx->gregs [AMD64_RDX];
+	context->Rbx = mctx->gregs [AMD64_RBX];
+	context->Rsp = mctx->gregs [AMD64_RSP];
+	context->Rbp = mctx->gregs [AMD64_RBP];
+	context->Rsi = mctx->gregs [AMD64_RSI];
+	context->Rdi = mctx->gregs [AMD64_RDI];
+	context->R8 = mctx->gregs [AMD64_R8];
+	context->R9 = mctx->gregs [AMD64_R9];
+	context->R10 = mctx->gregs [AMD64_R10];
+	context->R11 = mctx->gregs [AMD64_R11];
+	context->R12 = mctx->gregs [AMD64_R12];
+	context->R13 = mctx->gregs [AMD64_R13];
+	context->R14 = mctx->gregs [AMD64_R14];
+	context->R15 = mctx->gregs [AMD64_R15];
#else
MonoContext *ctx = (MonoContext *)sigctx;
- ctx->rax = mctx->rax;
- ctx->rbx = mctx->rbx;
- ctx->rcx = mctx->rcx;
- ctx->rdx = mctx->rdx;
- ctx->rbp = mctx->rbp;
- ctx->rsp = mctx->rsp;
- ctx->rsi = mctx->rsi;
- ctx->rdi = mctx->rdi;
- ctx->r8 = mctx->r8;
- ctx->r9 = mctx->r9;
- ctx->r10 = mctx->r10;
- ctx->r11 = mctx->r11;
- ctx->r12 = mctx->r12;
- ctx->r13 = mctx->r13;
- ctx->r14 = mctx->r14;
- ctx->r15 = mctx->r15;
- ctx->rip = mctx->rip;
+ ctx->gregs [AMD64_RAX] = mctx->gregs [AMD64_RAX];
+ ctx->gregs [AMD64_RBX] = mctx->gregs [AMD64_RBX];
+ ctx->gregs [AMD64_RCX] = mctx->gregs [AMD64_RCX];
+ ctx->gregs [AMD64_RDX] = mctx->gregs [AMD64_RDX];
+ ctx->gregs [AMD64_RBP] = mctx->gregs [AMD64_RBP];
+ ctx->gregs [AMD64_RSP] = mctx->gregs [AMD64_RSP];
+ ctx->gregs [AMD64_RSI] = mctx->gregs [AMD64_RSI];
+ ctx->gregs [AMD64_RDI] = mctx->gregs [AMD64_RDI];
+ ctx->gregs [AMD64_R8] = mctx->gregs [AMD64_R8];
+ ctx->gregs [AMD64_R9] = mctx->gregs [AMD64_R9];
+ ctx->gregs [AMD64_R10] = mctx->gregs [AMD64_R10];
+ ctx->gregs [AMD64_R11] = mctx->gregs [AMD64_R11];
+ ctx->gregs [AMD64_R12] = mctx->gregs [AMD64_R12];
+ ctx->gregs [AMD64_R13] = mctx->gregs [AMD64_R13];
+ ctx->gregs [AMD64_R14] = mctx->gregs [AMD64_R14];
+ ctx->gregs [AMD64_R15] = mctx->gregs [AMD64_R15];
+ ctx->gregs [AMD64_RIP] = mctx->gregs [AMD64_RIP];
#endif
}
#elif (defined(__x86_64__) && !defined(MONO_CROSS_COMPILE)) || (defined(TARGET_AMD64)) /* defined(__i386__) */
+#include <mono/arch/amd64/amd64-codegen.h>
#if !defined( HOST_WIN32 ) && !defined(__native_client__) && !defined(__native_client_codegen__)
#endif
typedef struct {
- mgreg_t rax;
- mgreg_t rbx;
- mgreg_t rcx;
- mgreg_t rdx;
- mgreg_t rbp;
- mgreg_t rsp;
- mgreg_t rsi;
- mgreg_t rdi;
- mgreg_t r8;
- mgreg_t r9;
- mgreg_t r10;
- mgreg_t r11;
- mgreg_t r12;
- mgreg_t r13;
- mgreg_t r14;
- mgreg_t r15;
- mgreg_t rip;
+ mgreg_t gregs [AMD64_NREG];
} MonoContext;
-#define MONO_CONTEXT_SET_IP(ctx,ip) do { (ctx)->rip = (mgreg_t)(ip); } while (0);
-#define MONO_CONTEXT_SET_BP(ctx,bp) do { (ctx)->rbp = (mgreg_t)(bp); } while (0);
-#define MONO_CONTEXT_SET_SP(ctx,esp) do { (ctx)->rsp = (mgreg_t)(esp); } while (0);
+#define MONO_CONTEXT_SET_IP(ctx,ip) do { (ctx)->gregs [AMD64_RIP] = (mgreg_t)(ip); } while (0);
+#define MONO_CONTEXT_SET_BP(ctx,bp) do { (ctx)->gregs [AMD64_RBP] = (mgreg_t)(bp); } while (0);
+#define MONO_CONTEXT_SET_SP(ctx,esp) do { (ctx)->gregs [AMD64_RSP] = (mgreg_t)(esp); } while (0);
-#define MONO_CONTEXT_GET_IP(ctx) ((gpointer)((ctx)->rip))
-#define MONO_CONTEXT_GET_BP(ctx) ((gpointer)((ctx)->rbp))
-#define MONO_CONTEXT_GET_SP(ctx) ((gpointer)((ctx)->rsp))
+#define MONO_CONTEXT_GET_IP(ctx) ((gpointer)((ctx)->gregs [AMD64_RIP]))
+#define MONO_CONTEXT_GET_BP(ctx) ((gpointer)((ctx)->gregs [AMD64_RBP]))
+#define MONO_CONTEXT_GET_SP(ctx) ((gpointer)((ctx)->gregs [AMD64_RSP]))
#if defined (HOST_WIN32) && !defined(__GNUC__)
/* msvc doesn't support inline assembly, so have to use a separate .asm file */