<opcode name="mono_ldptr_nursery_bits" input="Pop0" output="PushI" args="InlineNone" o1="0xF0" o2="0x17" flow="next" />
<opcode name="mono_calli_extra_arg" input="VarPop" output="VarPush" args="InlineSig" o1="0xF0" o2="0x18" flow="call" />
<opcode name="mono_lddomain" input="Pop0" output="PushI" args="InlineNone" o1="0xF0" o2="0x19" flow="next" />
+<opcode name="mono_atomic_store_i4" input="PopI+PopI" output="Push0" args="InlineI" o1="0xF0" o2="0x1A" flow="next" />
</opdesc>
OPDEF(CEE_MONO_LDPTR_NURSERY_BITS, "mono_ldptr_nursery_bits", Pop0, PushI, InlineNone, X, 2, 0xF0, 0x17, NEXT)
OPDEF(CEE_MONO_CALLI_EXTRA_ARG, "mono_calli_extra_arg", VarPop, VarPush, InlineSig, X, 2, 0xF0, 0x18, CALL)
OPDEF(CEE_MONO_LDDOMAIN, "mono_lddomain", Pop0, PushI, InlineNone, X, 2, 0xF0, 0x19, NEXT)
+OPDEF(CEE_MONO_ATOMIC_STORE_I4, "mono_atomic_store_i4", PopI+PopI, Push0, InlineI, X, 2, 0xF0, 0x1A, NEXT)
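/*
 * Editor's sketch: how a method builder emits the new prefixed opcode. The
 * encoding is MONO_CUSTOM_PREFIX (0xF0) followed by 0x1A, with one InlineI
 * operand selecting the MonoMemoryBarrierKind. `emit_atomic_store_i4' is a
 * hypothetical helper, not part of this patch; it mirrors the
 * create_allocator () hunks further down. Stack on entry: [address, value].
 */
static void
emit_atomic_store_i4 (MonoMethodBuilder *mb, int barrier_kind)
{
 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX); /* 0xF0 */
 mono_mb_emit_byte (mb, CEE_MONO_ATOMIC_STORE_I4); /* 0x1A */
 mono_mb_emit_i4 (mb, barrier_kind); /* e.g. MONO_MEMORY_BARRIER_REL */
}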
#ifndef OPALIAS
#define _MONO_CIL_OPALIAS_DEFINED_
#define OPALIAS(a,s,r)
return NULL;
if (!SMALL_ENOUGH (klass->instance_size))
return NULL;
- if (mono_class_has_finalizer (klass) || mono_class_is_marshalbyref (klass) || (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS))
+ if (mono_class_has_finalizer (klass) || mono_class_is_marshalbyref (klass))
+ return NULL;
+ if (mono_profiler_get_events () & (MONO_PROFILE_ALLOCATIONS | MONO_PROFILE_STATISTICAL))
return NULL;
if (klass->rank)
return NULL;
atype = ATYPE_NORMAL;
*/
}
- return mono_gc_get_managed_allocator_by_type (atype, FALSE);
+ return mono_gc_get_managed_allocator_by_type (atype, MANAGED_ALLOCATOR_REGULAR);
}
MonoMethod*
* Return a managed allocator method corresponding to allocator type ATYPE.
*/
MonoMethod*
-mono_gc_get_managed_allocator_by_type (int atype, gboolean slowpath)
+mono_gc_get_managed_allocator_by_type (int atype, ManagedAllocatorVariant variant)
{
int offset = -1;
MonoMethod *res;
+ gboolean slowpath = variant != MANAGED_ALLOCATOR_REGULAR;
MonoMethod **cache = slowpath ? slowpath_alloc_method_cache : alloc_method_cache;
MONO_THREAD_VAR_OFFSET (GC_thread_tls, offset);
}
MonoMethod*
-mono_gc_get_managed_allocator_by_type (int atype, gboolean slowpath)
+mono_gc_get_managed_allocator_by_type (int atype, ManagedAllocatorVariant variant)
{
return NULL;
}
/* fast allocation support */
+typedef enum {
+ // Regular fast path allocator.
+ MANAGED_ALLOCATOR_REGULAR,
+ // Managed allocator that just calls into the runtime. Used when allocation profiling is enabled with AOT.
+ MANAGED_ALLOCATOR_SLOW_PATH,
+} ManagedAllocatorVariant;
+
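/*
 * Editor's sketch of a caller choosing a variant: the slow path is requested
 * when allocation profiling is active, because the inlined fast path would
 * bypass the profiler's allocation callbacks (see the aot-runtime hunk
 * below). Illustrative only; ATYPE_NORMAL is an sgen allocator type.
 */
ManagedAllocatorVariant variant =
 (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS) ?
 MANAGED_ALLOCATOR_SLOW_PATH : MANAGED_ALLOCATOR_REGULAR;
MonoMethod *alloc = mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, variant);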
int mono_gc_get_aligned_size_for_allocator (int size);
MonoMethod* mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size);
MonoMethod* mono_gc_get_managed_array_allocator (MonoClass *klass);
-MonoMethod *mono_gc_get_managed_allocator_by_type (int atype, gboolean slowpath);
+MonoMethod *mono_gc_get_managed_allocator_by_type (int atype, ManagedAllocatorVariant variant);
guint32 mono_gc_get_managed_allocator_types (void);
}
MonoMethod*
-mono_gc_get_managed_allocator_by_type (int atype, gboolean slowpath)
+mono_gc_get_managed_allocator_by_type (int atype, ManagedAllocatorVariant variant)
{
return NULL;
}
DECL_OFFSET(MonoThreadsSync, status)
DECL_OFFSET(MonoThreadsSync, nest)
-#if defined (HAVE_SGEN_GC) && !defined (HAVE_KW_THREAD)
+#ifdef HAVE_SGEN_GC
+DECL_OFFSET(SgenClientThreadInfo, in_critical_region)
+#ifndef HAVE_KW_THREAD
DECL_OFFSET(SgenThreadInfo, tlab_next_addr)
DECL_OFFSET(SgenThreadInfo, tlab_temp_end)
#endif
+#endif
#endif //DISABLE METADATA OFFSETS
*/
#define EXIT_CRITICAL_REGION do { mono_atomic_store_release (&IN_CRITICAL_REGION, 0); } while (0)
+#ifndef DISABLE_CRITICAL_REGION
+/*
+ * We can only use a critical region in the managed allocator if the JIT supports OP_ATOMIC_STORE_I4.
+ *
+ * TODO: Query the JIT instead of this ifdef hack.
+ */
+#if defined (TARGET_X86) || defined (TARGET_AMD64) || (defined (TARGET_ARM) && defined (HAVE_ARMV7)) || defined (TARGET_ARM64)
+#define MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION
+#endif
+#endif
+
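/*
 * Editor's sketch of the protocol the emitted IL implements: entering the
 * region is a plain (relaxed) store, leaving it is a release store, so the
 * collector observes every allocation write before it sees the flag clear.
 * `flag' stands for the thread's in_critical_region field; illustrative only.
 */
static inline void
enter_critical_region (volatile gint32 *flag)
{
 *flag = 1; /* MONO_MEMORY_BARRIER_NONE */
}

static inline void
exit_critical_region (volatile gint32 *flag)
{
 mono_atomic_store_release (flag, 0); /* MONO_MEMORY_BARRIER_REL */
}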
#define SGEN_TV_DECLARE(name) gint64 name
#define SGEN_TV_GETTIME(tv) tv = mono_100ns_ticks ()
#define SGEN_TV_ELAPSED(start,end) ((gint64)(end-start))
#ifdef HAVE_KW_THREAD
+#define EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR(mb) \
+ do { \
+ mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
+ mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
+ mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_IN_CRITICAL_REGION_ADDR); \
+ } while (0)
+
#define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { \
mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
#else
#if defined(TARGET_OSX) || defined(TARGET_WIN32) || defined(TARGET_ANDROID) || defined(TARGET_IOS)
+#define EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR(mb) \
+ do { \
+ mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
+ mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
+ mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_THREAD_INFO); \
+ mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenClientThreadInfo, in_critical_region)); \
+ mono_mb_emit_byte ((mb), CEE_ADD); \
+ } while (0)
+
#define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { \
mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
#else
#define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
#define EMIT_TLS_ACCESS_TEMP_END(mb) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
+#define EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR(mb) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
#endif
#endif
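/*
 * Editor's note: all three definitions above share one contract, leaving the
 * address of the current thread's in_critical_region flag on the IL stack.
 * The __thread variant loads the cached pointer from the dedicated
 * TLS_KEY_SGEN_IN_CRITICAL_REGION_ADDR slot; the non-__thread variant
 * fetches the SgenThreadInfo pointer via TLS_KEY_SGEN_THREAD_INFO and adds
 * the offset of SgenClientThreadInfo.in_critical_region; the
 * --with-tls=pthread variant aborts, since sgen does not support it.
 */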
* that they are executed atomically via the restart mechanism.
*/
static MonoMethod*
-create_allocator (int atype, gboolean slowpath)
+create_allocator (int atype, ManagedAllocatorVariant variant)
{
int p_var, size_var;
+ gboolean slowpath = variant == MANAGED_ALLOCATOR_SLOW_PATH;
guint32 slowpath_branch, max_size_branch;
MonoMethodBuilder *mb;
MonoMethod *res;
goto done;
}
+#ifdef MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION
+ EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR (mb);
+ mono_mb_emit_byte (mb, CEE_LDC_I4_1);
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_ATOMIC_STORE_I4);
+ mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_NONE);
+#endif
+
size_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
if (atype == ATYPE_SMALL) {
/* size_var = size_arg */
mono_mb_emit_byte (mb, MONO_CEE_STIND_I4);
}
+#ifdef MANAGED_ALLOCATOR_CAN_USE_CRITICAL_REGION
+ EMIT_TLS_ACCESS_IN_CRITICAL_REGION_ADDR (mb);
+ mono_mb_emit_byte (mb, CEE_LDC_I4_0);
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_ATOMIC_STORE_I4);
+#else
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
+#endif
/*
We must make sure both the vtable and max_length are globally visible before returning to managed land.
*/
- mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
- mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_REL);
/* return p */
return NULL;
if (known_instance_size && ALIGN_TO (klass->instance_size, SGEN_ALLOC_ALIGN) >= SGEN_MAX_SMALL_OBJ_SIZE)
return NULL;
- if (mono_class_has_finalizer (klass) || mono_class_is_marshalbyref (klass) || (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS))
+ if (mono_class_has_finalizer (klass) || mono_class_is_marshalbyref (klass))
return NULL;
if (klass->rank)
return NULL;
+ if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
+ return NULL;
if (klass->byval_arg.type == MONO_TYPE_STRING)
- return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, FALSE);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, MANAGED_ALLOCATOR_REGULAR);
/* Generic classes have dynamic field and can go above MAX_SMALL_OBJ_SIZE. */
if (known_instance_size)
- return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, FALSE);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, MANAGED_ALLOCATOR_REGULAR);
else
- return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, FALSE);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, MANAGED_ALLOCATOR_REGULAR);
#else
return NULL;
#endif
return NULL;
g_assert (!mono_class_has_finalizer (klass) && !mono_class_is_marshalbyref (klass));
- return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, FALSE);
+ return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, MANAGED_ALLOCATOR_REGULAR);
#else
return NULL;
#endif
}
MonoMethod*
-mono_gc_get_managed_allocator_by_type (int atype, gboolean slowpath)
+mono_gc_get_managed_allocator_by_type (int atype, ManagedAllocatorVariant variant)
{
#ifdef MANAGED_ALLOCATION
MonoMethod *res;
- MonoMethod **cache = slowpath ? slowpath_alloc_method_cache : alloc_method_cache;
+ MonoMethod **cache;
if (!use_managed_allocator)
return NULL;
if (!mono_runtime_has_tls_get ())
return NULL;
+ switch (variant) {
+ case MANAGED_ALLOCATOR_REGULAR: cache = alloc_method_cache; break;
+ case MANAGED_ALLOCATOR_SLOW_PATH: cache = slowpath_alloc_method_cache; break;
+ default: g_assert_not_reached (); break;
+ }
+
res = cache [atype];
if (res)
return res;
- res = create_allocator (atype, slowpath);
+ res = create_allocator (atype, variant);
LOCK_GC;
if (cache [atype]) {
mono_free_method (res);
/* Managed Allocators */
nallocators = mono_gc_get_managed_allocator_types ();
for (i = 0; i < nallocators; ++i) {
- m = mono_gc_get_managed_allocator_by_type (i, TRUE);
- if (m)
+ if ((m = mono_gc_get_managed_allocator_by_type (i, MANAGED_ALLOCATOR_REGULAR)))
add_method (acfg, m);
- }
- for (i = 0; i < nallocators; ++i) {
- m = mono_gc_get_managed_allocator_by_type (i, FALSE);
- if (m)
+ if ((m = mono_gc_get_managed_allocator_by_type (i, MANAGED_ALLOCATOR_SLOW_PATH)))
add_method (acfg, m);
}
}
#endif
case MONO_WRAPPER_ALLOC: {
int atype = decode_value (p, &p);
- ref->method = mono_gc_get_managed_allocator_by_type (atype, !!(mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS));
+ ManagedAllocatorVariant variant =
+ mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS ?
+ MANAGED_ALLOCATOR_SLOW_PATH : MANAGED_ALLOCATOR_REGULAR;
+ ref->method = mono_gc_get_managed_allocator_by_type (atype, variant);
if (!ref->method) {
mono_error_set_bad_image_name (error, module->aot_name, "Error: No managed allocator, but we need one for AOT.\nAre you using non-standard GC options?\n");
return FALSE;
ip += 6;
break;
}
+ case CEE_MONO_ATOMIC_STORE_I4: {
+ g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
+
+ CHECK_OPSIZE (6);
+ CHECK_STACK (2);
+ sp -= 2;
+
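+ /* Evaluation-stack order after the pop: sp [0] is the target address
+    (dreg), sp [1] the value to store (sreg1); the InlineI operand carries
+    the MonoMemoryBarrierKind consumed by the backend. */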
+ MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
+ ins->dreg = sp [0]->dreg;
+ ins->sreg1 = sp [1]->dreg;
+ ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ ip += 6;
+ break;
+ }
case CEE_MONO_JIT_ATTACH: {
MonoInst *args [16], *domain_ins;
MonoInst *ad_ins, *jit_tls_ins;
static __thread char *tlab_real_end;
/* Used by the managed allocator/wbarrier */
static __thread char **tlab_next_addr MONO_ATTR_USED;
+#ifndef SGEN_WITHOUT_MONO
+static __thread volatile int *in_critical_region_addr MONO_ATTR_USED;
+#endif
#endif
#ifdef HAVE_KW_THREAD
#ifdef HAVE_KW_THREAD
tlab_next_addr = &tlab_next;
+#ifndef SGEN_WITHOUT_MONO
+ in_critical_region_addr = &info->client_info.in_critical_region;
+#endif
#endif
}
#if defined(HAVE_KW_THREAD) && !defined(SGEN_WITHOUT_MONO)
int tlab_next_addr_offset = -1;
int tlab_temp_end_offset = -1;
-
+ int in_critical_region_addr_offset = -1;
MONO_THREAD_VAR_OFFSET (tlab_next_addr, tlab_next_addr_offset);
MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);
+ MONO_THREAD_VAR_OFFSET (in_critical_region_addr, in_critical_region_addr_offset);
mono_tls_key_set_offset (TLS_KEY_SGEN_TLAB_NEXT_ADDR, tlab_next_addr_offset);
mono_tls_key_set_offset (TLS_KEY_SGEN_TLAB_TEMP_END, tlab_temp_end_offset);
+ mono_tls_key_set_offset (TLS_KEY_SGEN_IN_CRITICAL_REGION_ADDR, in_critical_region_addr_offset);
#endif
#ifdef HEAVY_STATISTICS
TLS_KEY_SGEN_TLAB_TEMP_END = 6,
TLS_KEY_BOEHM_GC_THREAD = 7,
TLS_KEY_LMF_ADDR = 8,
- TLS_KEY_NUM = 9
+ TLS_KEY_SGEN_IN_CRITICAL_REGION_ADDR = 9,
+ TLS_KEY_NUM = 10
} MonoTlsKey;
#ifdef HOST_WIN32