By default all platforms will call into native getters/setters whenever they need to get access to a tls value. On certain platforms we can try to be faster than this and avoid the call. We call this fast tls and each platform defines its own way to achieve this. Fast tls should normally be inlined, otherwise there is little point to doing anything else in the first place (on linux, __thread access is 2-3 instructions, on mac pthread_getspecific is 2 instructions, other platforms also having decent implementations). For this, a platform has to define MONO_ARCH_HAVE_FAST_TLS, and provide alternative getters/setters for a MonoTlsKey. In order to have fast getters/setters, the platform has to declare a way to fetch an internal offset (MONO_THREAD_VAR_OFFSET) which is stored in the tls module, and in the arch specific file probe the system to see if we can use the offset initialized here. If these run-time checks don't succeed we just use the fallbacks.
In case we would wish to provide fast inlined tls for aot code, we would need to be sure that, at run-time, these two platform checks would never fail; otherwise the tls getters/setters that we emitted would not work. Normally, there is little incentive to support this since tls access is most common in wrappers and managed allocators, both of which are not aot-ed by default. So far, we never supported inlined fast tls on full-aot systems.
amd64_set_xmmreg_r8: dest:f src1:f len:14 clob:m
amd64_save_sp_to_lmf: len:16
tls_get: dest:i len:32
-tls_get_reg: dest:i src1:i len:64
tls_set: src1:i len:16
-tls_set_reg: src1:i src2:i len:32
atomic_add_i4: src1:b src2:i dest:i len:32
atomic_add_i8: src1:b src2:i dest:i len:32
atomic_exchange_i4: src1:b src2:i dest:i len:12
}
}
+/*
+ * mono_create_fast_tls_getter:
+ *
+ *   Return a new (not yet emitted) OP_TLS_GET instruction that loads the tls
+ * slot for KEY into a fresh vreg, or NULL if fast inlined tls cannot be used
+ * (aot compilation, no run-time offset for the key, or the arch probe failed).
+ * The caller is responsible for adding the instruction to a basic block.
+ */
+static MonoInst*
+mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
+{
+	int tls_offset;
+
+	/* Fast tls is never emitted for aot code; bail out before querying the
+	 * offset so we don't depend on the key having been initialized. */
+	if (cfg->compile_aot)
+		return NULL;
+
+	tls_offset = mono_tls_get_tls_offset (key);
+	if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
+		MonoInst *ins;
+		MONO_INST_NEW (cfg, ins, OP_TLS_GET);
+		ins->dreg = mono_alloc_preg (cfg);
+		ins->inst_offset = tls_offset;
+		return ins;
+	}
+	return NULL;
+}
+
+/*
+ * mono_create_fast_tls_setter:
+ *
+ *   Return a new (not yet emitted) OP_TLS_SET instruction that stores VALUE's
+ * vreg into the tls slot for KEY, or NULL if fast inlined tls cannot be used
+ * (aot compilation, no run-time offset for the key, or the arch probe failed).
+ * The caller is responsible for adding the instruction to a basic block.
+ */
+static MonoInst*
+mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
+{
+	int tls_offset;
+
+	/* Fast tls is never emitted for aot code; bail out before querying the
+	 * offset so we don't depend on the key having been initialized. */
+	if (cfg->compile_aot)
+		return NULL;
+
+	tls_offset = mono_tls_get_tls_offset (key);
+	if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
+		MonoInst *ins;
+		MONO_INST_NEW (cfg, ins, OP_TLS_SET);
+		ins->sreg1 = value->dreg;
+		ins->inst_offset = tls_offset;
+		return ins;
+	}
+	return NULL;
+}
+
+
MonoInst*
mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
{
+ MonoInst *fast_tls = mono_create_fast_tls_getter (cfg, key);
+
+ if (fast_tls) {
+ MONO_ADD_INS (cfg->cbb, fast_tls);
+ return fast_tls;
+ }
+
if (cfg->compile_aot) {
MonoInst *addr;
/*
static MonoInst*
mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
{
+ MonoInst *fast_tls = mono_create_fast_tls_setter (cfg, value, key);
+
+ if (fast_tls) {
+ MONO_ADD_INS (cfg->cbb, fast_tls);
+ return fast_tls;
+ }
+
if (cfg->compile_aot) {
MonoInst *addr;
EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
#endif /* DISABLE_JIT */
-#ifdef __APPLE__
+#ifdef TARGET_MACH
static int tls_gs_offset;
#endif
gboolean
-mono_amd64_have_tls_get (void)
+mono_arch_have_fast_tls (void)
{
#ifdef TARGET_MACH
- static gboolean have_tls_get = FALSE;
+ static gboolean have_fast_tls = FALSE;
static gboolean inited = FALSE;
+ guint8 *ins;
if (inited)
- return have_tls_get;
+ return have_fast_tls;
-#if MONO_HAVE_FAST_TLS
- guint8 *ins = (guint8*)pthread_getspecific;
+ ins = (guint8*)pthread_getspecific;
/*
* We're looking for these two instructions:
* mov %gs:[offset](,%rdi,8),%rax
* retq
*/
- have_tls_get = ins [0] == 0x65 &&
+ have_fast_tls = ins [0] == 0x65 &&
ins [1] == 0x48 &&
ins [2] == 0x8b &&
ins [3] == 0x04 &&
* popq %rbp
* retq
*/
- if (!have_tls_get) {
- have_tls_get = ins [0] == 0x55 &&
+ if (!have_fast_tls) {
+ have_fast_tls = ins [0] == 0x55 &&
ins [1] == 0x48 &&
ins [2] == 0x89 &&
ins [3] == 0xe5 &&
tls_gs_offset = ins[9];
}
-#endif
-
inited = TRUE;
- return have_tls_get;
+ return have_fast_tls;
#elif defined(TARGET_ANDROID)
return FALSE;
#else
#endif
}
-int
-mono_amd64_get_tls_gs_offset (void)
-{
-#ifdef TARGET_OSX
- return tls_gs_offset;
-#else
- g_assert_not_reached ();
- return -1;
-#endif
-}
-
/*
* mono_amd64_emit_tls_get:
* @code: buffer to store code to
*
* Returns: a pointer to the end of the stored code
*/
-guint8*
+static guint8*
mono_amd64_emit_tls_get (guint8* code, int dreg, int tls_offset)
{
#ifdef TARGET_WIN32
amd64_mov_reg_membase (code, dreg, dreg, (tls_offset * 8) - 0x200, 8);
amd64_patch (buf [0], code);
}
-#elif defined(__APPLE__)
+#elif defined(TARGET_MACH)
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_reg_mem (code, dreg, tls_gs_offset + (tls_offset * 8), 8);
#else
return code;
}
-#ifdef TARGET_WIN32
-
-#define MAX_TEB_TLS_SLOTS 64
-#define TEB_TLS_SLOTS_OFFSET 0x1480
-#define TEB_TLS_EXPANSION_SLOTS_OFFSET 0x1780
-
static guint8*
-emit_tls_get_reg_windows (guint8* code, int dreg, int offset_reg)
-{
- int tmp_reg = -1;
- guint8 * more_than_64_slots = NULL;
- guint8 * empty_slot = NULL;
- guint8 * tls_get_reg_done = NULL;
-
- //Use temporary register for offset calculation?
- if (dreg == offset_reg) {
- tmp_reg = dreg == AMD64_RAX ? AMD64_RCX : AMD64_RAX;
- amd64_push_reg (code, tmp_reg);
- amd64_mov_reg_reg (code, tmp_reg, offset_reg, sizeof (gpointer));
- offset_reg = tmp_reg;
- }
-
- //TEB TLS slot array only contains MAX_TEB_TLS_SLOTS items, if more is used the expansion slots must be addressed.
- amd64_alu_reg_imm (code, X86_CMP, offset_reg, MAX_TEB_TLS_SLOTS);
- more_than_64_slots = code;
- amd64_branch8 (code, X86_CC_GE, 0, TRUE);
-
- //TLS slot array, _TEB.TlsSlots, is at offset TEB_TLS_SLOTS_OFFSET and index is offset * 8 in Windows 64-bit _TEB structure.
- amd64_shift_reg_imm (code, X86_SHL, offset_reg, 3);
- amd64_alu_reg_imm (code, X86_ADD, offset_reg, TEB_TLS_SLOTS_OFFSET);
-
- //TEB pointer is stored in GS segment register on Windows x64. TLS slot is located at calculated offset from that pointer.
- x86_prefix (code, X86_GS_PREFIX);
- amd64_mov_reg_membase (code, dreg, offset_reg, 0, sizeof (gpointer));
-
- tls_get_reg_done = code;
- amd64_jump8 (code, 0);
-
- amd64_patch (more_than_64_slots, code);
-
- //TLS expansion slots, _TEB.TlsExpansionSlots, is at offset TEB_TLS_EXPANSION_SLOTS_OFFSET in Windows 64-bit _TEB structure.
- x86_prefix (code, X86_GS_PREFIX);
- amd64_mov_reg_mem (code, dreg, TEB_TLS_EXPANSION_SLOTS_OFFSET, sizeof (gpointer));
-
- //Check for NULL in _TEB.TlsExpansionSlots.
- amd64_test_reg_reg (code, dreg, dreg);
- empty_slot = code;
- amd64_branch8 (code, X86_CC_EQ, 0, TRUE);
-
- //TLS expansion slots are at index offset into the expansion array.
- //Calculate for the MAX_TEB_TLS_SLOTS offsets, since the interessting offset is offset_reg - MAX_TEB_TLS_SLOTS.
- amd64_alu_reg_imm (code, X86_SUB, offset_reg, MAX_TEB_TLS_SLOTS);
- amd64_shift_reg_imm (code, X86_SHL, offset_reg, 3);
-
- amd64_mov_reg_memindex (code, dreg, dreg, 0, offset_reg, 0, sizeof (gpointer));
-
- amd64_patch (empty_slot, code);
- amd64_patch (tls_get_reg_done, code);
-
- if (tmp_reg != -1)
- amd64_pop_reg (code, tmp_reg);
-
- return code;
-}
-
-#endif
-
-static guint8*
-emit_tls_get_reg (guint8* code, int dreg, int offset_reg)
-{
- /* offset_reg contains a value translated by mono_arch_translate_tls_offset () */
-#ifdef TARGET_OSX
- if (dreg != offset_reg)
- amd64_mov_reg_reg (code, dreg, offset_reg, sizeof (mgreg_t));
- amd64_prefix (code, X86_GS_PREFIX);
- amd64_mov_reg_membase (code, dreg, dreg, 0, sizeof (mgreg_t));
-#elif defined(__linux__)
- int tmpreg = -1;
-
- if (dreg == offset_reg) {
- /* Use a temporary reg by saving it to the redzone */
- tmpreg = dreg == AMD64_RAX ? AMD64_RCX : AMD64_RAX;
- amd64_mov_membase_reg (code, AMD64_RSP, -8, tmpreg, 8);
- amd64_mov_reg_reg (code, tmpreg, offset_reg, sizeof (gpointer));
- offset_reg = tmpreg;
- }
- x86_prefix (code, X86_FS_PREFIX);
- amd64_mov_reg_mem (code, dreg, 0, 8);
- amd64_mov_reg_memindex (code, dreg, dreg, 0, offset_reg, 0, 8);
- if (tmpreg != -1)
- amd64_mov_reg_membase (code, tmpreg, AMD64_RSP, -8, 8);
-#elif defined(TARGET_WIN32)
- code = emit_tls_get_reg_windows (code, dreg, offset_reg);
-#else
- g_assert_not_reached ();
-#endif
- return code;
-}
-
-static guint8*
-amd64_emit_tls_set (guint8 *code, int sreg, int tls_offset)
+mono_amd64_emit_tls_set (guint8 *code, int sreg, int tls_offset)
{
#ifdef TARGET_WIN32
g_assert_not_reached ();
-#elif defined(__APPLE__)
+#elif defined(TARGET_MACH)
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_mem_reg (code, tls_gs_offset + (tls_offset * 8), sreg, 8);
#else
return code;
}
-static guint8*
-amd64_emit_tls_set_reg (guint8 *code, int sreg, int offset_reg)
-{
- /* offset_reg contains a value translated by mono_arch_translate_tls_offset () */
-#ifdef TARGET_WIN32
- g_assert_not_reached ();
-#elif defined(__APPLE__)
- x86_prefix (code, X86_GS_PREFIX);
- amd64_mov_membase_reg (code, offset_reg, 0, sreg, 8);
-#else
- x86_prefix (code, X86_FS_PREFIX);
- amd64_mov_membase_reg (code, offset_reg, 0, sreg, 8);
-#endif
- return code;
-}
-
/*
* emit_setup_lmf:
*
code = mono_amd64_emit_tls_get (code, ins->dreg, ins->inst_offset);
break;
}
- case OP_TLS_GET_REG:
- code = emit_tls_get_reg (code, ins->dreg, ins->sreg1);
- break;
case OP_TLS_SET: {
- code = amd64_emit_tls_set (code, ins->sreg1, ins->inst_offset);
- break;
- }
- case OP_TLS_SET_REG: {
- code = amd64_emit_tls_set_reg (code, ins->sreg1, ins->sreg2);
+ code = mono_amd64_emit_tls_set (code, ins->sreg1, ins->inst_offset);
break;
}
case OP_MEMORY_BARRIER: {
if (method->save_lmf) {
/* check if we need to restore protection of the stack after a stack overflow */
- /* FIXME */
-#if 0
- if (!cfg->compile_aot && mono_get_jit_tls_offset () != -1) {
+ if (!cfg->compile_aot && mono_arch_have_fast_tls () && mono_tls_get_tls_offset (TLS_KEY_JIT_TLS) != -1) {
guint8 *patch;
- code = mono_amd64_emit_tls_get (code, AMD64_RCX, mono_get_jit_tls_offset ());
+ code = mono_amd64_emit_tls_get (code, AMD64_RCX, mono_tls_get_tls_offset (TLS_KEY_JIT_TLS));
/* we load the value in a separate instruction: this mechanism may be
* used later as a safer way to do thread interruption
*/
} else {
/* FIXME: maybe save the jit tls in the prolog */
}
-#endif
if (cfg->used_int_regs & (1 << AMD64_RBP)) {
amd64_mov_reg_membase (code, AMD64_RBP, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
}
#define MONO_ARCH_EMULATE_FREM 1
#define MONO_ARCH_HAVE_IS_INT_OVERFLOW 1
+#ifndef HOST_WIN32
#define MONO_ARCH_ENABLE_MONO_LMF_VAR 1
+#endif
#define MONO_ARCH_HAVE_INVALIDATE_METHOD 1
#define MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES 1
#define MONO_ARCH_IMT_REG AMD64_R10
guint64
mono_amd64_get_original_ip (void);
+gboolean
+mono_amd64_have_fast_tls (void);
+
GSList*
mono_amd64_get_exception_trampolines (gboolean aot);
return code;
}
+gboolean
+mono_arch_have_fast_tls (void)
+{
+	/* Fast inlined tls access is not implemented on this architecture; the
+	 * generic native tls getters/setters are always used instead. */
+	return FALSE;
+}
+
/*
* emit_save_lmf:
*
return emit_aotconst_full (NULL, (MonoJumpInfo**)ji, code, code_start, dreg, patch_type, data);
}
+gboolean
+mono_arch_have_fast_tls (void)
+{
+	/* Fast inlined tls access is not implemented on this architecture; the
+	 * generic native tls getters/setters are always used instead. */
+	return FALSE;
+}
+
static guint8*
emit_tls_get (guint8 *code, int dreg, int tls_offset)
{
{
}
+gboolean
+mono_arch_have_fast_tls (void)
+{
+	/* Fast inlined tls access is not implemented on this architecture; the
+	 * generic native tls getters/setters are always used instead. */
+	return FALSE;
+}
+
/*
* This function returns the optimizations supported on this cpu.
*/
mono_os_mutex_destroy (&mini_arch_mutex);
}
+gboolean
+mono_arch_have_fast_tls (void)
+{
+	/* Fast inlined tls access is not implemented on this architecture; the
+	 * generic native tls getters/setters are always used instead. */
+	return FALSE;
+}
+
/*
* This function returns the optimizations supported on this cpu.
*/
mono_os_mutex_destroy (&mini_arch_mutex);
}
+gboolean
+mono_arch_have_fast_tls (void)
+{
+	/* Fast inlined tls access is not implemented on this architecture; the
+	 * generic native tls getters/setters are always used instead. */
+	return FALSE;
+}
+
/*
* This function returns the optimizations supported on this cpu.
*/
/*========================= End of Function ========================*/
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_arch_have_fast_tls */
+/* */
+/* Function - Returns whether we use fast inlined thread local */
+/* storage managed access, instead of falling back */
+/* to native code. */
+/* */
+/*------------------------------------------------------------------*/
+
+gboolean
+mono_arch_have_fast_tls (void)
+{
+	/* Not supported on this target; fall back to the native accessors. */
+	return FALSE;
+}
+
+/*========================= End of Function ========================*/
+
/*------------------------------------------------------------------*/
/* */
/* Name - mono_arch_cpu_optimizations */
{
}
+gboolean
+mono_arch_have_fast_tls (void)
+{
+	/* Fast inlined tls access is not implemented on this architecture; the
+	 * generic native tls getters/setters are always used instead. */
+	return FALSE;
+}
+
/*
* This function returns the optimizations supported on this cpu.
*/
#endif
gboolean
-mono_x86_have_tls_get (void)
+mono_arch_have_fast_tls (void)
{
#ifdef TARGET_MACH
static gboolean have_tls_get = FALSE;
int mono_arch_translate_tls_offset (int offset);
gboolean mono_arch_opcode_supported (int opcode);
void mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func);
+gboolean mono_arch_have_fast_tls (void);
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
gboolean mono_arch_is_soft_float (void);
#include "mono-tls.h"
+/*
+ * On all platforms we should be able to use either __thread or pthread/TlsGetValue.
+ * Certain platforms will support fast tls only when using one of the thread local
+ * storage backends. By default this is __thread if we have HAVE_KW_THREAD defined.
+ *
+ * By default all platforms will call into these native getters whenever they need
+ * to get a tls value. On certain platforms we can try to be faster than this and
+ * avoid the call. We call this fast tls and each platform defines its own way to
+ * achieve this. For this, a platform has to define MONO_ARCH_HAVE_FAST_TLS,
+ * and provide alternative getters/setters for a MonoTlsKey. In order to have fast
+ * getter/setters, the platform has to declare a way to fetch an internal offset
+ * (MONO_THREAD_VAR_OFFSET) which is stored here, and in the arch specific file
+ * probe the system to see if we can use the offset initialized here. If these
+ * run-time checks don't succeed we just use the fallbacks.
+ *
+ * In case we would wish to provide fast inlined tls for aot code, we would need
+ * to be sure that, at run-time, these two platform checks would never fail
+ * otherwise the tls getter/setters that we emitted would not work. Normally,
+ * there is little incentive to support this since tls access is most common in
+ * wrappers and managed allocators, both of which are not aot-ed by default.
+ * So far, we never supported inlined fast tls on full-aot systems.
+ */
#ifdef HAVE_KW_THREAD
#define USE_KW_THREAD
#endif
-/* Tls variables for each MonoTlsKey */
#ifdef USE_KW_THREAD
-static __thread gpointer mono_tls_thread;
-static __thread gpointer mono_tls_jit_tls;
-static __thread gpointer mono_tls_domain;
-static __thread gpointer mono_tls_lmf;
-static __thread gpointer mono_tls_sgen_thread_info;
-static __thread gpointer mono_tls_lmf_addr;
+
+/* tls attribute */
+#if HAVE_TLS_MODEL_ATTR
+
+#if defined(__PIC__) && !defined(PIC)
+/*
+ * Must be compiling -fPIE, for executables. Build PIC
+ * but with initial-exec.
+ * http://bugs.gentoo.org/show_bug.cgi?id=165547
+ */
+#define PIC
+#define PIC_INITIAL_EXEC
+#endif
+
+/*
+ * Define this if you want a faster libmono, which cannot be loaded dynamically as a
+ * module.
+ */
+//#define PIC_INITIAL_EXEC
+
+#if defined(PIC)
+
+#ifdef PIC_INITIAL_EXEC
+#define MONO_TLS_FAST __attribute__((tls_model("initial-exec")))
+#else
+#if defined (__powerpc__)
+/* local dynamic requires a call to __tls_get_addr to look up the
+ TLS block address via the Dynamic Thread Vector. In this case Thread
+ Pointer relative offsets can't be used as this modules TLS was
+   allocated separately (non-contiguously) from the initial TLS
+ block.
+
+ For now we will disable this. */
+#define MONO_TLS_FAST
+#else
+#define MONO_TLS_FAST __attribute__((tls_model("local-dynamic")))
+#endif
+#endif
+
+#else
+
+#define MONO_TLS_FAST __attribute__((tls_model("local-exec")))
+
+#endif
+
+#else
+#define MONO_TLS_FAST
+#endif
+
+/* Runtime offset detection */
+#if defined(TARGET_AMD64) && !defined(TARGET_MACH) && !defined(HOST_WIN32) /* __thread likely not tested on mac/win */
+
+#if defined(PIC)
+// This only works if libmono is linked into the application
+#define MONO_THREAD_VAR_OFFSET(var,offset) do { guint64 foo; __asm ("movq " #var "@GOTTPOFF(%%rip), %0" : "=r" (foo)); offset = foo; } while (0)
#else
+#define MONO_THREAD_VAR_OFFSET(var,offset) do { guint64 foo; __asm ("movq $" #var "@TPOFF, %0" : "=r" (foo)); offset = foo; } while (0)
+#endif
+
+#else
+
+#define MONO_THREAD_VAR_OFFSET(var,offset) (offset) = -1
+
+#endif
+
+/* Tls variables for each MonoTlsKey */
+
+static __thread gpointer mono_tls_thread MONO_TLS_FAST;
+static __thread gpointer mono_tls_jit_tls MONO_TLS_FAST;
+static __thread gpointer mono_tls_domain MONO_TLS_FAST;
+static __thread gpointer mono_tls_lmf MONO_TLS_FAST;
+static __thread gpointer mono_tls_sgen_thread_info MONO_TLS_FAST;
+static __thread gpointer mono_tls_lmf_addr MONO_TLS_FAST;
+
+#else
+
+#if defined(TARGET_AMD64) && (defined(TARGET_MACH) || defined(HOST_WIN32))
+#define MONO_THREAD_VAR_OFFSET(key,offset) (offset) = (gint32)key
+#else
+#define MONO_THREAD_VAR_OFFSET(var,offset) (offset) = -1
+#endif
+
static MonoNativeTlsKey mono_tls_key_thread;
static MonoNativeTlsKey mono_tls_key_jit_tls;
static MonoNativeTlsKey mono_tls_key_domain;
static MonoNativeTlsKey mono_tls_key_lmf;
static MonoNativeTlsKey mono_tls_key_sgen_thread_info;
static MonoNativeTlsKey mono_tls_key_lmf_addr;
+
#endif
+static gint32 tls_offsets [TLS_KEY_NUM];
+
#ifdef USE_KW_THREAD
#define MONO_TLS_GET_VALUE(tls_var,tls_key) (tls_var)
#define MONO_TLS_SET_VALUE(tls_var,tls_key,value) (tls_var = value)
void
mono_tls_init_gc_keys (void)
{
+	/* Besides allocating the native key (non-__thread case), also record the
+	 * arch tls offset of the sgen slot so the JIT can emit fast inlined
+	 * access; MONO_THREAD_VAR_OFFSET stores -1 when the offset cannot be
+	 * computed on this platform. */
-#ifndef USE_KW_THREAD
+#ifdef USE_KW_THREAD
+	MONO_THREAD_VAR_OFFSET (mono_tls_sgen_thread_info, tls_offsets [TLS_KEY_SGEN_THREAD_INFO]);
+#else
	mono_native_tls_alloc (&mono_tls_key_sgen_thread_info, NULL);
+	MONO_THREAD_VAR_OFFSET (mono_tls_key_sgen_thread_info, tls_offsets [TLS_KEY_SGEN_THREAD_INFO]);
#endif
}
void
mono_tls_init_runtime_keys (void)
{
+	/* Besides allocating the native keys (non-__thread case), also record the
+	 * arch tls offset of each runtime slot so the JIT can emit fast inlined
+	 * access; MONO_THREAD_VAR_OFFSET stores -1 when the offset cannot be
+	 * computed on this platform. */
-#ifndef USE_KW_THREAD
+#ifdef USE_KW_THREAD
+	MONO_THREAD_VAR_OFFSET (mono_tls_thread, tls_offsets [TLS_KEY_THREAD]);
+	MONO_THREAD_VAR_OFFSET (mono_tls_jit_tls, tls_offsets [TLS_KEY_JIT_TLS]);
+	MONO_THREAD_VAR_OFFSET (mono_tls_domain, tls_offsets [TLS_KEY_DOMAIN]);
+	MONO_THREAD_VAR_OFFSET (mono_tls_lmf, tls_offsets [TLS_KEY_LMF]);
+	MONO_THREAD_VAR_OFFSET (mono_tls_lmf_addr, tls_offsets [TLS_KEY_LMF_ADDR]);
+#else
	mono_native_tls_alloc (&mono_tls_key_thread, NULL);
+	MONO_THREAD_VAR_OFFSET (mono_tls_key_thread, tls_offsets [TLS_KEY_THREAD]);
	mono_native_tls_alloc (&mono_tls_key_jit_tls, NULL);
+	MONO_THREAD_VAR_OFFSET (mono_tls_key_jit_tls, tls_offsets [TLS_KEY_JIT_TLS]);
	mono_native_tls_alloc (&mono_tls_key_domain, NULL);
+	MONO_THREAD_VAR_OFFSET (mono_tls_key_domain, tls_offsets [TLS_KEY_DOMAIN]);
	mono_native_tls_alloc (&mono_tls_key_lmf, NULL);
+	MONO_THREAD_VAR_OFFSET (mono_tls_key_lmf, tls_offsets [TLS_KEY_LMF]);
	mono_native_tls_alloc (&mono_tls_key_lmf_addr, NULL);
+	MONO_THREAD_VAR_OFFSET (mono_tls_key_lmf_addr, tls_offsets [TLS_KEY_LMF_ADDR]);
#endif
}
#endif
}
+
+/*
+ * Gets the tls offset associated with the key. This offset is set at key
+ * initialization (at runtime). Certain targets can implement computing
+ * this offset and using it at runtime for fast inlined tls access.
+ */
+gint32
+mono_tls_get_tls_offset (MonoTlsKey key)
+{
+	/* NOTE(review): this asserts the slot is non-zero, i.e. that the key was
+	 * initialized — but a platform where a variable legitimately lands at tls
+	 * offset 0 would trip it, while the -1 "unsupported" sentinel passes it.
+	 * Confirm offset 0 cannot occur on the supported targets. */
+	g_assert (tls_offsets [key]);
+	return tls_offsets [key];
+}
+
/*
* Returns the getter (gpointer (*)(void)) for the mono tls key.
* Managed code will always get the value by calling this getter.
void mono_tls_init_gc_keys (void);
void mono_tls_init_runtime_keys (void);
void mono_tls_free_keys (void);
+gint32 mono_tls_get_tls_offset (MonoTlsKey key);
gpointer mono_tls_get_tls_getter (MonoTlsKey key, gboolean name);
gpointer mono_tls_get_tls_setter (MonoTlsKey key, gboolean name);
gpointer mono_tls_get_tls_addr (MonoTlsKey key);