{
ThrowIfDisposedAndClosed ();
- if (optionLevel == SocketOptionLevel.Socket && optionName == SocketOptionName.ReuseAddress && optionValue != 0 && !SupportsPortReuse (protocolType))
- throw new SocketException ((int) SocketError.OperationNotSupported, "Operating system sockets do not support ReuseAddress.\nIf your socket is not intended to bind to the same address and port multiple times remove this option, otherwise you should ignore this exception inside a try catch and check that ReuseAddress is true before binding to the same address and port multiple times.");
-
int error;
SetSocketOption_internal (m_Handle, optionLevel, optionName, null, null, optionValue, out error);
unresolved_algorithms.Add (nameECDsa_2, defaultECDsa);
unresolved_algorithms.Add (nameECDsa_3, defaultECDsa);
+#if MONODROID
+ algorithms.Add (nameSHA1Cng, defaultSHA1);
+ algorithms.Add (nameSHA256Cng, defaultSHA256);
+ algorithms.Add (nameSHA256Provider, defaultSHA256);
+ algorithms.Add (nameSHA384Cng, defaultSHA384);
+ algorithms.Add (nameSHA384Provider, defaultSHA384);
+ algorithms.Add (nameSHA512Cng, defaultSHA512);
+ algorithms.Add (nameSHA512Provider, defaultSHA512);
+#else
unresolved_algorithms.Add (nameSHA1Cng, defaultSHA1Cng);
unresolved_algorithms.Add (nameSHA256Cng, defaultSHA256Cng);
unresolved_algorithms.Add (nameSHA256Provider, defaultSHA256Provider);
unresolved_algorithms.Add (nameSHA384Provider, defaultSHA384Provider);
unresolved_algorithms.Add (nameSHA512Cng, defaultSHA512Cng);
unresolved_algorithms.Add (nameSHA512Provider, defaultSHA512Provider);
+#endif
Dictionary<string,string> oid = new Dictionary<string, string> (StringComparer.OrdinalIgnoreCase);
// comments here are to match with MS implementation (but not with doc)
public void RSASignatureDescription ()
{
// TODO: this would be cleaner with NUnit TestCase'es but they're NUnit 2.5+ :(
-#if FULL_AOT_RUNTIME || MONOTOUCH
+#if FULL_AOT_RUNTIME || MONOTOUCH || MONODROID
RSASignatureDescriptionCore ("http://www.w3.org/2000/09/xmldsig#rsa-sha1", "System.Security.Cryptography.SHA1Cng", "System.Security.Cryptography.SHA1CryptoServiceProvider");
RSASignatureDescriptionCore ("http://www.w3.org/2001/04/xmldsig-more#rsa-sha256", "System.Security.Cryptography.SHA256Cng", "System.Security.Cryptography.SHA256Managed");
RSASignatureDescriptionCore ("http://www.w3.org/2001/04/xmldsig-more#rsa-sha384", "System.Security.Cryptography.SHA384Cng", "System.Security.Cryptography.SHA384Managed");
}
}
-}
\ No newline at end of file
+}
<Compile Include="..\referencesource\mscorlib\system\security\util\parser.cs" />\r
<Compile Include="..\referencesource\mscorlib\system\security\util\tokenizer.cs" />\r
<Compile Include="..\referencesource\mscorlib\system\serializableattribute.cs" />\r
+ <Compile Include="..\referencesource\mscorlib\system\sharedstatics.cs" />\r
<Compile Include="..\referencesource\mscorlib\system\single.cs" />\r
<Compile Include="..\referencesource\mscorlib\system\stackoverflowexception.cs" />\r
<Compile Include="..\referencesource\mscorlib\system\string.cs" />\r
<Compile Include="ReferenceSources\RuntimeHandles.cs" />\r
<Compile Include="ReferenceSources\RuntimeType.cs" />\r
<Compile Include="ReferenceSources\SecurityContext.cs" />\r
- <Compile Include="ReferenceSources\SharedStatics.cs" />\r
<Compile Include="ReferenceSources\String.cs" />\r
<Compile Include="ReferenceSources\TextInfo.cs" />\r
<Compile Include="ReferenceSources\Type.cs" />\r
encode_method_ref (acfg, patch_info->data.virt_method->method, p, &p);
break;
case MONO_PATCH_INFO_GC_SAFE_POINT_FLAG:
+ case MONO_PATCH_INFO_GET_TLS_TRAMP:
break;
default:
g_warning ("unable to handle jump info %d", patch_info->type);
get_got_offset (acfg, FALSE, ji);
get_got_offset (acfg, TRUE, ji);
+ ji = (MonoJumpInfo *)mono_mempool_alloc0 (acfg->mempool, sizeof (MonoJumpInfo));
+ ji->type = MONO_PATCH_INFO_GET_TLS_TRAMP;
+ get_got_offset (acfg, FALSE, ji);
+ get_got_offset (acfg, TRUE, ji);
+
for (i = 0; i < sizeof (preinited_jit_icalls) / sizeof (char*); ++i) {
ji = (MonoJumpInfo *)mono_mempool_alloc0 (acfg->mempool, sizeof (MonoAotCompile));
ji->type = MONO_PATCH_INFO_INTERNAL_METHOD;
break;
}
case MONO_PATCH_INFO_GC_SAFE_POINT_FLAG:
+ case MONO_PATCH_INFO_GET_TLS_TRAMP:
break;
case MONO_PATCH_INFO_AOT_JIT_INFO:
ji->data.index = decode_value (p, &p);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
- } else if (cfg->compile_aot) {
+ } else {
int const_reg = alloc_preg (cfg);
int type_reg = alloc_preg (cfg);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
- } else {
- MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
- MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
}
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
static int i8_align;
static gpointer single_step_tramp, breakpoint_tramp;
+static gpointer get_tls_tramp;
/*
* The code generated for sequence points reads from this location, which is
static void mono_arch_compute_omit_fp (MonoCompile *cfg);
#endif
+static guint8*
+emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data);
+
const char*
mono_arch_regname (int reg)
{
{
#ifdef HAVE_FAST_TLS
code = mono_arm_emit_load_imm (code, ARMREG_R0, tls_offset);
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
- "mono_get_tls_key");
- code = emit_call_seq (cfg, code);
+ if (cfg->compile_aot) {
+ /*
+ * This opcode is generated by CEE_MONO_JIT_ATTACH, so it can execute on
+ * threads which are not yet attached to the runtime. This means we can't
+ * call it directly, since the call would go through the trampoline code
+ * which assumes the thread is attached. So use a separate patch info type
+ * for it, and load it from a preinitialized GOT slot.
+ */
+ code = emit_aotconst (cfg, code, ARMREG_R1, MONO_PATCH_INFO_GET_TLS_TRAMP, NULL);
+ code = emit_call_reg (code, ARMREG_R1);
+ } else {
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ "mono_get_tls_key");
+ code = emit_call_seq (cfg, code);
+ }
if (dreg != ARMREG_R0)
ARM_MOV_REG_REG (code, dreg, ARMREG_R0);
#else
#ifdef HAVE_FAST_TLS
if (tls_offset_reg != ARMREG_R0)
ARM_MOV_REG_REG (code, ARMREG_R0, tls_offset_reg);
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
- "mono_get_tls_key");
- code = emit_call_seq (cfg, code);
+ if (cfg->compile_aot) {
+ code = emit_aotconst (cfg, code, ARMREG_R1, MONO_PATCH_INFO_GET_TLS_TRAMP, NULL);
+ code = emit_call_reg (code, ARMREG_R1);
+ } else {
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ "mono_get_tls_key");
+ code = emit_call_seq (cfg, code);
+ }
if (dreg != ARMREG_R0)
ARM_MOV_REG_REG (code, dreg, ARMREG_R0);
#else
mono_register_jit_icall (tls_imp.get_tls_thunk, "mono_get_tls_key", mono_create_icall_signature ("ptr ptr"), TRUE);
mono_register_jit_icall (tls_imp.set_tls_thunk, "mono_set_tls_key", mono_create_icall_signature ("void ptr ptr"), TRUE);
+ get_tls_tramp = tls_imp.get_tls_thunk;
+
if (tls_imp.get_tls_thunk_end) {
mono_tramp_info_register (
mono_tramp_info_create (
{
return get_call_info (mp, sig);
}
+
+/*
+ * mono_arch_get_get_tls_tramp:
+ *
+ * Return the cached TLS-getter thunk (saved from tls_imp.get_tls_thunk at
+ * TLS-init time). The AOT runtime uses this value to preinitialize the
+ * MONO_PATCH_INFO_GET_TLS_TRAMP GOT slot so it can be called from threads
+ * not yet attached to the runtime.
+ */
+gpointer
+mono_arch_get_get_tls_tramp (void)
+{
+ return get_tls_tramp;
+}
+
+/*
+ * emit_aotconst:
+ *
+ * Emit ARM code that loads an AOT constant of PATCH_TYPE/DATA into DREG.
+ * Records a patch at the current offset, then emits a PC-relative load of a
+ * word embedded right after a branch that skips over it (the NULL word is
+ * filled in when the patch is applied), and finally dereferences that value
+ * as a GOT-relative address. Returns the updated code pointer.
+ */
+static guint8*
+emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data)
+{
+ /* OP_AOTCONST */
+ mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
+ /* dreg <- word stored inline after the next branch */
+ ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
+ /* branch over the inline data word */
+ ARM_B (code, 0);
+ /* placeholder, rewritten by the AOT patcher — TODO(review): confirm it
+ * receives the GOT offset */
+ *(gpointer*)code = NULL;
+ code += 4;
+ /* Load the value from the GOT */
+ ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
+ return code;
+}
#define MONO_ARCH_HAVE_SDB_TRAMPOLINES 1
#define MONO_ARCH_HAVE_PATCH_CODE_NEW 1
#define MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT 1
+#define MONO_ARCH_HAVE_GET_TLS_TRAMP 1
#define MONO_ARCH_HAVE_TLS_GET (mono_arm_have_tls_get ())
#define MONO_ARCH_HAVE_TLS_GET_REG 1
mono_thread_state_init_from_handle (MonoThreadUnwindState *tctx, MonoThreadInfo *info)
{
kern_return_t ret;
- mach_msg_type_number_t num_state;
- thread_state_t state;
+ mach_msg_type_number_t num_state, num_fpstate;
+ thread_state_t state, fpstate;
ucontext_t ctx;
mcontext_t mctx;
MonoJitTlsData *jit_tls;
tctx->unwind_data [MONO_UNWIND_DATA_JIT_TLS] = NULL;
state = (thread_state_t) alloca (mono_mach_arch_get_thread_state_size ());
+ fpstate = (thread_state_t) alloca (mono_mach_arch_get_thread_fpstate_size ());
mctx = (mcontext_t) alloca (mono_mach_arch_get_mcontext_size ());
do {
- ret = mono_mach_arch_get_thread_state (info->native_handle, state, &num_state);
+ ret = mono_mach_arch_get_thread_states (info->native_handle, state, &num_state, fpstate, &num_fpstate);
} while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS)
return FALSE;
- mono_mach_arch_thread_state_to_mcontext (state, mctx);
+ mono_mach_arch_thread_states_to_mcontext (state, fpstate, mctx);
ctx.uc_mcontext = mctx;
mono_sigctx_to_monoctx (&ctx, &tctx->ctx);
case MONO_PATCH_INFO_GOT_OFFSET:
case MONO_PATCH_INFO_GC_SAFE_POINT_FLAG:
case MONO_PATCH_INFO_AOT_MODULE:
+ case MONO_PATCH_INFO_GET_TLS_TRAMP:
return (ji->type << 8);
case MONO_PATCH_INFO_CASTCLASS_CACHE:
return (ji->type << 8) | (ji->data.index);
case MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER:
target = mini_get_gsharedvt_wrapper (TRUE, NULL, patch_info->data.sig, NULL, -1, FALSE);
break;
+ case MONO_PATCH_INFO_GET_TLS_TRAMP:
+#ifdef MONO_ARCH_HAVE_GET_TLS_TRAMP
+ target = mono_arch_get_get_tls_tramp ();
+#else
+ target = NULL;
+#endif
+ break;
default:
g_assert_not_reached ();
}
gpointer mono_arch_create_monitor_enter_trampoline (MonoTrampInfo **info, gboolean is_v4, gboolean aot);
gpointer mono_arch_create_monitor_exit_trampoline (MonoTrampInfo **info, gboolean aot);
guint8 *mono_arch_create_llvm_native_thunk (MonoDomain *domain, guint8* addr) MONO_LLVM_INTERNAL;
+gpointer mono_arch_get_get_tls_tramp (void);
GList *mono_arch_get_allocatable_int_vars (MonoCompile *cfg);
GList *mono_arch_get_global_int_regs (MonoCompile *cfg);
GList *mono_arch_get_global_fp_regs (MonoCompile *cfg);
PATCH_INFO(GC_NURSERY_BITS, "gc_nursery_bits")
PATCH_INFO(GSHAREDVT_IN_WRAPPER, "gsharedvt_in_wrapper")
PATCH_INFO(ICALL_ADDR_CALL, "icall_addr_call")
+PATCH_INFO(GET_TLS_TRAMP, "get_tls_tramp")
guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
- int tramp_size = 256;
+ int tramp_size = 512;
int i, framesize, ctx_offset, cfa_offset, gregs_offset;
guint8 *code, *buf;
GSList *unwind_ops = NULL;
}
void
-mono_mach_arch_thread_state_to_mcontext (thread_state_t state, void *context)
+mono_mach_arch_thread_states_to_mcontext (thread_state_t state, thread_state_t fpstate, void *context)
{
x86_thread_state64_t *arch_state = (x86_thread_state64_t *) state;
+ x86_float_state64_t *arch_fpstate = (x86_float_state64_t *) fpstate;
struct __darwin_mcontext64 *ctx = (struct __darwin_mcontext64 *) context;
-
ctx->__ss = *arch_state;
+ ctx->__fs = *arch_fpstate;
}
void
-mono_mach_arch_mcontext_to_thread_state (void *context, thread_state_t state)
+mono_mach_arch_mcontext_to_thread_states (void *context, thread_state_t state, thread_state_t fpstate)
{
x86_thread_state64_t *arch_state = (x86_thread_state64_t *) state;
+ x86_float_state64_t *arch_fpstate = (x86_float_state64_t *) fpstate;
struct __darwin_mcontext64 *ctx = (struct __darwin_mcontext64 *) context;
-
*arch_state = ctx->__ss;
+ *arch_fpstate = ctx->__fs;
}
void
-mono_mach_arch_thread_state_to_mono_context (thread_state_t state, MonoContext *context)
+mono_mach_arch_thread_states_to_mono_context (thread_state_t state, thread_state_t fpstate, MonoContext *context)
{
x86_thread_state64_t *arch_state = (x86_thread_state64_t *) state;
+ x86_float_state64_t *arch_fpstate = (x86_float_state64_t *) fpstate;
context->gregs [AMD64_RAX] = arch_state->__rax;
context->gregs [AMD64_RBX] = arch_state->__rbx;
context->gregs [AMD64_RCX] = arch_state->__rcx;
context->gregs [AMD64_R14] = arch_state->__r14;
context->gregs [AMD64_R15] = arch_state->__r15;
context->gregs [AMD64_RIP] = arch_state->__rip;
+ context->fregs [AMD64_XMM0] = arch_fpstate->__fpu_xmm0;
+ context->fregs [AMD64_XMM1] = arch_fpstate->__fpu_xmm1;
+ context->fregs [AMD64_XMM2] = arch_fpstate->__fpu_xmm2;
+ context->fregs [AMD64_XMM3] = arch_fpstate->__fpu_xmm3;
+ context->fregs [AMD64_XMM4] = arch_fpstate->__fpu_xmm4;
+ context->fregs [AMD64_XMM5] = arch_fpstate->__fpu_xmm5;
+ context->fregs [AMD64_XMM6] = arch_fpstate->__fpu_xmm6;
+ context->fregs [AMD64_XMM7] = arch_fpstate->__fpu_xmm7;
+ context->fregs [AMD64_XMM8] = arch_fpstate->__fpu_xmm8;
+ context->fregs [AMD64_XMM9] = arch_fpstate->__fpu_xmm9;
+ context->fregs [AMD64_XMM10] = arch_fpstate->__fpu_xmm10;
+ context->fregs [AMD64_XMM11] = arch_fpstate->__fpu_xmm11;
+ context->fregs [AMD64_XMM12] = arch_fpstate->__fpu_xmm12;
+ context->fregs [AMD64_XMM13] = arch_fpstate->__fpu_xmm13;
+ context->fregs [AMD64_XMM14] = arch_fpstate->__fpu_xmm14;
+ context->fregs [AMD64_XMM15] = arch_fpstate->__fpu_xmm15;
}
int
return sizeof (x86_thread_state64_t);
}
+/*
+ * Size in bytes of the 64-bit float-state buffer callers must allocate
+ * (e.g. via alloca) before calling mono_mach_arch_get_thread_states ().
+ */
+int
+mono_mach_arch_get_thread_fpstate_size ()
+{
+ return sizeof (x86_float_state64_t);
+}
+
kern_return_t
-mono_mach_arch_get_thread_state (thread_port_t thread, thread_state_t state, mach_msg_type_number_t *count)
+mono_mach_arch_get_thread_states (thread_port_t thread, thread_state_t state, mach_msg_type_number_t *count, thread_state_t fpstate, mach_msg_type_number_t *fpcount)
{
- x86_thread_state64_t *arch_state = (x86_thread_state64_t *) state;
+ x86_thread_state64_t *arch_state = (x86_thread_state64_t *)state;
+ x86_float_state64_t *arch_fpstate = (x86_float_state64_t *)fpstate;
kern_return_t ret;
*count = x86_THREAD_STATE64_COUNT;
+ *fpcount = x86_FLOAT_STATE64_COUNT;
- ret = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t) arch_state, count);
+ ret = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t)arch_state, count);
+ if (ret != KERN_SUCCESS)
+ return ret;
+ ret = thread_get_state (thread, x86_FLOAT_STATE64, (thread_state_t)arch_fpstate, fpcount);
return ret;
}
kern_return_t
-mono_mach_arch_set_thread_state (thread_port_t thread, thread_state_t state, mach_msg_type_number_t count)
+mono_mach_arch_set_thread_states (thread_port_t thread, thread_state_t state, mach_msg_type_number_t count, thread_state_t fpstate, mach_msg_type_number_t fpcount)
{
+ /* Apply the general-purpose state first, then the float state; bail out on
+ * the first failure. NOTE(review): a failure on the second call leaves the
+ * GP state already applied — confirm callers tolerate that. */
+ kern_return_t ret;
+ ret = thread_set_state (thread, x86_THREAD_STATE64, state, count);
+ if (ret != KERN_SUCCESS)
+ return ret;
+ ret = thread_set_state (thread, x86_FLOAT_STATE64, fpstate, fpcount);
+ return ret;
}
void *
#include "utils/mono-sigcontext.h"
#include "mach-support.h"
+// For reg numbers
+#include <mono/arch/amd64/amd64-codegen.h>
+
/* Known offsets used for TLS storage*/
/* All OSX versions up to 10.8 */
}
void
-mono_mach_arch_thread_state_to_mcontext (thread_state_t state, void *context)
+mono_mach_arch_thread_states_to_mcontext (thread_state_t state, thread_state_t fpstate, void *context)
{
x86_thread_state32_t *arch_state = (x86_thread_state32_t *) state;
+ x86_float_state32_t *arch_fpstate = (x86_float_state32_t *) fpstate;
struct __darwin_mcontext32 *ctx = (struct __darwin_mcontext32 *) context;
-
ctx->__ss = *arch_state;
+ ctx->__fs = *arch_fpstate;
}
void
-mono_mach_arch_mcontext_to_thread_state (void *context, thread_state_t state)
+mono_mach_arch_mcontext_to_thread_states (void *context, thread_state_t state, thread_state_t fpstate)
{
x86_thread_state32_t *arch_state = (x86_thread_state32_t *) state;
+ x86_float_state32_t *arch_fpstate = (x86_float_state32_t *) fpstate;
struct __darwin_mcontext32 *ctx = (struct __darwin_mcontext32 *) context;
-
*arch_state = ctx->__ss;
+ *arch_fpstate = ctx->__fs;
}
void
-mono_mach_arch_thread_state_to_mono_context (thread_state_t state, MonoContext *context)
+mono_mach_arch_thread_states_to_mono_context (thread_state_t state, thread_state_t fpstate, MonoContext *context)
{
x86_thread_state32_t *arch_state = (x86_thread_state32_t *) state;
+/* Cast the float state from FPSTATE, not STATE: casting the general-purpose
+ * state buffer here makes every __fpu_xmm* read below return garbage (and can
+ * read past the end of the smaller x86_thread_state32_t allocation). */
+x86_float_state32_t *arch_fpstate = (x86_float_state32_t *) fpstate;
context->eax = arch_state->__eax;
context->ebx = arch_state->__ebx;
context->ecx = arch_state->__ecx;
+/* NOTE(review): esi is loaded from __edi and edi from __esi below — this
+ * cross-wiring looks like a pre-existing bug; confirm against the Mach
+ * x86_thread_state32_t layout before relying on these two registers. */
context->esi = arch_state->__edi;
context->edi = arch_state->__esi;
context->eip = arch_state->__eip;
+ context->fregs [X86_XMM0] = arch_fpstate->__fpu_xmm0;
+ context->fregs [X86_XMM1] = arch_fpstate->__fpu_xmm1;
+ context->fregs [X86_XMM2] = arch_fpstate->__fpu_xmm2;
+ context->fregs [X86_XMM3] = arch_fpstate->__fpu_xmm3;
+ context->fregs [X86_XMM4] = arch_fpstate->__fpu_xmm4;
+ context->fregs [X86_XMM5] = arch_fpstate->__fpu_xmm5;
+ context->fregs [X86_XMM6] = arch_fpstate->__fpu_xmm6;
+ context->fregs [X86_XMM7] = arch_fpstate->__fpu_xmm7;
}
-
int
mono_mach_arch_get_thread_state_size ()
{
return sizeof (x86_thread_state32_t);
}
+/*
+ * Size in bytes of the 32-bit float-state buffer callers must allocate
+ * (e.g. via alloca) before calling mono_mach_arch_get_thread_states ().
+ */
+int
+mono_mach_arch_get_thread_fpstate_size ()
+{
+ return sizeof (x86_float_state32_t);
+}
+
kern_return_t
-mono_mach_arch_get_thread_state (thread_port_t thread, thread_state_t state, mach_msg_type_number_t *count)
+mono_mach_arch_get_thread_states (thread_port_t thread, thread_state_t state, mach_msg_type_number_t *count, thread_state_t fpstate, mach_msg_type_number_t *fpcount)
{
#if defined(HOST_WATCHOS)
g_error ("thread_get_state() is not supported by this platform");
#else
x86_thread_state32_t *arch_state = (x86_thread_state32_t *) state;
+ x86_float_state32_t *arch_fpstate = (x86_float_state32_t *) fpstate;
kern_return_t ret;
*count = x86_THREAD_STATE32_COUNT;
- ret = thread_get_state (thread, x86_THREAD_STATE32, (thread_state_t) arch_state, count);
+ *fpcount = x86_FLOAT_STATE32_COUNT;
+
+ ret = thread_get_state (thread, x86_THREAD_STATE32, (thread_state_t)arch_state, count);
+ if (ret != KERN_SUCCESS)
+ return ret;
+ ret = thread_get_state (thread, x86_FLOAT_STATE32, (thread_state_t)arch_fpstate, fpcount);
return ret;
#endif
}
kern_return_t
-mono_mach_arch_set_thread_state (thread_port_t thread, thread_state_t state, mach_msg_type_number_t count)
+mono_mach_arch_set_thread_states (thread_port_t thread, thread_state_t state, mach_msg_type_number_t count, thread_state_t fpstate, mach_msg_type_number_t fpcount)
{
#if defined(HOST_WATCHOS)
g_error ("thread_set_state() is not supported by this platform");
#else
+ /* Apply the general-purpose state first, then the float state; bail out on
+ * the first failure. NOTE(review): a failure on the second call leaves the
+ * GP state already applied — confirm callers tolerate that. */
+ kern_return_t ret;
+ ret = thread_set_state (thread, x86_THREAD_STATE32, state, count);
+ if (ret != KERN_SUCCESS)
+ return ret;
+ ret = thread_set_state (thread, x86_FLOAT_STATE32, fpstate, fpcount);
+ return ret;
#endif
}
void mono_mach_init (pthread_key_t key);
int mono_mach_arch_get_mcontext_size (void);
-void mono_mach_arch_thread_state_to_mcontext (thread_state_t state, void *context);
-void mono_mach_arch_mcontext_to_thread_state (void *context, thread_state_t state);
-void mono_mach_arch_thread_state_to_mono_context (thread_state_t state, MonoContext *context);
+void mono_mach_arch_thread_states_to_mcontext (thread_state_t state, thread_state_t fpstate, void *context);
+void mono_mach_arch_mcontext_to_thread_states (void *context, thread_state_t state, thread_state_t fpstate);
+void mono_mach_arch_thread_states_to_mono_context (thread_state_t state, thread_state_t fpstate, MonoContext *context);
+/* FIXME: Should return size_t, not int. */
int mono_mach_arch_get_thread_state_size (void);
+int mono_mach_arch_get_thread_fpstate_size (void);
kern_return_t mono_mach_get_threads (thread_act_array_t *threads, guint32 *count);
kern_return_t mono_mach_free_threads (thread_act_array_t threads, guint32 count);
-kern_return_t mono_mach_arch_get_thread_state (thread_port_t thread, thread_state_t state, mach_msg_type_number_t *count);
-kern_return_t mono_mach_arch_set_thread_state (thread_port_t thread, thread_state_t state, mach_msg_type_number_t count);
+kern_return_t mono_mach_arch_get_thread_states (thread_port_t thread, thread_state_t state, mach_msg_type_number_t *count, thread_state_t fpstate, mach_msg_type_number_t *fpcount);
+kern_return_t mono_mach_arch_set_thread_states (thread_port_t thread, thread_state_t state, mach_msg_type_number_t count, thread_state_t fpstate, mach_msg_type_number_t fpcount);
void *mono_mach_arch_get_tls_value_from_thread (pthread_t thread, guint32 key);
void *mono_mach_get_tls_address_from_thread (pthread_t thread, pthread_key_t key);
mctx->gregs [AMD64_R14] = UCONTEXT_REG_R14 (ctx);
mctx->gregs [AMD64_R15] = UCONTEXT_REG_R15 (ctx);
mctx->gregs [AMD64_RIP] = UCONTEXT_REG_RIP (ctx);
+
+#ifdef UCONTEXT_REG_XMM
+ mctx->fregs [0] = UCONTEXT_REG_XMM0 (ctx);
+ mctx->fregs [1] = UCONTEXT_REG_XMM1 (ctx);
+ mctx->fregs [2] = UCONTEXT_REG_XMM2 (ctx);
+ mctx->fregs [3] = UCONTEXT_REG_XMM3 (ctx);
+ mctx->fregs [4] = UCONTEXT_REG_XMM4 (ctx);
+ mctx->fregs [5] = UCONTEXT_REG_XMM5 (ctx);
+ mctx->fregs [6] = UCONTEXT_REG_XMM6 (ctx);
+ mctx->fregs [7] = UCONTEXT_REG_XMM7 (ctx);
+ mctx->fregs [8] = UCONTEXT_REG_XMM8 (ctx);
+ mctx->fregs [9] = UCONTEXT_REG_XMM9 (ctx);
+ mctx->fregs [10] = UCONTEXT_REG_XMM10 (ctx);
+ mctx->fregs [11] = UCONTEXT_REG_XMM11 (ctx);
+ mctx->fregs [12] = UCONTEXT_REG_XMM12 (ctx);
+ mctx->fregs [13] = UCONTEXT_REG_XMM13 (ctx);
+ mctx->fregs [14] = UCONTEXT_REG_XMM14 (ctx);
+ mctx->fregs [15] = UCONTEXT_REG_XMM15 (ctx);
+#endif
+
#elif defined(HOST_WIN32)
CONTEXT *context = (CONTEXT*)sigctx;
UCONTEXT_REG_R14 (ctx) = mctx->gregs [AMD64_R14];
UCONTEXT_REG_R15 (ctx) = mctx->gregs [AMD64_R15];
UCONTEXT_REG_RIP (ctx) = mctx->gregs [AMD64_RIP];
+
+#ifdef UCONTEXT_REG_XMM
+ UCONTEXT_REG_XMM0 (ctx) = mctx->fregs [0];
+ UCONTEXT_REG_XMM1 (ctx) = mctx->fregs [1];
+ UCONTEXT_REG_XMM2 (ctx) = mctx->fregs [2];
+ UCONTEXT_REG_XMM3 (ctx) = mctx->fregs [3];
+ UCONTEXT_REG_XMM4 (ctx) = mctx->fregs [4];
+ UCONTEXT_REG_XMM5 (ctx) = mctx->fregs [5];
+ UCONTEXT_REG_XMM6 (ctx) = mctx->fregs [6];
+ UCONTEXT_REG_XMM7 (ctx) = mctx->fregs [7];
+ UCONTEXT_REG_XMM8 (ctx) = mctx->fregs [8];
+ UCONTEXT_REG_XMM9 (ctx) = mctx->fregs [9];
+ UCONTEXT_REG_XMM10 (ctx) = mctx->fregs [10];
+ UCONTEXT_REG_XMM11 (ctx) = mctx->fregs [11];
+ UCONTEXT_REG_XMM12 (ctx) = mctx->fregs [12];
+ UCONTEXT_REG_XMM13 (ctx) = mctx->fregs [13];
+ UCONTEXT_REG_XMM14 (ctx) = mctx->fregs [14];
+ UCONTEXT_REG_XMM15 (ctx) = mctx->fregs [15];
+#endif
+
#elif defined(HOST_WIN32)
CONTEXT *context = (CONTEXT*)sigctx;
#include <signal.h>
#endif
+#define MONO_CONTEXT_OFFSET(field, index, field_type) \
+ "i" (offsetof (MonoContext, field) + (index) * sizeof (field_type))
+
+#if defined(__APPLE__)
+typedef struct __darwin_xmm_reg MonoContextSimdReg;
+#endif
+
/*
* General notes about mono-context.
* Each arch defines a MonoContext struct with all GPR regs + IP/PC.
# define SC_ESI esi
#endif
+#include <mono/arch/x86/x86-codegen.h>
+
typedef struct {
mgreg_t eax;
mgreg_t ebx;
mgreg_t esi;
mgreg_t edi;
mgreg_t eip;
+#ifdef __APPLE__
+ MonoContextSimdReg fregs [X86_XMM_NREG];
+#endif
} MonoContext;
#define MONO_CONTEXT_SET_IP(ctx,ip) do { (ctx)->eip = (mgreg_t)(ip); } while (0);
#else
#define MONO_CONTEXT_GET_CURRENT(ctx) \
__asm__ __volatile__( \
- "movl $0x0, 0x00(%0)\n" \
- "mov %%ebx, 0x04(%0)\n" \
- "mov %%ecx, 0x08(%0)\n" \
- "mov %%edx, 0x0c(%0)\n" \
- "mov %%ebp, 0x10(%0)\n" \
- "mov %%esp, 0x14(%0)\n" \
- "mov %%esi, 0x18(%0)\n" \
- "mov %%edi, 0x1c(%0)\n" \
+ "movl $0x0, %c[eax](%0)\n" \
+ "mov %%ebx, %c[ebx](%0)\n" \
+ "mov %%ecx, %c[ecx](%0)\n" \
+ "mov %%edx, %c[edx](%0)\n" \
+ "mov %%ebp, %c[ebp](%0)\n" \
+ "mov %%esp, %c[esp](%0)\n" \
+ "mov %%esi, %c[esi](%0)\n" \
+ "mov %%edi, %c[edi](%0)\n" \
"call 1f\n" \
"1: pop 0x20(%0)\n" \
: \
- : "a" (&(ctx)) \
+ : "a" (&(ctx)), \
+ [eax] MONO_CONTEXT_OFFSET (eax, 0, mgreg_t), \
+ [ebx] MONO_CONTEXT_OFFSET (ebx, 0, mgreg_t), \
+ [ecx] MONO_CONTEXT_OFFSET (ecx, 0, mgreg_t), \
+ [edx] MONO_CONTEXT_OFFSET (edx, 0, mgreg_t), \
+ [ebp] MONO_CONTEXT_OFFSET (ebp, 0, mgreg_t), \
+ [esp] MONO_CONTEXT_OFFSET (esp, 0, mgreg_t), \
+ [esi] MONO_CONTEXT_OFFSET (esi, 0, mgreg_t), \
+ [edi] MONO_CONTEXT_OFFSET (edi, 0, mgreg_t) \
: "memory")
#endif
typedef struct {
mgreg_t gregs [AMD64_NREG];
+#ifdef __APPLE__
+ MonoContextSimdReg fregs [AMD64_XMM_NREG];
+#else
double fregs [AMD64_XMM_NREG];
+#endif
} MonoContext;
#define MONO_CONTEXT_SET_IP(ctx,ip) do { (ctx)->gregs [AMD64_RIP] = (mgreg_t)(ip); } while (0);
: "rdx", "memory")
#else
-#define MONO_CONTEXT_GET_CURRENT(ctx) \
- __asm__ __volatile__( \
- "movq $0x0, 0x00(%0)\n" \
- "movq %%rcx, 0x08(%0)\n" \
- "movq %%rdx, 0x10(%0)\n" \
- "movq %%rbx, 0x18(%0)\n" \
- "movq %%rsp, 0x20(%0)\n" \
- "movq %%rbp, 0x28(%0)\n" \
- "movq %%rsi, 0x30(%0)\n" \
- "movq %%rdi, 0x38(%0)\n" \
- "movq %%r8, 0x40(%0)\n" \
- "movq %%r9, 0x48(%0)\n" \
- "movq %%r10, 0x50(%0)\n" \
- "movq %%r11, 0x58(%0)\n" \
- "movq %%r12, 0x60(%0)\n" \
- "movq %%r13, 0x68(%0)\n" \
- "movq %%r14, 0x70(%0)\n" \
- "movq %%r15, 0x78(%0)\n" \
- /* "leaq (%%rip), %%rdx\n" is not understood by icc */ \
- ".byte 0x48, 0x8d, 0x15, 0x00, 0x00, 0x00, 0x00\n" \
- "movq %%rdx, 0x80(%0)\n" \
- : \
- : "a" (&(ctx)) \
- : "rdx", "memory")
+#define MONO_CONTEXT_GET_CURRENT_GREGS(ctx) \
+ do { \
+ __asm__ __volatile__( \
+ "movq $0x0, %c[rax](%0)\n" \
+ "movq %%rcx, %c[rcx](%0)\n" \
+ "movq %%rdx, %c[rdx](%0)\n" \
+ "movq %%rbx, %c[rbx](%0)\n" \
+ "movq %%rsp, %c[rsp](%0)\n" \
+ "movq %%rbp, %c[rbp](%0)\n" \
+ "movq %%rsi, %c[rsi](%0)\n" \
+ "movq %%rdi, %c[rdi](%0)\n" \
+ "movq %%r8, %c[r8](%0)\n" \
+ "movq %%r9, %c[r9](%0)\n" \
+ "movq %%r10, %c[r10](%0)\n" \
+ "movq %%r11, %c[r11](%0)\n" \
+ "movq %%r12, %c[r12](%0)\n" \
+ "movq %%r13, %c[r13](%0)\n" \
+ "movq %%r14, %c[r14](%0)\n" \
+ "movq %%r15, %c[r15](%0)\n" \
+ /* "leaq (%%rip), %%rdx\n" is not understood by icc */ \
+ ".byte 0x48, 0x8d, 0x15, 0x00, 0x00, 0x00, 0x00\n" \
+ "movq %%rdx, %c[rip](%0)\n" \
+ : \
+ : "a" (&(ctx)), \
+ [rax] MONO_CONTEXT_OFFSET (gregs, AMD64_RAX, mgreg_t), \
+ [rcx] MONO_CONTEXT_OFFSET (gregs, AMD64_RCX, mgreg_t), \
+ [rdx] MONO_CONTEXT_OFFSET (gregs, AMD64_RDX, mgreg_t), \
+ [rbx] MONO_CONTEXT_OFFSET (gregs, AMD64_RBX, mgreg_t), \
+ [rsp] MONO_CONTEXT_OFFSET (gregs, AMD64_RSP, mgreg_t), \
+ [rbp] MONO_CONTEXT_OFFSET (gregs, AMD64_RBP, mgreg_t), \
+ [rsi] MONO_CONTEXT_OFFSET (gregs, AMD64_RSI, mgreg_t), \
+ [rdi] MONO_CONTEXT_OFFSET (gregs, AMD64_RDI, mgreg_t), \
+ [r8] MONO_CONTEXT_OFFSET (gregs, AMD64_R8, mgreg_t), \
+ [r9] MONO_CONTEXT_OFFSET (gregs, AMD64_R9, mgreg_t), \
+ [r10] MONO_CONTEXT_OFFSET (gregs, AMD64_R10, mgreg_t), \
+ [r11] MONO_CONTEXT_OFFSET (gregs, AMD64_R11, mgreg_t), \
+ [r12] MONO_CONTEXT_OFFSET (gregs, AMD64_R12, mgreg_t), \
+ [r13] MONO_CONTEXT_OFFSET (gregs, AMD64_R13, mgreg_t), \
+ [r14] MONO_CONTEXT_OFFSET (gregs, AMD64_R14, mgreg_t), \
+ [r15] MONO_CONTEXT_OFFSET (gregs, AMD64_R15, mgreg_t), \
+ [rip] MONO_CONTEXT_OFFSET (gregs, AMD64_RIP, mgreg_t) \
+ : "rdx", "memory"); \
+ } while (0)
+
+#ifdef UCONTEXT_REG_XMM
+/*
+ * Capture the current XMM register file (xmm0-xmm15) into ctx->fregs using
+ * unaligned 128-bit stores; companion to MONO_CONTEXT_GET_CURRENT_GREGS.
+ */
+#define MONO_CONTEXT_GET_CURRENT_FREGS(ctx) \
+ do { \
+ __asm__ __volatile__ ( \
+ "movups %%xmm0, %c[xmm0](%0)\n" \
+ "movups %%xmm1, %c[xmm1](%0)\n" \
+ "movups %%xmm2, %c[xmm2](%0)\n" \
+ "movups %%xmm3, %c[xmm3](%0)\n" \
+ "movups %%xmm4, %c[xmm4](%0)\n" \
+ "movups %%xmm5, %c[xmm5](%0)\n" \
+ "movups %%xmm6, %c[xmm6](%0)\n" \
+ "movups %%xmm7, %c[xmm7](%0)\n" \
+ "movups %%xmm8, %c[xmm8](%0)\n" \
+ "movups %%xmm9, %c[xmm9](%0)\n" \
+ "movups %%xmm10, %c[xmm10](%0)\n" \
+ "movups %%xmm11, %c[xmm11](%0)\n" \
+ "movups %%xmm12, %c[xmm12](%0)\n" \
+ /* fixed: this line duplicated the xmm12 store, leaving the xmm13 slot stale */ \
+ "movups %%xmm13, %c[xmm13](%0)\n" \
+ "movups %%xmm14, %c[xmm14](%0)\n" \
+ "movups %%xmm15, %c[xmm15](%0)\n" \
+ : \
+ : "a" (&(ctx)), \
+ [xmm0] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM0, MonoContextSimdReg), \
+ [xmm1] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM1, MonoContextSimdReg), \
+ [xmm2] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM2, MonoContextSimdReg), \
+ [xmm3] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM3, MonoContextSimdReg), \
+ [xmm4] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM4, MonoContextSimdReg), \
+ [xmm5] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM5, MonoContextSimdReg), \
+ [xmm6] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM6, MonoContextSimdReg), \
+ [xmm7] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM7, MonoContextSimdReg), \
+ [xmm8] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM8, MonoContextSimdReg), \
+ [xmm9] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM9, MonoContextSimdReg), \
+ [xmm10] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM10, MonoContextSimdReg), \
+ [xmm11] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM11, MonoContextSimdReg), \
+ [xmm12] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM12, MonoContextSimdReg), \
+ [xmm13] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM13, MonoContextSimdReg), \
+ [xmm14] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM14, MonoContextSimdReg), \
+ [xmm15] MONO_CONTEXT_OFFSET (fregs, AMD64_XMM15, MonoContextSimdReg)); \
+ } while (0)
+#else
+#define MONO_CONTEXT_GET_CURRENT_FREGS(ctx)
+#endif
+
+#define MONO_CONTEXT_GET_CURRENT(ctx) \
+ do { \
+ MONO_CONTEXT_GET_CURRENT_GREGS(ctx); \
+ MONO_CONTEXT_GET_CURRENT_FREGS(ctx); \
+ } while (0)
#endif
#define MONO_ARCH_HAS_MONO_CONTEXT 1
"push {r0}\n" \
"push {r1}\n" \
"mov r0, %0\n" \
- "ldr r1, [sp, #4]\n" \
- "str r1, [r0]!\n" \
- "ldr r1, [sp, #0]\n" \
- "str r1, [r0]!\n" \
+ "ldr r1, [sp, #4]\n" \
+ "str r1, [r0], #4\n" \
+ "ldr r1, [sp, #0]\n" \
+ "str r1, [r0], #4\n" \
"stmia r0!, {r2-r12}\n" \
- "str sp, [r0]!\n" \
- "str lr, [r0]!\n" \
+ "str sp, [r0], #4\n" \
+ "str lr, [r0], #4\n" \
"mov r1, pc\n" \
- "str r1, [r0]!\n" \
+ "str r1, [r0], #4\n" \
"pop {r1}\n" \
"pop {r0}\n" \
: \
#define UCONTEXT_REG_R13(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__ss.__r13)
#define UCONTEXT_REG_R14(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__ss.__r14)
#define UCONTEXT_REG_R15(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__ss.__r15)
+ #define UCONTEXT_REG_XMM
+ #define UCONTEXT_REG_XMM0(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm0)
+ #define UCONTEXT_REG_XMM1(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm1)
+ #define UCONTEXT_REG_XMM2(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm2)
+ #define UCONTEXT_REG_XMM3(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm3)
+ #define UCONTEXT_REG_XMM4(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm4)
+ #define UCONTEXT_REG_XMM5(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm5)
+ #define UCONTEXT_REG_XMM6(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm6)
+ #define UCONTEXT_REG_XMM7(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm7)
+ #define UCONTEXT_REG_XMM8(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm8)
+ #define UCONTEXT_REG_XMM9(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm9)
+ #define UCONTEXT_REG_XMM10(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm10)
+ #define UCONTEXT_REG_XMM11(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm11)
+ #define UCONTEXT_REG_XMM12(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm12)
+ #define UCONTEXT_REG_XMM13(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm13)
+ #define UCONTEXT_REG_XMM14(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm14)
+ #define UCONTEXT_REG_XMM15(ctx) (((ucontext_t*)(ctx))->uc_mcontext->__fs.__fpu_xmm15)
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#define UCONTEXT_REG_RAX(ctx) (((ucontext_t*)(ctx))->uc_mcontext.mc_rax)
#define UCONTEXT_REG_RBX(ctx) (((ucontext_t*)(ctx))->uc_mcontext.mc_rbx)
#define UCONTEXT_REG_R14(ctx) (((ucontext_t*)(ctx))->sc_r14)
#define UCONTEXT_REG_R15(ctx) (((ucontext_t*)(ctx))->sc_r15)
#elif !defined(HOST_WIN32)
-#define UCONTEXT_GREGS(ctx) ((guint64*)&(((ucontext_t*)(ctx))->uc_mcontext.gregs))
+ #define UCONTEXT_GREGS(ctx) ((guint64*)&(((ucontext_t*)(ctx))->uc_mcontext.gregs))
#endif
#ifdef UCONTEXT_GREGS
if (info->async_target) {
MonoContext tmp = info->thread_saved_state [ASYNC_SUSPEND_STATE_INDEX].ctx;
- mach_msg_type_number_t num_state;
- thread_state_t state;
+ mach_msg_type_number_t num_state, num_fpstate;
+ thread_state_t state, fpstate;
ucontext_t uctx;
mcontext_t mctx;
info->async_target = (void (*)(void *)) info->user_data;
state = (thread_state_t) alloca (mono_mach_arch_get_thread_state_size ());
+ fpstate = (thread_state_t) alloca (mono_mach_arch_get_thread_fpstate_size ());
mctx = (mcontext_t) alloca (mono_mach_arch_get_mcontext_size ());
do {
- ret = mono_mach_arch_get_thread_state (info->native_handle, state, &num_state);
+ ret = mono_mach_arch_get_thread_states (info->native_handle, state, &num_state, fpstate, &num_fpstate);
} while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS)
return FALSE;
- mono_mach_arch_thread_state_to_mcontext (state, mctx);
+ mono_mach_arch_thread_states_to_mcontext (state, fpstate, mctx);
uctx.uc_mcontext = mctx;
mono_monoctx_to_sigctx (&tmp, &uctx);
- mono_mach_arch_mcontext_to_thread_state (mctx, state);
+ mono_mach_arch_mcontext_to_thread_states (mctx, state, fpstate);
do {
- ret = mono_mach_arch_set_thread_state (info->native_handle, state, num_state);
+ ret = mono_mach_arch_set_thread_states (info->native_handle, state, num_state, fpstate, num_fpstate);
} while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS)
gboolean
mono_threads_suspend_begin_async_resume (MonoThreadInfo *info)
{
- mono_threads_add_to_pending_operation_set (info);
- return mono_threads_pthread_kill (info, mono_threads_suspend_get_restart_signal ()) == 0;
+ int sig = mono_threads_suspend_get_restart_signal ();
+
+ /* Only register the pending operation once the restart signal has actually
+ * been delivered; signalling first (as the old code did) could leave a
+ * stale entry in the pending set when pthread_kill fails. */
+ if (!mono_threads_pthread_kill (info, sig)) {
+ mono_threads_add_to_pending_operation_set (info);
+ return TRUE;
+ }
+ return FALSE;
}
void