mach_port_t host = mach_host_self();
vm_size_t page_size;
vm_statistics_data_t vmstat;
- if (KERN_SUCCESS != host_statistics(host, HOST_VM_INFO, (host_info_t)&vmstat, &count)) {
+ kern_return_t ret;
+ do {
+ ret = host_statistics(host, HOST_VM_INFO, (host_info_t)&vmstat, &count);
+ } while (ret == KERN_ABORTED);
+
+ if (ret != KERN_SUCCESS) {
g_warning ("Mono was unable to retrieve memory usage!");
return 0;
}
gboolean
sgen_resume_thread (SgenThreadInfo *info)
{
- return thread_resume (info->client_info.info.native_handle) == KERN_SUCCESS;
+ kern_return_t ret;
+ do {
+ ret = thread_resume (info->client_info.info.native_handle);
+ } while (ret == KERN_ABORTED);
+ return ret == KERN_SUCCESS;
}
gboolean
state = (thread_state_t) alloca (mono_mach_arch_get_thread_state_size ());
mctx = (mcontext_t) alloca (mono_mach_arch_get_mcontext_size ());
- ret = thread_suspend (info->client_info.info.native_handle);
+ do {
+ ret = thread_suspend (info->client_info.info.native_handle);
+ } while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS)
return FALSE;
- ret = mono_mach_arch_get_thread_state (info->client_info.info.native_handle, state, &num_state);
+ do {
+ ret = mono_mach_arch_get_thread_state (info->client_info.info.native_handle, state, &num_state);
+ } while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS)
return FALSE;
if (!sgen_suspend_thread (info))
continue;
} else {
- ret = thread_resume (info->client_info.info.native_handle);
+ do {
+ ret = thread_resume (info->client_info.info.native_handle);
+ } while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS)
continue;
}
/* allocate two contiguous pages of memory: the first page will contain the data (like a local constant pool)
* while the second will contain the trampolines.
*/
- ret = vm_allocate (mach_task_self (), &addr, psize * 2, VM_FLAGS_ANYWHERE);
+ do {
+ ret = vm_allocate (mach_task_self (), &addr, psize * 2, VM_FLAGS_ANYWHERE);
+ } while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS) {
g_error ("Cannot allocate memory for trampolines: %d", ret);
break;
state = (thread_state_t) alloca (mono_mach_arch_get_thread_state_size ());
mctx = (mcontext_t) alloca (mono_mach_arch_get_mcontext_size ());
- ret = mono_mach_arch_get_thread_state (info->native_handle, state, &num_state);
+ do {
+ ret = mono_mach_arch_get_thread_state (info->native_handle, state, &num_state);
+ } while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS)
return FALSE;
{
kern_return_t ret;
- if ((ret = host_get_clock_service (mach_host_self (), SYSTEM_CLOCK, &sampling_clock_service)) != KERN_SUCCESS)
+ do {
+ ret = host_get_clock_service (mach_host_self (), SYSTEM_CLOCK, &sampling_clock_service);
+ } while (ret == KERN_ABORTED);
+
+ if (ret != KERN_SUCCESS)
g_error ("%s: host_get_clock_service () returned %d", __func__, ret);
}
{
kern_return_t ret;
- if ((ret = mach_port_deallocate (mach_task_self (), sampling_clock_service)) != KERN_SUCCESS)
+ do {
+ ret = mach_port_deallocate (mach_task_self (), sampling_clock_service);
+ } while (ret == KERN_ABORTED);
+
+ if (ret != KERN_SUCCESS)
g_error ("%s: mach_port_deallocate () returned %d", __func__, ret);
}
kern_return_t ret;
mach_timespec_t mach_ts;
- if ((ret = clock_get_time (sampling_clock_service, &mach_ts)) != KERN_SUCCESS)
+ do {
+ ret = clock_get_time (sampling_clock_service, &mach_ts);
+ } while (ret == KERN_ABORTED);
+
+ if (ret != KERN_SUCCESS)
g_error ("%s: clock_get_time () returned %d", __func__, ret);
return ((guint64) mach_ts.tv_sec * 1000000000) + (guint64) mach_ts.tv_nsec;
do {
ret = clock_sleep (sampling_clock_service, TIME_ABSOLUTE, then, &remain_unused);
-
- if (ret != KERN_SUCCESS && ret != KERN_ABORTED)
- g_error ("%s: clock_sleep () returned %d", __func__, ret);
} while (ret == KERN_ABORTED);
+
+	if (ret != KERN_SUCCESS)
+		g_error ("%s: clock_sleep () returned %d", __func__, ret);
}
#else
do {
ret = task_threads (current_task (), threads, count);
- } while (ret != KERN_SUCCESS);
+ } while (ret == KERN_ABORTED);
return ret;
}
mono_os_sem_post (MonoSemType *sem)
{
int res;

-	res = semaphore_signal (*sem);
-	g_assert (res != KERN_INVALID_ARGUMENT);
+	do {
+		res = semaphore_signal (*sem);
+		g_assert (res != KERN_INVALID_ARGUMENT);
+	} while (res == KERN_ABORTED);
+
return res != KERN_SUCCESS ? -1 : 0;
}
mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT, th_count;
thread_array_t th_array;
size_t i;
+ kern_return_t ret;
if (pid == getpid ()) {
/* task_for_pid () doesn't work on ios, even for the current process */
task = mach_task_self ();
} else {
- if (task_for_pid (mach_task_self (), pid, &task) != KERN_SUCCESS)
+ do {
+ ret = task_for_pid (mach_task_self (), pid, &task);
+ } while (ret == KERN_ABORTED);
+
+ if (ret != KERN_SUCCESS)
RET_ERROR (MONO_PROCESS_ERROR_NOT_FOUND);
}
- if (task_info (task, TASK_BASIC_INFO, (task_info_t)&t_info, &t_info_count) != KERN_SUCCESS) {
+ do {
+ ret = task_info (task, TASK_BASIC_INFO, (task_info_t)&t_info, &t_info_count);
+ } while (ret == KERN_ABORTED);
+
+ if (ret != KERN_SUCCESS) {
if (pid != getpid ())
mach_port_deallocate (mach_task_self (), task);
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
}
-	if (task_threads(task, &th_array, &th_count) != KERN_SUCCESS) {
+
+	do {
+		ret = task_threads (task, &th_array, &th_count);
+	} while (ret == KERN_ABORTED);
+
+	if (ret != KERN_SUCCESS) {
if (pid != getpid ())
mach_port_deallocate (mach_task_self (), task);
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
struct thread_basic_info th_info;
mach_msg_type_number_t th_info_count = THREAD_BASIC_INFO_COUNT;
- if (thread_info(th_array[i], THREAD_BASIC_INFO, (thread_info_t)&th_info, &th_info_count) == KERN_SUCCESS) {
+ do {
+ ret = thread_info(th_array[i], THREAD_BASIC_INFO, (thread_info_t)&th_info, &th_info_count);
+ } while (ret == KERN_ABORTED);
+
+ if (ret == KERN_SUCCESS) {
thread_user_time = th_info.user_time.seconds + th_info.user_time.microseconds / 1e6;
thread_system_time = th_info.system_time.seconds + th_info.system_time.microseconds / 1e6;
//thread_percent = (double)th_info.cpu_usage / TH_USAGE_SCALE;
task_t task;
struct task_basic_info t_info;
mach_msg_type_number_t th_count = TASK_BASIC_INFO_COUNT;
+ kern_return_t mach_ret;
if (pid == getpid ()) {
/* task_for_pid () doesn't work on ios, even for the current process */
task = mach_task_self ();
} else {
- if (task_for_pid (mach_task_self (), pid, &task) != KERN_SUCCESS)
+ do {
+ mach_ret = task_for_pid (mach_task_self (), pid, &task);
+ } while (mach_ret == KERN_ABORTED);
+
+ if (mach_ret != KERN_SUCCESS)
RET_ERROR (MONO_PROCESS_ERROR_NOT_FOUND);
}

-	if (task_info (task, TASK_BASIC_INFO, (task_info_t)&t_info, &th_count) != KERN_SUCCESS) {
+	do {
+		mach_ret = task_info (task, TASK_BASIC_INFO, (task_info_t)&t_info, &th_count);
+	} while (mach_ret == KERN_ABORTED);
+
+	if (mach_ret != KERN_SUCCESS) {
if (pid != getpid ())
mach_port_deallocate (mach_task_self (), task);
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
{
kern_return_t ret;
- ret = thread_suspend (info->native_handle);
+ do {
+ ret = thread_suspend (info->native_handle);
+ } while (ret == KERN_ABORTED);
+
if (ret != KERN_SUCCESS)
return;
- ret = thread_abort_safely (info->native_handle);
+ do {
+ ret = thread_abort_safely (info->native_handle);
+ } while (ret == KERN_ABORTED);
/*
* We are doing thread_abort when thread_abort_safely returns KERN_SUCCESS because
if (ret == KERN_SUCCESS)
ret = thread_abort (info->native_handle);
- g_assert (thread_resume (info->native_handle) == KERN_SUCCESS);
+ do {
+ ret = thread_resume (info->native_handle);
+ } while (ret == KERN_ABORTED);
+
+ g_assert (ret == KERN_SUCCESS);
}
gboolean
g_assert (info);
- ret = thread_suspend (info->native_handle);
+
+ do {
+ ret = thread_suspend (info->native_handle);
+ } while (ret == KERN_ABORTED);
+
THREADS_SUSPEND_DEBUG ("SUSPEND %p -> %d\n", (void*)info->native_handle, ret);
if (ret != KERN_SUCCESS)
return FALSE;
/* We're in the middle of a self-suspend, resume and register */
if (!mono_threads_transition_finish_async_suspend (info)) {
mono_threads_add_to_pending_operation_set (info);
- g_assert (thread_resume (info->native_handle) == KERN_SUCCESS);
+ do {
+ ret = thread_resume (info->native_handle);
+ } while (ret == KERN_ABORTED);
+ g_assert (ret == KERN_SUCCESS);
THREADS_SUSPEND_DEBUG ("FAILSAFE RESUME/1 %p -> %d\n", (void*)info->native_handle, 0);
//XXX interrupt_kernel doesn't make sense in this case as the target is not in a syscall
return TRUE;
thread_abort (info->native_handle);
} else {
mono_threads_transition_async_suspend_compensation (info);
- g_assert (thread_resume (info->native_handle) == KERN_SUCCESS);
+ do {
+ ret = thread_resume (info->native_handle);
+ } while (ret == KERN_ABORTED);
+ g_assert (ret == KERN_SUCCESS);
THREADS_SUSPEND_DEBUG ("FAILSAFE RESUME/2 %p -> %d\n", (void*)info->native_handle, 0);
}
return res;
state = (thread_state_t) alloca (mono_mach_arch_get_thread_state_size ());
mctx = (mcontext_t) alloca (mono_mach_arch_get_mcontext_size ());
- ret = mono_mach_arch_get_thread_state (info->native_handle, state, &num_state);
+ do {
+ ret = mono_mach_arch_get_thread_state (info->native_handle, state, &num_state);
+ } while (ret == KERN_ABORTED);
+
if (ret != KERN_SUCCESS)
return FALSE;
mono_mach_arch_mcontext_to_thread_state (mctx, state);
- ret = mono_mach_arch_set_thread_state (info->native_handle, state, num_state);
+ do {
+ ret = mono_mach_arch_set_thread_state (info->native_handle, state, num_state);
+ } while (ret == KERN_ABORTED);
+
if (ret != KERN_SUCCESS)
return FALSE;
}
- ret = thread_resume (info->native_handle);
+ do {
+ ret = thread_resume (info->native_handle);
+ } while (ret == KERN_ABORTED);
THREADS_SUSPEND_DEBUG ("RESUME %p -> %d\n", (void*)info->native_handle, ret);
return ret == KERN_SUCCESS;