kernel: Remove abort and join implementation (UNBISECTABLE)

THIS COMMIT DELIBERATELY BREAKS BISECTABILITY FOR EASE OF REVIEW.
SKIP IF YOU LAND HERE.

Remove the existing implementation of k_thread_abort(),
k_thread_join(), and the attendant facilities in the thread subsystem
and idle thread that support them.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Authored by Andy Ross on 2021-02-17 08:19:10 -08:00; committed by Anas Nashif
Commit: c0c8cb0e97
9 changed files with 0 additions and 412 deletions

View file

@@ -112,14 +112,6 @@ struct _thread_base {
/* this thread's entry in a timeout queue */
struct _timeout timeout;
#endif
_wait_q_t join_waiters;
#if __ASSERT_ON
/* For detecting calls to k_thread_create() on threads that are
* already active
*/
atomic_t cookie;
#endif
};
typedef struct _thread_base _thread_base_t;
@@ -212,29 +204,6 @@ struct k_thread {
/** static thread init data */
void *init_data;
/**
* abort function
*
* This function pointer, if non-NULL, will be run once after the
* thread has completely exited. It may run in the context of:
* - the idle thread if the thread self-exited
* - another thread calling k_thread_abort()
* - a fatal exception handler on a special stack
*
* It will never run in the context of the thread itself.
*
* A pointer to the thread object that was aborted is provided. At the
* time this runs, this thread object has completely exited. It may
* be re-used with k_thread_create() or returned to a heap or slab
* pool.
*
* This function does not run with any kind of lock active and
* there is the possibility of races leading to undefined behavior
* if other threads are attempting to free or recycle this object
* concurrently.
*/
void (*fn_abort)(struct k_thread *aborted);
#if defined(CONFIG_POLL)
struct z_poller poller;
#endif
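
The fn_abort hook documented above goes away with this commit. For reference, here is a minimal sketch of how the hook was typically wired up, following the pattern used by the test case further down in this diff; the worker names, stack size, priority, and heap use are illustrative assumptions, not taken from the commit:

#include <kernel.h>

K_THREAD_STACK_DEFINE(worker_stack, 1024);

static void worker_entry(void *p1, void *p2, void *p3)
{
        ARG_UNUSED(p1);
        ARG_UNUSED(p2);
        ARG_UNUSED(p3);
        /* ... do some work, then return to terminate the thread ... */
}

/* Runs once the worker has completely exited, never in the worker's own
 * context, so the thread object is safe to hand back to the heap here.
 */
static void recycle_worker(struct k_thread *aborted)
{
        k_free(aborted);
}

static void spawn_worker(void)
{
        /* Needs a kernel heap (CONFIG_HEAP_MEM_POOL_SIZE); NULL check omitted */
        struct k_thread *t = k_malloc(sizeof(*t));

        /* Start with a short delay so fn_abort is assigned before the thread
         * can run and exit (k_thread_create() resets the member to NULL).
         */
        k_thread_create(t, worker_stack, K_THREAD_STACK_SIZEOF(worker_stack),
                        worker_entry, NULL, NULL, NULL,
                        K_PRIO_PREEMPT(1), 0, K_MSEC(10));
        t->fn_abort = recycle_worker;
}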

View file

@@ -109,9 +109,6 @@ struct _cpu {
/* one assigned idle thread per CPU */
struct k_thread *idle_thread;
/* If non-null, self-aborted thread that needs cleanup */
struct k_thread *pending_abort;
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
/* Coop thread preempted by current metairq, or NULL */
struct k_thread *metairq_preempted;

View file

@@ -21,7 +21,6 @@ list(APPEND kernel_files
stack.c
system_work_q.c
thread.c
thread_abort.c
version.c
work_q.c
condvar.c

View file

@@ -118,8 +118,6 @@ void z_pm_save_idle_exit(int32_t ticks)
void idle(void *p1, void *unused2, void *unused3)
{
struct _cpu *cpu = p1;
ARG_UNUSED(unused2);
ARG_UNUSED(unused3);
@@ -132,41 +130,7 @@ void idle(void *p1, void *unused2, void *unused3)
#endif /* CONFIG_BOOT_TIME_MEASUREMENT */
while (true) {
/* Lock interrupts to atomically check if to_abort is non-NULL,
* and if so clear it
*/
int key = arch_irq_lock();
struct k_thread *to_abort = cpu->pending_abort;
if (to_abort) {
cpu->pending_abort = NULL;
arch_irq_unlock(key);
/* Safe to unlock interrupts here. We've atomically
* checked and stashed cpu->pending_abort into a stack
* variable. If we get preempted here and another
* thread aborts, cpu->pending_abort will get set
* again and we'll handle it when the loop iteration
* is continued below.
*/
LOG_DBG("idle %p aborting thread %p",
_current, to_abort);
z_thread_single_abort(to_abort);
/* We have to invoke this scheduler now. If we got
* here, the idle thread preempted everything else
* in order to abort the thread, and we now need to
* figure out what to do next, it's not necessarily
* the case that there are no other runnable threads.
*/
z_reschedule_unlocked();
continue;
}
#if SMP_FALLBACK
arch_irq_unlock(key);
k_busy_wait(100);
k_yield();
#else

View file

@@ -18,13 +18,6 @@
#include <sys/atomic.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
/* Maximum time between the time a self-aborting thread flags itself
* DEAD and the last read or write to its stack memory (i.e. the time
* of its next swap()). In theory this might be tuned per platform,
* but in practice this conservative value should be safe.
*/
#define THREAD_ABORT_DELAY_US 500
#if defined(CONFIG_SCHED_DUMB)
#define _priq_run_add z_priq_dumb_add
#define _priq_run_remove z_priq_dumb_remove
@@ -227,13 +220,6 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
{
struct k_thread *thread;
/* If a thread self-aborted we need the idle thread to clean it up
* before any other thread can run on this CPU
*/
if (_current_cpu->pending_abort != NULL) {
return _current_cpu->idle_thread;
}
thread = _priq_run_best(&_kernel.ready_q.runq);
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
@@ -253,16 +239,6 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
}
#endif
/* If the current thread is marked aborting, mark it
* dead so it will not be scheduled again.
*/
if (_current->base.thread_state & _THREAD_ABORTING) {
_current->base.thread_state |= _THREAD_DEAD;
#ifdef CONFIG_SMP
_current_cpu->swap_ok = true;
#endif
}
#ifndef CONFIG_SMP
/* In uniprocessor mode, we can leave the current thread in
* the queue (actually we have to, otherwise the assembly
@@ -587,121 +563,6 @@ static _wait_q_t *pended_on(struct k_thread *thread)
return thread->base.pended_on;
}
void z_thread_single_abort(struct k_thread *thread)
{
void (*fn_abort)(struct k_thread *aborted) = NULL;
__ASSERT(!(thread->base.user_options & K_ESSENTIAL),
"essential thread aborted");
__ASSERT(thread != _current || arch_is_in_isr(),
"self-abort detected");
/* Prevent any of the further logic in this function from running more
* than once
*/
k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
if ((thread->base.thread_state &
(_THREAD_ABORTING | _THREAD_DEAD)) != 0) {
LOG_DBG("Thread %p already dead or on the way out", thread);
k_spin_unlock(&sched_spinlock, key);
return;
}
thread->base.thread_state |= _THREAD_ABORTING;
k_spin_unlock(&sched_spinlock, key);
(void)z_abort_thread_timeout(thread);
if (IS_ENABLED(CONFIG_SMP)) {
z_sched_abort(thread);
}
LOCKED(&sched_spinlock) {
LOG_DBG("Cleanup aborting thread %p", thread);
struct k_thread *waiter;
if (z_is_thread_ready(thread)) {
if (z_is_thread_queued(thread)) {
dequeue_thread(&_kernel.ready_q.runq,
thread);
}
update_cache(thread == _current);
} else {
if (z_is_thread_pending(thread)) {
_priq_wait_remove(&pended_on(thread)->waitq,
thread);
z_mark_thread_as_not_pending(thread);
thread->base.pended_on = NULL;
}
}
/* Wake everybody up who was trying to join with this thread.
* A reschedule is invoked later by k_thread_abort().
*/
while ((waiter = z_waitq_head(&thread->base.join_waiters)) !=
NULL) {
(void)z_abort_thread_timeout(waiter);
_priq_wait_remove(&pended_on(waiter)->waitq, waiter);
z_mark_thread_as_not_pending(waiter);
waiter->base.pended_on = NULL;
arch_thread_return_value_set(waiter, 0);
ready_thread(waiter);
}
if (z_is_idle_thread_object(_current)) {
update_cache(1);
}
thread->base.thread_state |= _THREAD_DEAD;
/* Read this here from the thread struct now instead of
* after we unlock
*/
fn_abort = thread->fn_abort;
/* Keep inside the spinlock as these may use the contents
* of the thread object. As soon as we release this spinlock,
* the thread object could be destroyed at any time.
*/
sys_trace_thread_abort(thread);
z_thread_monitor_exit(thread);
#ifdef CONFIG_USERSPACE
/* Remove this thread from its memory domain, which takes
* it off the domain's thread list and possibly also arch-
* specific tasks.
*/
z_mem_domain_exit_thread(thread);
/* Revoke permissions on thread's ID so that it may be
* recycled
*/
z_thread_perms_all_clear(thread);
/* Clear initialized state so that this thread object may be
* re-used and triggers errors if API calls are made on it from
* user threads
*/
z_object_uninit(thread->stack_obj);
z_object_uninit(thread);
#endif
/* Kernel should never look at the thread object again past
* this point unless another thread API is called. If the
* object doesn't get corrupted, we'll catch other
* k_thread_abort()s on this object, although this is
* somewhat undefined behavior. It must be safe to call
* k_thread_create() or free the object at this point.
*/
#if __ASSERT_ON
atomic_clear(&thread->base.cookie);
#endif
}
if (fn_abort != NULL) {
/* Thread object provided to be freed or recycled */
fn_abort(thread);
}
}
static void unready_thread(struct k_thread *thread)
{
if (z_is_thread_queued(thread)) {
@@ -1472,43 +1333,6 @@ void z_sched_ipi(void)
z_trace_sched_ipi();
#endif
}
void z_sched_abort(struct k_thread *thread)
{
k_spinlock_key_t key;
if (thread == _current) {
z_remove_thread_from_ready_q(thread);
return;
}
/* First broadcast an IPI to the other CPUs so they can stop
* it locally. Not all architectures support that, alas. If
* we don't have it, we need to wait for some other interrupt.
*/
#ifdef CONFIG_SCHED_IPI_SUPPORTED
arch_sched_ipi();
#endif
/* Wait for it to be flagged dead either by the CPU it was
* running on or because we caught it idle in the queue
*/
while ((thread->base.thread_state & _THREAD_DEAD) == 0U) {
key = k_spin_lock(&sched_spinlock);
if (z_is_thread_prevented_from_running(thread)) {
__ASSERT(!z_is_thread_queued(thread), "");
thread->base.thread_state |= _THREAD_DEAD;
k_spin_unlock(&sched_spinlock, key);
} else if (z_is_thread_queued(thread)) {
dequeue_thread(&_kernel.ready_q.runq, thread);
thread->base.thread_state |= _THREAD_DEAD;
k_spin_unlock(&sched_spinlock, key);
} else {
k_spin_unlock(&sched_spinlock, key);
k_busy_wait(100);
}
}
}
#endif
#ifdef CONFIG_USERSPACE
@@ -1603,44 +1427,6 @@ int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)
#endif /* CONFIG_SCHED_CPU_MASK */
int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
{
k_spinlock_key_t key;
int ret;
__ASSERT(((arch_is_in_isr() == false) ||
K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
key = k_spin_lock(&sched_spinlock);
if ((thread->base.pended_on == &_current->base.join_waiters) ||
(thread == _current)) {
ret = -EDEADLK;
goto out;
}
if ((thread->base.thread_state & _THREAD_DEAD) != 0) {
ret = 0;
goto out;
}
if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
ret = -EBUSY;
goto out;
}
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
pending_current = _current;
#endif
add_to_waitq_locked(_current, &thread->base.join_waiters);
add_thread_timeout(_current, timeout);
return z_swap(&sched_spinlock, key);
out:
k_spin_unlock(&sched_spinlock, key);
return ret;
}
#ifdef CONFIG_USERSPACE
/* Special case: don't oops if the thread is uninitialized. This is because
* the initialization bit does double-duty for thread objects; if false, means

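For context, the z_impl_k_thread_join() removed above is the backend of the public k_thread_join() API; the API itself remains, and this commit only deletes its old implementation ahead of the replacement implied by the bisectability note. A minimal usage sketch from application code, with hypothetical names and sizes:

#include <kernel.h>

K_THREAD_STACK_DEFINE(child_stack, 1024);
static struct k_thread child;

static void child_entry(void *p1, void *p2, void *p3)
{
        ARG_UNUSED(p1);
        ARG_UNUSED(p2);
        ARG_UNUSED(p3);
        /* ... do work; returning from the entry point terminates the thread */
}

void run_and_join(void)
{
        k_thread_create(&child, child_stack, K_THREAD_STACK_SIZEOF(child_stack),
                        child_entry, NULL, NULL, NULL,
                        K_PRIO_PREEMPT(1), 0, K_NO_WAIT);

        /* Per the implementation above: returns 0 once the child is dead,
         * -EBUSY when called with K_NO_WAIT while it is still running, and
         * -EDEADLK if a thread tries to join itself.
         */
        int ret = k_thread_join(&child, K_FOREVER);

        ARG_UNUSED(ret);
}
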
View file

@@ -538,15 +538,6 @@ char *z_setup_new_thread(struct k_thread *new_thread,
{
char *stack_ptr;
#if __ASSERT_ON
atomic_val_t old_val = atomic_set(&new_thread->base.cookie,
THREAD_COOKIE);
/* Must be garbage or 0, never already set. Cleared at the end of
* z_thread_single_abort()
*/
__ASSERT(old_val != THREAD_COOKIE,
"re-use of active thread object %p detected", new_thread);
#endif
Z_ASSERT_VALID_PRIO(prio, entry);
#ifdef CONFIG_USERSPACE
@@ -561,7 +552,6 @@ char *z_setup_new_thread(struct k_thread *new_thread,
/* Any given thread has access to itself */
k_object_access_grant(new_thread, new_thread);
#endif
z_waitq_init(&new_thread->base.join_waiters);
/* Initialize various struct k_thread members */
z_init_thread_base(&new_thread->base, prio, _THREAD_PRESTART, options);
@@ -579,7 +569,6 @@ char *z_setup_new_thread(struct k_thread *new_thread,
/* static threads overwrite it afterwards with real value */
new_thread->init_data = NULL;
new_thread->fn_abort = NULL;
#ifdef CONFIG_USE_SWITCH
/* switch_handle must be non-null except when inside z_swap()

View file

@@ -1,69 +0,0 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Primitive for aborting a thread when an arch-specific one is not
* needed.
*/
#include <kernel.h>
#include <kernel_structs.h>
#include <kernel_internal.h>
#include <kswap.h>
#include <string.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <wait_q.h>
#include <ksched.h>
#include <sys/__assert.h>
#include <syscall_handler.h>
#include <logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
FUNC_NORETURN void z_self_abort(void)
{
/* Self-aborting threads don't clean themselves up, we
* have the idle thread for the current CPU do it.
*/
int key;
struct _cpu *cpu;
/* Lock local IRQs to prevent us from migrating to another CPU
* while we set this up
*/
key = arch_irq_lock();
cpu = _current_cpu;
__ASSERT(cpu->pending_abort == NULL, "already have a thread to abort");
cpu->pending_abort = _current;
LOG_DBG("%p self-aborting, handle on idle thread %p",
_current, cpu->idle_thread);
k_thread_suspend(_current);
z_swap_irqlock(key);
__ASSERT(false, "should never get here");
CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
void z_impl_k_thread_abort(k_tid_t thread)
{
if (thread == _current && !arch_is_in_isr()) {
/* Thread is self-exiting, idle thread on this CPU will do
* the cleanup
*/
z_self_abort();
}
z_thread_single_abort(thread);
if (!arch_is_in_isr()) {
/* Don't need to do this if we're in an ISR */
z_reschedule_unlocked();
}
}
#endif
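
z_impl_k_thread_abort() above is the generic backend for the public k_thread_abort() call, with the self-abort case handed off to the idle thread through cpu->pending_abort. A minimal sketch of how application code exercises both paths of that (unchanged) API; the victim thread, stack, and priority are hypothetical:

#include <kernel.h>

K_THREAD_STACK_DEFINE(victim_stack, 1024);
static struct k_thread victim;

static void victim_entry(void *p1, void *p2, void *p3)
{
        ARG_UNUSED(p1);
        ARG_UNUSED(p2);
        ARG_UNUSED(p3);

        while (true) {
                k_msleep(100);
        }
}

void abort_examples(void)
{
        k_thread_create(&victim, victim_stack, K_THREAD_STACK_SIZEOF(victim_stack),
                        victim_entry, NULL, NULL, NULL,
                        K_PRIO_PREEMPT(2), 0, K_NO_WAIT);

        /* Aborting another thread: in the removed implementation the caller
         * tears it down directly via z_thread_single_abort().
         */
        k_thread_abort(&victim);

        /* Self-abort: in the removed implementation the thread parks itself
         * in cpu->pending_abort and the idle thread finishes the cleanup.
         * This call does not return.
         */
        k_thread_abort(k_current_get());
}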

View file

@@ -29,7 +29,6 @@ extern void test_threads_suspend_resume_preemptible(void);
extern void test_threads_abort_self(void);
extern void test_threads_abort_others(void);
extern void test_threads_abort_repeat(void);
extern void test_abort_handler(void);
extern void test_essential_thread_operation(void);
extern void test_threads_priority_set(void);
extern void test_delayed_thread_abort(void);
@@ -499,7 +498,6 @@ void test_main(void)
ztest_user_unit_test(test_threads_abort_self),
ztest_user_unit_test(test_threads_abort_others),
ztest_1cpu_unit_test(test_threads_abort_repeat),
ztest_unit_test(test_abort_handler),
ztest_1cpu_unit_test(test_delayed_thread_abort),
ztest_unit_test(test_essential_thread_operation),
ztest_unit_test(test_essential_thread_abort),

View file

@@ -107,51 +107,6 @@ void test_threads_abort_repeat(void)
bool abort_called;
void *block;
static void abort_function(struct k_thread *aborted)
{
printk("Child thread's abort handler called\n");
abort_called = true;
zassert_equal(aborted, &tdata, "wrong thread pointer");
zassert_not_equal(k_current_get(), aborted,
"fn_abort ran on its own thread");
k_free(block);
}
static void uthread_entry(void)
{
block = k_malloc(BLOCK_SIZE);
zassert_true(block != NULL, NULL);
printk("Child thread is running\n");
k_msleep(2);
}
/**
* @ingroup kernel_thread_tests
* @brief Test to validate the call of abort handler
* specified by thread when it is aborted
*
* @see k_thread_abort(), #k_thread.fn_abort
*/
void test_abort_handler(void)
{
k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
(k_thread_entry_t)uthread_entry, NULL, NULL, NULL,
0, 0, K_NO_WAIT);
tdata.fn_abort = &abort_function;
k_msleep(1);
abort_called = false;
printk("Calling abort of child from parent\n");
k_thread_abort(tid);
zassert_true(abort_called == true, "Abort handler"
" is not called");
}
static void delayed_thread_entry(void *p1, void *p2, void *p3)
{
execute_flag = 1;