kernel: Revise thread monitoring exit API naming
Renames _thread_exit() to _thread_monitor_exit() to make its purpose
clearer. Revises the associated comments and removes unnecessary
doxygen tags.

Change-Id: I010a328d35d2d79d2a29b9d0b6c02097bb655989
Signed-off-by: Allan Stephens <allan.stephens@windriver.com>
parent e262615280
commit 92e75040a2

7 changed files with 23 additions and 47 deletions
@@ -52,7 +52,7 @@
 void fiber_abort(void)
 {
-	_thread_exit(_nanokernel.current);
+	_thread_monitor_exit(_nanokernel.current);
 	if (_ScbIsInThreadMode()) {
 		_nano_fiber_swap();
 	} else {
@@ -252,7 +252,7 @@ static void abort_task(struct k_task *X)
 
 	/* Do normal thread exit cleanup */
 
-	_thread_exit((struct tcs *)X->workspace);
+	_thread_monitor_exit((struct tcs *)X->workspace);
 
 	/* Set TF_TERM and TF_STOP state flags */
 
@@ -83,9 +83,9 @@ extern void _thread_essential_clear(void);
 /* clean up when a thread is aborted */
 
 #if defined(CONFIG_THREAD_MONITOR)
-extern void _thread_exit(struct tcs *tcs);
+extern void _thread_monitor_exit(struct tcs *tcs);
 #else
-#define _thread_exit(tcs) \
+#define _thread_monitor_exit(tcs) \
 	do {/* nothing */ \
 	} while (0)
 #endif /* CONFIG_THREAD_MONITOR */
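
The #else branch stubs the call out as an empty do/while statement, so call
sites can invoke _thread_monitor_exit() unconditionally and the call simply
compiles away when CONFIG_THREAD_MONITOR is disabled. A minimal sketch of a
hypothetical caller (not taken from this tree) illustrating the pattern:

/* Hypothetical caller, for illustration only: with CONFIG_THREAD_MONITOR=y
 * the extern function unlinks the thread from the monitor list; with
 * CONFIG_THREAD_MONITOR=n the macro expands to an empty statement, so no
 * #ifdef is needed at the call site.
 */
static void example_abort_cleanup(struct tcs *tcs)
{
	_thread_monitor_exit(tcs);	/* no-op when thread monitoring is off */
	/* ... continue with the normal abort path ... */
}
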
@@ -146,28 +146,16 @@ void *sys_thread_custom_data_get(void)
 #endif /* CONFIG_THREAD_CUSTOM_DATA */
 
 #if defined(CONFIG_THREAD_MONITOR)
-/**
- *
- * @brief Thread exit routine
- *
- * This function is invoked when the specified thread is aborted, either
- * normally or abnormally. It is called for the termination of any thread,
- * (fibers and tasks).
- *
- * This routine must be invoked either from a fiber or from a task with
- * interrupts locked to guarantee that the list of threads does not change in
- * mid-operation. It cannot be called from ISR context.
- *
- * @return N/A
- */
-void _thread_exit(struct tcs *thread)
+/*
+ * Remove a thread from the kernel's list of active threads.
+ *
+ * On entry the current thread must be in a non-preemptible state to ensure
+ * the list of threads does not change in mid-operation. (That is, it must
+ * be a fiber or interrupts must be locked.) This routine cannot be called
+ * from an ISR context.
+ */
+void _thread_monitor_exit(struct tcs *thread)
 {
-	/*
-	 * Remove thread from the list of threads. This singly linked list of
-	 * threads maintains ALL the threads in the system: both tasks and
-	 * fibers regardless of whether they are runnable.
-	 */
-
 	if (thread == _nanokernel.threads) {
 		_nanokernel.threads = _nanokernel.threads->next_thread;
 	} else {
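
The hunk above ends at the opening of the else branch. For context, a plausible
completion of the unlink logic is sketched here; it is an assumption based on
the singly linked next_thread list visible in the hunk, not verbatim code from
the commit:

void _thread_monitor_exit(struct tcs *thread)	/* sketch only */
{
	if (thread == _nanokernel.threads) {
		_nanokernel.threads = _nanokernel.threads->next_thread;
	} else {
		struct tcs *prev = _nanokernel.threads;

		/* Walk the singly linked list to find the predecessor,
		 * then unlink the departing thread.
		 */
		while (prev->next_thread != thread) {
			prev = prev->next_thread;
		}
		prev->next_thread = thread->next_thread;
	}
}
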
@@ -172,7 +172,7 @@ FUNC_NORETURN void fiber_abort(void)
 {
	/* Do normal thread exit cleanup, then give up CPU control */
 
-	_thread_exit(_nanokernel.current);
+	_thread_monitor_exit(_nanokernel.current);
 	_nano_fiber_swap();
 }
 #endif
@@ -214,7 +214,7 @@ void fiber_delayed_start_cancel(nano_thread_id_t handle)
 	int key = irq_lock();
 
 	_nano_timeout_abort(cancelled_tcs);
-	_thread_exit(cancelled_tcs);
+	_thread_monitor_exit(cancelled_tcs);
 
 	irq_unlock(key);
 }
@@ -75,9 +75,9 @@ extern void _thread_essential_clear(void);
 /* clean up when a thread is aborted */
 
 #if defined(CONFIG_THREAD_MONITOR)
-extern void _thread_exit(struct tcs *tcs);
+extern void _thread_monitor_exit(struct tcs *tcs);
 #else
-#define _thread_exit(tcs) \
+#define _thread_monitor_exit(tcs) \
 	do {/* nothing */ \
 	} while (0)
 #endif /* CONFIG_THREAD_MONITOR */
@@ -165,28 +165,16 @@ void *k_thread_custom_data_get(void)
 #endif /* CONFIG_THREAD_CUSTOM_DATA */
 
 #if defined(CONFIG_THREAD_MONITOR)
-/**
- *
- * @brief Thread exit routine
- *
- * This function is invoked when the specified thread is aborted, either
- * normally or abnormally. It is called for the termination of any thread,
- * (fibers and tasks).
- *
- * This routine must be invoked either from a fiber or from a task with
- * interrupts locked to guarantee that the list of threads does not change in
- * mid-operation. It cannot be called from ISR context.
- *
- * @return N/A
- */
-void _thread_exit(struct k_thread *thread)
+/*
+ * Remove a thread from the kernel's list of active threads.
+ *
+ * On entry the current thread must be in a non-preemptible state to ensure
+ * the list of threads does not change in mid-operation. (That is, it must
+ * be non-preemptible or have locked the scheduler, or interrupts must be
+ * locked.) This routine cannot be called from an ISR context.
+ */
+void _thread_monitor_exit(struct k_thread *thread)
 {
-	/*
-	 * Remove thread from the list of threads. This singly linked list of
-	 * threads maintains ALL the threads in the system: both tasks and
-	 * fibers regardless of whether they are runnable.
-	 */
-
 	if (thread == _nanokernel.threads) {
 		_nanokernel.threads = _nanokernel.threads->next_thread;
 	} else {
@@ -308,7 +296,7 @@ int k_thread_cancel(k_tid_t tid)
 	}
 
 	_abort_thread_timeout(thread);
-	_thread_exit(thread);
+	_thread_monitor_exit(thread);
 
 	irq_unlock(key);
 