kernel/timeout: Add timeout remaining/expires APIs
Add tick-based (i.e. precision-resistant) inspection APIs for kernel timeouts visible via k_timer, k_delayed_work and thread timeouts (i.e. pended/sleeping threads). These are each available in "remaining" and "expires" variants returning time values relative to current time and system start, respectively. All have system calls where applicable (i.e. everywhere but k_delayed_work, which is not a userspace API).

The pre-existing millisecond "remaining_get()" predicates for timer and delayed work remain, but are now expressed in terms of the newer calls.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
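For orientation, a minimal usage sketch of the new tick-based timer calls (illustrative only, not part of this commit; the report_timer() helper and the printk formatting are assumptions, and the casts account for k_ticks_t, whose width depends on kernel configuration):

#include <kernel.h>
#include <sys/printk.h>

/* Hypothetical helper: query one timer both ways. */
void report_timer(struct k_timer *timer)
{
	/* Ticks left until the next expiration; zero if the timer is not running */
	k_ticks_t left = k_timer_remaining_ticks(timer);

	/* Absolute system uptime, in ticks, at which it next expires */
	k_ticks_t when = k_timer_expires_ticks(timer);

	printk("timer fires in %lld ticks, at uptime tick %lld\n",
	       (long long)left, (long long)when);
}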
parent e1bc595947
commit 5a5d3daf6f

5 changed files with 168 additions and 19 deletions
include/kernel.h (117 lines changed)
@@ -1054,6 +1054,43 @@ __syscall void k_thread_abort(k_tid_t thread);
  */
 __syscall void k_thread_start(k_tid_t thread);
 
+extern k_ticks_t z_timeout_expires(struct _timeout *timeout);
+extern k_ticks_t z_timeout_remaining(struct _timeout *timeout);
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+
+/**
+ * @brief Get time when a thread wakes up, in ticks
+ *
+ * This routine computes the system uptime when a waiting thread next
+ * executes, in units of system ticks. If the thread is not waiting,
+ * it returns current system time.
+ */
+__syscall k_ticks_t k_thread_timeout_expires_ticks(struct k_thread *t);
+
+static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
+						struct k_thread *t)
+{
+	return z_timeout_expires(&t->base.timeout);
+}
+
+/**
+ * @brief Get time remaining before a thread wakes up, in ticks
+ *
+ * This routine computes the time remaining before a waiting thread
+ * next executes, in units of system ticks. If the thread is not
+ * waiting, it returns zero.
+ */
+__syscall k_ticks_t k_thread_timeout_remaining_ticks(struct k_thread *t);
+
+static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
+						struct k_thread *t)
+{
+	return z_timeout_remaining(&t->base.timeout);
+}
+
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
+
 /**
  * @cond INTERNAL_HIDDEN
  */
@@ -1924,7 +1961,38 @@ __syscall u32_t k_timer_status_get(struct k_timer *timer);
  */
 __syscall u32_t k_timer_status_sync(struct k_timer *timer);
 
-extern s32_t z_timeout_remaining(struct _timeout *timeout);
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+
+/**
+ * @brief Get next expiration time of a timer, in ticks
+ *
+ * This routine returns the future system uptime reached at the next
+ * time of expiration of the timer, in units of system ticks. If the
+ * timer is not running, current system time is returned.
+ *
+ * @param timer The timer object
+ * @return Uptime of expiration, in ticks
+ */
+__syscall k_ticks_t k_timer_expires_ticks(struct k_timer *timer);
+
+static inline k_ticks_t z_impl_k_timer_expires_ticks(struct k_timer *timer)
+{
+	return z_timeout_expires(&timer->timeout);
+}
+
+/**
+ * @brief Get time remaining before a timer next expires, in ticks
+ *
+ * This routine computes the time remaining before a running timer
+ * next expires, in units of system ticks. If the timer is not
+ * running, it returns zero.
+ */
+__syscall k_ticks_t k_timer_remaining_ticks(struct k_timer *timer);
+
+static inline k_ticks_t z_impl_k_timer_remaining_ticks(struct k_timer *timer)
+{
+	return z_timeout_remaining(&timer->timeout);
+}
+
 /**
  * @brief Get time remaining before a timer next expires.
@@ -1936,14 +2004,13 @@ extern s32_t z_timeout_remaining(struct _timeout *timeout);
  *
  * @return Remaining time (in milliseconds).
  */
-__syscall u32_t k_timer_remaining_get(struct k_timer *timer);
-
-static inline u32_t z_impl_k_timer_remaining_get(struct k_timer *timer)
+static inline u32_t k_timer_remaining_get(struct k_timer *timer)
 {
-	const s32_t ticks = z_timeout_remaining(&timer->timeout);
-	return (ticks > 0) ? (u32_t)k_ticks_to_ms_floor64(ticks) : 0U;
+	return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
 }
 
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
+
 /**
  * @brief Associate user-specific data with a timer.
  *
@@ -3379,6 +3446,42 @@ static inline int k_delayed_work_submit(struct k_delayed_work *work,
 	return k_delayed_work_submit_to_queue(&k_sys_work_q, work, delay);
 }
 
+/**
+ * @brief Get time when a delayed work will be scheduled
+ *
+ * This routine computes the system uptime when a delayed work gets
+ * executed. If the delayed work is not waiting to be scheduled, it
+ * returns current system time.
+ *
+ * @param work Delayed work item.
+ *
+ * @return Uptime of execution (in ticks).
+ * @req K-DWORK-001
+ */
+static inline k_ticks_t k_delayed_work_expires_ticks(
+				       struct k_delayed_work *work)
+{
+	return z_timeout_expires(&work->timeout);
+}
+
+/**
+ * @brief Get time remaining before a delayed work gets scheduled, in ticks
+ *
+ * This routine computes the time remaining before a delayed work gets
+ * executed. If the delayed work is not waiting to be scheduled, it
+ * returns zero.
+ *
+ * @param work Delayed work item.
+ *
+ * @return Remaining time (in ticks).
+ * @req K-DWORK-001
+ */
+static inline k_ticks_t k_delayed_work_remaining_ticks(
+				       struct k_delayed_work *work)
+{
+	return z_timeout_remaining(&work->timeout);
+}
+
 /**
  * @brief Get time remaining before a delayed work gets scheduled.
  *
@@ -3393,7 +3496,7 @@ static inline int k_delayed_work_submit(struct k_delayed_work *work,
  */
 static inline s32_t k_delayed_work_remaining_get(struct k_delayed_work *work)
 {
-	return k_ticks_to_ms_floor64(z_timeout_remaining(&work->timeout));
+	return k_ticks_to_ms_floor32(z_timeout_remaining(&work->timeout));
 }
 
 /**
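A similarly illustrative sketch for the delayed-work variants added above (the handler, the 100 ms delay and the surrounding function are assumptions, not code from this commit):

#include <kernel.h>
#include <sys/printk.h>

static void work_handler(struct k_work *work)
{
	ARG_UNUSED(work);
	printk("delayed work ran\n");
}

void delayed_work_example(void)
{
	static struct k_delayed_work dwork;

	k_delayed_work_init(&dwork, work_handler);
	k_delayed_work_submit(&dwork, K_MSEC(100));

	/* Tick-precise view of the pending item */
	printk("work runs in %lld ticks (at uptime tick %lld)\n",
	       (long long)k_delayed_work_remaining_ticks(&dwork),
	       (long long)k_delayed_work_expires_ticks(&dwork));

	/* The pre-existing millisecond helper now just floors the tick value:
	 * k_ticks_to_ms_floor32(z_timeout_remaining(&work->timeout)).
	 */
	printk("about %d ms left\n", k_delayed_work_remaining_get(&dwork));
}

Note that the delayed-work accessors are plain inlines rather than system calls, since k_delayed_work is not a userspace API.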
@@ -58,7 +58,7 @@ s32_t z_get_next_timeout_expiry(void);
 
 void z_set_timeout_expiry(s32_t ticks, bool idle);
 
-s32_t z_timeout_remaining(struct _timeout *timeout);
+k_ticks_t z_timeout_remaining(struct _timeout *timeout);
 
 #else
 
@@ -944,3 +944,21 @@ int z_vrfy_k_thread_stack_space_get(const struct k_thread *thread,
 #include <syscalls/k_thread_stack_space_get_mrsh.c>
 #endif /* CONFIG_USERSPACE */
 #endif /* CONFIG_INIT_STACKS && CONFIG_THREAD_STACK_INFO */
+
+#ifdef CONFIG_USERSPACE
+static inline k_ticks_t z_vrfy_k_thread_timeout_remaining_ticks(
+						struct k_thread *t)
+{
+	Z_OOPS(Z_SYSCALL_OBJ(t, K_OBJ_THREAD));
+	return z_impl_k_thread_timeout_remaining_ticks(t);
+}
+#include <syscalls/k_thread_timeout_remaining_ticks_mrsh.c>
+
+static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks(
+						struct k_thread *t)
+{
+	Z_OOPS(Z_SYSCALL_OBJ(t, K_OBJ_THREAD));
+	return z_impl_k_thread_timeout_expires_ticks(t);
+}
+#include <syscalls/k_thread_timeout_expires_ticks_mrsh.c>
+#endif
@@ -139,26 +139,47 @@ int z_abort_timeout(struct _timeout *to)
 	return ret;
 }
 
-s32_t z_timeout_remaining(struct _timeout *timeout)
+/* must be locked */
+static k_ticks_t timeout_rem(struct _timeout *timeout)
 {
-	s32_t ticks = 0;
+	k_ticks_t ticks = 0;
 
 	if (z_is_inactive_timeout(timeout)) {
 		return 0;
 	}
 
-	LOCKED(&timeout_lock) {
-		for (struct _timeout *t = first(); t != NULL; t = next(t)) {
-			ticks += t->dticks;
-			if (timeout == t) {
-				break;
-			}
+	for (struct _timeout *t = first(); t != NULL; t = next(t)) {
+		ticks += t->dticks;
+		if (timeout == t) {
+			break;
 		}
 	}
 
 	return ticks - elapsed();
 }
 
+k_ticks_t z_timeout_remaining(struct _timeout *timeout)
+{
+	k_ticks_t ticks = 0;
+
+	LOCKED(&timeout_lock) {
+		ticks = timeout_rem(timeout);
+	}
+
+	return ticks;
+}
+
+k_ticks_t z_timeout_expires(struct _timeout *timeout)
+{
+	k_ticks_t ticks = 0;
+
+	LOCKED(&timeout_lock) {
+		ticks = curr_tick + timeout_rem(timeout);
+	}
+
+	return ticks;
+}
+
 s32_t z_get_next_timeout_expiry(void)
 {
 	s32_t ret = (s32_t) K_TICKS_FOREVER;
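To make the delta-list walk above concrete (illustrative numbers, not from this change): the timeout queue stores each entry's dticks relative to its predecessor, so with three queued timeouts of 5, 3 and 7 ticks, timeout_rem() on the third entry sums 5 + 3 + 7 = 15 and subtracts elapsed(); if elapsed() returns 2 at that moment, z_timeout_remaining() reports 13 ticks and z_timeout_expires() reports curr_tick + 13.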
@@ -236,12 +236,19 @@ static inline u32_t z_vrfy_k_timer_status_sync(struct k_timer *timer)
 }
 #include <syscalls/k_timer_status_sync_mrsh.c>
 
-static inline u32_t z_vrfy_k_timer_remaining_get(struct k_timer *timer)
+static inline k_ticks_t z_vrfy_k_timer_remaining_ticks(struct k_timer *timer)
 {
 	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
-	return z_impl_k_timer_remaining_get(timer);
+	return z_impl_k_timer_remaining_ticks(timer);
 }
-#include <syscalls/k_timer_remaining_get_mrsh.c>
+#include <syscalls/k_timer_remaining_ticks_mrsh.c>
+
+static inline k_ticks_t z_vrfy_k_timer_expires_ticks(struct k_timer *timer)
+{
+	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
+	return z_impl_k_timer_expires_ticks(timer);
+}
+#include <syscalls/k_timer_expires_ticks_mrsh.c>
 
 static inline void *z_vrfy_k_timer_user_data_get(struct k_timer *timer)
 {