kernel/sched.c: add k_usleep() API function

Add a k_usleep() API, analogous to k_sleep(), except that the argument
is in microseconds rather than milliseconds.

Signed-off-by: Charles E. Youse <charles.youse@intel.com>
Authored by Charles E. Youse on 2019-05-09 16:46:46 -07:00; committed by Anas Nashif
commit a567831bed
3 changed files with 69 additions and 8 deletions
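
For context, a minimal usage sketch (hypothetical application code, not part of this commit): a thread that polls a device roughly every 250 microseconds. It assumes CONFIG_SYS_CLOCK_TICKS_PER_SEC has been raised high enough that a 250 us request maps to at least one tick; the thread entry point and read_device() are invented for illustration.

#include <zephyr.h>

extern void read_device(void);          /* hypothetical driver call */

static void poll_loop(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	for (;;) {
		read_device();
		k_usleep(250);          /* previously the floor was k_sleep(1), i.e. 1000 us */
	}
}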


@@ -790,16 +790,30 @@ void k_thread_system_pool_assign(struct k_thread *thread);
 /**
  * @brief Put the current thread to sleep.
  *
- * This routine puts the current thread to sleep for @a duration
- * milliseconds.
+ * This routine puts the current thread to sleep for @a duration milliseconds.
  *
- * @param duration Number of milliseconds to sleep.
+ * @param ms Number of milliseconds to sleep.
  *
  * @return Zero if the requested time has elapsed or the number of milliseconds
  * left to sleep, if thread was woken up by \ref k_wakeup call.
  *
  */
-__syscall s32_t k_sleep(s32_t duration);
+__syscall s32_t k_sleep(s32_t ms);
+
+/**
+ * @brief Put the current thread to sleep with microsecond resolution.
+ *
+ * This function is unlikely to work as expected without kernel tuning.
+ * In particular, because the lower bound on the duration of a sleep is
+ * the duration of a tick, CONFIG_SYS_CLOCK_TICKS_PER_SEC must be adjusted
+ * to achieve the resolution desired. The implications of doing this must
+ * be understood before attempting to use k_usleep(). Use with caution.
+ *
+ * @param us Number of microseconds to sleep.
+ *
+ * @return Zero if the requested time has elapsed or the number of microseconds
+ * left to sleep, if thread was woken up by \ref k_wakeup call.
+ */
+__syscall s32_t k_usleep(s32_t us);
 
 /**
  * @brief Cause the current thread to busy wait.

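The tick-granularity caveat in the new doc comment is the key constraint. As a rough illustration (my numbers, not from the commit): at the common default of CONFIG_SYS_CLOCK_TICKS_PER_SEC=100, one tick is 10000 us, so k_usleep(50) still blocks for about a full tick; at 10000 ticks per second one tick is 100 us. A hypothetical helper that makes the floor visible in application code:

#include <zephyr.h>

/* Hypothetical helper (not part of this commit): the smallest interval
 * k_usleep() can actually resolve is one tick, expressed here in us.
 * With CONFIG_SYS_CLOCK_TICKS_PER_SEC=100 this is 10000 us; raising the
 * tick rate to 10000 brings it down to 100 us.
 */
static inline s32_t k_usleep_resolution_us(void)
{
	return (USEC_PER_SEC + CONFIG_SYS_CLOCK_TICKS_PER_SEC - 1) /
	       CONFIG_SYS_CLOCK_TICKS_PER_SEC;
}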

@@ -129,6 +129,37 @@ static inline u64_t __ticks_to_ms(s64_t ticks)
 #endif
 }
 
+/*
+ * These are only currently used by k_usleep(), but they are
+ * defined here for parity with their ms analogs above. Note:
+ * we don't bother trying the 32-bit intermediate shortcuts
+ * possible with ms, because of the magnitudes involved.
+ */
+
+static inline s32_t z_us_to_ticks(s32_t us)
+{
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	return (s32_t) ceiling_fraction(
+		(s64_t)us * sys_clock_hw_cycles_per_sec(),
+		((s64_t)USEC_PER_SEC * sys_clock_hw_cycles_per_sec()) /
+		CONFIG_SYS_CLOCK_TICKS_PER_SEC);
+#else
+	__ASSERT(us == 0, "us not zero");
+	return 0;
+#endif
+}
+
+static inline s32_t __ticks_to_us(s32_t ticks)
+{
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	return (s32_t) ((s64_t)ticks * USEC_PER_SEC /
+			(s64_t)CONFIG_SYS_CLOCK_TICKS_PER_SEC);
+#else
+	__ASSERT(ticks == 0, "ticks not zero");
+	return 0;
+#endif
+}
+
 /* added tick needed to account for tick in progress */
 #define _TICK_ALIGN 1

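The rounding direction in z_us_to_ticks() matters: ceiling_fraction() rounds up, so a request that falls between tick boundaries sleeps longer rather than shorter than asked. Below is a standalone host-side sketch of that behavior under the simplifying assumption that USEC_PER_SEC divides evenly by the tick rate (the kernel version routes through sys_clock_hw_cycles_per_sec() to avoid losing precision when it does not); the helper name and values are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000LL

/* simplified model of z_us_to_ticks(): round up to whole ticks */
static int64_t us_to_ticks(int64_t us, int64_t ticks_per_sec)
{
	int64_t us_per_tick = USEC_PER_SEC / ticks_per_sec;

	return (us + us_per_tick - 1) / us_per_tick;
}

int main(void)
{
	/* at 10000 ticks/s one tick is 100 us */
	printf("%lld\n", (long long)us_to_ticks(200, 10000)); /* 2 ticks */
	printf("%lld\n", (long long)us_to_ticks(250, 10000)); /* 3 ticks: rounded up, never down */
	return 0;
}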

@@ -969,15 +969,31 @@ s32_t z_impl_k_sleep(int ms)
 }
 
 #ifdef CONFIG_USERSPACE
-Z_SYSCALL_HANDLER(k_sleep, duration)
+Z_SYSCALL_HANDLER(k_sleep, ms)
 {
 	/* FIXME there were some discussions recently on whether we should
 	 * relax this, thread would be unscheduled until k_wakeup issued
 	 */
-	Z_OOPS(Z_SYSCALL_VERIFY_MSG(duration != K_FOREVER,
+	Z_OOPS(Z_SYSCALL_VERIFY_MSG(ms != K_FOREVER,
 				    "sleeping forever not allowed"));
 
-	return z_impl_k_sleep(duration);
+	return z_impl_k_sleep(ms);
 }
 #endif
 
+s32_t z_impl_k_usleep(int us)
+{
+	s32_t ticks;
+
+	ticks = z_us_to_ticks(us);
+	ticks = z_tick_sleep(ticks);
+	return __ticks_to_us(ticks);
+}
+
+#ifdef CONFIG_USERSPACE
+Z_SYSCALL_HANDLER(k_usleep, us)
+{
+	return z_impl_k_usleep(us);
+}
+#endif
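
As with k_sleep(), the value returned by z_impl_k_usleep() reports the time left when the sleeper is woken early. A hypothetical sketch of that interaction (thread object, stack size, priority, and the demo() wrapper are invented, and it assumes a tick rate high enough for the requested durations to be meaningful):

#include <zephyr.h>

K_THREAD_STACK_DEFINE(sleeper_stack, 1024);
static struct k_thread sleeper;

static void sleeper_fn(void *a, void *b, void *c)
{
	ARG_UNUSED(a);
	ARG_UNUSED(b);
	ARG_UNUSED(c);

	s32_t left = k_usleep(5000);    /* ask for 5000 us */

	if (left > 0) {
		/* woken early by k_wakeup(); 'left' is the unslept remainder in us */
	}
}

void demo(void)
{
	k_tid_t tid = k_thread_create(&sleeper, sleeper_stack,
				      K_THREAD_STACK_SIZEOF(sleeper_stack),
				      sleeper_fn, NULL, NULL, NULL,
				      K_PRIO_PREEMPT(1), 0, K_NO_WAIT);

	k_sleep(1);        /* let the sleeper block for ~1 ms */
	k_wakeup(tid);     /* cut the remaining sleep short */
}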