diff --git a/kernel/thread.c b/kernel/thread.c
index 1d6a7ec8373..0cb1063f8cd 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -120,44 +120,6 @@ bool z_is_thread_essential(void)
 	return (_current->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
 }
 
-#ifdef CONFIG_SYS_CLOCK_EXISTS
-void z_impl_k_busy_wait(uint32_t usec_to_wait)
-{
-	if (usec_to_wait == 0U) {
-		return;
-	}
-
-#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
-	uint32_t start_cycles = k_cycle_get_32();
-
-	/* use 64-bit math to prevent overflow when multiplying */
-	uint32_t cycles_to_wait = (uint32_t)(
-		(uint64_t)usec_to_wait *
-		(uint64_t)sys_clock_hw_cycles_per_sec() /
-		(uint64_t)USEC_PER_SEC
-	);
-
-	for (;;) {
-		uint32_t current_cycles = k_cycle_get_32();
-
-		/* this handles the rollover on an unsigned 32-bit value */
-		if ((current_cycles - start_cycles) >= cycles_to_wait) {
-			break;
-		}
-	}
-#else
-	arch_busy_wait(usec_to_wait);
-#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
-}
-
-#ifdef CONFIG_USERSPACE
-static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
-{
-	z_impl_k_busy_wait(usec_to_wait);
-}
-#include <syscalls/k_busy_wait_mrsh.c>
-#endif /* CONFIG_USERSPACE */
-#endif /* CONFIG_SYS_CLOCK_EXISTS */
 
 #ifdef CONFIG_THREAD_CUSTOM_DATA
 void z_impl_k_thread_custom_data_set(void *value)
diff --git a/kernel/timeout.c b/kernel/timeout.c
index 54fce1c0679..f3ad84808e8 100644
--- a/kernel/timeout.c
+++ b/kernel/timeout.c
@@ -297,6 +297,43 @@ static inline int64_t z_vrfy_k_uptime_ticks(void)
 #include <syscalls/k_uptime_ticks_mrsh.c>
 #endif
 
+void z_impl_k_busy_wait(uint32_t usec_to_wait)
+{
+	if (usec_to_wait == 0U) {
+		return;
+	}
+
+#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
+	uint32_t start_cycles = k_cycle_get_32();
+
+	/* use 64-bit math to prevent overflow when multiplying */
+	uint32_t cycles_to_wait = (uint32_t)(
+		(uint64_t)usec_to_wait *
+		(uint64_t)sys_clock_hw_cycles_per_sec() /
+		(uint64_t)USEC_PER_SEC
+	);
+
+	for (;;) {
+		uint32_t current_cycles = k_cycle_get_32();
+
+		/* this handles the rollover on an unsigned 32-bit value */
+		if ((current_cycles - start_cycles) >= cycles_to_wait) {
+			break;
+		}
+	}
+#else
+	arch_busy_wait(usec_to_wait);
+#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
+}
+
+#ifdef CONFIG_USERSPACE
+static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
+{
+	z_impl_k_busy_wait(usec_to_wait);
+}
+#include <syscalls/k_busy_wait_mrsh.c>
+#endif /* CONFIG_USERSPACE */
+
 /* Returns the uptime expiration (relative to an unlocked "now"!) of a
  * timeout object. When used correctly, this should be called once,
  * synchronously with the user passing a new timeout value. It should
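
Illustrative sketch (not part of the change): the moved busy-wait loop relies on two arithmetic facts noted in its comments, the 64-bit intermediate for the microseconds-to-cycles conversion and the wrap-safe unsigned subtraction of cycle counts. The standalone program below demonstrates both with hypothetical values (a made-up 100 MHz cycle counter standing in for sys_clock_hw_cycles_per_sec(), and hand-picked counter readings instead of k_cycle_get_32()).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000U

int main(void)
{
	/* 64-bit intermediate keeps usec * cycles-per-second from
	 * overflowing 32 bits before the divide, as in the moved code.
	 * 100 MHz is a hypothetical cycle counter rate. */
	uint32_t usec_to_wait = 50000U;        /* 50 ms */
	uint32_t cycles_per_sec = 100000000U;  /* assumed 100 MHz */
	uint32_t cycles_to_wait = (uint32_t)(
		(uint64_t)usec_to_wait *
		(uint64_t)cycles_per_sec /
		(uint64_t)USEC_PER_SEC);
	printf("cycles_to_wait = %" PRIu32 "\n", cycles_to_wait); /* 5000000 */

	/* Unsigned subtraction is defined modulo 2^32, so the elapsed
	 * count stays correct even if the counter wraps between reads. */
	uint32_t start_cycles = 0xFFFFFF00U;   /* just before wraparound */
	uint32_t current_cycles = 0x00000100U; /* just after wraparound */
	uint32_t elapsed = current_cycles - start_cycles;
	printf("elapsed = %" PRIu32 "\n", elapsed); /* 512, not ~4 billion */

	return 0;
}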