From 7832738ae985a63febb8f82e7c4e34824f48486e Mon Sep 17 00:00:00 2001
From: Andy Ross
Date: Thu, 5 Mar 2020 15:18:14 -0800
Subject: [PATCH] kernel/timeout: Make timeout arguments an opaque type

Add a k_timeout_t type, and use it everywhere that kernel API
functions were accepting a millisecond timeout argument. Instead of
forcing milliseconds everywhere (which are often not integrally
representable as system ticks), do the conversion to ticks at the
point where the timeout is created. This avoids an extra unit
conversion in some application code, and allows us to express the
timeout in units other than milliseconds to achieve greater precision.
The existing K_MSEC() et al. macros now return initializers for a
k_timeout_t.

The K_NO_WAIT and K_FOREVER constants have now become k_timeout_t
values, which means they cannot be operated on as integers.
Applications which have their own APIs that need to inspect these
vs. user-provided timeouts can now use a K_TIMEOUT_EQ() predicate to
test for equality.

Timer drivers, which receive an integer tick count in their
z_clock_set_timeout() functions, now use the integer-valued
K_TICKS_FOREVER constant instead of K_FOREVER.

For the initial release, to preserve source compatibility, a
CONFIG_LEGACY_TIMEOUT_API kconfig option is provided. When true,
k_timeout_t will remain a compatible 32-bit value that will work with
any legacy Zephyr application.

Some subsystems present timeout (or timeout-like) values to their own
users as APIs that re-use the kernel's own constants and conventions.
These will require some minor design work to adapt to the new scheme
(in most cases just using k_timeout_t directly in their own API), and
they have not been changed in this patch; instead they select
CONFIG_LEGACY_TIMEOUT_API via kconfig. These subsystems include: CAN
Bus, the Microbit display driver, I2S, LoRa modem drivers, the UART
Async API, Video hardware drivers, the console subsystem, and the
network buffer abstraction.

k_sleep() now takes a k_timeout_t argument, with a k_msleep() variant
provided that works identically to the original API.

Most of the changes here are just type/configuration management and
documentation, but there are logic changes in mempool, where a loop
that used a timeout numerically has been reworked using a new
z_timeout_end_calc() predicate. Also in queue.c (when POLL was
enabled), a similar loop was needlessly used to retry the k_poll()
call after a spurious failure. But k_poll() does not fail spuriously,
so the loop was removed.
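
For illustration, an application or subsystem API that forwards
user-provided timeouts can no longer compare them as integers, but
can use the new predicate instead. A minimal sketch follows;
my_subsys_wait() and its semantics are hypothetical, not part of
this patch:

    #include <kernel.h>

    /* Poll once when asked for K_NO_WAIT, otherwise block. */
    int my_subsys_wait(struct k_sem *sem, k_timeout_t timeout)
    {
            if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
                    /* Pure poll: never block the caller. */
                    return k_sem_take(sem, K_NO_WAIT);
            }

            /* Finite or K_FOREVER: pass the opaque value through. */
            return k_sem_take(sem, timeout);
    }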
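
A before/after sketch of the sleep change (the durations are
arbitrary examples):

    /* Old API: integer milliseconds. */
    k_sleep(100);

    /* New API: opaque initializer macros, in whatever unit fits... */
    k_sleep(K_MSEC(100));
    k_sleep(K_SECONDS(1));

    /* ...or the drop-in millisecond variant. */
    k_msleep(100);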
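
The reworked mempool wait loop has this general shape (paraphrased
from the diff below; try_alloc() stands in for the real allocation
attempt, and z_timeout_end_calc() returns an absolute end time in
ticks):

    u64_t end = z_timeout_end_calc(timeout);

    while (true) {
            if (try_alloc() == 0 || K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
                    break;
            }

            z_pend_curr_unlocked(&p->wait_q, timeout);

            if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
                    s64_t remaining = end - z_tick_get();

                    if (remaining <= 0) {
                            break;  /* deadline passed */
                    }
                    /* Sleep again only for the time still remaining. */
                    timeout = Z_TIMEOUT_TICKS(remaining);
            }
    }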
Signed-off-by: Andy Ross --- boards/arm/qemu_cortex_m0/nrf_timer_timer.c | 2 +- drivers/can/Kconfig | 1 + drivers/display/Kconfig.microbit | 1 + drivers/i2s/Kconfig | 1 + drivers/lora/Kconfig | 1 + drivers/serial/Kconfig | 1 + drivers/timer/apic_timer.c | 2 +- drivers/timer/arcv2_timer0.c | 5 +- drivers/timer/arm_arch_timer.c | 2 +- drivers/timer/cavs_timer.c | 2 +- drivers/timer/cc13x2_cc26x2_rtc_timer.c | 2 +- drivers/timer/cortex_m_systick.c | 5 +- drivers/timer/hpet.c | 4 +- drivers/timer/legacy_api.h | 2 +- drivers/timer/loapic_timer.c | 4 +- drivers/timer/mchp_xec_rtos_timer.c | 4 +- drivers/timer/native_posix_timer.c | 2 +- drivers/timer/nrf_rtc_timer.c | 2 +- drivers/timer/riscv_machine_timer.c | 2 +- drivers/timer/xlnx_psttc_timer.c | 2 +- drivers/timer/xtensa_sys_timer.c | 2 +- drivers/video/Kconfig | 1 + include/drivers/timer/system_timer.h | 2 +- include/kernel.h | 159 +++++++++--------- include/sys/mutex.h | 10 +- include/sys/sem.h | 4 +- include/sys_clock.h | 57 ++++++- include/timeout_q.h | 14 +- kernel/Kconfig | 9 + kernel/futex.c | 5 +- kernel/include/ksched.h | 9 +- kernel/mailbox.c | 13 +- kernel/mem_slab.c | 4 +- kernel/mempool.c | 20 +-- kernel/msg_q.c | 18 +- kernel/mutex.c | 7 +- kernel/pipes.c | 20 ++- kernel/poll.c | 21 ++- kernel/queue.c | 40 ++--- kernel/sched.c | 70 ++++---- kernel/sem.c | 9 +- kernel/stack.c | 7 +- kernel/thread.c | 22 +-- kernel/timeout.c | 36 +++- kernel/timer.c | 43 +++-- kernel/work_q.c | 11 +- lib/cmsis_rtos_v1/Kconfig | 1 + lib/cmsis_rtos_v2/Kconfig | 1 + lib/os/mutex.c | 4 +- lib/os/sem.c | 4 +- lib/posix/Kconfig | 1 + lib/posix/pthread_common.c | 2 +- samples/cpp_synchronization/src/main.cpp | 8 +- .../scheduler/metairq_dispatch/src/msgdev.c | 4 + soc/arm/ti_simplelink/Kconfig | 1 + subsys/console/Kconfig | 1 + subsys/net/Kconfig | 1 + subsys/power/policy/policy_residency.c | 4 +- tests/kernel/lifo/lifo_usage/src/main.c | 2 +- tests/kernel/mbox/mbox_usage/src/main.c | 5 +- tests/kernel/mem_protect/futex/prj.conf | 1 + tests/kernel/mem_protect/futex/src/main.c | 3 +- tests/kernel/pending/src/main.c | 4 +- .../pipe/pipe_api/src/test_pipe_contexts.c | 6 +- tests/kernel/sleep/src/main.c | 2 +- tests/kernel/workq/work_queue/src/main.c | 2 +- 66 files changed, 440 insertions(+), 277 deletions(-) diff --git a/boards/arm/qemu_cortex_m0/nrf_timer_timer.c b/boards/arm/qemu_cortex_m0/nrf_timer_timer.c index d89aeb0fc09..d70208960be 100644 --- a/boards/arm/qemu_cortex_m0/nrf_timer_timer.c +++ b/boards/arm/qemu_cortex_m0/nrf_timer_timer.c @@ -108,7 +108,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle) ARG_UNUSED(idle); #ifdef CONFIG_TICKLESS_KERNEL - ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks; + ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks; ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0); k_spinlock_key_t key = k_spin_lock(&lock); diff --git a/drivers/can/Kconfig b/drivers/can/Kconfig index e6c473ff1da..f5b2f285f4f 100644 --- a/drivers/can/Kconfig +++ b/drivers/can/Kconfig @@ -8,6 +8,7 @@ # menuconfig CAN bool "CAN Drivers" + select LEGACY_TIMEOUT_API help Enable CAN Driver Configuration diff --git a/drivers/display/Kconfig.microbit b/drivers/display/Kconfig.microbit index 6e188874dad..a4d4a9d237d 100644 --- a/drivers/display/Kconfig.microbit +++ b/drivers/display/Kconfig.microbit @@ -8,6 +8,7 @@ config MICROBIT_DISPLAY depends on BOARD_BBC_MICROBIT depends on PRINTK depends on GPIO + select LEGACY_TIMEOUT_API help Enable this to be able to display images and text on the 5x5 LED matrix display on the BBC micro:bit. 
diff --git a/drivers/i2s/Kconfig b/drivers/i2s/Kconfig index 2ac144192ad..7dd99d0a8b1 100644 --- a/drivers/i2s/Kconfig +++ b/drivers/i2s/Kconfig @@ -8,6 +8,7 @@ # menuconfig I2S bool "I2S bus drivers" + select LEGACY_TIMEOUT_API help Enable support for the I2S (Inter-IC Sound) hardware bus. diff --git a/drivers/lora/Kconfig b/drivers/lora/Kconfig index 2b900c72e12..ba508715aad 100644 --- a/drivers/lora/Kconfig +++ b/drivers/lora/Kconfig @@ -9,6 +9,7 @@ menuconfig LORA bool "LoRa support" depends on NEWLIB_LIBC + select LEGACY_TIMEOUT_API help Include LoRa drivers in the system configuration. diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 52ef05146d8..bb52923bf5a 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -34,6 +34,7 @@ config SERIAL_SUPPORT_INTERRUPT config UART_ASYNC_API bool "Enable new asynchronous UART API [EXPERIMENTAL]" depends on SERIAL_SUPPORT_ASYNC + select LEGACY_TIMEOUT_API help This option enables new asynchronous UART API. diff --git a/drivers/timer/apic_timer.c b/drivers/timer/apic_timer.c index 6ede81acc89..f1866e05ec7 100644 --- a/drivers/timer/apic_timer.c +++ b/drivers/timer/apic_timer.c @@ -89,7 +89,7 @@ void z_clock_set_timeout(s32_t n, bool idle) if (n < 1) { full_ticks = 0; - } else if ((n == K_FOREVER) || (n > MAX_TICKS)) { + } else if ((n == K_TICKS_FOREVER) || (n > MAX_TICKS)) { full_ticks = MAX_TICKS - 1; } else { full_ticks = n - 1; diff --git a/drivers/timer/arcv2_timer0.c b/drivers/timer/arcv2_timer0.c index 8d300c7a924..72a46f48e1b 100644 --- a/drivers/timer/arcv2_timer0.c +++ b/drivers/timer/arcv2_timer0.c @@ -242,7 +242,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle) * However for single core using 32-bits arc timer, idle cannot * be ignored, as 32-bits timer will overflow in a not-long time. */ - if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && ticks == K_FOREVER) { + if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && ticks == K_TICKS_FOREVER) { timer0_control_register_set(0); timer0_count_register_set(0); timer0_limit_register_set(0); @@ -268,7 +268,8 @@ void z_clock_set_timeout(s32_t ticks, bool idle) arch_irq_unlock(key); #endif #else - if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle && ticks == K_FOREVER) { + if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle + && ticks == K_TICKS_FOREVER) { timer0_control_register_set(0); timer0_count_register_set(0); timer0_limit_register_set(0); diff --git a/drivers/timer/arm_arch_timer.c b/drivers/timer/arm_arch_timer.c index ed9a7f056ab..d9d52a2aa21 100644 --- a/drivers/timer/arm_arch_timer.c +++ b/drivers/timer/arm_arch_timer.c @@ -68,7 +68,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle) return; } - ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks; + ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks; ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0); k_spinlock_key_t key = k_spin_lock(&lock); diff --git a/drivers/timer/cavs_timer.c b/drivers/timer/cavs_timer.c index 3052a61cfb2..607e8f759f1 100644 --- a/drivers/timer/cavs_timer.c +++ b/drivers/timer/cavs_timer.c @@ -120,7 +120,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle) ARG_UNUSED(idle); #ifdef CONFIG_TICKLESS_KERNEL - ticks = ticks == K_FOREVER ? MAX_TICKS : ticks; + ticks = ticks == K_TICKS_FOREVER ? 
MAX_TICKS : ticks; ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0); k_spinlock_key_t key = k_spin_lock(&lock); diff --git a/drivers/timer/cc13x2_cc26x2_rtc_timer.c b/drivers/timer/cc13x2_cc26x2_rtc_timer.c index 65197c6c9d9..e542a05253c 100644 --- a/drivers/timer/cc13x2_cc26x2_rtc_timer.c +++ b/drivers/timer/cc13x2_cc26x2_rtc_timer.c @@ -207,7 +207,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle) #ifdef CONFIG_TICKLESS_KERNEL - ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks; + ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks; ticks = MAX(MIN(ticks - 1, (s32_t) MAX_TICKS), 0); k_spinlock_key_t key = k_spin_lock(&lock); diff --git a/drivers/timer/cortex_m_systick.c b/drivers/timer/cortex_m_systick.c index 7a428322488..fbcef220307 100644 --- a/drivers/timer/cortex_m_systick.c +++ b/drivers/timer/cortex_m_systick.c @@ -172,7 +172,8 @@ void z_clock_set_timeout(s32_t ticks, bool idle) * the counter. (Note: we can assume if idle==true that * interrupts are already disabled) */ - if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle && ticks == K_FOREVER) { + if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle + && ticks == K_TICKS_FOREVER) { SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk; last_load = TIMER_STOPPED; return; @@ -181,7 +182,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle) #if defined(CONFIG_TICKLESS_KERNEL) u32_t delay; - ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks; + ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks; ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0); k_spinlock_key_t key = k_spin_lock(&lock); diff --git a/drivers/timer/hpet.c b/drivers/timer/hpet.c index 8cccfd6474f..05aba21959d 100644 --- a/drivers/timer/hpet.c +++ b/drivers/timer/hpet.c @@ -129,12 +129,12 @@ void z_clock_set_timeout(s32_t ticks, bool idle) ARG_UNUSED(idle); #if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_QEMU_TICKLESS_WORKAROUND) - if (ticks == K_FOREVER && idle) { + if (ticks == K_TICKS_FOREVER && idle) { GENERAL_CONF_REG &= ~GCONF_ENABLE; return; } - ticks = ticks == K_FOREVER ? max_ticks : ticks; + ticks = ticks == K_TICKS_FOREVER ? max_ticks : ticks; ticks = MAX(MIN(ticks - 1, (s32_t)max_ticks), 0); k_spinlock_key_t key = k_spin_lock(&lock); diff --git a/drivers/timer/legacy_api.h b/drivers/timer/legacy_api.h index 41d7ddc8c3d..8f0c42acda9 100644 --- a/drivers/timer/legacy_api.h +++ b/drivers/timer/legacy_api.h @@ -34,7 +34,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle) if (idle) { z_timer_idle_enter(ticks); } else { - z_set_time(ticks == K_FOREVER ? 0 : ticks); + z_set_time(ticks == K_TICKS_FOREVER ? 0 : ticks); } #endif } diff --git a/drivers/timer/loapic_timer.c b/drivers/timer/loapic_timer.c index fe9150345ad..bd3a11dde61 100644 --- a/drivers/timer/loapic_timer.c +++ b/drivers/timer/loapic_timer.c @@ -390,7 +390,7 @@ void z_timer_idle_enter(s32_t ticks /* system ticks */ ) { #ifdef CONFIG_TICKLESS_KERNEL - if (ticks != K_FOREVER) { + if (ticks != K_TICKS_FOREVER) { /* Need to reprogram only if current program is smaller */ if (ticks > programmed_full_ticks) { z_set_time(ticks); @@ -417,7 +417,7 @@ void z_timer_idle_enter(s32_t ticks /* system ticks */ cycles = current_count_register_get(); - if ((ticks == K_FOREVER) || (ticks > max_system_ticks)) { + if ((ticks == K_TICKS_FOREVER) || (ticks > max_system_ticks)) { /* * The number of cycles until the timer must fire next might not fit * in the 32-bit counter register. 
To work around this, program diff --git a/drivers/timer/mchp_xec_rtos_timer.c b/drivers/timer/mchp_xec_rtos_timer.c index 8ed2adfee48..db2a1fb2071 100644 --- a/drivers/timer/mchp_xec_rtos_timer.c +++ b/drivers/timer/mchp_xec_rtos_timer.c @@ -135,7 +135,7 @@ void z_clock_set_timeout(s32_t n, bool idle) u32_t full_cycles; /* full_ticks represented as cycles */ u32_t partial_cycles; /* number of cycles to first tick boundary */ - if (idle && (n == K_FOREVER)) { + if (idle && (n == K_TICKS_FOREVER)) { /* * We are not in a locked section. Are writes to two * global objects safe from pre-emption? @@ -147,7 +147,7 @@ void z_clock_set_timeout(s32_t n, bool idle) if (n < 1) { full_ticks = 0; - } else if ((n == K_FOREVER) || (n > MAX_TICKS)) { + } else if ((n == K_TICKS_FOREVER) || (n > MAX_TICKS)) { full_ticks = MAX_TICKS - 1; } else { full_ticks = n - 1; diff --git a/drivers/timer/native_posix_timer.c b/drivers/timer/native_posix_timer.c index 874dbd30f34..7a966968eab 100644 --- a/drivers/timer/native_posix_timer.c +++ b/drivers/timer/native_posix_timer.c @@ -90,7 +90,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle) /* Note that we treat INT_MAX literally as anyhow the maximum amount of * ticks we can report with z_clock_announce() is INT_MAX */ - if (ticks == K_FOREVER) { + if (ticks == K_TICKS_FOREVER) { silent_ticks = INT64_MAX; } else if (ticks > 0) { silent_ticks = ticks - 1; diff --git a/drivers/timer/nrf_rtc_timer.c b/drivers/timer/nrf_rtc_timer.c index a3ed40d5b23..46ee3cc7738 100644 --- a/drivers/timer/nrf_rtc_timer.c +++ b/drivers/timer/nrf_rtc_timer.c @@ -117,7 +117,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle) ARG_UNUSED(idle); #ifdef CONFIG_TICKLESS_KERNEL - ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks; + ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks; ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0); k_spinlock_key_t key = k_spin_lock(&lock); diff --git a/drivers/timer/riscv_machine_timer.c b/drivers/timer/riscv_machine_timer.c index 06c735c7c04..2d0e574f349 100644 --- a/drivers/timer/riscv_machine_timer.c +++ b/drivers/timer/riscv_machine_timer.c @@ -104,7 +104,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle) return; } - ticks = ticks == K_FOREVER ? MAX_TICKS : ticks; + ticks = ticks == K_TICKS_FOREVER ? MAX_TICKS : ticks; ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0); k_spinlock_key_t key = k_spin_lock(&lock); diff --git a/drivers/timer/xlnx_psttc_timer.c b/drivers/timer/xlnx_psttc_timer.c index 6bd8ad67437..c80245781a0 100644 --- a/drivers/timer/xlnx_psttc_timer.c +++ b/drivers/timer/xlnx_psttc_timer.c @@ -161,7 +161,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle) cycles = read_count(); /* Calculate timeout counter value */ - if (ticks == K_FOREVER) { + if (ticks == K_TICKS_FOREVER) { next_cycles = cycles + CYCLES_NEXT_MAX; } else { next_cycles = cycles + ((u32_t)ticks * CYCLES_PER_TICK); diff --git a/drivers/timer/xtensa_sys_timer.c b/drivers/timer/xtensa_sys_timer.c index ddc609ee63a..783d11a9a10 100644 --- a/drivers/timer/xtensa_sys_timer.c +++ b/drivers/timer/xtensa_sys_timer.c @@ -71,7 +71,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle) ARG_UNUSED(idle); #if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_QEMU_TICKLESS_WORKAROUND) - ticks = ticks == K_FOREVER ? MAX_TICKS : ticks; + ticks = ticks == K_TICKS_FOREVER ? 
MAX_TICKS : ticks; ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0); k_spinlock_key_t key = k_spin_lock(&lock); diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 0f35edb3d9f..384876679f5 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -8,6 +8,7 @@ # menuconfig VIDEO bool "VIDEO hardware support" + select LEGACY_TIMEOUT_API help Enable support for the VIDEO. diff --git a/include/drivers/timer/system_timer.h b/include/drivers/timer/system_timer.h index 9c3c416ee55..f3c4dc9a8ec 100644 --- a/include/drivers/timer/system_timer.h +++ b/include/drivers/timer/system_timer.h @@ -59,7 +59,7 @@ extern int z_clock_device_ctrl(struct device *device, u32_t ctrl_command, * treated identically: it simply indicates the kernel would like the * next tick announcement as soon as possible. * - * Note that ticks can also be passed the special value K_FOREVER, + * Note that ticks can also be passed the special value K_TICKS_FOREVER, * indicating that no future timer interrupts are expected or required * and that the system is permitted to enter an indefinite sleep even * if this could cause rollover of the internal counter (i.e. the diff --git a/include/kernel.h b/include/kernel.h index d5a266d4b14..94bf67313c7 100644 --- a/include/kernel.h +++ b/include/kernel.h @@ -810,7 +810,7 @@ extern void k_thread_foreach_unlocked( * @param p3 3rd entry point parameter. * @param prio Thread priority. * @param options Thread options. - * @param delay Scheduling delay (in milliseconds), or K_NO_WAIT (for no delay). + * @param delay Scheduling delay, or K_NO_WAIT (for no delay). * * @return ID of new thread. * @@ -821,7 +821,7 @@ __syscall k_tid_t k_thread_create(struct k_thread *new_thread, size_t stack_size, k_thread_entry_t entry, void *p1, void *p2, void *p3, - int prio, u32_t options, s32_t delay); + int prio, u32_t options, k_timeout_t delay); /** * @brief Drop a thread's privileges permanently to user mode @@ -926,15 +926,27 @@ void k_thread_system_pool_assign(struct k_thread *thread); * This API may only be called from ISRs with a K_NO_WAIT timeout. * * @param thread Thread to wait to exit - * @param timeout non-negative upper bound time in ms to wait for the thread - * to exit. + * @param timeout upper bound time to wait for the thread to exit. * @retval 0 success, target thread has exited or wasn't running * @retval -EBUSY returned without waiting * @retval -EAGAIN waiting period timed out * @retval -EDEADLK target thread is joining on the caller, or target thread * is the caller */ -__syscall int k_thread_join(struct k_thread *thread, s32_t timeout); +__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout); + +/** + * @brief Put the current thread to sleep. + * + * This routine puts the current thread to sleep for @a duration, + * specified as a k_timeout_t object. + * + * @param timeout Desired duration of sleep. + * + * @return Zero if the requested time has elapsed or the number of milliseconds + * left to sleep, if thread was woken up by \ref k_wakeup call. + */ +__syscall s32_t k_sleep(k_timeout_t timeout); /** * @brief Put the current thread to sleep. @@ -946,7 +958,10 @@ __syscall int k_thread_join(struct k_thread *thread, s32_t timeout); * @return Zero if the requested time has elapsed or the number of milliseconds * left to sleep, if thread was woken up by \ref k_wakeup call. 
*/ -__syscall s32_t k_sleep(s32_t ms); +static inline s32_t k_msleep(s32_t ms) +{ + return k_sleep(Z_TIMEOUT_MS(ms)); +} /** * @brief Put the current thread to sleep with microsecond resolution. @@ -1531,7 +1546,7 @@ const char *k_thread_state_str(k_tid_t thread_id); * * @return Timeout delay value. */ -#define K_NO_WAIT 0 +#define K_NO_WAIT Z_TIMEOUT_NO_WAIT /** * @brief Generate timeout delay from milliseconds. @@ -1543,7 +1558,7 @@ const char *k_thread_state_str(k_tid_t thread_id); * * @return Timeout delay value. */ -#define K_MSEC(ms) (ms) +#define K_MSEC(ms) Z_TIMEOUT_MS(ms) /** * @brief Generate timeout delay from seconds. @@ -1589,7 +1604,7 @@ const char *k_thread_state_str(k_tid_t thread_id); * * @return Timeout delay value. */ -#define K_FOREVER (-1) +#define K_FOREVER Z_FOREVER /** * @} @@ -1617,7 +1632,7 @@ struct k_timer { void (*stop_fn)(struct k_timer *timer); /* timer period */ - s32_t period; + k_timeout_t period; /* timer status */ u32_t status; @@ -1639,7 +1654,6 @@ struct k_timer { .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \ .expiry_fn = expiry, \ .stop_fn = stop, \ - .period = 0, \ .status = 0, \ .user_data = 0, \ _OBJECT_TRACING_INIT \ @@ -1727,13 +1741,13 @@ extern void k_timer_init(struct k_timer *timer, * using the new duration and period values. * * @param timer Address of timer. - * @param duration Initial timer duration (in milliseconds). - * @param period Timer period (in milliseconds). + * @param duration Initial timer duration. + * @param period Timer period. * * @return N/A */ __syscall void k_timer_start(struct k_timer *timer, - s32_t duration, s32_t period); + k_timeout_t duration, k_timeout_t period); /** * @brief Stop a timer. @@ -2189,14 +2203,14 @@ extern int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list); * @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT. * * @param queue Address of the queue. - * @param timeout Non-negative waiting period to obtain a data item (in - * milliseconds), or one of the special values K_NO_WAIT and + * @param timeout Non-negative waiting period to obtain a data item + * or one of the special values K_NO_WAIT and * K_FOREVER. * * @return Address of the data item if successful; NULL if returned * without waiting, or waiting period timed out. */ -__syscall void *k_queue_get(struct k_queue *queue, s32_t timeout); +__syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout); /** * @brief Remove an element from a queue. @@ -2358,7 +2372,7 @@ struct z_futex_data { * @param futex Address of the futex. * @param expected Expected value of the futex, if it is different the caller * will not wait on it. - * @param timeout Non-negative waiting period on the futex, in milliseconds, or + * @param timeout Non-negative waiting period on the futex, or * one of the special values K_NO_WAIT or K_FOREVER. * @retval -EACCES Caller does not have read access to futex address. * @retval -EAGAIN If the futex value did not match the expected parameter. @@ -2368,7 +2382,8 @@ struct z_futex_data { * should check the futex's value on wakeup to determine if it needs * to block again. */ -__syscall int k_futex_wait(struct k_futex *futex, int expected, s32_t timeout); +__syscall int k_futex_wait(struct k_futex *futex, int expected, + k_timeout_t timeout); /** * @brief Wake one/all threads pending on a futex @@ -2529,7 +2544,7 @@ struct k_fifo { * @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT. * * @param fifo Address of the FIFO queue. 
- * @param timeout Waiting period to obtain a data item (in milliseconds), + * @param timeout Waiting period to obtain a data item, * or one of the special values K_NO_WAIT and K_FOREVER. * * @return Address of the data item if successful; NULL if returned @@ -2689,7 +2704,7 @@ struct k_lifo { * @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT. * * @param lifo Address of the LIFO queue. - * @param timeout Waiting period to obtain a data item (in milliseconds), + * @param timeout Waiting period to obtain a data item, * or one of the special values K_NO_WAIT and K_FOREVER. * * @return Address of the data item if successful; NULL if returned @@ -2827,8 +2842,8 @@ __syscall int k_stack_push(struct k_stack *stack, stack_data_t data); * * @param stack Address of the stack. * @param data Address of area to hold the value popped from the stack. - * @param timeout Non-negative waiting period to obtain a value (in - * milliseconds), or one of the special values K_NO_WAIT and + * @param timeout Waiting period to obtain a value, + * or one of the special values K_NO_WAIT and * K_FOREVER. * * @retval 0 Element popped from stack. @@ -2837,7 +2852,7 @@ __syscall int k_stack_push(struct k_stack *stack, stack_data_t data); * @req K-STACK-001 */ __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data, - s32_t timeout); + k_timeout_t timeout); /** * @brief Statically define and initialize a stack @@ -3142,8 +3157,7 @@ extern void k_delayed_work_init(struct k_delayed_work *work, * * @param work_q Address of workqueue. * @param work Address of delayed work item. - * @param delay Non-negative delay before submitting the work item (in - * milliseconds). + * @param delay Delay before submitting the work item * * @retval 0 Work item countdown started. * @retval -EINVAL Work item is being processed or has completed its work. @@ -3152,7 +3166,7 @@ extern void k_delayed_work_init(struct k_delayed_work *work, */ extern int k_delayed_work_submit_to_queue(struct k_work_q *work_q, struct k_delayed_work *work, - s32_t delay); + k_timeout_t delay); /** * @brief Cancel a delayed work item. @@ -3228,8 +3242,7 @@ static inline void k_work_submit(struct k_work *work) * @note Can be called by ISRs. * * @param work Address of delayed work item. - * @param delay Non-negative delay before submitting the work item (in - * milliseconds). + * @param delay Delay before submitting the work item * * @retval 0 Work item countdown started. * @retval -EINVAL Work item is being processed or has completed its work. @@ -3237,7 +3250,7 @@ static inline void k_work_submit(struct k_work *work) * @req K-DWORK-001 */ static inline int k_delayed_work_submit(struct k_delayed_work *work, - s32_t delay) + k_timeout_t delay) { return k_delayed_work_submit_to_queue(&k_sys_work_q, work, delay); } @@ -3299,7 +3312,7 @@ extern void k_work_poll_init(struct k_work_poll *work, * @param work Address of delayed work item. * @param events An array of pointers to events which trigger the work. * @param num_events The number of events in the array. - * @param timeout Non-negative timeout after which the work will be scheduled + * @param timeout Timeout after which the work will be scheduled * for execution even if not triggered. * * @@ -3311,7 +3324,7 @@ extern int k_work_poll_submit_to_queue(struct k_work_q *work_q, struct k_work_poll *work, struct k_poll_event *events, int num_events, - s32_t timeout); + k_timeout_t timeout); /** * @brief Submit a triggered work item to the system workqueue. 
@@ -3337,7 +3350,7 @@ extern int k_work_poll_submit_to_queue(struct k_work_q *work_q, * @param work Address of delayed work item. * @param events An array of pointers to events which trigger the work. * @param num_events The number of events in the array. - * @param timeout Non-negative timeout after which the work will be scheduled + * @param timeout Timeout after which the work will be scheduled * for execution even if not triggered. * * @retval 0 Work item started watching for events. @@ -3347,7 +3360,7 @@ extern int k_work_poll_submit_to_queue(struct k_work_q *work_q, static inline int k_work_poll_submit(struct k_work_poll *work, struct k_poll_event *events, int num_events, - s32_t timeout) + k_timeout_t timeout) { return k_work_poll_submit_to_queue(&k_sys_work_q, work, events, num_events, timeout); @@ -3455,8 +3468,8 @@ __syscall int k_mutex_init(struct k_mutex *mutex); * completes immediately and the lock count is increased by 1. * * @param mutex Address of the mutex. - * @param timeout Non-negative waiting period to lock the mutex (in - * milliseconds), or one of the special values K_NO_WAIT and + * @param timeout Waiting period to lock the mutex, + * or one of the special values K_NO_WAIT and * K_FOREVER. * * @retval 0 Mutex locked. @@ -3464,7 +3477,7 @@ __syscall int k_mutex_init(struct k_mutex *mutex); * @retval -EAGAIN Waiting period timed out. * @req K-MUTEX-002 */ -__syscall int k_mutex_lock(struct k_mutex *mutex, s32_t timeout); +__syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout); /** * @brief Unlock a mutex. @@ -3550,16 +3563,15 @@ __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count, * @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT. * * @param sem Address of the semaphore. - * @param timeout Non-negative waiting period to take the semaphore (in - * milliseconds), or one of the special values K_NO_WAIT and - * K_FOREVER. + * @param timeout Waiting period to take the semaphore, + * or one of the special values K_NO_WAIT and K_FOREVER. * * @retval 0 Semaphore taken. * @retval -EBUSY Returned without waiting. * @retval -EAGAIN Waiting period timed out. * @req K-SEM-001 */ -__syscall int k_sem_take(struct k_sem *sem, s32_t timeout); +__syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout); /** * @brief Give a semaphore. @@ -3803,8 +3815,8 @@ int k_msgq_cleanup(struct k_msgq *msgq); * * @param msgq Address of the message queue. * @param data Pointer to the message. - * @param timeout Non-negative waiting period to add the message (in - * milliseconds), or one of the special values K_NO_WAIT and + * @param timeout Non-negative waiting period to add the message, + * or one of the special values K_NO_WAIT and * K_FOREVER. * * @retval 0 Message sent. @@ -3812,7 +3824,7 @@ int k_msgq_cleanup(struct k_msgq *msgq); * @retval -EAGAIN Waiting period timed out. * @req K-MSGQ-002 */ -__syscall int k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout); +__syscall int k_msgq_put(struct k_msgq *msgq, void *data, k_timeout_t timeout); /** * @brief Receive a message from a message queue. @@ -3824,8 +3836,8 @@ __syscall int k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout); * * @param msgq Address of the message queue. * @param data Address of area to hold the received message. 
- * @param timeout Non-negative waiting period to receive the message (in - * milliseconds), or one of the special values K_NO_WAIT and + * @param timeout Waiting period to receive the message, + * or one of the special values K_NO_WAIT and * K_FOREVER. * * @retval 0 Message received. @@ -3833,7 +3845,7 @@ __syscall int k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout); * @retval -EAGAIN Waiting period timed out. * @req K-MSGQ-002 */ -__syscall int k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout); +__syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout); /** * @brief Peek/read a message from a message queue. @@ -4042,8 +4054,8 @@ extern void k_mbox_init(struct k_mbox *mbox); * * @param mbox Address of the mailbox. * @param tx_msg Address of the transmit message descriptor. - * @param timeout Non-negative waiting period for the message to be received (in - * milliseconds), or one of the special values K_NO_WAIT + * @param timeout Waiting period for the message to be received, + * or one of the special values K_NO_WAIT * and K_FOREVER. Once the message has been received, * this routine waits as long as necessary for the message * to be completely processed. @@ -4054,7 +4066,7 @@ extern void k_mbox_init(struct k_mbox *mbox); * @req K-MBOX-002 */ extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, - s32_t timeout); + k_timeout_t timeout); /** * @brief Send a mailbox message in an asynchronous manner. @@ -4085,9 +4097,8 @@ extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, * @param rx_msg Address of the receive message descriptor. * @param buffer Address of the buffer to receive data, or NULL to defer data * retrieval and message disposal until later. - * @param timeout Non-negative waiting period for a message to be received (in - * milliseconds), or one of the special values K_NO_WAIT - * and K_FOREVER. + * @param timeout Waiting period for a message to be received, + * or one of the special values K_NO_WAIT and K_FOREVER. * * @retval 0 Message received. * @retval -ENOMSG Returned without waiting. @@ -4095,7 +4106,7 @@ extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, * @req K-MBOX-002 */ extern int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, - void *buffer, s32_t timeout); + void *buffer, k_timeout_t timeout); /** * @brief Retrieve mailbox message data into a buffer. @@ -4137,8 +4148,8 @@ extern void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer); * @param rx_msg Address of a receive message descriptor. * @param pool Address of memory pool, or NULL to discard data. * @param block Address of the area to hold memory pool block info. - * @param timeout Non-negative waiting period to wait for a memory pool block - * (in milliseconds), or one of the special values K_NO_WAIT + * @param timeout Time to wait for a memory pool block, + * or one of the special values K_NO_WAIT * and K_FOREVER. * * @retval 0 Data retrieved. @@ -4148,7 +4159,8 @@ extern void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer); */ extern int k_mbox_data_block_get(struct k_mbox_msg *rx_msg, struct k_mem_pool *pool, - struct k_mem_block *block, s32_t timeout); + struct k_mem_block *block, + k_timeout_t timeout); /** @} */ @@ -4282,9 +4294,8 @@ __syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size); * @param bytes_to_write Size of data (in bytes). * @param bytes_written Address of area to hold the number of bytes written. * @param min_xfer Minimum number of bytes to write. 
- * @param timeout Non-negative waiting period to wait for the data to be written - * (in milliseconds), or one of the special values K_NO_WAIT - * and K_FOREVER. + * @param timeout Waiting period to wait for the data to be written, + * or one of the special values K_NO_WAIT and K_FOREVER. * * @retval 0 At least @a min_xfer bytes of data were written. * @retval -EIO Returned without waiting; zero data bytes were written. @@ -4294,7 +4305,7 @@ __syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size); */ __syscall int k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write, size_t *bytes_written, - size_t min_xfer, s32_t timeout); + size_t min_xfer, k_timeout_t timeout); /** * @brief Read data from a pipe. @@ -4306,9 +4317,8 @@ __syscall int k_pipe_put(struct k_pipe *pipe, void *data, * @param bytes_to_read Maximum number of data bytes to read. * @param bytes_read Address of area to hold the number of bytes read. * @param min_xfer Minimum number of data bytes to read. - * @param timeout Non-negative waiting period to wait for the data to be read - * (in milliseconds), or one of the special values K_NO_WAIT - * and K_FOREVER. + * @param timeout Waiting period to wait for the data to be read, + * or one of the special values K_NO_WAIT and K_FOREVER. * * @retval 0 At least @a min_xfer bytes of data were read. * @retval -EINVAL invalid parameters supplied @@ -4319,7 +4329,7 @@ __syscall int k_pipe_put(struct k_pipe *pipe, void *data, */ __syscall int k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read, size_t *bytes_read, - size_t min_xfer, s32_t timeout); + size_t min_xfer, k_timeout_t timeout); /** * @brief Write memory block to a pipe. @@ -4441,8 +4451,8 @@ extern int k_mem_slab_init(struct k_mem_slab *slab, void *buffer, * * @param slab Address of the memory slab. * @param mem Pointer to block address area. - * @param timeout Non-negative waiting period to wait for operation to complete - * (in milliseconds). Use K_NO_WAIT to return without waiting, + * @param timeout Non-negative waiting period to wait for operation to complete. + * Use K_NO_WAIT to return without waiting, * or K_FOREVER to wait as long as necessary. * * @retval 0 Memory allocated. The block address area pointed at by @a mem @@ -4453,7 +4463,7 @@ extern int k_mem_slab_init(struct k_mem_slab *slab, void *buffer, * @req K-MSLAB-002 */ extern int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, - s32_t timeout); + k_timeout_t timeout); /** * @brief Free memory allocated from a memory slab. @@ -4565,8 +4575,8 @@ struct k_mem_pool { * @param pool Address of the memory pool. * @param block Pointer to block descriptor for the allocated memory. * @param size Amount of memory to allocate (in bytes). - * @param timeout Non-negative waiting period to wait for operation to complete - * (in milliseconds). Use K_NO_WAIT to return without waiting, + * @param timeout Waiting period to wait for operation to complete. + * Use K_NO_WAIT to return without waiting, * or K_FOREVER to wait as long as necessary. * * @retval 0 Memory allocated. 
The @a data field of the block descriptor @@ -4576,7 +4586,7 @@ struct k_mem_pool { * @req K-MPOOL-002 */ extern int k_mem_pool_alloc(struct k_mem_pool *pool, struct k_mem_block *block, - size_t size, s32_t timeout); + size_t size, k_timeout_t timeout); /** * @brief Allocate memory from a memory pool with malloc() semantics @@ -4890,9 +4900,8 @@ extern void k_poll_event_init(struct k_poll_event *event, u32_t type, * * @param events An array of pointers to events to be polled for. * @param num_events The number of events in the array. - * @param timeout Non-negative waiting period for an event to be ready (in - * milliseconds), or one of the special values K_NO_WAIT and - * K_FOREVER. + * @param timeout Waiting period for an event to be ready, + * or one of the special values K_NO_WAIT and K_FOREVER. * * @retval 0 One or more events are ready. * @retval -EAGAIN Waiting period timed out. @@ -4907,7 +4916,7 @@ extern void k_poll_event_init(struct k_poll_event *event, u32_t type, */ __syscall int k_poll(struct k_poll_event *events, int num_events, - s32_t timeout); + k_timeout_t timeout); /** * @brief Initialize a poll signal object. diff --git a/include/sys/mutex.h b/include/sys/mutex.h index 9d97a419903..d5d6913a254 100644 --- a/include/sys/mutex.h +++ b/include/sys/mutex.h @@ -19,6 +19,7 @@ #ifdef CONFIG_USERSPACE #include #include +#include struct sys_mutex { /* Currently unused, but will be used to store state for fast mutexes @@ -54,7 +55,8 @@ static inline void sys_mutex_init(struct sys_mutex *mutex) */ } -__syscall int z_sys_mutex_kernel_lock(struct sys_mutex *mutex, s32_t timeout); +__syscall int z_sys_mutex_kernel_lock(struct sys_mutex *mutex, + k_timeout_t timeout); __syscall int z_sys_mutex_kernel_unlock(struct sys_mutex *mutex); @@ -69,7 +71,7 @@ __syscall int z_sys_mutex_kernel_unlock(struct sys_mutex *mutex); * completes immediately and the lock count is increased by 1. * * @param mutex Address of the mutex, which may reside in user memory - * @param timeout Waiting period to lock the mutex (in milliseconds), + * @param timeout Waiting period to lock the mutex, * or one of the special values K_NO_WAIT and K_FOREVER. * * @retval 0 Mutex locked. @@ -78,7 +80,7 @@ __syscall int z_sys_mutex_kernel_unlock(struct sys_mutex *mutex); * @retval -EACCESS Caller has no access to provided mutex address * @retval -EINVAL Provided mutex not recognized by the kernel */ -static inline int sys_mutex_lock(struct sys_mutex *mutex, s32_t timeout) +static inline int sys_mutex_lock(struct sys_mutex *mutex, k_timeout_t timeout) { /* For now, make the syscall unconditionally */ return z_sys_mutex_kernel_lock(mutex, timeout); @@ -126,7 +128,7 @@ static inline void sys_mutex_init(struct sys_mutex *mutex) k_mutex_init(&mutex->kernel_mutex); } -static inline int sys_mutex_lock(struct sys_mutex *mutex, s32_t timeout) +static inline int sys_mutex_lock(struct sys_mutex *mutex, k_timeout_t timeout) { return k_mutex_lock(&mutex->kernel_mutex, timeout); } diff --git a/include/sys/sem.h b/include/sys/sem.h index 1e02a1f0a60..7aea4944de9 100644 --- a/include/sys/sem.h +++ b/include/sys/sem.h @@ -110,7 +110,7 @@ int sys_sem_give(struct sys_sem *sem); * This routine takes @a sem. * * @param sem Address of the sys_sem. - * @param timeout Waiting period to take the sys_sem (in milliseconds), + * @param timeout Waiting period to take the sys_sem, * or one of the special values K_NO_WAIT and K_FOREVER. * * @retval 0 sys_sem taken. 
@@ -118,7 +118,7 @@ int sys_sem_give(struct sys_sem *sem); * @retval -ETIMEDOUT Waiting period timed out. * @retval -EACCES Caller does not have enough access. */ -int sys_sem_take(struct sys_sem *sem, s32_t timeout); +int sys_sem_take(struct sys_sem *sem, k_timeout_t timeout); /** * @brief Get sys_sem's value diff --git a/include/sys_clock.h b/include/sys_clock.h index 39f749e219b..123ad8235fc 100644 --- a/include/sys_clock.h +++ b/include/sys_clock.h @@ -28,6 +28,59 @@ extern "C" { #endif +/** + * @addtogroup clock_apis + * @{ + */ + +typedef u32_t k_ticks_t; + +#define K_TICKS_FOREVER ((k_ticks_t) -1) + +#ifndef CONFIG_LEGACY_TIMEOUT_API + +typedef struct { + k_ticks_t ticks; +} k_timeout_t; + +/** + * @brief Compare timeouts for equality + * + * The k_timeout_t object is an opaque struct that should not be + * inspected by application code. This macro exists so that users can + * test timeout objects for equality with known constants + * (e.g. K_NO_WAIT and K_FOREVER) when implementing their own APIs in + * terms of Zephyr timeout constants. + * + * @return True if the timeout objects are identical + */ +#define K_TIMEOUT_EQ(a, b) ((a).ticks == (b).ticks) + +#define Z_TIMEOUT_NO_WAIT ((k_timeout_t) {}) +#define Z_TIMEOUT_TICKS(t) ((k_timeout_t) { .ticks = (t) }) +#define Z_FOREVER Z_TIMEOUT_TICKS(K_TICKS_FOREVER) +#define Z_TIMEOUT_MS(t) Z_TIMEOUT_TICKS(k_ms_to_ticks_ceil32(MAX(t, 0))) +#define Z_TIMEOUT_US(t) Z_TIMEOUT_TICKS(k_us_to_ticks_ceil32(MAX(t, 0))) +#define Z_TIMEOUT_NS(t) Z_TIMEOUT_TICKS(k_ns_to_ticks_ceil32(MAX(t, 0))) +#define Z_TIMEOUT_CYC(t) Z_TIMEOUT_TICKS(k_cyc_to_ticks_ceil32(MAX(t, 0))) + +#else + +/* Legacy timeout API */ +typedef s32_t k_timeout_t; +#define K_TIMEOUT_EQ(a, b) ((a) == (b)) +#define Z_TIMEOUT_NO_WAIT 0 +#define Z_TIMEOUT_TICKS(t) k_ticks_to_ms_ceil32(t) +#define Z_FOREVER K_TICKS_FOREVER +#define Z_TIMEOUT_MS(t) (t) +#define Z_TIMEOUT_US(t) ((t) * 1000) +#define Z_TIMEOUT_NS(t) ((t) * 1000000) +#define Z_TIMEOUT_CYC(t) k_cyc_to_ms_ceil32(MAX((t), 0)) + +#endif + +/** @} */ + #ifdef CONFIG_TICKLESS_KERNEL extern int _sys_clock_always_on; extern void z_enable_sys_clock(void); @@ -53,8 +106,6 @@ extern void z_enable_sys_clock(void); /* number of nanoseconds per second */ #define NSEC_PER_SEC ((NSEC_PER_USEC) * (USEC_PER_MSEC) * (MSEC_PER_SEC)) -#define k_msleep(ms) k_sleep(ms) -#define K_TIMEOUT_EQ(a, b) ((a) == (b)) /* kernel clocks */ @@ -132,6 +183,8 @@ s64_t z_tick_get(void); #define z_tick_get_32() (0) #endif +u64_t z_timeout_end_calc(k_timeout_t timeout); + /* timeouts */ struct _timeout; diff --git a/include/timeout_q.h b/include/timeout_q.h index 0615bf181d1..061be6d8999 100644 --- a/include/timeout_q.h +++ b/include/timeout_q.h @@ -27,7 +27,8 @@ static inline void z_init_timeout(struct _timeout *t) sys_dnode_init(&t->node); } -void z_add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks); +void z_add_timeout(struct _timeout *to, _timeout_func_t fn, + k_timeout_t timeout); int z_abort_timeout(struct _timeout *to); @@ -43,7 +44,7 @@ static inline void z_init_thread_timeout(struct _thread_base *thread_base) extern void z_thread_timeout(struct _timeout *to); -static inline void z_add_thread_timeout(struct k_thread *th, s32_t ticks) +static inline void z_add_thread_timeout(struct k_thread *th, k_timeout_t ticks) { z_add_timeout(&th->base.timeout, z_thread_timeout, ticks); } @@ -63,12 +64,17 @@ s32_t z_timeout_remaining(struct _timeout *timeout); /* Stubs when !CONFIG_SYS_CLOCK_EXISTS */ #define z_init_thread_timeout(t) do {} while (false) 
-#define z_add_thread_timeout(th, to) do {} while (false && to && (void *)th) #define z_abort_thread_timeout(t) (0) #define z_is_inactive_timeout(t) 0 -#define z_get_next_timeout_expiry() (K_FOREVER) +#define z_get_next_timeout_expiry() (K_TICKS_FOREVER) #define z_set_timeout_expiry(t, i) do {} while (false) +static inline void z_add_thread_timeout(struct k_thread *th, k_timeout_t ticks) +{ + ARG_UNUSED(th); + ARG_UNUSED(ticks); +} + #endif #ifdef __cplusplus diff --git a/kernel/Kconfig b/kernel/Kconfig index 4e9e749b980..90125a6f8b5 100644 --- a/kernel/Kconfig +++ b/kernel/Kconfig @@ -570,6 +570,15 @@ config SYS_CLOCK_EXISTS this is disabled. Obviously timeout-related APIs will not work. +config LEGACY_TIMEOUT_API + bool "Support legacy k_timeout_t API" + help + The k_timeout_t API has changed to become an opaque type + that must be initialized with macros. Older applications + can choose this to continue using the old style of timeouts + (which were s32_t counts of milliseconds), at the cost of + not being able to use new features. + config XIP bool "Execute in place" help diff --git a/kernel/futex.c b/kernel/futex.c index 3ec3b05d2f6..c52f90d655d 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -62,7 +62,8 @@ static inline int z_vrfy_k_futex_wake(struct k_futex *futex, bool wake_all) } #include -int z_impl_k_futex_wait(struct k_futex *futex, int expected, s32_t timeout) +int z_impl_k_futex_wait(struct k_futex *futex, int expected, + k_timeout_t timeout) { int ret; k_spinlock_key_t key; @@ -90,7 +91,7 @@ int z_impl_k_futex_wait(struct k_futex *futex, int expected, s32_t timeout) } static inline int z_vrfy_k_futex_wait(struct k_futex *futex, int expected, - s32_t timeout) + k_timeout_t timeout) { if (Z_SYSCALL_MEMORY_WRITE(futex, sizeof(struct k_futex)) != 0) { return -EACCES; diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h index 62a4c0c8bca..4a88befbf66 100644 --- a/kernel/include/ksched.h +++ b/kernel/include/ksched.h @@ -42,9 +42,10 @@ void z_remove_thread_from_ready_q(struct k_thread *thread); int z_is_thread_time_slicing(struct k_thread *thread); void z_unpend_thread_no_timeout(struct k_thread *thread); int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key, - _wait_q_t *wait_q, s32_t timeout); -int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout); -void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout); + _wait_q_t *wait_q, k_timeout_t timeout); +int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, k_timeout_t timeout); +void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, + k_timeout_t timeout); void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key); void z_reschedule_irqlock(u32_t key); struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q); @@ -63,7 +64,7 @@ void z_sched_ipi(void); void z_sched_start(struct k_thread *thread); void z_ready_thread(struct k_thread *thread); -static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, s32_t timeout) +static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, k_timeout_t timeout) { (void) z_pend_curr_irqlock(arch_irq_lock(), wait_q, timeout); } diff --git a/kernel/mailbox.c b/kernel/mailbox.c index d72790459d8..cd9016feb9e 100644 --- a/kernel/mailbox.c +++ b/kernel/mailbox.c @@ -233,7 +233,7 @@ static void mbox_message_dispose(struct k_mbox_msg *rx_msg) * @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out */ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, - s32_t timeout) + k_timeout_t timeout) { 
struct k_thread *sending_thread; struct k_thread *receiving_thread; @@ -286,7 +286,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, } /* didn't find a matching receiver: don't wait for one */ - if (timeout == K_NO_WAIT) { + if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { k_spin_unlock(&mbox->lock, key); return -ENOMSG; } @@ -304,7 +304,8 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, return z_pend_curr(&mbox->lock, key, &mbox->tx_msg_queue, timeout); } -int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, s32_t timeout) +int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, + k_timeout_t timeout) { /* configure things for a synchronous send, then send the message */ tx_msg->_syncing_thread = _current; @@ -351,7 +352,7 @@ void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer) } int k_mbox_data_block_get(struct k_mbox_msg *rx_msg, struct k_mem_pool *pool, - struct k_mem_block *block, s32_t timeout) + struct k_mem_block *block, k_timeout_t timeout) { int result; @@ -416,7 +417,7 @@ static int mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer) } int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, - s32_t timeout) + k_timeout_t timeout) { struct k_thread *sending_thread; struct k_mbox_msg *tx_msg; @@ -445,7 +446,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, /* didn't find a matching sender */ - if (timeout == K_NO_WAIT) { + if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { /* don't wait for a matching sender to appear */ k_spin_unlock(&mbox->lock, key); return -ENOMSG; diff --git a/kernel/mem_slab.c b/kernel/mem_slab.c index df83d6402f5..6c4dc807dd5 100644 --- a/kernel/mem_slab.c +++ b/kernel/mem_slab.c @@ -101,7 +101,7 @@ out: return rc; } -int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, s32_t timeout) +int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout) { k_spinlock_key_t key = k_spin_lock(&lock); int result; @@ -112,7 +112,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, s32_t timeout) slab->free_list = *(char **)(slab->free_list); slab->num_used++; result = 0; - } else if (timeout == K_NO_WAIT) { + } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { /* don't wait for a free block to become available */ *mem = NULL; result = -ENOMEM; diff --git a/kernel/mempool.c b/kernel/mempool.c index 85f0fede050..49df124506e 100644 --- a/kernel/mempool.c +++ b/kernel/mempool.c @@ -47,16 +47,14 @@ int init_static_pools(struct device *unused) SYS_INIT(init_static_pools, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS); int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block, - size_t size, s32_t timeout) + size_t size, k_timeout_t timeout) { int ret; - s64_t end = 0; + u64_t end = 0; - __ASSERT(!(arch_is_in_isr() && timeout != K_NO_WAIT), ""); + __ASSERT(!(arch_is_in_isr() && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)), ""); - if (timeout > 0) { - end = k_uptime_get() + timeout; - } + end = z_timeout_end_calc(timeout); while (true) { u32_t level_num, block_num; @@ -68,18 +66,20 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block, block->id.level = level_num; block->id.block = block_num; - if (ret == 0 || timeout == K_NO_WAIT || + if (ret == 0 || K_TIMEOUT_EQ(timeout, K_NO_WAIT) || ret != -ENOMEM) { return ret; } z_pend_curr_unlocked(&p->wait_q, timeout); - if (timeout != K_FOREVER) { - timeout = end - k_uptime_get(); - if (timeout <= 0) { + if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) { + s64_t 
remaining = end - z_tick_get(); + + if (remaining <= 0) { break; } + timeout = Z_TIMEOUT_TICKS(remaining); } } diff --git a/kernel/msg_q.c b/kernel/msg_q.c index 1d172f51104..f7351f28ea7 100644 --- a/kernel/msg_q.c +++ b/kernel/msg_q.c @@ -113,9 +113,9 @@ int k_msgq_cleanup(struct k_msgq *msgq) } -int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout) +int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, k_timeout_t timeout) { - __ASSERT(!arch_is_in_isr() || timeout == K_NO_WAIT, ""); + __ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), ""); struct k_thread *pending_thread; k_spinlock_key_t key; @@ -145,7 +145,7 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout) msgq->used_msgs++; } result = 0; - } else if (timeout == K_NO_WAIT) { + } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { /* don't wait for message space to become available */ result = -ENOMSG; } else { @@ -160,7 +160,8 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout) } #ifdef CONFIG_USERSPACE -static inline int z_vrfy_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout) +static inline int z_vrfy_k_msgq_put(struct k_msgq *q, void *data, + k_timeout_t timeout) { Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ)); Z_OOPS(Z_SYSCALL_MEMORY_READ(data, q->msg_size)); @@ -188,9 +189,9 @@ static inline void z_vrfy_k_msgq_get_attrs(struct k_msgq *q, #include #endif -int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout) +int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout) { - __ASSERT(!arch_is_in_isr() || timeout == K_NO_WAIT, ""); + __ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), ""); k_spinlock_key_t key; struct k_thread *pending_thread; @@ -226,7 +227,7 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout) return 0; } result = 0; - } else if (timeout == K_NO_WAIT) { + } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { /* don't wait for a message to become available */ result = -ENOMSG; } else { @@ -241,7 +242,8 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout) } #ifdef CONFIG_USERSPACE -static inline int z_vrfy_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout) +static inline int z_vrfy_k_msgq_get(struct k_msgq *q, void *data, + k_timeout_t timeout) { Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ)); Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, q->msg_size)); diff --git a/kernel/mutex.c b/kernel/mutex.c index fb40b87315b..b22c9ae76cf 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -116,7 +116,7 @@ static bool adjust_owner_prio(struct k_mutex *mutex, s32_t new_prio) return false; } -int z_impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout) +int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout) { int new_prio; k_spinlock_key_t key; @@ -144,7 +144,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout) return 0; } - if (unlikely(timeout == (s32_t)K_NO_WAIT)) { + if (unlikely(K_TIMEOUT_EQ(timeout, K_NO_WAIT))) { k_spin_unlock(&lock, key); sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK); return -EBUSY; @@ -198,7 +198,8 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout) } #ifdef CONFIG_USERSPACE -static inline int z_vrfy_k_mutex_lock(struct k_mutex *mutex, s32_t timeout) +static inline int z_vrfy_k_mutex_lock(struct k_mutex *mutex, + k_timeout_t timeout) { Z_OOPS(Z_SYSCALL_OBJ(mutex, K_OBJ_MUTEX)); return z_impl_k_mutex_lock(mutex, timeout); diff --git a/kernel/pipes.c b/kernel/pipes.c index f315b435481..efd2855c5ea 100644 --- a/kernel/pipes.c +++ b/kernel/pipes.c 
diff --git a/kernel/pipes.c b/kernel/pipes.c
index f315b435481..efd2855c5ea 100644
--- a/kernel/pipes.c
+++ b/kernel/pipes.c
@@ -318,13 +318,13 @@ static bool pipe_xfer_prepare(sys_dlist_t      *xfer_list,
 			      size_t           pipe_space,
 			      size_t           bytes_to_xfer,
 			      size_t           min_xfer,
-			      s32_t            timeout)
+			      k_timeout_t      timeout)
 {
 	struct k_thread  *thread;
 	struct k_pipe_desc *desc;
 	size_t num_bytes = 0;
 
-	if (timeout == K_NO_WAIT) {
+	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
 		_WAIT_Q_FOR_EACH(wait_q, thread) {
 			desc = (struct k_pipe_desc *)thread->base.swap_data;
 
@@ -429,7 +429,7 @@ static void pipe_thread_ready(struct k_thread *thread)
 int z_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 			unsigned char *data, size_t bytes_to_write,
 			size_t *bytes_written, size_t min_xfer,
-			s32_t timeout)
+			k_timeout_t timeout)
 {
 	struct k_thread *reader;
 	struct k_pipe_desc *desc;
@@ -555,7 +555,7 @@ int z_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 	pipe_desc.buffer = data + num_bytes_written;
 	pipe_desc.bytes_to_xfer = bytes_to_write - num_bytes_written;
 
-	if (timeout != K_NO_WAIT) {
+	if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
 		_current->base.swap_data = &pipe_desc;
 		/*
 		 * Lock interrupts and unlock the scheduler before
@@ -576,7 +576,7 @@ int z_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 }
 
 int z_impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
-		      size_t *bytes_read, size_t min_xfer, s32_t timeout)
+		      size_t *bytes_read, size_t min_xfer, k_timeout_t timeout)
 {
 	struct k_thread *writer;
 	struct k_pipe_desc *desc;
@@ -701,7 +701,7 @@ int z_impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
 	pipe_desc.buffer = (u8_t *)data + num_bytes_read;
 	pipe_desc.bytes_to_xfer = bytes_to_read - num_bytes_read;
 
-	if (timeout != K_NO_WAIT) {
+	if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
 		_current->base.swap_data = &pipe_desc;
 		k_spinlock_key_t key = k_spin_lock(&pipe->lock);
 
@@ -720,7 +720,7 @@ int z_impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
 
 #ifdef CONFIG_USERSPACE
 int z_vrfy_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
-		      size_t *bytes_read, size_t min_xfer, s32_t timeout)
+		      size_t *bytes_read, size_t min_xfer, k_timeout_t timeout)
 {
 	Z_OOPS(Z_SYSCALL_OBJ(pipe, K_OBJ_PIPE));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(bytes_read, sizeof(*bytes_read)));
@@ -734,7 +734,8 @@ int z_vrfy_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
 #endif
 
 int z_impl_k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write,
-		      size_t *bytes_written, size_t min_xfer, s32_t timeout)
+		      size_t *bytes_written, size_t min_xfer,
+		      k_timeout_t timeout)
 {
 	return z_pipe_put_internal(pipe, NULL, data, bytes_to_write,
 				   bytes_written,
@@ -743,7 +744,8 @@ int z_impl_k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write,
 
 #ifdef CONFIG_USERSPACE
 int z_vrfy_k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write,
-		      size_t *bytes_written, size_t min_xfer, s32_t timeout)
+		      size_t *bytes_written, size_t min_xfer,
+		      k_timeout_t timeout)
 {
 	Z_OOPS(Z_SYSCALL_OBJ(pipe, K_OBJ_PIPE));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(bytes_written, sizeof(*bytes_written)));
diff --git a/kernel/poll.c b/kernel/poll.c
index 699a33d5cdf..4fe88ffbadd 100644
--- a/kernel/poll.c
+++ b/kernel/poll.c
@@ -244,7 +244,8 @@ static int k_poll_poller_cb(struct k_poll_event *event, u32_t state)
 	return 0;
 }
 
-int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
+int z_impl_k_poll(struct k_poll_event *events, int num_events,
+		  k_timeout_t timeout)
 {
 	int events_registered;
 	k_spinlock_key_t key;
@@ -257,7 +258,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	__ASSERT(num_events >= 0, "<0 events\n");
 
 	events_registered = register_events(events, num_events, &poller,
-					    (timeout == K_NO_WAIT));
+					    K_TIMEOUT_EQ(timeout, K_NO_WAIT));
 
 	key = k_spin_lock(&lock);
 
@@ -274,7 +275,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 
 	poller.is_polling = false;
 
-	if (timeout == K_NO_WAIT) {
+	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
 		k_spin_unlock(&lock, key);
 		return -EAGAIN;
 	}
@@ -301,7 +302,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 
 #ifdef CONFIG_USERSPACE
 static inline int z_vrfy_k_poll(struct k_poll_event *events,
-				int num_events, s32_t timeout)
+				int num_events, k_timeout_t timeout)
 {
 	int ret;
 	k_spinlock_key_t key;
@@ -582,7 +583,7 @@ int k_work_poll_submit_to_queue(struct k_work_q *work_q,
 				struct k_work_poll *work,
 				struct k_poll_event *events,
 				int num_events,
-				s32_t timeout)
+				k_timeout_t timeout)
 {
 	int events_registered;
 	k_spinlock_key_t key;
@@ -626,7 +627,7 @@ int k_work_poll_submit_to_queue(struct k_work_q *work_q,
 					       &work->poller, false);
 	key = k_spin_lock(&lock);
 
-	if (work->poller.is_polling && timeout != K_NO_WAIT) {
+	if (work->poller.is_polling && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
 		/*
 		 * Poller is still polling.
 		 * No event is ready and all are watched.
@@ -634,11 +635,15 @@ int k_work_poll_submit_to_queue(struct k_work_q *work_q,
 		__ASSERT(num_events == events_registered,
 			 "Some events were not registered!\n");
 
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+		timeout = k_ms_to_ticks_ceil32(timeout);
+#endif
+
 		/* Setup timeout if such action is requested */
-		if (timeout != K_FOREVER) {
+		if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
 			z_add_timeout(&work->timeout,
 				      triggered_work_expiration_handler,
-				      k_ms_to_ticks_ceil32(timeout));
+				      timeout);
 		}
 
 		/* From now, any event will result in submitted work.
 		 */
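For illustration (not part of the diff): since the opaque type forbids `timeout == K_NO_WAIT`, code that inspects a user-provided timeout uses the K_TIMEOUT_EQ() predicate, mirroring the kernel's own assertions above. A hedged sketch with a hypothetical wrapper my_poll_one():

	/* Pass the opaque timeout straight through; only the predicate may
	 * inspect it.  The ISR rule matches the kernel's internal asserts.
	 */
	int my_poll_one(struct k_poll_event *evt, k_timeout_t timeout)
	{
		__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT),
			 "only non-blocking polls are allowed from ISRs");
		return k_poll(evt, 1, timeout);
	}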
diff --git a/kernel/queue.c b/kernel/queue.c
index 329a281edf3..c60c9f7cc9c 100644
--- a/kernel/queue.c
+++ b/kernel/queue.c
@@ -293,45 +293,32 @@ int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
 }
 
 #if defined(CONFIG_POLL)
-static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
+static void *k_queue_poll(struct k_queue *queue, k_timeout_t timeout)
 {
 	struct k_poll_event event;
-	int err, elapsed = 0, done = 0;
+	int err;
 	k_spinlock_key_t key;
 	void *val;
-	u32_t start;
 
 	k_poll_event_init(&event, K_POLL_TYPE_FIFO_DATA_AVAILABLE,
 			  K_POLL_MODE_NOTIFY_ONLY, queue);
 
-	if (timeout != K_FOREVER) {
-		start = k_uptime_get_32();
+	event.state = K_POLL_STATE_NOT_READY;
+	err = k_poll(&event, 1, timeout);
+
+	if (err && err != -EAGAIN) {
+		return NULL;
 	}
 
-	do {
-		event.state = K_POLL_STATE_NOT_READY;
-
-		err = k_poll(&event, 1, timeout - elapsed);
-
-		if (err && err != -EAGAIN) {
-			return NULL;
-		}
-
-		key = k_spin_lock(&queue->lock);
-		val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
-		k_spin_unlock(&queue->lock, key);
-
-		if ((val == NULL) && (timeout != K_FOREVER)) {
-			elapsed = k_uptime_get_32() - start;
-			done = elapsed > timeout;
-		}
-	} while (!val && !done);
+	key = k_spin_lock(&queue->lock);
+	val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
+	k_spin_unlock(&queue->lock, key);
 
 	return val;
 }
 #endif /* CONFIG_POLL */
 
-void *z_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
+void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
 {
 	k_spinlock_key_t key = k_spin_lock(&queue->lock);
 	void *data;
@@ -345,7 +332,7 @@ void *z_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
 		return data;
 	}
 
-	if (timeout == K_NO_WAIT) {
+	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
 		k_spin_unlock(&queue->lock, key);
 		return NULL;
 	}
@@ -363,7 +350,8 @@ void *z_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
 }
 
 #ifdef CONFIG_USERSPACE
-static inline void *z_vrfy_k_queue_get(struct k_queue *queue, s32_t timeout)
+static inline void *z_vrfy_k_queue_get(struct k_queue *queue,
+				       k_timeout_t timeout)
 {
 	Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
 	return z_impl_k_queue_get(queue, timeout);
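For illustration (not part of the diff): the retry loop in k_queue_poll() is gone because k_poll() either succeeds, reports -EAGAIN on timeout, or fails for a genuine reason; a single call followed by one peek under the lock is enough. The caller-visible contract is unchanged, as in this sketch (the fifo name is a placeholder):

	/* k_fifo_get() routes through z_impl_k_queue_get(); NULL still
	 * signals a timeout.
	 */
	void consumer(struct k_fifo *fifo)
	{
		void *item = k_fifo_get(fifo, K_MSEC(500));

		if (item == NULL) {
			/* nothing arrived within 500 ms */
		}
	}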
diff --git a/kernel/sched.c b/kernel/sched.c
index 4bdfe6b8815..d4a55945ee9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -582,31 +582,28 @@ static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
 	}
 }
 
-static void add_thread_timeout_ms(struct k_thread *thread, s32_t timeout)
+static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
 {
-	if (timeout != K_FOREVER) {
-		s32_t ticks;
-
-		if (timeout < 0) {
-			timeout = 0;
-		}
-
-		ticks = _TICK_ALIGN + k_ms_to_ticks_ceil32(timeout);
-
-		z_add_thread_timeout(thread, ticks);
+	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+		timeout = _TICK_ALIGN + k_ms_to_ticks_ceil32(timeout);
+#endif
+		z_add_thread_timeout(thread, timeout);
 	}
 }
 
-static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
+static void pend(struct k_thread *thread, _wait_q_t *wait_q,
+		 k_timeout_t timeout)
 {
 	LOCKED(&sched_spinlock) {
 		add_to_waitq_locked(thread, wait_q);
 	}
 
-	add_thread_timeout_ms(thread, timeout);
+	add_thread_timeout(thread, timeout);
 }
 
-void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
+void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
+		   k_timeout_t timeout)
 {
 	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
 	pend(thread, wait_q, timeout);
@@ -651,7 +648,7 @@ void z_thread_timeout(struct _timeout *timeout)
 }
 #endif
 
-int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout)
+int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, k_timeout_t timeout)
 {
 	pend(_current, wait_q, timeout);
 
@@ -671,7 +668,7 @@ int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout)
 }
 
 int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
-	       _wait_q_t *wait_q, s32_t timeout)
+	       _wait_q_t *wait_q, k_timeout_t timeout)
 {
 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
 	pending_current = _current;
@@ -1159,7 +1156,15 @@ static s32_t z_tick_sleep(s32_t ticks)
 		return 0;
 	}
 
+	k_timeout_t timeout;
+
+#ifndef CONFIG_LEGACY_TIMEOUT_API
+	timeout = Z_TIMEOUT_TICKS(ticks);
+#else
 	ticks += _TICK_ALIGN;
+	timeout = (k_ticks_t) ticks;
+#endif
+
 	expected_wakeup_time = ticks + z_tick_get_32();
 
 	/* Spinlock purely for local interrupt locking to prevent us
@@ -1173,7 +1178,7 @@ static s32_t z_tick_sleep(s32_t ticks)
 	pending_current = _current;
 #endif
 	z_remove_thread_from_ready_q(_current);
-	z_add_thread_timeout(_current, ticks);
+	z_add_thread_timeout(_current, timeout);
 	z_mark_thread_as_suspended(_current);
 
 	(void)z_swap(&local_lock, key);
@@ -1189,26 +1194,31 @@ static s32_t z_tick_sleep(s32_t ticks)
 	return 0;
 }
 
-s32_t z_impl_k_sleep(int ms)
+s32_t z_impl_k_sleep(k_timeout_t timeout)
 {
-	s32_t ticks;
+	k_ticks_t ticks;
 
 	__ASSERT(!arch_is_in_isr(), "");
 
-	if (ms == K_FOREVER) {
+	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
 		k_thread_suspend(_current);
-		return K_FOREVER;
+		return K_TICKS_FOREVER;
 	}
 
-	ticks = k_ms_to_ticks_ceil32(ms);
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+	ticks = k_ms_to_ticks_ceil32(timeout);
+#else
+	ticks = timeout.ticks;
+#endif
+
 	ticks = z_tick_sleep(ticks);
 	return k_ticks_to_ms_floor64(ticks);
 }
 
 #ifdef CONFIG_USERSPACE
-static inline s32_t z_vrfy_k_sleep(int ms)
+static inline s32_t z_vrfy_k_sleep(k_timeout_t timeout)
 {
-	return z_impl_k_sleep(ms);
+	return z_impl_k_sleep(timeout);
 }
 #include <syscalls/k_sleep_mrsh.c>
 #endif
@@ -1407,12 +1417,13 @@ int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)
 
 #endif /* CONFIG_SCHED_CPU_MASK */
 
-int z_impl_k_thread_join(struct k_thread *thread, s32_t timeout)
+int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
 {
 	k_spinlock_key_t key;
 	int ret;
 
-	__ASSERT(((arch_is_in_isr() == false) || (timeout == K_NO_WAIT)), "");
+	__ASSERT(((arch_is_in_isr() == false) ||
+		  K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
 
 	key = k_spin_lock(&sched_spinlock);
 
@@ -1427,7 +1438,7 @@ int z_impl_k_thread_join(struct k_thread *thread, s32_t timeout)
 		goto out;
 	}
 
-	if (timeout == K_NO_WAIT) {
+	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -1436,7 +1447,7 @@ int z_impl_k_thread_join(struct k_thread *thread, s32_t timeout)
 	pending_current = _current;
 #endif
 	add_to_waitq_locked(_current, &thread->base.join_waiters);
-	add_thread_timeout_ms(_current, timeout);
+	add_thread_timeout(_current, timeout);
 
 	return z_swap(&sched_spinlock, key);
 
 out:
@@ -1472,7 +1483,8 @@ static bool thread_obj_validate(struct k_thread *thread)
 	CODE_UNREACHABLE;
 }
 
-static inline int z_vrfy_k_thread_join(struct k_thread *thread, s32_t timeout)
+static inline int z_vrfy_k_thread_join(struct k_thread *thread,
+				       k_timeout_t timeout)
 {
 	if (thread_obj_validate(thread)) {
 		return 0;
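For illustration (not part of the diff): with the sched.c change, k_sleep() takes a k_timeout_t and K_FOREVER now suspends the thread, returning the integer K_TICKS_FOREVER if it is later resumed. A minimal migration sketch:

	void sleepy(void)
	{
		k_sleep(K_MSEC(100));	/* was k_sleep(100) under the old API */
		k_msleep(100);		/* millisecond variant, same behavior */

		/* K_FOREVER suspends; the return value is no longer the
		 * K_FOREVER constant but K_TICKS_FOREVER.
		 */
		s32_t left = k_sleep(K_FOREVER);

		(void)left;
	}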
diff --git a/kernel/sem.c b/kernel/sem.c
index db317c80a77..acf175eacdb 100644
--- a/kernel/sem.c
+++ b/kernel/sem.c
@@ -133,11 +133,12 @@ static inline void z_vrfy_k_sem_give(struct k_sem *sem)
 #include <syscalls/k_sem_give_mrsh.c>
 #endif
 
-int z_impl_k_sem_take(struct k_sem *sem, s32_t timeout)
+int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
 {
 	int ret = 0;
 
-	__ASSERT(((arch_is_in_isr() == false) || (timeout == K_NO_WAIT)), "");
+	__ASSERT(((arch_is_in_isr() == false) ||
+		  K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
 
 	sys_trace_void(SYS_TRACE_ID_SEMA_TAKE);
 	k_spinlock_key_t key = k_spin_lock(&lock);
@@ -149,7 +150,7 @@ int z_impl_k_sem_take(struct k_sem *sem, s32_t timeout)
 		goto out;
 	}
 
-	if (timeout == K_NO_WAIT) {
+	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
 		k_spin_unlock(&lock, key);
 		ret = -EBUSY;
 		goto out;
@@ -163,7 +164,7 @@ out:
 }
 
 #ifdef CONFIG_USERSPACE
-static inline int z_vrfy_k_sem_take(struct k_sem *sem, s32_t timeout)
+static inline int z_vrfy_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
 {
 	Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
 	return z_impl_k_sem_take((struct k_sem *)sem, timeout);
diff --git a/kernel/stack.c b/kernel/stack.c
index fc737a56338..be00a1fd348 100644
--- a/kernel/stack.c
+++ b/kernel/stack.c
@@ -133,7 +133,8 @@ static inline int z_vrfy_k_stack_push(struct k_stack *stack, stack_data_t data)
 #include <syscalls/k_stack_push_mrsh.c>
 #endif
 
-int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data, s32_t timeout)
+int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data,
+		       k_timeout_t timeout)
 {
 	k_spinlock_key_t key;
 	int result;
@@ -147,7 +148,7 @@ int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data, s32_t timeout)
 		return 0;
 	}
 
-	if (timeout == K_NO_WAIT) {
+	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
 		k_spin_unlock(&stack->lock, key);
 		return -EBUSY;
 	}
@@ -163,7 +164,7 @@ int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data, s32_t timeout)
 
 #ifdef CONFIG_USERSPACE
 static inline int z_vrfy_k_stack_pop(struct k_stack *stack,
-				     stack_data_t *data, s32_t timeout)
+				     stack_data_t *data, k_timeout_t timeout)
 {
 	Z_OOPS(Z_SYSCALL_OBJ(stack, K_OBJ_STACK));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, sizeof(stack_data_t)));
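For illustration (not part of the diff): the ISR rule enforced by the asserts above is unchanged in substance, only in spelling; from interrupt context the sole legal timeout is K_NO_WAIT. A hedged sketch, where my_isr() and the semaphore pointer are placeholders:

	/* ISR handlers may only attempt a non-blocking take. */
	void my_isr(void *arg)
	{
		struct k_sem *my_sem = (struct k_sem *)arg;

		if (k_sem_take(my_sem, K_NO_WAIT) == 0) {
			/* got the semaphore without blocking */
		}
	}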
diff --git a/kernel/thread.c b/kernel/thread.c
index fc8af5767ff..e219de1bf9b 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -404,15 +404,17 @@ static inline void z_vrfy_k_thread_start(struct k_thread *thread)
 #endif
 
 #ifdef CONFIG_MULTITHREADING
-static void schedule_new_thread(struct k_thread *thread, s32_t delay)
+static void schedule_new_thread(struct k_thread *thread, k_timeout_t delay)
 {
 #ifdef CONFIG_SYS_CLOCK_EXISTS
-	if (delay == 0) {
+	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
 		k_thread_start(thread);
 	} else {
-		s32_t ticks = _TICK_ALIGN + k_ms_to_ticks_ceil32(delay);
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+		delay = _TICK_ALIGN + k_ms_to_ticks_ceil32(delay);
+#endif
 
-		z_add_thread_timeout(thread, ticks);
+		z_add_thread_timeout(thread, delay);
 	}
 #else
 	ARG_UNUSED(delay);
@@ -612,7 +614,7 @@ k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
 			       k_thread_stack_t *stack,
 			       size_t stack_size, k_thread_entry_t entry,
 			       void *p1, void *p2, void *p3,
-			       int prio, u32_t options, s32_t delay)
+			       int prio, u32_t options, k_timeout_t delay)
 {
 	__ASSERT(!arch_is_in_isr(), "Threads may not be created in ISRs");
 
@@ -626,7 +628,7 @@ k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
 	z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
 			  prio, options, NULL);
 
-	if (delay != K_FOREVER) {
+	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
 		schedule_new_thread(new_thread, delay);
 	}
 
@@ -639,7 +641,7 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
 			       k_thread_stack_t *stack,
 			       size_t stack_size, k_thread_entry_t entry,
 			       void *p1, void *p2, void *p3,
-			       int prio, u32_t options, s32_t delay)
+			       int prio, u32_t options, k_timeout_t delay)
 {
 	size_t total_size, stack_obj_size;
 	struct z_object *stack_object;
@@ -689,7 +691,7 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
 	z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
 			  prio, options, NULL);
 
-	if (delay != K_FOREVER) {
+	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
 		schedule_new_thread(new_thread, delay);
 	}
 
@@ -747,9 +749,9 @@ void z_init_static_threads(void)
 	 */
 	k_sched_lock();
 	_FOREACH_STATIC_THREAD(thread_data) {
-		if (thread_data->init_delay != K_FOREVER) {
+		if (thread_data->init_delay != K_TICKS_FOREVER) {
			schedule_new_thread(thread_data->init_thread,
-					    thread_data->init_delay);
+					    K_MSEC(thread_data->init_delay));
 		}
 	}
 	k_sched_unlock();
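For illustration (not part of the diff): the k_thread_create() delay is now a k_timeout_t, so K_NO_WAIT starts the thread immediately and K_FOREVER creates it paused until an explicit k_thread_start(). A hedged sketch; the stack, thread object, and entry symbols are placeholders:

	K_THREAD_STACK_DEFINE(my_stack, 1024);
	static struct k_thread my_thread;

	void start_worker(k_thread_entry_t entry)
	{
		k_tid_t tid = k_thread_create(&my_thread, my_stack,
					      K_THREAD_STACK_SIZEOF(my_stack),
					      entry, NULL, NULL, NULL,
					      5, 0, K_FOREVER); /* paused */

		k_thread_start(tid); /* release it explicitly */
	}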
diff --git a/kernel/timeout.c b/kernel/timeout.c
index 262366742d5..11aeed765aa 100644
--- a/kernel/timeout.c
+++ b/kernel/timeout.c
@@ -24,7 +24,7 @@ static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);
 static struct k_spinlock timeout_lock;
 
 #define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
-		  ? K_FOREVER : INT_MAX)
+		  ? K_TICKS_FOREVER : INT_MAX)
 
 /* Cycles left to process in the currently-executing z_clock_announce() */
 static int announce_remaining;
@@ -83,8 +83,15 @@ static s32_t next_timeout(void)
 	return ret;
 }
 
-void z_add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks)
+void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
+		   k_timeout_t timeout)
 {
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+	k_ticks_t ticks = timeout;
+#else
+	k_ticks_t ticks = timeout.ticks + 1;
+#endif
+
 	__ASSERT(!sys_dnode_is_linked(&to->node), "");
 	to->fn = fn;
 	ticks = MAX(1, ticks);
@@ -150,7 +157,7 @@ s32_t z_timeout_remaining(struct _timeout *timeout)
 
 s32_t z_get_next_timeout_expiry(void)
 {
-	s32_t ret = K_FOREVER;
+	s32_t ret = K_TICKS_FOREVER;
 
 	LOCKED(&timeout_lock) {
 		ret = next_timeout();
@@ -162,7 +169,7 @@ void z_set_timeout_expiry(s32_t ticks, bool idle)
 {
 	LOCKED(&timeout_lock) {
 		int next = next_timeout();
-		bool sooner = (next == K_FOREVER) || (ticks < next);
+		bool sooner = (next == K_TICKS_FOREVER) || (ticks < next);
 		bool imminent = next <= 1;
 
 		/* Only set new timeouts when they are sooner than
@@ -248,3 +255,24 @@ static inline s64_t z_vrfy_k_uptime_get(void)
 }
 #include <syscalls/k_uptime_get_mrsh.c>
 #endif
+
+/* Returns the uptime expiration (relative to an unlocked "now"!) of a
+ * timeout object.
+ */
+u64_t z_timeout_end_calc(k_timeout_t timeout)
+{
+	k_ticks_t dt;
+
+	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
+		return UINT64_MAX;
+	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
+		return z_tick_get();
+	}
+
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+	dt = k_ms_to_ticks_ceil32(timeout);
+#else
+	dt = timeout.ticks;
+#endif
+	return z_tick_get() + MAX(1, dt);
+}
diff --git a/kernel/timer.c b/kernel/timer.c
index 47c56394e1e..bcaec8a3ff1 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -52,7 +52,8 @@ void z_timer_expiration_handler(struct _timeout *t)
 	 * if the timer is periodic, start it again; don't add _TICK_ALIGN
 	 * since we're already aligned to a tick boundary
 	 */
-	if (timer->period > 0) {
+	if (!K_TIMEOUT_EQ(timer->period, K_NO_WAIT) &&
+	    !K_TIMEOUT_EQ(timer->period, K_FOREVER)) {
 		z_add_timeout(&timer->timeout, z_timer_expiration_handler,
 			      timer->period);
 	}
@@ -105,29 +106,43 @@ void k_timer_init(struct k_timer *timer,
 }
 
-void z_impl_k_timer_start(struct k_timer *timer, s32_t duration, s32_t period)
+void z_impl_k_timer_start(struct k_timer *timer, k_timeout_t duration,
+			  k_timeout_t period)
 {
-	__ASSERT(duration >= 0 && period >= 0 &&
-		 (duration != 0 || period != 0), "invalid parameters\n");
-
-	volatile s32_t period_in_ticks, duration_in_ticks;
-
-	period_in_ticks = k_ms_to_ticks_ceil32(period);
-	duration_in_ticks = k_ms_to_ticks_ceil32(duration);
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+	duration = k_ms_to_ticks_ceil32(duration);
+	period = k_ms_to_ticks_ceil32(period);
+#else
+	/* z_add_timeout() always adds one to the incoming tick count
+	 * to round up to the next tick (by convention it waits for
+	 * "at least as long as the specified timeout"), but the
+	 * period interval is always guaranteed to be reset from
+	 * within the timer ISR, so no round up is desired.  Subtract
+	 * one.
+	 *
+	 * Note that the duration (!) value gets the same treatment
+	 * for backwards compatibility.  This is unfortunate
+	 * (i.e. k_timer_start() doesn't treat its initial sleep
+	 * argument the same way k_sleep() does), but historical.  The
+	 * timer_api test relies on this behavior.
+	 */
+	period.ticks = MAX(period.ticks - 1, 0);
+	duration.ticks = MAX(duration.ticks - 1, 0);
+#endif
 
 	(void)z_abort_timeout(&timer->timeout);
-	timer->period = period_in_ticks;
+	timer->period = period;
 	timer->status = 0U;
+
 	z_add_timeout(&timer->timeout, z_timer_expiration_handler,
-		      duration_in_ticks);
+		      duration);
 }
 
 #ifdef CONFIG_USERSPACE
 static inline void z_vrfy_k_timer_start(struct k_timer *timer,
-					s32_t duration, s32_t period)
+					k_timeout_t duration,
+					k_timeout_t period)
 {
-	Z_OOPS(Z_SYSCALL_VERIFY(duration >= 0 && period >= 0 &&
-				(duration != 0 || period != 0)));
 	Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
 	z_impl_k_timer_start(timer, duration, period);
 }
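For illustration (not part of the diff): with the timer.c change, both arguments to k_timer_start() are k_timeout_t values, and a period of K_NO_WAIT or K_FOREVER now yields a one-shot timer per the expiration-handler check above. A minimal sketch; the timer object and expiry handler names are placeholders:

	static struct k_timer my_timer;

	void my_expiry(struct k_timer *t)
	{
		/* runs once per period */
	}

	void start_ticker(void)
	{
		k_timer_init(&my_timer, my_expiry, NULL);
		/* was: k_timer_start(&my_timer, 10, 10) in milliseconds */
		k_timer_start(&my_timer, K_MSEC(10), K_MSEC(10));
	}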
diff --git a/kernel/work_q.c b/kernel/work_q.c
index 96cd2f644f9..a0706d67946 100644
--- a/kernel/work_q.c
+++ b/kernel/work_q.c
@@ -82,7 +82,7 @@ static int work_cancel(struct k_delayed_work *work)
 
 int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
 				   struct k_delayed_work *work,
-				   s32_t delay)
+				   k_timeout_t delay)
 {
 	k_spinlock_key_t key = k_spin_lock(&lock);
 	int err = 0;
@@ -112,15 +112,18 @@ int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
 	/* Submit work directly if no delay.  Note that this is a
 	 * blocking operation, so release the lock first.
 	 */
-	if (delay == 0) {
+	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
 		k_spin_unlock(&lock, key);
 		k_work_submit_to_queue(work_q, &work->work);
 		return 0;
 	}
 
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+	delay = _TICK_ALIGN + k_ms_to_ticks_ceil32(delay);
+#endif
+
 	/* Add timeout */
-	z_add_timeout(&work->timeout, work_timeout,
-		      _TICK_ALIGN + k_ms_to_ticks_ceil32(delay));
+	z_add_timeout(&work->timeout, work_timeout, delay);
 
 done:
 	k_spin_unlock(&lock, key);
diff --git a/lib/cmsis_rtos_v1/Kconfig b/lib/cmsis_rtos_v1/Kconfig
index fdfbc199823..37c631ccd84 100644
--- a/lib/cmsis_rtos_v1/Kconfig
+++ b/lib/cmsis_rtos_v1/Kconfig
@@ -5,6 +5,7 @@ config CMSIS_RTOS_V1
 	bool "CMSIS RTOS v1 API"
 	depends on THREAD_CUSTOM_DATA
 	depends on POLL
+	select LEGACY_TIMEOUT_API
 	help
 	  This enables CMSIS RTOS v1 API support. This is an OS-integration
 	  layer which allows applications using CMSIS RTOS APIs to build on
diff --git a/lib/cmsis_rtos_v2/Kconfig b/lib/cmsis_rtos_v2/Kconfig
index d79fb04e0b5..cb54769085b 100644
--- a/lib/cmsis_rtos_v2/Kconfig
+++ b/lib/cmsis_rtos_v2/Kconfig
@@ -9,6 +9,7 @@ config CMSIS_RTOS_V2
 	depends on THREAD_MONITOR
 	depends on INIT_STACKS
 	depends on NUM_PREEMPT_PRIORITIES >= 56
+	select LEGACY_TIMEOUT_API
 	help
 	  This enables CMSIS RTOS v2 API support. This is an OS-integration
 	  layer which allows applications using CMSIS RTOS V2 APIs to build
diff --git a/lib/os/mutex.c b/lib/os/mutex.c
index 43fc254a7ac..953ee57cf2c 100644
--- a/lib/os/mutex.c
+++ b/lib/os/mutex.c
@@ -30,7 +30,7 @@ static bool check_sys_mutex_addr(struct sys_mutex *addr)
 	return Z_SYSCALL_MEMORY_WRITE(addr, sizeof(struct sys_mutex));
 }
 
-int z_impl_z_sys_mutex_kernel_lock(struct sys_mutex *mutex, s32_t timeout)
+int z_impl_z_sys_mutex_kernel_lock(struct sys_mutex *mutex, k_timeout_t timeout)
 {
 	struct k_mutex *kernel_mutex = get_k_mutex(mutex);
 
@@ -42,7 +42,7 @@ int z_impl_z_sys_mutex_kernel_lock(struct sys_mutex *mutex, s32_t timeout)
 }
 
 static inline int z_vrfy_z_sys_mutex_kernel_lock(struct sys_mutex *mutex,
-						 s32_t timeout)
+						 k_timeout_t timeout)
 {
 	if (check_sys_mutex_addr(mutex)) {
 		return -EACCES;
diff --git a/lib/os/sem.c b/lib/os/sem.c
index 02d3d1f9890..cb81d689382 100644
--- a/lib/os/sem.c
+++ b/lib/os/sem.c
@@ -79,7 +79,7 @@ int sys_sem_give(struct sys_sem *sem)
 	return ret;
 }
 
-int sys_sem_take(struct sys_sem *sem, s32_t timeout)
+int sys_sem_take(struct sys_sem *sem, k_timeout_t timeout)
 {
 	int ret = 0;
 	atomic_t old_value;
@@ -120,7 +120,7 @@ int sys_sem_give(struct sys_sem *sem)
 	return 0;
 }
 
-int sys_sem_take(struct sys_sem *sem, s32_t timeout)
+int sys_sem_take(struct sys_sem *sem, k_timeout_t timeout)
 {
 	int ret_value = 0;
diff --git a/lib/posix/Kconfig b/lib/posix/Kconfig
index 3e9e85c4c59..0fadc770740 100644
--- a/lib/posix/Kconfig
+++ b/lib/posix/Kconfig
@@ -12,6 +12,7 @@ config POSIX_MAX_FDS
 config POSIX_API
 	depends on !ARCH_POSIX
 	bool "POSIX APIs"
+	select LEGACY_TIMEOUT_API
 	help
 	  Enable mostly-standards-compliant implementations of
 	  various POSIX (IEEE 1003.1) APIs.
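For illustration (not part of the diff): delayed work now takes the opaque timeout as well, with K_NO_WAIT meaning "submit immediately, skipping the timeout queue" per the branch above. A hedged sketch; the work item and handler names are placeholders:

	static struct k_delayed_work my_work;

	void my_handler(struct k_work *work)
	{
		/* deferred processing */
	}

	void kick(void)
	{
		k_delayed_work_init(&my_work, my_handler);
		/* was: k_delayed_work_submit(&my_work, 250) in milliseconds */
		k_delayed_work_submit(&my_work, K_MSEC(250));
	}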
diff --git a/lib/posix/pthread_common.c b/lib/posix/pthread_common.c
index 3043a191bec..a2dbfa7f5f2 100644
--- a/lib/posix/pthread_common.c
+++ b/lib/posix/pthread_common.c
@@ -24,7 +24,7 @@ s64_t timespec_to_timeoutms(const struct timespec *abstime)
 	nsecs = abstime->tv_nsec - curtime.tv_nsec;
 
 	if (secs < 0 || (secs == 0 && nsecs < NSEC_PER_MSEC)) {
-		milli_secs = K_NO_WAIT;
+		milli_secs = 0;
 	} else {
 		milli_secs = secs * MSEC_PER_SEC + nsecs / NSEC_PER_MSEC;
 	}
diff --git a/samples/cpp_synchronization/src/main.cpp b/samples/cpp_synchronization/src/main.cpp
index 3959012b40a..7a204696ce9 100644
--- a/samples/cpp_synchronization/src/main.cpp
+++ b/samples/cpp_synchronization/src/main.cpp
@@ -94,7 +94,7 @@ int cpp_semaphore::wait(void)
  */
 int cpp_semaphore::wait(int timeout)
 {
-	return k_sem_take(&_sema_internal, timeout);
+	return k_sem_take(&_sema_internal, K_MSEC(timeout));
 }
 
 /**
@@ -127,7 +127,7 @@ void coop_thread_entry(void)
 		printk("%s: Hello World!\n", __FUNCTION__);
 
 		/* wait a while, then let main thread have a turn */
-		k_timer_start(&timer, SLEEPTIME, 0);
+		k_timer_start(&timer, K_MSEC(SLEEPTIME), K_NO_WAIT);
 		k_timer_status_sync(&timer);
 		sem_main.give();
 	}
@@ -139,7 +139,7 @@ void main(void)
 
 	k_thread_create(&coop_thread, coop_stack, STACKSIZE,
 			(k_thread_entry_t) coop_thread_entry,
-			NULL, NULL, NULL, K_PRIO_COOP(7), 0, 0);
+			NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT);
 	k_timer_init(&timer, NULL, NULL);
 
 	while (1) {
@@ -147,7 +147,7 @@ void main(void)
 		printk("%s: Hello World!\n", __FUNCTION__);
 
 		/* wait a while, then let coop thread have a turn */
-		k_timer_start(&timer, SLEEPTIME, 0);
+		k_timer_start(&timer, K_MSEC(SLEEPTIME), K_NO_WAIT);
 		k_timer_status_sync(&timer);
 		sem_coop.give();
diff --git a/samples/scheduler/metairq_dispatch/src/msgdev.c b/samples/scheduler/metairq_dispatch/src/msgdev.c
index 6236686772b..ea5b11f6001 100644
--- a/samples/scheduler/metairq_dispatch/src/msgdev.c
+++ b/samples/scheduler/metairq_dispatch/src/msgdev.c
@@ -77,7 +77,11 @@ static void timeout_reset(void)
 {
 	u32_t ticks = rand32() % MAX_EVENT_DELAY_TICKS;
 
+#ifdef CONFIG_LEGACY_TIMEOUT_API
 	z_add_timeout(&timeout, dev_timer_expired, ticks);
+#else
+	z_add_timeout(&timeout, dev_timer_expired, Z_TIMEOUT_TICKS(ticks));
+#endif
 }
 
 void message_dev_init(void)
diff --git a/soc/arm/ti_simplelink/Kconfig b/soc/arm/ti_simplelink/Kconfig
index e35796ee121..06a3fa67517 100644
--- a/soc/arm/ti_simplelink/Kconfig
+++ b/soc/arm/ti_simplelink/Kconfig
@@ -4,6 +4,7 @@
 
 config SOC_FAMILY_TISIMPLELINK
 	bool
+	select LEGACY_TIMEOUT_API
 
 if SOC_FAMILY_TISIMPLELINK
diff --git a/subsys/console/Kconfig b/subsys/console/Kconfig
index 8693baac657..c52d5035c76 100644
--- a/subsys/console/Kconfig
+++ b/subsys/console/Kconfig
@@ -3,6 +3,7 @@
 
 menuconfig CONSOLE_SUBSYS
 	bool "Console subsystem/support routines [EXPERIMENTAL]"
+	select LEGACY_TIMEOUT_API
 	help
 	  Console subsystem and helper functions
diff --git a/subsys/net/Kconfig b/subsys/net/Kconfig
index e0fde627eca..a4da7f088fb 100644
--- a/subsys/net/Kconfig
+++ b/subsys/net/Kconfig
@@ -7,6 +7,7 @@ menu "Networking"
 
 config NET_BUF
 	bool "Network buffer support"
+	select LEGACY_TIMEOUT_API
 	help
 	  This option enables support for generic network protocol
 	  buffers.
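For illustration (not part of the diff): the msgdev.c change above shows the dual-mode pattern for code that must build both with and without CONFIG_LEGACY_TIMEOUT_API when it deals in raw ticks. A hedged sketch of the same pattern; the timeout object and callback names are placeholders, and Z_TIMEOUT_TICKS() is the internal tick-valued initializer this patch introduces:

	static void arm_ticks(struct _timeout *my_to, _timeout_func_t my_fn,
			      u32_t ticks)
	{
	#ifdef CONFIG_LEGACY_TIMEOUT_API
		z_add_timeout(my_to, my_fn, ticks);	/* raw tick count */
	#else
		z_add_timeout(my_to, my_fn, Z_TIMEOUT_TICKS(ticks));
	#endif
	}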
diff --git a/subsys/power/policy/policy_residency.c b/subsys/power/policy/policy_residency.c
index 849f30c7024..ebe228bd839 100644
--- a/subsys/power/policy/policy_residency.c
+++ b/subsys/power/policy/policy_residency.c
@@ -49,7 +49,7 @@ enum power_states sys_pm_policy_next_state(s32_t ticks)
 {
 	int i;
 
-	if ((ticks != K_FOREVER) && (ticks < pm_min_residency[0])) {
+	if ((ticks != K_TICKS_FOREVER) && (ticks < pm_min_residency[0])) {
 		LOG_DBG("Not enough time for PM operations: %d", ticks);
 		return SYS_POWER_STATE_ACTIVE;
 	}
@@ -60,7 +60,7 @@ enum power_states sys_pm_policy_next_state(s32_t ticks)
 			continue;
 		}
 #endif
-		if ((ticks == K_FOREVER) ||
+		if ((ticks == K_TICKS_FOREVER) ||
 		    (ticks >= pm_min_residency[i])) {
 			LOG_DBG("Selected power state %d "
 				"(ticks: %d, min_residency: %u)",
diff --git a/tests/kernel/lifo/lifo_usage/src/main.c b/tests/kernel/lifo/lifo_usage/src/main.c
index 3787b3dfd6b..d5ea87abba7 100644
--- a/tests/kernel/lifo/lifo_usage/src/main.c
+++ b/tests/kernel/lifo/lifo_usage/src/main.c
@@ -35,7 +35,7 @@ struct reply_packet {
 struct timeout_order_data {
 	void *link_in_lifo;
 	struct k_lifo *klifo;
-	s32_t timeout;
+	k_ticks_t timeout;
 	s32_t timeout_order;
 	s32_t q_order;
 };
diff --git a/tests/kernel/mbox/mbox_usage/src/main.c b/tests/kernel/mbox/mbox_usage/src/main.c
index 34586cd61f2..d97ae505654 100644
--- a/tests/kernel/mbox/mbox_usage/src/main.c
+++ b/tests/kernel/mbox/mbox_usage/src/main.c
@@ -28,7 +28,7 @@ static enum mmsg_type {
 	TARGET_SOURCE
 } info_type;
 
-static void msg_sender(struct k_mbox *pmbox, s32_t timeout)
+static void msg_sender(struct k_mbox *pmbox, k_timeout_t timeout)
 {
 	struct k_mbox_msg mmsg;
 
@@ -53,7 +53,8 @@ static void msg_sender(struct k_mbox *pmbox, s32_t timeout)
 	}
 }
 
-static void msg_receiver(struct k_mbox *pmbox, k_tid_t thd_id, s32_t timeout)
+static void msg_receiver(struct k_mbox *pmbox, k_tid_t thd_id,
+			 k_timeout_t timeout)
 {
 	struct k_mbox_msg mmsg;
 	char rxdata[MAIL_LEN];
diff --git a/tests/kernel/mem_protect/futex/prj.conf b/tests/kernel/mem_protect/futex/prj.conf
index af4c2c15ab2..251902f4396 100644
--- a/tests/kernel/mem_protect/futex/prj.conf
+++ b/tests/kernel/mem_protect/futex/prj.conf
@@ -2,3 +2,4 @@ CONFIG_ZTEST=y
 CONFIG_IRQ_OFFLOAD=y
 CONFIG_TEST_USERSPACE=y
 CONFIG_MP_NUM_CPUS=1
+CONFIG_LEGACY_TIMEOUT_API=y
diff --git a/tests/kernel/mem_protect/futex/src/main.c b/tests/kernel/mem_protect/futex/src/main.c
index 9a4884d73ff..7fa59a8df92 100644
--- a/tests/kernel/mem_protect/futex/src/main.c
+++ b/tests/kernel/mem_protect/futex/src/main.c
@@ -64,7 +64,8 @@ void futex_wait_task(void *p1, void *p2, void *p3)
 	s32_t ret_value;
 	int time_val = *(int *)p1;
 
-	zassert_true(time_val >= (int)K_FOREVER, "invalid timeout parameter");
+	zassert_true(time_val >= (int)K_TICKS_FOREVER,
+		     "invalid timeout parameter");
 
 	ret_value = k_futex_wait(&simple_futex,
				 atomic_get(&simple_futex.val), time_val);
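For illustration (not part of the diff): code that traffics in plain integer tick counts, such as the PM policy above or timer drivers, keeps an integer sentinel and now compares against K_TICKS_FOREVER instead of the (no longer integral) K_FOREVER. A trivial sketch with a hypothetical helper:

	static bool is_bounded_wait(s32_t ticks)
	{
		return ticks != K_TICKS_FOREVER;
	}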
diff --git a/tests/kernel/pending/src/main.c b/tests/kernel/pending/src/main.c
index b1b38cca170..f5d05bd3400 100644
--- a/tests/kernel/pending/src/main.c
+++ b/tests/kernel/pending/src/main.c
@@ -116,7 +116,7 @@ static void sync_threads(struct k_work *work)
 
 static void fifo_tests(s32_t timeout, volatile int *state,
 		       void *(*get)(struct k_fifo *, s32_t),
-		       int (*sem_take)(struct k_sem *, s32_t))
+		       int (*sem_take)(struct k_sem *, k_timeout_t))
 {
 	struct fifo_data *data;
 
@@ -154,7 +154,7 @@ static void fifo_tests(s32_t timeout, volatile int *state,
 
 static void lifo_tests(s32_t timeout, volatile int *state,
 		       void *(*get)(struct k_lifo *, s32_t),
-		       int (*sem_take)(struct k_sem *, s32_t))
+		       int (*sem_take)(struct k_sem *, k_timeout_t))
 {
 	struct lifo_data *data;
 
diff --git a/tests/kernel/pipe/pipe_api/src/test_pipe_contexts.c b/tests/kernel/pipe/pipe_api/src/test_pipe_contexts.c
index 2a8c4de96cb..fbc1fff8d71 100644
--- a/tests/kernel/pipe/pipe_api/src/test_pipe_contexts.c
+++ b/tests/kernel/pipe/pipe_api/src/test_pipe_contexts.c
@@ -42,7 +42,7 @@ K_SEM_DEFINE(end_sema, 0, 1);
 #endif
 K_MEM_POOL_DEFINE(test_pool, SZ, SZ, 4, 4);
 
-static void tpipe_put(struct k_pipe *ppipe, s32_t timeout)
+static void tpipe_put(struct k_pipe *ppipe, k_timeout_t timeout)
 {
 	size_t to_wt, wt_byte = 0;
 
@@ -57,7 +57,7 @@ static void tpipe_put(struct k_pipe *ppipe, s32_t timeout)
 }
 
 static void tpipe_block_put(struct k_pipe *ppipe, struct k_sem *sema,
-			    s32_t timeout)
+			    k_timeout_t timeout)
 {
 	struct k_mem_block block;
 
@@ -73,7 +73,7 @@ static void tpipe_block_put(struct k_pipe *ppipe, struct k_sem *sema,
 	}
 }
 
-static void tpipe_get(struct k_pipe *ppipe, s32_t timeout)
+static void tpipe_get(struct k_pipe *ppipe, k_timeout_t timeout)
 {
 	unsigned char rx_data[PIPE_LEN];
 	size_t to_rd, rd_byte = 0;
diff --git a/tests/kernel/sleep/src/main.c b/tests/kernel/sleep/src/main.c
index 224c3f13212..574355d0f7b 100644
--- a/tests/kernel/sleep/src/main.c
+++ b/tests/kernel/sleep/src/main.c
@@ -251,7 +251,7 @@ static void forever_thread_entry(void *p1, void *p2, void *p3)
 	s32_t ret;
 
 	ret = k_sleep(K_FOREVER);
-	zassert_equal(ret, K_FOREVER, "unexpected return value");
+	zassert_equal(ret, K_TICKS_FOREVER, "unexpected return value");
 	k_sem_give(&test_thread_sem);
 }
diff --git a/tests/kernel/workq/work_queue/src/main.c b/tests/kernel/workq/work_queue/src/main.c
index 74705ccd7ce..7a8998dadaf 100644
--- a/tests/kernel/workq/work_queue/src/main.c
+++ b/tests/kernel/workq/work_queue/src/main.c
@@ -480,7 +480,7 @@ static void test_triggered_init(void)
 *
 * @see k_work_poll_init(), k_work_poll_submit()
 */
-static void test_triggered_submit(s32_t timeout)
+static void test_triggered_submit(k_timeout_t timeout)
 {
	int i;