diff --git a/drivers/timer/arcv2_timer0.c b/drivers/timer/arcv2_timer0.c
index 4f33e1af7a7..4282a37478e 100644
--- a/drivers/timer/arcv2_timer0.c
+++ b/drivers/timer/arcv2_timer0.c
@@ -216,7 +216,7 @@ void _timer_int_handler(void *unused)
 
 	z_clock_announce(_sys_idle_elapsed_ticks);
 
-	/* z_clock_announce(_sys_idle_elapsed_ticks) could cause new programming */
+	/* z_clock_announce() could cause new programming */
 	if (!programmed_ticks && _sys_clock_always_on) {
 		z_tick_set(z_clock_uptime());
 		program_max_cycles();
diff --git a/drivers/timer/cortex_m_systick.c b/drivers/timer/cortex_m_systick.c
index ac0a7903197..a30c0ea3979 100644
--- a/drivers/timer/cortex_m_systick.c
+++ b/drivers/timer/cortex_m_systick.c
@@ -270,7 +270,7 @@ void _timer_int_handler(void *unused)
 
 	z_clock_announce(_sys_idle_elapsed_ticks);
 
-	/* z_clock_announce(_sys_idle_elapsed_ticks) could cause new programming */
+	/* z_clock_announce() could cause new programming */
 	if (!idle_original_ticks && _sys_clock_always_on) {
 		z_tick_set(z_clock_uptime());
 		/* clear overflow tracking flag as it is accounted */
@@ -754,15 +754,16 @@ return (u32_t) get_elapsed_count();
 	do {
 		cac = clock_accumulated_count;
 #ifdef CONFIG_TICKLESS_IDLE
-		/* When we leave a tickless period the reload value of the timer
-		 * can be set to a remaining value to wait until end of tick.
-		 * (see z_clock_idle_exit). The remaining value is always smaller
-		 * than default_load_value. In this case the time elapsed until
-		 * the timer restart was not yet added to
-		 * clock_accumulated_count. To retrieve a correct cycle count
-		 * we must therefore consider the number of cycle since current
-		 * tick period start and not only the cycle number since
-		 * the timer restart.
+		/* When we leave a tickless period the reload value of
+		 * the timer can be set to a remaining value to wait
+		 * until end of tick. (see z_clock_idle_exit). The
+		 * remaining value is always smaller than
+		 * default_load_value. In this case the time elapsed
+		 * until the timer restart was not yet added to
+		 * clock_accumulated_count. To retrieve a correct
+		 * cycle count we must therefore consider the number
+		 * of cycle since current tick period start and not
+		 * only the cycle number since the timer restart.
 		 */
 		if (SysTick->LOAD < default_load_value) {
 			count = default_load_value;
diff --git a/drivers/timer/hpet.c b/drivers/timer/hpet.c
index d5b4748c1f6..ea5d0c710b0 100644
--- a/drivers/timer/hpet.c
+++ b/drivers/timer/hpet.c
@@ -301,7 +301,7 @@ void _timer_int_handler(void *unused)
 	programmed_ticks = 0;
 
 	z_clock_announce(_sys_idle_elapsed_ticks);
-	/* z_clock_announce(_sys_idle_elapsed_ticks) could cause new programming */
+	/* z_clock_announce() could cause new programming */
 	if (!programmed_ticks && _sys_clock_always_on) {
 		z_tick_set(z_clock_uptime());
 		program_max_cycles();
diff --git a/drivers/timer/loapic_timer.c b/drivers/timer/loapic_timer.c
index 6d2a33e6ddc..ffc7f0d0d7f 100644
--- a/drivers/timer/loapic_timer.c
+++ b/drivers/timer/loapic_timer.c
@@ -320,7 +320,7 @@ void _timer_int_handler(void *unused /* parameter is not used */
 
 	z_clock_announce(_sys_idle_elapsed_ticks);
 
-	/* z_clock_announce(_sys_idle_elapsed_ticks) could cause new programming */
+	/* z_clock_announce() could cause new programming */
 	if (!programmed_full_ticks && _sys_clock_always_on) {
 		z_tick_set(z_clock_uptime());
 		program_max_cycles();
@@ -332,16 +332,20 @@ void _timer_int_handler(void *unused /* parameter is not used */
 			u32_t cycles;
 
 			/*
-			 * The timer fired unexpectedly. This is due to one of two cases:
+			 * The timer fired unexpectedly. This is due
+			 * to one of two cases:
 			 * 1. Entering tickless idle straddled a tick.
 			 * 2. Leaving tickless idle straddled the final tick.
-			 * Due to the timer reprogramming in z_clock_idle_exit(), case #2
-			 * can be handled as a fall-through.
+			 * Due to the timer reprogramming in
+			 * z_clock_idle_exit(), case #2 can be handled
+			 * as a fall-through.
 			 *
-			 * NOTE: Although the cycle count is supposed to stop decrementing
-			 * once it hits zero in one-shot mode, not all targets implement
-			 * this properly (and continue to decrement). Thus, we have to
-			 * perform a second comparison to check for wrap-around.
+			 * NOTE: Although the cycle count is supposed
+			 * to stop decrementing once it hits zero in
+			 * one-shot mode, not all targets implement
+			 * this properly (and continue to decrement).
+			 * Thus, we have to perform a second
+			 * comparison to check for wrap-around.
 			 */
 			cycles = current_count_register_get();
 
@@ -604,7 +608,7 @@ void z_clock_idle_exit(void)
 	 *
 	 * NOTE #1: In the case of a straddled tick, the '_sys_idle_elapsed_ticks'
 	 * calculation below may result in either 0 or 1. If 1, then this may
-	 * result in a harmless extra call to z_clock_announce(_sys_idle_elapsed_ticks).
+	 * result in a harmless extra call to z_clock_announce().
 	 *
 	 * NOTE #2: In the case of a straddled tick, it is assumed that when the
 	 * timer is reprogrammed, it will be reprogrammed with a cycle count
diff --git a/drivers/timer/nrf_rtc_timer.c b/drivers/timer/nrf_rtc_timer.c
index be9993e8b6c..3a4d315f995 100644
--- a/drivers/timer/nrf_rtc_timer.c
+++ b/drivers/timer/nrf_rtc_timer.c
@@ -209,7 +209,8 @@ void _timer_idle_enter(s32_t sys_ticks)
 	/* If ticks is 0, the RTC interrupt handler will be set pending
 	 * immediately, meaning that we will not go to sleep.
 	 */
-	rtc_compare_set(rtc_past + (sys_ticks * sys_clock_hw_cycles_per_tick()));
+	rtc_compare_set(rtc_past +
+			(sys_ticks * sys_clock_hw_cycles_per_tick()));
 #endif
 }
 
@@ -435,7 +436,7 @@ void z_clock_idle_exit(void)
 	rtc_announce_set_next();
 
 	/* After exiting idle, the kernel no longer expects more than one sys
-	 * ticks to have passed when z_clock_announce(_sys_idle_elapsed_ticks) is called.
+	 * ticks to have passed when z_clock_announce() is called.
 	 */
 	expected_sys_ticks = 1;
 #endif
@@ -493,7 +494,7 @@ void rtc1_nrf5_isr(void *arg)
 		/* Anounce elapsed of _sys_idle_elapsed_ticks systicks*/
 		z_clock_announce(_sys_idle_elapsed_ticks);
 
-		/* z_clock_announce(_sys_idle_elapsed_ticks) could cause new programming */
+		/* z_clock_announce() could cause new programming */
 		if (!expected_sys_ticks && _sys_clock_always_on) {
 			program_max_cycles();
 		}
@@ -530,7 +531,8 @@ int z_clock_driver_init(struct device *device)
 
 	/* TODO: replace with counter driver to access RTC */
 	SYS_CLOCK_RTC->PRESCALER = 0;
-	nrf_rtc_cc_set(SYS_CLOCK_RTC, RTC_CC_IDX, sys_clock_hw_cycles_per_tick());
+	nrf_rtc_cc_set(SYS_CLOCK_RTC, RTC_CC_IDX,
+		       sys_clock_hw_cycles_per_tick());
 	nrf_rtc_event_enable(SYS_CLOCK_RTC, RTC_EVTENSET_COMPARE0_Msk);
 	nrf_rtc_int_enable(SYS_CLOCK_RTC, RTC_INTENSET_COMPARE0_Msk);
 
diff --git a/include/kernel.h b/include/kernel.h
index 8d2a7d8d9e6..936dcc9334d 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -1218,6 +1218,10 @@ __syscall void k_thread_name_set(k_tid_t thread_id, const char *value);
  */
 __syscall const char *k_thread_name_get(k_tid_t thread_id);
 
+/**
+ * @}
+ */
+
 /**
  * @addtogroup clock_apis
  * @{
diff --git a/include/sys_clock.h b/include/sys_clock.h
index 6df55488f41..e7f360e4a65 100644
--- a/include/sys_clock.h
+++ b/include/sys_clock.h
@@ -89,9 +89,9 @@ static inline int sys_clock_hw_cycles_per_tick(void)
  */
 #if !defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
 
-#if (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC % CONFIG_SYS_CLOCK_TICKS_PER_SEC) != 0
+#if (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC % CONFIG_SYS_CLOCK_TICKS_PER_SEC) != 0
 #define _NEED_PRECISE_TICK_MS_CONVERSION
-#elif (MSEC_PER_SEC % CONFIG_SYS_CLOCK_TICKS_PER_SEC) != 0
+#elif (MSEC_PER_SEC % CONFIG_SYS_CLOCK_TICKS_PER_SEC) != 0
 #define _NON_OPTIMIZED_TICKS_PER_SEC
 #endif
 #endif
diff --git a/kernel/include/timeout_q.h b/kernel/include/timeout_q.h
index f0a680e7d36..6cb6a7cdb62 100644
--- a/kernel/include/timeout_q.h
+++ b/kernel/include/timeout_q.h
@@ -53,8 +53,8 @@ s32_t z_timeout_remaining(struct _timeout *timeout);
 #else
 
 /* Stubs when !CONFIG_SYS_CLOCK_EXISTS */
-#define _init_thread_timeout(t) do{}while(0)
-#define _add_thread_timeout(th,to) do{}while(0 && (void*)to && (void*)th)
+#define _init_thread_timeout(t) do {} while (0)
+#define _add_thread_timeout(th, to) do {} while (0 && (void *)to && (void *)th)
 #define _abort_thread_timeout(t) (0)
 #define _get_next_timeout_expiry() (K_FOREVER)
 
diff --git a/tests/benchmarks/timing_info/src/msg_passing_bench.c b/tests/benchmarks/timing_info/src/msg_passing_bench.c
index 37a67953cc1..2db4fe530e8 100644
--- a/tests/benchmarks/timing_info/src/msg_passing_bench.c
+++ b/tests/benchmarks/timing_info/src/msg_passing_bench.c
@@ -362,7 +362,7 @@ void thread_producer_get_msgq_w_cxt_switch(void *p1, void *p2, void *p3)
 
 void thread_consumer_get_msgq_w_cxt_switch(void *p1, void *p2, void *p3)
 {
-	producer_get_w_cxt_switch_tid->base.timeout.dticks =_EXPIRED;
+	producer_get_w_cxt_switch_tid->base.timeout.dticks = _EXPIRED;
 	__read_swap_end_time_value = 1;
 	TIMING_INFO_PRE_READ();
 	__msg_q_get_w_cxt_start_time = TIMING_INFO_OS_GET_TIME();
diff --git a/tests/kernel/common/src/clock.c b/tests/kernel/common/src/clock.c
index cf189a6479f..6ffd3553e0e 100644
--- a/tests/kernel/common/src/clock.c
+++ b/tests/kernel/common/src/clock.c
@@ -124,7 +124,8 @@ void test_clock_cycle(void)
 	if (c1 > c0) {
 		/* delta cycle should be greater than 1 milli-second*/
 		zassert_true((c1 - c0) >
-			     (sys_clock_hw_cycles_per_sec() / MSEC_PER_SEC), NULL);
+			     (sys_clock_hw_cycles_per_sec() / MSEC_PER_SEC),
+			     NULL);
 		/* delta NS should be greater than 1 milli-second */
 		zassert_true(SYS_CLOCK_HW_CYCLES_TO_NS(c1 - c0) >
 			     (NSEC_PER_SEC / MSEC_PER_SEC), NULL);
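
Note (not part of the patch): the rewrapped comments in cortex_m_systick.c and
loapic_timer.c describe two cycle-accounting rules used on the tickless-idle
paths. The sketch below restates the arithmetic as a small standalone C
program; CYCLES_PER_TICK, PROGRAMMED_CYCLES, cycles_into_tick() and
oneshot_elapsed() are hypothetical names invented for illustration, not
Zephyr APIs, and the real drivers additionally run these checks under a
read-retry loop or with interrupts locked.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical tick length and one-shot programming, in timer cycles. */
#define CYCLES_PER_TICK   1000u
#define PROGRAMMED_CYCLES 4000u

/*
 * Cycles elapsed since the start of the current tick period.  After leaving
 * tickless idle the timer may have been reloaded with only the remainder of
 * the tick (reload < CYCLES_PER_TICK); the cycles spent before that reload
 * still belong to the current tick, so count from the full tick length
 * rather than from the shortened reload value.
 */
static uint32_t cycles_into_tick(uint32_t reload, uint32_t current_val)
{
	uint32_t full = (reload < CYCLES_PER_TICK) ? CYCLES_PER_TICK : reload;

	return full - current_val;
}

/*
 * Cycles elapsed on a one-shot down-counter that should stop at zero but,
 * on some targets, keeps decrementing and wraps.  A reading larger than the
 * programmed count therefore also means the period has expired, which is
 * the "second comparison" the loapic_timer.c comment refers to.
 */
static uint32_t oneshot_elapsed(uint32_t current_count)
{
	if (current_count == 0u || current_count > PROGRAMMED_CYCLES) {
		return PROGRAMMED_CYCLES;
	}
	return PROGRAMMED_CYCLES - current_count;
}

int main(void)
{
	/* Periodic case: reload equals the full tick length. */
	printf("%" PRIu32 "\n", cycles_into_tick(1000u, 250u));  /* 750 */
	/* Tickless exit: reload held only the 300 cycles left in the tick. */
	printf("%" PRIu32 "\n", cycles_into_tick(300u, 100u));   /* 900 */

	/* One-shot counter still running. */
	printf("%" PRIu32 "\n", oneshot_elapsed(1500u));         /* 2500 */
	/* Counter stopped at zero, as one-shot mode specifies. */
	printf("%" PRIu32 "\n", oneshot_elapsed(0u));            /* 4000 */
	/* Counter kept decrementing and wrapped past zero. */
	printf("%" PRIu32 "\n", oneshot_elapsed(0xFFFFFFF0u));   /* 4000 */

	return 0;
}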