diff --git a/arch/arc/core/timestamp.c b/arch/arc/core/timestamp.c index 8d5f2a855b8..74d588baf40 100644 --- a/arch/arc/core/timestamp.c +++ b/arch/arc/core/timestamp.c @@ -16,7 +16,7 @@ #include extern volatile u64_t _sys_clock_tick_count; -extern int sys_clock_hw_cycles_per_tick; +extern int sys_clock_hw_cycles_per_tick(void); /* * @brief Read 64-bit timestamp value @@ -36,7 +36,7 @@ u64_t _tsc_read(void) t = (u64_t)_sys_clock_tick_count; count = _arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT); irq_unlock(key); - t *= (u64_t)sys_clock_hw_cycles_per_tick; + t *= (u64_t)sys_clock_hw_cycles_per_tick(); t += (u64_t)count; return t; } diff --git a/drivers/timer/altera_avalon_timer_hal.c b/drivers/timer/altera_avalon_timer_hal.c index 822c71167a6..932a1a5ced1 100644 --- a/drivers/timer/altera_avalon_timer_hal.c +++ b/drivers/timer/altera_avalon_timer_hal.c @@ -24,7 +24,7 @@ static void timer_irq_handler(void *unused) read_timer_start_of_tick_handler(); #endif - accumulated_cycle_count += sys_clock_hw_cycles_per_tick; + accumulated_cycle_count += sys_clock_hw_cycles_per_tick(); /* Clear the interrupt */ alt_handle_irq((void *)TIMER_0_BASE, TIMER_0_IRQ); @@ -42,15 +42,15 @@ int _sys_clock_driver_init(struct device *device) ARG_UNUSED(device); IOWR_ALTERA_AVALON_TIMER_PERIODL(TIMER_0_BASE, - sys_clock_hw_cycles_per_tick & 0xFFFF); + sys_clock_hw_cycles_per_tick() & 0xFFFF); IOWR_ALTERA_AVALON_TIMER_PERIODH(TIMER_0_BASE, - (sys_clock_hw_cycles_per_tick >> 16) & 0xFFFF); + (sys_clock_hw_cycles_per_tick() >> 16) & 0xFFFF); IRQ_CONNECT(TIMER_0_IRQ, 0, timer_irq_handler, NULL, 0); irq_enable(TIMER_0_IRQ); alt_avalon_timer_sc_init((void *)TIMER_0_BASE, 0, - TIMER_0_IRQ, sys_clock_hw_cycles_per_tick); + TIMER_0_IRQ, sys_clock_hw_cycles_per_tick()); return 0; } diff --git a/drivers/timer/arcv2_timer0.c b/drivers/timer/arcv2_timer0.c index e487493f763..d58edd070ad 100644 --- a/drivers/timer/arcv2_timer0.c +++ b/drivers/timer/arcv2_timer0.c @@ -487,7 +487,7 @@ int
_sys_clock_driver_init(struct device *device) timer0_control_register_set(0); timer0_count_register_set(0); - cycles_per_tick = sys_clock_hw_cycles_per_tick; + cycles_per_tick = sys_clock_hw_cycles_per_tick(); IRQ_CONNECT(IRQ_TIMER0, CONFIG_ARCV2_TIMER_IRQ_PRIORITY, _timer_int_handler, NULL, 0); diff --git a/drivers/timer/cortex_m_systick.c b/drivers/timer/cortex_m_systick.c index 7f709500e99..f68a6a3bc30 100644 --- a/drivers/timer/cortex_m_systick.c +++ b/drivers/timer/cortex_m_systick.c @@ -313,7 +313,7 @@ void _timer_int_handler(void *unused) * No tickless idle: * Update the total tick count and announce this tick to the kernel. */ - clock_accumulated_count += sys_clock_hw_cycles_per_tick; + clock_accumulated_count += sys_clock_hw_cycles_per_tick(); _sys_clock_tick_announce(); #endif /* CONFIG_TICKLESS_IDLE */ @@ -336,7 +336,7 @@ void _timer_int_handler(void *unused) #else /* !CONFIG_SYS_POWER_MANAGEMENT */ /* accumulate total counter value */ - clock_accumulated_count += sys_clock_hw_cycles_per_tick; + clock_accumulated_count += sys_clock_hw_cycles_per_tick(); /* * one more tick has occurred -- don't need to do anything special since @@ -709,9 +709,9 @@ int _sys_clock_driver_init(struct device *device) */ /* systick supports 24-bit H/W counter */ - __ASSERT(sys_clock_hw_cycles_per_tick <= (1 << 24), - "sys_clock_hw_cycles_per_tick too large"); - sysTickReloadSet(sys_clock_hw_cycles_per_tick - 1); + __ASSERT(sys_clock_hw_cycles_per_tick() <= (1 << 24), + "sys_clock_hw_cycles_per_tick() too large"); + sysTickReloadSet(sys_clock_hw_cycles_per_tick() - 1); #ifdef CONFIG_TICKLESS_IDLE diff --git a/drivers/timer/hpet.c b/drivers/timer/hpet.c index 233fb26951a..b8f1f897dc2 100644 --- a/drivers/timer/hpet.c +++ b/drivers/timer/hpet.c @@ -589,7 +589,7 @@ int _sys_clock_driver_init(struct device *device) /* * This driver shall read the COUNTER_CLK_PERIOD value from the general * capabilities register rather than rely on a board.h provide macro - * (or the global variable 
'sys_clock_hw_cycles_per_tick') + * (or the global variable 'sys_clock_hw_cycles_per_tick()') * to determine the frequency of clock applied to the HPET device. */ @@ -611,10 +611,9 @@ int _sys_clock_driver_init(struct device *device) DBG("HPET: timer0: available interrupts mask 0x%x\n", (u32_t)(*_HPET_TIMER0_CONFIG_CAPS >> 32)); - /* Initialize sys_clock_hw_cycles_per_tick/sec */ + /* Initialize sys_clock_hw_cycles_per_sec */ - sys_clock_hw_cycles_per_tick = counter_load_value; - z_clock_hw_cycles_per_sec = sys_clock_hw_cycles_per_tick * + z_clock_hw_cycles_per_sec = counter_load_value * CONFIG_SYS_CLOCK_TICKS_PER_SEC; diff --git a/drivers/timer/loapic_timer.c b/drivers/timer/loapic_timer.c index fb829dc80ad..39d6837f8b6 100644 --- a/drivers/timer/loapic_timer.c +++ b/drivers/timer/loapic_timer.c @@ -648,7 +648,7 @@ int _sys_clock_driver_init(struct device *device) /* determine the timer counter value (in timer clock cycles/system tick) */ - cycles_per_tick = sys_clock_hw_cycles_per_tick; + cycles_per_tick = sys_clock_hw_cycles_per_tick(); tickless_idle_init(); diff --git a/drivers/timer/nrf_rtc_timer.c b/drivers/timer/nrf_rtc_timer.c index 8ce9dbe5047..81f6c51a4f9 100644 --- a/drivers/timer/nrf_rtc_timer.c +++ b/drivers/timer/nrf_rtc_timer.c @@ -38,7 +38,7 @@ /* * rtc_past holds the value of RTC_COUNTER at the time the last sys tick was * announced, in RTC ticks. It is therefore always a multiple of - * sys_clock_hw_cycles_per_tick. + * sys_clock_hw_cycles_per_tick(). */ static u32_t rtc_past; @@ -114,12 +114,12 @@ static void rtc_announce_set_next(void) /* If no sys ticks have elapsed, there is no point in incrementing the * counters or announcing it. */ - if (rtc_elapsed >= sys_clock_hw_cycles_per_tick) { + if (rtc_elapsed >= sys_clock_hw_cycles_per_tick()) { #ifdef CONFIG_TICKLESS_IDLE /* Calculate how many sys ticks elapsed since the last sys tick * and notify the kernel if necessary. 
*/ - sys_elapsed = rtc_elapsed / sys_clock_hw_cycles_per_tick; + sys_elapsed = rtc_elapsed / sys_clock_hw_cycles_per_tick(); if (sys_elapsed > expected_sys_ticks) { /* Never announce more sys ticks than the kernel asked @@ -141,7 +141,7 @@ static void rtc_announce_set_next(void) * has passed. */ rtc_past = (rtc_past + - (sys_elapsed * sys_clock_hw_cycles_per_tick) + (sys_elapsed * sys_clock_hw_cycles_per_tick()) ) & RTC_MASK; _sys_idle_elapsed_ticks = sys_elapsed; @@ -149,7 +149,7 @@ static void rtc_announce_set_next(void) } /* Set the RTC to the next sys tick */ - rtc_compare_set(rtc_past + sys_clock_hw_cycles_per_tick); + rtc_compare_set(rtc_past + sys_clock_hw_cycles_per_tick()); } #endif @@ -196,8 +196,8 @@ void _timer_idle_enter(s32_t sys_ticks) #else /* Restrict ticks to max supported by RTC without risking overflow*/ if ((sys_ticks < 0) || - (sys_ticks > (RTC_HALF / sys_clock_hw_cycles_per_tick))) { - sys_ticks = RTC_HALF / sys_clock_hw_cycles_per_tick; + (sys_ticks > (RTC_HALF / sys_clock_hw_cycles_per_tick()))) { + sys_ticks = RTC_HALF / sys_clock_hw_cycles_per_tick(); } expected_sys_ticks = sys_ticks; @@ -205,7 +205,7 @@ void _timer_idle_enter(s32_t sys_ticks) /* If ticks is 0, the RTC interrupt handler will be set pending * immediately, meaning that we will not go to sleep. 
*/ - rtc_compare_set(rtc_past + (sys_ticks * sys_clock_hw_cycles_per_tick)); + rtc_compare_set(rtc_past + (sys_ticks * sys_clock_hw_cycles_per_tick())); #endif } @@ -222,11 +222,11 @@ static inline void program_max_cycles(void) _sys_clock_tick_count = _get_elapsed_clock_time(); /* Update rtc_past to track rtc timer count*/ rtc_past = (_sys_clock_tick_count * - sys_clock_hw_cycles_per_tick) & RTC_MASK; + sys_clock_hw_cycles_per_tick()) & RTC_MASK; /* Programe RTC compare register to generate interrupt*/ rtc_compare_set(rtc_past + - (max_cycles * sys_clock_hw_cycles_per_tick)); + (max_cycles * sys_clock_hw_cycles_per_tick())); } @@ -283,7 +283,7 @@ u32_t _get_elapsed_program_time(void) rtc_elapsed = (RTC_COUNTER - rtc_past_copy) & RTC_MASK; /* Convert number of Machine cycles to SYS_TICKS */ - return (rtc_elapsed / sys_clock_hw_cycles_per_tick); + return (rtc_elapsed / sys_clock_hw_cycles_per_tick()); } @@ -307,14 +307,14 @@ void _set_time(u32_t time) expected_sys_ticks = time; _sys_clock_tick_count = _get_elapsed_clock_time(); /* Update rtc_past to track rtc timer count*/ - rtc_past = (_sys_clock_tick_count * sys_clock_hw_cycles_per_tick) & RTC_MASK; + rtc_past = (_sys_clock_tick_count * sys_clock_hw_cycles_per_tick()) & RTC_MASK; expected_sys_ticks = expected_sys_ticks > _get_max_clock_time() ? _get_max_clock_time() : expected_sys_ticks; /* Programe RTC compare register to generate interrupt*/ rtc_compare_set(rtc_past + - (expected_sys_ticks * sys_clock_hw_cycles_per_tick)); + (expected_sys_ticks * sys_clock_hw_cycles_per_tick())); } @@ -336,8 +336,8 @@ s32_t _get_max_clock_time(void) rtc_away = rtc_away > RTC_HALF ? 
RTC_HALF : rtc_away; /* Convert RTC Ticks to SYS TICKS*/ - if (rtc_away >= sys_clock_hw_cycles_per_tick) { - sys_away = rtc_away / sys_clock_hw_cycles_per_tick; + if (rtc_away >= sys_clock_hw_cycles_per_tick()) { + sys_away = rtc_away / sys_clock_hw_cycles_per_tick(); } return sys_away; @@ -378,9 +378,9 @@ u64_t _get_elapsed_clock_time(void) compiler_barrier(); rtc_elapsed = (RTC_COUNTER - rtc_past_copy) & RTC_MASK; - if (rtc_elapsed >= sys_clock_hw_cycles_per_tick) { + if (rtc_elapsed >= sys_clock_hw_cycles_per_tick()) { /* Update total number of SYS_TICKS passed */ - elapsed += (rtc_elapsed / sys_clock_hw_cycles_per_tick); + elapsed += (rtc_elapsed / sys_clock_hw_cycles_per_tick()); } return elapsed; @@ -526,7 +526,7 @@ int _sys_clock_driver_init(struct device *device) /* TODO: replace with counter driver to access RTC */ SYS_CLOCK_RTC->PRESCALER = 0; - nrf_rtc_cc_set(SYS_CLOCK_RTC, RTC_CC_IDX, sys_clock_hw_cycles_per_tick); + nrf_rtc_cc_set(SYS_CLOCK_RTC, RTC_CC_IDX, sys_clock_hw_cycles_per_tick()); nrf_rtc_event_enable(SYS_CLOCK_RTC, RTC_EVTENSET_COMPARE0_Msk); nrf_rtc_int_enable(SYS_CLOCK_RTC, RTC_INTENSET_COMPARE0_Msk); @@ -549,7 +549,7 @@ u32_t _timer_cycle_get_32(void) u32_t elapsed_cycles; /* Number of timer cycles announced as ticks so far. */ - ticked_cycles = _sys_clock_tick_count * sys_clock_hw_cycles_per_tick; + ticked_cycles = _sys_clock_tick_count * sys_clock_hw_cycles_per_tick(); /* Make sure that compiler will not reverse access to RTC and * _sys_clock_tick_count. 
diff --git a/drivers/timer/pulpino_timer.c b/drivers/timer/pulpino_timer.c index d9dd6d6bbd8..691e7b2b9bc 100644 --- a/drivers/timer/pulpino_timer.c +++ b/drivers/timer/pulpino_timer.c @@ -31,7 +31,7 @@ static void pulpino_timer_irq_handler(void *unused) /* Reset counter */ timer->val = 0; - accumulated_cycle_count += sys_clock_hw_cycles_per_tick; + accumulated_cycle_count += sys_clock_hw_cycles_per_tick(); _sys_clock_tick_announce(); } @@ -50,10 +50,10 @@ int _sys_clock_driver_init(struct device *device) /* * Initialize timer. * Reset counter and set timer to generate interrupt - * every sys_clock_hw_cycles_per_tick + * every sys_clock_hw_cycles_per_tick() */ timer->val = 0; - timer->cmp = sys_clock_hw_cycles_per_tick; + timer->cmp = sys_clock_hw_cycles_per_tick(); timer->ctrl = TIMER_CTRL_EN; return 0; diff --git a/drivers/timer/riscv_machine_timer.c b/drivers/timer/riscv_machine_timer.c index 6b9f5949f61..507deb4375c 100644 --- a/drivers/timer/riscv_machine_timer.c +++ b/drivers/timer/riscv_machine_timer.c @@ -51,9 +51,9 @@ static ALWAYS_INLINE void riscv_machine_rearm_timer(void) /* * Rearm timer to generate an interrupt after - * sys_clock_hw_cycles_per_tick + * sys_clock_hw_cycles_per_tick() */ - rtc += sys_clock_hw_cycles_per_tick; + rtc += sys_clock_hw_cycles_per_tick(); mtimecmp->val_low = (u32_t)(rtc & 0xffffffff); mtimecmp->val_high = (u32_t)((rtc >> 32) & 0xffffffff); diff --git a/drivers/timer/xtensa_sys_timer.c b/drivers/timer/xtensa_sys_timer.c index 1ac83daf9ce..d22d6689eaa 100644 --- a/drivers/timer/xtensa_sys_timer.c +++ b/drivers/timer/xtensa_sys_timer.c @@ -242,7 +242,7 @@ u64_t _get_elapsed_clock_time(void) static ALWAYS_INLINE void tickless_idle_init(void) { - cycles_per_tick = sys_clock_hw_cycles_per_tick; + cycles_per_tick = sys_clock_hw_cycles_per_tick(); /* calculate the max number of ticks with this 32-bit H/W counter */ max_system_ticks = MAX_TIMER_CYCLES / cycles_per_tick; max_load_value = max_system_ticks * cycles_per_tick; diff --git 
a/include/sys_clock.h b/include/sys_clock.h index 12b2f41d985..3a6bdaed559 100644 --- a/include/sys_clock.h +++ b/include/sys_clock.h @@ -42,6 +42,20 @@ static inline int sys_clock_hw_cycles_per_sec(void) #endif } +/* Note that some systems with comparatively slow cycle counters + * experience precision loss when doing math like this. In the + * general case it is not correct that "cycles" are much faster than + * "ticks". + */ +static inline int sys_clock_hw_cycles_per_tick(void) +{ +#ifdef CONFIG_SYS_CLOCK_EXISTS + return sys_clock_hw_cycles_per_sec() / CONFIG_SYS_CLOCK_TICKS_PER_SEC; +#else + return 1; /* Just to avoid a division by zero */ +#endif +} + #if defined(CONFIG_SYS_CLOCK_EXISTS) && \ (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC == 0) #error "SYS_CLOCK_HW_CYCLES_PER_SEC must be non-zero!" @@ -138,22 +152,6 @@ static inline s64_t __ticks_to_ms(s64_t ticks) #define _TICK_ALIGN 1 #endif -/* - * sys_clock_us_per_tick global variable represents a number - * of microseconds in one OS timer tick - * - * Note: This variable is deprecated and will be removed soon! - */ -__deprecated extern int sys_clock_us_per_tick; - -/* - * sys_clock_hw_cycles_per_tick global variable represents a number - * of platform clock ticks in one OS timer tick. 
- * sys_clock_hw_cycles_per_tick often represents a value of divider - * of the board clock frequency - */ -extern int sys_clock_hw_cycles_per_tick; - /* SYS_CLOCK_HW_CYCLES_TO_NS64 converts CPU clock cycles to nanoseconds */ #define SYS_CLOCK_HW_CYCLES_TO_NS64(X) \ (((u64_t)(X) * NSEC_PER_SEC) / sys_clock_hw_cycles_per_sec()) diff --git a/kernel/sys_clock.c b/kernel/sys_clock.c index f89780dae71..6563458590f 100644 --- a/kernel/sys_clock.c +++ b/kernel/sys_clock.c @@ -21,14 +21,10 @@ #endif #ifdef CONFIG_SYS_CLOCK_EXISTS -int sys_clock_hw_cycles_per_tick = - CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC; #if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC; #endif #else -/* don't initialize to avoid division-by-zero error */ -int sys_clock_hw_cycles_per_tick; #if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) int z_clock_hw_cycles_per_sec; #endif diff --git a/tests/kernel/common/src/clock.c b/tests/kernel/common/src/clock.c index 3a94bd47577..cf189a6479f 100644 --- a/tests/kernel/common/src/clock.c +++ b/tests/kernel/common/src/clock.c @@ -101,7 +101,7 @@ void test_clock_cycle(void) c32 = k_cycle_get_32(); /*break if cycle counter wrap around*/ while (k_cycle_get_32() > c32 && - k_cycle_get_32() < (c32 + sys_clock_hw_cycles_per_tick)) + k_cycle_get_32() < (c32 + sys_clock_hw_cycles_per_tick())) #if defined(CONFIG_ARCH_POSIX) posix_halt_cpu(); #else diff --git a/tests/kernel/early_sleep/src/main.c b/tests/kernel/early_sleep/src/main.c index 590547d705a..ba4eeeb9a36 100644 --- a/tests/kernel/early_sleep/src/main.c +++ b/tests/kernel/early_sleep/src/main.c @@ -61,7 +61,7 @@ static int ticks_to_sleep(int ticks) k_sleep(__ticks_to_ms(ticks)); stop_time = k_cycle_get_32(); - return (stop_time - start_time) / sys_clock_hw_cycles_per_tick; + return (stop_time - start_time) / sys_clock_hw_cycles_per_tick(); } diff --git a/tests/kernel/timer/timer_monotonic/src/main.c 
b/tests/kernel/timer/timer_monotonic/src/main.c index 4fe138bde64..895fc7f805f 100644 --- a/tests/kernel/timer/timer_monotonic/src/main.c +++ b/tests/kernel/timer/timer_monotonic/src/main.c @@ -54,8 +54,8 @@ void test_timer(void) errors = 0; - TC_PRINT("sys_clock_hw_cycles_per_tick = %d\n", - sys_clock_hw_cycles_per_tick); + TC_PRINT("sys_clock_hw_cycles_per_tick() = %d\n", + sys_clock_hw_cycles_per_tick()); TC_PRINT("sys_clock_hw_cycles_per_sec() = %d\n", sys_clock_hw_cycles_per_sec());