sys_clock: Fix unsafe tick count usage

The system tick count is a 64-bit quantity that gets updated from
interrupt context, meaning that it's dangerously non-atomic and has to
be locked.  The core kernel clock code did this right.

But the value was also exposed to the rest of the universe as a global
variable, and virtually nothing else was doing this correctly.  Even
the timer ISRs themselves may be preempted (most of our architectures
support nested interrupts) by code that wants to set timeouts and
inspect system uptime.

Define a z_tick_{get,set}() API, eliminate the old variable, and make
sure everyone uses the right mechanism.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Authored by Andy Ross on 2018-09-19 14:17:00 -07:00
Committed by Anas Nashif
commit 317178b88f
16 changed files with 105 additions and 100 deletions
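
For context, here is a minimal sketch (not part of the patch) of the hazard being fixed: on the 32-bit CPUs Zephyr typically targets, a 64-bit load compiles to two word-sized loads, so a tick interrupt arriving between them produces a torn value. The locked accessor below shows the pattern the patch adopts kernel-wide.

    #include <kernel.h>

    extern volatile u64_t _sys_clock_tick_count;    /* the old global */

    u64_t torn_read(void)
    {
            /* Racy: on a 32-bit CPU this is two loads; a tick interrupt
             * can fire between them and carry the low word.
             */
            return _sys_clock_tick_count;
    }

    u64_t locked_read(void)
    {
            unsigned int key = irq_lock();  /* mask the tick interrupt */
            u64_t t = _sys_clock_tick_count;

            irq_unlock(key);
            return t;
    }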

File 1 of 16:

@@ -15,9 +15,6 @@
 #include <toolchain.h>
 #include <kernel_structs.h>
 
-extern volatile u64_t _sys_clock_tick_count;
-extern int sys_clock_hw_cycles_per_tick();
-
 /*
  * @brief Read 64-bit timestamp value
  *
@@ -33,7 +30,7 @@ u64_t _tsc_read(void)
         u32_t count;
 
         key = irq_lock();
-        t = (u64_t)_sys_clock_tick_count;
+        t = (u64_t)z_tick_get();
         count = _arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
         irq_unlock(key);
         t *= (u64_t)sys_clock_hw_cycles_per_tick();

File 2 of 16:

@@ -196,7 +196,7 @@ void _timer_int_handler(void *unused)
 #ifdef CONFIG_TICKLESS_KERNEL
         if (!programmed_ticks) {
                 if (_sys_clock_always_on) {
-                        _sys_clock_tick_count = _get_elapsed_clock_time();
+                        z_tick_set(_get_elapsed_clock_time());
                         program_max_cycles();
                 }
                 return;
@@ -216,7 +216,7 @@ void _timer_int_handler(void *unused)
 
         /* _sys_clock_tick_announce() could cause new programming */
         if (!programmed_ticks && _sys_clock_always_on) {
-                _sys_clock_tick_count = _get_elapsed_clock_time();
+                z_tick_set(_get_elapsed_clock_time());
                 program_max_cycles();
         }
 #else
@@ -280,7 +280,7 @@ void _set_time(u32_t time)
         programmed_ticks = time > max_system_ticks ? max_system_ticks : time;
 
-        _sys_clock_tick_count = _get_elapsed_clock_time();
+        z_tick_set(_get_elapsed_clock_time());
 
         timer0_limit_register_set(programmed_ticks * cycles_per_tick);
         timer0_count_register_set(0);
@@ -306,7 +306,7 @@ static inline u64_t get_elapsed_count(void)
                 elapsed = timer0_count_register_get();
         }
 
-        elapsed += _sys_clock_tick_count * cycles_per_tick;
+        elapsed += z_tick_get() * cycles_per_tick;
 
         return elapsed;
 }

File 3 of 16:

@@ -242,7 +242,7 @@ void _timer_int_handler(void *unused)
 #if defined(CONFIG_TICKLESS_KERNEL)
         if (!idle_original_ticks) {
                 if (_sys_clock_always_on) {
-                        _sys_clock_tick_count = _get_elapsed_clock_time();
+                        z_tick_set(_get_elapsed_clock_time());
                         /* clear overflow tracking flag as it is accounted */
                         timer_overflow = 0;
                         sysTickStop();
@@ -272,7 +272,7 @@ void _timer_int_handler(void *unused)
 
         /* _sys_clock_tick_announce() could cause new programming */
         if (!idle_original_ticks && _sys_clock_always_on) {
-                _sys_clock_tick_count = _get_elapsed_clock_time();
+                z_tick_set(_get_elapsed_clock_time());
                 /* clear overflow tracking flag as it is accounted */
                 timer_overflow = 0;
                 sysTickStop();
@@ -389,7 +389,7 @@ void _set_time(u32_t time)
         idle_original_ticks = time > max_system_ticks ? max_system_ticks : time;
 
-        _sys_clock_tick_count = _get_elapsed_clock_time();
+        z_tick_set(_get_elapsed_clock_time());
         /* clear overflow tracking flag as it is accounted */
         timer_overflow = 0;
 
@@ -415,14 +415,14 @@ static inline u64_t get_elapsed_count(void)
         if ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) || (timer_overflow)) {
                 elapsed = SysTick->LOAD;
                 /* Keep track of overflow till it is accounted in
-                 * _sys_clock_tick_count as COUNTFLAG bit is clear on read
+                 * z_tick_get() as COUNTFLAG bit is clear on read
                  */
                 timer_overflow = 1;
         } else {
                 elapsed = (SysTick->LOAD - SysTick->VAL);
         }
 
-        elapsed += (_sys_clock_tick_count * default_load_value);
+        elapsed += (z_tick_get() * default_load_value);
 
         return elapsed;
 }
@@ -604,7 +604,7 @@ void _timer_idle_exit(void)
         if (idle_mode == IDLE_TICKLESS) {
                 idle_mode = IDLE_NOT_TICKLESS;
                 if (!idle_original_ticks && _sys_clock_always_on) {
-                        _sys_clock_tick_count = _get_elapsed_clock_time();
+                        z_tick_set(_get_elapsed_clock_time());
                         timer_overflow = 0;
                         sysTickReloadSet(max_load_value);
                         sysTickStart();

File 4 of 16:

@@ -272,7 +272,7 @@ void _timer_int_handler(void *unused)
         /* If timer not programmed or already consumed exit */
         if (!programmed_ticks) {
                 if (_sys_clock_always_on) {
-                        _sys_clock_tick_count = _get_elapsed_clock_time();
+                        z_tick_set(_get_elapsed_clock_time());
                         program_max_cycles();
                 }
                 return;
@@ -301,7 +301,7 @@ void _timer_int_handler(void *unused)
 
         /* _sys_clock_tick_announce() could cause new programming */
         if (!programmed_ticks && _sys_clock_always_on) {
-                _sys_clock_tick_count = _get_elapsed_clock_time();
+                z_tick_set(_get_elapsed_clock_time());
                 program_max_cycles();
         }
 #else
@@ -354,7 +354,7 @@ void _set_time(u32_t time)
         programmed_ticks = time;
 
-        _sys_clock_tick_count = _get_elapsed_clock_time();
+        z_tick_set(_get_elapsed_clock_time());
 
         stale_irq_check = 1;
 
@@ -375,7 +375,7 @@ u64_t _get_elapsed_clock_time(void)
 {
         u64_t elapsed;
 
-        elapsed = _sys_clock_tick_count;
+        elapsed = z_tick_get();
         elapsed += ((s64_t)(_hpetMainCounterAtomic() -
                             counter_last_value) / counter_load_value);

File 5 of 16:

@@ -295,7 +295,7 @@ void _timer_int_handler(void *unused /* parameter is not used */
 #if defined(CONFIG_TICKLESS_KERNEL)
         if (!programmed_full_ticks) {
                 if (_sys_clock_always_on) {
-                        _sys_clock_tick_count = _get_elapsed_clock_time();
+                        z_tick_set(_get_elapsed_clock_time());
                         program_max_cycles();
                 }
                 return;
@@ -321,7 +321,7 @@ void _timer_int_handler(void *unused /* parameter is not used */
 
         /* _sys_clock_tick_announce() could cause new programming */
         if (!programmed_full_ticks && _sys_clock_always_on) {
-                _sys_clock_tick_count = _get_elapsed_clock_time();
+                z_tick_set(_get_elapsed_clock_time());
                 program_max_cycles();
         }
 #else
@@ -409,7 +409,7 @@ void _set_time(u32_t time)
         programmed_full_ticks =
                 time > max_system_ticks ? max_system_ticks : time;
 
-        _sys_clock_tick_count = _get_elapsed_clock_time();
+        z_tick_set(_get_elapsed_clock_time());
 
         programmed_cycles = programmed_full_ticks * cycles_per_tick;
         initial_count_register_set(programmed_cycles);
@@ -426,7 +426,7 @@ u64_t _get_elapsed_clock_time(void)
 {
         u64_t elapsed;
 
-        elapsed = _sys_clock_tick_count;
+        elapsed = z_tick_get();
         if (programmed_cycles) {
                 elapsed +=
                         (programmed_cycles -

File 6 of 16:

@@ -213,15 +213,15 @@ void _timer_idle_enter(s32_t sys_ticks)
 #ifdef CONFIG_TICKLESS_KERNEL
 /*
  * Set RTC Counter Compare (CC) register to max value
- * and update the _sys_clock_tick_count.
+ * and update the z_tick_get().
  */
 static inline void program_max_cycles(void)
 {
         u32_t max_cycles = _get_max_clock_time();
 
-        _sys_clock_tick_count = _get_elapsed_clock_time();
+        z_tick_set(_get_elapsed_clock_time());
 
         /* Update rtc_past to track rtc timer count*/
-        rtc_past = (_sys_clock_tick_count *
+        rtc_past = (z_tick_get() *
                 sys_clock_hw_cycles_per_tick()) & RTC_MASK;
 
         /* Programe RTC compare register to generate interrupt*/
@@ -305,9 +305,9 @@ void _set_time(u32_t time)
         /* Update expected_sys_ticls to time to programe*/
         expected_sys_ticks = time;
 
-        _sys_clock_tick_count = _get_elapsed_clock_time();
+        z_tick_set(_get_elapsed_clock_time());
         /* Update rtc_past to track rtc timer count*/
-        rtc_past = (_sys_clock_tick_count * sys_clock_hw_cycles_per_tick()) & RTC_MASK;
+        rtc_past = (z_tick_get() * sys_clock_hw_cycles_per_tick()) & RTC_MASK;
 
         expected_sys_ticks = expected_sys_ticks > _get_max_clock_time() ?
                              _get_max_clock_time() : expected_sys_ticks;
@@ -368,8 +368,8 @@ u64_t _get_elapsed_clock_time(void)
         u64_t elapsed;
         u32_t rtc_elapsed, rtc_past_copy;
 
-        /* Read _sys_clock_tick_count and rtc_past before RTC_COUNTER */
-        elapsed = _sys_clock_tick_count;
+        /* Read z_tick_get() and rtc_past before RTC_COUNTER */
+        elapsed = z_tick_get();
         rtc_past_copy = rtc_past;
 
         /* Make sure that compiler will not reverse access to RTC and
@@ -549,17 +549,17 @@ u32_t _timer_cycle_get_32(void)
         u32_t elapsed_cycles;
 
         /* Number of timer cycles announced as ticks so far. */
-        ticked_cycles = _sys_clock_tick_count * sys_clock_hw_cycles_per_tick();
+        ticked_cycles = z_tick_get() * sys_clock_hw_cycles_per_tick();
 
         /* Make sure that compiler will not reverse access to RTC and
-         * _sys_clock_tick_count.
+         * z_tick_get().
          */
         compiler_barrier();
 
         /* Number of timer cycles since last announced tick we know about.
          *
          * The value of RTC_COUNTER is not reset on tick, so it will
-         * compensate potentialy missed update of _sys_clock_tick_count
+         * compensate potentialy missed update of z_tick_get()
          * which could have happen between the ticked_cycles calculation
         * and the code below.
         */
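
The barrier comments in this driver describe an ordering requirement, not just atomicity: the tick count must be snapshotted before the hardware counter is sampled. A simplified sketch of just that idiom follows (illustrative only; the real driver additionally tracks rtc_past, and rtc_counter_read() is a hypothetical stand-in for reading RTC_COUNTER).

    u64_t cycles_sketch(void)
    {
            /* Snapshot announced ticks first; z_tick_get() locks irqs */
            u64_t ticked = z_tick_get() * sys_clock_hw_cycles_per_tick();

            /* Keep the compiler from hoisting the counter read above the
             * snapshot. RTC_COUNTER is not reset on tick, so reading it
             * "late" only adds cycles; it never loses them.
             */
            compiler_barrier();

            return ticked + rtc_counter_read(); /* hypothetical helper */
    }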

File 7 of 16:

@@ -165,7 +165,7 @@ static inline void _set_max_clock_time(void)
         unsigned int key;
 
         key = irq_lock();
-        _sys_clock_tick_count = _get_elapsed_clock_time();
+        z_tick_set(_get_elapsed_clock_time());
         last_timer_value = GET_TIMER_CURRENT_TIME();
         irq_unlock(key);
         SET_TIMER_FIRE_TIME(MAX_TIMER_CYCLES); /* Program timer to max value */
@@ -189,7 +189,7 @@ void _set_time(u32_t time)
         }
 
         key = irq_lock();
         /* Update System Level Ticks Time Keeping */
-        _sys_clock_tick_count = _get_elapsed_clock_time();
+        z_tick_set(_get_elapsed_clock_time());
         C = GET_TIMER_CURRENT_TIME();
         last_timer_value = C;
         irq_unlock(key);
@@ -233,7 +233,7 @@ u64_t _get_elapsed_clock_time(void)
         C = GET_TIMER_CURRENT_TIME();
         elapsed = (last_timer_value <= C) ? (C - last_timer_value) :
                   (MAX_TIMER_CYCLES - last_timer_value) + C;
-        total = (_sys_clock_tick_count + (elapsed / cycles_per_tick));
+        total = (z_tick_get() + (elapsed / cycles_per_tick));
         irq_unlock(key);
 
         return total;

File 8 of 16:

@@ -12,10 +12,8 @@
 #include <metal/time.h>
 #include <sys_clock.h>
 
-extern volatile u64_t _sys_clock_tick_count;
-
 unsigned long long metal_get_timestamp(void)
 {
-        return (unsigned long long)_sys_clock_tick_count;
+        return (unsigned long long)z_tick_get();
 }

File 9 of 16:

@@ -185,7 +185,32 @@ static inline s64_t __ticks_to_ms(s64_t ticks)
  * @} end defgroup clock_apis
  */
 
-extern volatile u64_t _sys_clock_tick_count;
+/**
+ *
+ * @brief Return the lower part of the current system tick count
+ *
+ * @return the current system tick count
+ *
+ */
+u32_t z_tick_get_32(void);
+
+/**
+ *
+ * @brief Return the current system tick count
+ *
+ * @return the current system tick count
+ *
+ */
+s64_t z_tick_get(void);
+
+/**
+ *
+ * @brief Sets the current system tick count
+ *
+ * @param ticks Ticks since system start
+ *
+ */
+void z_tick_set(s64_t ticks);
 
 /* timeouts */
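
As a usage sketch, here is how a caller might build an absolute tick deadline from the new accessors (hypothetical helper names; kernel/mempool.c below does the same computation inline, and _ms_to_ticks() is the kernel-internal conversion it uses).

    #include <kernel.h>
    #include <stdbool.h>

    /* Hypothetical: absolute deadline in ticks, K_FOREVER excluded */
    static inline s64_t deadline_after_ms(s32_t ms)
    {
            return z_tick_get() + _ms_to_ticks(ms);
    }

    static inline bool deadline_passed(s64_t end)
    {
            /* mirrors the end/timeout recalculation in k_mem_pool_alloc() */
            return (end - z_tick_get()) < 0;
    }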

File 10 of 16:

@@ -16,8 +16,6 @@
 extern struct k_mem_pool _k_mem_pool_list_start[];
 extern struct k_mem_pool _k_mem_pool_list_end[];
 
-s64_t _tick_get(void);
-
 static struct k_mem_pool *get_pool(int id)
 {
         return &_k_mem_pool_list_start[id];
@@ -57,7 +55,7 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
         __ASSERT(!(_is_in_isr() && timeout != K_NO_WAIT), "");
 
         if (timeout > 0) {
-                end = _tick_get() + _ms_to_ticks(timeout);
+                end = z_tick_get() + _ms_to_ticks(timeout);
         }
 
         while (true) {
@@ -95,7 +93,7 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
                 (void)_pend_current_thread(irq_lock(), &p->wait_q, timeout);
 
                 if (timeout != K_FOREVER) {
-                        timeout = end - _tick_get();
+                        timeout = end - z_tick_get();
 
                         if (timeout < 0) {
                                 break;

File 11 of 16:

@@ -33,7 +33,11 @@ int z_clock_hw_cycles_per_sec;
 /* updated by timer driver for tickless, stays at 1 for non-tickless */
 s32_t _sys_idle_elapsed_ticks = 1;
 
-volatile u64_t _sys_clock_tick_count;
+/* Note that this value is 64 bits, and thus non-atomic on almost all
+ * Zephyr archtictures. And of course it's routinely updated inside
+ * timer interrupts. Access to it must be locked.
+ */
+static volatile u64_t tick_count;
 
 #ifdef CONFIG_TICKLESS_KERNEL
 /*
@@ -46,22 +50,15 @@ int _sys_clock_always_on = 1;
 static u32_t next_ts;
 #endif
 
-/**
- *
- * @brief Return the lower part of the current system tick count
- *
- * @return the current system tick count
- *
- */
-u32_t _tick_get_32(void)
+u32_t z_tick_get_32(void)
 {
 #ifdef CONFIG_TICKLESS_KERNEL
         return (u32_t)_get_elapsed_clock_time();
 #else
-        return (u32_t)_sys_clock_tick_count;
+        return (u32_t)tick_count;
 #endif
 }
-FUNC_ALIAS(_tick_get_32, sys_tick_get_32, u32_t);
 
 u32_t _impl_k_uptime_get_32(void)
 {
@@ -69,7 +66,7 @@ u32_t _impl_k_uptime_get_32(void)
         __ASSERT(_sys_clock_always_on,
                  "Call k_enable_sys_clock_always_on to use clock API");
 #endif
-        return __ticks_to_ms(_tick_get_32());
+        return __ticks_to_ms(z_tick_get_32());
 }
 
 #ifdef CONFIG_USERSPACE
@@ -82,33 +79,26 @@ Z_SYSCALL_HANDLER(k_uptime_get_32)
 }
 #endif
 
-/**
- *
- * @brief Return the current system tick count
- *
- * @return the current system tick count
- *
- */
-s64_t _tick_get(void)
+s64_t z_tick_get(void)
 {
-        s64_t tmp_sys_clock_tick_count;
-
-        /*
-         * Lock the interrupts when reading _sys_clock_tick_count 64-bit
-         * variable. Some architectures (x86) do not handle 64-bit atomically,
-         * so we have to lock the timer interrupt that causes change of
-         * _sys_clock_tick_count
-         */
-        unsigned int imask = irq_lock();
-
 #ifdef CONFIG_TICKLESS_KERNEL
-        tmp_sys_clock_tick_count = _get_elapsed_clock_time();
+        return _get_elapsed_clock_time();
 #else
-        tmp_sys_clock_tick_count = _sys_clock_tick_count;
+        unsigned int key = irq_lock();
+        s64_t ret = tick_count;
+
+        irq_unlock(key);
+        return ret;
 #endif
-        irq_unlock(imask);
-        return tmp_sys_clock_tick_count;
 }
-FUNC_ALIAS(_tick_get, sys_tick_get, s64_t);
+
+void z_tick_set(s64_t val)
+{
+        unsigned int key = irq_lock();
+
+        tick_count = val;
+        irq_unlock(key);
+}
 
 s64_t _impl_k_uptime_get(void)
 {
@@ -116,7 +106,7 @@ s64_t _impl_k_uptime_get(void)
         __ASSERT(_sys_clock_always_on,
                  "Call k_enable_sys_clock_always_on to use clock API");
 #endif
-        return __ticks_to_ms(_tick_get());
+        return __ticks_to_ms(z_tick_get());
 }
 
 #ifdef CONFIG_USERSPACE
@@ -316,9 +306,8 @@ void _nano_sys_clock_tick_announce(s32_t ticks)
         K_DEBUG("ticks: %d\n", ticks);
 
         /* 64-bit value, ensure atomic access with irq lock */
         key = irq_lock();
-        _sys_clock_tick_count += ticks;
+        tick_count += ticks;
         irq_unlock(key);
 #endif
 
         handle_timeouts(ticks);
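
Note the asymmetry in z_tick_get() above: the tickless path returns _get_elapsed_clock_time() without taking the lock itself, trusting the timer driver's own locking. A hypothetical ztest-style smoke test for the accessor pair might look like the sketch below (illustrative only; nothing outside a timer driver should normally call z_tick_set()).

    #include <ztest.h>

    void test_tick_accessors(void)
    {
            s64_t now = z_tick_get();

            z_tick_set(now + 100);

            /* ">=" because the timer ISR may legitimately advance the
             * count between the set and the get.
             */
            zassert_true(z_tick_get() >= now + 100, "tick count regressed");
    }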

File 12 of 16:

@@ -79,9 +79,9 @@ minimal
 -------
 This configuration does NOT produce any output. To observe its operation,
 invoke it using gdb and observe that:
-- the kernel's timer ISR & main thread increment "_sys_clock_tick_count" on a
+- the kernel's timer ISR & main thread increment "z_tick_get()" on a
   regular basis
-- k_cpu_idle() is invoked by the idle task each time _sys_clock_tick_count
+- k_cpu_idle() is invoked by the idle task each time z_tick_get()
   is incremented
 
 regular

File 13 of 16:

@@ -147,11 +147,11 @@ void main(void)
         /* The following code is needed to make the benchmakring run on
          * slower platforms.
          */
-        u64_t time_stamp = _sys_clock_tick_count;
+        u64_t time_stamp = z_tick_get();
 
         k_sleep(1);
 
-        u64_t time_stamp_2 = _sys_clock_tick_count;
+        u64_t time_stamp_2 = z_tick_get();
 
         if (time_stamp_2 - time_stamp > 1) {
                 number_of_loops = 10;

File 14 of 16:

@@ -24,6 +24,7 @@
 #include <kernel_structs.h>
 #include <arch/cpu.h>
 #include <irq_offload.h>
+#include <sys_clock.h>
 
 /*
  * Include board.h from platform to get IRQ number.
@@ -95,9 +96,6 @@
-extern u32_t _tick_get_32(void);
-extern s64_t _tick_get(void);
-
 typedef struct {
         int command;    /* command to process */
         int error;      /* error value (if any) */
@@ -310,15 +308,15 @@ static void _test_kernel_interrupts(disable_int_func disable_int,
         int imask;
 
         /* Align to a "tick boundary" */
-        tick = _tick_get_32();
-        while (_tick_get_32() == tick) {
+        tick = z_tick_get_32();
+        while (z_tick_get_32() == tick) {
 #if defined(CONFIG_ARCH_POSIX)
                 k_busy_wait(1000);
 #endif
         }
 
         tick++;
-        while (_tick_get_32() == tick) {
+        while (z_tick_get_32() == tick) {
 #if defined(CONFIG_ARCH_POSIX)
                 k_busy_wait(1000);
 #endif
@@ -335,15 +333,15 @@ static void _test_kernel_interrupts(disable_int_func disable_int,
         count <<= 4;
 
         imask = disable_int(irq);
-        tick = _tick_get_32();
+        tick = z_tick_get_32();
         for (i = 0; i < count; i++) {
-                _tick_get_32();
+                z_tick_get_32();
 #if defined(CONFIG_ARCH_POSIX)
                 k_busy_wait(1000);
 #endif
         }
 
-        tick2 = _tick_get_32();
+        tick2 = z_tick_get_32();
 
         /*
          * Re-enable interrupts before returning (for both success and failure
@@ -356,13 +354,13 @@ static void _test_kernel_interrupts(disable_int_func disable_int,
 
         /* Now repeat with interrupts unlocked. */
         for (i = 0; i < count; i++) {
-                _tick_get_32();
+                z_tick_get_32();
 #if defined(CONFIG_ARCH_POSIX)
                 k_busy_wait(1000);
 #endif
         }
 
-        tick2 = _tick_get_32();
+        tick2 = z_tick_get_32();
         zassert_not_equal(tick, tick2,
                           "tick didn't advance as expected");
 }

File 15 of 16:

@@ -97,7 +97,6 @@ int fpu_sharing_error;
 static volatile unsigned int load_store_low_count;
 static volatile unsigned int load_store_high_count;
 
-extern u32_t _tick_get_32(void);
 extern void calculate_pi_low(void);
 extern void calculate_pi_high(void);
@@ -169,11 +168,11 @@ void load_store_low(void)
          * thread an opportunity to run when the low priority thread is
          * using the floating point registers.
          *
-         * IMPORTANT: This logic requires that sys_tick_get_32() not
+         * IMPORTANT: This logic requires that z_tick_get_32() not
          * perform any floating point operations!
          */
 
-        while ((_tick_get_32() % 5) != 0) {
+        while ((z_tick_get_32() % 5) != 0) {
                 /*
                  * Use a volatile variable to prevent compiler
                  * optimizing out the spin loop.

File 16 of 16:

@@ -8,6 +8,7 @@
 #include <tc_util.h>
 #include <linker/linker-defs.h>
 #include <ztest.h>
+#include <kernel_structs.h>
 
 /**
  * @brief Memory protection tests
@@ -28,7 +29,7 @@ struct test_struct __kernel_bss kernel_bss;
 struct test_struct __kernel_noinit kernel_noinit;
 
 /* Real kernel variable, check it is in the right place */
-extern volatile u64_t _sys_clock_tick_count;
+extern struct _kernel _kernel;
 
 struct test_struct app_data = {3, 4, NULL};
 struct test_struct app_bss;
@@ -73,7 +74,7 @@ void test_app_memory(void)
         zassert_true(kernel_loc(&kernel_bss), "not in kernel memory");
         zassert_true(kernel_loc(&kernel_noinit), "not in kernel memory");
 
-        zassert_true(kernel_loc((void *)&_sys_clock_tick_count),
+        zassert_true(kernel_loc((void *)&_kernel),
                      "not in kernel memory");
 }