clock: remove z_ from semi-public APIs

The clock/timer APIs are not application-facing. However, like the
arch_ APIs and a few others, they are available for implementing
drivers and adding support for new hardware, and they are documented
and usable outside the clock/kernel subsystems.

Remove the leading z_ and provide them as sys_clock_* APIs for anyone
writing a new timer driver to use.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
commit 9c1efe6b4b
Anas Nashif, 2021-02-25 15:33:15 -05:00
33 changed files with 171 additions and 168 deletions
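
For orientation (not part of this commit), a minimal sketch of the hooks a new timer driver implements after the rename; the signatures follow the diff below, while the header paths and hardware details are assumptions for a Zephyr 2.5-era tree:

#include <device.h>
#include <drivers/timer/system_timer.h>

/* Called once at boot (PRE_KERNEL_2) to program the hardware and hook
 * the timer IRQ.
 */
int sys_clock_driver_init(const struct device *dev)
{
	ARG_UNUSED(dev);
	/* program the comparator one tick ahead, IRQ_CONNECT(...), irq_enable(...) */
	return 0;
}

/* Tickless only: the kernel promises it does not need another
 * sys_clock_announce() for the next 'ticks' ticks, so the comparator
 * can be pushed out that far.
 */
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	ARG_UNUSED(idle);
	/* reprogram the comparator 'ticks' ticks into the future */
}

/* Ticks elapsed since the last sys_clock_announce(); returns 0 when the
 * build is not tickless.
 */
uint32_t sys_clock_elapsed(void)
{
	return 0;
}

/* The driver's ISR reports however many ticks actually passed. */
static void example_timer_isr(const void *arg)
{
	ARG_UNUSED(arg);
	sys_clock_announce(1);
}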


@ -346,9 +346,9 @@ config TICKLESS_CAPABLE
help
Timer drivers should select this flag if they are capable of
supporting tickless operation. That is, a call to
z_clock_set_timeout() with a number of ticks greater than
sys_clock_set_timeout() with a number of ticks greater than
one should be expected not to produce a call to
z_clock_announce() (really, not to produce an interrupt at
sys_clock_announce() (really, not to produce an interrupt at
all) until the specified expiration.
endmenu


@ -28,10 +28,10 @@ static void timer_irq_handler(const void *unused)
/* Clear the interrupt */
alt_handle_irq((void *)TIMER_0_BASE, TIMER_0_IRQ);
z_clock_announce(_sys_idle_elapsed_ticks);
sys_clock_announce(_sys_idle_elapsed_ticks);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);


@ -76,9 +76,9 @@ static uint32_t cached_icr = CYCLES_PER_TICK;
#ifdef CONFIG_TICKLESS_KERNEL
static uint64_t last_announcement; /* last time we called z_clock_announce() */
static uint64_t last_announcement; /* last time we called sys_clock_announce() */
void z_clock_set_timeout(int32_t n, bool idle)
void sys_clock_set_timeout(int32_t n, bool idle)
{
ARG_UNUSED(idle);
@ -117,7 +117,7 @@ void z_clock_set_timeout(int32_t n, bool idle)
k_spin_unlock(&lock, key);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
uint32_t ccr;
uint32_t ticks;
@ -143,7 +143,7 @@ static void isr(const void *arg)
/*
* If we get here and the CCR isn't zero, then this interrupt is
* stale: it was queued while z_clock_set_timeout() was setting
* stale: it was queued while sys_clock_set_timeout() was setting
* a new counter. Just ignore it. See above for more info.
*/
@ -161,7 +161,7 @@ static void isr(const void *arg)
ticks = (total_cycles - last_announcement) / CYCLES_PER_TICK;
last_announcement = total_cycles;
k_spin_unlock(&lock, key);
z_clock_announce(ticks);
sys_clock_announce(ticks);
}
#else
@ -175,10 +175,10 @@ static void isr(const void *arg)
x86_write_loapic(LOAPIC_TIMER_ICR, cached_icr);
k_spin_unlock(&lock, key);
z_clock_announce(1);
sys_clock_announce(1);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
return 0U;
}
@ -213,7 +213,7 @@ uint32_t z_timer_cycle_get_32(void)
#endif
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
uint32_t val;


@ -67,7 +67,7 @@ static uint32_t last_load;
/*
* This local variable holds the amount of timer cycles elapsed
* and it is updated in z_timer_int_handler and z_clock_set_timeout().
* and it is updated in z_timer_int_handler and sys_clock_set_timeout().
*
* Note:
* At an arbitrary point in time the "current" value of the
@ -166,7 +166,7 @@ static ALWAYS_INLINE void timer0_limit_register_set(uint32_t count)
/* This internal function calculates the amount of HW cycles that have
* elapsed since the last time the absolute HW cycles counter has been
* updated. 'cycle_count' may be updated either by the ISR, or
* in z_clock_set_timeout().
* in sys_clock_set_timeout().
*
* Additionally, the function updates the 'overflow_cycles' counter, that
* holds the amount of elapsed HW cycles due to (possibly) multiple
@ -241,13 +241,13 @@ static void timer_int_handler(const void *unused)
k_spin_unlock(&lock, key);
z_clock_announce(dticks);
sys_clock_announce(dticks);
#else
/* timer_int_handler may be triggered by timer irq or
* software helper irq
*/
/* irq with higher priority may call z_clock_set_timeout
/* irq with higher priority may call sys_clock_set_timeout
* so need a lock here
*/
uint32_t key;
@ -262,7 +262,7 @@ static void timer_int_handler(const void *unused)
dticks = (cycle_count - announced_cycles) / CYC_PER_TICK;
announced_cycles += dticks * CYC_PER_TICK;
z_clock_announce(TICKLESS ? dticks : 1);
sys_clock_announce(TICKLESS ? dticks : 1);
#endif
}
@ -277,7 +277,7 @@ static void timer_int_handler(const void *unused)
*
* @return 0
*/
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@ -314,7 +314,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
/* If the kernel allows us to miss tick announcements in idle,
* then shut off the counter. (Note: we can assume if idle==true
@ -417,7 +417,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!TICKLESS) {
return 0;


@ -43,10 +43,10 @@ static void arm_arch_timer_compare_isr(const void *arg)
k_spin_unlock(&lock, key);
z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? delta_ticks : 1);
sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? delta_ticks : 1);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@ -61,7 +61,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
#if defined(CONFIG_TICKLESS_KERNEL)
@ -95,7 +95,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;


@ -118,10 +118,10 @@ static void compare_isr(const void *arg)
k_spin_unlock(&lock, key);
z_clock_announce(dticks);
sys_clock_announce(dticks);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
uint64_t curr = count();
@ -132,7 +132,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@ -164,7 +164,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;


@ -109,7 +109,7 @@ void rtc_isr(const void *arg)
rtc_last += ticks * RTC_COUNTS_PER_TICK;
k_spin_unlock(&lock, key);
z_clock_announce(ticks);
sys_clock_announce(ticks);
#else /* !CONFIG_TICKLESS_KERNEL */
@ -123,7 +123,7 @@ void rtc_isr(const void *arg)
rtc_last += RTC_COUNTS_PER_TICK;
z_clock_announce(1);
sys_clock_announce(1);
#endif /* CONFIG_TICKLESS_KERNEL */
}
@ -183,7 +183,7 @@ static void startDevice(void)
irq_unlock(key);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@ -201,7 +201,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@ -230,7 +230,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif /* CONFIG_TICKLESS_KERNEL */
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
uint32_t ret = (AONRTCCurrent64BitValueGet() - rtc_last) /
RTC_COUNTS_PER_TICK;


@ -35,7 +35,7 @@ static uint32_t last_load;
/*
* This local variable holds the amount of SysTick HW cycles elapsed
* and it is updated in z_clock_isr() and z_clock_set_timeout().
* and it is updated in z_clock_isr() and sys_clock_set_timeout().
*
* Note:
* At an arbitrary point in time the "current" value of the SysTick
@ -65,7 +65,7 @@ static volatile uint32_t overflow_cyc;
/* This internal function calculates the amount of HW cycles that have
* elapsed since the last time the absolute HW cycles counter has been
* updated. 'cycle_count' may be updated either by the ISR, or when we
* re-program the SysTick.LOAD register, in z_clock_set_timeout().
* re-program the SysTick.LOAD register, in sys_clock_set_timeout().
*
* Additionally, the function updates the 'overflow_cyc' counter, that
* holds the amount of elapsed HW cycles due to (possibly) multiple
@ -129,11 +129,11 @@ void z_clock_isr(void *arg)
if (TICKLESS) {
/* In TICKLESS mode, the SysTick.LOAD is re-programmed
* in z_clock_set_timeout(), followed by resetting of
* in sys_clock_set_timeout(), followed by resetting of
* the counter (VAL = 0).
*
* If a timer wrap occurs right when we re-program LOAD,
* the ISR is triggered immediately after z_clock_set_timeout()
* the ISR is triggered immediately after sys_clock_set_timeout()
* returns; in that case we shall not increment the cycle_count
* because the value has been updated before LOAD re-program.
*
@ -142,14 +142,14 @@ void z_clock_isr(void *arg)
dticks = (cycle_count - announced_cycles) / CYC_PER_TICK;
announced_cycles += dticks * CYC_PER_TICK;
z_clock_announce(dticks);
sys_clock_announce(dticks);
} else {
z_clock_announce(1);
sys_clock_announce(1);
}
z_arm_int_exit();
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@ -164,7 +164,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
/* Fast CPUs and a 24 bit counter mean that even idle systems
* need to wake up multiple times per second. If the kernel
@ -225,7 +225,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!TICKLESS) {
return 0;
@ -247,7 +247,7 @@ uint32_t z_timer_cycle_get_32(void)
return ret;
}
void z_clock_idle_exit(void)
void sys_clock_idle_exit(void)
{
if (last_load == TIMER_STOPPED) {
SysTick->CTRL |= SysTick_CTRL_ENABLE_Msk;


@ -90,7 +90,7 @@ static void hpet_isr(const void *arg)
}
k_spin_unlock(&lock, key);
z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
}
static void set_timer0_irq(unsigned int irq)
@ -106,7 +106,7 @@ static void set_timer0_irq(unsigned int irq)
TIMER0_CONF_REG = val;
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
extern int z_clock_hw_cycles_per_sec;
uint32_t hz;
@ -154,7 +154,7 @@ void smp_timer_init(void)
*/
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@ -191,7 +191,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;
@ -209,7 +209,7 @@ uint32_t z_timer_cycle_get_32(void)
return MAIN_COUNTER_REG;
}
void z_clock_idle_exit(void)
void sys_clock_idle_exit(void)
{
GENERAL_CONF_REG |= GCONF_ENABLE;
}


@ -201,10 +201,10 @@ static void timer_isr(const void *unused)
- accumulated_cycle_count) / CYC_PER_TICK;
accumulated_cycle_count += dticks * CYC_PER_TICK;
k_spin_unlock(&lock, key);
z_clock_announce(dticks);
sys_clock_announce(dticks);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
timer_init_combine(CTIMER_HW_TIMER_INDEX, TRUE);
timer_init(CTIMER_HW_TIMER_INDEX, ET_PSR_32K, TRUE, FALSE, 0);
@ -215,7 +215,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@ -227,7 +227,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
k_spin_unlock(&lock, key);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;


@ -6,7 +6,7 @@
#ifndef ZEPHYR_LEGACY_SET_TIME_H__
#define ZEPHYR_LEGACY_SET_TIME_H__
/* Stub implementation of z_clock_set_timeout() and z_clock_elapsed()
/* Stub implementation of sys_clock_set_timeout() and sys_clock_elapsed()
* in terms of the original APIs. Used by older timer drivers.
* Should be replaced.
*
@ -16,7 +16,7 @@
#ifdef CONFIG_TICKLESS_IDLE
void z_timer_idle_enter(int32_t ticks);
void z_clock_idle_exit(void);
void clock_idle_exit(void);
#endif
#ifdef CONFIG_TICKLESS_KERNEL
@ -26,9 +26,9 @@ extern uint32_t z_get_remaining_program_time(void);
extern uint32_t z_get_elapsed_program_time(void);
#endif
extern uint64_t z_clock_uptime(void);
extern uint64_t clock_uptime(void);
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
#if defined(CONFIG_TICKLESS_IDLE) && defined(CONFIG_TICKLESS_KERNEL)
if (idle) {
@ -46,10 +46,10 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
*/
static uint32_t driver_uptime;
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
return (uint32_t)(z_clock_uptime() - driver_uptime);
return (uint32_t)(clock_uptime() - driver_uptime);
#else
return 0;
#endif
@ -58,16 +58,16 @@ uint32_t z_clock_elapsed(void)
static void wrapped_announce(int32_t ticks)
{
driver_uptime += ticks;
z_clock_announce(ticks);
sys_clock_announce(ticks);
}
#define z_clock_announce(t) wrapped_announce(t)
#define sys_clock_announce(t) wrapped_announce(t)
#define _sys_clock_always_on (0)
static inline void z_tick_set(int64_t val)
{
/* noop with current kernel code, use z_clock_announce() */
/* noop with current kernel code, use sys_clock_announce() */
ARG_UNUSED(val);
}


@ -78,10 +78,10 @@ static void timer_isr(const void *unused)
tmr->ctrl = GPTIMER_CTRL_IE | GPTIMER_CTRL_RS |
GPTIMER_CTRL_EN | gptimer_ctrl_clear_ip;
z_clock_announce(1);
sys_clock_announce(1);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
return 0;
}
@ -101,7 +101,7 @@ static void init_downcounter(volatile struct gptimer_timer_regs *tmr)
tmr->ctrl = GPTIMER_CTRL_LD | GPTIMER_CTRL_RS | GPTIMER_CTRL_EN;
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
const int timer_interrupt = get_timer_irq();
volatile struct gptimer_regs *regs = get_regs();


@ -34,7 +34,7 @@ static void litex_timer_irq_handler(const void *device)
int key = irq_lock();
sys_write8(TIMER_EV, TIMER_EV_PENDING_ADDR);
z_clock_announce(1);
sys_clock_announce(1);
irq_unlock(key);
}
@ -54,12 +54,12 @@ uint32_t z_timer_cycle_get_32(void)
}
/* tickless kernel is not supported */
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
return 0;
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
IRQ_CONNECT(TIMER_IRQ, DT_INST_IRQ(0, priority),


@ -112,7 +112,7 @@ static inline uint32_t timer_count(void)
#ifdef CONFIG_TICKLESS_KERNEL
static uint32_t last_announcement; /* last time we called z_clock_announce() */
static uint32_t last_announcement; /* last time we called sys_clock_announce() */
/*
* Request a timeout n Zephyr ticks in the future from now.
@ -126,7 +126,7 @@ static uint32_t last_announcement; /* last time we called z_clock_announce() */
* Writing a new value to preload only takes effect once the count
* register reaches 0.
*/
void z_clock_set_timeout(int32_t n, bool idle)
void sys_clock_set_timeout(int32_t n, bool idle)
{
ARG_UNUSED(idle);
@ -185,10 +185,10 @@ void z_clock_set_timeout(int32_t n, bool idle)
/*
* Return the number of Zephyr ticks elapsed from last call to
* z_clock_announce in the ISR. The caller casts uint32_t to int32_t.
* sys_clock_announce in the ISR. The caller casts uint32_t to int32_t.
* We must make sure bit[31] is 0 in the return value.
*/
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
uint32_t ccr;
uint32_t ticks;
@ -242,7 +242,7 @@ static void xec_rtos_timer_isr(const void *arg)
last_announcement = total_cycles;
k_spin_unlock(&lock, key);
z_clock_announce(ticks);
sys_clock_announce(ticks);
}
#else
@ -266,10 +266,10 @@ static void xec_rtos_timer_isr(const void *arg)
total_cycles = temp & TIMER_COUNT_MASK;
k_spin_unlock(&lock, key);
z_clock_announce(1);
sys_clock_announce(1);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
return 0U;
}
@ -301,7 +301,7 @@ uint32_t z_timer_cycle_get_32(void)
return ret;
}
void z_clock_idle_exit(void)
void sys_clock_idle_exit(void)
{
if (cached_icr == TIMER_STOPPED) {
cached_icr = CYCLES_PER_TICK;
@ -314,7 +314,7 @@ void sys_clock_disable(void)
TIMER_REGS->CTRL = 0U;
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);


@ -44,7 +44,7 @@ static void np_timer_isr(const void *arg)
int32_t elapsed_ticks = (now - last_tick_time)/tick_period;
last_tick_time += elapsed_ticks*tick_period;
z_clock_announce(elapsed_ticks);
sys_clock_announce(elapsed_ticks);
}
/**
@ -60,7 +60,7 @@ void np_timer_isr_test_hook(const void *arg)
*
* Enable the hw timer, setting its tick period, and setup its interrupt
*/
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@ -79,7 +79,7 @@ int z_clock_driver_init(const struct device *device)
* @brief Set system clock timeout
*
* Informs the system clock driver that the next needed call to
* z_clock_announce() will not be until the specified number of ticks
* sys_clock_announce() will not be until the specified number of ticks
* from the current time have elapsed.
*
* See system_timer.h for more information
@ -88,7 +88,7 @@ int z_clock_driver_init(const struct device *device)
* @param idle Hint to the driver that the system is about to enter
* the idle state immediately after setting the timeout
*/
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@ -96,7 +96,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
uint64_t silent_ticks;
/* Note that we treat INT_MAX literally as anyhow the maximum amount of
* ticks we can report with z_clock_announce() is INT_MAX
* ticks we can report with sys_clock_announce() is INT_MAX
*/
if (ticks == K_TICKS_FOREVER) {
silent_ticks = INT64_MAX;
@ -110,14 +110,14 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
}
/**
* @brief Ticks elapsed since last z_clock_announce() call
* @brief Ticks elapsed since last sys_clock_announce() call
*
* Queries the clock driver for the current time elapsed since the
* last call to z_clock_announce() was made. The kernel will call
* last call to sys_clock_announce() was made. The kernel will call
* this with appropriate locking, the driver needs only provide an
* instantaneous answer.
*/
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
return (hwm_get_time() - last_tick_time)/tick_period;
}
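
As a usage note (not from this commit), a rough sketch of the kernel-side accounting these two hooks feed; it is not taken from the kernel source, and announced_ticks, on_announce and current_tick_get are hypothetical names:

#include <stdint.h>
#include <drivers/timer/system_timer.h>

/* Total ticks reported so far via sys_clock_announce(). */
static uint64_t announced_ticks;

/* Roughly what happens on each announcement: accumulate the ticks
 * (running expired timeouts is omitted here).
 */
void on_announce(int32_t ticks)
{
	announced_ticks += ticks;
}

/* "Now", in ticks: everything announced so far plus whatever the driver
 * reports as elapsed but not yet announced.
 */
uint64_t current_tick_get(void)
{
	return announced_ticks + sys_clock_elapsed();
}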


@ -65,7 +65,7 @@ static struct itim32_reg *const evt_tmr = (struct itim32_reg *)
static const struct npcx_clk_cfg itim_clk_cfg[] = NPCX_DT_CLK_CFG_ITEMS_LIST(0);
static struct k_spinlock lock;
/* Announced cycles in system timer before executing z_clock_announce() */
/* Announced cycles in system timer before executing sys_clock_announce() */
static uint64_t cyc_sys_announced;
/* Current target cycles of time-out signal in event timer */
static uint32_t cyc_evt_timeout;
@ -178,13 +178,13 @@ static void npcx_itim_evt_isr(const struct device *dev)
k_spin_unlock(&lock, key);
/* Informs kernel that specified number of ticks have elapsed */
z_clock_announce(delta_ticks);
sys_clock_announce(delta_ticks);
} else {
/* Enable event timer for ticking and wait to it take effect */
npcx_itim_evt_enable();
/* Informs kernel that one tick has elapsed */
z_clock_announce(1);
sys_clock_announce(1);
}
}
@ -224,7 +224,7 @@ static uint32_t npcx_itim_evt_elapsed_cyc32(void)
#endif /* CONFIG_PM */
/* System timer api functions */
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@ -238,7 +238,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
npcx_itim_start_evt_tmr_by_tick(ticks);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
/* Always return 0 for tickful kernel system */
@ -250,7 +250,7 @@ uint32_t z_clock_elapsed(void)
k_spin_unlock(&lock, key);
/* Return how many ticks elapsed since last z_clock_announce() call */
/* Return how many ticks elapsed since last sys_clock_announce() call */
return (uint32_t)((current - cyc_sys_announced) / SYS_CYCLES_PER_TICK);
}
@ -265,7 +265,7 @@ uint32_t z_timer_cycle_get_32(void)
return (uint32_t)(current);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
int ret;


@ -242,7 +242,7 @@ static void sys_clock_timeout_handler(uint32_t chan,
sys_clock_timeout_handler, NULL);
}
z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ?
sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ?
dticks : (dticks > 0));
}
@ -299,7 +299,7 @@ void z_nrf_rtc_timer_chan_free(uint32_t chan)
atomic_or(&alloc_mask, BIT(chan));
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
static const enum nrf_lfclk_start_mode mode =
@ -339,7 +339,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
uint32_t cyc;
@ -380,7 +380,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
compare_set(0, cyc, sys_clock_timeout_handler, NULL);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;


@ -76,10 +76,10 @@ static void timer_isr(const void *arg)
}
k_spin_unlock(&lock, key);
z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@ -90,7 +90,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@ -130,7 +130,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;


@ -50,10 +50,10 @@ static void lptmr_irq_handler(const struct device *unused)
SYSTEM_TIMER_INSTANCE->CSR |= LPTMR_CSR_TCF(1); /* Rearm timer. */
cycle_count += CYCLES_PER_TICK; /* Track cycles. */
z_clock_announce(1); /* Poke the scheduler. */
sys_clock_announce(1); /* Poke the scheduler. */
}
int z_clock_driver_init(const struct device *unused)
int sys_clock_driver_init(const struct device *unused)
{
uint32_t csr, psr, sircdiv; /* LPTMR registers */
@ -139,7 +139,7 @@ uint32_t z_timer_cycle_get_32(void)
/*
* Since we're not tickless, this is identically zero.
*/
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
return 0;
}


@ -155,7 +155,7 @@ static void rtc_isr(const void *arg)
if (count != rtc_last) {
uint32_t ticks = (count - rtc_last) / CYCLES_PER_TICK;
z_clock_announce(ticks);
sys_clock_announce(ticks);
rtc_last += ticks * CYCLES_PER_TICK;
}
@ -164,18 +164,18 @@ static void rtc_isr(const void *arg)
if (status) {
/* RTC just ticked one more tick... */
if (++rtc_counter == rtc_timeout) {
z_clock_announce(rtc_counter - rtc_last);
sys_clock_announce(rtc_counter - rtc_last);
rtc_last = rtc_counter;
}
} else {
/* ISR was invoked directly from z_clock_set_timeout. */
z_clock_announce(0);
/* ISR was invoked directly from sys_clock_set_timeout. */
sys_clock_announce(0);
}
#endif /* CONFIG_TICKLESS_KERNEL */
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@ -252,7 +252,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@ -301,7 +301,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif /* CONFIG_TICKLESS_KERNEL */
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
return (rtc_count() - rtc_last) / CYCLES_PER_TICK;


@ -55,7 +55,7 @@ static void lptim_irq_handler(const struct device *unused)
k_spinlock_key_t key = k_spin_lock(&lock);
/* do not change ARR yet, z_clock_announce will do */
/* do not change ARR yet, sys_clock_announce will do */
LL_LPTIM_ClearFLAG_ARRM(LPTIM1);
/* increase the total nb of autoreload count
@ -73,12 +73,12 @@ static void lptim_irq_handler(const struct device *unused)
* CONFIG_SYS_CLOCK_TICKS_PER_SEC)
/ LPTIM_CLOCK;
z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL)
sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL)
? dticks : (dticks > 0));
}
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@ -188,7 +188,7 @@ static inline uint32_t z_clock_lptim_getcounter(void)
return lp_time;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
/* new LPTIM1 AutoReload value to set (aligned on Kernel ticks) */
uint32_t next_arr = 0;
@ -268,7 +268,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
k_spin_unlock(&lock, key);
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;


@ -23,7 +23,7 @@ void __weak z_clock_isr(void *arg)
__ASSERT_NO_MSG(false);
}
int __weak z_clock_driver_init(const struct device *device)
int __weak sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@ -37,11 +37,11 @@ int __weak z_clock_device_ctrl(const struct device *device,
return -ENOTSUP;
}
void __weak z_clock_set_timeout(int32_t ticks, bool idle)
void __weak sys_clock_set_timeout(int32_t ticks, bool idle)
{
}
void __weak z_clock_idle_exit(void)
void __weak sys_clock_idle_exit(void)
{
}
@ -49,5 +49,5 @@ void __weak sys_clock_disable(void)
{
}
SYS_DEVICE_DEFINE("sys_clock", z_clock_driver_init, z_clock_device_ctrl,
SYS_DEVICE_DEFINE("sys_clock", sys_clock_driver_init, z_clock_device_ctrl,
PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);


@ -93,10 +93,10 @@ static void ttc_isr(const void *arg)
#endif
/* Announce to the kernel*/
z_clock_announce(ticks);
sys_clock_announce(ticks);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
uint32_t reg_val;
@ -152,7 +152,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
#ifdef CONFIG_TICKLESS_KERNEL
uint32_t cycles;
@ -173,7 +173,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
uint32_t cycles;


@ -53,10 +53,10 @@ static void ccompare_isr(const void *arg)
}
k_spin_unlock(&lock, key);
z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
}
int z_clock_driver_init(const struct device *device)
int sys_clock_driver_init(const struct device *device)
{
ARG_UNUSED(device);
@ -66,7 +66,7 @@ int z_clock_driver_init(const struct device *device)
return 0;
}
void z_clock_set_timeout(int32_t ticks, bool idle)
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@ -97,7 +97,7 @@ void z_clock_set_timeout(int32_t ticks, bool idle)
#endif
}
uint32_t z_clock_elapsed(void)
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;