clocks: rename z_tick_get -> sys_clock_tick_get

Do not use z_ for internal APIs; the z_ prefix is reserved for private
APIs within a single subsystem only.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
This commit is contained in:
Anas Nashif 2021-03-13 08:21:21 -05:00
commit fe0872c0ab
13 changed files with 32 additions and 32 deletions

View file

@ -30,7 +30,7 @@ uint64_t z_tsc_read(void)
uint32_t count;
key = arch_irq_lock();
t = (uint64_t)z_tick_get();
t = (uint64_t)sys_clock_tick_get();
count = z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
arch_irq_unlock(key);
t *= k_ticks_to_cyc_floor64(1);

View file

@ -161,7 +161,7 @@ static int w5500_command(const struct device *dev, uint8_t cmd)
w5500_spi_write(dev, W5500_S0_CR, &cmd, 1);
do {
int64_t remaining = end - z_tick_get();
int64_t remaining = end - sys_clock_tick_get();
if (remaining <= 0) {
return -EIO;

View file

@ -129,7 +129,7 @@ int z_nrf_rtc_timer_get_ticks(k_timeout_t t)
do {
curr_count = counter();
curr_tick = z_tick_get();
curr_tick = sys_clock_tick_get();
} while (curr_count != counter());
abs_ticks = Z_TICK_ABS(t.ticks);

View file

@ -184,10 +184,10 @@ uint32_t sys_clock_tick_get_32(void);
* @return the current system tick count
*
*/
int64_t z_tick_get(void);
int64_t sys_clock_tick_get(void);
#ifndef CONFIG_SYS_CLOCK_EXISTS
#define z_tick_get() (0)
#define sys_clock_tick_get() (0)
#define sys_clock_tick_get_32() (0)
#endif

View file

@ -38,7 +38,7 @@ void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
while (ret == NULL) {
ret = sys_heap_aligned_alloc(&h->heap, align, bytes);
now = z_tick_get();
now = sys_clock_tick_get();
if ((ret != NULL) || ((end - now) <= 0)) {
break;
}

View file

@ -265,7 +265,7 @@ void sys_clock_announce(int32_t ticks)
k_spin_unlock(&timeout_lock, key);
}
int64_t z_tick_get(void)
int64_t sys_clock_tick_get(void)
{
uint64_t t = 0U;
@ -278,7 +278,7 @@ int64_t z_tick_get(void)
uint32_t sys_clock_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
return (uint32_t)z_tick_get();
return (uint32_t)sys_clock_tick_get();
#else
return (uint32_t)curr_tick;
#endif
@ -286,7 +286,7 @@ uint32_t sys_clock_tick_get_32(void)
int64_t z_impl_k_uptime_ticks(void)
{
return z_tick_get();
return sys_clock_tick_get();
}
#ifdef CONFIG_USERSPACE
@ -309,7 +309,7 @@ uint64_t z_timeout_end_calc(k_timeout_t timeout)
if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
return UINT64_MAX;
} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
return z_tick_get();
return sys_clock_tick_get();
}
dt = timeout.ticks;
@ -317,5 +317,5 @@ uint64_t z_timeout_end_calc(k_timeout_t timeout)
if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) && Z_TICK_ABS(dt) >= 0) {
return Z_TICK_ABS(dt);
}
return z_tick_get() + MAX(1, dt);
return sys_clock_tick_get() + MAX(1, dt);
}

View file

@ -313,7 +313,7 @@ success:
#endif
if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
int64_t remaining = end - z_tick_get();
int64_t remaining = end - sys_clock_tick_get();
if (remaining <= 0) {
timeout = K_NO_WAIT;
@ -600,7 +600,7 @@ struct net_buf *net_buf_clone(struct net_buf *buf, k_timeout_t timeout)
if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
int64_t remaining = end - z_tick_get();
int64_t remaining = end - sys_clock_tick_get();
if (remaining <= 0) {
timeout = K_NO_WAIT;

View file

@ -884,7 +884,7 @@ static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
int64_t remaining = end - z_tick_get();
int64_t remaining = end - sys_clock_tick_get();
if (remaining <= 0) {
break;
@ -1144,7 +1144,7 @@ int net_pkt_alloc_buffer(struct net_pkt *pkt,
if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
int64_t remaining = end - z_tick_get();
int64_t remaining = end - sys_clock_tick_get();
if (remaining <= 0) {
timeout = K_NO_WAIT;
@ -1395,7 +1395,7 @@ pkt_alloc_with_buffer(struct k_mem_slab *slab,
if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
int64_t remaining = end - z_tick_get();
int64_t remaining = end - sys_clock_tick_get();
if (remaining <= 0) {
timeout = K_NO_WAIT;

View file

@ -625,7 +625,7 @@ ssize_t zsock_sendto_ctx(struct net_context *ctx, const void *buf, size_t len,
* it means that the sending window is blocked
* and we just cannot send anything.
*/
int64_t remaining = buf_timeout - z_tick_get();
int64_t remaining = buf_timeout - sys_clock_tick_get();
if (remaining <= 0) {
if (status == -ENOBUFS) {
@ -1141,7 +1141,7 @@ static inline ssize_t zsock_recv_stream(struct net_context *ctx,
/* Update the timeout value in case loop is repeated. */
if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
int64_t remaining = end - z_tick_get();
int64_t remaining = end - sys_clock_tick_get();
if (remaining <= 0) {
timeout = K_NO_WAIT;
@ -1384,7 +1384,7 @@ int z_impl_zsock_poll(struct zsock_pollfd *fds, int nfds, int poll_timeout)
if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
int64_t remaining = end - z_tick_get();
int64_t remaining = end - sys_clock_tick_get();
if (remaining <= 0) {
timeout = K_NO_WAIT;
@ -1449,7 +1449,7 @@ int z_impl_zsock_poll(struct zsock_pollfd *fds, int nfds, int poll_timeout)
}
if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
int64_t remaining = end - z_tick_get();
int64_t remaining = end - sys_clock_tick_get();
if (remaining <= 0) {
break;

View file

@ -171,7 +171,7 @@ int task_wdt_feed(int channel_id)
*/
k_sched_lock();
current_ticks = z_tick_get();
current_ticks = sys_clock_tick_get();
/* feed the specified channel */
channels[channel_id].timeout_abs_ticks = current_ticks +

View file

@ -134,11 +134,11 @@ void main(void)
/* The following code is needed to make the benchmarking run on
* slower platforms.
*/
uint64_t time_stamp = z_tick_get();
uint64_t time_stamp = sys_clock_tick_get();
k_sleep(K_MSEC(1));
uint64_t time_stamp_2 = z_tick_get();
uint64_t time_stamp_2 = sys_clock_tick_get();
if (time_stamp_2 - time_stamp > 1) {
number_of_loops = 10U;

View file

@ -97,7 +97,7 @@ static void test_basic(void)
zassert_true(chan >= 0, "Failed to allocate RTC channel (%d).", chan);
k_timeout_t t0 =
Z_TIMEOUT_TICKS(Z_TICK_ABS(z_tick_get() + K_MSEC(1).ticks));
Z_TIMEOUT_TICKS(Z_TICK_ABS(sys_clock_tick_get() + K_MSEC(1).ticks));
test_timeout(chan, t0, false);
@ -112,7 +112,7 @@ static void test_basic(void)
/* value in the past should expire immediately (2 ticks from now)*/
k_timeout_t t3 =
Z_TIMEOUT_TICKS(Z_TICK_ABS(z_tick_get() - K_MSEC(1).ticks));
Z_TIMEOUT_TICKS(Z_TICK_ABS(sys_clock_tick_get() - K_MSEC(1).ticks));
test_timeout(chan, t3, true);
@ -172,21 +172,21 @@ static void test_get_ticks(void)
"Unexpected result %d (expected: %d)", ticks, exp_ticks);
/* Absolute timeout 1ms in the past */
t = Z_TIMEOUT_TICKS(Z_TICK_ABS(z_tick_get() - K_MSEC(1).ticks));
t = Z_TIMEOUT_TICKS(Z_TICK_ABS(sys_clock_tick_get() - K_MSEC(1).ticks));
exp_ticks = z_nrf_rtc_timer_read() - K_MSEC(1).ticks;
ticks = z_nrf_rtc_timer_get_ticks(t);
zassert_true((ticks >= exp_ticks - 1) && (ticks <= exp_ticks),
"Unexpected result %d (expected: %d)", ticks, exp_ticks);
/* Absolute timeout 10ms in the future */
t = Z_TIMEOUT_TICKS(Z_TICK_ABS(z_tick_get() + K_MSEC(10).ticks));
t = Z_TIMEOUT_TICKS(Z_TICK_ABS(sys_clock_tick_get() + K_MSEC(10).ticks));
exp_ticks = z_nrf_rtc_timer_read() + K_MSEC(10).ticks;
ticks = z_nrf_rtc_timer_get_ticks(t);
zassert_true((ticks >= exp_ticks - 1) && (ticks <= exp_ticks),
"Unexpected result %d (expected: %d)", ticks, exp_ticks);
/* too far in the future */
t = Z_TIMEOUT_TICKS(z_tick_get() + 0x00800001);
t = Z_TIMEOUT_TICKS(sys_clock_tick_get() + 0x00800001);
ticks = z_nrf_rtc_timer_get_ticks(t);
zassert_equal(ticks, -EINVAL, "Unexpected ticks: %d", ticks);
}
@ -194,7 +194,7 @@ static void test_get_ticks(void)
static void sched_handler(uint32_t id, uint32_t cc_val, void *user_data)
{
int64_t now = z_tick_get();
int64_t now = sys_clock_tick_get();
int rtc_ticks_now =
z_nrf_rtc_timer_get_ticks(Z_TIMEOUT_TICKS(Z_TICK_ABS(now)));
uint64_t *evt_uptime_us = user_data;
@ -205,7 +205,7 @@ static void sched_handler(uint32_t id, uint32_t cc_val, void *user_data)
static void test_absolute_scheduling(void)
{
k_timeout_t t;
int64_t now_us = k_ticks_to_us_floor64(z_tick_get());
int64_t now_us = k_ticks_to_us_floor64(sys_clock_tick_get());
uint64_t target_us = now_us + 5678;
uint64_t evt_uptime_us;
int rtc_ticks;
@ -228,7 +228,7 @@ static void test_absolute_scheduling(void)
(uint32_t)now_us, (uint32_t)target_us, (uint32_t)evt_uptime_us);
/* schedule event now. */
now_us = k_ticks_to_us_floor64(z_tick_get());
now_us = k_ticks_to_us_floor64(sys_clock_tick_get());
t = Z_TIMEOUT_TICKS(Z_TICK_ABS(K_USEC(now_us).ticks));
rtc_ticks = z_nrf_rtc_timer_get_ticks(t);

View file

@ -72,7 +72,7 @@ static void test_starve(void)
last_now = now;
/* Assume tick delta fits in printable 32 bits */
uint64_t ticks = z_tick_get();
uint64_t ticks = sys_clock_tick_get();
int64_t ticks_diff = ticks - last_ticks;
zassert_true(ticks_diff > 0,