/*
 * Copyright (c) 2014-2015 Wind River Systems, Inc.
 * Copyright (c) 2018 Synopsys, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <drivers/timer/system_timer.h>
#include <sys_clock.h>
#include <spinlock.h>
#include <arch/arc/v2/aux_regs.h>
#include <soc.h>
|
2015-04-10 16:44:37 -07:00
|
|
|
/*
|
2016-05-06 12:56:17 -07:00
|
|
|
* note: This implementation assumes Timer0 is present. Be sure
|
|
|
|
* to build the ARC CPU with Timer0.
|
2019-08-01 12:39:35 +08:00
|
|
|
*
|
|
|
|
* If secureshield is present and secure firmware is configured,
|
|
|
|
* use secure Timer 0
|
2015-04-10 16:44:37 -07:00
|
|
|
*/

#ifdef CONFIG_ARC_SECURE_FIRMWARE

#undef _ARC_V2_TMR0_COUNT
#undef _ARC_V2_TMR0_CONTROL
#undef _ARC_V2_TMR0_LIMIT
#undef IRQ_TIMER0

#define _ARC_V2_TMR0_COUNT _ARC_V2_S_TMR0_COUNT
#define _ARC_V2_TMR0_CONTROL _ARC_V2_S_TMR0_CONTROL
#define _ARC_V2_TMR0_LIMIT _ARC_V2_S_TMR0_LIMIT
#define IRQ_TIMER0 IRQ_SEC_TIMER0

#endif

#define _ARC_V2_TMR_CTRL_IE 0x1 /* interrupt enable */
#define _ARC_V2_TMR_CTRL_NH 0x2 /* count only while not halted */
#define _ARC_V2_TMR_CTRL_W 0x4 /* watchdog mode enable */
#define _ARC_V2_TMR_CTRL_IP 0x8 /* interrupt pending flag */

/* Minimum cycles in the future to try to program. */
#define MIN_DELAY 512
#define COUNTER_MAX 0xffffffff
#define TIMER_STOPPED 0x0
#define CYC_PER_TICK (sys_clock_hw_cycles_per_sec() \
		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)

#define MAX_TICKS ((COUNTER_MAX / CYC_PER_TICK) - 1)
#define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)
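
/*
 * Worked example with purely illustrative numbers (the real values come
 * from sys_clock_hw_cycles_per_sec() and Kconfig): assuming a 100 MHz
 * timer clock and CONFIG_SYS_CLOCK_TICKS_PER_SEC=100, CYC_PER_TICK is
 * 1000000 cycles, MAX_TICKS is (0xffffffff / 1000000) - 1 = 4293 ticks,
 * and MAX_CYCLES is 4293000000 cycles, which still fits in the 32-bit
 * LIMIT register.
 */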

#define TICKLESS (IS_ENABLED(CONFIG_TICKLESS_KERNEL))

#define SMP_TIMER_DRIVER (CONFIG_SMP && CONFIG_MP_NUM_CPUS > 1)

static struct k_spinlock lock;

#if SMP_TIMER_DRIVER
volatile static u64_t last_time;
volatile static u64_t start_time;

#else
static u32_t last_load;

static u32_t cycle_count;
#endif
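
/*
 * Summary of the bookkeeping above, as used by the code below: in the
 * SMP configuration the 64-bit GFRC is the wall clock; last_time holds
 * the GFRC value at the last tick announcement and start_time the GFRC
 * value captured at init. In the single-core configuration, last_load
 * is the number of cycles programmed into the current timer period
 * (LIMIT + 1) and cycle_count accumulates the cycles of all periods
 * already announced to the kernel.
 */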

/**
 *
 * @brief Get contents of Timer0 count register
 *
 * @return Current Timer0 count
 */
static ALWAYS_INLINE u32_t timer0_count_register_get(void)
{
	return z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
}

/**
 *
 * @brief Set Timer0 count register to the specified value
 *
 * @return N/A
 */
static ALWAYS_INLINE void timer0_count_register_set(u32_t value)
{
	z_arc_v2_aux_reg_write(_ARC_V2_TMR0_COUNT, value);
}

/**
 *
 * @brief Get contents of Timer0 control register
 *
 * @return Current Timer0 control register value
 */
static ALWAYS_INLINE u32_t timer0_control_register_get(void)
{
	return z_arc_v2_aux_reg_read(_ARC_V2_TMR0_CONTROL);
}

/**
 *
 * @brief Set Timer0 control register to the specified value
 *
 * @return N/A
 */
static ALWAYS_INLINE void timer0_control_register_set(u32_t value)
{
	z_arc_v2_aux_reg_write(_ARC_V2_TMR0_CONTROL, value);
}

/**
 *
 * @brief Get contents of Timer0 limit register
 *
 * @return Current Timer0 limit register value
 */
static ALWAYS_INLINE u32_t timer0_limit_register_get(void)
{
	return z_arc_v2_aux_reg_read(_ARC_V2_TMR0_LIMIT);
}

/**
 *
 * @brief Set Timer0 limit register to the specified value
 *
 * @return N/A
 */
static ALWAYS_INLINE void timer0_limit_register_set(u32_t count)
{
	z_arc_v2_aux_reg_write(_ARC_V2_TMR0_LIMIT, count);
}

#if !SMP_TIMER_DRIVER
/*
 * Return the number of cycles elapsed since cycle_count was last
 * updated, accounting for a counter wrap that has not been announced
 * to the kernel yet.
 */
static u32_t elapsed(void)
{
	u32_t val, ov, ctrl;

	/* Re-read COUNT if it wrapped between the two register reads,
	 * so that val and ctrl describe the same timer period.
	 */
	do {
		val = timer0_count_register_get();
		ctrl = timer0_control_register_get();
	} while (timer0_count_register_get() < val);

	/* If an interrupt is pending, the counter has already wrapped
	 * but cycle_count has not been updated yet: add one full period.
	 */
	ov = (ctrl & _ARC_V2_TMR_CTRL_IP) ? last_load : 0;
	return val + ov;
}
#endif

/**
 *
 * @brief System clock periodic tick handler
 *
 * This routine handles the system clock tick interrupt. It always
 * announces one tick when TICKLESS is not enabled, or multiple ticks
 * when TICKLESS is enabled.
 *
 * @return N/A
 */
static void timer_int_handler(void *unused)
{
	ARG_UNUSED(unused);
	u32_t dticks;

	/* clear the interrupt by writing 0 to IP bit of the control register */
	timer0_control_register_set(_ARC_V2_TMR_CTRL_NH | _ARC_V2_TMR_CTRL_IE);

#if SMP_TIMER_DRIVER
	u64_t curr_time;
	k_spinlock_key_t key;

	key = k_spin_lock(&lock);
	/* gfrc is the wall clock */
	curr_time = z_arc_connect_gfrc_read();

	dticks = (curr_time - last_time) / CYC_PER_TICK;
	last_time = curr_time;

	k_spin_unlock(&lock, key);

	z_clock_announce(dticks);
#else
	cycle_count += last_load;
	dticks = last_load / CYC_PER_TICK;
	z_clock_announce(TICKLESS ? dticks : 1);
#endif
}

/**
 *
 * @brief Initialize and enable the system clock
 *
 * This routine programs the ARCv2 timer to deliver interrupts at the
 * rate specified by CYC_PER_TICK.
 *
 * @return 0
 */
int z_clock_driver_init(struct device *device)
{
	ARG_UNUSED(device);

	/* ensure that the timer will not generate interrupts */
	timer0_control_register_set(0);

#if SMP_TIMER_DRIVER
	IRQ_CONNECT(IRQ_TIMER0, CONFIG_ARCV2_TIMER_IRQ_PRIORITY,
		    timer_int_handler, NULL, 0);

	timer0_limit_register_set(CYC_PER_TICK - 1);
	last_time = z_arc_connect_gfrc_read();
	start_time = last_time;
#else
	last_load = CYC_PER_TICK;

	IRQ_CONNECT(IRQ_TIMER0, CONFIG_ARCV2_TIMER_IRQ_PRIORITY,
		    timer_int_handler, NULL, 0);

	timer0_limit_register_set(last_load - 1);
#ifdef CONFIG_BOOT_TIME_MEASUREMENT
	cycle_count = timer0_count_register_get();
#endif
#endif
	timer0_count_register_set(0);
	timer0_control_register_set(_ARC_V2_TMR_CTRL_NH | _ARC_V2_TMR_CTRL_IE);

	/* everything has been configured: safe to enable the interrupt */

	irq_enable(IRQ_TIMER0);

	return 0;
}

void z_clock_set_timeout(s32_t ticks, bool idle)
{
	/* If the kernel allows us to miss tick announcements in idle,
	 * then shut off the counter. (Note: we can assume if idle==true
	 * that interrupts are already disabled.)
	 */
#if SMP_TIMER_DRIVER
	/* Since the 64-bit GFRC is used as the wall clock, the idle hint
	 * can safely be ignored here: no system tick will be missed.
	 * On a single core using the 32-bit ARC timer, idle cannot be
	 * ignored, because the 32-bit counter would overflow fairly soon.
	 */
	if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && ticks == K_TICKS_FOREVER) {
		timer0_control_register_set(0);
		timer0_count_register_set(0);
		timer0_limit_register_set(0);
		return;
	}

#if defined(CONFIG_TICKLESS_KERNEL)
	u32_t delay;
	u32_t key;

	ticks = MIN(MAX_TICKS, ticks);

	/* Desired delay in the future */
	delay = (ticks == 0) ? CYC_PER_TICK : ticks * CYC_PER_TICK;

	key = arch_irq_lock();

	timer0_limit_register_set(delay - 1);
	timer0_count_register_set(0);
	timer0_control_register_set(_ARC_V2_TMR_CTRL_NH |
				    _ARC_V2_TMR_CTRL_IE);

	arch_irq_unlock(key);
#endif
#else
	if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle
	    && ticks == K_TICKS_FOREVER) {
		timer0_control_register_set(0);
		timer0_count_register_set(0);
		timer0_limit_register_set(0);
		last_load = TIMER_STOPPED;
		return;
	}

#if defined(CONFIG_TICKLESS_KERNEL)
	u32_t delay;

	ticks = MIN(MAX_TICKS, MAX(ticks - 1, 0));

	/* Desired delay in the future */
	delay = (ticks == 0) ? MIN_DELAY : ticks * CYC_PER_TICK;

	k_spinlock_key_t key = k_spin_lock(&lock);

	delay += elapsed();

	/* Round delay up to next tick boundary */
	delay = ((delay + CYC_PER_TICK - 1) / CYC_PER_TICK) * CYC_PER_TICK;

	if (last_load != delay) {
		if (timer0_control_register_get() & _ARC_V2_TMR_CTRL_IP) {
			delay -= last_load;
		}
		timer0_limit_register_set(delay - 1);
		last_load = delay;
		timer0_control_register_set(_ARC_V2_TMR_CTRL_NH |
					    _ARC_V2_TMR_CTRL_IE);
	}

	k_spin_unlock(&lock, key);
#endif
#endif
}
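
/*
 * Illustrative walk-through of the single-core tickless path above, with
 * made-up numbers: assume CYC_PER_TICK is 1000, elapsed() returns 300 and
 * the caller passes ticks=4. Then ticks is clamped to MAX(4 - 1, 0) = 3,
 * delay starts as 3 * 1000 = 3000, becomes 3300 after adding the cycles
 * already consumed in the current period, and is rounded up to the next
 * tick boundary, 4000. If that differs from last_load (and no interrupt
 * is pending), LIMIT is reprogrammed to 3999 and last_load becomes 4000,
 * so the next interrupt again lands on a tick boundary.
 */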

u32_t z_clock_elapsed(void)
{
	if (!TICKLESS) {
		return 0;
	}

	u32_t cyc;
	k_spinlock_key_t key = k_spin_lock(&lock);

#if SMP_TIMER_DRIVER
	cyc = (z_arc_connect_gfrc_read() - last_time) / CYC_PER_TICK;
#else
	cyc = elapsed() / CYC_PER_TICK;
#endif

	k_spin_unlock(&lock, key);

	return cyc;
}

u32_t z_timer_cycle_get_32(void)
{
#if SMP_TIMER_DRIVER
	return z_arc_connect_gfrc_read() - start_time;
#else
	k_spinlock_key_t key = k_spin_lock(&lock);
	u32_t ret = elapsed() + cycle_count;

	k_spin_unlock(&lock, key);
	return ret;
#endif
}

/**
 *
 * @brief Stop announcing ticks into the kernel
 *
 * This routine disables timer interrupt generation and delivery.
 * Note that the timer's counting cannot be stopped by software.
 *
 * @return N/A
 */
void sys_clock_disable(void)
{
	unsigned int key; /* interrupt lock level */
	u32_t control;    /* timer control register value */

	key = irq_lock();

	/* disable interrupt generation */

	control = timer0_control_register_get();
	timer0_control_register_set(control & ~_ARC_V2_TMR_CTRL_IE);

	irq_unlock(key);

	/* disable interrupt in the interrupt controller */

	irq_disable(IRQ_TIMER0);
}

#if SMP_TIMER_DRIVER
void smp_timer_init(void)
{
	/* set the initial status of timer0 on each slave core */
	timer0_control_register_set(0);
	timer0_count_register_set(0);
	timer0_limit_register_set(0);

	z_irq_priority_set(IRQ_TIMER0, CONFIG_ARCV2_TIMER_IRQ_PRIORITY, 0);
	irq_enable(IRQ_TIMER0);
}
#endif