zephyr: replace zephyr integer types with C99 types

git grep -l 'u\(8\|16\|32\|64\)_t' | \
	xargs sed -i "s/u\(8\|16\|32\|64\)_t/uint\1_t/g"
git grep -l 's\(8\|16\|32\|64\)_t' | \
	xargs sed -i "s/s\(8\|16\|32\|64\)_t/int\1_t/g"

Signed-off-by: Kumar Gala <kumar.gala@linaro.org>
commit a1b77fd589
Kumar Gala authored and committed on 2020-05-27 11:26:57 -05:00
2364 changed files with 32505 additions and 32505 deletions
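
The substitution can be done mechanically because every legacy Zephyr name corresponds one-to-one, in width and signedness, to a <stdint.h> type. A simplified sketch of that correspondence (illustrative only, not the verbatim contents of Zephyr's zephyr/types.h, which spells the legacy names in terms of base C types):

    #include <stdint.h>

    /* Simplified sketch: each legacy Zephyr integer name pairs with a C99
     * fixed-width type of the same size and signedness, which is why a
     * purely textual sed rename is behavior-preserving.
     */
    typedef uint8_t  u8_t;
    typedef uint16_t u16_t;
    typedef uint32_t u32_t;
    typedef uint64_t u64_t;

    typedef int8_t  s8_t;
    typedef int16_t s16_t;
    typedef int32_t s32_t;
    typedef int64_t s64_t;

After running the two pipelines above, a word-anchored search such as git grep -n -w -E '(u|s)(8|16|32|64)_t' (not part of the commit itself) should come back empty, apart from any compatibility definitions that are intentionally kept.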


@@ -31,7 +31,7 @@
 static struct k_spinlock lock;
 
-static u32_t last_load;
+static uint32_t last_load;
 
 /*
  * This local variable holds the amount of SysTick HW cycles elapsed
@@ -43,13 +43,13 @@ static u32_t last_load;
  *
  * t = cycle_counter + elapsed();
  */
-static u32_t cycle_count;
+static uint32_t cycle_count;
 
 /*
  * This local variable holds the amount of elapsed SysTick HW cycles
  * that have been announced to the kernel.
  */
-static u32_t announced_cycles;
+static uint32_t announced_cycles;
 
 /*
  * This local variable holds the amount of elapsed HW cycles due to
@@ -60,7 +60,7 @@ static u32_t announced_cycles;
  * Each time cycle_count is updated with the value from overflow_cyc,
  * the overflow_cyc must be reset to zero.
  */
-static volatile u32_t overflow_cyc;
+static volatile uint32_t overflow_cyc;
 
 /* This internal function calculates the amount of HW cycles that have
  * elapsed since the last time the absolute HW cycles counter has been
@@ -80,11 +80,11 @@ static volatile u32_t overflow_cyc;
  * - and until the current call of the function is completed.
  * - the function is invoked with interrupts disabled.
  */
-static u32_t elapsed(void)
+static uint32_t elapsed(void)
 {
-	u32_t val1 = SysTick->VAL;	/* A */
-	u32_t ctrl = SysTick->CTRL;	/* B */
-	u32_t val2 = SysTick->VAL;	/* C */
+	uint32_t val1 = SysTick->VAL;	/* A */
+	uint32_t ctrl = SysTick->CTRL;	/* B */
+	uint32_t val2 = SysTick->VAL;	/* C */
 
 	/* SysTick behavior: The counter wraps at zero automatically,
 	 * setting the COUNTFLAG field of the CTRL register when it
@@ -116,7 +116,7 @@ static u32_t elapsed(void)
 void z_clock_isr(void *arg)
 {
 	ARG_UNUSED(arg);
-	u32_t dticks;
+	uint32_t dticks;
 
 	/* Update overflow_cyc and clear COUNTFLAG by invoking elapsed() */
 	elapsed();
@@ -162,7 +162,7 @@ int z_clock_driver_init(struct device *device)
 	return 0;
 }
 
-void z_clock_set_timeout(s32_t ticks, bool idle)
+void z_clock_set_timeout(int32_t ticks, bool idle)
 {
 	/* Fast CPUs and a 24 bit counter mean that even idle systems
 	 * need to wake up multiple times per second. If the kernel
@@ -178,21 +178,21 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
 	}
 
 #if defined(CONFIG_TICKLESS_KERNEL)
-	u32_t delay;
+	uint32_t delay;
 
 	ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
-	ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
+	ticks = MAX(MIN(ticks - 1, (int32_t)MAX_TICKS), 0);
 
 	k_spinlock_key_t key = k_spin_lock(&lock);
 
-	u32_t pending = elapsed();
+	uint32_t pending = elapsed();
 
 	cycle_count += pending;
 	overflow_cyc = 0U;
 
-	u32_t unannounced = cycle_count - announced_cycles;
+	uint32_t unannounced = cycle_count - announced_cycles;
 
-	if ((s32_t)unannounced < 0) {
+	if ((int32_t)unannounced < 0) {
 		/* We haven't announced for more than half the 32-bit
 		 * wrap duration, because new timeouts keep being set
 		 * before the existing one fires. Force an announce
@@ -223,23 +223,23 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
 #endif
 }
 
-u32_t z_clock_elapsed(void)
+uint32_t z_clock_elapsed(void)
 {
 	if (!TICKLESS) {
 		return 0;
 	}
 
 	k_spinlock_key_t key = k_spin_lock(&lock);
-	u32_t cyc = elapsed() + cycle_count - announced_cycles;
+	uint32_t cyc = elapsed() + cycle_count - announced_cycles;
 	k_spin_unlock(&lock, key);
 
 	return cyc / CYC_PER_TICK;
 }
 
-u32_t z_timer_cycle_get_32(void)
+uint32_t z_timer_cycle_get_32(void)
 {
 	k_spinlock_key_t key = k_spin_lock(&lock);
-	u32_t ret = elapsed() + cycle_count;
+	uint32_t ret = elapsed() + cycle_count;
 	k_spin_unlock(&lock, key);
 
 	return ret;
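
One place in this diff where the integer width genuinely matters is the cast in "if ((s32_t)unannounced < 0)", now "(int32_t)": the unsigned cycle backlog is reinterpreted as signed, so a value beyond half the 32-bit range reads as negative and forces an announce. Because s32_t and int32_t are both 32-bit signed types, the rename leaves that behavior untouched. A self-contained sketch of the idiom, using hypothetical names rather than the driver's own:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustration only (not from the driver): an unsigned delta that has
     * grown past half the 32-bit range turns negative when viewed as
     * int32_t. Converting an out-of-range value to a signed type is
     * implementation-defined in C, but on the two's-complement targets
     * Zephyr supports it wraps as expected.
     */
    static int lapped_half_range(uint32_t newer, uint32_t older)
    {
        uint32_t delta = newer - older;  /* wraps modulo 2^32 */

        return (int32_t)delta < 0;       /* true once delta >= 2^31 */
    }

    int main(void)
    {
        printf("%d\n", lapped_half_range(100U, 50U));         /* 0: small backlog */
        printf("%d\n", lapped_half_range(50U, 100U));         /* 1: "older" is ahead */
        printf("%d\n", lapped_half_range(0x90000000U, 0U));   /* 1: delta >= 2^31 */
        return 0;
    }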