code-guideline: Fix violations of MISRA-C Rule 10.4

Rule 10.4: Both operands of an operator in which the usual arithmetic
conversions are performed shall have the same essential type category.
The changes convert plain integer constants to unsigned integer constants
wherever they are combined with essentially unsigned operands.

Signed-off-by: Aastha Grover <aastha.grover@intel.com>
parent fc67409291
commit 83b9f69755

35 changed files with 112 additions and 112 deletions
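For context on the rule being fixed: MISRA C:2012 Rule 10.4 requires both operands of an operator subject to the usual arithmetic conversions to have the same essential type category, so an essentially unsigned operand must not be mixed with a plain (essentially signed) integer constant. Below is a minimal sketch of the pattern corrected throughout this commit, using a hypothetical helper rather than code from the tree:

#include <stdint.h>

/* Hypothetical example, not taken from the Zephyr sources. */
uint32_t strip_thumb_bit(uint32_t addr)
{
        /*
         * Non-compliant: addr is essentially unsigned while the constant 0x1
         * is essentially signed, so the operands of '&' differ in category:
         *
         *     return addr & ~0x1;
         */

        /* Compliant: the U suffix makes the constant essentially unsigned. */
        return addr & ~0x1U;
}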
@@ -175,8 +175,8 @@ static bool memory_fault_recoverable(z_arch_esf_t *esf)
 #ifdef CONFIG_USERSPACE
         for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
                 /* Mask out instruction mode */
-                uint32_t start = (uint32_t)exceptions[i].start & ~0x1;
-                uint32_t end = (uint32_t)exceptions[i].end & ~0x1;
+                uint32_t start = (uint32_t)exceptions[i].start & ~0x1U;
+                uint32_t end = (uint32_t)exceptions[i].end & ~0x1U;

                 if (esf->basic.pc >= start && esf->basic.pc < end) {
                         esf->basic.pc = (uint32_t)(exceptions[i].fixup);

@@ -80,7 +80,7 @@ static int region_allocate_and_init(const uint8_t index,
                                     const struct arm_mpu_region *region_conf)
 {
         /* Attempt to allocate new region index. */
-        if (index > (get_num_regions() - 1)) {
+        if (index > (get_num_regions() - 1U)) {

                 /* No available MPU region index. */
                 LOG_ERR("Failed to allocate new MPU region %u\n", index);

@@ -54,11 +54,11 @@ static int mpu_partition_is_valid(const struct k_mem_partition *part)
          * partition must align with size.
          */
         int partition_is_valid =
-                ((part->size & (part->size - 1)) == 0U)
+                ((part->size & (part->size - 1U)) == 0U)
                 &&
                 (part->size >= CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
                 &&
-                ((part->start & (part->size - 1)) == 0U);
+                ((part->start & (part->size - 1U)) == 0U);

         return partition_is_valid;
 }

@@ -87,7 +87,7 @@ static inline uint32_t size_to_mpu_rasr_size(uint32_t size)
                 return REGION_4G;
         }

-        return ((32 - __builtin_clz(size - 1) - 2 + 1) << MPU_RASR_SIZE_Pos) &
+        return ((32 - __builtin_clz(size - 1U) - 2 + 1) << MPU_RASR_SIZE_Pos) &
                 MPU_RASR_SIZE_Msk;
 }

@@ -128,7 +128,7 @@ static inline int get_dyn_region_min_index(void)
  */
 static inline uint32_t mpu_rasr_size_to_size(uint32_t rasr_size)
 {
-        return 1 << (rasr_size + 1);
+        return 1 << (rasr_size + 1U);
 }

 static inline uint32_t mpu_region_get_base(uint32_t index)

@@ -216,10 +216,10 @@ static inline int is_in_region(uint32_t r_index, uint32_t start, uint32_t size)

         r_addr_start = rbar & MPU_RBAR_ADDR_Msk;
         r_size_lshift = ((rasr & MPU_RASR_SIZE_Msk) >>
-                        MPU_RASR_SIZE_Pos) + 1;
-        r_addr_end = r_addr_start + (1UL << r_size_lshift) - 1;
+                        MPU_RASR_SIZE_Pos) + 1U;
+        r_addr_end = r_addr_start + (1UL << r_size_lshift) - 1UL;

-        size = size == 0 ? 0 : size - 1;
+        size = size == 0U ? 0U : size - 1U;
         if (u32_add_overflow(start, size, &end)) {
                 return 0;
         }

@@ -258,7 +258,7 @@ static inline int mpu_buffer_validate(void *addr, size_t size, int write)
         int32_t r_index;

         /* Iterate all mpu regions in reversed order */
-        for (r_index = get_num_regions() - 1; r_index >= 0; r_index--) {
+        for (r_index = get_num_regions() - 1U; r_index >= 0; r_index--) {
                 if (!is_enabled_region(r_index) ||
                     !is_in_region(r_index, (uint32_t)addr, size)) {
                         continue;

@@ -274,7 +274,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
 #if defined(CONFIG_USERSPACE)
         if (thread->arch.priv_stack_start) {
                 /* User thread */
-                if ((__get_CONTROL() & CONTROL_nPRIV_Msk) == 0) {
+                if ((__get_CONTROL() & CONTROL_nPRIV_Msk) == 0U) {
                         /* User thread in privilege mode */
                         if (IS_MPU_GUARD_VIOLATION(
                                 thread->arch.priv_stack_start - guard_len,
@@ -35,7 +35,7 @@ extern volatile irq_offload_routine_t offload_routine;
 /* Check the CPSR mode bits to see if we are in IRQ or FIQ mode */
 static ALWAYS_INLINE bool arch_is_in_isr(void)
 {
-        return (_kernel.cpus[0].nested != 0);
+        return (_kernel.cpus[0].nested != 0U);
 }

 /**

@@ -97,13 +97,13 @@ static void prevent_false_prev_evt(void)
  */
 static void handle_next_tick_case(uint32_t t)
 {
-        set_comparator(t + 2);
+        set_comparator(t + 2U);
         while (t != counter()) {
                 /* already expired, tick elapsed but event might not be
                  * generated. Trigger interrupt.
                  */
                 t = counter();
-                set_comparator(t + 2);
+                set_comparator(t + 2U);
         }
 }

@@ -117,7 +117,7 @@ static void set_absolute_ticks(uint32_t abs_val)
         uint32_t t = counter();

         diff = counter_sub(abs_val, t);
-        if (diff == 1) {
+        if (diff == 1U) {
                 handle_next_tick_case(t);
                 return;
         }

@@ -111,7 +111,7 @@ static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
 static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
 {
         /* This convention works for both PRIMASK and BASEPRI */
-        return key == 0;
+        return key == 0U;
 }

 #ifdef __cplusplus

@@ -27,7 +27,7 @@
 #define P_RW_U_RO 0x2
 #define P_RW_U_RO_Msk ((P_RW_U_RO << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)
 /* Privileged Read Write, Unprivileged Read Write */
-#define P_RW_U_RW 0x3
+#define P_RW_U_RW 0x3U
 #define P_RW_U_RW_Msk ((P_RW_U_RW << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)
 /* Privileged Read Write, Unprivileged Read Write */
 #define FULL_ACCESS 0x3

@@ -22,9 +22,9 @@
 /**
  * USB endpoint direction and number.
  */
-#define USB_EP_DIR_MASK 0x80
-#define USB_EP_DIR_IN 0x80
-#define USB_EP_DIR_OUT 0x00
+#define USB_EP_DIR_MASK 0x80U
+#define USB_EP_DIR_IN 0x80U
+#define USB_EP_DIR_OUT 0x00U

 /** Get endpoint index (number) from endpoint address */
 #define USB_EP_GET_IDX(ep) ((ep) & ~USB_EP_DIR_MASK)

@@ -40,7 +40,7 @@
 /**
  * USB endpoint Transfer Type mask.
  */
-#define USB_EP_TRANSFER_TYPE_MASK 0x3
+#define USB_EP_TRANSFER_TYPE_MASK 0x3U

 /**
  * USB endpoint Synchronization Type mask.
@@ -4912,7 +4912,7 @@ enum _poll_types_bits {
         _POLL_NUM_TYPES
 };

-#define Z_POLL_TYPE_BIT(type) (1 << ((type) - 1))
+#define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))

 /* private - states bit positions */
 enum _poll_states_bits {

@@ -4934,7 +4934,7 @@ enum _poll_states_bits {
         _POLL_NUM_STATES
 };

-#define Z_POLL_STATE_BIT(state) (1 << ((state) - 1))
+#define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))

 #define _POLL_EVENT_NUM_UNUSED_BITS \
         (32 - (0 \

@@ -72,10 +72,10 @@
 #endif

 /* lowest value of _thread_base.preempt at which a thread is non-preemptible */
-#define _NON_PREEMPT_THRESHOLD 0x0080
+#define _NON_PREEMPT_THRESHOLD 0x0080U

 /* highest value of _thread_base.preempt at which a thread is preemptible */
-#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1)
+#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1U)

 #if !defined(_ASMLANGUAGE)

@@ -31,7 +31,7 @@ extern "C" {
 #endif

 #if !defined(CONFIG_LOG) || defined(CONFIG_LOG_MINIMAL)
-#define CONFIG_LOG_DOMAIN_ID 0
+#define CONFIG_LOG_DOMAIN_ID 0U
 #endif

 #define LOG_FUNCTION_PREFIX_MASK \

@@ -120,14 +120,14 @@ extern "C" {
  * @brief Macro for getting ID of current module.
  */
 #define LOG_CURRENT_MODULE_ID() (__log_level != 0 ? \
-        log_const_source_id(__log_current_const_data) : 0)
+        log_const_source_id(__log_current_const_data) : 0U)

 /**
  * @def LOG_CURRENT_DYNAMIC_DATA_ADDR
  * @brief Macro for getting address of dynamic structure of current module.
  */
 #define LOG_CURRENT_DYNAMIC_DATA_ADDR() (__log_level ? \
-        __log_current_dynamic_data : (struct log_source_dynamic_data *)0)
+        __log_current_dynamic_data : (struct log_source_dynamic_data *)0U)

 /** @brief Macro for getting ID of the element of the section.
  *

@@ -35,9 +35,9 @@ typedef unsigned long log_arg_t;

 /** @brief Number of arguments in the log entry which fits in one chunk.*/
 #ifdef CONFIG_64BIT
-#define LOG_MSG_NARGS_SINGLE_CHUNK 4
+#define LOG_MSG_NARGS_SINGLE_CHUNK 4U
 #else
-#define LOG_MSG_NARGS_SINGLE_CHUNK 3
+#define LOG_MSG_NARGS_SINGLE_CHUNK 3U
 #endif

 /** @brief Number of arguments in the head of extended standard log message..*/

@@ -64,7 +64,7 @@ typedef unsigned long log_arg_t;
 #define ARGS_CONT_MSG (HEXDUMP_BYTES_CONT_MSG / sizeof(log_arg_t))

 /** @brief Flag indicating standard log message. */
-#define LOG_MSG_TYPE_STD 0
+#define LOG_MSG_TYPE_STD 0U

 /** @brief Flag indicating hexdump log message. */
 #define LOG_MSG_TYPE_HEXDUMP 1

@@ -408,7 +408,7 @@ extern atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value);
  */

 #define ATOMIC_BITS (sizeof(atomic_val_t) * 8)
-#define ATOMIC_MASK(bit) (1 << ((uint32_t)(bit) & (ATOMIC_BITS - 1)))
+#define ATOMIC_MASK(bit) (1U << ((uint32_t)(bit) & (ATOMIC_BITS - 1U)))
 #define ATOMIC_ELEM(addr, bit) ((addr) + ((bit) / ATOMIC_BITS))

 /**
@@ -47,7 +47,7 @@ struct sys_notify;
  */
 #define SYS_NOTIFY_METHOD_CALLBACK 3

-#define SYS_NOTIFY_METHOD_MASK 0x03
+#define SYS_NOTIFY_METHOD_MASK 0x03U
 #define SYS_NOTIFY_METHOD_POS 0

 /**

@@ -49,7 +49,7 @@ extern "C" {
 /**
  * @brief Value exposed by ONOFF_STATE_MASK when service is off.
  */
-#define ONOFF_STATE_OFF 0
+#define ONOFF_STATE_OFF 0U

 /**
  * @brief Value exposed by ONOFF_STATE_MASK when service is on.

@@ -150,7 +150,7 @@ static inline void ring_buf_init(struct ring_buf *buf, uint32_t size, void *data
         buf->size = size;
         buf->buf.buf32 = (uint32_t *)data;
         if (is_power_of_two(size)) {
-                buf->mask = size - 1;
+                buf->mask = size - 1U;
         } else {
                 buf->mask = 0U;
         }

@@ -170,11 +170,11 @@ static inline uint32_t z_ring_buf_custom_space_get(uint32_t size, uint32_t head,
                                                    uint32_t tail)
 {
         if (tail < head) {
-                return head - tail - 1;
+                return head - tail - 1U;
         }

         /* buf->tail > buf->head */
-        return (size - tail) + head - 1;
+        return (size - tail) + head - 1U;
 }

 /**

@@ -81,9 +81,9 @@ static TIME_CONSTEXPR ALWAYS_INLINE uint64_t z_tmcvt(uint64_t t, uint32_t from_h
                                               bool round_off)
 {
         bool mul_ratio = const_hz &&
-                (to_hz > from_hz) && ((to_hz % from_hz) == 0);
+                (to_hz > from_hz) && ((to_hz % from_hz) == 0U);
         bool div_ratio = const_hz &&
-                (from_hz > to_hz) && ((from_hz % to_hz) == 0);
+                (from_hz > to_hz) && ((from_hz % to_hz) == 0U);

         if (from_hz == to_hz) {
                 return result32 ? ((uint32_t)t) : t;

@@ -95,9 +95,9 @@ static TIME_CONSTEXPR ALWAYS_INLINE uint64_t z_tmcvt(uint64_t t, uint32_t from_h
                 uint32_t rdivisor = div_ratio ? (from_hz / to_hz) : from_hz;

                 if (round_up) {
-                        off = rdivisor - 1;
+                        off = rdivisor - 1U;
                 } else if (round_off) {
-                        off = rdivisor / 2;
+                        off = rdivisor / 2U;
                 }
         }

@@ -190,7 +190,7 @@ extern "C" {
  */
 static inline bool is_power_of_two(unsigned int x)
 {
-        return (x != 0U) && ((x & (x - 1)) == 0U);
+        return (x != 0U) && ((x & (x - 1U)) == 0U);
 }

 /**

@@ -59,23 +59,23 @@
 #define USB_INTERFACE_ASSOC_DESC_SIZE 8

 /* Descriptor type */
-#define USB_DEVICE_DESC 0x01
-#define USB_CONFIGURATION_DESC 0x02
-#define USB_STRING_DESC 0x03
-#define USB_INTERFACE_DESC 0x04
-#define USB_ENDPOINT_DESC 0x05
-#define USB_DEVICE_QUAL_DESC 0x06
-#define USB_OTHER_SPEED 0x07
-#define USB_INTERFACE_POWER 0x08
-#define USB_INTERFACE_ASSOC_DESC 0x0B
-#define USB_DEVICE_CAPABILITY_DESC 0x10
-#define USB_HID_DESC 0x21
-#define USB_HID_REPORT_DESC 0x22
-#define USB_CS_INTERFACE_DESC 0x24
-#define USB_CS_ENDPOINT_DESC 0x25
-#define USB_DFU_FUNCTIONAL_DESC 0x21
-#define USB_ASSOCIATION_DESC 0x0B
-#define USB_BINARY_OBJECT_STORE_DESC 0x0F
+#define USB_DEVICE_DESC 0x01U
+#define USB_CONFIGURATION_DESC 0x02U
+#define USB_STRING_DESC 0x03U
+#define USB_INTERFACE_DESC 0x04U
+#define USB_ENDPOINT_DESC 0x05U
+#define USB_DEVICE_QUAL_DESC 0x06U
+#define USB_OTHER_SPEED 0x07U
+#define USB_INTERFACE_POWER 0x08U
+#define USB_INTERFACE_ASSOC_DESC 0x0BU
+#define USB_DEVICE_CAPABILITY_DESC 0x10U
+#define USB_HID_DESC 0x21U
+#define USB_HID_REPORT_DESC 0x22U
+#define USB_CS_INTERFACE_DESC 0x24U
+#define USB_CS_ENDPOINT_DESC 0x25U
+#define USB_DFU_FUNCTIONAL_DESC 0x21U
+#define USB_ASSOCIATION_DESC 0x0BU
+#define USB_BINARY_OBJECT_STORE_DESC 0x0FU

 /* Useful define */
 #define USB_1_1 0x0110

@@ -37,7 +37,7 @@
 #define ZEPHYR_INCLUDE_USB_USBSTRUCT_H_

 #define REQTYPE_GET_DIR(x) (((x)>>7)&0x01)
-#define REQTYPE_GET_TYPE(x) (((x)>>5)&0x03)
+#define REQTYPE_GET_TYPE(x) (((x)>>5)&0x03U)
 #define REQTYPE_GET_RECIP(x) ((x)&0x1F)

 #define REQTYPE_DIR_TO_DEVICE 0

@@ -84,7 +84,7 @@ struct usb_desc_header {
         uint8_t bDescriptorType; /**< descriptor type */
 };

-#define GET_DESC_TYPE(x) (((x)>>8)&0xFF)
-#define GET_DESC_INDEX(x) ((x)&0xFF)
+#define GET_DESC_TYPE(x) (((x)>>8)&0xFFU)
+#define GET_DESC_INDEX(x) ((x)&0xFFU)

 #endif /* ZEPHYR_INCLUDE_USB_USBSTRUCT_H_ */
@@ -126,7 +126,7 @@ static inline bool z_is_thread_timeout_active(struct k_thread *thread)

 static inline bool z_is_thread_ready(struct k_thread *thread)
 {
-        return !((z_is_thread_prevented_from_running(thread)) != 0 ||
+        return !((z_is_thread_prevented_from_running(thread)) != 0U ||
                  z_is_thread_timeout_active(thread));
 }

@@ -56,7 +56,7 @@ static inline bool is_condition_met(struct k_poll_event *event, uint32_t *state)
 {
         switch (event->type) {
         case K_POLL_TYPE_SEM_AVAILABLE:
-                if (k_sem_count_get(event->sem) > 0) {
+                if (k_sem_count_get(event->sem) > 0U) {
                         *state = K_POLL_STATE_SEM_AVAILABLE;
                         return true;
                 }

@@ -840,7 +840,7 @@ bool z_spin_lock_valid(struct k_spinlock *l)
         uintptr_t thread_cpu = l->thread_cpu;

         if (thread_cpu) {
-                if ((thread_cpu & 3) == _current_cpu->id) {
+                if ((thread_cpu & 3U) == _current_cpu->id) {
                         return false;
                 }
         }

@@ -66,7 +66,7 @@ static void remove_timeout(struct _timeout *t)

 static int32_t elapsed(void)
 {
-        return announce_remaining == 0 ? z_clock_elapsed() : 0;
+        return announce_remaining == 0 ? z_clock_elapsed() : 0U;
 }

 static int32_t next_timeout(void)

@@ -51,7 +51,7 @@ static void free_list_add_bidx(struct z_heap *h, chunkid_t c, int bidx)
 {
         struct z_heap_bucket *b = &h->buckets[bidx];

-        if (b->next == 0) {
+        if (b->next == 0U) {
                 CHECK((h->avail_buckets & (1 << bidx)) == 0);

                 /* Empty list, first item */

@@ -208,7 +208,7 @@ static chunkid_t alloc_chunk(struct z_heap *h, size_t sz)
          */
         size_t bmask = h->avail_buckets & ~((1 << (bi + 1)) - 1);

-        if ((bmask & h->avail_buckets) != 0) {
+        if ((bmask & h->avail_buckets) != 0U) {
                 int minbucket = __builtin_ctz(bmask & h->avail_buckets);
                 chunkid_t c = h->buckets[minbucket].next;

@@ -222,14 +222,14 @@ static chunkid_t alloc_chunk(struct z_heap *h, size_t sz)

 void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
 {
-        if (bytes == 0) {
+        if (bytes == 0U) {
                 return NULL;
         }

         struct z_heap *h = heap->heap;
         size_t chunk_sz = bytes_to_chunksz(h, bytes);
         chunkid_t c = alloc_chunk(h, chunk_sz);
-        if (c == 0) {
+        if (c == 0U) {
                 return NULL;
         }

@@ -47,7 +47,7 @@
  */
 typedef size_t chunkid_t;

-#define CHUNK_UNIT 8
+#define CHUNK_UNIT 8U

 typedef struct { char bytes[CHUNK_UNIT]; } chunk_unit_t;

@@ -66,7 +66,7 @@ struct z_heap {

 static inline bool big_heap_chunks(size_t chunks)
 {
-        return sizeof(void *) > 4 || chunks > 0x7fff;
+        return sizeof(void *) > 4U || chunks > 0x7fffU;
 }

 static inline bool big_heap_bytes(size_t bytes)

@@ -117,7 +117,7 @@ static inline void chunk_set(struct z_heap *h, chunkid_t c,

 static inline bool chunk_used(struct z_heap *h, chunkid_t c)
 {
-        return chunk_field(h, c, SIZE_AND_USED) & 1;
+        return chunk_field(h, c, SIZE_AND_USED) & 1U;
 }

 static inline size_t chunk_size(struct z_heap *h, chunkid_t c)

@@ -132,15 +132,15 @@ static inline void set_chunk_used(struct z_heap *h, chunkid_t c, bool used)

         if (big_heap(h)) {
                 if (used) {
-                        ((uint32_t *)cmem)[SIZE_AND_USED] |= 1;
+                        ((uint32_t *)cmem)[SIZE_AND_USED] |= 1U;
                 } else {
-                        ((uint32_t *)cmem)[SIZE_AND_USED] &= ~1;
+                        ((uint32_t *)cmem)[SIZE_AND_USED] &= ~1U;
                 }
         } else {
                 if (used) {
-                        ((uint16_t *)cmem)[SIZE_AND_USED] |= 1;
+                        ((uint16_t *)cmem)[SIZE_AND_USED] |= 1U;
                 } else {
-                        ((uint16_t *)cmem)[SIZE_AND_USED] &= ~1;
+                        ((uint16_t *)cmem)[SIZE_AND_USED] &= ~1U;
                 }
         }
 }

@@ -195,7 +195,7 @@ static inline void set_left_chunk_size(struct z_heap *h, chunkid_t c,

 static inline bool solo_free_header(struct z_heap *h, chunkid_t c)
 {
-        return big_heap(h) && chunk_size(h, c) == 1;
+        return big_heap(h) && chunk_size(h, c) == 1U;
 }

 static inline size_t chunk_header_bytes(struct z_heap *h)

@@ -210,7 +210,7 @@ static inline size_t heap_footer_bytes(size_t size)

 static inline size_t chunksz(size_t bytes)
 {
-        return (bytes + CHUNK_UNIT - 1) / CHUNK_UNIT;
+        return (bytes + CHUNK_UNIT - 1U) / CHUNK_UNIT;
 }

 static inline size_t bytes_to_chunksz(struct z_heap *h, size_t bytes)
@@ -64,7 +64,7 @@ static inline bool alloc_bit_is_set(struct sys_mem_pool_base *p,
         uint32_t *word;
         int bit = get_bit_ptr(p, level, bn, &word);

-        return (*word >> bit) & 1;
+        return (*word >> bit) & 1U;
 }
 #endif

@@ -76,7 +76,7 @@ static int partner_alloc_bits(struct sys_mem_pool_base *p, int level, int bn)
         uint32_t *word;
         int bit = get_bit_ptr(p, level, bn, &word);

-        return (*word >> (4*(bit / 4))) & 0xf;
+        return (*word >> (4*(bit / 4))) & 0xfU;
 }

 void z_sys_mem_pool_base_init(struct sys_mem_pool_base *p)

@@ -217,7 +217,7 @@ static int process_recheck(struct onoff_manager *mgr)
                    && !sys_slist_is_empty(&mgr->clients)) {
                 evt = EVT_START;
         } else if ((state == ONOFF_STATE_ON)
-                   && (mgr->refs == 0)) {
+                   && (mgr->refs == 0U)) {
                 evt = EVT_STOP;
         } else if ((state == ONOFF_STATE_ERROR)
                    && !sys_slist_is_empty(&mgr->clients)) {
@@ -40,7 +40,7 @@ typedef uint32_t printk_val_t;
  * less, obviously). Funny formula produces 10 max digits for 32 bit,
  * 21 for 64.
  */
-#define DIGITS_BUFLEN (11 * (sizeof(printk_val_t) / 4) - 1)
+#define DIGITS_BUFLEN (11U * (sizeof(printk_val_t) / 4U) - 1U)

 #ifdef CONFIG_PRINTK_SYNC
 static struct k_spinlock lock;

@@ -96,25 +96,25 @@ void *__printk_get_hook(void)
 }
 #endif /* CONFIG_PRINTK */

-static void print_digits(out_func_t out, void *ctx, printk_val_t num, int base,
+static void print_digits(out_func_t out, void *ctx, printk_val_t num, unsigned int base,
                          bool pad_before, char pad_char, int min_width)
 {
         char buf[DIGITS_BUFLEN];
-        int i;
+        unsigned int i;

         /* Print it backwards into the end of the buffer, low digits first */
-        for (i = DIGITS_BUFLEN - 1; num != 0; i--) {
+        for (i = DIGITS_BUFLEN - 1U; num != 0U; i--) {
                 buf[i] = "0123456789abcdef"[num % base];
                 num /= base;
         }

-        if (i == DIGITS_BUFLEN - 1) {
+        if (i == DIGITS_BUFLEN - 1U) {
                 buf[i] = '0';
         } else {
                 i++;
         }

-        int pad = MAX(min_width - (DIGITS_BUFLEN - i), 0);
+        int pad = MAX(min_width - (int)(DIGITS_BUFLEN - i), 0);

         for (/**/; pad > 0 && pad_before; pad--) {
                 out(pad_char, ctx);

@@ -130,20 +130,20 @@ static void print_digits(out_func_t out, void *ctx, printk_val_t num, int base,
 static void print_hex(out_func_t out, void *ctx, printk_val_t num,
                       enum pad_type padding, int min_width)
 {
-        print_digits(out, ctx, num, 16, padding != PAD_SPACE_AFTER,
+        print_digits(out, ctx, num, 16U, padding != PAD_SPACE_AFTER,
                      padding == PAD_ZERO_BEFORE ? '0' : ' ', min_width);
 }

 static void print_dec(out_func_t out, void *ctx, printk_val_t num,
                       enum pad_type padding, int min_width)
 {
-        print_digits(out, ctx, num, 10, padding != PAD_SPACE_AFTER,
+        print_digits(out, ctx, num, 10U, padding != PAD_SPACE_AFTER,
                      padding == PAD_ZERO_BEFORE ? '0' : ' ', min_width);
 }

 static bool ok64(out_func_t out, void *ctx, long long val)
 {
-        if (sizeof(printk_val_t) < 8 && val != (long) val) {
+        if (sizeof(printk_val_t) < 8U && val != (long) val) {
                 out('E', ctx);
                 out('R', ctx);
                 out('R', ctx);

@@ -154,9 +154,9 @@ static bool ok64(out_func_t out, void *ctx, long long val)

 static bool negative(printk_val_t val)
 {
-        const printk_val_t hibit = ~(((printk_val_t) ~1) >> 1);
+        const printk_val_t hibit = ~(((printk_val_t) ~1) >> 1U);

-        return (val & hibit) != 0;
+        return (val & hibit) != 0U;
 }

 /**

@@ -266,7 +266,7 @@ void z_vprintk(out_func_t out, void *ctx, const char *fmt, va_list ap)
                         out('x', ctx);
                         /* left-pad pointers with zeros */
                         padding = PAD_ZERO_BEFORE;
-                        min_width = sizeof(void *) * 2;
+                        min_width = sizeof(void *) * 2U;
                         __fallthrough;
                 case 'x':
                 case 'X': {
@@ -40,7 +40,7 @@ static inline atomic_t bounded_inc(atomic_t *val, atomic_t minimum,

                 new_value = old_value < minimum ?
                             minimum + 1 : old_value + 1;
-        } while (atomic_cas(val, old_value, new_value) == 0);
+        } while (atomic_cas(val, old_value, new_value) == 0U);

         return old_value;
 }

@@ -12,7 +12,7 @@

 static void minimal_hexdump_line_print(const char *data, size_t length)
 {
-        for (int i = 0; i < HEXDUMP_BYTES_IN_LINE; i++) {
+        for (size_t i = 0; i < HEXDUMP_BYTES_IN_LINE; i++) {
                 if (i < length) {
                         printk("%02x ", data[i] & 0xFF);
                 } else {

@@ -22,7 +22,7 @@ static void minimal_hexdump_line_print(const char *data, size_t length)

         printk("|");

-        for (int i = 0; i < HEXDUMP_BYTES_IN_LINE; i++) {
+        for (size_t i = 0; i < HEXDUMP_BYTES_IN_LINE; i++) {
                 if (i < length) {
                         char c = data[i];

@@ -28,7 +28,7 @@
  */
 static atomic_val_t _rand32_counter;

-#define _RAND32_INC 1000000013
+#define _RAND32_INC 1000000013U

 /**
  *
@@ -228,7 +228,7 @@ static int usb_validate_ep_cfg_data(struct usb_ep_descriptor * const ep_descr,
                                     struct usb_cfg_data * const cfg_data,
                                     uint32_t *requested_ep)
 {
-        for (int i = 0; i < cfg_data->num_endpoints; i++) {
+        for (unsigned int i = 0; i < cfg_data->num_endpoints; i++) {
                 struct usb_ep_cfg_data *ep_data = cfg_data->endpoint;

                 /*

@@ -238,7 +238,7 @@ static int usb_validate_ep_cfg_data(struct usb_ep_descriptor * const ep_descr,
                         continue;
                 }

-                for (uint8_t idx = 1; idx < 16; idx++) {
+                for (uint8_t idx = 1; idx < 16U; idx++) {
                         struct usb_dc_ep_cfg_data ep_cfg;

                         ep_cfg.ep_type = (ep_descr->bmAttributes &

@@ -246,13 +246,13 @@ static int usb_validate_ep_cfg_data(struct usb_ep_descriptor * const ep_descr,
                         ep_cfg.ep_mps = ep_descr->wMaxPacketSize;
                         ep_cfg.ep_addr = ep_descr->bEndpointAddress;
                         if (ep_cfg.ep_addr & USB_EP_DIR_IN) {
-                                if ((*requested_ep & (1 << (idx + 16)))) {
+                                if ((*requested_ep & (1U << (idx + 16U)))) {
                                         continue;
                                 }

                                 ep_cfg.ep_addr = (USB_EP_DIR_IN | idx);
                         } else {
-                                if ((*requested_ep & (1 << (idx)))) {
+                                if ((*requested_ep & (1U << (idx)))) {
                                         continue;
                                 }

@@ -265,9 +265,9 @@ static int usb_validate_ep_cfg_data(struct usb_ep_descriptor * const ep_descr,
                         ep_descr->bEndpointAddress = ep_cfg.ep_addr;
                         ep_data[i].ep_addr = ep_cfg.ep_addr;
                         if (ep_cfg.ep_addr & USB_EP_DIR_IN) {
-                                *requested_ep |= (1 << (idx + 16));
+                                *requested_ep |= (1U << (idx + 16U));
                         } else {
-                                *requested_ep |= (1 << idx);
+                                *requested_ep |= (1U << idx);
                         }
                         LOG_DBG("endpoint 0x%x", ep_data[i].ep_addr);
                         return 0;

@@ -92,12 +92,12 @@ LOG_MODULE_REGISTER(usb_device);
 #define INTF_DESC_bAlternateSetting 3 /** Alternate setting offset */

 /* endpoint descriptor field offsets */
-#define ENDP_DESC_bEndpointAddress 2 /** Endpoint address offset */
-#define ENDP_DESC_bmAttributes 3 /** Bulk or interrupt? */
-#define ENDP_DESC_wMaxPacketSize 4 /** Maximum packet size offset */
+#define ENDP_DESC_bEndpointAddress 2U /** Endpoint address offset */
+#define ENDP_DESC_bmAttributes 3U /** Bulk or interrupt? */
+#define ENDP_DESC_wMaxPacketSize 4U /** Maximum packet size offset */

-#define MAX_NUM_REQ_HANDLERS 4
-#define MAX_STD_REQ_MSG_SIZE 8
+#define MAX_NUM_REQ_HANDLERS 4U
+#define MAX_STD_REQ_MSG_SIZE 8U

 /* Default USB control EP, always 0 and 0x80 */
 #define USB_CONTROL_OUT_EP0 0

@@ -415,7 +415,7 @@ static bool usb_get_descriptor(uint16_t type_index, uint16_t lang_id,
         uint8_t type = 0U;
         uint8_t index = 0U;
         uint8_t *p = NULL;
-        int32_t cur_index = 0;
+        uint32_t cur_index = 0U;
         bool found = false;

         /*Avoid compiler warning until this is used for something*/

@@ -434,7 +434,7 @@ static bool usb_get_descriptor(uint16_t type_index, uint16_t lang_id,
         }

         p = (uint8_t *)usb_dev.descriptors;
-        cur_index = 0;
+        cur_index = 0U;

         while (p[DESC_bLength] != 0U) {
                 if (p[DESC_bDescriptorType] == type) {