kernel: sched: Change boolean APIs to return bool
Change APIs that essentially return a boolean expression - 0 for
false and 1 for true - to return a bool.

MISRA-C rule 14.4

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
parent 420bb62071
commit 02ed85bd82

7 changed files with 42 additions and 40 deletions
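The change is mechanical across all seven files: functions whose return value is "essentially Boolean" (0 for false, 1 for true) now return bool, and implicit truth tests are made explicit where the diff touches them. A minimal standalone sketch of the before/after shape, using illustrative names rather than any Zephyr API:

#include <stdbool.h>
#include <stdio.h>

/* Before: the 0/1 contract is only implied by the name and a comment. */
static int is_flag_set_old(unsigned int flags, unsigned int mask)
{
	return !!(flags & mask);	/* !! squashes the result to 0 or 1 */
}

/* After: the return type itself states the contract (MISRA-C rule 14.4,
 * "essentially Boolean" expressions), and the test is explicit.
 */
static bool is_flag_set(unsigned int flags, unsigned int mask)
{
	return (flags & mask) != 0U;
}

int main(void)
{
	printf("%d %d\n", is_flag_set_old(0x3U, 0x2U), is_flag_set(0x3U, 0x4U));
	return 0;
}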
@@ -81,7 +81,7 @@ typedef struct {
 	struct _priq_rb waitq;
 } _wait_q_t;
 
-extern int _priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
+extern bool _priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
 
 #define _WAIT_Q_INIT(wait_q) { { { .lessthan_fn = _priq_rb_lessthan } } }
 
@@ -43,6 +43,8 @@
 #ifndef ZEPHYR_INCLUDE_MISC_RB_H_
 #define ZEPHYR_INCLUDE_MISC_RB_H_
 
+#include <stdbool.h>
+
 struct rbnode {
 	struct rbnode *children[2];
 };
@@ -51,8 +53,8 @@ struct rbnode {
  * @typedef rb_lessthan_t
  * @brief Red/black tree comparison predicate
  *
- * Compares the two nodes and returns 1 if node A is strictly less
- * than B according to the tree's sorting criteria, 0 otherwise.
+ * Compares the two nodes and returns true if node A is strictly less
+ * than B according to the tree's sorting criteria, false otherwise.
 *
 * Note that during insert, the new node being inserted will always be
 * "A", where "B" is the existing node within the tree against which
@@ -60,7 +62,7 @@ struct rbnode {
 * implement "most/least recently added" semantics between nodes which
 * would otherwise compare as equal.
 */
-typedef int (*rb_lessthan_t)(struct rbnode *a, struct rbnode *b);
+typedef bool (*rb_lessthan_t)(struct rbnode *a, struct rbnode *b);
 
 struct rbtree {
 	struct rbnode *root;
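With rb_lessthan_t now returning bool, a tree comparator reads as a plain strict less-than predicate. Below is a small standalone sketch of a comparator with the same shape; the struct and function names are hypothetical and do not come from <misc/rb.h>:

#include <assert.h>
#include <stdbool.h>

/* Hypothetical node with an integer key; stands in for a user structure
 * that would embed struct rbnode in real code.
 */
struct my_node {
	int key;
};

/* Same shape as rb_lessthan_t after this commit: a strict less-than
 * predicate returning bool instead of 0/1.
 */
static bool my_lessthan(const struct my_node *a, const struct my_node *b)
{
	return a->key < b->key;
}

int main(void)
{
	struct my_node a = { .key = 1 };
	struct my_node b = { .key = 2 };

	assert(my_lessthan(&a, &b) && !my_lessthan(&b, &a));
	return 0;
}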
@@ -65,12 +65,12 @@ static ALWAYS_INLINE struct k_thread *_get_next_ready_thread(void)
 #endif
 
 
-static inline int _is_idle_thread(void *entry_point)
+static inline bool _is_idle_thread(void *entry_point)
 {
 	return entry_point == idle;
 }
 
-static inline int _is_thread_pending(struct k_thread *thread)
+static inline bool _is_thread_pending(struct k_thread *thread)
 {
 	return !!(thread->base.thread_state & _THREAD_PENDING);
 }
@@ -84,32 +84,32 @@ static inline int _is_thread_prevented_from_running(struct k_thread *thread)
 
 }
 
-static inline int _is_thread_timeout_active(struct k_thread *thread)
+static inline bool _is_thread_timeout_active(struct k_thread *thread)
 {
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 	return thread->base.timeout.delta_ticks_from_prev != _INACTIVE;
 #else
-	return 0;
+	return false;
 #endif
 }
 
-static inline int _is_thread_ready(struct k_thread *thread)
+static inline bool _is_thread_ready(struct k_thread *thread)
 {
-	return !(_is_thread_prevented_from_running(thread) ||
+	return !((_is_thread_prevented_from_running(thread)) != 0 ||
 		 _is_thread_timeout_active(thread));
 }
 
-static inline int _has_thread_started(struct k_thread *thread)
+static inline bool _has_thread_started(struct k_thread *thread)
 {
-	return !(thread->base.thread_state & _THREAD_PRESTART);
+	return (thread->base.thread_state & _THREAD_PRESTART) == 0;
 }
 
-static inline int _is_thread_state_set(struct k_thread *thread, u32_t state)
+static inline bool _is_thread_state_set(struct k_thread *thread, u32_t state)
 {
 	return !!(thread->base.thread_state & state);
 }
 
-static inline int _is_thread_queued(struct k_thread *thread)
+static inline bool _is_thread_queued(struct k_thread *thread)
 {
 	return _is_thread_state_set(thread, _THREAD_QUEUED);
 }
@@ -160,7 +160,7 @@ static inline void _mark_thread_as_not_queued(struct k_thread *thread)
 	_reset_thread_states(thread, _THREAD_QUEUED);
 }
 
-static inline int _is_under_prio_ceiling(int prio)
+static inline bool _is_under_prio_ceiling(int prio)
 {
 	return prio >= CONFIG_PRIORITY_CEILING;
 }
@@ -170,55 +170,55 @@ static inline int _get_new_prio_with_ceiling(int prio)
 	return _is_under_prio_ceiling(prio) ? prio : CONFIG_PRIORITY_CEILING;
 }
 
-static inline int _is_prio1_higher_than_or_equal_to_prio2(int prio1, int prio2)
+static inline bool _is_prio1_higher_than_or_equal_to_prio2(int prio1, int prio2)
 {
 	return prio1 <= prio2;
 }
 
-static inline int _is_prio_higher_or_equal(int prio1, int prio2)
+static inline bool _is_prio_higher_or_equal(int prio1, int prio2)
 {
 	return _is_prio1_higher_than_or_equal_to_prio2(prio1, prio2);
 }
 
-static inline int _is_prio1_lower_than_or_equal_to_prio2(int prio1, int prio2)
+static inline bool _is_prio1_lower_than_or_equal_to_prio2(int prio1, int prio2)
 {
 	return prio1 >= prio2;
 }
 
-static inline int _is_prio1_higher_than_prio2(int prio1, int prio2)
+static inline bool _is_prio1_higher_than_prio2(int prio1, int prio2)
 {
 	return prio1 < prio2;
 }
 
-static inline int _is_prio_higher(int prio, int test_prio)
+static inline bool _is_prio_higher(int prio, int test_prio)
 {
 	return _is_prio1_higher_than_prio2(prio, test_prio);
 }
 
-static inline int _is_prio_lower_or_equal(int prio1, int prio2)
+static inline bool _is_prio_lower_or_equal(int prio1, int prio2)
 {
 	return _is_prio1_lower_than_or_equal_to_prio2(prio1, prio2);
 }
 
-int _is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2);
+bool _is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2);
 
-static inline int _is_valid_prio(int prio, void *entry_point)
+static inline bool _is_valid_prio(int prio, void *entry_point)
 {
 	if (prio == K_IDLE_PRIO && _is_idle_thread(entry_point)) {
-		return 1;
+		return true;
 	}
 
 	if (!_is_prio_higher_or_equal(prio,
 				      K_LOWEST_APPLICATION_THREAD_PRIO)) {
-		return 0;
+		return false;
 	}
 
 	if (!_is_prio_lower_or_equal(prio,
 				     K_HIGHEST_APPLICATION_THREAD_PRIO)) {
-		return 0;
+		return false;
 	}
 
-	return 1;
+	return true;
 }
 
 static inline void _ready_thread(struct k_thread *thread)
@@ -272,7 +272,7 @@ static ALWAYS_INLINE void _sched_unlock_no_reschedule(void)
 #endif
 }
 
-static ALWAYS_INLINE int _is_thread_timeout_expired(struct k_thread *thread)
+static ALWAYS_INLINE bool _is_thread_timeout_expired(struct k_thread *thread)
 {
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 	return thread->base.timeout.delta_ticks_from_prev == _EXPIRED;
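Two flag-test styles are visible in the hunks above: _is_thread_pending and _is_thread_state_set keep the !!(state & mask) idiom (now converted to bool on return), while _has_thread_started moves to an explicit (state & mask) == 0 comparison. Both produce the same truth value; a standalone sketch of that equivalence, with an illustrative flag value rather than the kernel's:

#include <assert.h>
#include <stdbool.h>

#define FLAG_PRESTART 0x04U	/* illustrative bit, not the kernel's value */

static bool started_logical_not(unsigned int state)
{
	return !(state & FLAG_PRESTART);	/* old style: logical not */
}

static bool started_explicit_cmp(unsigned int state)
{
	return (state & FLAG_PRESTART) == 0U;	/* new style: explicit compare */
}

int main(void)
{
	for (unsigned int s = 0U; s < 16U; s++) {
		assert(started_logical_not(s) == started_explicit_cmp(s));
	}
	return 0;
}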
@@ -84,10 +84,10 @@ static inline int _is_idle(struct k_thread *thread)
 #endif
 }
 
-int _is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2)
+bool _is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2)
 {
 	if (t1->base.prio < t2->base.prio) {
-		return 1;
+		return true;
 	}
 
 #ifdef CONFIG_SCHED_DEADLINE
@@ -106,7 +106,7 @@ int _is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2)
 	}
 #endif
 
-	return 0;
+	return false;
 }
 
 static int should_preempt(struct k_thread *th, int preempt_ok)
@@ -355,7 +355,7 @@ void _unpend_thread(struct k_thread *thread)
  */
 void _thread_priority_set(struct k_thread *thread, int prio)
 {
-	int need_sched = 0;
+	bool need_sched = 0;
 
 	LOCKED(&sched_lock) {
 		need_sched = _is_thread_ready(thread);
@@ -496,7 +496,7 @@ struct k_thread *_priq_dumb_best(sys_dlist_t *pq)
 			    struct k_thread, base.qnode_dlist);
 }
 
-int _priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
+bool _priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
 {
 	struct k_thread *ta, *tb;
 
@@ -504,9 +504,9 @@ int _priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
 	tb = CONTAINER_OF(b, struct k_thread, base.qnode_rb);
 
 	if (_is_t1_higher_prio_than_t2(ta, tb)) {
-		return 1;
+		return true;
 	} else if (_is_t1_higher_prio_than_t2(tb, ta)) {
-		return 0;
+		return false;
 	} else {
 		return ta->base.order_key < tb->base.order_key ? 1 : 0;
 	}
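_priq_rb_lessthan above keeps its tie-break unchanged: when neither thread has strictly higher priority, base.order_key decides, so equal-priority threads retain their insertion order. A standalone sketch of that primary-key-plus-tie-break comparator shape, with hypothetical types in place of the scheduler's:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical queue item: a priority plus a monotonically increasing
 * insertion counter used only to break ties.
 */
struct item {
	int prio;		/* lower value = higher priority */
	uint32_t order;		/* insertion order */
};

/* Strict less-than with a tie-break, returning bool: priority decides
 * first; equal priorities fall back to insertion order (FIFO).
 */
static bool item_lessthan(const struct item *a, const struct item *b)
{
	if (a->prio != b->prio) {
		return a->prio < b->prio;
	}
	return a->order < b->order;
}

int main(void)
{
	struct item x = { .prio = 5, .order = 1 };
	struct item y = { .prio = 5, .order = 2 };

	return item_lessthan(&x, &y) ? 0 : 1;	/* x sorts first, exits 0 */
}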
@@ -670,7 +670,7 @@ int _unpend_all(_wait_q_t *waitq)
 	int need_sched = 0;
 	struct k_thread *th;
 
-	while ((th = _waitq_head(waitq))) {
+	while ((th = _waitq_head(waitq)) != NULL) {
 		_unpend_thread(th);
 		_ready_thread(th);
 		need_sched = 1;
@@ -74,7 +74,7 @@ extern struct _k_object *_k_object_gperf_find(void *obj);
 extern void _k_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
 					     void *context);
 
-static int node_lessthan(struct rbnode *a, struct rbnode *b);
+static bool node_lessthan(struct rbnode *a, struct rbnode *b);
 
 /*
  * Red/black tree of allocated kernel objects, for reasonably fast lookups
@@ -109,7 +109,7 @@ static size_t obj_size_get(enum k_objects otype)
 	return ret;
 }
 
-static int node_lessthan(struct rbnode *a, struct rbnode *b)
+static bool node_lessthan(struct rbnode *a, struct rbnode *b)
 {
 	return a < b;
 }
@@ -227,7 +227,7 @@ void rb_insert(struct rbtree *tree, struct rbnode *node)
 
 		struct rbnode *parent = stack[stacksz - 1];
 
-		int side = !tree->lessthan_fn(node, parent);
+		int side = tree->lessthan_fn(node, parent) ? 0 : 1;
 
 		set_child(parent, side, node);
 		set_color(node, RED);
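The rb_insert hunk above derives the 0/1 child index from the now-bool comparator with a conditional expression rather than by negating it with !, keeping the Boolean result out of integer arithmetic. A standalone sketch of the two equivalent forms, with a hypothetical predicate in place of the tree's lessthan_fn:

#include <assert.h>
#include <stdbool.h>

/* Hypothetical predicate standing in for tree->lessthan_fn. */
static bool lessthan(int a, int b)
{
	return a < b;
}

int main(void)
{
	int node = 3;
	int parent = 7;

	/* Old form: negate the predicate and use the int result directly. */
	int side_old = !lessthan(node, parent);		/* 0 = left, 1 = right */

	/* New form: map the Boolean to a child index explicitly. */
	int side_new = lessthan(node, parent) ? 0 : 1;

	assert(side_old == side_new);
	return side_new;	/* 0 here: 3 < 7 picks the left child */
}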
@@ -49,7 +49,7 @@ int node_index(struct rbnode *n)
 }
 
 /* Our "lessthan" is just the location of the struct */
-int node_lessthan(struct rbnode *a, struct rbnode *b)
+bool node_lessthan(struct rbnode *a, struct rbnode *b)
 {
 	if (current_insertee) {
 		CHECK(a == current_insertee);