diff --git a/lib/os/cbprintf_complete.c b/lib/os/cbprintf_complete.c
index 8f99abeb74f..a1a23efe549 100644
--- a/lib/os/cbprintf_complete.c
+++ b/lib/os/cbprintf_complete.c
@@ -557,8 +557,6 @@ int_conv:
 			default:
 				break;
 			}
-		} else {
-			;
 		}
 
 		break;
@@ -588,8 +586,6 @@ int_conv:
 		} else if ((conv->length_mod != LENGTH_NONE)
 			   && (conv->length_mod != LENGTH_UPPER_L)) {
 			conv->invalid = true;
-		} else {
-			;
 		}
 
 		break;
@@ -806,8 +802,6 @@ static char *encode_uint(uint_value_type value,
 			conv->altform_0 = true;
 		} else if (radix == 16) {
 			conv->altform_0c = true;
-		} else {
-			;
 		}
 	}
 
@@ -884,8 +878,6 @@ static char *encode_float(double value,
 		*sign = '+';
 	} else if (conv->flag_space) {
 		*sign = ' ';
-	} else {
-		;
 	}
 
 	/* Extract the non-negative offset exponent and fraction. Record
@@ -1391,34 +1383,31 @@ int cbvprintf(cbprintf_cb out, void *ctx, const char *fp, va_list ap)
 		/* If dynamic width is specified, process it,
 		 * otherwise set with if present.
 		 */
-		if (conv->width_present) {
-			if (conv->width_star) {
-				width = va_arg(ap, int);
-				if (width < 0) {
-					conv->flag_dash = true;
-					width = -width;
-				}
-			} else {
-				width = conv->width_value;
+		if (conv->width_star) {
+			width = va_arg(ap, int);
+
+			if (width < 0) {
+				conv->flag_dash = true;
+				width = -width;
 			}
+		} else if (conv->width_present) {
+			width = conv->width_value;
 		}
 
 		/* If dynamic precision is specified, process it, otherwise
 		 * set precision if present. For floating point where
 		 * precision is not present use 6.
 		 */
-		if (conv->prec_present) {
-			if (conv->prec_star) {
-				int arg = va_arg(ap, int);
+		if (conv->prec_star) {
+			int arg = va_arg(ap, int);
 
-				if (arg < 0) {
-					conv->prec_present = false;
-				} else {
-					precision = arg;
-				}
+			if (arg < 0) {
+				conv->prec_present = false;
 			} else {
-				precision = conv->prec_value;
+				precision = arg;
 			}
+		} else if (conv->prec_present) {
+			precision = conv->prec_value;
 		}
 
 		/* Reuse width and precision memory in conv for value
diff --git a/lib/os/cbprintf_nano.c b/lib/os/cbprintf_nano.c
index d1eba64ad44..ff51ea5232b 100644
--- a/lib/os/cbprintf_nano.c
+++ b/lib/os/cbprintf_nano.c
@@ -204,8 +204,6 @@ start:
 		} else if (special == '+') {
 			prefix = "+";
 			min_width--;
-		} else {
-			;
 		}
 		data_len = convert_value(d, 10, 0, buf + sizeof(buf));
 		data = buf + sizeof(buf) - data_len;
diff --git a/lib/os/heap-validate.c b/lib/os/heap-validate.c
index b2324108dfa..7cfefe3d687 100644
--- a/lib/os/heap-validate.c
+++ b/lib/os/heap-validate.c
@@ -207,34 +207,33 @@ static bool rand_alloc_choice(struct z_heap_stress_rec *sr)
 		return true;
 	} else if (sr->blocks_alloced >= sr->nblocks) {
 		return false;
-	} else {
-
-		/* The way this works is to scale the chance of choosing to
-		 * allocate vs. free such that it's even odds when the heap is
-		 * at the target percent, with linear tapering on the low
-		 * slope (i.e. we choose to always allocate with an empty
-		 * heap, allocate 50% of the time when the heap is exactly at
-		 * the target, and always free when above the target). In
-		 * practice, the operations aren't quite symmetric (you can
-		 * always free, but your allocation might fail), and the units
-		 * aren't matched (we're doing math based on bytes allocated
-		 * and ignoring the overhead) but this is close enough. And
-		 * yes, the math here is coarse (in units of percent), but
-		 * that's good enough and fits well inside 32 bit quantities.
-		 * (Note precision issue when heap size is above 40MB
-		 * though!).
-		 */
-		__ASSERT(sr->total_bytes < 0xffffffffU / 100, "too big for u32!");
-		uint32_t full_pct = (100 * sr->bytes_alloced) / sr->total_bytes;
-		uint32_t target = sr->target_percent ? sr->target_percent : 1;
-		uint32_t free_chance = 0xffffffffU;
-
-		if (full_pct < sr->target_percent) {
-			free_chance = full_pct * (0x80000000U / target);
-		}
-
-		return rand32() > free_chance;
 	}
+
+	/* The way this works is to scale the chance of choosing to
+	 * allocate vs. free such that it's even odds when the heap is
+	 * at the target percent, with linear tapering on the low
+	 * slope (i.e. we choose to always allocate with an empty
+	 * heap, allocate 50% of the time when the heap is exactly at
+	 * the target, and always free when above the target). In
+	 * practice, the operations aren't quite symmetric (you can
+	 * always free, but your allocation might fail), and the units
+	 * aren't matched (we're doing math based on bytes allocated
+	 * and ignoring the overhead) but this is close enough. And
+	 * yes, the math here is coarse (in units of percent), but
+	 * that's good enough and fits well inside 32 bit quantities.
+	 * (Note precision issue when heap size is above 40MB
+	 * though!).
+	 */
+	__ASSERT(sr->total_bytes < 0xffffffffU / 100, "too big for u32!");
+	uint32_t full_pct = (100 * sr->bytes_alloced) / sr->total_bytes;
+	uint32_t target = sr->target_percent ? sr->target_percent : 1;
+	uint32_t free_chance = 0xffffffffU;
+
+	if (full_pct < sr->target_percent) {
+		free_chance = full_pct * (0x80000000U / target);
+	}
+
+	return rand32() > free_chance;
 }
 
 /* Chooses a size of block to allocate, logarithmically favoring
diff --git a/lib/os/heap.c b/lib/os/heap.c
index 2dee196404c..363427a3e58 100644
--- a/lib/os/heap.c
+++ b/lib/os/heap.c
@@ -361,8 +361,6 @@ void *sys_heap_aligned_realloc(struct sys_heap *heap, void *ptr,
 		merge_chunks(h, c, rc);
 		set_chunk_used(h, c, true);
 		return ptr;
-	} else {
-		;
 	}
 
 	/* Fallback: allocate and copy */
diff --git a/lib/os/onoff.c b/lib/os/onoff.c
index cad1bac28fd..a7a2da37298 100644
--- a/lib/os/onoff.c
+++ b/lib/os/onoff.c
@@ -222,8 +222,6 @@ static int process_recheck(struct onoff_manager *mgr)
 	} else if ((state == ONOFF_STATE_ERROR)
 		   && !sys_slist_is_empty(&mgr->clients)) {
 		evt = EVT_RESET;
-	} else {
-		;
 	}
 
 	return evt;
@@ -408,8 +406,6 @@ static void process_event(struct onoff_manager *mgr,
 	} else if ((mgr->flags & ONOFF_FLAG_RECHECK) != 0) {
 		mgr->flags &= ~ONOFF_FLAG_RECHECK;
 		evt = EVT_RECHECK;
-	} else {
-		;
 	}
 
 	state = mgr->flags & ONOFF_STATE_MASK;
diff --git a/lib/os/p4wq.c b/lib/os/p4wq.c
index 9d97824b0d2..ca98a578997 100644
--- a/lib/os/p4wq.c
+++ b/lib/os/p4wq.c
@@ -63,8 +63,6 @@ static inline bool item_lessthan(struct k_p4wq_work *a, struct k_p4wq_work *b)
 	} else if ((a->priority == b->priority) &&
 		   (a->deadline != b->deadline)) {
 		return a->deadline - b->deadline > 0;
-	} else {
-		;
 	}
 	return false;
 }
diff --git a/lib/os/sem.c b/lib/os/sem.c
index e4b502e657b..3f1ad4df424 100644
--- a/lib/os/sem.c
+++ b/lib/os/sem.c
@@ -74,9 +74,8 @@ int sys_sem_give(struct sys_sem *sem)
 		}
 	} else if (old_value >= sem->limit) {
 		return -EAGAIN;
-	} else {
-		;
 	}
+
 	return ret;
 }
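Note (not part of the patch): the heap-validate.c hunk carries forward the comment explaining how the stress test scales its allocate-vs-free decision. Below is a minimal standalone sketch of that scaling, assuming a hypothetical choose_alloc() helper and a crude rand32_stub() in place of the heap stress RNG; it only illustrates the arithmetic, where free_chance reaches roughly half of UINT32_MAX at the target fill level (even odds) and anything above the target always frees.

/* Illustration only: mirrors the free_chance math in rand_alloc_choice().
 * choose_alloc() and rand32_stub() are hypothetical names, not Zephyr APIs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t rand32_stub(void)
{
	/* crude 32-bit value built from stdlib rand(); good enough for a demo */
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

static bool choose_alloc(uint32_t bytes_alloced, uint32_t total_bytes,
			 uint32_t target_percent)
{
	uint32_t full_pct = (100 * bytes_alloced) / total_bytes;
	uint32_t target = target_percent ? target_percent : 1;
	uint32_t free_chance = 0xffffffffU;	/* above target: always free */

	if (full_pct < target_percent) {
		/* scales linearly: 0 when empty, ~0x80000000 (50%) at target */
		free_chance = full_pct * (0x80000000U / target);
	}

	return rand32_stub() > free_chance;	/* true = allocate, false = free */
}

int main(void)
{
	/* hypothetical 1 MiB heap with a 50% target fill level */
	printf("empty heap  -> allocate? %d\n", choose_alloc(0, 1u << 20, 50));
	printf("at target   -> allocate? %d\n", choose_alloc(1u << 19, 1u << 20, 50));
	printf("over target -> allocate? %d\n", choose_alloc(900u << 10, 1u << 20, 50));
	return 0;
}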