Revert "lib: os: various places fix missing final else"

This reverts commit 163b7f0d82.

This is causing test failures; see #34624.

Fixes #34624

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Anas Nashif <anas.nashif@intel.com>, 2021-04-27 21:44:16 -04:00
commit b8312fab4c

7 changed files with 42 additions and 65 deletions
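For context, the change being reverted had terminated if ... else if chains with an explicitly empty final else, apparently in the style of MISRA C:2012 Rule 15.7 ("all if ... else if constructs shall be terminated with an else statement"). A minimal, runnable sketch of the pattern; the flags and printouts are hypothetical stand-ins for the real call sites in the hunks below:

#include <stdio.h>

int main(void)
{
	int flag_a = 0, flag_b = 1;	/* hypothetical conditions */

	if (flag_a) {
		printf("a\n");
	} else if (flag_b) {
		printf("b\n");
	} else {
		;	/* deliberately empty final else */
	}
	return 0;
}

The revert removes exactly those "} else { ; }" arms and, in the two places where the original commit had also restructured control flow (cbvprintf() and rand_alloc_choice()), restores the earlier structure.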


@@ -557,8 +557,6 @@ int_conv:
 			default:
 				break;
 			}
-		} else {
-			;
 		}
 		break;
@ -588,8 +586,6 @@ int_conv:
} else if ((conv->length_mod != LENGTH_NONE) } else if ((conv->length_mod != LENGTH_NONE)
&& (conv->length_mod != LENGTH_UPPER_L)) { && (conv->length_mod != LENGTH_UPPER_L)) {
conv->invalid = true; conv->invalid = true;
} else {
;
} }
break; break;
@@ -806,8 +802,6 @@ static char *encode_uint(uint_value_type value,
 			conv->altform_0 = true;
 		} else if (radix == 16) {
 			conv->altform_0c = true;
-		} else {
-			;
 		}
 	}
@@ -884,8 +878,6 @@ static char *encode_float(double value,
 		*sign = '+';
 	} else if (conv->flag_space) {
 		*sign = ' ';
-	} else {
-		;
 	}
 
 	/* Extract the non-negative offset exponent and fraction. Record
@@ -1391,34 +1383,31 @@ int cbvprintf(cbprintf_cb out, void *ctx, const char *fp, va_list ap)
 		/* If dynamic width is specified, process it,
 		 * otherwise set with if present.
 		 */
-		if (conv->width_present) {
-			if (conv->width_star) {
-				width = va_arg(ap, int);
-
-				if (width < 0) {
-					conv->flag_dash = true;
-					width = -width;
-				}
-			} else {
-				width = conv->width_value;
-			}
+		if (conv->width_star) {
+			width = va_arg(ap, int);
+
+			if (width < 0) {
+				conv->flag_dash = true;
+				width = -width;
+			}
+		} else if (conv->width_present) {
+			width = conv->width_value;
 		}
 
 		/* If dynamic precision is specified, process it, otherwise
 		 * set precision if present. For floating point where
 		 * precision is not present use 6.
 		 */
-		if (conv->prec_present) {
-			if (conv->prec_star) {
-				int arg = va_arg(ap, int);
-
-				if (arg < 0) {
-					conv->prec_present = false;
-				} else {
-					precision = arg;
-				}
-			} else {
-				precision = conv->prec_value;
-			}
+		if (conv->prec_star) {
+			int arg = va_arg(ap, int);
+
+			if (arg < 0) {
+				conv->prec_present = false;
+			} else {
+				precision = arg;
+			}
+		} else if (conv->prec_present) {
+			precision = conv->prec_value;
 		}
 
 		/* Reuse width and precision memory in conv for value
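The width/precision hunk above is the one place the reverted commit changed behavior rather than only adding empty else arms: in the nested form, a dynamic '*' width or precision was consumed from the va_list only when the corresponding _present flag was also set. If the conversion parser can set width_star/prec_star without the _present flag, the va_arg() call is skipped and every later argument is read from the wrong slot; that is one plausible (unverified here) mechanism for the failures in #34624. A standalone toy program illustrating that failure mode, not Zephyr code:

#include <stdarg.h>
#include <stdio.h>

/* Toy model: when a dynamic '*' width argument is not pulled from the
 * va_list, the next conversion reads the wrong slot.
 */
static void fmt(int consume_star_width, ...)
{
	va_list ap;

	va_start(ap, consume_star_width);
	if (consume_star_width) {
		(void)va_arg(ap, int);           /* the '*' width argument */
	}
	printf("value = %d\n", va_arg(ap, int)); /* the value to print */
	va_end(ap);
}

int main(void)
{
	fmt(1, 8, 42);	/* prints "value = 42": width consumed, va_list aligned */
	fmt(0, 8, 42);	/* prints "value = 8": width skipped, misaligned */
	return 0;
}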


@@ -204,8 +204,6 @@ start:
 		} else if (special == '+') {
 			prefix = "+";
 			min_width--;
-		} else {
-			;
 		}
 		data_len = convert_value(d, 10, 0, buf + sizeof(buf));
 		data = buf + sizeof(buf) - data_len;


@@ -207,34 +207,33 @@ static bool rand_alloc_choice(struct z_heap_stress_rec *sr)
 		return true;
 	} else if (sr->blocks_alloced >= sr->nblocks) {
 		return false;
-	} else {
-		/* The way this works is to scale the chance of choosing to
-		 * allocate vs. free such that it's even odds when the heap is
-		 * at the target percent, with linear tapering on the low
-		 * slope (i.e. we choose to always allocate with an empty
-		 * heap, allocate 50% of the time when the heap is exactly at
-		 * the target, and always free when above the target). In
-		 * practice, the operations aren't quite symmetric (you can
-		 * always free, but your allocation might fail), and the units
-		 * aren't matched (we're doing math based on bytes allocated
-		 * and ignoring the overhead) but this is close enough. And
-		 * yes, the math here is coarse (in units of percent), but
-		 * that's good enough and fits well inside 32 bit quantities.
-		 * (Note precision issue when heap size is above 40MB
-		 * though!).
-		 */
-		__ASSERT(sr->total_bytes < 0xffffffffU / 100, "too big for u32!");
-		uint32_t full_pct = (100 * sr->bytes_alloced) / sr->total_bytes;
-		uint32_t target = sr->target_percent ? sr->target_percent : 1;
-		uint32_t free_chance = 0xffffffffU;
-		if (full_pct < sr->target_percent) {
-			free_chance = full_pct * (0x80000000U / target);
-		}
-		return rand32() > free_chance;
 	}
+
+	/* The way this works is to scale the chance of choosing to
+	 * allocate vs. free such that it's even odds when the heap is
+	 * at the target percent, with linear tapering on the low
+	 * slope (i.e. we choose to always allocate with an empty
+	 * heap, allocate 50% of the time when the heap is exactly at
+	 * the target, and always free when above the target). In
+	 * practice, the operations aren't quite symmetric (you can
+	 * always free, but your allocation might fail), and the units
+	 * aren't matched (we're doing math based on bytes allocated
+	 * and ignoring the overhead) but this is close enough. And
+	 * yes, the math here is coarse (in units of percent), but
+	 * that's good enough and fits well inside 32 bit quantities.
+	 * (Note precision issue when heap size is above 40MB
+	 * though!).
+	 */
+	__ASSERT(sr->total_bytes < 0xffffffffU / 100, "too big for u32!");
+	uint32_t full_pct = (100 * sr->bytes_alloced) / sr->total_bytes;
+	uint32_t target = sr->target_percent ? sr->target_percent : 1;
+	uint32_t free_chance = 0xffffffffU;
+	if (full_pct < sr->target_percent) {
+		free_chance = full_pct * (0x80000000U / target);
+	}
+	return rand32() > free_chance;
 }
 
 /* Chooses a size of block to allocate, logarithmically favoring
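The restored comment above makes quantitative claims that can be checked directly: free_chance scales linearly with the heap's fill percentage, so the allocate probability P(rand32() > free_chance) falls from about 100% on an empty heap to about 50% just below the target, and drops to 0 (always free) at or above it. A standalone sketch that recomputes free_chance with the same expression (the target of 50 is an arbitrary value for illustration):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t target = 50;	/* hypothetical target_percent */
	uint32_t samples[] = { 0, 25, 49, 50, 75 };

	for (int i = 0; i < 5; i++) {	/* one entry per sample above */
		uint32_t full_pct = samples[i];
		uint32_t free_chance = 0xffffffffU;

		if (full_pct < target) {
			free_chance = full_pct * (0x80000000U / target);
		}
		/* P(rand32() > free_chance) over a uniform 32-bit range */
		printf("full=%2" PRIu32 "%%  alloc_p=%.2f\n", full_pct,
		       (0xffffffffU - free_chance) / 4294967296.0);
	}
	return 0;
}

This prints alloc_p values of 1.00, 0.75, 0.51, 0.00, 0.00, reproducing the comment's description: always allocate when empty, roughly even odds near the target, always free above it.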


@@ -361,8 +361,6 @@ void *sys_heap_aligned_realloc(struct sys_heap *heap, void *ptr,
 		merge_chunks(h, c, rc);
 		set_chunk_used(h, c, true);
 		return ptr;
-	} else {
-		;
 	}
 
 	/* Fallback: allocate and copy */


@@ -222,8 +222,6 @@ static int process_recheck(struct onoff_manager *mgr)
 	} else if ((state == ONOFF_STATE_ERROR)
 		   && !sys_slist_is_empty(&mgr->clients)) {
 		evt = EVT_RESET;
-	} else {
-		;
 	}
 
 	return evt;

@@ -408,8 +406,6 @@ static void process_event(struct onoff_manager *mgr,
 	} else if ((mgr->flags & ONOFF_FLAG_RECHECK) != 0) {
 		mgr->flags &= ~ONOFF_FLAG_RECHECK;
 		evt = EVT_RECHECK;
-	} else {
-		;
 	}
 
 	state = mgr->flags & ONOFF_STATE_MASK;


@@ -63,8 +63,6 @@ static inline bool item_lessthan(struct k_p4wq_work *a, struct k_p4wq_work *b)
 	} else if ((a->priority == b->priority) &&
 		   (a->deadline != b->deadline)) {
 		return a->deadline - b->deadline > 0;
-	} else {
-		;
 	}
 	return false;
 }


@@ -74,9 +74,8 @@ int sys_sem_give(struct sys_sem *sem)
 		}
 	} else if (old_value >= sem->limit) {
 		return -EAGAIN;
-	} else {
-		;
 	}
 
 	return ret;
 }