lib: os: various places fix missing final else
Several places in lib/os/ were missing the final else statement in an
if ... else if construct. This commit adds an empty else {} or applies a
simple refactor to comply with coding guideline 15.7 in:

- cbprintf_complete.c
- cbprintf_nano.c
- heap-validate.c
- heap.c
- onoff.c
- p4wq.c
- sem.c

It also resolves the checkpatch issue that block comments should align
the * on each line.

Signed-off-by: Jennifer Williams <jennifer.m.williams@intel.com>

parent c00bdcf1a8
commit 163b7f0d82

7 changed files with 65 additions and 42 deletions

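For context, coding guideline 15.7 (following MISRA C:2012 Rule 15.7) requires that every if ... else if chain be terminated by a final else, even one that does nothing. A minimal sketch of the pattern applied throughout this commit is shown below; the function and its parameters are illustrative stand-ins loosely modeled on the encode_uint() hunk, not code taken from lib/os/:

#include <stdbool.h>

/* Illustrative stand-in only, not code from this commit. */
static void set_altform(int radix, bool *altform_0, bool *altform_0c)
{
        if (radix == 8) {
                *altform_0 = true;      /* octal "0" prefix */
        } else if (radix == 16) {
                *altform_0c = true;     /* hex "0x" prefix */
        } else {
                ;       /* guideline 15.7: explicit, intentionally empty */
        }
}
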
lib/os/cbprintf_complete.c

@@ -557,6 +557,8 @@ int_conv:
                         default:
                                 break;
                         }
+                } else {
+                        ;
                 }

                 break;

@@ -586,6 +588,8 @@ int_conv:
                 } else if ((conv->length_mod != LENGTH_NONE)
                            && (conv->length_mod != LENGTH_UPPER_L)) {
                         conv->invalid = true;
+                } else {
+                        ;
                 }

                 break;

@@ -802,6 +806,8 @@ static char *encode_uint(uint_value_type value,
                         conv->altform_0 = true;
                 } else if (radix == 16) {
                         conv->altform_0c = true;
+                } else {
+                        ;
                 }
         }

@@ -878,6 +884,8 @@ static char *encode_float(double value,
                 *sign = '+';
         } else if (conv->flag_space) {
                 *sign = ' ';
+        } else {
+                ;
         }

         /* Extract the non-negative offset exponent and fraction. Record

@@ -1383,31 +1391,34 @@ int cbvprintf(cbprintf_cb out, void *ctx, const char *fp, va_list ap)
                 /* If dynamic width is specified, process it,
                  * otherwise set with if present.
                  */
-                if (conv->width_star) {
-                        width = va_arg(ap, int);
-
-                        if (width < 0) {
-                                conv->flag_dash = true;
-                                width = -width;
+                if (conv->width_present) {
+                        if (conv->width_star) {
+                                width = va_arg(ap, int);
+                                if (width < 0) {
+                                        conv->flag_dash = true;
+                                        width = -width;
+                                }
+                        } else {
+                                width = conv->width_value;
                         }
-                } else if (conv->width_present) {
-                        width = conv->width_value;
                 }

                 /* If dynamic precision is specified, process it, otherwise
                  * set precision if present. For floating point where
                  * precision is not present use 6.
                  */
-                if (conv->prec_star) {
-                        int arg = va_arg(ap, int);
-
-                        if (arg < 0) {
-                                conv->prec_present = false;
-                        } else {
-                                precision = arg;
+                if (conv->prec_present) {
+                        if (conv->prec_star) {
+                                int arg = va_arg(ap, int);
+
+                                if (arg < 0) {
+                                        conv->prec_present = false;
+                                } else {
+                                        precision = arg;
+                                }
+                        } else {
+                                precision = conv->prec_value;
                         }
-                } else if (conv->prec_present) {
-                        precision = conv->prec_value;
                 }

                 /* Reuse width and precision memory in conv for value

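The hunk above is the "simple refactor" variant of the fix: rather than appending an empty else, the width and precision handling is nested under the corresponding *_present flag so that the chain ends in a meaningful else. A condensed, self-contained sketch of that shape follows; the struct and function names are placeholders, not the real cbprintf internals, and it assumes (as the real parser does) that a '*' width also marks the width as present, which is what makes the two forms equivalent:

#include <stdarg.h>
#include <stdbool.h>

struct conv_flags {             /* simplified stand-in for the conversion state */
        bool width_present;     /* some width was specified */
        bool width_star;        /* width given as '*', i.e. taken from the va_list */
        int width_value;        /* width given as a literal in the format string */
};

/* Before: the chain ends on "else if", with no final else. */
static int get_width_before(const struct conv_flags *c, va_list *ap)
{
        int width = 0;

        if (c->width_star) {
                width = va_arg(*ap, int);
        } else if (c->width_present) {
                width = c->width_value;
        }
        return width;
}

/* After: nesting under width_present lets the chain end in a meaningful
 * else, satisfying 15.7 without adding an empty branch.
 */
static int get_width_after(const struct conv_flags *c, va_list *ap)
{
        int width = 0;

        if (c->width_present) {
                if (c->width_star) {
                        width = va_arg(*ap, int);
                } else {
                        width = c->width_value;
                }
        }
        return width;
}
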
lib/os/cbprintf_nano.c

@@ -204,6 +204,8 @@ start:
                 } else if (special == '+') {
                         prefix = "+";
                         min_width--;
+                } else {
+                        ;
                 }
                 data_len = convert_value(d, 10, 0, buf + sizeof(buf));
                 data = buf + sizeof(buf) - data_len;

lib/os/heap-validate.c

@@ -207,33 +207,34 @@ static bool rand_alloc_choice(struct z_heap_stress_rec *sr)
                 return true;
         } else if (sr->blocks_alloced >= sr->nblocks) {
                 return false;
-        }
-
-        /* The way this works is to scale the chance of choosing to
-         * allocate vs. free such that it's even odds when the heap is
-         * at the target percent, with linear tapering on the low
-         * slope (i.e. we choose to always allocate with an empty
-         * heap, allocate 50% of the time when the heap is exactly at
-         * the target, and always free when above the target). In
-         * practice, the operations aren't quite symmetric (you can
-         * always free, but your allocation might fail), and the units
-         * aren't matched (we're doing math based on bytes allocated
-         * and ignoring the overhead) but this is close enough. And
-         * yes, the math here is coarse (in units of percent), but
-         * that's good enough and fits well inside 32 bit quantities.
-         * (Note precision issue when heap size is above 40MB
-         * though!).
-         */
-        __ASSERT(sr->total_bytes < 0xffffffffU / 100, "too big for u32!");
-        uint32_t full_pct = (100 * sr->bytes_alloced) / sr->total_bytes;
-        uint32_t target = sr->target_percent ? sr->target_percent : 1;
-        uint32_t free_chance = 0xffffffffU;
-
-        if (full_pct < sr->target_percent) {
-                free_chance = full_pct * (0x80000000U / target);
-        }
-
-        return rand32() > free_chance;
+        } else {
+
+                /* The way this works is to scale the chance of choosing to
+                 * allocate vs. free such that it's even odds when the heap is
+                 * at the target percent, with linear tapering on the low
+                 * slope (i.e. we choose to always allocate with an empty
+                 * heap, allocate 50% of the time when the heap is exactly at
+                 * the target, and always free when above the target). In
+                 * practice, the operations aren't quite symmetric (you can
+                 * always free, but your allocation might fail), and the units
+                 * aren't matched (we're doing math based on bytes allocated
+                 * and ignoring the overhead) but this is close enough. And
+                 * yes, the math here is coarse (in units of percent), but
+                 * that's good enough and fits well inside 32 bit quantities.
+                 * (Note precision issue when heap size is above 40MB
+                 * though!).
+                 */
+                __ASSERT(sr->total_bytes < 0xffffffffU / 100, "too big for u32!");
+                uint32_t full_pct = (100 * sr->bytes_alloced) / sr->total_bytes;
+                uint32_t target = sr->target_percent ? sr->target_percent : 1;
+                uint32_t free_chance = 0xffffffffU;
+
+                if (full_pct < sr->target_percent) {
+                        free_chance = full_pct * (0x80000000U / target);
+                }
+
+                return rand32() > free_chance;
+        }
 }

 /* Chooses a size of block to allocate, logarithmically favoring

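As a side note on the probability scaling described in the comment above, the free_chance math can be checked in isolation. The small program below is illustrative only (not part of the commit); it reproduces the same computation and prints two sample points:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the scaling above: the threshold saturates at UINT32_MAX at or
 * above the target fill level (always free) and shrinks linearly toward
 * zero as the heap empties (always allocate).
 */
static uint32_t calc_free_chance(uint32_t full_pct, uint32_t target_percent)
{
        uint32_t target = target_percent ? target_percent : 1;
        uint32_t free_chance = 0xffffffffU;

        if (full_pct < target_percent) {
                free_chance = full_pct * (0x80000000U / target);
        }
        return free_chance;
}

int main(void)
{
        /* Half way to a 50% target: ~0x40000000, so a uniform 32-bit
         * random draw exceeds it about 75% of the time -> allocate.
         */
        printf("25/50 -> 0x%08" PRIx32 "\n", calc_free_chance(25, 50));

        /* At (or above) the target the threshold is UINT32_MAX, which
         * rand32() can never exceed -> always free.
         */
        printf("50/50 -> 0x%08" PRIx32 "\n", calc_free_chance(50, 50));

        return 0;
}
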
lib/os/heap.c

@@ -361,6 +361,8 @@ void *sys_heap_aligned_realloc(struct sys_heap *heap, void *ptr,
                 merge_chunks(h, c, rc);
                 set_chunk_used(h, c, true);
                 return ptr;
+        } else {
+                ;
         }

         /* Fallback: allocate and copy */

lib/os/onoff.c

@@ -222,6 +222,8 @@ static int process_recheck(struct onoff_manager *mgr)
         } else if ((state == ONOFF_STATE_ERROR)
                    && !sys_slist_is_empty(&mgr->clients)) {
                 evt = EVT_RESET;
+        } else {
+                ;
         }

         return evt;

@@ -406,6 +408,8 @@ static void process_event(struct onoff_manager *mgr,
         } else if ((mgr->flags & ONOFF_FLAG_RECHECK) != 0) {
                 mgr->flags &= ~ONOFF_FLAG_RECHECK;
                 evt = EVT_RECHECK;
+        } else {
+                ;
         }

         state = mgr->flags & ONOFF_STATE_MASK;

lib/os/p4wq.c

@@ -63,6 +63,8 @@ static inline bool item_lessthan(struct k_p4wq_work *a, struct k_p4wq_work *b)
         } else if ((a->priority == b->priority) &&
                    (a->deadline != b->deadline)) {
                 return a->deadline - b->deadline > 0;
+        } else {
+                ;
         }
         return false;
 }

lib/os/sem.c

@@ -74,8 +74,9 @@ int sys_sem_give(struct sys_sem *sem)
                 }
         } else if (old_value >= sem->limit) {
                 return -EAGAIN;
+        } else {
+                ;
         }

         return ret;
 }