lib/os/heap: rename struct z_heap.len to struct z_heap.end_chunk

The end marker chunk was represented by the len field of struct z_heap.
It is now renamed to end_chunk to make it more obvious what it is.

And while at it...

Given that it is used in size_too_big() to cap the allocation size
already, we no longer need to test the bucket index against the
biggest index possible derived from end_chunk in alloc_chunk(). The
corresponding bucket_idx() call is relatively expensive on some
architectures so avoiding it (turning it into a CHECK() instead) is
a good thing.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
This commit is contained in:
Nicolas Pitre 2021-03-17 21:05:49 -04:00 committed by Anas Nashif
commit a54e101a1e
3 changed files with 20 additions and 26 deletions

View file

@@ -19,7 +19,7 @@
 static size_t max_chunkid(struct z_heap *h)
 {
-	return h->len - min_chunk_size(h);
+	return h->end_chunk - min_chunk_size(h);
 }

 #define VALIDATE(cond) do { if (!(cond)) { return false; } } while (0)
@@ -28,14 +28,14 @@ static bool in_bounds(struct z_heap *h, chunkid_t c)
 {
 	VALIDATE(c >= right_chunk(h, 0));
 	VALIDATE(c <= max_chunkid(h));
-	VALIDATE(chunk_size(h, c) < h->len);
+	VALIDATE(chunk_size(h, c) < h->end_chunk);
 	return true;
 }

 static bool valid_chunk(struct z_heap *h, chunkid_t c)
 {
 	VALIDATE(chunk_size(h, c) > 0);
-	VALIDATE(c + chunk_size(h, c) <= h->len);
+	VALIDATE(c + chunk_size(h, c) <= h->end_chunk);
 	VALIDATE(in_bounds(h, c));
 	VALIDATE(right_chunk(h, left_chunk(h, c)) == c);
 	VALIDATE(left_chunk(h, right_chunk(h, c)) == c);
@@ -85,7 +85,7 @@ bool sys_heap_validate(struct sys_heap *heap)
 			return false;
 		}
 	}
-	if (c != h->len) {
+	if (c != h->end_chunk) {
 		return false;	/* Should have exactly consumed the buffer */
 	}
@@ -93,7 +93,7 @@ bool sys_heap_validate(struct sys_heap *heap)
 	 * should be correct, and all chunk entries should point into
 	 * valid unused chunks.  Mark those chunks USED, temporarily.
 	 */
-	for (int b = 0; b <= bucket_idx(h, h->len); b++) {
+	for (int b = 0; b <= bucket_idx(h, h->end_chunk); b++) {
 		chunkid_t c0 = h->buckets[b].next;
 		uint32_t n = 0;
@@ -137,7 +137,7 @@ bool sys_heap_validate(struct sys_heap *heap)
 		set_chunk_used(h, c, solo_free_header(h, c));
 	}
-	if (c != h->len) {
+	if (c != h->end_chunk) {
 		return false;	/* Should have exactly consumed the buffer */
 	}
@@ -145,7 +145,7 @@ bool sys_heap_validate(struct sys_heap *heap)
 	 * pass caught all the blocks and that they now show UNUSED.
 	 * Mark them USED.
 	 */
-	for (int b = 0; b <= bucket_idx(h, h->len); b++) {
+	for (int b = 0; b <= bucket_idx(h, h->end_chunk); b++) {
 		chunkid_t c0 = h->buckets[b].next;
 		int n = 0;
@@ -318,11 +318,11 @@ void sys_heap_stress(void *(*alloc)(void *arg, size_t bytes),
  */
 void heap_print_info(struct z_heap *h, bool dump_chunks)
 {
-	int i, nb_buckets = bucket_idx(h, h->len) + 1;
+	int i, nb_buckets = bucket_idx(h, h->end_chunk) + 1;
 	size_t free_bytes, allocated_bytes, total, overhead;

 	printk("Heap at %p contains %d units in %d buckets\n\n",
-	       chunk_buf(h), h->len, nb_buckets);
+	       chunk_buf(h), h->end_chunk, nb_buckets);

 	printk("  bucket#    min units        total      largest      largest\n"
 	       "           threshold       chunks      (units)      (bytes)\n"
@@ -352,7 +352,7 @@ void heap_print_info(struct z_heap *h, bool dump_chunks)
 	}

 	free_bytes = allocated_bytes = 0;
 	for (chunkid_t c = 0; ; c = right_chunk(h, c)) {
-		if (c == 0 || c == h->len) {
+		if (c == 0 || c == h->end_chunk) {
 			/* those are always allocated for internal purposes */
 		} else if (chunk_used(h, c)) {
 			allocated_bytes += chunk_size(h, c) * CHUNK_UNIT
@@ -371,16 +371,13 @@ void heap_print_info(struct z_heap *h, bool dump_chunks)
 			       left_chunk(h, c),
 			       right_chunk(h, c));
 		}
-		if (c == h->len) {
+		if (c == h->end_chunk) {
 			break;
 		}
 	}

-	/*
-	 * The final chunk at h->len is just a header serving as a end
-	 * marker. It is part of the overhead.
-	 */
-	total = h->len * CHUNK_UNIT + chunk_header_bytes(h);
+	/* The end marker chunk has a header. It is part of the overhead. */
+	total = h->end_chunk * CHUNK_UNIT + chunk_header_bytes(h);
 	overhead = total - free_bytes - allocated_bytes;
 	printk("\n%zd free bytes, %zd allocated bytes, overhead = %zd bytes (%zd.%zd%%)\n",
 	       free_bytes, allocated_bytes, overhead,

View file

@@ -172,9 +172,7 @@ static chunkid_t alloc_chunk(struct z_heap *h, size_t sz)
 	int bi = bucket_idx(h, sz);
 	struct z_heap_bucket *b = &h->buckets[bi];

-	if (bi > bucket_idx(h, h->len)) {
-		return 0;
-	}
+	CHECK(bi <= bucket_idx(h, h->end_chunk));

 	/* First try a bounded count of items from the minimal bucket
 	 * size.  These may not fit, trying (e.g.) three means that
@@ -383,7 +381,7 @@ void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)
 	/* Must fit in a 31 bit count of HUNK_UNIT */
 	__ASSERT(bytes / CHUNK_UNIT <= 0x7fffffffU, "heap size is too big");

-	/* Reserve the final marker chunk's header */
+	/* Reserve the end marker chunk's header */
 	__ASSERT(bytes > heap_footer_bytes(bytes), "heap size is too small");
 	bytes -= heap_footer_bytes(bytes);
@@ -398,7 +396,7 @@ void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)
 	struct z_heap *h = (struct z_heap *)addr;

 	heap->heap = h;
 	h->chunk0_hdr_area = 0;
-	h->len = buf_sz;
+	h->end_chunk = buf_sz;
 	h->avail_buckets = 0;

 	int nb_buckets = bucket_idx(h, buf_sz) + 1;

View file

@@ -64,7 +64,7 @@ struct z_heap_bucket {
 struct z_heap {
 	uint64_t chunk0_hdr_area;	/* matches the largest header */
-	uint32_t len;
+	chunkid_t end_chunk;
 	uint32_t avail_buckets;
 	struct z_heap_bucket buckets[0];
 };
@@ -81,7 +81,7 @@ static inline bool big_heap_bytes(size_t bytes)
 static inline bool big_heap(struct z_heap *h)
 {
-	return big_heap_chunks(h->len);
+	return big_heap_chunks(h->end_chunk);
 }

 static inline chunk_unit_t *chunk_buf(struct z_heap *h)
@@ -106,7 +106,7 @@ static inline size_t chunk_field(struct z_heap *h, chunkid_t c,
 static inline void chunk_set(struct z_heap *h, chunkid_t c,
			     enum chunk_fields f, chunkid_t val)
 {
-	CHECK(c <= h->len);
+	CHECK(c <= h->end_chunk);

 	chunk_unit_t *buf = chunk_buf(h);
 	void *cmem = &buf[c];
@@ -239,9 +239,8 @@ static inline bool size_too_big(struct z_heap *h, size_t bytes)
 	/*
 	 * Quick check to bail out early if size is too big.
	 * Also guards against potential arithmetic overflows elsewhere.
-	 * There is a minimum of one chunk always in use by the heap header.
 	 */
-	return (bytes / CHUNK_UNIT) >= h->len;
+	return (bytes / CHUNK_UNIT) >= h->end_chunk;
 }

 /* For debugging */