kernel: Re-factor k_mem_slab definition

Rearranges the k_mem_slab fields so that information that describes
how much of the memory slab is used is co-located. This will allow
easier integration of its statistics into the object core statistics
reporting framework.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
This commit is contained in:
Peter Mitsis 2023-05-11 10:09:08 -04:00 committed by Johan Hedberg
commit 2f003e59e4
6 changed files with 58 additions and 52 deletions

View file

@ -4999,31 +4999,33 @@ __syscall void k_pipe_buffer_flush(struct k_pipe *pipe);
* @cond INTERNAL_HIDDEN * @cond INTERNAL_HIDDEN
*/ */
struct k_mem_slab { struct k_mem_slab_info {
_wait_q_t wait_q;
struct k_spinlock lock;
uint32_t num_blocks; uint32_t num_blocks;
size_t block_size; size_t block_size;
char *buffer;
char *free_list;
uint32_t num_used; uint32_t num_used;
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
uint32_t max_used; uint32_t max_used;
#endif #endif
};
struct k_mem_slab {
_wait_q_t wait_q;
struct k_spinlock lock;
char *buffer;
char *free_list;
struct k_mem_slab_info info;
SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab) SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab)
}; };
#define Z_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \ #define Z_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
slab_num_blocks) \ slab_num_blocks) \
{ \ { \
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \ .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
.lock = {}, \ .lock = {}, \
.num_blocks = slab_num_blocks, \ .buffer = slab_buffer, \
.block_size = slab_block_size, \ .free_list = NULL, \
.buffer = slab_buffer, \ .info = {slab_num_blocks, slab_block_size, 0} \
.free_list = NULL, \
.num_used = 0, \
} }
@ -5162,7 +5164,7 @@ extern void k_mem_slab_free(struct k_mem_slab *slab, void *mem);
*/ */
static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab) static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
{ {
return slab->num_used; return slab->info.num_used;
} }
/** /**
@ -5178,7 +5180,7 @@ static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab) static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
{ {
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
return slab->max_used; return slab->info.max_used;
#else #else
ARG_UNUSED(slab); ARG_UNUSED(slab);
return 0; return 0;
@ -5197,7 +5199,7 @@ static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
*/ */
static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab) static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
{ {
return slab->num_blocks - slab->num_used; return slab->info.num_blocks - slab->info.num_used;
} }
/** /**

View file

@ -32,7 +32,7 @@ static int create_free_list(struct k_mem_slab *slab)
char *p; char *p;
/* blocks must be word aligned */ /* blocks must be word aligned */
CHECKIF(((slab->block_size | (uintptr_t)slab->buffer) & CHECKIF(((slab->info.block_size | (uintptr_t)slab->buffer) &
(sizeof(void *) - 1)) != 0U) { (sizeof(void *) - 1)) != 0U) {
return -EINVAL; return -EINVAL;
} }
@ -40,10 +40,10 @@ static int create_free_list(struct k_mem_slab *slab)
slab->free_list = NULL; slab->free_list = NULL;
p = slab->buffer; p = slab->buffer;
for (j = 0U; j < slab->num_blocks; j++) { for (j = 0U; j < slab->info.num_blocks; j++) {
*(char **)p = slab->free_list; *(char **)p = slab->free_list;
slab->free_list = p; slab->free_list = p;
p += slab->block_size; p += slab->info.block_size;
} }
return 0; return 0;
} }
@ -79,14 +79,14 @@ int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
{ {
int rc = 0; int rc = 0;
slab->num_blocks = num_blocks; slab->info.num_blocks = num_blocks;
slab->block_size = block_size; slab->info.block_size = block_size;
slab->buffer = buffer; slab->buffer = buffer;
slab->num_used = 0U; slab->info.num_used = 0U;
slab->lock = (struct k_spinlock) {}; slab->lock = (struct k_spinlock) {};
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
slab->max_used = 0U; slab->info.max_used = 0U;
#endif #endif
rc = create_free_list(slab); rc = create_free_list(slab);
@ -113,10 +113,11 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
/* take a free block */ /* take a free block */
*mem = slab->free_list; *mem = slab->free_list;
slab->free_list = *(char **)(slab->free_list); slab->free_list = *(char **)(slab->free_list);
slab->num_used++; slab->info.num_used++;
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
slab->max_used = MAX(slab->num_used, slab->max_used); slab->info.max_used = MAX(slab->info.num_used,
slab->info.max_used);
#endif #endif
result = 0; result = 0;
@ -151,8 +152,9 @@ void k_mem_slab_free(struct k_mem_slab *slab, void *mem)
k_spinlock_key_t key = k_spin_lock(&slab->lock); k_spinlock_key_t key = k_spin_lock(&slab->lock);
__ASSERT(((char *)mem >= slab->buffer) && __ASSERT(((char *)mem >= slab->buffer) &&
((((char *)mem - slab->buffer) % slab->block_size) == 0) && ((((char *)mem - slab->buffer) % slab->info.block_size) == 0) &&
((char *)mem <= (slab->buffer + (slab->block_size * (slab->num_blocks - 1)))), ((char *)mem <= (slab->buffer + (slab->info.block_size *
(slab->info.num_blocks - 1)))),
"Invalid memory pointer provided"); "Invalid memory pointer provided");
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mem_slab, free, slab); SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mem_slab, free, slab);
@ -170,7 +172,7 @@ void k_mem_slab_free(struct k_mem_slab *slab, void *mem)
} }
*(char **) mem = slab->free_list; *(char **) mem = slab->free_list;
slab->free_list = (char *) mem; slab->free_list = (char *) mem;
slab->num_used--; slab->info.num_used--;
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, free, slab); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, free, slab);
@ -185,10 +187,12 @@ int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stat
k_spinlock_key_t key = k_spin_lock(&slab->lock); k_spinlock_key_t key = k_spin_lock(&slab->lock);
stats->allocated_bytes = slab->num_used * slab->block_size; stats->allocated_bytes = slab->info.num_used * slab->info.block_size;
stats->free_bytes = (slab->num_blocks - slab->num_used) * slab->block_size; stats->free_bytes = (slab->info.num_blocks - slab->info.num_used) *
slab->info.block_size;
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
stats->max_allocated_bytes = slab->max_used * slab->block_size; stats->max_allocated_bytes = slab->info.max_used *
slab->info.block_size;
#else #else
stats->max_allocated_bytes = 0; stats->max_allocated_bytes = 0;
#endif #endif
@ -207,7 +211,7 @@ int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab)
k_spinlock_key_t key = k_spin_lock(&slab->lock); k_spinlock_key_t key = k_spin_lock(&slab->lock);
slab->max_used = slab->num_used; slab->info.max_used = slab->info.num_used;
k_spin_unlock(&slab->lock, key); k_spin_unlock(&slab->lock, key);

View file

@ -4080,9 +4080,9 @@ static void context_info(struct net_context *context, void *user_data)
#if defined(CONFIG_NET_BUF_POOL_USAGE) #if defined(CONFIG_NET_BUF_POOL_USAGE)
PR("%p\t%u\t%u\tETX\n", PR("%p\t%u\t%u\tETX\n",
slab, slab->num_blocks, k_mem_slab_num_free_get(slab)); slab, slab->info.num_blocks, k_mem_slab_num_free_get(slab));
#else #else
PR("%p\t%d\tETX\n", slab, slab->num_blocks); PR("%p\t%d\tETX\n", slab, slab->info.num_blocks);
#endif #endif
info->are_external_pools = true; info->are_external_pools = true;
info->tx_slabs[info->pos] = slab; info->tx_slabs[info->pos] = slab;
@ -4133,10 +4133,10 @@ static int cmd_net_mem(const struct shell *sh, size_t argc, char *argv[])
PR("Address\t\tTotal\tAvail\tName\n"); PR("Address\t\tTotal\tAvail\tName\n");
PR("%p\t%d\t%u\tRX\n", PR("%p\t%d\t%u\tRX\n",
rx, rx->num_blocks, k_mem_slab_num_free_get(rx)); rx, rx->info.num_blocks, k_mem_slab_num_free_get(rx));
PR("%p\t%d\t%u\tTX\n", PR("%p\t%d\t%u\tTX\n",
tx, tx->num_blocks, k_mem_slab_num_free_get(tx)); tx, tx->info.num_blocks, k_mem_slab_num_free_get(tx));
PR("%p\t%d\t%ld\tRX DATA (%s)\n", rx_data, rx_data->buf_count, PR("%p\t%d\t%ld\tRX DATA (%s)\n", rx_data, rx_data->buf_count,
atomic_get(&rx_data->avail_count), rx_data->name); atomic_get(&rx_data->avail_count), rx_data->name);
@ -4146,8 +4146,8 @@ static int cmd_net_mem(const struct shell *sh, size_t argc, char *argv[])
#else #else
PR("Address\t\tTotal\tName\n"); PR("Address\t\tTotal\tName\n");
PR("%p\t%d\tRX\n", rx, rx->num_blocks); PR("%p\t%d\tRX\n", rx, rx->info.num_blocks);
PR("%p\t%d\tTX\n", tx, tx->num_blocks); PR("%p\t%d\tTX\n", tx, tx->info.num_blocks);
PR("%p\t%d\tRX DATA\n", rx_data, rx_data->buf_count); PR("%p\t%d\tRX DATA\n", rx_data, rx_data->buf_count);
PR("%p\t%d\tTX DATA\n", tx_data, tx_data->buf_count); PR("%p\t%d\tTX DATA\n", tx_data, tx_data->buf_count);
PR_INFO("Set %s to enable %s support.\n", PR_INFO("Set %s to enable %s support.\n",
@ -4772,7 +4772,7 @@ static int cmd_net_ping(const struct shell *sh, size_t argc, char *argv[])
static bool is_pkt_part_of_slab(const struct k_mem_slab *slab, const char *ptr) static bool is_pkt_part_of_slab(const struct k_mem_slab *slab, const char *ptr)
{ {
size_t last_offset = (slab->num_blocks - 1) * slab->block_size; size_t last_offset = (slab->info.num_blocks - 1) * slab->info.block_size;
size_t ptr_offset; size_t ptr_offset;
/* Check if pointer fits into slab buffer area. */ /* Check if pointer fits into slab buffer area. */
@ -4782,7 +4782,7 @@ static bool is_pkt_part_of_slab(const struct k_mem_slab *slab, const char *ptr)
/* Check if pointer offset is correct. */ /* Check if pointer offset is correct. */
ptr_offset = ptr - slab->buffer; ptr_offset = ptr - slab->buffer;
if (ptr_offset % slab->block_size != 0) { if (ptr_offset % slab->info.block_size != 0) {
return false; return false;
} }

View file

@ -172,7 +172,7 @@ uint32_t osMemoryPoolGetCapacity(osMemoryPoolId_t mp_id)
if (mslab == NULL) { if (mslab == NULL) {
return 0; return 0;
} else { } else {
return mslab->z_mslab.num_blocks; return mslab->z_mslab.info.num_blocks;
} }
} }
@ -186,7 +186,7 @@ uint32_t osMemoryPoolGetBlockSize(osMemoryPoolId_t mp_id)
if (mslab == NULL) { if (mslab == NULL) {
return 0; return 0;
} else { } else {
return mslab->z_mslab.block_size; return mslab->z_mslab.info.block_size;
} }
} }

View file

@ -9,20 +9,20 @@
int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout) int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
{ {
if (slab->num_used >= slab->num_blocks) { if (slab->info.num_used >= slab->info.num_blocks) {
*mem = NULL; *mem = NULL;
return -ENOMEM; return -ENOMEM;
} }
*mem = malloc(slab->block_size); *mem = malloc(slab->info.block_size);
zassert_not_null(*mem); zassert_not_null(*mem);
slab->num_used++; slab->info.num_used++;
return 0; return 0;
} }
void k_mem_slab_free(struct k_mem_slab *slab, void *mem) void k_mem_slab_free(struct k_mem_slab *slab, void *mem)
{ {
free(mem); free(mem);
slab->num_used--; slab->info.num_used--;
} }

View file

@ -183,8 +183,8 @@ static void get_free_packet_count(void)
struct k_mem_slab *rx, *tx; struct k_mem_slab *rx, *tx;
net_pkt_get_info(&rx, &tx, NULL, NULL); net_pkt_get_info(&rx, &tx, NULL, NULL);
orig_rx_num_blocks = rx->num_blocks; orig_rx_num_blocks = rx->info.num_blocks;
orig_tx_num_blocks = tx->num_blocks; orig_tx_num_blocks = tx->info.num_blocks;
} }
static void check_free_packet_count(void) static void check_free_packet_count(void)
@ -192,8 +192,8 @@ static void check_free_packet_count(void)
struct k_mem_slab *rx, *tx; struct k_mem_slab *rx, *tx;
net_pkt_get_info(&rx, &tx, NULL, NULL); net_pkt_get_info(&rx, &tx, NULL, NULL);
zassert_equal(rx->num_blocks, orig_rx_num_blocks, ""); zassert_equal(rx->info.num_blocks, orig_rx_num_blocks, "");
zassert_equal(tx->num_blocks, orig_tx_num_blocks, ""); zassert_equal(tx->info.num_blocks, orig_tx_num_blocks, "");
} }
static void test_iface_setup(void) static void test_iface_setup(void)