kernel: Re-factor k_mem_slab definition
Rearranges the k_mem_slab fields so that information that describes how much of the memory slab is used is co-located. This will allow easier integration of its statistics into the object core statistics reporting framework. Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
This commit is contained in:
parent
77e193eb03
commit
2f003e59e4
6 changed files with 58 additions and 52 deletions
|
@ -4999,17 +4999,21 @@ __syscall void k_pipe_buffer_flush(struct k_pipe *pipe);
|
|||
* @cond INTERNAL_HIDDEN
|
||||
*/
|
||||
|
||||
struct k_mem_slab {
|
||||
_wait_q_t wait_q;
|
||||
struct k_spinlock lock;
|
||||
struct k_mem_slab_info {
|
||||
uint32_t num_blocks;
|
||||
size_t block_size;
|
||||
char *buffer;
|
||||
char *free_list;
|
||||
uint32_t num_used;
|
||||
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
|
||||
uint32_t max_used;
|
||||
#endif
|
||||
};
|
||||
|
||||
struct k_mem_slab {
|
||||
_wait_q_t wait_q;
|
||||
struct k_spinlock lock;
|
||||
char *buffer;
|
||||
char *free_list;
|
||||
struct k_mem_slab_info info;
|
||||
|
||||
SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab)
|
||||
};
|
||||
|
@ -5019,11 +5023,9 @@ struct k_mem_slab {
|
|||
{ \
|
||||
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
|
||||
.lock = {}, \
|
||||
.num_blocks = slab_num_blocks, \
|
||||
.block_size = slab_block_size, \
|
||||
.buffer = slab_buffer, \
|
||||
.free_list = NULL, \
|
||||
.num_used = 0, \
|
||||
.info = {slab_num_blocks, slab_block_size, 0} \
|
||||
}
|
||||
|
||||
|
||||
|
@ -5162,7 +5164,7 @@ extern void k_mem_slab_free(struct k_mem_slab *slab, void *mem);
|
|||
*/
|
||||
static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
|
||||
{
|
||||
return slab->num_used;
|
||||
return slab->info.num_used;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -5178,7 +5180,7 @@ static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
|
|||
static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
|
||||
{
|
||||
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
|
||||
return slab->max_used;
|
||||
return slab->info.max_used;
|
||||
#else
|
||||
ARG_UNUSED(slab);
|
||||
return 0;
|
||||
|
@ -5197,7 +5199,7 @@ static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
|
|||
*/
|
||||
static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
|
||||
{
|
||||
return slab->num_blocks - slab->num_used;
|
||||
return slab->info.num_blocks - slab->info.num_used;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -32,7 +32,7 @@ static int create_free_list(struct k_mem_slab *slab)
|
|||
char *p;
|
||||
|
||||
/* blocks must be word aligned */
|
||||
CHECKIF(((slab->block_size | (uintptr_t)slab->buffer) &
|
||||
CHECKIF(((slab->info.block_size | (uintptr_t)slab->buffer) &
|
||||
(sizeof(void *) - 1)) != 0U) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -40,10 +40,10 @@ static int create_free_list(struct k_mem_slab *slab)
|
|||
slab->free_list = NULL;
|
||||
p = slab->buffer;
|
||||
|
||||
for (j = 0U; j < slab->num_blocks; j++) {
|
||||
for (j = 0U; j < slab->info.num_blocks; j++) {
|
||||
*(char **)p = slab->free_list;
|
||||
slab->free_list = p;
|
||||
p += slab->block_size;
|
||||
p += slab->info.block_size;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -79,14 +79,14 @@ int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
|
|||
{
|
||||
int rc = 0;
|
||||
|
||||
slab->num_blocks = num_blocks;
|
||||
slab->block_size = block_size;
|
||||
slab->info.num_blocks = num_blocks;
|
||||
slab->info.block_size = block_size;
|
||||
slab->buffer = buffer;
|
||||
slab->num_used = 0U;
|
||||
slab->info.num_used = 0U;
|
||||
slab->lock = (struct k_spinlock) {};
|
||||
|
||||
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
|
||||
slab->max_used = 0U;
|
||||
slab->info.max_used = 0U;
|
||||
#endif
|
||||
|
||||
rc = create_free_list(slab);
|
||||
|
@ -113,10 +113,11 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
|
|||
/* take a free block */
|
||||
*mem = slab->free_list;
|
||||
slab->free_list = *(char **)(slab->free_list);
|
||||
slab->num_used++;
|
||||
slab->info.num_used++;
|
||||
|
||||
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
|
||||
slab->max_used = MAX(slab->num_used, slab->max_used);
|
||||
slab->info.max_used = MAX(slab->info.num_used,
|
||||
slab->info.max_used);
|
||||
#endif
|
||||
|
||||
result = 0;
|
||||
|
@ -151,8 +152,9 @@ void k_mem_slab_free(struct k_mem_slab *slab, void *mem)
|
|||
k_spinlock_key_t key = k_spin_lock(&slab->lock);
|
||||
|
||||
__ASSERT(((char *)mem >= slab->buffer) &&
|
||||
((((char *)mem - slab->buffer) % slab->block_size) == 0) &&
|
||||
((char *)mem <= (slab->buffer + (slab->block_size * (slab->num_blocks - 1)))),
|
||||
((((char *)mem - slab->buffer) % slab->info.block_size) == 0) &&
|
||||
((char *)mem <= (slab->buffer + (slab->info.block_size *
|
||||
(slab->info.num_blocks - 1)))),
|
||||
"Invalid memory pointer provided");
|
||||
|
||||
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mem_slab, free, slab);
|
||||
|
@ -170,7 +172,7 @@ void k_mem_slab_free(struct k_mem_slab *slab, void *mem)
|
|||
}
|
||||
*(char **) mem = slab->free_list;
|
||||
slab->free_list = (char *) mem;
|
||||
slab->num_used--;
|
||||
slab->info.num_used--;
|
||||
|
||||
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, free, slab);
|
||||
|
||||
|
@ -185,10 +187,12 @@ int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stat
|
|||
|
||||
k_spinlock_key_t key = k_spin_lock(&slab->lock);
|
||||
|
||||
stats->allocated_bytes = slab->num_used * slab->block_size;
|
||||
stats->free_bytes = (slab->num_blocks - slab->num_used) * slab->block_size;
|
||||
stats->allocated_bytes = slab->info.num_used * slab->info.block_size;
|
||||
stats->free_bytes = (slab->info.num_blocks - slab->info.num_used) *
|
||||
slab->info.block_size;
|
||||
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
|
||||
stats->max_allocated_bytes = slab->max_used * slab->block_size;
|
||||
stats->max_allocated_bytes = slab->info.max_used *
|
||||
slab->info.block_size;
|
||||
#else
|
||||
stats->max_allocated_bytes = 0;
|
||||
#endif
|
||||
|
@ -207,7 +211,7 @@ int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab)
|
|||
|
||||
k_spinlock_key_t key = k_spin_lock(&slab->lock);
|
||||
|
||||
slab->max_used = slab->num_used;
|
||||
slab->info.max_used = slab->info.num_used;
|
||||
|
||||
k_spin_unlock(&slab->lock, key);
|
||||
|
||||
|
|
|
@ -4080,9 +4080,9 @@ static void context_info(struct net_context *context, void *user_data)
|
|||
|
||||
#if defined(CONFIG_NET_BUF_POOL_USAGE)
|
||||
PR("%p\t%u\t%u\tETX\n",
|
||||
slab, slab->num_blocks, k_mem_slab_num_free_get(slab));
|
||||
slab, slab->info.num_blocks, k_mem_slab_num_free_get(slab));
|
||||
#else
|
||||
PR("%p\t%d\tETX\n", slab, slab->num_blocks);
|
||||
PR("%p\t%d\tETX\n", slab, slab->info.num_blocks);
|
||||
#endif
|
||||
info->are_external_pools = true;
|
||||
info->tx_slabs[info->pos] = slab;
|
||||
|
@ -4133,10 +4133,10 @@ static int cmd_net_mem(const struct shell *sh, size_t argc, char *argv[])
|
|||
PR("Address\t\tTotal\tAvail\tName\n");
|
||||
|
||||
PR("%p\t%d\t%u\tRX\n",
|
||||
rx, rx->num_blocks, k_mem_slab_num_free_get(rx));
|
||||
rx, rx->info.num_blocks, k_mem_slab_num_free_get(rx));
|
||||
|
||||
PR("%p\t%d\t%u\tTX\n",
|
||||
tx, tx->num_blocks, k_mem_slab_num_free_get(tx));
|
||||
tx, tx->info.num_blocks, k_mem_slab_num_free_get(tx));
|
||||
|
||||
PR("%p\t%d\t%ld\tRX DATA (%s)\n", rx_data, rx_data->buf_count,
|
||||
atomic_get(&rx_data->avail_count), rx_data->name);
|
||||
|
@ -4146,8 +4146,8 @@ static int cmd_net_mem(const struct shell *sh, size_t argc, char *argv[])
|
|||
#else
|
||||
PR("Address\t\tTotal\tName\n");
|
||||
|
||||
PR("%p\t%d\tRX\n", rx, rx->num_blocks);
|
||||
PR("%p\t%d\tTX\n", tx, tx->num_blocks);
|
||||
PR("%p\t%d\tRX\n", rx, rx->info.num_blocks);
|
||||
PR("%p\t%d\tTX\n", tx, tx->info.num_blocks);
|
||||
PR("%p\t%d\tRX DATA\n", rx_data, rx_data->buf_count);
|
||||
PR("%p\t%d\tTX DATA\n", tx_data, tx_data->buf_count);
|
||||
PR_INFO("Set %s to enable %s support.\n",
|
||||
|
@ -4772,7 +4772,7 @@ static int cmd_net_ping(const struct shell *sh, size_t argc, char *argv[])
|
|||
|
||||
static bool is_pkt_part_of_slab(const struct k_mem_slab *slab, const char *ptr)
|
||||
{
|
||||
size_t last_offset = (slab->num_blocks - 1) * slab->block_size;
|
||||
size_t last_offset = (slab->info.num_blocks - 1) * slab->info.block_size;
|
||||
size_t ptr_offset;
|
||||
|
||||
/* Check if pointer fits into slab buffer area. */
|
||||
|
@ -4782,7 +4782,7 @@ static bool is_pkt_part_of_slab(const struct k_mem_slab *slab, const char *ptr)
|
|||
|
||||
/* Check if pointer offset is correct. */
|
||||
ptr_offset = ptr - slab->buffer;
|
||||
if (ptr_offset % slab->block_size != 0) {
|
||||
if (ptr_offset % slab->info.block_size != 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
|
@ -172,7 +172,7 @@ uint32_t osMemoryPoolGetCapacity(osMemoryPoolId_t mp_id)
|
|||
if (mslab == NULL) {
|
||||
return 0;
|
||||
} else {
|
||||
return mslab->z_mslab.num_blocks;
|
||||
return mslab->z_mslab.info.num_blocks;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -186,7 +186,7 @@ uint32_t osMemoryPoolGetBlockSize(osMemoryPoolId_t mp_id)
|
|||
if (mslab == NULL) {
|
||||
return 0;
|
||||
} else {
|
||||
return mslab->z_mslab.block_size;
|
||||
return mslab->z_mslab.info.block_size;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -9,20 +9,20 @@
|
|||
|
||||
int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
|
||||
{
|
||||
if (slab->num_used >= slab->num_blocks) {
|
||||
if (slab->info.num_used >= slab->info.num_blocks) {
|
||||
*mem = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
*mem = malloc(slab->block_size);
|
||||
*mem = malloc(slab->info.block_size);
|
||||
zassert_not_null(*mem);
|
||||
|
||||
slab->num_used++;
|
||||
slab->info.num_used++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void k_mem_slab_free(struct k_mem_slab *slab, void *mem)
|
||||
{
|
||||
free(mem);
|
||||
slab->num_used--;
|
||||
slab->info.num_used--;
|
||||
}
|
||||
|
|
|
@ -183,8 +183,8 @@ static void get_free_packet_count(void)
|
|||
struct k_mem_slab *rx, *tx;
|
||||
|
||||
net_pkt_get_info(&rx, &tx, NULL, NULL);
|
||||
orig_rx_num_blocks = rx->num_blocks;
|
||||
orig_tx_num_blocks = tx->num_blocks;
|
||||
orig_rx_num_blocks = rx->info.num_blocks;
|
||||
orig_tx_num_blocks = tx->info.num_blocks;
|
||||
}
|
||||
|
||||
static void check_free_packet_count(void)
|
||||
|
@ -192,8 +192,8 @@ static void check_free_packet_count(void)
|
|||
struct k_mem_slab *rx, *tx;
|
||||
|
||||
net_pkt_get_info(&rx, &tx, NULL, NULL);
|
||||
zassert_equal(rx->num_blocks, orig_rx_num_blocks, "");
|
||||
zassert_equal(tx->num_blocks, orig_tx_num_blocks, "");
|
||||
zassert_equal(rx->info.num_blocks, orig_rx_num_blocks, "");
|
||||
zassert_equal(tx->info.num_blocks, orig_tx_num_blocks, "");
|
||||
}
|
||||
|
||||
static void test_iface_setup(void)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue