mem_slab: move global lock to per slab lock
This avoids contention between unrelated slabs and allows for
userspace accessible slabs when located in memory partitions.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
parent 2b32e47a9a
commit 2bed37e534

2 changed files with 9 additions and 8 deletions
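
A minimal sketch of what the per-slab lock buys (the slab names, sizes, and thread setup below are made up for illustration; only the k_mem_slab_* calls and the defining macros are the actual Zephyr API of this era). With the previous file-scope lock in mem_slab.c, the two threads here would contend on the same spinlock even though they never touch the same slab; after this change, each thread only ever takes its own slab's lock:

#include <zephyr.h>

/* Two unrelated slabs, e.g. one per subsystem. */
K_MEM_SLAB_DEFINE(net_slab, 64, 16, 4);
K_MEM_SLAB_DEFINE(audio_slab, 256, 8, 4);

static void producer(void *slab_ptr, void *p2, void *p3)
{
	struct k_mem_slab *slab = slab_ptr;
	void *block;

	for (;;) {
		/* Takes only slab->lock now, not a kernel-wide lock. */
		if (k_mem_slab_alloc(slab, &block, K_FOREVER) == 0) {
			/* ... fill and consume the block ... */
			k_mem_slab_free(slab, &block);
		}
	}
}

K_THREAD_DEFINE(net_thread, 1024, producer, &net_slab, NULL, NULL, 5, 0, 0);
K_THREAD_DEFINE(audio_thread, 1024, producer, &audio_slab, NULL, NULL, 5, 0, 0);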
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -4724,6 +4724,7 @@ __syscall size_t k_pipe_write_avail(struct k_pipe *pipe);
 
 struct k_mem_slab {
 	_wait_q_t wait_q;
+	struct k_spinlock lock;
 	uint32_t num_blocks;
 	size_t block_size;
 	char *buffer;
@@ -4740,6 +4741,7 @@ struct k_mem_slab {
 #define Z_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
 			       slab_num_blocks) \
 	{ \
+	.lock = {}, \
 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
 	.num_blocks = slab_num_blocks, \
 	.block_size = slab_block_size, \
--- a/kernel/mem_slab.c
+++ b/kernel/mem_slab.c
@@ -15,8 +15,6 @@
 #include <init.h>
 #include <sys/check.h>
 
-static struct k_spinlock lock;
-
 #ifdef CONFIG_OBJECT_TRACING
 struct k_mem_slab *_trace_list_k_mem_slab;
 #endif /* CONFIG_OBJECT_TRACING */
@@ -88,6 +86,7 @@ int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
 	slab->block_size = block_size;
 	slab->buffer = buffer;
 	slab->num_used = 0U;
+	slab->lock = (struct k_spinlock) {};
 
 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
 	slab->max_used = 0U;
@@ -108,7 +107,7 @@ out:
 
 int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
 {
-	k_spinlock_key_t key = k_spin_lock(&lock);
+	k_spinlock_key_t key = k_spin_lock(&slab->lock);
 	int result;
 
 	if (slab->free_list != NULL) {
@@ -128,21 +127,21 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
 		result = -ENOMEM;
 	} else {
 		/* wait for a free block or timeout */
-		result = z_pend_curr(&lock, key, &slab->wait_q, timeout);
+		result = z_pend_curr(&slab->lock, key, &slab->wait_q, timeout);
 		if (result == 0) {
 			*mem = _current->base.swap_data;
 		}
 		return result;
 	}
 
-	k_spin_unlock(&lock, key);
+	k_spin_unlock(&slab->lock, key);
 
 	return result;
 }
 
 void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
 {
-	k_spinlock_key_t key = k_spin_lock(&lock);
+	k_spinlock_key_t key = k_spin_lock(&slab->lock);
 
 	if (slab->free_list == NULL) {
 		struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);
@@ -150,12 +149,12 @@ void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
 		if (pending_thread != NULL) {
 			z_thread_return_value_set_with_data(pending_thread, 0, *mem);
 			z_ready_thread(pending_thread);
-			z_reschedule(&lock, key);
+			z_reschedule(&slab->lock, key);
 			return;
 		}
 	}
 	**(char ***) mem = slab->free_list;
 	slab->free_list = *(char **) mem;
 	slab->num_used--;
-	k_spin_unlock(&lock, key);
+	k_spin_unlock(&slab->lock, key);
 }
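
An aside on the free path left untouched above: the pair "**(char ***) mem = slab->free_list;" then "slab->free_list = *(char **) mem;" implements an intrusive free list, where the first word of a freed block is reused to store the next-free pointer. No per-block metadata is needed, which is also why a slab block must be at least pointer-sized. A standalone sketch of the same technique, with illustrative names rather than Zephyr internals:

#include <stddef.h>

static char *free_list;	/* head of the chain of free blocks */

static void block_push(void *block)
{
	/* Store the current head inside the freed block itself... */
	*(char **)block = free_list;
	/* ...then make the block the new head. */
	free_list = block;
}

static void *block_pop(void)
{
	char *block = free_list;

	if (block != NULL) {
		/* Advance the head to the pointer stored in the block. */
		free_list = *(char **)block;
	}
	return block;
}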