libc/common: Don't use malloc mutex when CONFIG_MULTITHREADING=n
When multithreading is disabled, the whole mutex infrastructure isn't available. The common malloc code wasn't checking for this case, which caused build failures.

Signed-off-by: Keith Packard <keithp@keithp.com>
Tested-by: Detlev Zundel <dzu@member.fsf.org>
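The fix wraps locking in a pair of helpers that are real functions when CONFIG_MULTITHREADING is enabled and empty macros when it is not, so the allocator's call sites need no #ifdefs of their own. Below is a minimal, self-contained sketch of that pattern using plain pthreads as a stand-in for Zephyr's sys_mutex; HAVE_THREADS, my_lock, and my_unlock are illustrative names, not Zephyr APIs.

/* Sketch of the conditional-lock pattern; not Zephyr code. */
#include <assert.h>
#include <stdio.h>

#ifdef HAVE_THREADS             /* stand-in for CONFIG_MULTITHREADING */
#include <pthread.h>

static pthread_mutex_t heap_mutex = PTHREAD_MUTEX_INITIALIZER;

static inline void my_lock(void)
{
	int ret = pthread_mutex_lock(&heap_mutex);

	(void) ret;             /* keep -Wunused quiet when NDEBUG is set */
	assert(ret == 0);
}

static inline void my_unlock(void)
{
	(void) pthread_mutex_unlock(&heap_mutex);
}
#else
/* Single-threaded build: the calls below compile away to nothing. */
#define my_lock()
#define my_unlock()
#endif

int main(void)
{
	my_lock();              /* call sites never mention the mutex */
	printf("critical section\n");
	my_unlock();
	return 0;
}

Compiling it both ways (cc -DHAVE_THREADS -pthread demo.c, then plain cc demo.c) shows that the single-threaded form never references any mutex symbol.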
parent a833d20ffb
commit ae92df1e4a
1 changed file with 28 additions and 18 deletions
@@ -11,7 +11,9 @@
 #include <zephyr/sys/math_extras.h>
 #include <string.h>
 #include <zephyr/app_memory/app_memdomain.h>
+#ifdef CONFIG_MULTITHREADING
 #include <zephyr/sys/mutex.h>
+#endif
 #include <zephyr/sys/sys_heap.h>
 #include <zephyr/sys/libc-hooks.h>
 #include <zephyr/types.h>
@@ -113,14 +115,31 @@ extern char _heap_sentry[];
 # endif /* else ALLOCATE_HEAP_AT_STARTUP */

 POOL_SECTION static struct sys_heap z_malloc_heap;
+
+#ifdef CONFIG_MULTITHREADING
 MALLOC_SECTION SYS_MUTEX_DEFINE(z_malloc_heap_mutex);

-void *malloc(size_t size)
-{
+static inline void
+malloc_lock(void) {
 	int lock_ret;

 	lock_ret = sys_mutex_lock(&z_malloc_heap_mutex, K_FOREVER);
 	__ASSERT_NO_MSG(lock_ret == 0);
+}
+
+static inline void
+malloc_unlock(void)
+{
+	(void) sys_mutex_unlock(&z_malloc_heap_mutex);
+}
+#else
+#define malloc_lock()
+#define malloc_unlock()
+#endif
+
+void *malloc(size_t size)
+{
+	malloc_lock();

 	void *ret = sys_heap_aligned_alloc(&z_malloc_heap,
 					   __alignof__(z_max_align_t),
@@ -129,17 +148,14 @@ void *malloc(size_t size)
 		errno = ENOMEM;
 	}

-	(void) sys_mutex_unlock(&z_malloc_heap_mutex);
+	malloc_unlock();

 	return ret;
 }

 void *aligned_alloc(size_t alignment, size_t size)
 {
-	int lock_ret;
-
-	lock_ret = sys_mutex_lock(&z_malloc_heap_mutex, K_FOREVER);
-	__ASSERT_NO_MSG(lock_ret == 0);
+	malloc_lock();

 	void *ret = sys_heap_aligned_alloc(&z_malloc_heap,
 					   alignment,
@@ -148,7 +164,7 @@ void *aligned_alloc(size_t alignment, size_t size)
 		errno = ENOMEM;
 	}

-	(void) sys_mutex_unlock(&z_malloc_heap_mutex);
+	malloc_unlock();

 	return ret;
 }
@@ -222,10 +238,7 @@ static int malloc_prepare(void)

 void *realloc(void *ptr, size_t requested_size)
 {
-	int lock_ret;
-
-	lock_ret = sys_mutex_lock(&z_malloc_heap_mutex, K_FOREVER);
-	__ASSERT_NO_MSG(lock_ret == 0);
+	malloc_lock();

 	void *ret = sys_heap_aligned_realloc(&z_malloc_heap, ptr,
 					     __alignof__(z_max_align_t),
@@ -235,19 +248,16 @@ void *realloc(void *ptr, size_t requested_size)
 		errno = ENOMEM;
 	}

-	(void) sys_mutex_unlock(&z_malloc_heap_mutex);
+	malloc_unlock();

 	return ret;
 }

 void free(void *ptr)
 {
-	int lock_ret;
-
-	lock_ret = sys_mutex_lock(&z_malloc_heap_mutex, K_FOREVER);
-	__ASSERT_NO_MSG(lock_ret == 0);
+	malloc_lock();
 	sys_heap_free(&z_malloc_heap, ptr);
-	(void) sys_mutex_unlock(&z_malloc_heap_mutex);
+	malloc_unlock();
 }

 SYS_INIT(malloc_prepare, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
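Two details of the patch are worth noting: the locked path stays in real functions, so the __ASSERT_NO_MSG check lives in one place, while the CONFIG_MULTITHREADING=n path uses empty macros, so a single-threaded build never references any sys_mutex symbol. To exercise that path in an application, a one-line Kconfig fragment is enough; this is a usage sketch assuming a standard Zephyr prj.conf, not part of the commit.

# prj.conf: disable kernel multithreading; malloc then builds without sys_mutex
CONFIG_MULTITHREADING=n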