kernel: Fix possible overflow in k_mem_map
k_mem_map additionally allocates two guard pages that are not mapped. These pages are not accounted for when checking the provided size; when they are added, an overflow can happen and the mapped memory is not correct. Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
This commit is contained in:
parent
37a1d28a36
commit
e2f3840380
1 changed file with 7 additions and 0 deletions
|
@ -16,6 +16,8 @@
|
|||
#include <zephyr/toolchain.h>
|
||||
#include <zephyr/linker/linker-defs.h>
|
||||
#include <zephyr/sys/bitarray.h>
|
||||
#include <zephyr/sys/check.h>
|
||||
#include <zephyr/sys/math_extras.h>
|
||||
#include <zephyr/timing/timing.h>
|
||||
#include <zephyr/logging/log.h>
|
||||
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
|
||||
|
@ -575,6 +577,11 @@ void *k_mem_map(size_t size, uint32_t flags)
|
|||
__ASSERT((flags & K_MEM_CACHE_MASK) == 0U,
|
||||
"%s does not support explicit cache settings", __func__);
|
||||
|
||||
CHECKIF(size_add_overflow(size, CONFIG_MMU_PAGE_SIZE * 2, &total_size)) {
|
||||
LOG_ERR("too large size %zu passed to %s", size, __func__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
key = k_spin_lock(&z_mm_lock);
|
||||
|
||||
/* Need extra for the guard pages (before and after) which we
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue