newlib: fix heap user mode access for MPU devices

MPU devices that enforce power-of-two alignment now
specify the size of the buffer used for the newlib heap.
This buffer will be properly aligned and a pointer
exposed in a kernel header, such that it can be added
to a user thread's memory domain configuration if
necessary.

MPU devices that don't have these restrictions allocate
the heap as normal.

In all cases, if an MPU/MMU region needs to be programmed,
the z_newlib_get_heap_bounds() API will return the necessary
information.

Given how precious MPU regions are, no automatic programming
of the MPU is done; applications will need to do this as
needed in their memory domain configurations.

On x86, the x86 MMU-specific code has been moved to arch/x86
using the new z_newlib_get_heap_bounds() API.

Fixes: #6814

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
Andrew Boie 2018-05-09 16:36:44 -07:00 committed by Andrew Boie
commit 42a2c96422
4 changed files with 71 additions and 20 deletions

View file

@ -7,6 +7,8 @@
#include <kernel.h>
#include <mmustructs.h>
#include <linker/linker-defs.h>
#include <kernel_internal.h>
#include <init.h>
/* Common regions for all x86 processors.
* Peripheral I/O ranges configured at the SOC level
@ -301,9 +303,33 @@ void _arch_mem_domain_partition_remove(struct k_mem_domain *domain,
return;
}
u8_t _arch_mem_domain_max_partitions_get(void)
int _arch_mem_domain_max_partitions_get(void)
{
return CONFIG_MAX_DOMAIN_PARTITIONS;
}
#ifdef CONFIG_NEWLIB_LIBC
/* Init hook: make the newlib heap user-accessible on x86.
 *
 * Queries the C library for the heap's runtime bounds and programs the
 * MMU page tables so that user-mode threads can read/write the region.
 *
 * @param unused Device pointer required by the SYS_INIT hook signature;
 *               not used.
 * @return 0 always (this hook has no failure path).
 */
static int newlib_mmu_prepare(struct device *unused)
{
ARG_UNUSED(unused);
void *heap_base;
size_t heap_size;
/* Heap bounds are only known at runtime (see comment below), so fetch
 * them from the libc glue rather than using a link-time symbol.
 */
z_newlib_get_heap_bounds(&heap_base, &heap_size);
/* Set up the newlib heap area as a globally user-writable region.
 * We can't do this at build time with MMU_BOOT_REGION() as the _end
 * pointer shifts significantly between build phases due to the
 * introduction of page tables.
 */
_x86_mmu_set_flags(heap_base, heap_size,
MMU_ENTRY_PRESENT | MMU_ENTRY_WRITE |
MMU_ENTRY_USER,
MMU_PTE_P_MASK | MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
return 0;
}
/* Registered at APPLICATION level so the page tables exist before we
 * modify them; priority is the kernel default.
 */
SYS_INIT(newlib_mmu_prepare, APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_NEWLIB_LIBC */
#endif /* CONFIG_X86_USERSPACE */

View file

@ -178,6 +178,21 @@ extern void smp_init(void);
extern void smp_timer_init(void);
#ifdef CONFIG_NEWLIB_LIBC
/**
* @brief Fetch dimensions of newlib heap area for _sbrk()
*
* This memory region is used for heap allocations by the newlib C library.
* If user threads need to have access to this, the results returned can be
* used to program memory protection hardware appropriately.
*
* @param base Pointer to void pointer, filled in with the heap starting
* address
* @param size Pointer to a size_t, filled in with the maximum heap size
*/
extern void z_newlib_get_heap_bounds(void **base, size_t *size);
#endif
#ifdef __cplusplus
}
#endif

View file

@ -16,6 +16,22 @@ config NEWLIB_LIBC
Build with newlib library. The newlib library is expected to be
part of the SDK in this case.
config NEWLIB_LIBC_ALIGNED_HEAP_SIZE
	int
	prompt "Newlib aligned heap size"
	# Kconfig symbols are referenced WITHOUT the CONFIG_ prefix; the
	# original "depends on CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT"
	# named a symbol that never exists, so the option could never be
	# enabled.
	depends on MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
	depends on NEWLIB_LIBC
	depends on USERSPACE
	default 0
	help
	  If user mode is enabled, and MPU hardware has requirements that
	  regions be sized to a power of two and aligned to their size,
	  and user mode threads need to access this heap, then this is necessary
	  to properly define an MPU region for the heap.

	  If this is left at 0, then remaining system RAM will be used for this
	  area and it may not be possible to program it as an MPU region.
config NEWLIB_LIBC_FLOAT_PRINTF
bool "Build with newlib float printf"
default n

View file

@ -10,10 +10,17 @@
#include <sys/stat.h>
#include <linker/linker-defs.h>
#include <misc/util.h>
#include <init.h>
#include <kernel_internal.h>
#define USED_RAM_END_ADDR POINTER_TO_UINT(&_end)
#if CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE
/* Compiler will throw an error if the provided value isn't a power of two */
static unsigned char __kernel __aligned(CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE)
heap_base[CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE];
#define MAX_HEAP_SIZE CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE
#else
#if CONFIG_X86
#define USED_RAM_SIZE (USED_RAM_END_ADDR - CONFIG_PHYS_RAM_ADDR)
#define MAX_HEAP_SIZE ((KB(CONFIG_RAM_SIZE)) - USED_RAM_SIZE)
@ -38,6 +45,8 @@ extern void *_heap_sentry;
#endif
static unsigned char *heap_base = UINT_TO_POINTER(USED_RAM_END_ADDR);
#endif /* CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE */
static unsigned int heap_sz;
static int _stdout_hook_default(int c)
@ -159,23 +168,8 @@ void *_sbrk(int count)
}
FUNC_ALIAS(_sbrk, sbrk, void *);
#ifdef CONFIG_X86_MMU
static int newlib_mmu_prepare(struct device *unused)
void z_newlib_get_heap_bounds(void **base, size_t *size)
{
ARG_UNUSED(unused);
/* Set up the newlib heap area as a globally user-writable region.
* We can't do this at build time with MMU_BOOT_REGION() as the _end
* pointer shifts significantly between build phases due to the
* introduction of page tables.
*/
_x86_mmu_set_flags(UINT_TO_POINTER(USED_RAM_END_ADDR), MAX_HEAP_SIZE,
MMU_ENTRY_PRESENT | MMU_ENTRY_WRITE |
MMU_ENTRY_USER,
MMU_PTE_P_MASK | MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
return 0;
*base = heap_base;
*size = MAX_HEAP_SIZE;
}
SYS_INIT(newlib_mmu_prepare, APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_X86_MMU */