x86: don't automatically configure newlib malloc
This diverges from the policy for all of our other arches and C libraries,
and global access to the malloc arena may not be desirable.

A forthcoming patch will expose, for all C libraries, a k_mem_partition
containing the malloc arena, which can be added to memory domains as desired.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
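As an illustration of the direction described above (not part of this commit), here is a minimal sketch of how an application could opt a memory domain into malloc access once such a partition is exposed. The partition name z_malloc_partition and the helper function are assumptions, not APIs introduced by this change.

/* Minimal sketch, not part of this commit: grant malloc access per
 * memory domain instead of globally. Assumes the forthcoming patch
 * exposes the malloc arena as a k_mem_partition; the name
 * z_malloc_partition is a placeholder.
 */
#include <kernel.h>

extern struct k_mem_partition z_malloc_partition;	/* assumed name */

static struct k_mem_domain app_domain;

static void grant_malloc_access(k_tid_t thread)
{
	struct k_mem_partition *parts[] = { &z_malloc_partition };

	/* Only threads added to app_domain may touch the malloc arena */
	k_mem_domain_init(&app_domain, ARRAY_SIZE(parts), parts);
	k_mem_domain_add_thread(&app_domain, thread);
}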
parent 686bd913c3
commit 71a3b53504

1 changed file with 0 additions and 25 deletions
@@ -292,29 +292,4 @@ int _arch_mem_domain_max_partitions_get(void)
 {
 	return CONFIG_MAX_DOMAIN_PARTITIONS;
 }
-
-#ifdef CONFIG_NEWLIB_LIBC
-static int newlib_mmu_prepare(struct device *unused)
-{
-	ARG_UNUSED(unused);
-	void *heap_base;
-	size_t heap_size;
-
-	z_newlib_get_heap_bounds(&heap_base, &heap_size);
-
-	/* Set up the newlib heap area as a globally user-writable region.
-	 * We can't do this at build time with MMU_BOOT_REGION() as the _end
-	 * pointer shifts significantly between build phases due to the
-	 * introduction of page tables.
-	 */
-	_x86_mmu_set_flags(heap_base, heap_size,
-			   MMU_ENTRY_PRESENT | MMU_ENTRY_WRITE |
-			   MMU_ENTRY_USER,
-			   MMU_PTE_P_MASK | MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
-
-	return 0;
-}
-
-SYS_INIT(newlib_mmu_prepare, APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
-#endif /* CONFIG_NEWLIB_LIBC */
 #endif /* CONFIG_X86_USERSPACE*/