Revert "mmu: ensure gperf data is mapped"
This reverts commit e9bfd64110.
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
parent 0f24e09bcf
commit d887e078f9

5 changed files with 1 addition and 31 deletions
@@ -5,7 +5,6 @@
  */
 
 #ifdef CONFIG_USERSPACE
-        z_kobject_data_begin = .;
 /* Constraints:
  *
  * - changes to the size of this section between build phases
@@ -303,7 +303,6 @@ extern char z_priv_stacks_ram_start[];
 extern char z_priv_stacks_ram_end[];
 extern char z_user_stacks_start[];
 extern char z_user_stacks_end[];
-extern char z_kobject_data_begin[];
 #endif /* CONFIG_USERSPACE */
 
 #ifdef CONFIG_THREAD_LOCAL_STORAGE
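For context on the pair of hunks above: the linker-script assignment publishes the current location counter as the symbol z_kobject_data_begin, and the header declares it as an incomplete char array so C code can take its address. Below is a minimal sketch of that general pattern; the helper function and the use of z_mapped_end as the end marker are illustrative, not taken from the diff.

#include <stddef.h>

/* Symbols defined by the linker script (e.g. "z_kobject_data_begin = .;")
 * occupy no storage of their own; declaring them as incomplete char arrays
 * lets C code take their addresses without the compiler assuming anything
 * about their contents.
 */
extern char z_kobject_data_begin[];
extern char z_mapped_end[];   /* assumed here to mark the end of the mapped kernel image */

/* Hypothetical helper: size of the region from the gperf kobject data to the
 * end of the mapped image, i.e. the area the reverted workaround re-mapped.
 */
static inline size_t kobject_region_size(void)
{
        return (size_t)(z_mapped_end - z_kobject_data_begin);
}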
@@ -197,9 +197,6 @@ void z_thread_mark_switched_out(void);
  */
 void z_mem_manage_init(void);
 
-/* Workaround for build-time page table mapping of the kernel */
-void z_kernel_map_fixup(void);
-
 #ifdef __cplusplus
 }
 #endif
@@ -391,9 +391,7 @@ FUNC_NORETURN void z_cstart(void)
 
         z_dummy_thread_init(&dummy_thread);
 #endif
-#if defined(CONFIG_MMU) && defined(CONFIG_USERSPACE)
-        z_kernel_map_fixup();
-#endif
+
         /* perform basic hardware initialization */
         z_sys_init_run_level(_SYS_INIT_LEVEL_PRE_KERNEL_1);
         z_sys_init_run_level(_SYS_INIT_LEVEL_PRE_KERNEL_2);
kernel/mmu.c: 23 changed lines

@@ -453,29 +453,6 @@ size_t k_mem_region_align(uintptr_t *aligned_phys, size_t *aligned_size,
  */
-#define BOOT_VIRT_TO_PHYS(virt) ((uintptr_t)(((uint8_t *)virt) + VM_OFFSET))
 
-#ifdef CONFIG_USERSPACE
-void z_kernel_map_fixup(void)
-{
-        /* XXX: Gperf kernel object data created at build time will not have
-         * visibility in zephyr_prebuilt.elf. There is a possibility that this
-         * data would not be memory-mapped if it shifts z_mapped_end between
-         * builds. Ensure this area is mapped.
-         *
-         * A third build phase for page tables would solve this.
-         */
-        uint8_t *kobject_page_begin =
-                (uint8_t *)ROUND_DOWN((uintptr_t)&z_kobject_data_begin,
-                                      CONFIG_MMU_PAGE_SIZE);
-        size_t kobject_size = (size_t)(Z_KERNEL_VIRT_END - kobject_page_begin);
-
-        if (kobject_size != 0) {
-                arch_mem_map(kobject_page_begin,
-                             BOOT_VIRT_TO_PHYS(kobject_page_begin),
-                             kobject_size, K_MEM_PERM_RW | K_MEM_CACHE_WB);
-        }
-}
-#endif /* CONFIG_USERSPACE */
 
 void z_mem_manage_init(void)
 {
         uintptr_t phys;
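For reference, the arithmetic inside the removed z_kernel_map_fixup() reduces to: round the start of the gperf kobject data down to a page boundary, measure the span up to the end of the kernel's virtual image, and map that region read/write at the corresponding physical address. Below is a standalone sketch of that calculation, assuming a 4 KiB page size and made-up addresses; ROUND_DOWN and BOOT_VIRT_TO_PHYS are re-declared so the example compiles on its own and only approximate the removed Zephyr definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for Zephyr's CONFIG_MMU_PAGE_SIZE and ROUND_DOWN() util macro. */
#define PAGE_SIZE 4096UL
#define ROUND_DOWN(x, align) ((uintptr_t)(x) & ~((uintptr_t)(align) - 1))

/* Illustrative virtual-to-physical offset, playing the role of VM_OFFSET in
 * the removed BOOT_VIRT_TO_PHYS() macro.
 */
#define VM_OFFSET 0x100000UL
#define BOOT_VIRT_TO_PHYS(virt) ((uintptr_t)(virt) + VM_OFFSET)

int main(void)
{
        /* Hypothetical addresses standing in for &z_kobject_data_begin and
         * Z_KERNEL_VIRT_END in the removed code.
         */
        uintptr_t kobject_data_begin = 0x00153a20UL;
        uintptr_t kernel_virt_end    = 0x00156000UL;

        uintptr_t page_begin = ROUND_DOWN(kobject_data_begin, PAGE_SIZE);
        size_t map_size = (size_t)(kernel_virt_end - page_begin);

        if (map_size != 0) {
                /* The removed workaround handed these values to arch_mem_map()
                 * with RW, write-back-cached permissions.
                 */
                printf("map 0x%zx bytes at virt 0x%lx -> phys 0x%lx\n",
                       map_size, (unsigned long)page_begin,
                       (unsigned long)BOOT_VIRT_TO_PHYS(page_begin));
        }
        return 0;
}

Because page_begin is rounded down to a page boundary, the mapping always covers whole pages even when the gperf-generated data shifts z_mapped_end between the two link phases, which is the situation the reverted workaround guarded against.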