diff --git a/include/linker/kobject.ld b/include/linker/kobject.ld
index d5499efc6e4..c02c10719b6 100644
--- a/include/linker/kobject.ld
+++ b/include/linker/kobject.ld
@@ -5,6 +5,7 @@
  */
 
 #ifdef CONFIG_USERSPACE
+	z_kobject_data_begin = .;
 	/* Constraints:
 	 *
 	 * - changes to the size of this section between build phases
diff --git a/include/linker/linker-defs.h b/include/linker/linker-defs.h
index 48d3cc80e38..383fef15e84 100644
--- a/include/linker/linker-defs.h
+++ b/include/linker/linker-defs.h
@@ -303,6 +303,7 @@ extern char z_priv_stacks_ram_start[];
 extern char z_priv_stacks_ram_end[];
 extern char z_user_stacks_start[];
 extern char z_user_stacks_end[];
+extern char z_kobject_data_begin[];
 #endif /* CONFIG_USERSPACE */
 
 #ifdef CONFIG_THREAD_LOCAL_STORAGE
diff --git a/kernel/include/kernel_internal.h b/kernel/include/kernel_internal.h
index 3e0c4f2a455..702d5c512cd 100644
--- a/kernel/include/kernel_internal.h
+++ b/kernel/include/kernel_internal.h
@@ -197,6 +197,9 @@ void z_thread_mark_switched_out(void);
  */
 void z_mem_manage_init(void);
 
+/* Workaround for build-time page table mapping of the kernel */
+void z_kernel_map_fixup(void);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/kernel/init.c b/kernel/init.c
index 7d1ce4a9953..b12db355d86 100644
--- a/kernel/init.c
+++ b/kernel/init.c
@@ -391,7 +391,9 @@ FUNC_NORETURN void z_cstart(void)
 
 	z_dummy_thread_init(&dummy_thread);
 #endif
-
+#if defined(CONFIG_MMU) && defined(CONFIG_USERSPACE)
+	z_kernel_map_fixup();
+#endif
 	/* perform basic hardware initialization */
 	z_sys_init_run_level(_SYS_INIT_LEVEL_PRE_KERNEL_1);
 	z_sys_init_run_level(_SYS_INIT_LEVEL_PRE_KERNEL_2);
diff --git a/kernel/mmu.c b/kernel/mmu.c
index 15d984cade0..9cead314582 100644
--- a/kernel/mmu.c
+++ b/kernel/mmu.c
@@ -453,6 +453,29 @@ size_t k_mem_region_align(uintptr_t *aligned_phys, size_t *aligned_size,
  */
 #define BOOT_VIRT_TO_PHYS(virt) ((uintptr_t)(((uint8_t *)virt) + VM_OFFSET))
 
+#ifdef CONFIG_USERSPACE
+void z_kernel_map_fixup(void)
+{
+	/* XXX: Gperf kernel object data created at build time will not have
+	 * visibility in zephyr_prebuilt.elf. There is a possibility that this
+	 * data would not be memory-mapped if it shifts z_mapped_end between
+	 * builds. Ensure this area is mapped.
+	 *
+	 * A third build phase for page tables would solve this.
+	 */
+	uint8_t *kobject_page_begin =
+		(uint8_t *)ROUND_DOWN((uintptr_t)&z_kobject_data_begin,
+				      CONFIG_MMU_PAGE_SIZE);
+	size_t kobject_size = (size_t)(Z_KERNEL_VIRT_END - kobject_page_begin);
+
+	if (kobject_size != 0) {
+		arch_mem_map(kobject_page_begin,
+			     BOOT_VIRT_TO_PHYS(kobject_page_begin),
+			     kobject_size, K_MEM_PERM_RW | K_MEM_CACHE_WB);
+	}
+}
+#endif /* CONFIG_USERSPACE */
+
 void z_mem_manage_init(void)
 {
 	uintptr_t phys;
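
Below is a minimal, self-contained sketch (not part of the patch) of the page-rounding arithmetic z_kernel_map_fixup() relies on: round the linker-provided z_kobject_data_begin symbol down to a page boundary and map everything from there up to the end of the kernel image. The PAGE_ROUND_DOWN helper, the 4 KiB page size, and the addresses are made-up stand-ins; the real code uses Zephyr's ROUND_DOWN, CONFIG_MMU_PAGE_SIZE, Z_KERNEL_VIRT_END and arch_mem_map().

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for CONFIG_MMU_PAGE_SIZE and ROUND_DOWN() */
#define PAGE_SIZE           4096UL
#define PAGE_ROUND_DOWN(x)  ((uintptr_t)(x) & ~(PAGE_SIZE - 1UL))

int main(void)
{
	/* Hypothetical layout: gperf kobject data starts mid-page, the
	 * mapped kernel image ends a few pages later.
	 */
	uintptr_t kobject_data_begin = 0x00152c40UL;
	uintptr_t kernel_virt_end    = 0x00160000UL;

	uintptr_t map_begin = PAGE_ROUND_DOWN(kobject_data_begin);
	size_t map_size = (size_t)(kernel_virt_end - map_begin);

	/* The [map_begin, map_begin + map_size) range is what the patch
	 * hands to arch_mem_map() with RW, write-back cached attributes,
	 * so kobject tables generated in the final link pass stay mapped
	 * even if they shift z_mapped_end between build phases.
	 */
	printf("map [0x%lx, 0x%lx), %zu bytes\n",
	       (unsigned long)map_begin,
	       (unsigned long)(map_begin + map_size), map_size);

	return 0;
}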