Revert "x86: linker: define z_mapped_* symbols"

This reverts commit 891776ec2a.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Author: Anas Nashif
Date:   2021-01-22 07:37:44 -05:00
Commit: 25d589d46d

2 changed files with 12 additions and 31 deletions
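Reviewer note: the z_mapped_start / z_mapped_end / z_mapped_size symbols removed by this revert are ordinary linker-script symbols marking the bounds of the kernel's mapped image. Below is a minimal sketch, based only on what the diff shows, of how such symbols are typically consumed from C; map_kernel_image() and arch_map_page() are hypothetical names used purely for illustration, not Zephyr APIs.

/*
 * Sketch only: reading linker-provided symbols such as z_mapped_start and
 * z_mapped_end from C.  The symbol names match the diff; map_kernel_image()
 * and arch_map_page() are hypothetical helpers, not Zephyr APIs.
 */
#include <stdint.h>

/* Linker symbols carry no storage; their *addresses* are the values the
 * linker script assigns (e.g. "z_mapped_start = .;").
 */
extern char z_mapped_start[];
extern char z_mapped_end[];

#define PAGE_SIZE 4096U   /* stand-in for CONFIG_MMU_PAGE_SIZE */

static void arch_map_page(uintptr_t va)
{
    (void)va;             /* stub: real code would program the page tables */
}

static void map_kernel_image(void)
{
    uintptr_t start = (uintptr_t)z_mapped_start;
    uintptr_t end   = (uintptr_t)z_mapped_end;

    for (uintptr_t va = start; va < end; va += PAGE_SIZE) {
        arch_map_page(va);
    }
}

Note that z_mapped_size is a symbol as well (the script computes it as z_mapped_end - z_mapped_start), so C code would read its value as (size_t)(uintptr_t)z_mapped_size rather than as an ordinary variable.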

Changed file 1 of 2:

@@ -51,20 +51,14 @@
#define RAMABLE_REGION RAM
#endif
#ifdef CONFIG_MMU
#define MMU_PAGE_ALIGN . = ALIGN(CONFIG_MMU_PAGE_SIZE);
#else
#define MMU_PAGE_ALIGN
#endif
/* Used to align areas with separate memory permission characteristics
* so that the page permissions can be set in the MMU. Without this,
* the kernel is just one blob with the same RWX permissions on all RAM
*/
#ifdef CONFIG_SRAM_REGION_PERMISSIONS
#define MMU_PAGE_ALIGN_PERM MMU_PAGE_ALIGN
#define MMU_PAGE_ALIGN . = ALIGN(CONFIG_MMU_PAGE_SIZE);
#else
#define MMU_PAGE_ALIGN_PERM
#define MMU_PAGE_ALIGN
#endif
ENTRY(CONFIG_KERNEL_ENTRY)
@@ -96,9 +90,6 @@ SECTIONS
SECTION_PROLOGUE(_TEXT_SECTION_NAME,,)
{
_image_text_start = .;
#ifndef CONFIG_XIP
z_mapped_start = .;
#endif
/* Located in generated directory. This file is populated by calling
* zephyr_linker_sources(ROM_START ...). This typically contains the vector
@@ -121,7 +112,7 @@ SECTIONS
#include <linker/kobject-text.ld>
MMU_PAGE_ALIGN_PERM
MMU_PAGE_ALIGN
} GROUP_LINK_IN(ROMABLE_REGION)
_image_text_end = .;
@@ -165,7 +156,7 @@ SECTIONS
#include <linker/cplusplus-rom.ld>
MMU_PAGE_ALIGN_PERM
MMU_PAGE_ALIGN
/* ROM ends here, position counter will now be in RAM areas */
#ifdef CONFIG_XIP
_image_rom_end = .;
@@ -186,10 +177,6 @@ SECTIONS
/* RAMABLE_REGION */
GROUP_START(RAMABLE_REGION)
#ifdef CONFIG_XIP
MMU_PAGE_ALIGN
z_mapped_start = .;
#endif
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
@@ -197,8 +184,8 @@ SECTIONS
#ifdef CONFIG_USERSPACE
/* APP SHARED MEMORY REGION */
#define SMEM_PARTITION_ALIGN(size) MMU_PAGE_ALIGN_PERM
#define APP_SHARED_ALIGN MMU_PAGE_ALIGN_PERM
#define SMEM_PARTITION_ALIGN(size) MMU_PAGE_ALIGN
#define APP_SHARED_ALIGN MMU_PAGE_ALIGN
#include <app_smem.ld>
@@ -211,7 +198,7 @@ SECTIONS
SECTION_PROLOGUE(_BSS_SECTION_NAME, (NOLOAD),)
{
MMU_PAGE_ALIGN_PERM
MMU_PAGE_ALIGN
#if !defined(CONFIG_USERSPACE)
_image_ram_start = .;
#endif
@@ -240,7 +227,7 @@ SECTIONS
#include <linker/common-noinit.ld>
MMU_PAGE_ALIGN_PERM
MMU_PAGE_ALIGN
SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,)
{
@@ -274,7 +261,7 @@ SECTIONS
#include <snippets-rwdata.ld>
#ifdef CONFIG_X86_KPTI
MMU_PAGE_ALIGN_PERM
MMU_PAGE_ALIGN
z_shared_kernel_page_start = .;
/* Special page containing supervisor data that is still mapped in
* user mode page tables. IDT, GDT, TSSes, trampoline stack, and
@@ -315,7 +302,7 @@ SECTIONS
#ifdef CONFIG_X86_KPTI
z_trampoline_stack_start = .;
MMU_PAGE_ALIGN_PERM
MMU_PAGE_ALIGN
z_trampoline_stack_end = .;
z_shared_kernel_page_end = .;
@@ -347,8 +334,6 @@ SECTIONS
_image_ram_end = .;
_image_ram_all = (KERNEL_BASE_ADDR + KERNEL_RAM_SIZE) - _image_ram_start;
z_mapped_end = .;
z_mapped_size = z_mapped_end - z_mapped_start;
_end = .; /* end of image */
GROUP_END(RAMABLE_REGION)

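The comment kept in the first hunk ("Used to align areas with separate memory permission characteristics ...") is the reason for the MMU_PAGE_ALIGN / MMU_PAGE_ALIGN_PERM markers that this revert collapses back into the single MMU_PAGE_ALIGN macro: page permissions can only be applied per MMU page, so boundaries between areas that need different permissions must fall on page boundaries. A rough sketch of the runtime side of that idea, reusing the _image_text_start/_image_text_end markers from the diff; set_pages_perm(), PERM_RX/PERM_RW and protect_text() are hypothetical illustration-only names.

/*
 * Sketch only: why permission boundaries in the linker script are page
 * aligned.  _image_text_start/_image_text_end are linker symbols from the
 * diff; set_pages_perm(), PERM_RX/PERM_RW and protect_text() are
 * hypothetical names for illustration, not Zephyr APIs.
 */
#include <stdint.h>

#define PAGE_SIZE 4096U   /* stand-in for CONFIG_MMU_PAGE_SIZE */

enum page_perm { PERM_RX, PERM_RW };

extern char _image_text_start[];
extern char _image_text_end[];

static void set_pages_perm(uintptr_t start, uintptr_t end, enum page_perm p)
{
    (void)start; (void)end; (void)p;   /* stub: would walk the page tables */
}

static void protect_text(void)
{
    /* The script page-aligns these boundaries (MMU_PAGE_ALIGN).  Because
     * permissions are granted per page, an unaligned end-of-text would put
     * code and writable data on the same page, and that page could not be
     * read-only/executable and writable at the same time.
     */
    set_pages_perm((uintptr_t)_image_text_start,
                   (uintptr_t)_image_text_end,
                   PERM_RX);
}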
Changed file 2 of 2:

@@ -70,7 +70,7 @@ SECTIONS
"shared kernel area is not one memory page");
#endif /* CONFIG_X86_KPTI */
. = ALIGN(CONFIG_MMU_PAGE_SIZE);
MMU_PAGE_ALIGN
_lodata_end = .;
} > LOCORE
@@ -87,7 +87,6 @@
{
_image_rom_start = .;
_image_text_start = .;
z_mapped_start = .;
*(.text)
*(.text.*)
@@ -180,17 +179,14 @@
/* Must be last in RAM */
#include <linker/kobject.ld>
. = ALIGN(CONFIG_MMU_PAGE_SIZE);
_image_ram_end = .;
z_mapped_end = .;
_end = .;
/* All unused memory also owned by the kernel for heaps */
__kernel_ram_end = KERNEL_BASE_ADDR + KERNEL_RAM_SIZE;
__kernel_ram_size = __kernel_ram_end - __kernel_ram_start;
z_mapped_size = z_mapped_end - z_mapped_start;
#include <linker/debug-sections.ld>
/DISCARD/ :