From f9461d1ac43f14a8d38ed261b2b1159488ea9326 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Fri, 5 Feb 2021 13:20:20 -0500 Subject: [PATCH] mmu: fix ARM64 compilation by removing z_mapped_size usage The linker script defines `z_mapped_size` as follows: ``` z_mapped_size = z_mapped_end - z_mapped_start; ``` This is done with the belief that precomputed values at link time will make the code smaller and faster. On AArch64, symbol values are relocated and loaded relative to the PC as those are normally meant to be memory addresses. Now if you have e.g. `CONFIG_SRAM_BASE_ADDRESS=0x2000000000` then `z_mapped_size` might still have a reasonable value, say 0x59334. But, when interpreted as an address, that's very very far from the PC whose value is in the neighborhood of 0x2000000000. That overflows the 4GB relocation range: ``` kernel/libkernel.a(mmu.c.obj): in function `z_mem_manage_init': kernel/mmu.c:527:(.text.z_mem_manage_init+0x1c): relocation truncated to fit: R_AARCH64_ADR_PREL_PG_HI21 ``` The solution is to define `Z_KERNEL_VIRT_SIZE` in terms of `z_mapped_end - z_mapped_start` at the source code level. 
Given this is used within loops that already start with `z_mapped_start` anyway, the compiler is smart enough to combine the two occurrences and dispense with a size counter, making the code effectively slightly better for all while avoiding the AArch64 relocation overflow: ``` text data bss dec hex filename 1216 8 294936 296160 484e0 mmu.c.obj.arm64.before 1212 8 294936 296156 484dc mmu.c.obj.arm64.after 1110 8 9244 10362 287a mmu.c.obj.x86-64.before 1106 8 9244 10358 2876 mmu.c.obj.x86-64.after ``` Signed-off-by: Nicolas Pitre --- include/arch/arm/aarch64/scripts/linker.ld | 1 - include/linker/linker-defs.h | 1 - kernel/include/mmu.h | 2 +- 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/include/arch/arm/aarch64/scripts/linker.ld b/include/arch/arm/aarch64/scripts/linker.ld index ab4de8f6c84..9fb837867fc 100644 --- a/include/arch/arm/aarch64/scripts/linker.ld +++ b/include/arch/arm/aarch64/scripts/linker.ld @@ -297,7 +297,6 @@ SECTIONS _image_ram_end = .; _end = .; /* end of image */ z_mapped_end = .; - z_mapped_size = z_mapped_end - z_mapped_start; __kernel_ram_end = RAM_ADDR + RAM_SIZE; __kernel_ram_size = __kernel_ram_end - __kernel_ram_start; diff --git a/include/linker/linker-defs.h b/include/linker/linker-defs.h index 383fef15e84..be75c318d28 100644 --- a/include/linker/linker-defs.h +++ b/include/linker/linker-defs.h @@ -186,7 +186,6 @@ extern char __data_ram_end[]; /* Virtual addresses of page-aligned kernel image mapped into RAM at boot */ extern char z_mapped_start[]; extern char z_mapped_end[]; -extern char z_mapped_size[]; #endif /* CONFIG_MMU */ /* Includes text and rodata */ diff --git a/kernel/include/mmu.h b/kernel/include/mmu.h index 50981e3ecf3..5b62b9f1efc 100644 --- a/kernel/include/mmu.h +++ b/kernel/include/mmu.h @@ -38,7 +38,7 @@ /* Boot-time virtual location of the kernel image. 
*/ #define Z_KERNEL_VIRT_START ((uint8_t *)(&z_mapped_start)) #define Z_KERNEL_VIRT_END ((uint8_t *)(&z_mapped_end)) -#define Z_KERNEL_VIRT_SIZE ((size_t)(&z_mapped_size)) +#define Z_KERNEL_VIRT_SIZE (Z_KERNEL_VIRT_END - Z_KERNEL_VIRT_START) #define Z_VM_OFFSET ((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \ CONFIG_SRAM_BASE_ADDRESS)