zephyr/include/arch/x86/linker.ld

/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Common linker sections
*
* This script defines the memory location of the various sections that make up
* a Zephyr Kernel image. This file is used by the linker.
*
* This script places the various sections of the image according to what
* features are enabled by the kernel's configuration options.
*
* For a build that does not use the execute in place (XIP) feature, the script
* generates an image suitable for loading into and executing from
* RAMABLE_REGION by placing all the sections adjacent to each other. There is
* also no separate load address for the DATA section, which means it doesn't
* have to be copied into RAMABLE_REGION.
*
* For builds using XIP, there is a different load memory address (LMA) and
* virtual memory address (VMA) for the DATA section. In this case the DATA
* section is copied into RAMABLE_REGION at runtime.
*
* When building an XIP image, the DATA section is placed into ROMABLE_REGION.
* In this case, the LMA is set to __data_rom_start so the DATA section is
* concatenated at the end of the RODATA section. At runtime, the DATA section
* is copied into RAMABLE_REGION so it can be accessed with read and write
* permissions.
*
* Most symbols defined in the sections below may be referenced in the Zephyr
* Kernel image. If a symbol is used but not defined, the linker will emit an
* undefined symbol error.
*
* Please do not change the order of the sections, as the kernel expects this
* order when programming the MMU.
*/
#define _LINKER
#define _ASMLANGUAGE
#include <linker/linker-defs.h>
#include <offsets.h>
#include <misc/util.h>
#include <linker/linker-tool.h>
#ifdef CONFIG_XIP
#define ROMABLE_REGION ROM
#define RAMABLE_REGION RAM
#else
#define ROMABLE_REGION RAM
#define RAMABLE_REGION RAM
#endif
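/* Sections that get distinct MMU permissions must start on a 4 KiB page
* boundary. MMU_PAGE_ALIGN is placed at those boundaries below; it is a
* no-op when CONFIG_X86_MMU is disabled.
*/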
#ifdef CONFIG_X86_MMU
#define MMU_PAGE_SIZE KB(4)
#define MMU_PAGE_ALIGN . = ALIGN(MMU_PAGE_SIZE);
#else
#define MMU_PAGE_ALIGN
#endif
ENTRY(CONFIG_KERNEL_ENTRY)
/* SECTIONS definitions */
SECTIONS
{
GROUP_START(ROMABLE_REGION)
#ifdef CONFIG_REALMODE
/* 16-bit sections */
. = PHYS_RAM_ADDR;
SECTION_PROLOGUE(boot, (OPTIONAL),)
{
*(.boot)
. = ALIGN(16);
} GROUP_LINK_IN(ROMABLE_REGION)
#endif
. = ALIGN(8);
_image_rom_start = PHYS_LOAD_ADDR;
#ifndef CONFIG_REALMODE
_image_text_start = PHYS_LOAD_ADDR;
#else
_image_text_start = .;
#endif
SECTION_PROLOGUE(_TEXT_SECTION_NAME, (OPTIONAL),)
{
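/* Leave CONFIG_TEXT_SECTION_OFFSET bytes of room at the start of the
* section before any code is placed.
*/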
. = CONFIG_TEXT_SECTION_OFFSET;
*(.text_start)
*(".text_start.*")
*(.text)
*(".text.*")
*(.gnu.linkonce.t.*)
*(.eh_frame)
*(.init)
*(.fini)
*(.eini)
KEEP(*(.openocd_dbg))
KEEP(*(".openocd_dbg.*"))
#include <linker/kobject-text.ld>
} GROUP_LINK_IN(ROMABLE_REGION)
_image_text_end = .;
_image_rodata_start = .;
#include <linker/common-rom.ld>
SECTION_PROLOGUE(_RODATA_SECTION_NAME, (OPTIONAL),)
{
*(.rodata)
*(".rodata.*")
*(.gnu.linkonce.r.*)
. = ALIGN(8);
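/* Reserve room for the statically-constructed IDT, 8 bytes per vector.
* Only the space is reserved on the first link pass; the generated
* staticIdt section is linked in on the second (LINKER_PASS2) pass.
*/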
_idt_base_address = .;
#ifdef LINKER_PASS2
KEEP(*(staticIdt))
#else
. += CONFIG_IDT_NUM_VECTORS * 8;
#endif
#ifndef CONFIG_X86_FIXED_IRQ_MAPPING
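/* Map from IRQ line to allocated IDT vector, one byte per IRQ line.
* As with the IDT above, space is reserved on the first link pass and
* the generated irq_int_vector_map section is linked in on the second.
*/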
. = ALIGN(4);
_irq_to_interrupt_vector = .;
#ifdef LINKER_PASS2
KEEP(*(irq_int_vector_map))
#else
. += CONFIG_MAX_IRQ_LINES;
#endif
#endif
#ifdef CONFIG_CUSTOM_RODATA_LD
/* Located in project source directory */
#include <custom-rodata.ld>
#endif
#include <linker/kobject-rom.ld>
} GROUP_LINK_IN(ROMABLE_REGION)
_image_rodata_end = .;
MMU_PAGE_ALIGN
#ifdef CONFIG_XIP
/* Kernel ROM extends to the end of flash. This is needed to program
* the MMU.
*/
_image_rom_end = _image_rom_start + KB(CONFIG_ROM_SIZE);
#else
/* ROM ends here; the location counter is now in the RAM area */
_image_rom_end = .;
#endif
_image_rom_size = _image_rom_end - _image_rom_start;
GROUP_END(ROMABLE_REGION)
/* RAMABLE_REGION */
GROUP_START(RAMABLE_REGION)
/* APP SHARED MEMORY REGION */
SECTION_PROLOGUE(_APP_SMEM_SECTION_NAME, (OPTIONAL),)
{
_image_ram_start = .;
_app_smem_start = .;
APP_SMEM_SECTION()
MMU_PAGE_ALIGN
_app_smem_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
_app_smem_size = _app_smem_end - _app_smem_start;
_app_smem_rom_start = LOADADDR(_APP_SMEM_SECTION_NAME);
#ifdef CONFIG_APPLICATION_MEMORY
SECTION_DATA_PROLOGUE(_APP_DATA_SECTION_NAME, (OPTIONAL),)
{
#ifndef CONFIG_XIP
MMU_PAGE_ALIGN
#endif
__app_ram_start = .;
__app_data_ram_start = .;
APP_INPUT_SECTION(.data)
APP_INPUT_SECTION(".data.*")
__app_data_ram_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
__app_data_rom_start = LOADADDR(_APP_DATA_SECTION_NAME);
SECTION_PROLOGUE(_APP_BSS_SECTION_NAME, (NOLOAD OPTIONAL),)
{
__app_bss_start = .;
APP_INPUT_SECTION(.bss)
APP_INPUT_SECTION(".bss.*")
APP_INPUT_SECTION(COMMON)
__app_bss_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
__app_bss_num_words = (__app_bss_end - __app_bss_start) >> 2;
SECTION_PROLOGUE(_APP_NOINIT_SECTION_NAME, (NOLOAD OPTIONAL),)
{
APP_INPUT_SECTION(.noinit)
APP_INPUT_SECTION(".noinit.*")
MMU_PAGE_ALIGN
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
__app_ram_end = .;
__app_ram_size = __app_ram_end - __app_ram_start;
#endif /* CONFIG_APPLICATION_MEMORY */
SECTION_PROLOGUE(_BSS_SECTION_NAME, (NOLOAD OPTIONAL),)
{
/*
* Without Jailhouse, we get the page alignment here for free by
* definition of the beginning of the "RAMable" region in the board
* configurations. With Jailhouse, everything falls in RAM and sections
* are glued together in sequence, so we have to realign here so that
* gen_mmu.py does not complain.
*/
#ifdef CONFIG_JAILHOUSE
MMU_PAGE_ALIGN
#endif
/*
* For performance, the BSS section is forced to start on a 4-byte
* boundary and to be a multiple of 4 bytes in size.
*/
. = ALIGN(4);
__kernel_ram_start = .;
__bss_start = .;
KERNEL_INPUT_SECTION(.bss)
KERNEL_INPUT_SECTION(".bss.*")
KERNEL_INPUT_SECTION(COMMON)
*(".kernel_bss.*")
/*
* As memory is cleared in words only, it is simpler to ensure the BSS
* section ends on a 4 byte boundary. This wastes a maximum of 3 bytes.
*/
. = ALIGN(4);
__bss_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
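/* Number of 32-bit words the boot code zeroes when clearing the BSS */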
__bss_num_words = (__bss_end - __bss_start) >> 2;
SECTION_PROLOGUE(_NOINIT_SECTION_NAME, (NOLOAD OPTIONAL),)
{
/*
* This section is used for non-initialized objects that
* will not be cleared during the boot process.
*/
KERNEL_INPUT_SECTION(.noinit)
KERNEL_INPUT_SECTION(".noinit.*")
*(".kernel_noinit.*")
MMU_PAGE_ALIGN
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME, (OPTIONAL),)
{
__data_ram_start = .;
KERNEL_INPUT_SECTION(.data)
KERNEL_INPUT_SECTION(".data.*")
*(".kernel.*")
#ifdef CONFIG_CUSTOM_RWDATA_LD
/* Located in project source directory */
#include <custom-rwdata.ld>
#endif
#ifdef CONFIG_GDT_DYNAMIC
KEEP(*(.tss))
. = ALIGN(8);
_gdt = .;
#ifdef LINKER_PASS2
KEEP(*(gdt_ram_data))
#else /* LINKER_PASS2 */
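/* First pass: only reserve room, 8 bytes per descriptor, so that
* addresses do not shift once the generated gdt_ram_data section is
* linked in on the second pass.
*/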
#ifdef CONFIG_USERSPACE
#define GDT_NUM_ENTRIES 7
#elif defined(CONFIG_HW_STACK_PROTECTION)
#define GDT_NUM_ENTRIES 5
#else
#define GDT_NUM_ENTRIES 3
#endif /* CONFIG_USERSPACE */
. += GDT_NUM_ENTRIES * 8;
#endif /* LINKER_PASS2 */
#endif /* CONFIG_GDT_DYNAMIC */
. = ALIGN(4);
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
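/* LMA of the DATA section: the ROM image of .data that gets copied to
* __data_ram_start at boot on XIP builds.
*/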
__data_rom_start = LOADADDR(_DATA_SECTION_NAME);
#include <linker/common-ram.ld>
#ifdef CONFIG_X86_MMU
/* The size of this section can't really be predicted. Anything after it
* should not be affected if addresses change between builds (currently
* that is just the gperf tables, which is fine).
*
* However, __mmu_tables_start *must* remain stable between builds;
* nothing may shift the memory map before this point.
*/
SECTION_DATA_PROLOGUE(mmu_tables, (OPTIONAL),)
{
/* Page tables are located here if the MMU is enabled. */
MMU_PAGE_ALIGN
__mmu_tables_start = .;
KEEP(*(.mmu_data));
__mmu_tables_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#endif
#include <linker/kobject.ld>
MMU_PAGE_ALIGN
__data_ram_end = .;
/* All unused memory also owned by the kernel for heaps */
__kernel_ram_end = PHYS_RAM_ADDR + KB(CONFIG_RAM_SIZE);
__kernel_ram_size = __kernel_ram_end - __kernel_ram_start;
_image_ram_end = .;
_image_ram_all = (PHYS_RAM_ADDR + KB(CONFIG_RAM_SIZE)) - _image_ram_start;
_end = .; /* end of image */
GROUP_END(RAMABLE_REGION)
#ifndef LINKER_PASS2
/* static interrupts */
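/* First link pass only: interrupt description records are collected into
* the special IDT_LIST region, where the IDT/vector-map generation step
* reads them between the two passes. They never reach the final image;
* on the LINKER_PASS2 link they are discarded (see /DISCARD/ below).
*/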
SECTION_PROLOGUE(intList, (OPTIONAL),)
{
KEEP(*(.spurIsr))
KEEP(*(.spurNoErrIsr))
KEEP(*(.intList))
KEEP(*(.gnu.linkonce.intList.*))
} > IDT_LIST
#ifdef CONFIG_X86_MMU
/* Memory management unit */
SECTION_PROLOGUE(mmulist, (OPTIONAL),)
{
/* Get the size of the mmu lists needed for gen_mmu_x86.py */
LONG((__MMU_LIST_END__ - __MMU_LIST_START__) / __MMU_REGION_SIZEOF)
/* Get the start of the mmu tables in the data section so that the
* address of the page tables can be calculated.
*/
LONG(__mmu_tables_start)
__MMU_LIST_START__ = .;
KEEP(*(.mmulist))
__MMU_LIST_END__ = .;
} > MMU_LIST
#endif /* CONFIG_X86_MMU */
#else
/DISCARD/ :
{
KEEP(*(.spurIsr))
KEEP(*(.spurNoErrIsr))
KEEP(*(.intList))
KEEP(*(.gnu.linkonce.intList.*))
KEEP(*(.mmulist))
}
#endif
#ifdef CONFIG_CUSTOM_SECTIONS_LD
/* Located in project source directory */
#include <custom-sections.ld>
#endif
}
#ifdef CONFIG_XIP
/*
* Round up the number of words for the DATA section to ensure that XIP
* copies the entire data section. The XIP copy is done in words only, so up
* to 3 extra bytes may be copied into the next section (BSS). At run time,
* the XIP copy is done first, followed by clearing the BSS section.
*/
__data_size = (__data_ram_end - __data_ram_start);
__data_num_words = (__data_size + 3) >> 2;
#ifdef CONFIG_APPLICATION_MEMORY
__app_data_size = (__app_data_ram_end - __app_data_ram_start);
__app_data_num_words = (__app_data_size + 3) >> 2;
#endif
#endif
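/*
* Illustrative sketch only (not part of the build): one way early boot code
* could consume the copy/clear symbols defined above on an XIP build. The
* function and local variable names below are hypothetical; Zephyr's actual
* data-copy and BSS-clear routines live in the kernel init code (typically
* using memcpy()/memset()), not in this script.
*
*	extern unsigned int __data_ram_start[], __data_rom_start[];
*	extern unsigned int __bss_start[];
*	extern char __data_num_words[], __bss_num_words[];
*
*	static void copy_data_and_clear_bss(void)
*	{
*		unsigned int data_words = (unsigned int)__data_num_words;
*		unsigned int bss_words = (unsigned int)__bss_num_words;
*		unsigned int i;
*
*		for (i = 0; i < data_words; i++) {
*			__data_ram_start[i] = __data_rom_start[i];
*		}
*		for (i = 0; i < bss_words; i++) {
*			__bss_start[i] = 0;
*		}
*	}
*
* The word counts are already rounded up above, so the copy may touch up to
* 3 bytes past the end of the DATA image; the subsequent BSS clear makes
* that harmless.
*/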