llext: Support memory protection
Sets up memory partitions and allows them to be added to a memory domain after loading an extension. This makes it possible to apply memory protection attributes to all of the memory regions an extension needs to execute code correctly. Currently this only works when usermode is enabled, as the memory protection APIs are otherwise unavailable.

Signed-off-by: Tom Burdick <thomas.burdick@intel.com>
This commit is contained in:
parent 172bc0c238
commit 84e883b611
5 changed files with 174 additions and 7 deletions
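The memory-domain flow the commit message describes could be exercised roughly as follows. This is a minimal sketch, not part of the diff: it assumes the extension has already been loaded (for example via llext_load()) and that a user-mode thread ID is available; grant_extension_access(), ext_domain and user_tid are placeholder names.

#include <zephyr/kernel.h>
#include <zephyr/llext/llext.h>

static struct k_mem_domain ext_domain;

/* Give one user-mode thread access to every memory region of a loaded
 * extension by wrapping the extension's partitions in a dedicated domain.
 */
int grant_extension_access(struct llext *ext, k_tid_t user_tid)
{
	int ret;

	ret = k_mem_domain_init(&ext_domain, 0, NULL);
	if (ret != 0) {
		return ret;
	}

	/* New API from this change: adds the extension's text/rodata/data/bss
	 * partitions to the domain.
	 */
	ret = llext_add_domain(ext, &ext_domain);
	if (ret != 0) {
		return ret;
	}

	/* Threads added to the domain can then call into the extension. */
	return k_mem_domain_add_thread(&ext_domain, user_tid);
}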
@@ -17,6 +17,13 @@ LOG_MODULE_REGISTER(llext, CONFIG_LLEXT_LOG_LEVEL);
 
 #include <string.h>
 
+#ifdef CONFIG_MMU_PAGE_SIZE
+#define LLEXT_PAGE_SIZE CONFIG_MMU_PAGE_SIZE
+#else
+/* Arm's MPU wants a 32 byte minimum mpu region */
+#define LLEXT_PAGE_SIZE 32
+#endif
+
 K_HEAP_DEFINE(llext_heap, CONFIG_LLEXT_HEAP_SIZE * 1024);
 
 static const char ELF_MAGIC[] = {0x7f, 'E', 'L', 'F'};
@@ -251,6 +258,38 @@ static int llext_map_sections(struct llext_loader *ldr, struct llext *ext)
 	return 0;
 }
 
+/*
+ * Initialize the memory partition associated with the extension memory
+ */
+static void llext_init_mem_part(struct llext *ext, enum llext_mem mem_idx,
+				uintptr_t start, size_t len)
+{
+#ifdef CONFIG_USERSPACE
+	if (mem_idx < LLEXT_MEM_PARTITIONS) {
+		ext->mem_parts[mem_idx].start = start;
+		ext->mem_parts[mem_idx].size = len;
+
+		switch (mem_idx) {
+		case LLEXT_MEM_TEXT:
+			ext->mem_parts[mem_idx].attr = K_MEM_PARTITION_P_RX_U_RX;
+			break;
+		case LLEXT_MEM_DATA:
+		case LLEXT_MEM_BSS:
+			ext->mem_parts[mem_idx].attr = K_MEM_PARTITION_P_RW_U_RW;
+			break;
+		case LLEXT_MEM_RODATA:
+			ext->mem_parts[mem_idx].attr = K_MEM_PARTITION_P_RO_U_RO;
+			break;
+		default:
+			break;
+		}
+		LOG_DBG("mem partition %d start 0x%lx, size %d", mem_idx,
+			ext->mem_parts[mem_idx].start,
+			ext->mem_parts[mem_idx].size);
+	}
+#endif
+}
+
 static int llext_copy_section(struct llext_loader *ldr, struct llext *ext,
 			      enum llext_mem mem_idx)
 {
@@ -265,18 +304,41 @@ static int llext_copy_section(struct llext_loader *ldr, struct llext *ext,
 	    IS_ENABLED(CONFIG_LLEXT_STORAGE_WRITABLE)) {
 		ext->mem[mem_idx] = llext_peek(ldr, ldr->sects[mem_idx].sh_offset);
 		if (ext->mem[mem_idx]) {
+			llext_init_mem_part(ext, mem_idx, (uintptr_t)ext->mem[mem_idx],
+					    ldr->sects[mem_idx].sh_size);
 			ext->mem_on_heap[mem_idx] = false;
 			return 0;
 		}
 	}
 
-	ext->mem[mem_idx] = k_heap_aligned_alloc(&llext_heap, sizeof(uintptr_t),
-						 ldr->sects[mem_idx].sh_size,
+	/* On ARM with an MPU a pow(2, N)*32 sized and aligned region is needed,
+	 * otherwise its typically an mmu page (sized and aligned memory region)
+	 * we are after that we can assign memory permission bits on.
+	 */
+#ifndef CONFIG_ARM_MPU
+	const uintptr_t sect_alloc = ROUND_UP(ldr->sects[mem_idx].sh_size, LLEXT_PAGE_SIZE);
+	const uintptr_t sect_align = LLEXT_PAGE_SIZE;
+#else
+	uintptr_t sect_alloc = LLEXT_PAGE_SIZE;
+
+	while (sect_alloc < ldr->sects[mem_idx].sh_size) {
+		sect_alloc *= 2;
+	}
+
+	uintptr_t sect_align = sect_alloc;
+#endif
+
+	ext->mem[mem_idx] = k_heap_aligned_alloc(&llext_heap, sect_align,
+						 sect_alloc,
 						 K_NO_WAIT);
 	if (!ext->mem[mem_idx]) {
 		return -ENOMEM;
 	}
-	ext->alloc_size += ldr->sects[mem_idx].sh_size;
+
+	ext->alloc_size += sect_alloc;
+
+	llext_init_mem_part(ext, mem_idx, (uintptr_t)ext->mem[mem_idx],
+			    sect_alloc);
 
 	if (ldr->sects[mem_idx].sh_type == SHT_NOBITS) {
 		memset(ext->mem[mem_idx], 0, ldr->sects[mem_idx].sh_size);
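As an aside, the allocation sizing in the hunk above can be illustrated with a small standalone sketch; the 4096-byte page size and the 5000-byte section size below are arbitrary example values, not taken from this change.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same rounding Zephyr's ROUND_UP() performs, inlined so the example
 * compiles on its own.
 */
#define EXAMPLE_ROUND_UP(x, align) (((x) + (align) - 1) / (align) * (align))

int main(void)
{
	size_t sh_size = 5000;      /* example section size in bytes */

	/* MMU case: round the allocation up to a page, align to a page */
	uintptr_t page = 4096;      /* example CONFIG_MMU_PAGE_SIZE */
	uintptr_t mmu_alloc = EXAMPLE_ROUND_UP(sh_size, page);
	uintptr_t mmu_align = page;

	/* ARM MPU case: size and alignment must both be a power of two,
	 * grown from the 32-byte minimum region size.
	 */
	uintptr_t mpu_alloc = 32;

	while (mpu_alloc < sh_size) {
		mpu_alloc *= 2;
	}
	uintptr_t mpu_align = mpu_alloc;

	printf("MMU: alloc %zu align %zu\n", (size_t)mmu_alloc, (size_t)mmu_align);
	printf("MPU: alloc %zu align %zu\n", (size_t)mpu_alloc, (size_t)mpu_align);
	/* Prints: MMU: alloc 8192 align 4096, MPU: alloc 8192 align 8192 */
	return 0;
}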
@@ -769,6 +831,14 @@ static int do_llext_load(struct llext_loader *ldr, struct llext *ext,
 	ldr->sect_cnt = ldr->hdr.e_shnum;
 	ext->alloc_size += sect_map_sz;
 
+#ifdef CONFIG_USERSPACE
+	ret = k_mem_domain_init(&ext->mem_domain, 0, NULL);
+	if (ret != 0) {
+		LOG_ERR("Failed to initialize extension memory domain %d", ret);
+		goto out;
+	}
+#endif
+
 	LOG_DBG("Finding ELF tables...");
 	ret = llext_find_tables(ldr);
 	if (ret != 0) {
@@ -974,3 +1044,26 @@ int llext_call_fn(struct llext *ext, const char *sym_name)
 
 	return 0;
 }
+
+int llext_add_domain(struct llext *ext, struct k_mem_domain *domain)
+{
+#ifdef CONFIG_USERSPACE
+	int ret = 0;
+
+	for (int i = 0; i < LLEXT_MEM_PARTITIONS; i++) {
+		if (ext->mem_size[i] == 0) {
+			continue;
+		}
+		ret = k_mem_domain_add_partition(domain, &ext->mem_parts[i]);
+		if (ret != 0) {
+			LOG_ERR("Failed adding memory partition %d to domain %p",
+				i, domain);
+			return ret;
+		}
+	}
+
+	return ret;
+#else
+	return -ENOSYS;
+#endif
+}