From cd789a7ac71e26aedc5610b4b871b9d91d9fa001 Mon Sep 17 00:00:00 2001
From: Andrew Boie <andrew.p.boie@intel.com>
Date: Fri, 23 Oct 2020 16:58:25 -0700
Subject: [PATCH] x86: add logs for tuning page pool size

This will do until we can set up a proper page pool using
all unused ram for paging structures, heaps, and anonymous
mappings.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
---
 arch/x86/core/x86_mmu.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/arch/x86/core/x86_mmu.c b/arch/x86/core/x86_mmu.c
index 9263353531c..1f557405ef4 100644
--- a/arch/x86/core/x86_mmu.c
+++ b/arch/x86/core/x86_mmu.c
@@ -694,6 +694,12 @@ static void *page_pool_get(void)
 	return ret;
 }
 
+/* Debugging function to show how many pages are free in the pool */
+static inline unsigned int pages_free(void)
+{
+	return (page_pos - page_pool) / CONFIG_MMU_PAGE_SIZE;
+}
+
 /* Reset permissions on a PTE to original state when the mapping was made */
 static inline pentry_t reset_pte(pentry_t old_val)
 {
@@ -1011,6 +1017,10 @@ static int range_map(void *virt, uintptr_t phys, size_t size,
 #ifdef CONFIG_USERSPACE
 out_unlock:
 #endif /* CONFIG_USERSPACE */
+	if (ret == 0 && (options & OPTION_ALLOC) != 0) {
+		LOG_DBG("page pool pages free: %u / %u", pages_free(),
+			CONFIG_X86_MMU_PAGE_POOL_PAGES);
+	}
 	k_spin_unlock(&x86_mmu_lock, key);
 
 #ifdef CONFIG_SMP
@@ -1311,6 +1321,9 @@ int arch_mem_domain_init(struct k_mem_domain *domain)
 	if (ret == 0) {
 		sys_slist_append(&x86_domain_list, &domain->arch.node);
 	}
+
+	LOG_DBG("page pool pages free: %u / %u", pages_free(),
+		CONFIG_X86_MMU_PAGE_POOL_PAGES);
 	k_spin_unlock(&x86_mmu_lock, key);
 
 	return ret;