kernel: mmu: demand paging execution time histogram

This adds the bits to record execution time of eviction selection,
and backing store page-in/page-out in histograms.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
This commit is contained in:
Daniel Leung 2021-03-30 14:38:00 -07:00 committed by Anas Nashif
commit 8eea5119d7
5 changed files with 323 additions and 17 deletions

View file

@ -690,6 +690,27 @@ config DEMAND_PAGING_THREAD_STATS
Should say N in production system as this is not without cost.
config DEMAND_PAGING_TIMING_HISTOGRAM
bool "Gather Demand Paging Execution Timing Histogram"
depends on DEMAND_PAGING_STATS
help
This gathers the histogram of execution time on page eviction
selection, and backing store page in and page out.
Should say N in production system as this is not without cost.
config DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS
int "Number of bins (buckets) in Demand Paging Timing Histogram"
depends on DEMAND_PAGING_TIMING_HISTOGRAM
default 10
help
Defines the number of bins (buckets) in the histogram used for
gathering execution timing information for demand paging.
This requires z_eviction_histogram_bounds[] and
z_backing_store_histogram_bounds[] to define the upper bounds
for each bin. See kernel/statistics.c for information.
endif # DEMAND_PAGING
endif # MMU

View file

@ -107,6 +107,18 @@ struct k_mem_paging_stats_t {
#endif /* CONFIG_DEMAND_PAGING_STATS */
};
/* Execution time histogram for a demand paging operation.
 * Members only exist when timing histogram support is enabled.
 */
struct k_mem_paging_histogram_t {
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	/* Number of samples recorded in each bin of the timing histogram */
	unsigned long counts[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
	/* Inclusive upper bound (in hardware cycles) of each bin; one
	 * entry per bin. The last entry is conventionally ULONG_MAX so
	 * every sample lands in some bin.
	 */
	unsigned long bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
};
/* Just like Z_MEM_PHYS_ADDR() but with type safety and assertions */
static inline uintptr_t z_mem_phys_addr(void *virt)
{
@ -389,20 +401,56 @@ void k_mem_unpin(void *addr, size_t size);
__syscall void k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats);
#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
struct k_thread;
/**
* Get the paging statistics since system startup for a thread
*
* This populates the paging statistics struct being passed in
* as argument for a particular thread.
*
* @param[in] tid Thread ID
* @param[in] thread Thread
* @param[in,out] stats Paging statistics struct to be filled.
*/
__syscall
void k_mem_paging_thread_stats_get(k_tid_t tid,
void k_mem_paging_thread_stats_get(struct k_thread *thread,
struct k_mem_paging_stats_t *stats);
#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
/**
 * Get the eviction timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as argument.
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_eviction_get(
	struct k_mem_paging_histogram_t *hist);

/**
 * Get the backing store page-in timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as argument.
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_backing_store_page_in_get(
	struct k_mem_paging_histogram_t *hist);

/**
 * Get the backing store page-out timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as argument.
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_backing_store_page_out_get(
	struct k_mem_paging_histogram_t *hist);
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
#include <syscalls/mem_manage.h>
#endif /* CONFIG_DEMAND_PAGING_STATS */

View file

@ -249,6 +249,22 @@ void pm_system_resume(void);
#endif
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
/**
* Initialize the timing histograms for demand paging.
*/
void z_paging_histogram_init(void);
/**
* Increment the counter in the timing histogram.
*
* @param hist The timing histogram to be updated.
* @param cycles Time spent in measured operation.
*/
void z_paging_histogram_inc(struct k_mem_paging_histogram_t *hist,
uint32_t cycles);
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
#ifdef __cplusplus
}
#endif

View file

@ -279,6 +279,9 @@ static void frame_mapped_set(struct z_page_frame *pf, void *addr)
#ifdef CONFIG_DEMAND_PAGING
static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
bool page_in, uintptr_t *location_ptr);
static inline void do_backing_store_page_in(uintptr_t location);
static inline void do_backing_store_page_out(uintptr_t location);
#endif /* CONFIG_DEMAND_PAGING */
/* Allocate a free page frame, and map it to a specified virtual address
@ -313,7 +316,7 @@ static int map_anon_page(void *addr, uint32_t flags)
return -ENOMEM;
}
if (dirty) {
z_backing_store_page_out(location);
do_backing_store_page_out(location);
}
pf->flags = 0;
#else
@ -554,6 +557,9 @@ void z_mem_manage_init(void)
LOG_DBG("free page frames: %zu", z_free_page_count);
#ifdef CONFIG_DEMAND_PAGING
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
z_paging_histogram_init();
#endif
z_backing_store_init();
z_eviction_init();
#endif
@ -567,8 +573,49 @@ void z_mem_manage_init(void)
#ifdef CONFIG_DEMAND_PAGING_STATS
struct k_mem_paging_stats_t paging_stats;
extern struct k_mem_paging_histogram_t z_paging_histogram_eviction;
extern struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_in;
extern struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_out;
#endif
/* Wrapper around z_backing_store_page_in() which, when timing histogram
 * support is enabled, also records how many cycles the page-in took.
 */
static inline void do_backing_store_page_in(uintptr_t location)
{
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	const uint32_t start = k_cycle_get_32();
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */

	z_backing_store_page_in(location);

#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	z_paging_histogram_inc(&z_paging_histogram_backing_store_page_in,
			       k_cycle_get_32() - start);
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
}
/* Wrapper around z_backing_store_page_out() which, when timing histogram
 * support is enabled, also records how many cycles the page-out took.
 */
static inline void do_backing_store_page_out(uintptr_t location)
{
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	const uint32_t start = k_cycle_get_32();
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */

	z_backing_store_page_out(location);

#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	z_paging_histogram_inc(&z_paging_histogram_backing_store_page_out,
			       k_cycle_get_32() - start);
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
}
/* Current implementation relies on interrupt locking to any prevent page table
* access, which falls over if other CPUs are active. Addressing this is not
* as simple as using spinlocks as regular memory reads/writes constitute
@ -702,7 +749,7 @@ static int do_mem_evict(void *addr)
irq_unlock(key);
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
if (dirty) {
z_backing_store_page_out(location);
do_backing_store_page_out(location);
}
#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
key = irq_lock();
@ -776,7 +823,7 @@ int z_page_frame_evict(uintptr_t phys)
irq_unlock(key);
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
if (dirty) {
z_backing_store_page_out(location);
do_backing_store_page_out(location);
}
#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
key = irq_lock();
@ -849,6 +896,25 @@ static inline void paging_stats_eviction_inc(struct k_thread *faulting_thread,
#endif /* CONFIG_DEMAND_PAGING_STATS */
}
/* Wrapper around z_eviction_select() which, when timing histogram support
 * is enabled, also records how many cycles the selection took.
 */
static inline struct z_page_frame *do_eviction_select(bool *dirty)
{
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	const uint32_t start = k_cycle_get_32();
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
	struct z_page_frame *frame = z_eviction_select(dirty);

#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	z_paging_histogram_inc(&z_paging_histogram_eviction,
			       k_cycle_get_32() - start);
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */

	return frame;
}
static bool do_page_fault(void *addr, bool pin)
{
struct z_page_frame *pf;
@ -867,10 +933,7 @@ static bool do_page_fault(void *addr, bool pin)
/*
* TODO: Add performance accounting:
* - z_eviction_select() metrics
* * execution time histogram
* * periodic timer execution time histogram (if implemented)
* - z_backing_store_page_out() execution time histogram
* - z_backing_store_page_in() execution time histogram
*/
#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
@ -931,7 +994,7 @@ static bool do_page_fault(void *addr, bool pin)
pf = free_page_frame_list_get();
if (pf == NULL) {
/* Need to evict a page frame */
pf = z_eviction_select(&dirty);
pf = do_eviction_select(&dirty);
__ASSERT(pf != NULL, "failed to get a page frame");
LOG_DBG("evicting %p at 0x%lx", pf->addr,
z_page_frame_to_phys(pf));
@ -949,9 +1012,9 @@ static bool do_page_fault(void *addr, bool pin)
*/
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
if (dirty) {
z_backing_store_page_out(page_out_location);
do_backing_store_page_out(page_out_location);
}
z_backing_store_page_in(page_in_location);
do_backing_store_page_in(page_in_location);
#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
key = irq_lock();

View file

@ -12,6 +12,49 @@
extern struct k_mem_paging_stats_t paging_stats;
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
struct k_mem_paging_histogram_t z_paging_histogram_eviction;
struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_in;
struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_out;

/* Convert a time value into hardware cycles.
 *
 * Fully parenthesized so that expression arguments expand correctly:
 * the previous definition expanded the bare argument into the
 * multiplication, so NS_TO_CYC(a + b) evaluated as ... * a + b.
 *
 * NOTE(review): CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / 1000000U yields
 * cycles per MICROsecond, so despite the "NS" in the name the argument
 * is scaled as microseconds rather than nanoseconds -- confirm the
 * intended unit for the bin bounds below.
 */
#define NS_TO_CYC(ns) ((CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / 1000000U) * (ns))

/*
 * Upper bounds of the bins in the eviction timing histogram.
 * Declared __weak so boards/applications may override; the last entry
 * should be ULONG_MAX so that every sample falls into some bin.
 */
__weak unsigned long
z_eviction_histogram_bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS] = {
	NS_TO_CYC(1),
	NS_TO_CYC(5),
	NS_TO_CYC(10),
	NS_TO_CYC(50),
	NS_TO_CYC(100),
	NS_TO_CYC(200),
	NS_TO_CYC(500),
	NS_TO_CYC(1000),
	NS_TO_CYC(2000),
	ULONG_MAX
};

/*
 * Upper bounds of the bins in the backing store timing histogram
 * (shared by both page-in and page-out). Same override and terminator
 * rules as the eviction bounds above.
 */
__weak unsigned long
z_backing_store_histogram_bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS] = {
	NS_TO_CYC(10),
	NS_TO_CYC(100),
	NS_TO_CYC(125),
	NS_TO_CYC(250),
	NS_TO_CYC(500),
	NS_TO_CYC(1000),
	NS_TO_CYC(2000),
	NS_TO_CYC(5000),
	NS_TO_CYC(10000),
	ULONG_MAX
};
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
unsigned long z_num_pagefaults_get(void)
{
unsigned long ret;
@ -45,27 +88,142 @@ void z_vrfy_k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats)
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
void z_impl_k_mem_paging_thread_stats_get(k_tid_t tid,
void z_impl_k_mem_paging_thread_stats_get(struct k_thread *thread,
struct k_mem_paging_stats_t *stats)
{
if ((tid == NULL) || (stats == NULL)) {
if ((thread == NULL) || (stats == NULL)) {
return;
}
/* Copy statistics */
memcpy(stats, &tid->paging_stats, sizeof(tid->paging_stats));
memcpy(stats, &thread->paging_stats, sizeof(thread->paging_stats));
}
#ifdef CONFIG_USERSPACE
static inline
void z_vrfy_k_mem_paging_thread_stats_get(k_tid_t tid,
void z_vrfy_k_mem_paging_thread_stats_get(struct k_thread *thread,
struct k_mem_paging_stats_t *stats)
{
Z_OOPS(Z_SYSCALL_OBJ(tid, K_OBJ_THREAD));
Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
Z_OOPS(Z_SYSCALL_MEMORY_WRITE(stats, sizeof(*stats)));
z_impl_k_mem_paging_thread_stats_get(tid, stats);
z_impl_k_mem_paging_thread_stats_get(thread, stats);
}
#include <syscalls/k_mem_paging_thread_stats_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
void z_paging_histogram_init(void)
{
/*
* Zero out the histogram structs and copy the bounds.
* The copying is done as the histogram structs need
* to be pinned in memory and never swapped out, while
* the source bound array may not be pinned.
*/
memset(&z_paging_histogram_eviction, 0, sizeof(z_paging_histogram_eviction));
memcpy(z_paging_histogram_eviction.bounds, z_eviction_histogram_bounds,
sizeof(z_paging_histogram_eviction.bounds));
memset(&z_paging_histogram_backing_store_page_in, 0,
sizeof(z_paging_histogram_backing_store_page_in));
memcpy(z_paging_histogram_backing_store_page_in.bounds,
z_backing_store_histogram_bounds,
sizeof(z_paging_histogram_backing_store_page_in.bounds));
memset(&z_paging_histogram_backing_store_page_out, 0,
sizeof(z_paging_histogram_backing_store_page_out));
memcpy(z_paging_histogram_backing_store_page_out.bounds,
z_backing_store_histogram_bounds,
sizeof(z_paging_histogram_backing_store_page_out.bounds));
}
/**
 * Increment the counter in the timing histogram.
 *
 * Finds the first bin whose (inclusive) upper bound is >= @a cycles and
 * increments its count. A sample exceeding every configured bound is
 * accumulated in the last bin instead of being silently dropped — this
 * can only happen when an overriding __weak bounds array omits the
 * ULONG_MAX sentinel in its final entry; with the default bounds the
 * behavior is unchanged.
 *
 * @param hist The timing histogram to be updated.
 * @param cycles Time spent in measured operation.
 */
void z_paging_histogram_inc(struct k_mem_paging_histogram_t *hist,
			    uint32_t cycles)
{
	int idx;

	for (idx = 0;
	     idx < CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS;
	     idx++) {
		if (cycles <= hist->bounds[idx]) {
			hist->counts[idx]++;
			return;
		}
	}

	/* No bound matched: attribute the sample to the final bin. */
	hist->counts[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS - 1]++;
}
void z_impl_k_mem_paging_histogram_eviction_get(
struct k_mem_paging_histogram_t *hist)
{
if (hist == NULL) {
return;
}
/* Copy statistics */
memcpy(hist, &z_paging_histogram_eviction,
sizeof(z_paging_histogram_eviction));
}
void z_impl_k_mem_paging_histogram_backing_store_page_in_get(
struct k_mem_paging_histogram_t *hist)
{
if (hist == NULL) {
return;
}
/* Copy histogram */
memcpy(hist, &z_paging_histogram_backing_store_page_in,
sizeof(z_paging_histogram_backing_store_page_in));
}
void z_impl_k_mem_paging_histogram_backing_store_page_out_get(
struct k_mem_paging_histogram_t *hist)
{
if (hist == NULL) {
return;
}
/* Copy histogram */
memcpy(hist, &z_paging_histogram_backing_store_page_out,
sizeof(z_paging_histogram_backing_store_page_out));
}
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: checks the user-supplied buffer is
 * writable before delegating to the kernel-side implementation.
 */
static inline
void z_vrfy_k_mem_paging_histogram_eviction_get(
	struct k_mem_paging_histogram_t *hist)
{
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(hist, sizeof(*hist)));
	z_impl_k_mem_paging_histogram_eviction_get(hist);
}
/* Generated marshalling code for this syscall. */
#include <syscalls/k_mem_paging_histogram_eviction_get_mrsh.c>

/* Same write-access check for the backing store page-in histogram. */
static inline
void z_vrfy_k_mem_paging_histogram_backing_store_page_in_get(
	struct k_mem_paging_histogram_t *hist)
{
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(hist, sizeof(*hist)));
	z_impl_k_mem_paging_histogram_backing_store_page_in_get(hist);
}
#include <syscalls/k_mem_paging_histogram_backing_store_page_in_get_mrsh.c>

/* Same write-access check for the backing store page-out histogram. */
static inline
void z_vrfy_k_mem_paging_histogram_backing_store_page_out_get(
	struct k_mem_paging_histogram_t *hist)
{
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(hist, sizeof(*hist)));
	z_impl_k_mem_paging_histogram_backing_store_page_out_get(hist);
}
#include <syscalls/k_mem_paging_histogram_backing_store_page_out_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */