diff --git a/kernel/Kconfig b/kernel/Kconfig
index 0319a933a70..a912bda3e6d 100644
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -276,7 +276,7 @@ choice DYNAMIC_THREAD_PREFER
 	help
 	  If both CONFIG_DYNAMIC_THREAD_ALLOC=y and
 	  CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0, then the user may
-	  specify the order in which allocation is attmpted.
+	  specify the order in which allocation is attempted.
 
 config DYNAMIC_THREAD_PREFER_ALLOC
 	bool "Prefer heap-based allocation"
diff --git a/kernel/Kconfig.smp b/kernel/Kconfig.smp
index 624d13c0670..a7af7ff75cb 100644
--- a/kernel/Kconfig.smp
+++ b/kernel/Kconfig.smp
@@ -96,7 +96,7 @@ config IPI_OPTIMIZE
 	  O(N) in the number of CPUs, and in exchange reduces the number of
 	  interrupts delivered. Which to choose is going to depend on
 	  application behavior. If the architecture also supports directing
-	  IPIs to specific CPUs then this has the potential to signficantly
+	  IPIs to specific CPUs then this has the potential to significantly
 	  reduce the number of IPIs (and consequently ISRs) processed by the
 	  system as the number of CPUs increases. If not, the only benefit
 	  would be to not issue any IPIs if the newly readied thread is of
diff --git a/kernel/dynamic.c b/kernel/dynamic.c
index a9ec33d87bd..a17a84f724a 100644
--- a/kernel/dynamic.c
+++ b/kernel/dynamic.c
@@ -169,7 +169,7 @@ static inline int z_vrfy_k_thread_stack_free(k_thread_stack_t *stack)
 	/* The thread stack object must not be in initialized state.
 	 *
 	 * Thread stack objects are initialized when the thread is created
-	 * and de-initialized whent the thread is destroyed. Since we can't
+	 * and de-initialized when the thread is destroyed. Since we can't
 	 * free a stack that is in use, we have to check that the caller
 	 * has access to the object but that it is not in use anymore.
 	 */
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index a25755aac27..7d5a880a229 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -277,7 +277,7 @@ int z_sched_waitq_walk(_wait_q_t *wait_q,
  *
  * This function assumes local interrupts are masked (so that the
  * current CPU pointer and current thread are safe to modify), but
- * requires no other synchronizaton. Architecture layers don't need
+ * requires no other synchronization. Architecture layers don't need
  * to do anything more.
  */
 void z_sched_usage_stop(void);
diff --git a/kernel/mmu.c b/kernel/mmu.c
index f38d22ab784..5b080dd4e9a 100644
--- a/kernel/mmu.c
+++ b/kernel/mmu.c
@@ -615,7 +615,7 @@ void *k_mem_map_phys_guard(uintptr_t phys, size_t size, uint32_t flags, bool is_
 	dst += CONFIG_MMU_PAGE_SIZE;
 
 	if (is_anon) {
-		/* Mapping from annoymous memory */
+		/* Mapping from anonymous memory */
 		VIRT_FOREACH(dst, size, pos) {
 			ret = map_anon_page(pos, flags);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 501b2736157..72d52a5854a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -685,7 +685,7 @@ int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
 	/* We do a "lock swap" prior to calling z_swap(), such that
 	 * the caller's lock gets released as desired. But we ensure
 	 * that we hold the scheduler lock and leave local interrupts
-	 * masked until we reach the context swich. z_swap() itself
+	 * masked until we reach the context switch. z_swap() itself
 	 * has similar code; the duplication is because it's a legacy
 	 * API that doesn't expect to be called with scheduler lock
 	 * held.
diff --git a/kernel/timeout.c b/kernel/timeout.c
index 1a8a361457a..f751c2f20a5 100644
--- a/kernel/timeout.c
+++ b/kernel/timeout.c
@@ -213,7 +213,7 @@ void sys_clock_announce(int32_t ticks)
 
 	/* We release the lock around the callbacks below, so on SMP
 	 * systems someone might be already running the loop. Don't
-	 * race (which will cause paralllel execution of "sequential"
+	 * race (which will cause parallel execution of "sequential"
 	 * timeouts and confuse apps), just increment the tick count
 	 * and return.
 	 */