kernel: fix spelling errors
Fix spelling errors found in comments of the kernel source code.

Signed-off-by: Nguyen Minh Thien <nguyenmthien@live.com>
parent 6aedc06982
commit 8188be57d3

7 changed files with 8 additions and 8 deletions
@@ -75,7 +75,7 @@ void idle(void *unused1, void *unused2, void *unused3)
  *
  * This function is entered with interrupts disabled.
  * If a low power state was entered, then the hook
- * function should enable inerrupts before exiting.
+ * function should enable interrupts before exiting.
  * This is because the kernel does not do its own idle
  * processing in those cases i.e. skips k_cpu_idle().
  * The kernel's idle processing re-enables interrupts

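For context, the comment corrected above describes the contract of the low-power hook called from the idle thread. The following is a minimal, hypothetical sketch of that control flow only; `enter_low_power()` is a placeholder name, not the actual Zephyr PM API, and the real `idle()` in the kernel is more involved.

```c
#include <zephyr/kernel.h>

/* Hedged sketch: enter_low_power() is a hypothetical stand-in for the
 * platform hook described in the comment, not a real Zephyr function.
 * It should return true if a low power state was entered, in which case
 * it must re-enable interrupts itself before returning, because the
 * kernel will then skip k_cpu_idle().
 */
static bool enter_low_power(void)
{
	return false;
}

void idle_loop_sketch(void)
{
	for (;;) {
		/* Reached with interrupts disabled. */
		if (!enter_low_power()) {
			/* No low power state entered: the kernel's own idle
			 * processing (k_cpu_idle()) re-enables interrupts.
			 */
			k_cpu_idle();
		}
	}
}
```
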
@@ -192,7 +192,7 @@ int arch_float_disable(struct k_thread *thread);
  *
  * The function is used to enable the preservation of floating
  * point context information for a particular thread.
- * This API depends on each architecture implimentation. If the architecture
+ * This API depends on each architecture implementation. If the architecture
  * does not support enabling, this API will always be failed.
  *
  * The @a options parameter indicates which floating point register sets will

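The comment above documents the architecture-level enable hook. A hedged usage sketch of the public `k_float_enable()` wrapper built on top of it could look like the following; the function and the error handling are illustrative, and the `K_FP_REGS` option depends on the architecture and FPU-sharing configuration.

```c
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

/* Illustrative only: enable FP context preservation for a thread and
 * check the result, since ports that do not support run-time enabling
 * report failure.
 */
void enable_fp_for(struct k_thread *thread)
{
	int ret = k_float_enable(thread, K_FP_REGS);

	if (ret != 0) {
		printk("FP enable not supported on this arch: %d\n", ret);
	}
}
```
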
@@ -375,8 +375,8 @@ int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
  * waiting thread while holding sched_spinlock. This can be useful for routines
  * that need to operate on multiple waiting threads.
  *
- * CAUTION! As a wait queue is of indeterminant length, the scheduler will be
- * locked for an indeterminant amount of time. This may impact system
+ * CAUTION! As a wait queue is of indeterminate length, the scheduler will be
+ * locked for an indeterminate amount of time. This may impact system
  * performance. As such, care must be taken when using both this function and
  * the specified callback.
  *

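Because the comment warns that the callback runs for every waiting thread while sched_spinlock is held, a hedged sketch of such a callback (the name and the exact walker signature here are hypothetical, not the actual internal interface) is simply something short and non-blocking:

```c
#include <zephyr/kernel.h>

/* Hypothetical callback shape for a wait-queue walk as described above.
 * The body must stay short and must not block, because the scheduler
 * lock is held for the entire walk.
 */
static int count_waiter(struct k_thread *waiter, void *user_data)
{
	unsigned int *count = user_data;

	ARG_UNUSED(waiter);
	(*count)++;

	return 0; /* continue walking the wait queue */
}
```
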
@@ -452,7 +452,7 @@ static int init_mailbox_obj_core_list(void)
 	z_obj_type_init(&obj_type_mailbox, K_OBJ_TYPE_MBOX_ID,
 			offsetof(struct k_mbox, obj_core));

-	/* Initialize and link satically defined mailboxes */
+	/* Initialize and link statically defined mailboxes */

 	STRUCT_SECTION_FOREACH(k_mbox, mbox) {
 		k_obj_core_init_and_link(K_OBJ_CORE(mbox), &obj_type_mailbox);

@@ -301,7 +301,7 @@ static int init_mutex_obj_core_list(void)
 	z_obj_type_init(&obj_type_mutex, K_OBJ_TYPE_MUTEX_ID,
 			offsetof(struct k_mutex, obj_core));

-	/* Initialize and link statically defined mutexs */
+	/* Initialize and link statically defined mutexes */

 	STRUCT_SECTION_FOREACH(k_mutex, mutex) {
 		k_obj_core_init_and_link(K_OBJ_CORE(mutex), &obj_type_mutex);

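Both loops above iterate objects that the static definition macros place into iterable sections. A hedged example of what "statically defined" means in these comments (object names are illustrative):

```c
#include <zephyr/kernel.h>

/* Illustrative static definitions: K_MBOX_DEFINE() and K_MUTEX_DEFINE()
 * create initialized objects in iterable sections, which is what the
 * STRUCT_SECTION_FOREACH() loops above walk at boot to link each object
 * into the object core lists.
 */
K_MBOX_DEFINE(my_mailbox);
K_MUTEX_DEFINE(my_mutex);
```
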
@@ -551,7 +551,7 @@ static void unref_check(struct k_object *ko, uintptr_t index)

 	/* This object has no more references. Some objects may have
 	 * dynamically allocated resources, require cleanup, or need to be
-	 * marked as uninitailized when all references are gone. What
+	 * marked as uninitialized when all references are gone. What
 	 * specifically needs to happen depends on the object type.
 	 */
 	switch (ko->type) {

@@ -92,7 +92,7 @@ static inline void init_work_cancel(struct z_work_canceller *canceler,
 	sys_slist_append(&pending_cancels, &canceler->node);
 }

-/* Comeplete flushing of a work item.
+/* Complete flushing of a work item.
  *
  * Invoked with work lock held.
  *

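The helper whose comment is fixed above is the internal side of flushing. From a caller's perspective, a hedged usage sketch of the public flush API it supports (work item and handler names are illustrative) looks like this:

```c
#include <zephyr/kernel.h>

/* Illustrative work item whose completion we want to wait for. */
static void sample_handler(struct k_work *work)
{
	ARG_UNUSED(work);
	/* ... deferred processing ... */
}

K_WORK_DEFINE(sample_work, sample_handler);

void wait_for_sample_work(void)
{
	struct k_work_sync sync;

	k_work_submit(&sample_work);

	/* Blocks until any in-flight invocation of sample_handler() has
	 * completed; returns true if it actually had to wait.
	 */
	(void)k_work_flush(&sample_work, &sync);
}
```
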