kernel/arch: cleanup function definitions

Make the identifiers used in a function's declaration and its definition identical.
This is based on MISRA C rule 8.3.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Anas Nashif 2021-03-29 10:54:23 -04:00
commit 25c87db860
12 changed files with 100 additions and 100 deletions
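For readers unfamiliar with the rule: MISRA C:2012 Rule 8.3 requires that all declarations and the definition of a function use the same names (and type qualifiers), so a definition whose parameter identifiers differ from the prototype in the header is non-compliant. A minimal sketch of the pattern being cleaned up, using hypothetical file and function names rather than code from this commit:

/* scale.h -- the prototype names the parameters "value" and "factor". */
int scale_value(int value, int factor);

/*
 * scale.c -- before a cleanup like this one, the definition might read
 *     int scale_value(int val, int mult) { return val * mult; }
 * which violates Rule 8.3 because the identifiers differ from the
 * declaration. The compliant definition reuses the declared names:
 */
int scale_value(int value, int factor)
{
	return value * factor;
}

The hunks below apply the same idea across the kernel and arch code: declarations, definitions, and their doxygen @param tags are adjusted so both sides of each interface use the same identifier.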

View file

@@ -40,18 +40,18 @@ void z_x86_spurious_irq(const z_arch_esf_t *esf)
 	z_x86_fatal_error(K_ERR_SPURIOUS_IRQ, esf);
 }
-void arch_syscall_oops(void *ssf_ptr)
+void arch_syscall_oops(void *ssf)
 {
-	struct _x86_syscall_stack_frame *ssf =
-		(struct _x86_syscall_stack_frame *)ssf_ptr;
+	struct _x86_syscall_stack_frame *ssf_ptr =
+		(struct _x86_syscall_stack_frame *)ssf;
 	z_arch_esf_t oops = {
-		.eip = ssf->eip,
-		.cs = ssf->cs,
-		.eflags = ssf->eflags
+		.eip = ssf_ptr->eip,
+		.cs = ssf_ptr->cs,
+		.eflags = ssf_ptr->eflags
 	};
 	if (oops.cs == USER_CODE_SEG) {
-		oops.esp = ssf->esp;
+		oops.esp = ssf_ptr->esp;
 	}
 	z_x86_fatal_error(K_ERR_KERNEL_OOPS, &oops);

View file

@@ -726,18 +726,18 @@ const char *device_pm_state_str(uint32_t state);
  * Called by a device driver to indicate that it is in the middle of a
  * transaction.
  *
- * @param busy_dev Pointer to device structure of the driver instance.
+ * @param dev Pointer to device structure of the driver instance.
  */
-void device_busy_set(const struct device *busy_dev);
+void device_busy_set(const struct device *dev);
 /**
  * @brief Indicate that the device has completed its transaction
  *
  * Called by a device driver to indicate the end of a transaction.
  *
- * @param busy_dev Pointer to device structure of the driver instance.
+ * @param dev Pointer to device structure of the driver instance.
  */
-void device_busy_clear(const struct device *busy_dev);
+void device_busy_clear(const struct device *dev);
 #ifdef CONFIG_PM_DEVICE
 /*

View file

@@ -51,7 +51,7 @@ struct init_entry {
 	const struct device *dev;
 };
-void z_sys_init_run_level(int32_t _level);
+void z_sys_init_run_level(int32_t level);
 /* A counter is used to avoid issues when two or more system devices
  * are declared in the same C file with the same init function.

View file

@@ -1016,29 +1016,29 @@ __syscall void *k_thread_custom_data_get(void);
  * Set the name of the thread to be used when @option{CONFIG_THREAD_MONITOR}
  * is enabled for tracing and debugging.
  *
- * @param thread_id Thread to set name, or NULL to set the current thread
- * @param value Name string
+ * @param thread Thread to set name, or NULL to set the current thread
+ * @param str Name string
  * @retval 0 on success
  * @retval -EFAULT Memory access error with supplied string
  * @retval -ENOSYS Thread name configuration option not enabled
  * @retval -EINVAL Thread name too long
  */
-__syscall int k_thread_name_set(k_tid_t thread_id, const char *value);
+__syscall int k_thread_name_set(k_tid_t thread, const char *str);
 /**
  * @brief Get thread name
  *
  * Get the name of a thread
  *
- * @param thread_id Thread ID
+ * @param thread Thread ID
  * @retval Thread name, or NULL if configuration not enabled
  */
-const char *k_thread_name_get(k_tid_t thread_id);
+const char *k_thread_name_get(k_tid_t thread);
 /**
  * @brief Copy the thread name into a supplied buffer
  *
- * @param thread_id Thread to obtain name information
+ * @param thread Thread to obtain name information
  * @param buf Destination buffer
  * @param size Destination buffer size
  * @retval -ENOSPC Destination buffer too small
@@ -1046,7 +1046,7 @@ const char *k_thread_name_get(k_tid_t thread_id);
  * @retval -ENOSYS Thread name feature not enabled
  * @retval 0 Success
  */
-__syscall int k_thread_name_copy(k_tid_t thread_id, char *buf,
+__syscall int k_thread_name_copy(k_tid_t thread, char *buf,
 				size_t size);
 /**
@@ -4207,14 +4207,14 @@ struct k_msgq_attrs {
  * that each message is similarly aligned to this boundary, @a q_msg_size
  * must also be a multiple of N.
  *
- * @param q Address of the message queue.
+ * @param msgq Address of the message queue.
  * @param buffer Pointer to ring buffer that holds queued messages.
 * @param msg_size Message size (in bytes).
 * @param max_msgs Maximum number of messages that can be queued.
 *
 * @return N/A
 */
-void k_msgq_init(struct k_msgq *q, char *buffer, size_t msg_size,
+void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
		uint32_t max_msgs);
 /**
/** /**

View file

@@ -65,11 +65,11 @@ struct z_heap_stress_result {
  *
  * Initializes a sys_heap struct to manage the specified memory.
  *
- * @param h Heap to initialize
+ * @param heap Heap to initialize
  * @param mem Untyped pointer to unused memory
  * @param bytes Size of region pointed to by @a mem
  */
-void sys_heap_init(struct sys_heap *h, void *mem, size_t bytes);
+void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes);
 /** @brief Allocate memory from a sys_heap
  *
@@ -84,11 +84,11 @@ void sys_heap_init(struct sys_heap *h, void *mem, size_t bytes);
  * No two sys_heap functions should operate on the same heap at the
  * same time. All locking must be provided by the user.
  *
- * @param h Heap from which to allocate
+ * @param heap Heap from which to allocate
  * @param bytes Number of bytes requested
  * @return Pointer to memory the caller can now use
  */
-void *sys_heap_alloc(struct sys_heap *h, size_t bytes);
+void *sys_heap_alloc(struct sys_heap *heap, size_t bytes);
 /** @brief Allocate aligned memory from a sys_heap
  *
@@ -98,12 +98,12 @@ void *sys_heap_alloc(struct sys_heap *h, size_t bytes);
  * bytes. With align=0 this behaves exactly like sys_heap_alloc().
  * The resulting memory can be returned to the heap using sys_heap_free().
  *
- * @param h Heap from which to allocate
+ * @param heap Heap from which to allocate
  * @param align Alignment in bytes, must be a power of two
  * @param bytes Number of bytes requested
  * @return Pointer to memory the caller can now use
  */
-void *sys_heap_aligned_alloc(struct sys_heap *h, size_t align, size_t bytes);
+void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes);
 /** @brief Free memory into a sys_heap
  *
@@ -115,10 +115,10 @@ void *sys_heap_aligned_alloc(struct sys_heap *h, size_t align, size_t bytes);
  * No two sys_heap functions should operate on the same heap at the
  * same time. All locking must be provided by the user.
  *
- * @param h Heap to which to return the memory
+ * @param heap Heap to which to return the memory
  * @param mem A pointer previously returned from sys_heap_alloc()
  */
-void sys_heap_free(struct sys_heap *h, void *mem);
+void sys_heap_free(struct sys_heap *heap, void *mem);
 /** @brief Expand the size of an existing allocation
  *
@@ -159,10 +159,10 @@ void *sys_heap_aligned_realloc(struct sys_heap *heap, void *ptr,
  * handle any sys_heap_alloc() request and free any live pointer
  * returned from a previou allocation.
  *
- * @param h Heap to validate
+ * @param heap Heap to validate
  * @return true, if the heap is valid, otherwise false
  */
-bool sys_heap_validate(struct sys_heap *h);
+bool sys_heap_validate(struct sys_heap *heap);
 /** @brief sys_heap stress test rig
  *
@@ -175,9 +175,9 @@ bool sys_heap_validate(struct sys_heap *h);
  * Results, including counts of frees and successful/unsuccessful
  * allocations, are returnewd via the @result struct.
  *
- * @param alloc Callback to perform an allocation. Passes back the @a
+ * @param alloc_fn Callback to perform an allocation. Passes back the @a
  * arg parameter as a context handle.
- * @param free Callback to perform a free of a pointer returned from
+ * @param free_fn Callback to perform a free of a pointer returned from
  * @a alloc. Passes back the @a arg parameter as a
  * context handle.
  * @param arg Context handle to pass back to the callbacks
@@ -193,8 +193,8 @@ bool sys_heap_validate(struct sys_heap *h);
  * failures and a very fragmented heap.
  * @param result Struct into which to store test results.
  */
-void sys_heap_stress(void *(*alloc)(void *arg, size_t bytes),
-		void (*free)(void *arg, void *p),
+void sys_heap_stress(void *(*alloc_fn)(void *arg, size_t bytes),
+		void (*free_fn)(void *arg, void *p),
 		void *arg, size_t total_bytes,
 		uint32_t op_count,
 		void *scratch_mem, size_t scratch_bytes,
@@ -206,9 +206,9 @@ void sys_heap_stress(void *(*alloc)(void *arg, size_t bytes),
  * Print information on the heap structure such as its size, chunk buckets,
  * chunk list and some statistics for debugging purpose.
  *
- * @param h Heap to print information about
+ * @param heap Heap to print information about
  * @param dump_chunks True to print the entire heap chunk list
  */
-void sys_heap_print_info(struct sys_heap *h, bool dump_chunks);
+void sys_heap_print_info(struct sys_heap *heap, bool dump_chunks);
 #endif /* ZEPHYR_INCLUDE_SYS_SYS_HEAP_H_ */

View file

@@ -22,9 +22,9 @@ extern "C" {
 #ifdef CONFIG_SYS_CLOCK_EXISTS
-static inline void z_init_timeout(struct _timeout *t)
+static inline void z_init_timeout(struct _timeout *to)
 {
-	sys_dnode_init(&t->node);
+	sys_dnode_init(&to->node);
 }
 void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
@@ -32,9 +32,9 @@ void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
 int z_abort_timeout(struct _timeout *to);
-static inline bool z_is_inactive_timeout(const struct _timeout *t)
+static inline bool z_is_inactive_timeout(const struct _timeout *to)
 {
-	return !sys_dnode_is_linked(&t->node);
+	return !sys_dnode_is_linked(&to->node);
 }
 static inline void z_init_thread_timeout(struct _thread_base *thread_base)
@@ -42,11 +42,11 @@ static inline void z_init_thread_timeout(struct _thread_base *thread_base)
 	z_init_timeout(&thread_base->timeout);
 }
-extern void z_thread_timeout(struct _timeout *to);
+extern void z_thread_timeout(struct _timeout *timeout);
-static inline void z_add_thread_timeout(struct k_thread *th, k_timeout_t ticks)
+static inline void z_add_thread_timeout(struct k_thread *thread, k_timeout_t ticks)
 {
-	z_add_timeout(&th->base.timeout, z_thread_timeout, ticks);
+	z_add_timeout(&thread->base.timeout, z_thread_timeout, ticks);
 }
 static inline int z_abort_thread_timeout(struct k_thread *thread)
@@ -56,22 +56,22 @@ static inline int z_abort_thread_timeout(struct k_thread *thread)
 int32_t z_get_next_timeout_expiry(void);
-void z_set_timeout_expiry(int32_t ticks, bool idle);
+void z_set_timeout_expiry(int32_t ticks, bool is_idle);
 k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
 #else
 /* Stubs when !CONFIG_SYS_CLOCK_EXISTS */
-#define z_init_thread_timeout(t) do {} while (false)
-#define z_abort_thread_timeout(t) (0)
-#define z_is_inactive_timeout(t) 0
+#define z_init_thread_timeout(thread_base) do {} while (false)
+#define z_abort_thread_timeout(to) (0)
+#define z_is_inactive_timeout(to) 0
 #define z_get_next_timeout_expiry() ((int32_t) K_TICKS_FOREVER)
-#define z_set_timeout_expiry(t, i) do {} while (false)
+#define z_set_timeout_expiry(ticks, is_idle) do {} while (false)
-static inline void z_add_thread_timeout(struct k_thread *th, k_timeout_t ticks)
+static inline void z_add_thread_timeout(struct k_thread *thread, k_timeout_t ticks)
 {
-	ARG_UNUSED(th);
+	ARG_UNUSED(thread);
 	ARG_UNUSED(ticks);
 }

View file

@@ -275,12 +275,12 @@ static inline bool arch_is_in_isr(void);
  * This API is part of infrastructure still under development and may
  * change.
  *
- * @param dest Page-aligned Destination virtual address to map
- * @param addr Page-aligned Source physical address to map
+ * @param virt Page-aligned Destination virtual address to map
+ * @param phys Page-aligned Source physical address to map
  * @param size Page-aligned size of the mapped memory region in bytes
  * @param flags Caching, access and control flags, see K_MAP_* macros
  */
-void arch_mem_map(void *dest, uintptr_t addr, size_t size, uint32_t flags);
+void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags);
 /**
  * Remove mappings for a provided virtual address range

View file

@@ -97,12 +97,12 @@ SYS_INIT(init_mbox_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
 #endif /* CONFIG_NUM_MBOX_ASYNC_MSGS or CONFIG_OBJECT_TRACING */
-void k_mbox_init(struct k_mbox *mbox_ptr)
+void k_mbox_init(struct k_mbox *mbox)
 {
-	z_waitq_init(&mbox_ptr->tx_msg_queue);
-	z_waitq_init(&mbox_ptr->rx_msg_queue);
-	mbox_ptr->lock = (struct k_spinlock) {};
-	SYS_TRACING_OBJ_INIT(k_mbox, mbox_ptr);
+	z_waitq_init(&mbox->tx_msg_queue);
+	z_waitq_init(&mbox->rx_msg_queue);
+	mbox->lock = (struct k_spinlock) {};
+	SYS_TRACING_OBJ_INIT(k_mbox, mbox);
 }
 /**

View file

@@ -466,16 +466,16 @@ fail:
  * Miscellaneous
  */
-size_t k_mem_region_align(uintptr_t *aligned_phys, size_t *aligned_size,
-			uintptr_t phys_addr, size_t size, size_t align)
+size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
+			uintptr_t addr, size_t size, size_t align)
 {
 	size_t addr_offset;
 	/* The actual mapped region must be page-aligned. Round down the
 	 * physical address and pad the region size appropriately
 	 */
-	*aligned_phys = ROUND_DOWN(phys_addr, align);
-	addr_offset = phys_addr - *aligned_phys;
+	*aligned_addr = ROUND_DOWN(addr, align);
+	addr_offset = addr - *aligned_addr;
 	*aligned_size = ROUND_UP(size + addr_offset, align);
 	return addr_offset;

View file

@@ -89,12 +89,12 @@ int z_impl_k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
 }
 #ifdef CONFIG_USERSPACE
-int z_vrfy_k_msgq_alloc_init(struct k_msgq *q, size_t msg_size,
+int z_vrfy_k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
		uint32_t max_msgs)
 {
-	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(q, K_OBJ_MSGQ));
-	return z_impl_k_msgq_alloc_init(q, msg_size, max_msgs);
+	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(msgq, K_OBJ_MSGQ));
+	return z_impl_k_msgq_alloc_init(msgq, msg_size, max_msgs);
 }
 #include <syscalls/k_msgq_alloc_init_mrsh.c>
 #endif
@@ -160,13 +160,13 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout
 }
 #ifdef CONFIG_USERSPACE
-static inline int z_vrfy_k_msgq_put(struct k_msgq *q, const void *data,
+static inline int z_vrfy_k_msgq_put(struct k_msgq *msgq, const void *data,
		k_timeout_t timeout)
 {
-	Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
-	Z_OOPS(Z_SYSCALL_MEMORY_READ(data, q->msg_size));
-	return z_impl_k_msgq_put(q, data, timeout);
+	Z_OOPS(Z_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
+	Z_OOPS(Z_SYSCALL_MEMORY_READ(data, msgq->msg_size));
+	return z_impl_k_msgq_put(msgq, data, timeout);
 }
 #include <syscalls/k_msgq_put_mrsh.c>
 #endif
@@ -179,12 +179,12 @@ void z_impl_k_msgq_get_attrs(struct k_msgq *msgq, struct k_msgq_attrs *attrs)
 }
 #ifdef CONFIG_USERSPACE
-static inline void z_vrfy_k_msgq_get_attrs(struct k_msgq *q,
+static inline void z_vrfy_k_msgq_get_attrs(struct k_msgq *msgq,
		struct k_msgq_attrs *attrs)
 {
-	Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
+	Z_OOPS(Z_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
 	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(attrs, sizeof(struct k_msgq_attrs)));
-	z_impl_k_msgq_get_attrs(q, attrs);
+	z_impl_k_msgq_get_attrs(msgq, attrs);
 }
 #include <syscalls/k_msgq_get_attrs_mrsh.c>
 #endif
@@ -242,13 +242,13 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout)
 }
 #ifdef CONFIG_USERSPACE
-static inline int z_vrfy_k_msgq_get(struct k_msgq *q, void *data,
+static inline int z_vrfy_k_msgq_get(struct k_msgq *msgq, void *data,
		k_timeout_t timeout)
 {
-	Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
-	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, q->msg_size));
-	return z_impl_k_msgq_get(q, data, timeout);
+	Z_OOPS(Z_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
+	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, msgq->msg_size));
+	return z_impl_k_msgq_get(msgq, data, timeout);
 }
 #include <syscalls/k_msgq_get_mrsh.c>
 #endif
@@ -275,12 +275,12 @@ int z_impl_k_msgq_peek(struct k_msgq *msgq, void *data)
 }
 #ifdef CONFIG_USERSPACE
-static inline int z_vrfy_k_msgq_peek(struct k_msgq *q, void *data)
+static inline int z_vrfy_k_msgq_peek(struct k_msgq *msgq, void *data)
 {
-	Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
-	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, q->msg_size));
-	return z_impl_k_msgq_peek(q, data);
+	Z_OOPS(Z_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
+	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, msgq->msg_size));
+	return z_impl_k_msgq_peek(msgq, data);
 }
 #include <syscalls/k_msgq_peek_mrsh.c>
 #endif
@@ -305,24 +305,24 @@ void z_impl_k_msgq_purge(struct k_msgq *msgq)
 }
 #ifdef CONFIG_USERSPACE
-static inline void z_vrfy_k_msgq_purge(struct k_msgq *q)
+static inline void z_vrfy_k_msgq_purge(struct k_msgq *msgq)
 {
-	Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
-	z_impl_k_msgq_purge(q);
+	Z_OOPS(Z_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
+	z_impl_k_msgq_purge(msgq);
 }
 #include <syscalls/k_msgq_purge_mrsh.c>
-static inline uint32_t z_vrfy_k_msgq_num_free_get(struct k_msgq *q)
+static inline uint32_t z_vrfy_k_msgq_num_free_get(struct k_msgq *msgq)
 {
-	Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
-	return z_impl_k_msgq_num_free_get(q);
+	Z_OOPS(Z_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
+	return z_impl_k_msgq_num_free_get(msgq);
 }
 #include <syscalls/k_msgq_num_free_get_mrsh.c>
-static inline uint32_t z_vrfy_k_msgq_num_used_get(struct k_msgq *q)
+static inline uint32_t z_vrfy_k_msgq_num_used_get(struct k_msgq *msgq)
 {
-	Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
-	return z_impl_k_msgq_num_used_get(q);
+	Z_OOPS(Z_SYSCALL_OBJ(msgq, K_OBJ_MSGQ));
+	return z_impl_k_msgq_num_used_get(msgq);
 }
 #include <syscalls/k_msgq_num_used_get_mrsh.c>

View file

@@ -1124,7 +1124,7 @@ static inline int z_vrfy_k_thread_priority_get(k_tid_t thread)
 #include <syscalls/k_thread_priority_get_mrsh.c>
 #endif
-void z_impl_k_thread_priority_set(k_tid_t tid, int prio)
+void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
 {
 	/*
 	 * Use NULL, since we cannot know what the entry point is (we do not
@@ -1133,9 +1133,9 @@ void z_impl_k_thread_priority_set(k_tid_t tid, int prio)
 	Z_ASSERT_VALID_PRIO(prio, NULL);
 	__ASSERT(!arch_is_in_isr(), "");
-	struct k_thread *thread = (struct k_thread *)tid;
-	z_thread_priority_set(thread, prio);
+	struct k_thread *th = (struct k_thread *)thread;
+	z_thread_priority_set(th, prio);
 }
 #ifdef CONFIG_USERSPACE

View file

@@ -234,13 +234,13 @@ int z_impl_k_thread_name_set(struct k_thread *thread, const char *value)
 }
 #ifdef CONFIG_USERSPACE
-static inline int z_vrfy_k_thread_name_set(struct k_thread *t, const char *str)
+static inline int z_vrfy_k_thread_name_set(struct k_thread *thread, const char *str)
 {
 #ifdef CONFIG_THREAD_NAME
 	char name[CONFIG_THREAD_MAX_NAME_LEN];
-	if (t != NULL) {
-		if (Z_SYSCALL_OBJ(t, K_OBJ_THREAD) != 0) {
+	if (thread != NULL) {
+		if (Z_SYSCALL_OBJ(thread, K_OBJ_THREAD) != 0) {
 			return -EINVAL;
 		}
 	}
@@ -253,7 +253,7 @@ static inline int z_vrfy_k_thread_name_set(struct k_thread *t, const char *str)
 		return -EFAULT;
 	}
-	return z_impl_k_thread_name_set(t, name);
+	return z_impl_k_thread_name_set(thread, name);
 #else
 	return -ENOSYS;
 #endif /* CONFIG_THREAD_NAME */
@@ -271,13 +271,13 @@ const char *k_thread_name_get(struct k_thread *thread)
 #endif /* CONFIG_THREAD_NAME */
 }
-int z_impl_k_thread_name_copy(k_tid_t thread_id, char *buf, size_t size)
+int z_impl_k_thread_name_copy(k_tid_t thread, char *buf, size_t size)
 {
 #ifdef CONFIG_THREAD_NAME
-	strncpy(buf, thread_id->name, size);
+	strncpy(buf, thread->name, size);
 	return 0;
 #else
-	ARG_UNUSED(thread_id);
+	ARG_UNUSED(thread);
 	ARG_UNUSED(buf);
 	ARG_UNUSED(size);
 	return -ENOSYS;