diff --git a/arch/arm/core/thread_abort.c b/arch/arm/core/thread_abort.c index 86b76595859..3d398a53076 100644 --- a/arch/arm/core/thread_abort.c +++ b/arch/arm/core/thread_abort.c @@ -26,7 +26,7 @@ extern void _k_thread_single_abort(struct k_thread *thread); -void k_thread_abort(k_tid_t thread) +void _impl_k_thread_abort(k_tid_t thread) { unsigned int key; diff --git a/include/kernel.h b/include/kernel.h index 76b3553e371..394ccbd016d 100644 --- a/include/kernel.h +++ b/include/kernel.h @@ -620,7 +620,7 @@ extern void k_busy_wait(u32_t usec_to_wait); * * @return N/A */ -extern void k_yield(void); +__syscall void k_yield(void); /** * @brief Wake up a sleeping thread. @@ -633,7 +633,7 @@ extern void k_yield(void); * * @return N/A */ -extern void k_wakeup(k_tid_t thread); +__syscall void k_wakeup(k_tid_t thread); /** * @brief Get thread ID of the current thread. @@ -653,7 +653,7 @@ __syscall k_tid_t k_current_get(void); * @retval 0 Thread spawning canceled. * @retval -EINVAL Thread has already started executing. */ -extern int k_thread_cancel(k_tid_t thread); +__syscall int k_thread_cancel(k_tid_t thread); /** * @brief Abort a thread. @@ -669,7 +669,7 @@ extern int k_thread_cancel(k_tid_t thread); * * @return N/A */ -extern void k_thread_abort(k_tid_t thread); +__syscall void k_thread_abort(k_tid_t thread); /** @@ -681,7 +681,7 @@ extern void k_thread_abort(k_tid_t thread); * * @param thread thread to start */ -extern void k_thread_start(k_tid_t thread); +__syscall void k_thread_start(k_tid_t thread); /** * @cond INTERNAL_HIDDEN @@ -808,7 +808,7 @@ __syscall int k_thread_priority_get(k_tid_t thread); * * @return N/A */ -extern void k_thread_priority_set(k_tid_t thread, int prio); +__syscall void k_thread_priority_set(k_tid_t thread, int prio); /** * @brief Suspend a thread. 
@@ -824,7 +824,7 @@ extern void k_thread_priority_set(k_tid_t thread, int prio); * * @return N/A */ -extern void k_thread_suspend(k_tid_t thread); +__syscall void k_thread_suspend(k_tid_t thread); /** * @brief Resume a suspended thread. @@ -838,7 +838,7 @@ extern void k_thread_suspend(k_tid_t thread); * * @return N/A */ -extern void k_thread_resume(k_tid_t thread); +__syscall void k_thread_resume(k_tid_t thread); /** * @brief Set time-slicing period and scope. @@ -908,7 +908,7 @@ extern int k_is_in_isr(void); * @return 0 if invoked by an ISR or by a cooperative thread. * @return Non-zero if invoked by a preemptible thread. */ -extern int k_is_preempt_thread(void); +__syscall int k_is_preempt_thread(void); /** * @} end addtogroup isr_apis @@ -963,7 +963,7 @@ extern void k_sched_unlock(void); * * @return N/A */ -extern void k_thread_custom_data_set(void *value); +__syscall void k_thread_custom_data_set(void *value); /** * @brief Get current thread's custom data. @@ -972,7 +972,7 @@ extern void k_thread_custom_data_set(void *value); * * @return Current custom data value. 
*/ -extern void *k_thread_custom_data_get(void); +__syscall void *k_thread_custom_data_get(void); /** * @} end addtogroup thread_apis diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h index 3e6482ee6d8..6a30042778e 100644 --- a/kernel/include/ksched.h +++ b/kernel/include/ksched.h @@ -50,18 +50,22 @@ static inline int _is_idle_thread_ptr(k_tid_t thread) } #ifdef CONFIG_MULTITHREADING -#define _ASSERT_VALID_PRIO(prio, entry_point) do { \ - __ASSERT(((prio) == K_IDLE_PRIO && _is_idle_thread(entry_point)) || \ +#define _VALID_PRIO(prio, entry_point) \ + (((prio) == K_IDLE_PRIO && _is_idle_thread(entry_point)) || \ (_is_prio_higher_or_equal((prio), \ K_LOWEST_APPLICATION_THREAD_PRIO) && \ _is_prio_lower_or_equal((prio), \ - K_HIGHEST_APPLICATION_THREAD_PRIO)), \ + K_HIGHEST_APPLICATION_THREAD_PRIO))) + +#define _ASSERT_VALID_PRIO(prio, entry_point) do { \ + __ASSERT(_VALID_PRIO((prio), (entry_point)), \ "invalid priority (%d); allowed range: %d to %d", \ (prio), \ K_LOWEST_APPLICATION_THREAD_PRIO, \ K_HIGHEST_APPLICATION_THREAD_PRIO); \ } while ((0)) #else +#define _VALID_PRIO(prio, entry_point) ((prio) == -1) #define _ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "") #endif diff --git a/kernel/sched.c b/kernel/sched.c index 0de0d60dd77..fdb0cef7405 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -278,7 +278,7 @@ u32_t _handler_k_thread_priority_get(u32_t arg1, u32_t arg2, u32_t arg3, } #endif -void k_thread_priority_set(k_tid_t tid, int prio) +void _impl_k_thread_priority_set(k_tid_t tid, int prio) { /* * Use NULL, since we cannot know what the entry point is (we do not @@ -294,6 +294,20 @@ void k_thread_priority_set(k_tid_t tid, int prio) _reschedule_threads(key); } +#ifdef CONFIG_USERSPACE +u32_t _handler_k_thread_priority_set(u32_t thread, u32_t prio, u32_t arg3, + u32_t arg4, u32_t arg5, u32_t arg6, + void *ssf) +{ + _SYSCALL_ARG2; + + _SYSCALL_IS_OBJ(thread, K_OBJ_THREAD, 0, ssf); + _SYSCALL_VERIFY(_VALID_PRIO(prio, NULL), ssf); + 
_impl_k_thread_priority_set((k_tid_t)thread, prio); + return 0; +} +#endif + /* * Interrupts must be locked when calling this function. * @@ -320,7 +334,7 @@ void _move_thread_to_end_of_prio_q(struct k_thread *thread) #endif } -void k_yield(void) +void _impl_k_yield(void) { __ASSERT(!_is_in_isr(), ""); @@ -338,6 +352,17 @@ void k_yield(void) } } +#ifdef CONFIG_USERSPACE +u32_t _handler_k_yield(u32_t arg1, u32_t arg2, u32_t arg3, + u32_t arg4, u32_t arg5, u32_t arg6, void *ssf) +{ + _SYSCALL_ARG0; + + _impl_k_yield(); + return 0; +} +#endif + void _impl_k_sleep(s32_t duration) { #ifdef CONFIG_MULTITHREADING @@ -381,7 +406,7 @@ u32_t _handler_k_sleep(u32_t arg1, u32_t arg2, u32_t arg3, } #endif -void k_wakeup(k_tid_t thread) +void _impl_k_wakeup(k_tid_t thread) { int key = irq_lock(); @@ -405,6 +430,18 @@ void k_wakeup(k_tid_t thread) } } +#ifdef CONFIG_USERSPACE +u32_t _handler_k_wakeup(u32_t thread, u32_t arg2, u32_t arg3, + u32_t arg4, u32_t arg5, u32_t arg6, void *ssf) +{ + _SYSCALL_ARG1; + + _SYSCALL_IS_OBJ(thread, K_OBJ_THREAD, 0, ssf); + _impl_k_wakeup((k_tid_t)thread); + return 0; +} +#endif + k_tid_t _impl_k_current_get(void) { return _current; @@ -483,7 +520,18 @@ void _update_time_slice_before_swap(void) } #endif /* CONFIG_TIMESLICING */ -int k_is_preempt_thread(void) +int _impl_k_is_preempt_thread(void) { return !_is_in_isr() && _is_preempt(_current); } + +#ifdef CONFIG_USERSPACE +u32_t _handler_k_is_preempt_thread(u32_t arg1, u32_t arg2, u32_t arg3, + u32_t arg4, u32_t arg5, u32_t arg6, + void *ssf) +{ + _SYSCALL_ARG0; + + return _impl_k_is_preempt_thread(); +} +#endif diff --git a/kernel/thread.c b/kernel/thread.c index 0cbace6aefa..a933fc7a849 100644 --- a/kernel/thread.c +++ b/kernel/thread.c @@ -23,6 +23,7 @@ #include #include #include +#include extern struct _static_thread_data _static_thread_data_list_start[]; extern struct _static_thread_data _static_thread_data_list_end[]; @@ -112,17 +113,38 @@ int saved_always_on = 
k_enable_sys_clock_always_on(); } #ifdef CONFIG_THREAD_CUSTOM_DATA - -void k_thread_custom_data_set(void *value) +void _impl_k_thread_custom_data_set(void *value) { _current->custom_data = value; } -void *k_thread_custom_data_get(void) +#ifdef CONFIG_USERSPACE +u32_t _handler_k_thread_custom_data_set(u32_t arg1, u32_t arg2, u32_t arg3, + u32_t arg4, u32_t arg5, u32_t arg6, + void *ssf) +{ + _SYSCALL_ARG1; + + _impl_k_thread_custom_data_set((void *)arg1); + return 0; +} +#endif + +void *_impl_k_thread_custom_data_get(void) { return _current->custom_data; } +#ifdef CONFIG_USERSPACE +u32_t _handler_k_thread_custom_data_get(u32_t arg1, u32_t arg2, u32_t arg3, + u32_t arg4, u32_t arg5, u32_t arg6, + void *ssf) +{ + _SYSCALL_ARG0; + + return (u32_t)_impl_k_thread_custom_data_get(); +} +#endif /* CONFIG_USERSPACE */ #endif /* CONFIG_THREAD_CUSTOM_DATA */ #if defined(CONFIG_THREAD_MONITOR) @@ -214,7 +236,7 @@ FUNC_NORETURN void _thread_entry(k_thread_entry_t entry, } #ifdef CONFIG_MULTITHREADING -void k_thread_start(struct k_thread *thread) +void _impl_k_thread_start(struct k_thread *thread) { int key = irq_lock(); /* protect kernel queues */ @@ -235,6 +257,18 @@ void k_thread_start(struct k_thread *thread) irq_unlock(key); } + +#ifdef CONFIG_USERSPACE +u32_t _handler_k_thread_start(u32_t thread, u32_t arg2, u32_t arg3, + u32_t arg4, u32_t arg5, u32_t arg6, void *ssf) +{ + _SYSCALL_ARG1; + + _SYSCALL_IS_OBJ(thread, K_OBJ_THREAD, 0, ssf); + _impl_k_thread_start((struct k_thread *)thread); + return 0; +} +#endif #endif #ifdef CONFIG_MULTITHREADING @@ -292,7 +326,7 @@ k_tid_t k_thread_create(struct k_thread *new_thread, } #endif -int k_thread_cancel(k_tid_t tid) +int _impl_k_thread_cancel(k_tid_t tid) { struct k_thread *thread = tid; @@ -312,6 +346,17 @@ int k_thread_cancel(k_tid_t tid) return 0; } +#ifdef CONFIG_USERSPACE +u32_t _handler_k_thread_cancel(u32_t thread, u32_t arg2, u32_t arg3, + u32_t arg4, u32_t arg5, u32_t arg6, void *ssf) +{ + _SYSCALL_ARG1; + + 
_SYSCALL_IS_OBJ(thread, K_OBJ_THREAD, 0, ssf); + return _impl_k_thread_cancel((struct k_thread *)thread); +} +#endif + static inline int is_in_any_group(struct _static_thread_data *thread_data, u32_t groups) { @@ -369,7 +414,7 @@ void _k_thread_single_suspend(struct k_thread *thread) _mark_thread_as_suspended(thread); } -void k_thread_suspend(struct k_thread *thread) +void _impl_k_thread_suspend(struct k_thread *thread) { unsigned int key = irq_lock(); @@ -382,6 +427,18 @@ void k_thread_suspend(struct k_thread *thread) } } +#ifdef CONFIG_USERSPACE +u32_t _handler_k_thread_suspend(u32_t thread, u32_t arg2, u32_t arg3, + u32_t arg4, u32_t arg5, u32_t arg6, void *ssf) +{ + _SYSCALL_ARG1; + + _SYSCALL_IS_OBJ(thread, K_OBJ_THREAD, 0, ssf); + _impl_k_thread_suspend((k_tid_t)thread); + return 0; +} +#endif + void _k_thread_single_resume(struct k_thread *thread) { _mark_thread_as_not_suspended(thread); @@ -391,7 +448,7 @@ void _k_thread_single_resume(struct k_thread *thread) } } -void k_thread_resume(struct k_thread *thread) +void _impl_k_thread_resume(struct k_thread *thread) { unsigned int key = irq_lock(); @@ -400,6 +457,18 @@ void k_thread_resume(struct k_thread *thread) _reschedule_threads(key); } +#ifdef CONFIG_USERSPACE +u32_t _handler_k_thread_resume(u32_t thread, u32_t arg2, u32_t arg3, + u32_t arg4, u32_t arg5, u32_t arg6, void *ssf) +{ + _SYSCALL_ARG1; + + _SYSCALL_IS_OBJ(thread, K_OBJ_THREAD, 0, ssf); + _impl_k_thread_resume((k_tid_t)thread); + return 0; +} +#endif + void _k_thread_single_abort(struct k_thread *thread) { if (thread->fn_abort != NULL) { diff --git a/kernel/thread_abort.c b/kernel/thread_abort.c index 4ed44563db9..97170e3c6aa 100644 --- a/kernel/thread_abort.c +++ b/kernel/thread_abort.c @@ -19,11 +19,12 @@ #include #include #include +#include extern void _k_thread_single_abort(struct k_thread *thread); #if !defined(CONFIG_ARCH_HAS_THREAD_ABORT) -void k_thread_abort(k_tid_t thread) +void _impl_k_thread_abort(k_tid_t thread) { unsigned int key; @@ 
-44,3 +45,17 @@ void k_thread_abort(k_tid_t thread) _reschedule_threads(key); } #endif + +#ifdef CONFIG_USERSPACE +u32_t _handler_k_thread_abort(u32_t thread_p, u32_t arg2, u32_t arg3, + u32_t arg4, u32_t arg5, u32_t arg6, void *ssf) +{ + struct k_thread *thread = (struct k_thread *)thread_p; + _SYSCALL_ARG1; + _SYSCALL_IS_OBJ(thread, K_OBJ_THREAD, 0, ssf); + _SYSCALL_VERIFY(!(thread->base.user_options & K_ESSENTIAL), ssf); + + _impl_k_thread_abort((struct k_thread *)thread); + return 0; +} +#endif