From 456c6daa9f0c706131cff4eedcf3290fdfcf98e9 Mon Sep 17 00:00:00 2001
From: Benjamin Walsh
Date: Fri, 2 Sep 2016 18:55:39 -0400
Subject: [PATCH] unified: initial unified kernel implementation

Summary of what this includes:

initialization:

  Copy from nano_init.c, with the following changes:

  - the main thread is the continuation of the init thread, but an idle
    thread is created as well
  - _main() initializes threads in groups and starts the EXE group
  - the ready queues are initialized
  - the main thread is marked as non-essential once the system init is done
  - a weak main() symbol is provided if the application does not provide a
    main() function

scheduler:

  Not an exhaustive list, but basically provide primitives for:

  - adding/removing a thread to/from a wait queue
  - adding/removing a thread to/from the ready queue
  - marking a thread as ready
  - locking/unlocking the scheduler, instead of locking interrupts
  - getting/setting thread priority
  - checking what state (coop/preempt) a thread is currently running in
  - rescheduling threads
  - finding the next thread to run
  - yielding/sleeping/aborting sleep
  - finding the current thread

threads:

  - Add operations on threads, such as creating and starting them.

standardized handling of kernel object return codes:

  - Kernel objects now cause _Swap() to return the following values:

       0      => operation successful
      -EAGAIN => operation timed out
      -Exxxxx => operation failed for another reason

  - The thread's swap_data field can be used to return any additional
    information required to complete the operation, such as the actual
    result of a successful operation.

timeouts:

  - same as nano timeouts, renamed to simply 'timeouts'
  - the kernel is still tick-based, but objects take timeout values in ms
    for forward compatibility with a tickless kernel.

semaphores:

  - Port of the nanokernel semaphores, which have the same basic behaviour
    as the microkernel ones. Semaphore groups are not yet implemented.
  - These semaphores are enhanced in that they accept an initial count and
    a count limit. This allows configuring them as binary semaphores, and
    also provisioning them without having to "give" the semaphore multiple
    times before using them.

mutexes:

  - Straight port of the microkernel mutexes. An init function is added to
    allow defining them at runtime.

pipes:

  - straight port

timers:

  - amalgamation of nano and micro timers, with all functionality intact.

events:

  - re-implementation, using semaphores and workqueues.

mailboxes:

  - straight port

message queues:

  - straight port of microkernel FIFOs

memory maps:

  - straight port

workqueues:

  - Basically, have all APIs follow the k_ naming rule, and use the
    _timeout subsystem from the unified kernel directory, not the
    _nano_timeout one.

stacks:

  - Port of the nanokernel stacks. They can now have multiple threads
    pending on them and threads can wait with a timeout.

LIFOs:

  - Straight port of the nanokernel LIFOs.

FIFOs:

  - Straight port of the nanokernel FIFOs.
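To make the conventions above concrete, here is a minimal usage sketch (not part of the patch itself): a semaphore defined with an initial count and a count limit, a thread spawned through the unified k_thread_spawn() API, and the standardized _Swap() return codes surfacing through k_sem_take(). The <kernel.h> and <errno.h> include paths, the 512-byte stack size, and the ready_sem/producer names are illustrative assumptions only; only APIs declared by this patch are used.

    /* Hypothetical example, assuming the header added by this patch is
     * reachable as <kernel.h> on the target.
     */
    #include <kernel.h>
    #include <errno.h>      /* for -EAGAIN */

    /* binary semaphore: initial count 0, count limit 1 */
    K_SEM_DEFINE(ready_sem, 0, 1);

    static char __noinit __stack producer_stack[512];

    static void producer(void *p1, void *p2, void *p3)
    {
        /* ... produce something, then signal ... */
        k_sem_give(&ready_sem);     /* count saturates at the limit (1) */
    }

    void main(void)
    {
        /* the application main() overrides the weak main() symbol */
        k_thread_spawn(producer_stack, sizeof(producer_stack), producer,
                       NULL, NULL, NULL, K_PRIO_PREEMPT(0), 0, 0);

        /* timeout is expressed in milliseconds, not ticks */
        int rc = k_sem_take(&ready_sem, 100);

        if (rc == 0) {
            /* semaphore was given within 100 ms */
        } else if (rc == -EAGAIN) {
            /* operation timed out */
        } else {
            /* some other -Exxxxx failure */
        }
    }

Because the count limit is 1, repeated gives before a take behave like a binary semaphore, which is one of the enhancements called out above.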
Work by: Dmitriy Korovkin Peter Mitsis Allan Stephens Benjamin Walsh Change-Id: Id3cadb3694484ab2ca467889cfb029be3cd3a7d6 Signed-off-by: Benjamin Walsh --- include/kernel.h | 1050 +++++++++++++++++++++++ include/legacy.h | 702 +++++++++++++++ kernel/configs/unified.config | 3 + kernel/unified/Kconfig | 248 ++++++ kernel/unified/Makefile | 43 + kernel/unified/atomic_c.c | 1 + kernel/unified/compiler_stack_protect.c | 1 + kernel/unified/device.c | 1 + kernel/unified/errno.c | 7 + kernel/unified/event.c | 78 ++ kernel/unified/event_logger.c | 1 + kernel/unified/fifo.c | 133 +++ kernel/unified/idle.c | 1 + kernel/unified/include/gen_offset.h | 94 ++ kernel/unified/include/nano_internal.h | 107 +++ kernel/unified/include/nano_offsets.h | 66 ++ kernel/unified/include/sched.h | 350 ++++++++ kernel/unified/include/timeout_q.h | 295 +++++++ kernel/unified/include/wait_q.h | 134 +++ kernel/unified/init.c | 383 +++++++++ kernel/unified/int_latency_bench.c | 1 + kernel/unified/kernel_event_logger.c | 1 + kernel/unified/legacy/Makefile | 9 + kernel/unified/legacy/timer_legacy.c | 37 + kernel/unified/lifo.c | 88 ++ kernel/unified/mailbox.c | 651 ++++++++++++++ kernel/unified/mem_map.c | 166 ++++ kernel/unified/mem_pool.c | 55 ++ kernel/unified/msg_q.c | 216 +++++ kernel/unified/mutex.c | 242 ++++++ kernel/unified/pipes.c | 693 +++++++++++++++ kernel/unified/ring_buffer.c | 1 + kernel/unified/sched.c | 282 ++++++ kernel/unified/sem.c | 93 ++ kernel/unified/stack.c | 101 +++ kernel/unified/sys_clock.c | 201 +++++ kernel/unified/thread.c | 459 ++++++++++ kernel/unified/thread_abort.c | 56 ++ kernel/unified/timer.c | 326 +++++++ kernel/unified/version.c | 1 + kernel/unified/work_q.c | 171 ++++ scripts/sysgen | 461 +++++++--- 42 files changed, 7888 insertions(+), 121 deletions(-) create mode 100644 include/kernel.h create mode 100644 include/legacy.h create mode 100644 kernel/configs/unified.config create mode 100644 kernel/unified/Kconfig create mode 100644 kernel/unified/Makefile create mode 100644 kernel/unified/atomic_c.c create mode 100644 kernel/unified/compiler_stack_protect.c create mode 100644 kernel/unified/device.c create mode 100644 kernel/unified/errno.c create mode 100644 kernel/unified/event.c create mode 100644 kernel/unified/event_logger.c create mode 100644 kernel/unified/fifo.c create mode 100644 kernel/unified/idle.c create mode 100644 kernel/unified/include/gen_offset.h create mode 100644 kernel/unified/include/nano_internal.h create mode 100644 kernel/unified/include/nano_offsets.h create mode 100644 kernel/unified/include/sched.h create mode 100644 kernel/unified/include/timeout_q.h create mode 100644 kernel/unified/include/wait_q.h create mode 100644 kernel/unified/init.c create mode 100644 kernel/unified/int_latency_bench.c create mode 100644 kernel/unified/kernel_event_logger.c create mode 100644 kernel/unified/legacy/Makefile create mode 100644 kernel/unified/legacy/timer_legacy.c create mode 100644 kernel/unified/lifo.c create mode 100644 kernel/unified/mailbox.c create mode 100644 kernel/unified/mem_map.c create mode 100644 kernel/unified/mem_pool.c create mode 100644 kernel/unified/msg_q.c create mode 100644 kernel/unified/mutex.c create mode 100644 kernel/unified/pipes.c create mode 100644 kernel/unified/ring_buffer.c create mode 100644 kernel/unified/sched.c create mode 100644 kernel/unified/sem.c create mode 100644 kernel/unified/stack.c create mode 100644 kernel/unified/sys_clock.c create mode 100644 kernel/unified/thread.c create mode 100644 kernel/unified/thread_abort.c 
create mode 100644 kernel/unified/timer.c create mode 100644 kernel/unified/version.c create mode 100644 kernel/unified/work_q.c diff --git a/include/kernel.h b/include/kernel.h new file mode 100644 index 00000000000..74dcd2f0324 --- /dev/null +++ b/include/kernel.h @@ -0,0 +1,1050 @@ +/* + * Copyright (c) 2016, Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * + * @brief Public kernel APIs. + */ + +#ifndef _kernel__h_ +#define _kernel__h_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef CONFIG_KERNEL_V2_DEBUG +#define K_DEBUG(fmt, ...) printk("[%s] " fmt, __func__, ##__VA_ARGS__) +#else +#define K_DEBUG(fmt, ...) +#endif + +#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x))) +#define K_PRIO_PREEMPT(x) (x) + +#define K_FOREVER (-1) +#define K_NO_WAIT 0 + +#define K_ANY NULL +#define K_END NULL + +#define K_OBJ(name, size) char name[size] __aligned(4) + +#if CONFIG_NUM_COOP_PRIORITIES > 0 +#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES) +#else +#define K_HIGHEST_THREAD_PRIO 0 +#endif + +#if CONFIG_NUM_PREEMPT_PRIORITIES > 0 +#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES +#else +#define K_LOWEST_THREAD_PRIO -1 +#endif + +#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO) +#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1) + +typedef sys_dlist_t _wait_q_t; + +#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS +#define _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(type) struct type *__next +#define _DEBUG_TRACING_KERNEL_OBJECTS_INIT .__next = NULL, +#else +#define _DEBUG_TRACING_KERNEL_OBJECTS_INIT +#define _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(type) +#endif + +#define k_thread tcs +struct tcs; +struct k_mutex; +struct k_sem; +struct k_event; +struct k_msgq; +struct k_mbox; +struct k_pipe; +struct k_fifo; +struct k_lifo; +struct k_stack; +struct k_mem_map; +struct k_mem_pool; +struct k_timer; + +typedef struct tcs *k_tid_t; +typedef struct k_mem_pool *k_mem_pool_t; + +/* threads/scheduler/execution contexts */ + +enum execution_context_types { + K_ISR = 0, + K_COOP_THREAD, + K_PREEMPT_THREAD, +}; + +struct k_thread_config { + char *stack; + unsigned stack_size; + unsigned prio; +}; + +typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3); +extern k_tid_t k_thread_spawn(char *stack, unsigned stack_size, + void (*entry)(void *, void *, void*), + void *p1, void *p2, void *p3, + int32_t prio, uint32_t options, int32_t delay); + +extern void k_sleep(int32_t duration); +extern void k_busy_wait(uint32_t usec_to_wait); +extern void k_yield(void); +extern void k_wakeup(k_tid_t thread); +extern k_tid_t k_current_get(void); +extern k_tid_t k_current_get(void); +extern int k_current_priority_get(void); +extern int k_thread_cancel(k_tid_t thread); + +extern void k_thread_abort(k_tid_t thread); + +#define K_THREAD_GROUP_EXE 0x1 +#define K_THREAD_GROUP_SYS 0x2 +#define K_THREAD_GROUP_FPU 0x4 + +/* 
XXX - doesn't work because CONFIG_ARCH is a string */ +#if 0 +/* arch-specific groups */ +#if CONFIG_ARCH == "x86" +#define K_THREAD_GROUP_SSE 0x4 +#endif +#endif + +#ifdef CONFIG_NANO_TIMEOUTS +#define _THREAD_TIMEOUT_INIT(obj) \ + (obj).nano_timeout = { \ + .node = { {0}, {0} }, \ + .tcs = NULL, \ + .wait_q = NULL, \ + .delta_ticks_from_prev = -1, \ + }, +#else +#define _THREAD_TIMEOUT_INIT(obj) +#endif + +#ifdef CONFIG_ERRNO +#define _THREAD_ERRNO_INIT(obj) (obj).errno_var = 0, +#else +#define _THREAD_ERRNO_INIT(obj) +#endif + +struct k_thread_static_init { + uint32_t init_groups; + int init_prio; + void (*init_entry)(void *, void *, void *); + void *init_p1; + void *init_p2; + void *init_p3; + void (*init_abort)(void); + union { + char *init_stack; + struct k_thread *thread; + }; + unsigned int init_stack_size; +}; + +#define K_THREAD_INITIALIZER(stack, stack_size, \ + entry, p1, p2, p3, \ + abort, prio, groups) \ + { \ + .init_groups = (groups), \ + .init_prio = (prio), \ + .init_entry = entry, \ + .init_p1 = (void *)p1, \ + .init_p2 = (void *)p2, \ + .init_p3 = (void *)p3, \ + .init_abort = abort, \ + .init_stack = (stack), \ + .init_stack_size = (stack_size), \ + } + +/* + * Define thread initializer object and initialize it + * NOTE: For thread group functions thread initializers must be organized + * in array and thus should not have gaps between them. + * On x86 by default compiler aligns them by 32 byte boundary. To prevent + * this 32-bit alignment in specified here. + * k_thread_static_init structure sise needs to be kept 32-bit aligned as well + */ +#define K_THREAD_OBJ_DEFINE(name, stack_size, \ + entry, p1, p2, p3, \ + abort, prio, groups) \ + extern void entry(void *, void *, void *); \ + char __noinit __stack _k_thread_obj_##name[stack_size]; \ + struct k_thread_static_init _k_thread_init_##name __aligned(4) \ + __in_section(_k_task_list, private, task) = \ + K_THREAD_INITIALIZER(_k_thread_obj_##name, stack_size, \ + entry, p1, p2, p3, abort, prio, groups) + +#define K_THREAD_DEFINE(name, stack_size, entry, p1, p2, p3, \ + abort, prio, groups) \ + K_THREAD_OBJ_DEFINE(name, stack_size, entry, p1, p2, p3, \ + abort, prio, groups); \ + k_tid_t const name = (k_tid_t)_k_thread_obj_##name + +/* extern int k_thread_prio_get(k_tid_t thread); in sched.h */ +extern void k_thread_priority_set(k_tid_t thread, int prio); + +#if 0 +extern int k_thread_suspend(k_tid_t thread); +extern int k_thread_resume(k_tid_t thread); +extern int k_thread_entry_set(k_tid_t thread, + void (*entry)(void*, void*, void*); +extern int k_thread_abort_handler_set(k_tid_t thread, + void (*handler)(void)); +#endif + +extern void k_sched_time_slice_set(int32_t slice, int prio); +extern int k_workload_get(void); +extern void k_workload_time_slice_set(int32_t slice); + +extern int k_am_in_isr(void); + +extern void k_thread_custom_data_set(void *value); +extern void *k_thread_custom_data_get(void); + +/** + * kernel timing + */ + +/* timeouts */ + +struct _timeout; +typedef void (*_timeout_func_t)(struct _timeout *t); + +struct _timeout { + sys_dlist_t node; + struct tcs *tcs; + sys_dlist_t *wait_q; + int32_t delta_ticks_from_prev; + _timeout_func_t func; +}; + +/* timers */ + +struct k_timer { + /* + * _timeout structure must be first here if we want to use + * dynamic timer allocation. 
timeout.node is used in the double-linked + * list of free timers + */ + struct _timeout timeout; + + /* wait queue for the threads waiting on this timer */ + _wait_q_t wait_q; + + /* runs in ISR context */ + void (*handler)(void *); + void *handler_arg; + + /* runs in the context of the thread that calls k_timer_stop() */ + void (*stop_handler)(void *); + void *stop_handler_arg; + + /* timer period */ + int32_t period; + + /* user supplied data pointer returned to the thread*/ + void *user_data; + + /* user supplied data pointer */ + void *user_data_internal; + + _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_timer); +}; + +#define K_TIMER_INITIALIZER(obj) \ + { \ + .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ + _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ + } + +#define K_TIMER_DEFINE(name) \ + struct k_timer name = K_TIMER_INITIALIZER(name) + +extern void k_timer_init(struct k_timer *timer, void *data); +extern struct k_timer *k_timer_alloc(void); +extern void k_timer_free(struct k_timer *timer); +extern void k_timer_start(struct k_timer *timer, + int32_t duration, int32_t period, + void (*handler)(void *), void *handler_arg, + void (*stop_handler)(void *), void *stop_handler_arg); +extern void k_timer_restart(struct k_timer *timer, int32_t duration, + int32_t period); +extern void k_timer_stop(struct k_timer *timer); +extern int k_timer_test(struct k_timer *timer, void **data, int wait); +extern int32_t k_timer_remaining_get(struct k_timer *timer); +extern int64_t k_uptime_get(void); +extern int64_t k_uptime_delta(int64_t *reftime); +extern bool k_timer_pool_is_empty(void); + +extern uint32_t k_cycle_get_32(void); + +#if (CONFIG_NUM_DYNAMIC_TIMERS > 0) +extern void _k_dyamic_timer_init(void); +#else +#define _k_dyamic_timer_init() +#endif + +/** + * data transfers (basic) + */ + +/* fifos */ + +struct k_fifo { + _wait_q_t wait_q; + sys_slist_t data_q; + + _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_fifo); +}; + +extern void k_fifo_init(struct k_fifo *fifo); +extern void k_fifo_put(struct k_fifo *fifo, void *data); +extern void k_fifo_put_list(struct k_fifo *fifo, void *head, void *tail); +extern void k_fifo_put_slist(struct k_fifo *fifo, sys_slist_t *list); +extern void *k_fifo_get(struct k_fifo *fifo, int32_t timeout); + +#define K_FIFO_INITIALIZER(obj) \ + { \ + .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ + .data_q = SYS_DLIST_STATIC_INIT(&obj.data_q), \ + _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ + } + +#define K_FIFO_DEFINE(name) \ + struct k_fifo _k_fifo_obj_##name = \ + K_FIFO_INITIALIZER(_k_fifo_obj_##name); \ + struct k_fifo * const name = &_k_fifo_obj_##name + +/* lifos */ + +struct k_lifo { + _wait_q_t wait_q; + void *list; + + _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_lifo); +}; + +extern void k_lifo_init(struct k_lifo *lifo); +extern void k_lifo_put(struct k_lifo *lifo, void *data); +extern void *k_lifo_get(struct k_lifo *lifo, int32_t timeout); + +#define K_LIFO_INITIALIZER(obj) \ + { \ + .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ + .list = NULL, \ + _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ + } + +#define K_LIFO_DEFINE(name) \ + struct k_lifo _k_lifo_obj_##name = \ + K_LIFO_INITIALIZER(_k_lifo_obj_##name); \ + struct k_lifo * const name = &_k_lifo_obj_##name + +/* stacks */ + +struct k_stack { + _wait_q_t wait_q; + uint32_t *base, *next, *top; + + _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_stack); +}; + +extern void k_stack_init(struct k_stack *stack, int num_entries); +extern void k_stack_init_with_buffer(struct k_stack *stack, int num_entries, + uint32_t *buffer); +extern void 
k_stack_push(struct k_stack *stack, uint32_t data); +extern int k_stack_pop(struct k_stack *stack, uint32_t *data, int32_t timeout); + +#define K_STACK_INITIALIZER(obj, stack_num_entries, stack_buffer) \ + { \ + .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ + .base = stack_buffer, \ + .next = stack_buffer, \ + .top = stack_buffer + stack_num_entries, \ + _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ + } + +#define K_STACK_DEFINE(name, stack_num_entries) \ + uint32_t __noinit _k_stack_buf_##name[stack_num_entries]; \ + struct k_stack _k_stack_obj_##name = \ + K_STACK_INITIALIZER(_k_stack_obj_##name, stack_num_entries, \ + _k_stack_buf_##name); \ + struct k_stack * const name = &_k_stack_obj_##name + +#define K_STACK_SIZE(stack_num_entries) \ + (sizeof(struct k_stack) + (stack_num_entries * sizeof(uint32_t))) + +/** + * workqueues + */ + +struct k_work; + +typedef void (*k_work_handler_t)(struct k_work *); + +/** + * A workqueue is a fiber that executes @ref k_work items that are + * queued to it. This is useful for drivers which need to schedule + * execution of code which might sleep from ISR context. The actual + * fiber identifier is not stored in the structure in order to save + * space. + */ +struct k_work_q { + struct k_fifo fifo; +}; + +/** + * @brief Work flags. + */ +enum { + K_WORK_STATE_IDLE, /* Work item idle state */ +}; + +/** + * @brief An item which can be scheduled on a @ref k_work_q. + */ +struct k_work { + void *_reserved; /* Used by k_fifo implementation. */ + k_work_handler_t handler; + atomic_t flags[1]; +}; + +/** + * @brief Statically initialize work item + */ +#define K_WORK_INITIALIZER(work_handler) \ + { \ + ._reserved = NULL, \ + .handler = work_handler, \ + .flags = { 1 } \ + } + +/** + * @brief Dynamically initialize work item + */ +static inline void k_work_init(struct k_work *work, k_work_handler_t handler) +{ + atomic_set_bit(work->flags, K_WORK_STATE_IDLE); + work->handler = handler; +} + +/** + * @brief Submit a work item to a workqueue. + */ +static inline void k_work_submit_to_queue(struct k_work_q *work_q, + struct k_work *work) +{ + if (!atomic_test_and_clear_bit(work->flags, K_WORK_STATE_IDLE)) { + __ASSERT_NO_MSG(0); + } else { + k_fifo_put(&work_q->fifo, work); + } +} + +/** + * @brief Start a new workqueue. This routine can be called from either + * fiber or task context. + */ +extern void k_work_q_start(struct k_work_q *work_q, + const struct k_thread_config *config); + +#if defined(CONFIG_NANO_TIMEOUTS) + + /* + * @brief An item which can be scheduled on a @ref k_work_q with a + * delay. + */ +struct k_delayed_work { + struct k_work work; + struct _timeout timeout; + struct k_work_q *work_q; +}; + +/** + * @brief Initialize delayed work + */ +void k_delayed_work_init(struct k_delayed_work *work, + k_work_handler_t handler); + +/** + * @brief Submit a delayed work item to a workqueue. + * + * This procedure schedules a work item to be processed after a delay. + * Once the delay has passed, the work item is submitted to the work queue: + * at this point, it is no longer possible to cancel it. Once the work item's + * handler is about to be executed, the work is considered complete and can be + * resubmitted. + * + * Care must be taken if the handler blocks or yield as there is no implicit + * mutual exclusion mechanism. Such usage is not recommended and if necessary, + * it should be explicitly done between the submitter and the handler. 
+ * + * @param work_q to schedule the work item + * @param work Delayed work item + * @param ticks Ticks to wait before scheduling the work item + * + * @return 0 in case of success or negative value in case of error. + */ +int k_delayed_work_submit_to_queue(struct k_work_q *work_q, + struct k_delayed_work *work, + int32_t ticks); + +/** + * @brief Cancel a delayed work item + * + * This procedure cancels a scheduled work item. If the work has been completed + * or is idle, this will do nothing. The only case where this can fail is when + * the work has been submitted to the work queue, but the handler has not run + * yet. + * + * @param work Delayed work item to be canceled + * + * @return 0 in case of success or negative value in case of error. + */ +int k_delayed_work_cancel(struct k_delayed_work *work); + +#endif /* CONFIG_NANO_TIMEOUTS */ + +#if defined(CONFIG_SYSTEM_WORKQUEUE) + +extern struct k_work_q k_sys_work_q; + +/* + * @brief Submit a work item to the system workqueue. + * + * @ref k_work_submit_to_queue + * + * When using the system workqueue it is not recommended to block or yield + * on the handler since its fiber is shared system wide it may cause + * unexpected behavior. + */ +static inline void k_work_submit(struct k_work *work) +{ + k_work_submit_to_queue(&k_sys_work_q, work); +} + +#if defined(CONFIG_NANO_TIMEOUTS) +/* + * @brief Submit a delayed work item to the system workqueue. + * + * @ref k_delayed_work_submit_to_queue + * + * When using the system workqueue it is not recommended to block or yield + * on the handler since its fiber is shared system wide it may cause + * unexpected behavior. + */ +static inline int k_delayed_work_submit(struct k_delayed_work *work, + int ticks) +{ + return k_delayed_work_submit_to_queue(&k_sys_work_q, work, ticks); +} + +#endif /* CONFIG_NANO_TIMEOUTS */ +#endif /* CONFIG_SYSTEM_WORKQUEUE */ + +/** + * synchronization + */ + +/* mutexes */ + +struct k_mutex { + _wait_q_t wait_q; + struct tcs *owner; + uint32_t lock_count; + int owner_orig_prio; +#ifdef CONFIG_OBJECT_MONITOR + int num_lock_state_changes; + int num_conflicts; +#endif + + _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_mutex); +}; + +#ifdef CONFIG_OBJECT_MONITOR +#define _MUTEX_INIT_OBJECT_MONITOR \ + .num_lock_state_changes = 0, .num_conflicts = 0, +#else +#define _MUTEX_INIT_OBJECT_MONITOR +#endif + +#define K_MUTEX_INITIALIZER(obj) \ + { \ + .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ + .owner = NULL, \ + .lock_count = 0, \ + .owner_orig_prio = K_LOWEST_THREAD_PRIO, \ + _MUTEX_INIT_OBJECT_MONITOR \ + _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ + } + +#define K_MUTEX_DEFINE(name) \ + struct k_mutex name = K_MUTEX_INITIALIZER(name) + +extern void k_mutex_init(struct k_mutex *mutex); +extern int k_mutex_lock(struct k_mutex *mutex, int32_t timeout); +extern void k_mutex_unlock(struct k_mutex *mutex); + +/* semaphores */ + +struct k_sem { + _wait_q_t wait_q; + unsigned int count; + unsigned int limit; + + _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_sem); +}; + +extern void k_sem_init(struct k_sem *sem, unsigned int initial_count, + unsigned int limit); +extern int k_sem_take(struct k_sem *sem, int32_t timeout); +extern void k_sem_give(struct k_sem *sem); + +static inline int k_sem_reset(struct k_sem *sem) +{ + sem->count = 0; + + return 0; +} + +static inline int k_sem_count_get(struct k_sem *sem) +{ + return sem->count; +} + +extern struct k_sem *k_sem_group_take(struct k_sem **sem_array, + int32_t timeout); +extern void k_sem_group_give(struct k_sem **sem_array); +extern void 
k_sem_group_reset(struct k_sem **sem_array); + +#define K_SEM_INITIALIZER(obj, initial_count, count_limit) \ + { \ + .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ + .count = initial_count, \ + .limit = count_limit, \ + _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ + } + +#define K_SEM_DEFINE(name, initial_count, count_limit) \ + struct k_sem name = \ + K_SEM_INITIALIZER(name, initial_count, count_limit) + +/* events */ + +#define K_EVT_DEFAULT NULL +#define K_EVT_IGNORE ((void *)(-1)) + +typedef int (*k_event_handler_t)(struct k_event *); + +struct k_event { + k_event_handler_t handler; + atomic_t send_count; + struct k_work work_item; + struct k_sem sem; + + _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_event); +}; + +extern void _k_event_deliver(struct k_work *work); + +#define K_EVENT_INITIALIZER(obj, event_handler) \ + { \ + .handler = (k_event_handler_t)event_handler, \ + .send_count = ATOMIC_INIT(0), \ + .work_item = K_WORK_INITIALIZER(_k_event_deliver), \ + .sem = K_SEM_INITIALIZER(obj.sem, 0, 1), \ + _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ + } + +#define K_EVENT_DEFINE(name, event_handler) \ + struct k_event name \ + __in_section(_k_event_list, event, name) = \ + K_EVENT_INITIALIZER(name, event_handler) + +extern void k_event_init(struct k_event *event, k_event_handler_t handler); +extern int k_event_recv(struct k_event *event, int32_t timeout); +extern void k_event_send(struct k_event *event); + +/** + * data transfers (complex) + */ + +/* message queues */ + +struct k_msgq { + _wait_q_t wait_q; + uint32_t msg_size; + uint32_t max_msgs; + char *buffer_start; + char *buffer_end; + char *read_ptr; + char *write_ptr; + uint32_t used_msgs; + + _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_msgq); +}; + +#define K_MSGQ_INITIALIZER(obj, q_depth, q_width, q_buffer) \ + { \ + .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ + .max_msgs = q_depth, \ + .msg_size = q_width, \ + .buffer_start = q_buffer, \ + .buffer_end = q_buffer + (q_depth * q_width), \ + .read_ptr = q_buffer, \ + .write_ptr = q_buffer, \ + .used_msgs = 0, \ + _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ + } + +#define K_MSGQ_DEFINE(name, q_depth, q_width) \ + static char __noinit _k_fifo_buf_##name[(q_depth) * (q_width)]; \ + struct k_msgq name = \ + K_MSGQ_INITIALIZER(name, q_depth, q_width, _k_fifo_buf_##name) + +#define K_MSGQ_SIZE(q_depth, q_width) \ + ((sizeof(struct k_msgq)) + ((q_width) * (q_depth))) + +void k_msgq_init(struct k_msgq *q, uint32_t msg_size, uint32_t max_msgs, + char *buffer); +extern int k_msgq_put(struct k_msgq *q, void *data, int32_t timeout); +extern int k_msgq_get(struct k_msgq *q, void *data, int32_t timeout); +extern void k_msgq_purge(struct k_msgq *q); + +static inline int k_msgq_num_used_get(struct k_msgq *q) +{ + return q->used_msgs; +} + +struct k_mem_block { + k_mem_pool_t pool_id; + void *addr_in_pool; + void *data; + uint32_t req_size; +}; + +/* mailboxes */ + +struct k_mbox_msg { + /** internal use only - needed for legacy API support */ + uint32_t _mailbox; + /** size of message (in bytes) */ + uint32_t size; + /** application-defined information value */ + uint32_t info; + /** sender's message data buffer */ + void *tx_data; + /** internal use only - needed for legacy API support */ + void *_rx_data; + /** message data block descriptor */ + struct k_mem_block tx_block; + /** source thread id */ + k_tid_t rx_source_thread; + /** target thread id */ + k_tid_t tx_target_thread; + /** internal use only - thread waiting on send (may be a dummy) */ + k_tid_t _syncing_thread; +#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0) + /** 
internal use only - semaphore used during asynchronous send */ + struct k_sem *_async_sem; +#endif +}; + +struct k_mbox { + _wait_q_t tx_msg_queue; + _wait_q_t rx_msg_queue; + + _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_mbox); +}; + +#define K_MBOX_INITIALIZER(obj) \ + { \ + .tx_msg_queue = SYS_DLIST_STATIC_INIT(&obj.tx_msg_queue), \ + .rx_msg_queue = SYS_DLIST_STATIC_INIT(&obj.rx_msg_queue), \ + _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ + } + +#define K_MBOX_DEFINE(name) \ + struct k_mbox name = \ + K_MBOX_INITIALIZER(name) \ + +#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0) +extern void _k_mbox_init(void); +#else +#define _k_mbox_init() +#endif + +extern void k_mbox_init(struct k_mbox *mbox); + +extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *msg, + int32_t timeout); +extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *msg, + struct k_sem *sem); + +extern int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *msg, + void *buffer, int32_t timeout); +extern void k_mbox_data_get(struct k_mbox_msg *msg, void *buffer); +extern int k_mbox_data_block_get(struct k_mbox_msg *msg, k_mem_pool_t pool, + struct k_mem_block *block, int32_t timeout); + +/* pipes */ + +struct k_pipe { + unsigned char *buffer; /* Pipe buffer: may be NULL */ + size_t size; /* Buffer size */ + size_t bytes_used; /* # bytes used in buffer */ + size_t read_index; /* Where in buffer to read from */ + size_t write_index; /* Where in buffer to write */ + + struct { + _wait_q_t readers; /* Reader wait queue */ + _wait_q_t writers; /* Writer wait queue */ + } wait_q; + + _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_pipe); +}; + +#define K_PIPE_INITIALIZER(obj, pipe_buffer_size, pipe_buffer) \ + { \ + .buffer = pipe_buffer, \ + .size = pipe_buffer_size, \ + .bytes_used = 0, \ + .read_index = 0, \ + .write_index = 0, \ + .wait_q.writers = SYS_DLIST_STATIC_INIT(&obj.wait_q.writers), \ + .wait_q.readers = SYS_DLIST_STATIC_INIT(&obj.wait_q.readers), \ + _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ + } + +#define K_PIPE_DEFINE(name, pipe_buffer_size) \ + static unsigned char __noinit _k_pipe_buf_##name[pipe_buffer_size]; \ + struct k_pipe name = \ + K_PIPE_INITIALIZER(name, pipe_buffer_size, _k_pipe_buf_##name) + +#define K_PIPE_SIZE(buffer_size) (sizeof(struct k_pipe) + buffer_size) + +#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0) +extern void _k_pipes_init(void); +#else +#define _k_pipes_init() do { } while (0) +#endif + +/** + * @brief Runtime initialization of a pipe + * + * @param pipe Pointer to pipe to initialize + * @param buffer Pointer to buffer to use for pipe's ring buffer + * @param size Size of the pipe's ring buffer + * + * @return N/A + */ +extern void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, + size_t size); + +/** + * @brief Put a message into the specified pipe + * + * This routine synchronously adds a message into the pipe specified by + * @a pipe. It will wait up to @a timeout for the pipe to accept + * @a num_bytes_to_write bytes of data. If by @a timeout, the pipe could not + * accept @a min_bytes bytes of data, it fails. Fewer than @a min_bytes will + * only ever be written to the pipe if K_NO_WAIT < @a timeout < K_FOREVER. 
+ * + * @param pipe Pointer to the pipe + * @param buffer Data to put into the pipe + * @param num_bytes_to_write Desired number of bytes to put into the pipe + * @param num_bytes_written Number of bytes the pipe accepted + * @param min_bytes Minimum number of bytes accepted for success + * @param timeout Maximum number of milliseconds to wait + * + * @retval 0 At least @a min_bytes were sent + * @retval -EIO Request can not be satisfied (@a timeout is K_NO_WAIT) + * @retval -EAGAIN Fewer than @a min_bytes were sent + */ +extern int k_pipe_put(struct k_pipe *pipe, void *buffer, + size_t num_bytes_to_write, size_t *num_bytes_written, + size_t min_bytes, int32_t timeout); + +/** + * @brief Get a message from the specified pipe + * + * This routine synchronously retrieves a message from the pipe specified by + * @a pipe. It will wait up to @a timeout to retrieve @a num_bytes_to_read + * bytes of data from the pipe. If by @a timeout, the pipe could not retrieve + * @a min_bytes bytes of data, it fails. Fewer than @a min_bytes will + * only ever be retrieved from the pipe if K_NO_WAIT < @a timeout < K_FOREVER. + * + * @param pipe Pointer to the pipe + * @param buffer Location to place retrieved data + * @param num_bytes_to_read Desired number of bytes to retrieve from the pipe + * @param num_bytes_read Number of bytes retrieved from the pipe + * @param min_bytes Minimum number of bytes retrieved for success + * @param timeout Maximum number of milliseconds to wait + * + * @retval 0 At least @a min_bytes were transferred + * @retval -EIO Request can not be satisfied (@a timeout is K_NO_WAIT) + * @retval -EAGAIN Fewer than @a min_bytes were retrieved + */ +extern int k_pipe_get(struct k_pipe *pipe, void *buffer, + size_t num_bytes_to_read, size_t *num_bytes_read, + size_t min_bytes, int32_t timeout); + +/** + * @brief Send a message to the specified pipe + * + * This routine asynchronously sends a message from the pipe specified by + * @a pipe. Once all @a size bytes have been accepted by the pipe, it will + * free the memory block @a block and give the semaphore @a sem (if specified). + * Up to CONFIG_NUM_PIPE_ASYNC_MSGS asynchronous pipe messages can be in-flight + * at any given time. 
+ * + * @param pipe Pointer to the pipe + * @param block Memory block containing data to send + * @param size Number of data bytes in memory block to send + * @param sem Semaphore to signal upon completion (else NULL) + * + * @retval N/A + */ +extern void k_pipe_block_put(struct k_pipe *pipe, struct k_mem_block *block, + size_t size, struct k_sem *sem); + +/** + * memory management + */ + +/* memory maps */ + +struct k_mem_map { + _wait_q_t wait_q; + int num_blocks; + int block_size; + char *buffer; + char *free_list; + int num_used; + + _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_mem_map); +}; + +#define K_MEM_MAP_INITIALIZER(obj, map_num_blocks, map_block_size, \ + map_buffer) \ + { \ + .wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \ + .num_blocks = map_num_blocks, \ + .block_size = map_block_size, \ + .buffer = map_buffer, \ + .free_list = NULL, \ + .num_used = 0, \ + _DEBUG_TRACING_KERNEL_OBJECTS_INIT \ + } + +#define K_MEM_MAP_DEFINE(name, map_num_blocks, map_block_size) \ + char _k_mem_map_buf_##name[(map_num_blocks) * (map_block_size)]; \ + struct k_mem_map name \ + __in_section(_k_mem_map_ptr, private, mem_map) = \ + K_MEM_MAP_INITIALIZER(name, map_num_blocks, \ + map_block_size, _k_mem_map_buf_##name) + +#define K_MEM_MAP_SIZE(map_num_blocks, map_block_size) \ + (sizeof(struct k_mem_map) + ((map_num_blocks) * (map_block_size))) + +extern void _k_mem_map_init(void); + +extern void k_mem_map_init(struct k_mem_map *map, int num_blocks, + int block_size, void *buffer); +extern int k_mem_map_alloc(struct k_mem_map *map, void **mem, int32_t timeout); +extern void k_mem_map_free(struct k_mem_map *map, void **mem); + +static inline int k_mem_map_num_used_get(struct k_mem_map *map) +{ + return map->num_used; +} + +/* memory pools */ + +struct k_mem_pool { + _wait_q_t wait_q; + int max_block_size; + int num_max_blocks; + + _DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_mem_pool); +}; + +/* cannot initialize pools statically */ + +/* XXX - review this computation */ +#define K_MEM_POOL_SIZE(max_block_size, num_max_blocks) \ + (sizeof(struct k_mem_pool) + ((max_block_size) * (num_max_blocks))) + +extern void k_mem_pool_init(struct k_mem_pool *mem, int max_block_size, + int num_max_blocks); +extern int k_mem_pool_alloc(k_mem_pool_t id, struct k_mem_block *block, + int size, int32_t timeout); +extern void k_mem_pool_free(struct k_mem_block *block); +extern void k_mem_pool_defrag(k_mem_pool_t id); +extern void *k_malloc(uint32_t size); +extern void k_free(void *p); + +/* + * legacy.h must be before arch/cpu.h to allow the ioapic/loapic drivers to + * hook into the device subsystem, which itself uses nanokernel semaphores, + * and thus currently requires the definition of nano_sem. 
+ */ +#include +#include + +/* + * private APIs that are utilized by one or more public APIs + */ + +extern struct k_thread_static_init _k_task_list_start[]; +extern struct k_thread_static_init _k_task_list_end[]; + +#define _FOREACH_STATIC_THREAD(thread_init) \ + for (struct k_thread_static_init *thread_init = _k_task_list_start; \ + thread_init < _k_task_list_end; thread_init++) + +extern int _is_thread_essential(void); +static inline int is_in_any_group(struct k_thread_static_init *thread_init, + uint32_t groups) +{ + return !!(thread_init->init_groups & groups); +} +extern void _init_static_threads(void); + +#ifdef __cplusplus +} +#endif + +#endif /* _kernel__h_ */ diff --git a/include/legacy.h b/include/legacy.h new file mode 100644 index 00000000000..b16fb687210 --- /dev/null +++ b/include/legacy.h @@ -0,0 +1,702 @@ +/* + * Copyright (c) 2016, Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * + * @brief Public legacy kernel APIs. + */ + +#ifndef _legacy__h_ +#define _legacy__h_ + +#include +#include +#include +#include + +/* nanokernel/microkernel execution context types */ +#define NANO_CTX_ISR (K_ISR) +#define NANO_CTX_FIBER (K_COOP_THREAD) +#define NANO_CTX_TASK (K_PREEMPT_THREAD) + +/* timeout special values */ +#define TICKS_UNLIMITED (K_FOREVER) +#define TICKS_NONE (K_NO_WAIT) + +/* microkernel object return codes */ +#define RC_OK 0 +#define RC_FAIL 1 +#define RC_TIME 2 +#define RC_ALIGNMENT 3 +#define RC_INCOMPLETE 4 + +#define ANYTASK K_ANY + +/* end-of-list, mostly used for semaphore groups */ +#define ENDLIST K_END + +/* pipe amount of content to receive (0+, 1+, all) */ +typedef enum { + _0_TO_N = 0x0, + _1_TO_N = 0x1, + _ALL_N = 0x2, +} K_PIPE_OPTION; + +#define kpriority_t uint32_t + +static inline int32_t _ticks_to_ms(int32_t ticks) +{ + return (ticks == TICKS_UNLIMITED) ? K_FOREVER : + (MSEC_PER_SEC * (uint64_t)ticks) / sys_clock_ticks_per_sec; +} + +static inline int _error_to_rc(int err) +{ + return err == 0 ? RC_OK : err == -EAGAIN ? RC_TIME : RC_FAIL; +} + +static inline int _error_to_rc_no_timeout(int err) +{ + return err == 0 ? 
RC_OK : RC_FAIL; +} + +/* tasks/fibers/scheduler */ + +#define ktask_t k_tid_t +#define nano_thread_id_t k_tid_t +typedef void (*nano_fiber_entry_t)(int i1, int i2); +typedef int nano_context_type_t; + +#define sys_thread_self_get k_current_get +#define sys_thread_busy_wait k_busy_wait + +extern int sys_execution_context_type_get(void); + +static inline nano_thread_id_t fiber_start(char *stack, unsigned stack_size, + nano_fiber_entry_t entry, + int arg1, int arg2, + unsigned prio, + unsigned options) +{ + return k_thread_spawn(stack, stack_size, (k_thread_entry_t)entry, + (void *)arg1, (void *)arg2, NULL, + K_PRIO_COOP(prio), options, 0); +} +#define fiber_fiber_start fiber_start +#define task_fiber_start fiber_start + +#define fiber_config k_thread_config + +#define fiber_start_config(config, entry, arg1, arg2, options) \ + fiber_start(config->stack, config->stack_size, \ + entry, arg1, arg2, \ + config->prio, options) +#define fiber_fiber_start_config fiber_start_config +#define task_fiber_start_config fiber_start_config + +static inline nano_thread_id_t +fiber_delayed_start(char *stack, unsigned int stack_size_in_bytes, + nano_fiber_entry_t entry_point, int param1, + int param2, unsigned int priority, + unsigned int options, int32_t timeout_in_ticks) +{ + return k_thread_spawn(stack, stack_size_in_bytes, + (k_thread_entry_t)entry_point, + (void *)param1, (void *)param2, NULL, + K_PRIO_COOP(priority), options, + _ticks_to_ms(timeout_in_ticks)); +} + +#define fiber_fiber_delayed_start fiber_delayed_start +#define task_fiber_delayed_start fiber_delayed_start + +#define fiber_delayed_start_cancel(fiber) k_thread_cancel((k_tid_t)fiber) +#define fiber_fiber_delayed_start_cancel fiber_delayed_start_cancel +#define task_fiber_delayed_start_cancel fiber_delayed_start_cancel + +#define fiber_yield k_yield +#define fiber_abort() k_thread_abort(k_current_get()) +static inline void fiber_sleep(int32_t timeout) +{ + k_sleep(_ticks_to_ms(timeout)); +} + +#define fiber_wakeup k_wakeup +#define isr_fiber_wakeup k_wakeup +#define fiber_fiber_wakeup k_wakeup +#define task_fiber_wakeup k_wakeup + +#define task_sleep fiber_sleep +#define task_yield k_yield +#define task_priority_set(task, prio) k_thread_priority_set(task, (int)prio) +#define task_entry_set(task, entry) \ + k_thread_entry_set(task, (k_thread_entry_t)entry) +#define task_abort_handler_set k_thread_abort_handler_set +static inline void task_offload_to_fiber(int (*func)(), void *argp) +{ + /* XXX - implement via work queue */ +} + +#define task_id_get k_current_get +#define task_priority_get (kpriority_t)k_current_priority_get +#define task_abort k_thread_abort +#define task_suspend k_thread_suspend +#define task_resume k_thread_resume + +extern void task_start(ktask_t task); + +static inline void sys_scheduler_time_slice_set(int32_t ticks, + kpriority_t prio) +{ + k_sched_time_slice_set(_ticks_to_ms(ticks), (int)prio); +} + +extern void _k_thread_group_op(uint32_t groups, void (*func)(struct tcs *)); + +static inline uint32_t task_group_mask_get(void) +{ + extern uint32_t _k_thread_group_mask_get(struct tcs *thread); + + return _k_thread_group_mask_get(k_current_get()); +} +#define isr_task_group_mask_get task_group_mask_get + +static inline void task_group_join(uint32_t groups) +{ + extern void _k_thread_group_join(uint32_t groups, struct tcs *thread); + + _k_thread_group_join(groups, k_current_get()); +} + +static inline void task_group_leave(uint32_t groups) +{ + extern void _k_thread_group_leave(uint32_t groups, struct tcs *thread); + + 
_k_thread_group_leave(groups, k_current_get()); +} + +static inline void task_group_start(uint32_t groups) +{ + extern void _k_thread_single_start(struct tcs *thread); + + return _k_thread_group_op(groups, _k_thread_single_start); +} + +static inline void task_group_suspend(uint32_t groups) +{ + extern void _k_thread_single_suspend(struct tcs *thread); + + return _k_thread_group_op(groups, _k_thread_single_suspend); +} + +static inline void task_group_resume(uint32_t groups) +{ + extern void _k_thread_single_resume(struct tcs *thread); + + return _k_thread_group_op(groups, _k_thread_single_resume); +} + +static inline void task_group_abort(uint32_t groups) +{ + extern void _k_thread_single_abort(struct tcs *thread); + + return _k_thread_group_op(groups, _k_thread_single_abort); +} + +#if 0 +#define isr_task_id_get() task_id_get() +#define isr_task_priority_get() task_priority_get() +#endif + +/* mutexes */ + +#define kmutex_t struct k_mutex * + +static inline int task_mutex_lock(kmutex_t id, int32_t timeout) +{ + return _error_to_rc(k_mutex_lock(id, _ticks_to_ms(timeout))); +} + +#define task_mutex_unlock k_mutex_unlock + +#define DEFINE_MUTEX(name) \ + K_MUTEX_DEFINE(_k_mutex_obj_##name); \ + struct k_mutex * const name = &_k_mutex_obj_##name + +/* semaphores */ + +#define nano_sem k_sem +#define ksem_t struct k_sem * + +static inline void nano_sem_init(struct nano_sem *sem) +{ + k_sem_init(sem, 0, UINT_MAX); +} + +#define nano_sem_give(id) k_sem_give((struct k_sem *)id) +#define nano_isr_sem_give(id) k_sem_give((struct k_sem *)id) +#define nano_fiber_sem_give(id) k_sem_give((struct k_sem *)id) +#define nano_task_sem_give(id) k_sem_give((struct k_sem *)id) + +static inline int nano_sem_take(struct nano_sem *sem, int32_t timeout) +{ + return k_sem_take((struct k_sem *)sem, _ticks_to_ms(timeout)) + == 0 ? 
1 : 0; +} +#define nano_isr_sem_take nano_sem_take +#define nano_fiber_sem_take nano_sem_take +#define nano_task_sem_take nano_sem_take + +#define isr_sem_give k_sem_give +#define fiber_sem_give k_sem_give +#define task_sem_give k_sem_give + +static inline int task_sem_take(ksem_t sem, int32_t timeout) +{ + return _error_to_rc(k_sem_take(sem, _ticks_to_ms(timeout))); +} + +#define task_sem_reset k_sem_reset +#define task_sem_count_get k_sem_count_get + +typedef ksem_t *ksemg_t; + +static inline ksem_t task_sem_group_take(ksemg_t group, int32_t timeout) +{ + return k_sem_group_take(group, _ticks_to_ms(timeout)); +} + +#define task_sem_group_give k_sem_group_give +#define task_sem_group_reset k_sem_group_reset + +#define DEFINE_SEMAPHORE(name) \ + K_SEM_DEFINE(_k_sem_obj_##name, 0, UINT_MAX); \ + struct k_sem * const name = &_k_sem_obj_##name + +/* workqueues */ + +#define nano_work k_work +#define work_handler_t k_work_handler_t +#define nano_workqueue k_work_q +#define nano_delayed_work k_delayed_work + +#define nano_work_init k_work_init +#define nano_work_submit_to_queue k_work_submit_to_queue +#define nano_workqueue_start k_work_q_start +#define nano_task_workqueue_start nano_fiber_workqueue_start +#define nano_fiber_workqueue_start nano_fiber_workqueue_start + +#define nano_delayed_work_init k_delayed_work_init + +static inline int nano_delayed_work_submit_to_queue(struct nano_workqueue *wq, + struct nano_delayed_work *work, + int ticks) +{ + return k_delayed_work_submit_to_queue(wq, work, _ticks_to_ms(ticks)); +} + +#define nano_delayed_work_cancel k_delayed_work_cancel +#define nano_work_submit k_work_submit + +#define nano_delayed_work_submit(work, ticks) \ + nano_delayed_work_submit_to_queue(&k_sys_work_q, work, ticks) + +/* events */ + +#define kevent_t const struct k_event * +typedef int (*kevent_handler_t)(int event); + +#define isr_event_send task_event_send +#define fiber_event_send task_event_send + +static inline int task_event_handler_set(kevent_t legacy_event, + kevent_handler_t handler) +{ + struct k_event *event = (struct k_event *)legacy_event; + + if ((event->handler != NULL) && (handler != NULL)) { + /* can't overwrite an existing event handler */ + return RC_FAIL; + } + + event->handler = (k_event_handler_t)handler; + return RC_OK; +} + +static inline int task_event_send(kevent_t legacy_event) +{ + k_event_send((struct k_event *)legacy_event); + return RC_OK; +} + +static inline int task_event_recv(kevent_t legacy_event, int32_t timeout) +{ + return _error_to_rc(k_event_recv((struct k_event *)legacy_event, + _ticks_to_ms(timeout))); +} + +#define DEFINE_EVENT(name, event_handler) \ + K_EVENT_DEFINE(_k_event_obj_##name, event_handler); \ + struct k_event * const name = &(_k_event_obj_##name) + +/* memory maps */ + +#define kmemory_map_t struct k_mem_map * + +static inline int task_mem_map_alloc(kmemory_map_t map, void **mptr, + int32_t timeout) +{ + return _error_to_rc(k_mem_map_alloc(map, mptr, _ticks_to_ms(timeout))); +} + +#define task_mem_map_free k_mem_map_free +#define task_mem_map_used_get k_mem_map_num_used_get + +#define DEFINE_MEM_MAP(name, map_num_blocks, map_block_size) \ + K_MEM_MAP_DEFINE(_k_mem_map_obj_##name, \ + map_num_blocks, map_block_size); \ + struct k_mem_map *const name = &_k_mem_map_obj_##name + + +/* memory pools */ + +#define k_block k_mem_block +#define kmemory_pool_t k_mem_pool_t + +#if 0 /* unimplemented object */ + +static inline int task_mem_pool_alloc(struct k_block *blockptr, + kmemory_pool_t pool_id, + int reqsize, int32_t timeout) 
+{ + return _error_to_rc(k_mem_pool_alloc(pool_id, blockptr, reqsize, + _ticks_to_ms(timeout))); +} + +#define task_mem_pool_free k_mem_pool_free +#define task_mem_pool_defragment k_mem_pool_defrag +#define task_malloc k_malloc +#define task_free k_free + +#endif + +/* message queues */ + +#define kfifo_t struct k_msgq * + +static inline int task_fifo_put(kfifo_t queue, void *data, int32_t timeout) +{ + return _error_to_rc(k_msgq_put(queue, data, _ticks_to_ms(timeout))); +} + +static inline int task_fifo_get(kfifo_t queue, void *data, int32_t timeout) +{ + return _error_to_rc(k_msgq_get(queue, data, _ticks_to_ms(timeout))); +} + +static inline int task_fifo_purge(kfifo_t queue) +{ + k_msgq_purge(queue); + return RC_OK; +} + +static inline int task_fifo_size_get(kfifo_t queue) +{ + return queue->used_msgs; +} + +#define DEFINE_FIFO(name, q_depth, q_width) \ + K_MSGQ_DEFINE(_k_fifo_obj_##name, q_depth, q_width); \ + struct k_msgq * const name = &_k_fifo_obj_##name + +/* mailboxes */ + +#define kmbox_t struct k_mbox * + +struct k_msg { + /** Mailbox ID */ + kmbox_t mailbox; + /** size of message (bytes) */ + uint32_t size; + /** information field, free for user */ + uint32_t info; + /** pointer to message data at sender side */ + void *tx_data; + /** pointer to message data at receiver */ + void *rx_data; + /** for async message posting */ + struct k_block tx_block; + /** sending task */ + ktask_t tx_task; + /** receiving task */ + ktask_t rx_task; + /** internal use only */ + union { + /** for 2-steps data transfer operation */ + struct k_args *transfer; + /** semaphore to signal when asynchr. call */ + ksem_t sema; + } extra; +}; + +int task_mbox_put(kmbox_t mbox, kpriority_t prio, struct k_msg *msg, + int32_t timeout); +void task_mbox_block_put(kmbox_t mbox, kpriority_t prio, struct k_msg *msg, + ksem_t sema); + +int task_mbox_get(kmbox_t mbox, struct k_msg *msg, int32_t timeout); +void task_mbox_data_get(struct k_msg *msg); +int task_mbox_data_block_get(struct k_msg *msg, struct k_block *block, + kmemory_pool_t pool_id, int32_t timeout); + +#define DEFINE_MAILBOX(name) \ + K_MBOX_DEFINE(_k_mbox_obj_##name); \ + struct k_mbox * const name = &_k_mbox_obj_##name + +/* pipes */ + +#define kpipe_t struct k_pipe * + +static inline int task_pipe_put(kpipe_t id, void *buffer, int bytes_to_write, + int *bytes_written, K_PIPE_OPTION options, + int32_t timeout) +{ + size_t min_xfer = (size_t)options; + + __ASSERT((options == _0_TO_N) || + (options == _1_TO_N) || + (options == _ALL_N), "Invalid pipe option"); + + *bytes_written = 0; + + if (bytes_to_write == 0) { + return RC_FAIL; + } + + if ((options == _0_TO_N) && (timeout != K_NO_WAIT)) { + return RC_FAIL; + } + + if (options == _ALL_N) { + min_xfer = bytes_to_write; + } + + return _error_to_rc(k_pipe_put(id, buffer, bytes_to_write, + (size_t *)bytes_written, min_xfer, + _ticks_to_ms(timeout))); +} + +static inline int task_pipe_get(kpipe_t id, void *buffer, int bytes_to_read, + int *bytes_read, K_PIPE_OPTION options, + int32_t timeout) +{ + size_t min_xfer = (size_t)options; + + __ASSERT((options == _0_TO_N) || + (options == _1_TO_N) || + (options == _ALL_N), "Invalid pipe option"); + + *bytes_read = 0; + + if (bytes_to_read == 0) { + return RC_FAIL; + } + + if ((options == _0_TO_N) && (timeout != K_NO_WAIT)) { + return RC_FAIL; + } + + if (options == _ALL_N) { + min_xfer = bytes_to_read; + } + + return _error_to_rc(k_pipe_get(id, buffer, bytes_to_read, + (size_t *)bytes_read, min_xfer, + _ticks_to_ms(timeout))); +} + +static inline int 
task_pipe_block_put(kpipe_t id, struct k_block block, + int size, ksem_t sema) +{ + if (size == 0) { + return RC_FAIL; + } + + k_pipe_block_put(id, &block, size, sema); + + return RC_OK; +} + +#define DEFINE_PIPE(name, pipe_buffer_size) \ + K_PIPE_DEFINE(_k_pipe_obj_##name, pipe_buffer_size); \ + struct k_pipe * const name = &_k_pipe_obj_##name + +#define nano_fifo k_fifo +#define nano_fifo_init k_fifo_init + +/* nanokernel fifos */ + +#define nano_fifo_put k_fifo_put +#define nano_isr_fifo_put k_fifo_put +#define nano_fiber_fifo_put k_fifo_put +#define nano_task_fifo_put k_fifo_put + +#define nano_fifo_put_list k_fifo_put_list +#define nano_isr_fifo_put_list k_fifo_put_list +#define nano_fiber_fifo_put_list k_fifo_put_list +#define nano_task_fifo_put_list k_fifo_put_list + +#define nano_fifo_put_slist k_fifo_put_slist +#define nano_isr_fifo_put_slist k_fifo_put_slist +#define nano_fiber_fifo_put_slist k_fifo_put_slist +#define nano_task_fifo_put_slist k_fifo_put_slist + +static inline void *nano_fifo_get(struct nano_fifo *fifo, + int32_t timeout_in_ticks) +{ + return k_fifo_get((struct k_fifo *)fifo, + _ticks_to_ms(timeout_in_ticks)); +} + +#define nano_isr_fifo_get nano_fifo_get +#define nano_fiber_fifo_get nano_fifo_get +#define nano_task_fifo_get nano_fifo_get + +/* nanokernel lifos */ + +#define nano_lifo k_lifo + +#define nano_lifo_init k_lifo_init + +#define nano_lifo_put k_lifo_put +#define nano_isr_lifo_put k_lifo_put +#define nano_fiber_lifo_put k_lifo_put +#define nano_task_lifo_put k_lifo_put + +static inline void *nano_lifo_get(struct nano_lifo *lifo, + int32_t timeout_in_ticks) +{ + return k_lifo_get((struct k_lifo *)lifo, + _ticks_to_ms(timeout_in_ticks)); +} + + +#define nano_isr_lifo_get nano_lifo_get +#define nano_fiber_lifo_get nano_lifo_get +#define nano_task_lifo_get nano_lifo_get + +/* nanokernel stacks */ + +#define nano_stack k_stack + +static inline void nano_stack_init(struct nano_stack *stack, uint32_t *data) +{ + k_stack_init_with_buffer(stack, UINT_MAX, data); +} + +#define nano_stack_push k_stack_push +#define nano_isr_stack_push k_stack_push +#define nano_fiber_stack_push k_stack_push +#define nano_task_stack_push k_stack_push + +static inline int nano_stack_pop(struct nano_stack *stack, uint32_t *data, + int32_t timeout_in_ticks) +{ + return k_stack_pop((struct k_stack *)stack, data, + _ticks_to_ms(timeout_in_ticks)) == 0 ? 
1 : 0; +} + +#define nano_isr_stack_pop nano_stack_pop +#define nano_fiber_stack_pop nano_stack_pop +#define nano_task_stack_pop nano_stack_pop + +/* timers */ + +#define CONFIG_NUM_TIMER_PACKETS CONFIG_NUM_DYNAMIC_TIMERS + +#define ktimer_t struct k_timer * + +#define task_timer_alloc k_timer_alloc +#define task_timer_free k_timer_free + +extern void task_timer_start(ktimer_t timer, int32_t duration, + int32_t period, ksem_t sema); + +static inline void task_timer_restart(ktimer_t timer, int32_t duration, + int32_t period) +{ + k_timer_restart(timer, _ticks_to_ms(duration), _ticks_to_ms(period)); +} + +#define nano_timer k_timer + +#define nano_timer_init k_timer_init + +static inline void nano_timer_start(struct nano_timer *timer, int ticks) +{ + k_timer_start((struct k_timer *)timer, _ticks_to_ms(ticks), 0, + NULL, NULL, NULL, NULL); +} + +#define nano_isr_timer_start nano_timer_start +#define nano_fiber_timer_start nano_timer_start +#define nano_task_timer_start nano_timer_start + +static inline void *nano_timer_test(struct nano_timer *timer, + int32_t timeout_in_ticks) +{ + void *data; + + if (k_timer_test(timer, &data, _ticks_to_ms(timeout_in_ticks)) < 0) { + return NULL; + } + + return data; +} + +#define nano_isr_timer_test nano_timer_test +#define nano_fiber_timer_test nano_timer_test +#define nano_task_timer_test nano_timer_test + +#define task_timer_stop k_timer_stop + +#define nano_isr_timer_stop k_timer_stop +#define nano_fiber_timer_stop k_timer_stop +#define nano_task_timer_stop k_timer_stop + +extern int32_t _ms_to_ticks(int32_t ms); + +static inline int32_t nano_timer_ticks_remain(struct nano_timer *timer) +{ + return _ms_to_ticks(k_timer_remaining_get(timer)); +} + +extern int64_t sys_tick_get(void); +extern uint32_t sys_tick_get_32(void); +extern int64_t sys_tick_delta(int64_t *reftime); +extern uint32_t sys_tick_delta_32(int64_t *reftime); + +#define sys_cycle_get_32 k_cycle_get_32 + +/* floating point services */ + +#define fiber_float_enable k_float_enable +#define task_float_enable k_float_enable +#define fiber_float_disable k_float_disable +#define task_float_disable k_float_disable + +#endif /* _legacy__h_ */ diff --git a/kernel/configs/unified.config b/kernel/configs/unified.config new file mode 100644 index 00000000000..42889d56652 --- /dev/null +++ b/kernel/configs/unified.config @@ -0,0 +1,3 @@ +CONFIG_KERNEL_V2=y +CONFIG_MICROKERNEL=y +CONFIG_INIT_STACKS=y diff --git a/kernel/unified/Kconfig b/kernel/unified/Kconfig new file mode 100644 index 00000000000..f8704b2b2b4 --- /dev/null +++ b/kernel/unified/Kconfig @@ -0,0 +1,248 @@ +# Kconfig - nanokernel configuration options + +# +# Copyright (c) 2014-2015 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +menu "Unified Kernel Options" + +config KERNEL_V2_DEBUG + bool + prompt "Kernel V2 debug help" + default n + depends on KERNEL_V2 + select INIT_STACKS + +config NUM_COOP_PRIORITIES + int + prompt "Kernel V2: number of coop priorities" + default 16 + help + Number of cooperative priorities configured in the system. Gives access + to priorities: + + K_PRIO_COOP(0) to K_PRIO_COOP(CONFIG_NUM_COOP_PRIORITIES - 1) + + or seen another way, priorities: + + -CONFIG_NUM_COOP_PRIORITIES to -1 + + This can be set to zero to disable cooperative scheduling. Cooperative + threads always preempt preemptible threads. + + Each priority requires an extra 8 bytes of RAM. If there are more than + 32 total priorities, an extra 4 bytes is required. + +config NUM_PREEMPT_PRIORITIES + int + prompt "Kernel V2: number of preemptible priorities" + default 15 + help + Number of preemptible priorities available in the system. Gives access + to priorities 0 to CONFIG_NUM_PREEMPT_PRIORITIES - 1. + + This can be set to 0 to disable preemptible scheduling. + + The idle thread is always installed as a preemptible thread of the + lowest priority. + + Each priority requires an extra 8 bytes of RAM. If there are more than + 32 total priorities, an extra 4 bytes is required. + +config PRIORITY_CEILING + int + prompt "Kernel V2: priority inheritance ceiling" + default 0 + +config BOOT_BANNER + bool + prompt "Boot banner" + default n + select PRINTK + depends on EARLY_CONSOLE + help + This option outputs a banner to the console device during boot up. It + also embeds a date & time stamp in the kernel and in each USAP image. + +config BUILD_TIMESTAMP + bool + prompt "Build Timestamp" + help + Build timestamp and add it to the boot banner. + +config INT_LATENCY_BENCHMARK + bool + prompt "Interrupt latency metrics [EXPERIMENTAL]" + default n + depends on ARCH="x86" + help + This option enables the tracking of interrupt latency metrics; + the exact set of metrics being tracked is board-dependent. + Tracking begins when int_latency_init() is invoked by an application. + The metrics are displayed (and a new sampling interval is started) + each time int_latency_show() is called thereafter. + +config MAIN_THREAD_PRIORITY + int + prompt "Priority of initialization/main thread" + default 0 + default -1 if NUM_PREEMPT_PRIORITIES = 0 + help + Priority at which the initialization thread runs, including the start + of the main() function. main() can then change its priority if desired. + +config MAIN_STACK_SIZE + int + prompt "Size of stack for initialization and main thread" + default 1024 + help + When the intitialization is complete, the thread executing it then + executes the main() routine, so as to reuse the stack used by the + initialization, which would be wasted RAM otherwise. + + After initialization is complete, the thread runs main(). + +config ISR_STACK_SIZE + int + prompt "ISR and initialization stack size (in bytes)" + default 2048 + help + This option specifies the size of the stack used by interrupt + service routines (ISRs), and during nanokernel initialization. + +config THREAD_CUSTOM_DATA + bool + prompt "Task and fiber custom data" + default n + help + This option allows each task and fiber to store 32 bits of custom data, + which can be accessed using the sys_thread_custom_data_xxx() APIs. 
+ +config NANO_TIMEOUTS + bool + prompt "Enable timeouts on nanokernel objects" + default y + depends on SYS_CLOCK_EXISTS + help + Allow fibers and tasks to wait on nanokernel objects with a timeout, by + enabling the nano_xxx_wait_timeout APIs, and allow fibers to sleep for a + period of time, by enabling the fiber_sleep API. + +config NANO_TIMERS + bool + prompt "Enable nanokernel timers" + default y + depends on SYS_CLOCK_EXISTS + help + Allow fibers and tasks to wait on nanokernel timers, which can be + accessed using the nano_timer_xxx() APIs. + +config NUM_DYNAMIC_TIMERS + int + prompt "Number of timers available for dynamic allocation" + default 10 + depends on NANO_TIMERS + help + Number of timers available for dynamic allocation via the + k_timer_alloc()/k_timer_free() API. + +config NANOKERNEL_TICKLESS_IDLE_SUPPORTED + bool + default n + help + To be selected by an architecture if it does support tickless idle in + nanokernel systems. + +config ERRNO + bool + prompt "Enable errno support" + default y + help + Enable per-thread errno in the kernel. Application and library code must + include errno.h provided by the C library (libc) to use the errno + symbol. The C library must access the per-thread errno via the + _get_errno() symbol. + +config NANO_WORKQUEUE + bool "Enable nano workqueue support" + default y + help + Nano workqueues allow scheduling work items to be executed in a fiber + context. Typically such work items are scheduled from ISRs, when the + work cannot be executed in interrupt context. + +config SYSTEM_WORKQUEUE + bool "Start a system workqueue" + default y + depends on NANO_WORKQUEUE + help + Start a system-wide nano_workqueue that can be used by any system + component. + +config SYSTEM_WORKQUEUE_STACK_SIZE + int "System workqueue stack size" + default 1024 + depends on SYSTEM_WORKQUEUE + +config SYSTEM_WORKQUEUE_PRIORITY + int "System workqueue priority" + default -1 + depends on SYSTEM_WORKQUEUE + +config NUM_MBOX_ASYNC_MSGS + int "" + default 10 + help + This option specifies the total number of asynchronous mailbox + messages that can exist simultaneously, across all mailboxes + in the system. + + Setting this option to 0 disables support for asynchronous + mailbox messages. + +config NUM_PIPE_ASYNC_MSGS + int "Maximum number of in-flight asynchronous pipe messages" + default 10 + help + This option specifies the total number of asynchronous pipe + messages that can exist simultaneously, across all pipes in + the system. + + Setting this option to 0 disables support for asynchronous + pipe messages. + +config ATOMIC_OPERATIONS_BUILTIN + bool + help + Use the compiler builtin functions for atomic operations. This is + the preferred method. However, support for all arches in GCC is + incomplete. + +config ATOMIC_OPERATIONS_CUSTOM + bool + help + Use when there isn't support for compiler built-ins, but you have + written optimized assembly code under arch/ which implements these. + +config ATOMIC_OPERATIONS_C + bool + help + Use atomic operations routines that are implemented entirely + in C by locking interrupts. Selected by architectures which either + do not have support for atomic operations in their instruction + set, or haven't been implemented yet during bring-up, and also + the compiler does not have support for the atomic __sync_* builtins. 
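For ATOMIC_OPERATIONS_C above, the fallback is conceptually nothing more than doing the read-modify-write with interrupts locked. A minimal sketch of that pattern follows; it is illustrative only (the real implementation is pulled in from kernel/nanokernel/atomic_c.c, and atomic_t, atomic_val_t, irq_lock() and irq_unlock() come from the kernel headers):

    atomic_val_t atomic_inc(atomic_t *target)
    {
        unsigned int key = irq_lock();      /* no ISR or other thread can interleave */
        atomic_val_t old_value = *target;   /* atomic_* routines return the old value */

        *target = old_value + 1;
        irq_unlock(key);

        return old_value;
    }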
+ +endmenu diff --git a/kernel/unified/Makefile b/kernel/unified/Makefile new file mode 100644 index 00000000000..2b06019a8e6 --- /dev/null +++ b/kernel/unified/Makefile @@ -0,0 +1,43 @@ +ccflags-y += -I$(srctree)/kernel/unified/include + +asflags-y := ${ccflags-y} + +obj-y = +obj-y += $(strip \ + sys_clock.o \ + thread.o \ + init.o \ + sem.o \ + version.o \ + device.o \ + thread_abort.o \ +) + +obj-y += $(strip \ + sched.o \ + mutex.o \ +) +obj-y += $(strip \ + lifo.o \ + fifo.o \ + stack.o \ + mem_map.o \ + msg_q.o \ + mailbox.o \ + mem_pool.o \ + event.o \ + pipes.o \ +) + +obj-$(CONFIG_INT_LATENCY_BENCHMARK) += int_latency_bench.o +obj-$(CONFIG_STACK_CANARIES) += compiler_stack_protect.o +obj-$(CONFIG_SYS_POWER_MANAGEMENT) += idle.o +obj-$(CONFIG_NANO_TIMERS) += timer.o +obj-$(CONFIG_KERNEL_EVENT_LOGGER) += event_logger.o +obj-$(CONFIG_KERNEL_EVENT_LOGGER) += kernel_event_logger.o +obj-$(CONFIG_RING_BUFFER) += ring_buffer.o +obj-$(CONFIG_ATOMIC_OPERATIONS_C) += atomic_c.o +obj-$(CONFIG_ERRNO) += errno.o +obj-$(CONFIG_NANO_WORKQUEUE) += work_q.o + +obj-y += legacy/ diff --git a/kernel/unified/atomic_c.c b/kernel/unified/atomic_c.c new file mode 100644 index 00000000000..5faf6cb521e --- /dev/null +++ b/kernel/unified/atomic_c.c @@ -0,0 +1 @@ +#include "../nanokernel/atomic_c.c" diff --git a/kernel/unified/compiler_stack_protect.c b/kernel/unified/compiler_stack_protect.c new file mode 100644 index 00000000000..8348a35e47d --- /dev/null +++ b/kernel/unified/compiler_stack_protect.c @@ -0,0 +1 @@ +#include "../nanokernel/compiler_stack_protect.c" diff --git a/kernel/unified/device.c b/kernel/unified/device.c new file mode 100644 index 00000000000..e3d813d3063 --- /dev/null +++ b/kernel/unified/device.c @@ -0,0 +1 @@ +#include "../nanokernel/device.c" diff --git a/kernel/unified/errno.c b/kernel/unified/errno.c new file mode 100644 index 00000000000..b94f2a47de4 --- /dev/null +++ b/kernel/unified/errno.c @@ -0,0 +1,7 @@ +#include "../nanokernel/errno.c" + +/* + * Define _k_neg_eagain for use in assembly files as errno.h is + * not assembly language safe. + */ +const int _k_neg_eagain = -EAGAIN; diff --git a/kernel/unified/event.c b/kernel/unified/event.c new file mode 100644 index 00000000000..eb604060b9f --- /dev/null +++ b/kernel/unified/event.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * @brief kernel events. 
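 *
 * A possible usage sketch, with illustrative names; K_EVT_DEFAULT makes
 * k_event_send() simply pend the event for a later k_event_recv(), while
 * a real handler would instead be run from the system workqueue:
 *
 *    struct k_event my_event;
 *
 *    k_event_init(&my_event, K_EVT_DEFAULT);
 *
 *    /@ producer side, e.g. from an ISR @/
 *    k_event_send(&my_event);
 *
 *    /@ consumer side: returns 0 once the event has been sent @/
 *    if (k_event_recv(&my_event, K_FOREVER) == 0) {
 *        /@ handle the event @/
 *    }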
+*/ + +#include +#include +#include +#include +#include +#include + +void _k_event_deliver(struct k_work *work) +{ + struct k_event *event = CONTAINER_OF(work, struct k_event, work_item); + + while (1) { + if ((event->handler)(event) == 0) { + /* do nothing -- handler has processed the event */ + } else { + /* pend the event */ + k_sem_give(&event->sem); + } + if (atomic_dec(&event->send_count) == 1) { + /* have finished delivering events */ + break; + } + } +} + +void k_event_init(struct k_event *event, k_event_handler_t handler) +{ + const struct k_work my_work_item = { NULL, _k_event_deliver, { 1 } }; + + event->handler = handler; + event->send_count = ATOMIC_INIT(0); + event->work_item = my_work_item; + k_sem_init(&event->sem, 0, 1); + SYS_TRACING_OBJ_INIT(event, event); +} + +void k_event_send(struct k_event *event) +{ + if (event->handler == K_EVT_IGNORE) { + /* ignore the event */ + } else if (event->handler == K_EVT_DEFAULT) { + /* pend the event */ + k_sem_give(&event->sem); + } else { + /* deliver the event */ + if (atomic_inc(&event->send_count) == 0) { + /* add event's work item to system work queue */ + k_work_submit_to_queue(&k_sys_work_q, + &event->work_item); + } + } +} + +int k_event_recv(struct k_event *event, int32_t timeout) +{ + return k_sem_take(&event->sem, timeout); +} diff --git a/kernel/unified/event_logger.c b/kernel/unified/event_logger.c new file mode 100644 index 00000000000..bc78b2db5af --- /dev/null +++ b/kernel/unified/event_logger.c @@ -0,0 +1 @@ +#include "../nanokernel/event_logger.c" diff --git a/kernel/unified/fifo.c b/kernel/unified/fifo.c new file mode 100644 index 00000000000..b6647b916d1 --- /dev/null +++ b/kernel/unified/fifo.c @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2010-2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * + * @brief dynamic-size FIFO queue object. 
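 *
 * A possible usage sketch, with illustrative names; note that the first
 * word of every queued item is reserved for the kernel, which uses it as
 * the link field of the underlying singly-linked list:
 *
 *    struct my_item {
 *        void *fifo_reserved;   /@ first word: reserved for the kernel @/
 *        int payload;
 *    };
 *
 *    struct k_fifo my_fifo;
 *    static struct my_item item;
 *
 *    k_fifo_init(&my_fifo);
 *    item.payload = 42;
 *    k_fifo_put(&my_fifo, &item);
 *
 *    /@ elsewhere: waits up to 100 ms, NULL means the wait timed out @/
 *    struct my_item *rx = k_fifo_get(&my_fifo, 100);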
+ */ + + +#include +#include +#include +#include +#include +#include +#include +#include + +void k_fifo_init(struct k_fifo *fifo) +{ + sys_slist_init(&fifo->data_q); + sys_dlist_init(&fifo->wait_q); + + SYS_TRACING_OBJ_INIT(k_fifo, fifo); +} + +static void prepare_thread_to_run(struct k_thread *thread, void *data) +{ + _timeout_abort(thread); + _ready_thread(thread); + _set_thread_return_value_with_data(thread, 0, data); +} + +void k_fifo_put(struct k_fifo *fifo, void *data) +{ + struct k_thread *first_pending_thread; + unsigned int key; + + key = irq_lock(); + + first_pending_thread = _unpend_first_thread(&fifo->wait_q); + + if (first_pending_thread) { + prepare_thread_to_run(first_pending_thread, data); + if (!_is_in_isr() && _must_switch_threads()) { + (void)_Swap(key); + return; + } + } else { + sys_slist_append(&fifo->data_q, data); + } + + irq_unlock(key); +} + +void k_fifo_put_list(struct k_fifo *fifo, void *head, void *tail) +{ + __ASSERT(head && tail, "invalid head or tail"); + + struct k_thread *first_thread, *thread; + unsigned int key; + + key = irq_lock(); + + first_thread = _peek_first_pending_thread(&fifo->wait_q); + while (head && ((thread = _unpend_first_thread(&fifo->wait_q)))) { + prepare_thread_to_run(thread, head); + head = *(void **)head; + } + + if (head) { + sys_slist_append_list(&fifo->data_q, head, tail); + } + + if (first_thread) { + if (!_is_in_isr() && _must_switch_threads()) { + (void)_Swap(key); + return; + } + } + + irq_unlock(key); +} + +void k_fifo_put_slist(struct k_fifo *fifo, sys_slist_t *list) +{ + __ASSERT(!sys_slist_is_empty(list), "list must not be empty"); + + /* + * note: this works as long as: + * - the slist implementation keeps the next pointer as the first + * field of the node object type + * - list->tail->next = NULL. + */ + return k_fifo_put_list(fifo, list->head, list->tail); +} + +void *k_fifo_get(struct k_fifo *fifo, int32_t timeout) +{ + unsigned int key; + void *data; + + key = irq_lock(); + + if (likely(!sys_slist_is_empty(&fifo->data_q))) { + data = sys_slist_get_not_empty(&fifo->data_q); + irq_unlock(key); + return data; + } + + if (timeout == K_NO_WAIT) { + irq_unlock(key); + return NULL; + } + + _pend_current_thread(&fifo->wait_q, timeout); + + return _Swap(key) ? NULL : _current->swap_data; +} diff --git a/kernel/unified/idle.c b/kernel/unified/idle.c new file mode 100644 index 00000000000..fb889e16aa6 --- /dev/null +++ b/kernel/unified/idle.c @@ -0,0 +1 @@ +#include "../nanokernel/idle.c" diff --git a/kernel/unified/include/gen_offset.h b/kernel/unified/include/gen_offset.h new file mode 100644 index 00000000000..c44d554716e --- /dev/null +++ b/kernel/unified/include/gen_offset.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2010, 2012, 2014 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @file + * @brief Macros to generate structure member offset definitions + * + * This header contains macros to allow a nanokernel implementation to + * generate absolute symbols whose values represents the member offsets for + * various nanokernel structures. These absolute symbols are typically + * utilized by assembly source files rather than hardcoding the values in + * some local header file. + * + * WARNING: Absolute symbols can potentially be utilized by external tools -- + * for example, to locate a specific field within a data structure. + * Consequently, changes made to such symbols may require modifications to the + * associated tool(s). Typically, relocating a member of a structure merely + * requires that a tool be rebuilt; however, moving a member to another + * structure (or to a new sub-structure within an existing structure) may + * require that the tool itself be modified. Likewise, deleting, renaming, or + * changing the meaning of an absolute symbol may require modifications to a + * tool. + * + * The macro "GEN_OFFSET_SYM(structure, member)" is used to generate a single + * absolute symbol. The absolute symbol will appear in the object module + * generated from the source file that utilizes the GEN_OFFSET_SYM() macro. + * Absolute symbols representing a structure member offset have the following + * form: + * + * ____OFFSET + * + * This header also defines the GEN_ABSOLUTE_SYM macro to simply define an + * absolute symbol, irrespective of whether the value represents a structure + * or offset. + * + * The following sample file illustrates the usage of the macros available + * in this file: + * + * + * + * #include + * /@ include struct definitions for which offsets symbols are to be + * generated @/ + * + * #include + * GEN_ABS_SYM_BEGIN (_OffsetAbsSyms) /@ the name parameter is arbitrary @/ + * /@ tNANO structure member offsets @/ + * + * GEN_OFFSET_SYM (tNANO, fiber); + * GEN_OFFSET_SYM (tNANO, task); + * GEN_OFFSET_SYM (tNANO, current); + * GEN_OFFSET_SYM (tNANO, nested); + * GEN_OFFSET_SYM (tNANO, common_isp); + * + * GEN_ABSOLUTE_SYM (__tNANO_SIZEOF, sizeof(tNANO)); + * + * GEN_ABS_SYM_END + * + * + * Compiling the sample offsets.c results in the following symbols in offsets.o: + * + * $ nm offsets.o + * 00000010 A __tNANO_common_isp_OFFSET + * 00000008 A __tNANO_current_OFFSET + * 0000000c A __tNANO_nested_OFFSET + * 00000000 A __tNANO_fiber_OFFSET + * 00000004 A __tNANO_task_OFFSET + */ + +#ifndef _GEN_OFFSET_H +#define _GEN_OFFSET_H + +#include +#include + +/* definition of the GEN_OFFSET_SYM() macros is toolchain independent */ + +#define GEN_OFFSET_SYM(S, M) \ + GEN_ABSOLUTE_SYM(__##S##_##M##_##OFFSET, offsetof(S, M)) + +#endif /* _GEN_OFFSET_H */ diff --git a/kernel/unified/include/nano_internal.h b/kernel/unified/include/nano_internal.h new file mode 100644 index 00000000000..65ca2b0ae07 --- /dev/null +++ b/kernel/unified/include/nano_internal.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2010-2012, 2014-2015 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * @brief Architecture-independent private nanokernel APIs + * + * This file contains private nanokernel APIs that are not + * architecture-specific. + */ + +#ifndef _NANO_INTERNAL__H_ +#define _NANO_INTERNAL__H_ + +#ifdef CONFIG_KERNEL_V2 +#define K_NUM_PRIORITIES \ + (CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES + 1) +#endif + +#ifndef _ASMLANGUAGE + +#ifdef __cplusplus +extern "C" { +#endif + +/* Early boot functions */ + +void _bss_zero(void); +#ifdef CONFIG_XIP +void _data_copy(void); +#else +static inline void _data_copy(void) +{ + /* Do nothing */ +} +#endif +FUNC_NORETURN void _Cstart(void); + +/* helper type alias for thread control structure */ + +typedef struct tcs tTCS; +typedef void (*_thread_entry_t)(void *, void *, void *); + +extern void _thread_entry(void (*)(void *, void *, void *), + void *, void *, void *); + +extern void _new_thread(char *pStack, unsigned stackSize, + void *uk_task_ptr, + void (*pEntry)(void *, void *, void *), + void *p1, void *p2, void *p3, + int prio, unsigned options); + +/* context switching and scheduling-related routines */ + +extern void _nano_fiber_ready(struct tcs *tcs); +extern void _nano_fiber_swap(void); + +extern unsigned int _Swap(unsigned int); + +/* set and clear essential fiber/task flag */ + +extern void _thread_essential_set(void); +extern void _thread_essential_clear(void); + +/* clean up when a thread is aborted */ + +#if defined(CONFIG_THREAD_MONITOR) +extern void _thread_exit(struct tcs *tcs); +#else +#define _thread_exit(tcs) \ + do {/* nothing */ \ + } while (0) +#endif /* CONFIG_THREAD_MONITOR */ + +/* special nanokernel object APIs */ + +struct nano_lifo; + +extern void *_nano_fiber_lifo_get_panic(struct nano_lifo *lifo); + +#define _TASK_PENDQ_INIT(queue) do { } while (0) +#define _NANO_UNPEND_TASKS(queue) do { } while (0) +#define _TASK_NANO_UNPEND_TASKS(queue) do { } while (0) +#define _NANO_TASK_READY(tcs) do { } while (0) +#define _NANO_TIMER_TASK_READY(tcs) do { } while (0) +#define _IS_MICROKERNEL_TASK(tcs) (0) + +#ifdef __cplusplus +} +#endif + +#endif /* _ASMLANGUAGE */ + +#endif /* _NANO_INTERNAL__H_ */ diff --git a/kernel/unified/include/nano_offsets.h b/kernel/unified/include/nano_offsets.h new file mode 100644 index 00000000000..ed4e22c412f --- /dev/null +++ b/kernel/unified/include/nano_offsets.h @@ -0,0 +1,66 @@ +/* nano_offsets.h - nanokernel structure member offset definitions */ + +/* + * Copyright (c) 2013-2014 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include + +#ifndef _NANO_OFFSETS__H_ +#define _NANO_OFFSETS__H_ + +/* + * The final link step uses the symbol _OffsetAbsSyms to force the linkage of + * offsets.o into the ELF image. 
+ */ + +GEN_ABS_SYM_BEGIN(_OffsetAbsSyms) + +/* arch-agnostic tNANO structure member offsets */ + +GEN_OFFSET_SYM(tNANO, current); + +#if defined(CONFIG_THREAD_MONITOR) +GEN_OFFSET_SYM(tNANO, threads); +#endif + +#ifdef CONFIG_FP_SHARING +GEN_OFFSET_SYM(tNANO, current_fp); +#endif + +/* size of the entire tNANO structure */ + +GEN_ABSOLUTE_SYM(__tNANO_SIZEOF, sizeof(tNANO)); + +/* arch-agnostic struct tcs structure member offsets */ + +GEN_OFFSET_SYM(tTCS, prio); +GEN_OFFSET_SYM(tTCS, flags); +GEN_OFFSET_SYM(tTCS, coopReg); /* start of coop register set */ +GEN_OFFSET_SYM(tTCS, preempReg); /* start of prempt register set */ + +#if defined(CONFIG_THREAD_MONITOR) +GEN_OFFSET_SYM(tTCS, next_thread); +#endif + +GEN_OFFSET_SYM(tTCS, sched_locked); + +/* size of the entire struct tcs structure */ + +GEN_ABSOLUTE_SYM(__tTCS_SIZEOF, sizeof(tTCS)); + +/* size of the device structure. Used by linker scripts */ +GEN_ABSOLUTE_SYM(__DEVICE_STR_SIZEOF, sizeof(struct device)); + +#endif /* _NANO_OFFSETS__H_ */ diff --git a/kernel/unified/include/sched.h b/kernel/unified/include/sched.h new file mode 100644 index 00000000000..ce5af68a9fd --- /dev/null +++ b/kernel/unified/include/sched.h @@ -0,0 +1,350 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _sched__h_ +#define _sched__h_ + +#include +#include +#include +#include + +extern k_tid_t const _main_thread; +extern k_tid_t const _idle_thread; + +extern void _add_thread_to_ready_q(struct tcs *t); +extern void _remove_thread_from_ready_q(struct tcs *t); +extern void _reschedule_threads(int key); +extern void k_sched_unlock(void); +extern void _pend_thread(struct tcs *thread, + _wait_q_t *wait_q, int32_t timeout); +extern void _pend_current_thread(_wait_q_t *wait_q, int32_t timeout); +extern struct tcs *_get_next_ready_thread(void); +extern int __must_switch_threads(void); +extern void k_thread_priority_set(struct tcs *thread, int32_t priority); +extern int k_current_priority_get(void); +extern int32_t _ms_to_ticks(int32_t ms); + +/* + * The _is_prio_higher family: I created this because higher priorities are + * lower numerically and I always found somewhat confusing seeing, e.g.: + * + * if (t1.prio < t2.prio) /# is t1's priority higher then t2's priority ? #/ + * + * in code. And the fact that most of the time that kind of code has this + * exact comment warrants a function where it is embedded in the name. + * + * IMHO, feel free to remove them and do the comparison directly if this feels + * like overkill. 
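 *
 * For example, with the convention that a numerically lower value is a
 * higher priority (cooperative priorities being the negative range):
 *
 *    _is_prio1_higher_than_prio2(-1, 0)   /# 1: the coop priority wins #/
 *    _is_prio1_higher_than_prio2(5, 3)    /# 0: 5 is the lower priority #/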
+ */ + +static inline int _is_prio1_higher_than_prio2(int prio1, int prio2) +{ + return prio1 < prio2; +} + +static inline int _is_prio_higher(int prio, int test_prio) +{ + return _is_prio1_higher_than_prio2(prio, test_prio); +} + +static inline int _is_t1_higher_prio_than_t2(struct tcs *t1, struct tcs *t2) +{ + return _is_prio1_higher_than_prio2(t1->prio, t2->prio); +} + +static inline int _is_higher_prio_than_current(struct tcs *thread) +{ + return _is_t1_higher_prio_than_t2(thread, _nanokernel.current); +} + +/* is thread currenlty cooperative ? */ +static inline int _is_coop(struct tcs *thread) +{ + return thread->prio < 0; +} + +/* is thread currently preemptible ? */ +static inline int _is_preempt(struct tcs *thread) +{ + return !_is_coop(thread); +} + +/* is current thread preemptible and we are not running in ISR context */ +static inline int _is_current_execution_context_preemptible(void) +{ + return !_is_in_isr() && _is_preempt(_nanokernel.current); +} + +/* find out if priority is under priority inheritance ceiling */ +static inline int _is_under_prio_ceiling(int prio) +{ + return prio >= CONFIG_PRIORITY_CEILING; +} + +/* + * Find out what priority to set a thread to taking the prio ceiling into + * consideration. + */ +static inline int _get_new_prio_with_ceiling(int prio) +{ + return _is_under_prio_ceiling(prio) ? prio : CONFIG_PRIORITY_CEILING; +} + +/* find out the prio bitmap index for a given prio */ +static inline int _get_ready_q_prio_bmap_index(int prio) +{ + return (prio + CONFIG_NUM_COOP_PRIORITIES) >> 5; +} + +/* find out the prio bit for a given prio */ +static inline int _get_ready_q_prio_bit(int prio) +{ + return (1 << ((prio + CONFIG_NUM_COOP_PRIORITIES) & 0x1f)); +} + +/* find out the ready queue array index for a given prio */ +static inline int _get_ready_q_q_index(int prio) +{ + return prio + CONFIG_NUM_COOP_PRIORITIES; +} + +#if (K_NUM_PRIORITIES > 32) + #error not supported yet +#endif + +/* find out the currently highest priority where a thread is ready to run */ +/* interrupts must be locked */ +static inline int _get_highest_ready_prio(void) +{ + uint32_t ready = _nanokernel.ready_q.prio_bmap[0]; + + return find_lsb_set(ready) - 1 - CONFIG_NUM_COOP_PRIORITIES; +} + +/* + * Checks if current thread must be context-switched out. The caller must + * already know that the execution context is a thread. + */ +static inline int _must_switch_threads(void) +{ + return _is_preempt(_current) && __must_switch_threads(); +} + +/* + * Application API. + * + * lock the scheduler: prevents another thread from preempting the current one + * except if the current thread does an operation that causes it to pend + * + * Can be called recursively. + */ +static inline void k_sched_lock(void) +{ + __ASSERT(!_is_in_isr(), ""); + + atomic_inc(&_nanokernel.current->sched_locked); + + K_DEBUG("scheduler locked (%p:%d)\n", + _current, _current->sched_locked); +} + +/** + * @brief Unlock the scheduler but do NOT reschedule + * + * It is incumbent upon the caller to ensure that the reschedule occurs + * sometime after the scheduler is unlocked. 
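 *
 * The usual pairing, for reference (k_sched_lock()/k_sched_unlock() calls
 * may nest, since the lock is a per-thread count):
 *
 *    k_sched_lock();
 *    /# the current thread cannot be preempted here, unless it pends #/
 *    k_sched_unlock();
 *
 * This variant only drops the count; it is meant for kernel paths that
 * already end in a reschedule point of their own.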
+ */ +static inline void _sched_unlock_no_reschedule(void) +{ + __ASSERT(!_is_in_isr(), ""); + + atomic_dec(&_nanokernel.current->sched_locked); +} + +static inline void _set_thread_states(struct k_thread *thread, uint32_t states) +{ + thread->flags |= states; +} + +static inline void _reset_thread_states(struct k_thread *thread, + uint32_t states) +{ + thread->flags &= ~states; +} + +/* mark a thread as being suspended */ +static inline void _mark_thread_as_suspended(struct tcs *thread) +{ + thread->flags |= K_SUSPENDED; +} + +/* mark a thread as not being suspended */ +static inline void _mark_thread_as_not_suspended(struct tcs *thread) +{ + thread->flags &= ~K_SUSPENDED; +} + +/* mark a thread as being in the timer queue */ +static inline void _mark_thread_as_timing(struct tcs *thread) +{ + thread->flags |= K_TIMING; +} + +/* mark a thread as not being in the timer queue */ +static inline void _mark_thread_as_not_timing(struct tcs *thread) +{ + thread->flags &= ~K_TIMING; +} + +/* check if a thread is on the timer queue */ +static inline int _is_thread_timing(struct tcs *thread) +{ + return !!(thread->flags & K_TIMING); +} + +static inline int _has_thread_started(struct tcs *thread) +{ + return !(thread->flags & K_PRESTART); +} + +/* check if a thread is ready */ +static inline int _is_thread_ready(struct tcs *thread) +{ + return (thread->flags & K_EXECUTION_MASK) == K_READY; +} + +/* mark a thread as pending in its TCS */ +static inline void _mark_thread_as_pending(struct tcs *thread) +{ + thread->flags |= K_PENDING; +} + +/* mark a thread as not pending in its TCS */ +static inline void _mark_thread_as_not_pending(struct tcs *thread) +{ + thread->flags &= ~K_PENDING; +} + +/* check if a thread is pending */ +static inline int _is_thread_pending(struct tcs *thread) +{ + return !!(thread->flags & K_PENDING); +} + +/* + * Mark the thread as not being in the timer queue. If this makes it ready, + * then add it to the ready queue according to its priority. + */ +/* must be called with interrupts locked */ +static inline void _ready_thread(struct tcs *thread) +{ + __ASSERT(_is_prio_higher(thread->prio, K_LOWEST_THREAD_PRIO) || + ((thread->prio == K_LOWEST_THREAD_PRIO) && + (thread == _idle_thread)), + "thread %p prio too low (is %d, cannot be lower than %d)", + thread, thread->prio, + thread == _idle_thread ? K_LOWEST_THREAD_PRIO : + K_LOWEST_APPLICATION_THREAD_PRIO); + + __ASSERT(!_is_prio_higher(thread->prio, K_HIGHEST_THREAD_PRIO), + "thread %p prio too high (id %d, cannot be higher than %d)", + thread, thread->prio, K_HIGHEST_THREAD_PRIO); + + /* K_PRESTART is needed to handle the start-with-delay case */ + _reset_thread_states(thread, K_TIMING|K_PRESTART); + + if (_is_thread_ready(thread)) { + _add_thread_to_ready_q(thread); + } +} + +/** + * @brief Mark a thread as started + * + * This routine must be called with interrupts locked. + */ +static inline void _mark_thread_as_started(struct tcs *thread) +{ + thread->flags &= ~K_PRESTART; +} + +/** + * @brief Mark thread as dead + * + * This routine must be called with interrupts locked. + */ +static inline void _mark_thread_as_dead(struct tcs *thread) +{ + thread->flags |= K_DEAD; +} + +/* + * Application API. + * + * Get a thread's priority. Note that it might have changed by the time this + * function returns. + */ +static inline int32_t k_thread_priority_get(struct tcs *thread) +{ + return thread->prio; +} + +/* + * Set a thread's priority. If the thread is ready, place it in the correct + * queue. 
+ */ +/* must be called with interrupts locked */ +static inline void _thread_priority_set(struct tcs *thread, int prio) +{ + if (_is_thread_ready(thread)) { + _remove_thread_from_ready_q(thread); + thread->prio = prio; + _add_thread_to_ready_q(thread); + } else { + thread->prio = prio; + } +} + +/* check if thread is a thread pending on a particular wait queue */ +static inline struct k_thread *_peek_first_pending_thread(_wait_q_t *wait_q) +{ + return (struct k_thread *)sys_dlist_peek_head(wait_q); +} + +/* unpend the first thread from a wait queue */ +static inline struct tcs *_unpend_first_thread(_wait_q_t *wait_q) +{ + struct k_thread *thread = (struct k_thread *)sys_dlist_get(wait_q); + + if (thread) { + _mark_thread_as_not_pending(thread); + } + + return thread; +} + +/* Unpend a thread from the wait queue it is on. Thread must be pending. */ +/* must be called with interrupts locked */ +static inline void _unpend_thread(struct k_thread *thread) +{ + __ASSERT(thread->flags & K_PENDING, ""); + + sys_dlist_remove(&thread->k_q_node); + _mark_thread_as_not_pending(thread); +} + +#endif /* _sched__h_ */ diff --git a/kernel/unified/include/timeout_q.h b/kernel/unified/include/timeout_q.h new file mode 100644 index 00000000000..9aead81e966 --- /dev/null +++ b/kernel/unified/include/timeout_q.h @@ -0,0 +1,295 @@ +/** @file + * @brief timeout queue for fibers on nanokernel objects + * + * This file is meant to be included by nanokernel/include/wait_q.h only + */ + +/* + * Copyright (c) 2015 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _kernel_nanokernel_include_timeout_q__h_ +#define _kernel_nanokernel_include_timeout_q__h_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +static inline int _do_timeout_abort(struct _timeout *t); +static inline void _do_timeout_add(struct tcs *tcs, + struct _timeout *t, + _wait_q_t *wait_q, + int32_t timeout); + +#if defined(CONFIG_NANO_TIMEOUTS) +/* initialize the nano timeouts part of TCS when enabled in the kernel */ + +static inline void _timeout_init(struct _timeout *t, _timeout_func_t func) +{ + /* + * Must be initialized here and when dequeueing a timeout so that code + * not dealing with timeouts does not have to handle this, such as when + * waiting forever on a semaphore. + */ + t->delta_ticks_from_prev = -1; + + /* + * Must be initialized here so that the _fiber_wakeup family of APIs can + * verify the fiber is not on a wait queue before aborting a timeout. + */ + t->wait_q = NULL; + + /* + * Must be initialized here, so the _timeout_handle_one_timeout() + * routine can check if there is a fiber waiting on this timeout + */ + t->tcs = NULL; + + /* + * Function must be initialized before being potentially called. 
+ */ + t->func = func; + + /* + * These are initialized when enqueing on the timeout queue: + * + * tcs->timeout.node.next + * tcs->timeout.node.prev + */ +} + +static inline void _timeout_tcs_init(struct tcs *tcs) +{ + _timeout_init(&tcs->timeout, NULL); +} + +/* + * XXX - backwards compatibility until the arch part is updated to call + * _timeout_tcs_init() + */ +static inline void _nano_timeout_tcs_init(struct tcs *tcs) +{ + _timeout_tcs_init(tcs); +} + +/** + * @brief Remove the thread from nanokernel object wait queue + * + * If a thread waits on a nanokernel object with timeout, + * remove the thread from the wait queue + * + * @param tcs Waiting thread + * @param t nano timer + * + * @return N/A + */ +static inline void _timeout_object_dequeue(struct tcs *tcs, struct _timeout *t) +{ + if (t->wait_q) { + _timeout_remove_tcs_from_wait_q(tcs); + } +} + +/* abort a timeout for a specified fiber */ +static inline int _timeout_abort(struct tcs *tcs) +{ + return _do_timeout_abort(&tcs->timeout); +} + +/* put a fiber on the timeout queue and record its wait queue */ +static inline void _timeout_add(struct tcs *tcs, _wait_q_t *wait_q, + int32_t timeout) +{ + _do_timeout_add(tcs, &tcs->timeout, wait_q, timeout); +} + +#else +#define _timeout_object_dequeue(tcs, t) do { } while (0) +#endif /* CONFIG_NANO_TIMEOUTS */ + +/* + * Handle one expired timeout. + * This removes the fiber from the timeout queue head, and also removes it + * from the wait queue it is on if waiting for an object. In that case, it + * also sets the return value to 0/NULL. + */ + +/* must be called with interrupts locked */ +static inline struct _timeout *_timeout_handle_one_timeout( + sys_dlist_t *timeout_q) +{ + struct _timeout *t = (void *)sys_dlist_get(timeout_q); + struct tcs *tcs = t->tcs; + + K_DEBUG("timeout %p\n", t); + if (tcs != NULL) { + _timeout_object_dequeue(tcs, t); + _ready_thread(tcs); + } else if (t->func) { + t->func(t); + } + /* + * Note: t->func() may add timeout again. 
Make sure that + * delta_ticks_from_prev is set to -1 only if timeout is + * still expired (delta_ticks_from_prev == 0) + */ + if (t->delta_ticks_from_prev == 0) { + t->delta_ticks_from_prev = -1; + } + + return (struct _timeout *)sys_dlist_peek_head(timeout_q); +} + +/* loop over all expired timeouts and handle them one by one */ +/* must be called with interrupts locked */ +static inline void _timeout_handle_timeouts(void) +{ + sys_dlist_t *timeout_q = &_nanokernel.timeout_q; + struct _timeout *next; + + next = (struct _timeout *)sys_dlist_peek_head(timeout_q); + while (next && next->delta_ticks_from_prev == 0) { + next = _timeout_handle_one_timeout(timeout_q); + } +} + +/** + * + * @brief abort a timeout + * + * @param t Timeout to abort + * + * @return 0 in success and -1 if the timer has expired + */ +static inline int _do_timeout_abort(struct _timeout *t) +{ + sys_dlist_t *timeout_q = &_nanokernel.timeout_q; + + if (-1 == t->delta_ticks_from_prev) { + return -1; + } + + if (!sys_dlist_is_tail(timeout_q, &t->node)) { + struct _timeout *next = + (struct _timeout *)sys_dlist_peek_next(timeout_q, + &t->node); + next->delta_ticks_from_prev += t->delta_ticks_from_prev; + } + sys_dlist_remove(&t->node); + t->delta_ticks_from_prev = -1; + + return 0; +} + +static inline int _nano_timer_timeout_abort(struct _timeout *t) +{ + return _do_timeout_abort(t); +} + +/* + * callback for sys_dlist_insert_at(): + * + * Returns 1 if the timeout to insert is lower or equal than the next timeout + * in the queue, signifying that it should be inserted before the next. + * Returns 0 if it is greater. + * + * If it is greater, the timeout to insert is decremented by the next timeout, + * since the timeout queue is a delta queue. If it lower or equal, decrement + * the timeout of the insert point to update its delta queue value, since the + * current timeout will be inserted before it. 
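 *
 * Worked example: if the queue currently holds the deltas {3, 2, 5}
 * (absolute expiries at ticks 3, 5 and 10), inserting a 6-tick timeout
 * walks the queue as 6 > 3 (6 becomes 3), 3 > 2 (3 becomes 1), then
 * 1 <= 5, so the 5 is reduced to 4 and the new node is inserted with a
 * delta of 1, leaving {3, 2, 1, 4}.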
+ */ +static int _timeout_insert_point_test(sys_dnode_t *test, void *timeout) +{ + struct _timeout *t = (void *)test; + int32_t *timeout_to_insert = timeout; + + if (*timeout_to_insert > t->delta_ticks_from_prev) { + *timeout_to_insert -= t->delta_ticks_from_prev; + return 0; + } + + t->delta_ticks_from_prev -= *timeout_to_insert; + return 1; +} + +/** + * + * @brief Put timeout on the timeout queue, record waiting fiber and wait queue + * + * @param tcs Fiber waiting on a timeout + * @param t Timeout structure to be added to the nanokernel queue + * @wait_q nanokernel object wait queue + * @timeout Timeout in ticks + * + * @return N/A + */ +static inline void _do_timeout_add(struct tcs *tcs, struct _timeout *t, + _wait_q_t *wait_q, int32_t timeout) +{ + K_DEBUG("thread %p on wait_q %p, for timeout: %d\n", + tcs, wait_q, timeout); + + sys_dlist_t *timeout_q = &_nanokernel.timeout_q; + + K_DEBUG("timeout_q %p before: head: %p, tail: %p\n", + &_nanokernel.timeout_q, + sys_dlist_peek_head(&_nanokernel.timeout_q), + _nanokernel.timeout_q.tail); + + K_DEBUG("timeout %p before: next: %p, prev: %p\n", + t, t->node.next, t->node.prev); + + t->tcs = tcs; + t->delta_ticks_from_prev = timeout; + t->wait_q = (sys_dlist_t *)wait_q; + sys_dlist_insert_at(timeout_q, (void *)t, + _timeout_insert_point_test, + &t->delta_ticks_from_prev); + + K_DEBUG("timeout_q %p after: head: %p, tail: %p\n", + &_nanokernel.timeout_q, + sys_dlist_peek_head(&_nanokernel.timeout_q), + _nanokernel.timeout_q.tail); + + K_DEBUG("timeout %p after: next: %p, prev: %p\n", + t, t->node.next, t->node.prev); +} + +static inline void _nano_timer_timeout_add(struct _timeout *t, + _wait_q_t *wait_q, + int32_t timeout) +{ + _do_timeout_add(NULL, t, wait_q, timeout); +} + +/* find the closest deadline in the timeout queue */ +static inline uint32_t _nano_get_earliest_timeouts_deadline(void) +{ + sys_dlist_t *q = &_nanokernel.timeout_q; + struct _timeout *t = + (struct _timeout *)sys_dlist_peek_head(q); + + return t ? min((uint32_t)t->delta_ticks_from_prev, + (uint32_t)_nanokernel.task_timeout) + : (uint32_t)_nanokernel.task_timeout; +} + +#ifdef __cplusplus +} +#endif + +#endif /* _kernel_nanokernel_include_timeout_q__h_ */ diff --git a/kernel/unified/include/wait_q.h b/kernel/unified/include/wait_q.h new file mode 100644 index 00000000000..134e3a5d31a --- /dev/null +++ b/kernel/unified/include/wait_q.h @@ -0,0 +1,134 @@ +/* wait queue for multiple fibers on nanokernel objects */ + +/* + * Copyright (c) 2015 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef _kernel_nanokernel_include_wait_q__h_ +#define _kernel_nanokernel_include_wait_q__h_ + +#include + +#ifdef CONFIG_KERNEL_V2 +#include +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#if 0 +/* reset a wait queue, call during operation */ +static inline void _nano_wait_q_reset(struct _nano_queue *wait_q) +{ + sys_dlist_init((sys_dlist_t *)wait_q); +} + +/* initialize a wait queue: call only during object initialization */ +static inline void _nano_wait_q_init(struct _nano_queue *wait_q) +{ + _nano_wait_q_reset(wait_q); +} + +/* + * Remove first fiber from a wait queue and put it on the ready queue, knowing + * that the wait queue is not empty. + */ +static inline +struct tcs *_nano_wait_q_remove_no_check(struct _nano_queue *wait_q) +{ + struct tcs *tcs = (struct tcs *)sys_dlist_get((sys_dlist_t *)wait_q); + + _ready_thread(tcs); + + return tcs; +} + +/* + * Remove first fiber from a wait queue and put it on the ready queue. + * Abort and return NULL if the wait queue is empty. + */ +static inline struct tcs *_nano_wait_q_remove(struct _nano_queue *wait_q) +{ + return _nano_wait_q_remove_no_check(wait_q); +} + +/* put current fiber on specified wait queue */ +static inline void _nano_wait_q_put(struct _nano_queue *wait_q) +{ + /* unused */ +} +#endif + +#if defined(CONFIG_NANO_TIMEOUTS) +static inline void _timeout_remove_tcs_from_wait_q(struct tcs *tcs) +{ + _unpend_thread(tcs); + tcs->timeout.wait_q = NULL; +} +#include + + #define _TIMEOUT_TICK_GET() sys_tick_get() + + #define _TIMEOUT_ADD(thread, pq, ticks) \ + do { \ + if ((ticks) != TICKS_UNLIMITED) { \ + _timeout_add(thread, pq, ticks); \ + } \ + } while (0) + #define _TIMEOUT_SET_TASK_TIMEOUT(ticks) \ + _nanokernel.task_timeout = (ticks) + + #define _TIMEOUT_UPDATE(timeout, limit, cur_ticks) \ + do { \ + if ((timeout) != TICKS_UNLIMITED) { \ + (timeout) = (int32_t)((limit) - (cur_ticks)); \ + } \ + } while (0) + +#elif defined(CONFIG_NANO_TIMERS) +#include + #define _timeout_tcs_init(tcs) do { } while ((0)) + #define _timeout_abort(tcs) do { } while ((0)) + + #define _TIMEOUT_TICK_GET() 0 + #define _TIMEOUT_ADD(thread, pq, ticks) do { } while (0) + #define _TIMEOUT_SET_TASK_TIMEOUT(ticks) do { } while ((0)) + #define _TIMEOUT_UPDATE(timeout, limit, cur_ticks) do { } while (0) +#else + #define _timeout_tcs_init(tcs) do { } while ((0)) + #define _timeout_abort(tcs) do { } while ((0)) + #define _nano_get_earliest_timeouts_deadline() \ + ((uint32_t)TICKS_UNLIMITED) + + #define _TIMEOUT_TICK_GET() 0 + #define _TIMEOUT_ADD(thread, pq, ticks) do { } while (0) + #define _TIMEOUT_SET_TASK_TIMEOUT(ticks) do { } while ((0)) + #define _TIMEOUT_UPDATE(timeout, limit, cur_ticks) do { } while (0) +#endif + + #define _NANO_OBJECT_WAIT(queue, data, timeout, key) \ + do { \ + _TIMEOUT_SET_TASK_TIMEOUT(timeout); \ + nano_cpu_atomic_idle(key); \ + key = irq_lock(); \ + } while (0) + +#ifdef __cplusplus +} +#endif + +#endif /* _kernel_nanokernel_include_wait_q__h_ */ diff --git a/kernel/unified/init.c b/kernel/unified/init.c new file mode 100644 index 00000000000..4a60d5dc4cf --- /dev/null +++ b/kernel/unified/init.c @@ -0,0 +1,383 @@ +/* + * Copyright (c) 2010-2014 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * @brief Nanokernel initialization module + * + * This module contains routines that are used to initialize the nanokernel. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* kernel build timestamp items */ + +#define BUILD_TIMESTAMP "BUILD: " __DATE__ " " __TIME__ + +#ifdef CONFIG_BUILD_TIMESTAMP +const char * const build_timestamp = BUILD_TIMESTAMP; +#endif + +/* boot banner items */ + +#define BOOT_BANNER "BOOTING ZEPHYR OS" + +#if !defined(CONFIG_BOOT_BANNER) +#define PRINT_BOOT_BANNER() do { } while (0) +#elif !defined(CONFIG_BUILD_TIMESTAMP) +#define PRINT_BOOT_BANNER() printk("***** " BOOT_BANNER " *****\n") +#else +#define PRINT_BOOT_BANNER() \ + printk("***** " BOOT_BANNER " - %s *****\n", build_timestamp) +#endif + +/* boot time measurement items */ + +#ifdef CONFIG_BOOT_TIME_MEASUREMENT +uint64_t __noinit __start_tsc; /* timestamp when kernel starts */ +uint64_t __noinit __main_tsc; /* timestamp when main task starts */ +uint64_t __noinit __idle_tsc; /* timestamp when CPU goes idle */ +#endif + +/* random number generator items */ +#if defined(CONFIG_TEST_RANDOM_GENERATOR) || \ + defined(CONFIG_CUSTOM_RANDOM_GENERATOR) +#define RAND32_INIT() sys_rand32_init() +#else +#define RAND32_INIT() +#endif + +/* init/main and idle threads */ + +#define IDLE_STACK_SIZE 256 + +#if CONFIG_MAIN_STACK_SIZE & (STACK_ALIGN - 1) + #error "MAIN_STACK_SIZE must be a multiple of the stack alignment" +#endif + +#if IDLE_STACK_SIZE & (STACK_ALIGN - 1) + #error "IDLE_STACK_SIZE must be a multiple of the stack alignment" +#endif + +static char __noinit __stack main_stack[CONFIG_MAIN_STACK_SIZE]; +static char __noinit __stack idle_stack[IDLE_STACK_SIZE]; + +k_tid_t const _main_thread = (k_tid_t)main_stack; +k_tid_t const _idle_thread = (k_tid_t)idle_stack; + +/* + * storage space for the interrupt stack + * + * Note: This area is used as the system stack during nanokernel initialization, + * since the nanokernel hasn't yet set up its own stack areas. The dual + * purposing of this area is safe since interrupts are disabled until the + * nanokernel context switches to the background (or idle) task. + */ +#if CONFIG_ISR_STACK_SIZE & (STACK_ALIGN - 1) + #error "ISR_STACK_SIZE must be a multiple of the stack alignment" +#endif +char __noinit __stack _interrupt_stack[CONFIG_ISR_STACK_SIZE]; + +#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS) + #include + #define initialize_timeouts() do { \ + sys_dlist_init(&_nanokernel.timeout_q); \ + _nanokernel.task_timeout = TICKS_UNLIMITED; \ + } while ((0)) +#else + #define initialize_timeouts() do { } while ((0)) +#endif + +/** + * + * @brief Clear BSS + * + * This routine clears the BSS region, so all bytes are 0. + * + * @return N/A + */ + +void _bss_zero(void) +{ + uint32_t *pos = (uint32_t *)&__bss_start; + + for ( ; pos < (uint32_t *)&__bss_end; pos++) { + *pos = 0; + } +} + + +#ifdef CONFIG_XIP +/** + * + * @brief Copy the data section from ROM to RAM + * + * This routine copies the data section from ROM to RAM. 
+ * + * @return N/A + */ +void _data_copy(void) +{ + uint32_t *pROM, *pRAM; + + pROM = (uint32_t *)&__data_rom_start; + pRAM = (uint32_t *)&__data_ram_start; + + for ( ; pRAM < (uint32_t *)&__data_ram_end; pROM++, pRAM++) { + *pRAM = *pROM; + } +} +#endif + +/** + * + * @brief Mainline for nanokernel's background task + * + * This routine completes kernel initialization by invoking the remaining + * init functions, then invokes application's main() routine. + * + * @return N/A + */ +static void _main(void *unused1, void *unused2, void *unused3) +{ + ARG_UNUSED(unused1); + ARG_UNUSED(unused2); + ARG_UNUSED(unused3); + + _sys_device_do_config_level(_SYS_INIT_LEVEL_SECONDARY); + _sys_device_do_config_level(_SYS_INIT_LEVEL_NANOKERNEL); + _sys_device_do_config_level(_SYS_INIT_LEVEL_APPLICATION); + +#ifdef CONFIG_CPLUSPLUS + /* Process the .ctors and .init_array sections */ + extern void __do_global_ctors_aux(void); + extern void __do_init_array_aux(void); + __do_global_ctors_aux(); + __do_init_array_aux(); +#endif + + _init_static_threads(); + + _main_thread->flags &= ~ESSENTIAL; + + extern void main(void); + main(); +} + +void __weak main(void) +{ + /* NOP default main() if the application does not provide one. */ +} + +static void idle(void *unused1, void *unused2, void *unused3) +{ + ARG_UNUSED(unused1); + ARG_UNUSED(unused2); + ARG_UNUSED(unused3); + + for (;;) { + nano_cpu_idle(); + if (_is_coop(_current)) { + k_yield(); + } + } +} + +/** + * + * @brief Initializes nanokernel data structures + * + * This routine initializes various nanokernel data structures, including + * the background (or idle) task and any architecture-specific initialization. + * + * Note that all fields of "_nanokernel" are set to zero on entry, which may + * be all the initialization many of them require. + * + * @return N/A + */ +static void nano_init(struct tcs *dummy_thread) +{ + /* + * Initialize the current execution thread to permit a level of + * debugging output if an exception should happen during nanokernel + * initialization. However, don't waste effort initializing the + * fields of the dummy thread beyond those needed to identify it as a + * dummy thread. + */ + + _current = dummy_thread; + + /* + * Do not insert dummy execution context in the list of fibers, so + * that it does not get scheduled back in once context-switched out. + */ + dummy_thread->flags = ESSENTIAL; + dummy_thread->prio = K_PRIO_COOP(0); + + /* _nanokernel.ready_q is all zeroes */ + + + /* + * The interrupt library needs to be initialized early since a series + * of handlers are installed into the interrupt table to catch + * spurious interrupts. This must be performed before other nanokernel + * subsystems install bonafide handlers, or before hardware device + * drivers are initialized. 
+ */ + + _IntLibInit(); + + /* ready the init/main and idle threads */ + + for (int ii = 0; ii < K_NUM_PRIORITIES; ii++) { + sys_dlist_init(&_nanokernel.ready_q.q[ii]); + } + + _new_thread(main_stack, CONFIG_MAIN_STACK_SIZE, NULL, + _main, NULL, NULL, NULL, + CONFIG_MAIN_THREAD_PRIORITY, ESSENTIAL); + _mark_thread_as_started(_main_thread); + _add_thread_to_ready_q(_main_thread); + + _new_thread(idle_stack, IDLE_STACK_SIZE, NULL, + idle, NULL, NULL, NULL, + K_LOWEST_THREAD_PRIO, ESSENTIAL); + _mark_thread_as_started(_idle_thread); + _add_thread_to_ready_q(_idle_thread); + + initialize_timeouts(); + + /* perform any architecture-specific initialization */ + + nanoArchInit(); + + /* handle any kernel objects that require run-time initialization */ + + _k_mem_map_init(); + _k_mbox_init(); + _k_dyamic_timer_init(); + _k_pipes_init(); +} + +#ifdef CONFIG_STACK_CANARIES +/** + * + * @brief Initialize the kernel's stack canary + * + * This macro initializes the kernel's stack canary global variable, + * __stack_chk_guard, with a random value. + * + * INTERNAL + * Depending upon the compiler, modifying __stack_chk_guard directly at runtime + * may generate a build error. In-line assembly is used as a workaround. + */ + +extern void *__stack_chk_guard; + +#if defined(CONFIG_X86) +#define _MOVE_INSTR "movl " +#elif defined(CONFIG_ARM) +#define _MOVE_INSTR "str " +#elif defined(CONFIG_ARC) +#define _MOVE_INSTR "st " +#else +#error "Unknown Architecture type" +#endif /* CONFIG_X86 */ + +#define STACK_CANARY_INIT() \ + do { \ + register void *tmp; \ + tmp = (void *)sys_rand32_get(); \ + __asm__ volatile(_MOVE_INSTR "%1, %0;\n\t" \ + : "=m"(__stack_chk_guard) \ + : "r"(tmp)); \ + } while (0) + +#else /* !CONFIG_STACK_CANARIES */ +#define STACK_CANARY_INIT() +#endif /* CONFIG_STACK_CANARIES */ + +/** + * + * @brief Initialize nanokernel + * + * This routine is invoked when the system is ready to run C code. The + * processor must be running in 32-bit mode, and the BSS must have been + * cleared/zeroed. + * + * @return Does not return + */ +FUNC_NORETURN void _Cstart(void) +{ + /* floating point operations are NOT performed during nanokernel init */ + + char dummyTCS[__tTCS_NOFLOAT_SIZEOF]; + + /* + * Initialize nanokernel data structures. This step includes + * initializing the interrupt subsystem, which must be performed + * before the hardware initialization phase. + */ + + nano_init((struct tcs *)&dummyTCS); + + /* perform basic hardware initialization */ + + _sys_device_do_config_level(_SYS_INIT_LEVEL_PRIMARY); + + /* + * Initialize random number generator + * As a platform may implement it in hardware, it has to be + * initialized after rest of hardware initialization and + * before stack canaries that use it + */ + + RAND32_INIT(); + + /* initialize stack canaries */ + + STACK_CANARY_INIT(); + + /* display boot banner */ + + PRINT_BOOT_BANNER(); + + /* + * Context switch to main task (entry function is _main()): the + * current fake thread is not on a wait queue or ready queue, so it + * will never be rescheduled in. + */ + + _Swap(irq_lock()); + + /* + * Compiler can't tell that the above routines won't return and issues + * a warning unless we explicitly tell it that control never gets this + * far. 
+ */ + + CODE_UNREACHABLE; +} diff --git a/kernel/unified/int_latency_bench.c b/kernel/unified/int_latency_bench.c new file mode 100644 index 00000000000..6fc4685f582 --- /dev/null +++ b/kernel/unified/int_latency_bench.c @@ -0,0 +1 @@ +#include "../nanokernel/int_latency_bench.c" diff --git a/kernel/unified/kernel_event_logger.c b/kernel/unified/kernel_event_logger.c new file mode 100644 index 00000000000..5523331f560 --- /dev/null +++ b/kernel/unified/kernel_event_logger.c @@ -0,0 +1 @@ +#include "../nanokernel/kernel_event_logger.c" diff --git a/kernel/unified/legacy/Makefile b/kernel/unified/legacy/Makefile new file mode 100644 index 00000000000..0666dd439b4 --- /dev/null +++ b/kernel/unified/legacy/Makefile @@ -0,0 +1,9 @@ +ccflags-y += -I$(srctree)/kernel/unified/include + +asflags-y := ${ccflags-y} + +obj-y = +obj-y += $(strip \ +) + +obj-$(CONFIG_NANO_TIMERS) += timer_legacy.o diff --git a/kernel/unified/legacy/timer_legacy.c b/kernel/unified/legacy/timer_legacy.c new file mode 100644 index 00000000000..78f23267d27 --- /dev/null +++ b/kernel/unified/legacy/timer_legacy.c @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +void task_timer_start(ktimer_t timer, int32_t duration, + int32_t period, ksem_t sema) +{ + if (duration < 0 || period < 0 || (duration == 0 && period == 0)) { + int key = irq_lock(); + + if (timer->timeout.delta_ticks_from_prev != -1) { + k_timer_stop(timer); + } + + irq_unlock(key); + + return; + } + + k_timer_start(timer, _ticks_to_ms(duration), + _ticks_to_ms(period), + (void(*)(void *))k_sem_give, sema, NULL, NULL); +} diff --git a/kernel/unified/lifo.c b/kernel/unified/lifo.c new file mode 100644 index 00000000000..09716112ea0 --- /dev/null +++ b/kernel/unified/lifo.c @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2010-2015 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** @file + * + * @brief dynamic-size LIFO queue object + */ + +#include +#include +#include +#include +#include +#include +#include + +void k_lifo_init(struct k_lifo *lifo) +{ + lifo->list = (void *)0; + sys_dlist_init(&lifo->wait_q); + + SYS_TRACING_OBJ_INIT(k_lifo, lifo); +} + +void k_lifo_put(struct k_lifo *lifo, void *data) +{ + struct k_thread *first_pending_thread; + unsigned int key; + + key = irq_lock(); + + first_pending_thread = _unpend_first_thread(&lifo->wait_q); + + if (first_pending_thread) { + _timeout_abort(first_pending_thread); + _ready_thread(first_pending_thread); + + _set_thread_return_value_with_data(first_pending_thread, + 0, data); + + if (!_is_in_isr() && _must_switch_threads()) { + (void)_Swap(key); + return; + } + } else { + *(void **)data = lifo->list; + lifo->list = data; + } + + irq_unlock(key); +} + +void *k_lifo_get(struct k_lifo *lifo, int32_t timeout) +{ + unsigned int key; + void *data; + + key = irq_lock(); + + if (likely(lifo->list)) { + data = lifo->list; + lifo->list = *(void **)data; + irq_unlock(key); + return data; + } + + if (timeout == K_NO_WAIT) { + irq_unlock(key); + return NULL; + } + + _pend_current_thread(&lifo->wait_q, timeout); + + return _Swap(key) ? NULL : _current->swap_data; +} diff --git a/kernel/unified/mailbox.c b/kernel/unified/mailbox.c new file mode 100644 index 00000000000..2deba5d9449 --- /dev/null +++ b/kernel/unified/mailbox.c @@ -0,0 +1,651 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @brief Mailboxes. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0) + +/* asynchronous message descriptor type */ +struct k_mbox_async { + struct tcs_base thread; /* dummy thread object */ + struct k_mbox_msg tx_msg; /* transmit message descriptor */ +}; + +/* array of asynchronous message descriptors */ +static struct k_mbox_async __noinit async_msg[CONFIG_NUM_MBOX_ASYNC_MSGS]; + +/* stack of unused asynchronous message descriptors */ +K_STACK_DEFINE(async_msg_free, CONFIG_NUM_MBOX_ASYNC_MSGS); + +/** + * @brief Create pool of asynchronous message descriptors. + * + * A dummy thread requires minimal initialization, since it never actually + * gets to execute. The K_DUMMY flag is sufficient to distinguish a dummy + * thread from a real one. The threads are *not* added to the kernel's list of + * known threads. + * + * Once initialized, the address of each descriptor is added to a stack + * that governs access to them. + * + * @return N/A + */ +void _k_mbox_init(void) +{ + int i; + + for (i = 0; i < CONFIG_NUM_MBOX_ASYNC_MSGS; i++) { + async_msg[i].thread.flags = K_DUMMY; + k_stack_push(async_msg_free, (uint32_t)&async_msg[i]); + } +} + +/** + * @brief Allocate an asynchronous message descriptor. + * + * @param async Address of area to hold the descriptor pointer. + * + * @return N/A. 
+ */ +static inline void _mbox_async_alloc(struct k_mbox_async **async) +{ + k_stack_pop(async_msg_free, (uint32_t *)async, K_FOREVER); +} + +/** + * @brief Free an asynchronous message descriptor. + * + * @param Descriptor pointer. + */ +static inline void _mbox_async_free(struct k_mbox_async *async) +{ + k_stack_push(async_msg_free, (uint32_t)async); +} + +#endif + +/** + * @brief Initialize a mailbox. + * + * @return N/A + */ +void k_mbox_init(struct k_mbox *mbox_ptr) +{ + sys_dlist_init(&mbox_ptr->tx_msg_queue); + sys_dlist_init(&mbox_ptr->rx_msg_queue); + SYS_TRACING_OBJ_INIT(mbox, mbox_ptr); +} + +/** + * @brief Check compatibility of sender's and receiver's message descriptors. + * + * Compares sender's and receiver's message descriptors to see if they are + * compatible. If so, the descriptor fields are updated to reflect that a + * match has occurred. + * + * @param tx_msg Pointer to transmit message descriptor. + * @param rx_msg Pointer to receive message descriptor. + * + * @return 0 if successfully matched, otherwise -1. + */ +static int _mbox_message_match(struct k_mbox_msg *tx_msg, + struct k_mbox_msg *rx_msg) +{ + uint32_t temp_info; + + if (((tx_msg->tx_target_thread == (k_tid_t)K_ANY) || + (tx_msg->tx_target_thread == rx_msg->tx_target_thread)) && + ((rx_msg->rx_source_thread == (k_tid_t)K_ANY) || + (rx_msg->rx_source_thread == tx_msg->rx_source_thread))) { + + /* update thread identifier fields for both descriptors */ + rx_msg->rx_source_thread = tx_msg->rx_source_thread; + tx_msg->tx_target_thread = rx_msg->tx_target_thread; + + /* update application info fields for both descriptors */ + temp_info = rx_msg->info; + rx_msg->info = tx_msg->info; + tx_msg->info = temp_info; + + /* update data size field for receiver only */ + if (rx_msg->size > tx_msg->size) { + rx_msg->size = tx_msg->size; + } + + /* update data location fields for receiver only */ + rx_msg->tx_data = tx_msg->tx_data; + rx_msg->tx_block = tx_msg->tx_block; + if (rx_msg->tx_data != NULL) { + rx_msg->tx_block.pool_id = NULL; + } else if (rx_msg->tx_block.pool_id != NULL) { + rx_msg->tx_data = rx_msg->tx_block.data; + } + + /* update syncing thread field for receiver only */ + rx_msg->_syncing_thread = tx_msg->_syncing_thread; + + return 0; + } + + return -1; +} + +/** + * @brief Dispose of received message. + * + * Releases any memory pool block still associated with the message, + * then notifies the sender that message processing is complete. + * + * @param rx_msg Pointer to receive message descriptor. 
+ * + * @return N/A + */ +static void _mbox_message_dispose(struct k_mbox_msg *rx_msg) +{ + struct tcs *sending_thread; + struct k_mbox_msg *tx_msg; + unsigned int key; + + /* do nothing if message was disposed of when it was received */ + if (rx_msg->_syncing_thread == NULL) { + return; + } + + /* release sender's memory pool block */ + if (rx_msg->tx_block.pool_id != NULL) { +#if 0 + /* NEED TO WAIT FOR MEMORY POOL SUPPORT */ + k_mem_pool_free(&rx_msg->tx_block); +#endif + rx_msg->tx_block.pool_id = NULL; + } + + /* recover sender info */ + sending_thread = rx_msg->_syncing_thread; + rx_msg->_syncing_thread = NULL; + tx_msg = (struct k_mbox_msg *)sending_thread->swap_data; + + /* update data size field for sender */ + tx_msg->size = rx_msg->size; + +#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0) + /* + * asynchronous send: free asynchronous message descriptor + + * dummy thread pair, then give semaphore (if needed) + */ + if (sending_thread->flags & K_DUMMY) { + struct k_sem *async_sem = tx_msg->_async_sem; + + _mbox_async_free((struct k_mbox_async *)sending_thread); + if (async_sem != NULL) { + k_sem_give(async_sem); + } + return; + } +#endif + + /* synchronous send: wake up sending thread */ + key = irq_lock(); + _set_thread_return_value(sending_thread, 0); + _mark_thread_as_not_pending(sending_thread); + _ready_thread(sending_thread); + _reschedule_threads(key); +} + +/** + * @brief Send a mailbox message. + * + * Helper routine that handles both synchronous and asynchronous sends. + * + * @param mbox Pointer to the mailbox object. + * @param tx_msg Pointer to transmit message descriptor. + * @param timeout Maximum time (nanoseconds) to wait for the message to be + * received (although not necessarily completely processed). + * Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long + * as necessary. 
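For reference, a sketch of a synchronous exchange using the descriptor fields referenced above (size, info, tx_data, tx_block, tx_target_thread, rx_source_thread). The names and initialization order are assumptions for illustration, not a normative template:

    #include <kernel.h>

    static struct k_mbox my_mbox;   /* k_mbox_init(&my_mbox) called once at init */
    static char tx_payload[] = "hello";
    static char rx_buffer[16];

    /* sender side: blocks until a receiver consumes the message */
    void producer(void)
    {
        struct k_mbox_msg tx_msg;

        tx_msg.size = sizeof(tx_payload);
        tx_msg.info = 0;
        tx_msg.tx_data = tx_payload;
        tx_msg.tx_block.pool_id = NULL;          /* no memory pool block */
        tx_msg.tx_target_thread = (k_tid_t)K_ANY; /* any receiver may take it */

        (void)k_mbox_put(&my_mbox, &tx_msg, K_FOREVER);
    }

    /* receiver side: retrieves the data immediately into rx_buffer */
    void consumer(void)
    {
        struct k_mbox_msg rx_msg;

        rx_msg.size = sizeof(rx_buffer);
        rx_msg.rx_source_thread = (k_tid_t)K_ANY; /* accept any sender */

        if (k_mbox_get(&my_mbox, &rx_msg, rx_buffer, K_FOREVER) == 0) {
            /* rx_msg.size now holds the number of bytes transferred */
        }
    }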
+ * + * @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out + */ +static int _mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, + int32_t timeout) +{ + struct tcs *sending_thread; + struct tcs *receiving_thread; + struct k_mbox_msg *rx_msg; + sys_dnode_t *wait_q_item; + unsigned int key; + + /* save sender id so it can be used during message matching */ + tx_msg->rx_source_thread = _current; + + /* finish readying sending thread (actual or dummy) for send */ + sending_thread = tx_msg->_syncing_thread; + sending_thread->swap_data = tx_msg; + + /* search mailbox's rx queue for a compatible receiver */ + key = irq_lock(); + + SYS_DLIST_FOR_EACH_NODE(&mbox->rx_msg_queue, wait_q_item) { + receiving_thread = (struct tcs *)wait_q_item; + rx_msg = (struct k_mbox_msg *)receiving_thread->swap_data; + + if (_mbox_message_match(tx_msg, rx_msg) == 0) { + /* take receiver out of rx queue */ + _unpend_thread(receiving_thread); + _timeout_abort(receiving_thread); + + /* ready receiver for execution */ + _set_thread_return_value(receiving_thread, 0); + _ready_thread(receiving_thread); + +#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0) + /* + * asynchronous send: swap out current thread + * if receiver has priority, otherwise let it continue + * + * note: dummy sending thread sits (unqueued) + * until the receiver consumes the message + */ + if (sending_thread->flags & K_DUMMY) { + _reschedule_threads(key); + return 0; + } +#endif + + /* + * synchronous send: pend current thread (unqueued) + * until the receiver consumes the message + */ + _remove_thread_from_ready_q(_current); + _mark_thread_as_pending(_current); + return _Swap(key); + } + } + + /* didn't find a matching receiver: don't wait for one */ + if (timeout == K_NO_WAIT) { + irq_unlock(key); + return -ENOMSG; + } + +#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0) + /* asynchronous send: dummy thread waits on tx queue for receiver */ + if (sending_thread->flags & K_DUMMY) { + _pend_thread(sending_thread, &mbox->tx_msg_queue, K_FOREVER); + irq_unlock(key); + return 0; + } +#endif + + /* synchronous send: sender waits on tx queue for receiver or timeout */ + _pend_current_thread(&mbox->tx_msg_queue, timeout); + return _Swap(key); +} + +/** + * @brief Send a mailbox message in a synchronous manner. + * + * Sends a message to a mailbox and waits for a receiver to process it. + * The message data may be in a buffer, in a memory pool block, or non-existent + * (i.e. empty message). + * + * @param mbox Pointer to the mailbox object. + * @param tx_msg Pointer to transmit message descriptor. + * @param timeout Maximum time (nanoseconds) to wait for the message to be + * received (although not necessarily completely processed). + * Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long + * as necessary. + * + * @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out + */ +int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, int32_t timeout) +{ + /* configure things for a synchronous send, then send the message */ + tx_msg->_syncing_thread = _current; + + return _mbox_message_put(mbox, tx_msg, timeout); +} + +#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0) +/** + * @brief Send a mailbox message in an asynchronous manner. + * + * Sends a message to a mailbox without waiting for a receiver to process it. + * The message data may be in a buffer, in a memory pool block, or non-existent + * (i.e. an empty message). 
Optionally, the specified semaphore will be given + * by the mailbox when the message has been both received and disposed of + * by the receiver. + * + * @param mbox Pointer to the mailbox object. + * @param tx_msg Pointer to transmit message descriptor. + * @param sem Semaphore identifier, or NULL if none specified. + * + * @return N/A + */ +void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, + struct k_sem *sem) +{ + struct k_mbox_async *async; + + /* + * allocate an asynchronous message descriptor, configure both parts, + * then send the message asynchronously + */ + _mbox_async_alloc(&async); + + async->thread.prio = _current->prio; + + async->tx_msg = *tx_msg; + async->tx_msg._syncing_thread = (struct tcs *)&async->thread; + async->tx_msg._async_sem = sem; + + _mbox_message_put(mbox, &async->tx_msg, K_FOREVER); +} +#endif + +/** + * @brief Retrieve mailbox message data into a buffer. + * + * Completes the processing of a received message by retrieving its data + * into a buffer, then disposing of the message. + * + * Alternatively, this routine can be used to dispose of a received message + * without retrieving its data. + * + * @param rx_msg Pointer to receive message descriptor. + * @param buffer Pointer to buffer to receive data. (Use NULL to discard data.) + * + * @return N/A + */ +void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer) +{ + /* handle case where data is to be discarded */ + if (buffer == NULL) { + rx_msg->size = 0; + _mbox_message_dispose(rx_msg); + return; + } + + /* copy message data to buffer, then dispose of message */ + if ((rx_msg->tx_data != NULL) && (rx_msg->size > 0)) { + memcpy(buffer, rx_msg->tx_data, rx_msg->size); + } + _mbox_message_dispose(rx_msg); +} + +/** + * @brief Retrieve mailbox message data into a memory pool block. + * + * Completes the processing of a received message by retrieving its data + * into a memory pool block, then disposing of the message. The memory pool + * block that results from successful retrieval must be returned to the pool + * once the data has been processed, even in cases where zero bytes of data + * are retrieved. + * + * Alternatively, this routine can be used to dispose of a received message + * without retrieving its data. In this case there is no need to return a + * memory pool block to the pool. + * + * This routine allocates a new memory pool block for the data only if the + * data is not already in one. If a new block cannot be allocated, the routine + * returns a failure code and the received message is left unchanged. This + * permits the caller to reattempt data retrieval at a later time or to dispose + * of the received message without retrieving its data. + * + * @param msg Pointer to receive message descriptor. + * @param pool Memory pool identifier. (Use NULL to discard data.) + * @param block Pointer to area to hold memory pool block info. + * @param timeout Maximum time (nanoseconds) to wait for a memory pool block. + * Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long as + * necessary. 
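For the asynchronous put above, a sketch pairing it with a completion semaphore; names and buffer sizes are illustrative assumptions:

    #include <kernel.h>

    static struct k_mbox my_mbox;
    static struct k_sem tx_done_sem;
    static char async_payload[32];

    void producer_async(void)
    {
        struct k_mbox_msg tx_msg;

        k_sem_init(&tx_done_sem, 0, 1);           /* binary semaphore */

        tx_msg.size = sizeof(async_payload);
        tx_msg.info = 0;
        tx_msg.tx_data = async_payload;
        tx_msg.tx_block.pool_id = NULL;
        tx_msg.tx_target_thread = (k_tid_t)K_ANY;

        /* returns immediately; tx_done_sem is given once the message has
         * been received and disposed of by the receiver
         */
        k_mbox_async_put(&my_mbox, &tx_msg, &tx_done_sem);

        /* ... do other work, then wait for completion ... */
        (void)k_sem_take(&tx_done_sem, K_FOREVER);
    }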
+ * + * @return 0 if successful, -ENOMEM if failed immediately, -EAGAIN if timed out + */ +int k_mbox_data_block_get(struct k_mbox_msg *rx_msg, k_mem_pool_t pool, + struct k_mem_block *block, int32_t timeout) +{ + int result; + + /* handle case where data is to be discarded */ + if (pool == NULL) { + rx_msg->size = 0; + _mbox_message_dispose(rx_msg); + return 0; + } + + /* handle case where data is already in a memory pool block */ + if (rx_msg->tx_block.pool_id != NULL) { + /* give ownership of the block to receiver */ + *block = rx_msg->tx_block; + rx_msg->tx_block.pool_id = NULL; + + /* now dispose of message */ + _mbox_message_dispose(rx_msg); + return 0; + } + + /* allocate memory pool block (even when message size is 0!) */ + result = k_mem_pool_alloc(pool, block, rx_msg->size, timeout); + if (result != 0) { + return result; + } + + /* retrieve non-block data into new block, then dispose of message */ + k_mbox_data_get(rx_msg, block->data); + return 0; +} + +/** + * @brief Handle immediate consumption of received mailbox message data. + * + * Checks to see if received message data should be kept for later retrieval, + * or if the data should consumed immediately and the message disposed of. + * + * The data is consumed immediately in either of the following cases: + * 1) The receiver requested immediate retrieval by suppling a buffer + * to receive the data. + * 2) There is no data to be retrieved. (i.e. Data size is 0 bytes.) + * + * @param rx_msg Pointer to receive message descriptor. + * @param buffer Pointer to buffer to receive data. + * + * @return 0 + */ +static int _mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer) +{ + if (buffer != NULL) { + /* retrieve data now, then dispose of message */ + k_mbox_data_get(rx_msg, buffer); + } else if (rx_msg->size == 0) { + /* there is no data to get, so just dispose of message */ + _mbox_message_dispose(rx_msg); + } else { + /* keep message around for later data retrieval */ + } + + return 0; +} + +/** + * @brief Receive a mailbox message. + * + * Receives a message from a mailbox, then optionally retrieves its data + * and disposes of the message. + * + * @param mbox Pointer to the mailbox object. + * @param msg Pointer to receive message descriptor. + * @param buffer Pointer to buffer to receive data. + * (Use NULL to defer data retrieval and message disposal until later.) + * @param timeout Maximum time (nanoseconds) to wait for a message. + * Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long as + * necessary. 
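A sketch of deferred retrieval, where the receiver inspects the descriptor before deciding whether to copy the data; the application-level use of the info field is an assumption:

    void consumer_deferred(struct k_mbox *mbox)
    {
        struct k_mbox_msg rx_msg;
        static char big_buffer[256];

        rx_msg.size = sizeof(big_buffer);
        rx_msg.rx_source_thread = (k_tid_t)K_ANY;

        /* NULL buffer: receive the message but leave its data with it */
        if (k_mbox_get(mbox, &rx_msg, NULL, K_FOREVER) != 0) {
            return;
        }

        if (rx_msg.info != 0) {   /* assumed application-level tag */
            /* copy the data now and dispose of the message */
            k_mbox_data_get(&rx_msg, big_buffer);
        } else {
            /* discard the data; this still disposes of the message */
            k_mbox_data_get(&rx_msg, NULL);
        }
    }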
+ * + * @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out + */ +int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, + int32_t timeout) +{ + struct tcs *sending_thread; + struct k_mbox_msg *tx_msg; + sys_dnode_t *wait_q_item; + unsigned int key; + int result; + + /* save receiver id so it can be used during message matching */ + rx_msg->tx_target_thread = _current; + + /* search mailbox's tx queue for a compatible sender */ + key = irq_lock(); + + SYS_DLIST_FOR_EACH_NODE(&mbox->tx_msg_queue, wait_q_item) { + sending_thread = (struct tcs *)wait_q_item; + tx_msg = (struct k_mbox_msg *)sending_thread->swap_data; + + if (_mbox_message_match(tx_msg, rx_msg) == 0) { + /* take sender out of mailbox's tx queue */ + _unpend_thread(sending_thread); + _timeout_abort(sending_thread); + + irq_unlock(key); + + /* consume message data immediately, if needed */ + return _mbox_message_data_check(rx_msg, buffer); + } + } + + /* didn't find a matching sender */ + + if (timeout == K_NO_WAIT) { + /* don't wait for a matching sender to appear */ + irq_unlock(key); + return -ENOMSG; + } + + /* wait until a matching sender appears or a timeout occurs */ + _pend_current_thread(&mbox->rx_msg_queue, timeout); + _current->swap_data = rx_msg; + result = _Swap(key); + + /* consume message data immediately, if needed */ + if (result == 0) { + result = _mbox_message_data_check(rx_msg, buffer); + } + + return result; +} + + + +int task_mbox_put(kmbox_t mbox, kpriority_t prio, struct k_msg *msg, + int32_t timeout) +{ + struct k_mbox_msg *tx_msg = (struct k_mbox_msg *)msg; + kpriority_t curr_prio; + unsigned int key; + int result; + + /* handle old-style request to send an empty message */ + if (tx_msg->size == 0) { + tx_msg->tx_block.pool_id = NULL; + } + + /* handle sending message of current thread priority */ + curr_prio = _current->prio; + if (prio == curr_prio) { + return _error_to_rc(k_mbox_put(mbox, tx_msg, + _ticks_to_ms(timeout))); + } + + /* handle sending message of a different thread priority */ + key = irq_lock(); + _thread_priority_set(_current, prio); + _reschedule_threads(key); + + result = _error_to_rc(k_mbox_put(mbox, tx_msg, _ticks_to_ms(timeout))); + + key = irq_lock(); + _thread_priority_set(_current, curr_prio); + _reschedule_threads(key); + + return result; +} + +void task_mbox_block_put(kmbox_t mbox, kpriority_t prio, struct k_msg *msg, + ksem_t sema) +{ + struct k_mbox_msg *tx_msg = (struct k_mbox_msg *)msg; + kpriority_t curr_prio; + unsigned int key; + + /* handle sending message of current thread priority */ + curr_prio = _current->prio; + if (prio == curr_prio) { + k_mbox_async_put(mbox, tx_msg, sema); + return; + } + + /* handle sending message of a different thread priority */ + key = irq_lock(); + _thread_priority_set(_current, prio); + _reschedule_threads(key); + + k_mbox_async_put(mbox, tx_msg, sema); + + key = irq_lock(); + _thread_priority_set(_current, curr_prio); + _reschedule_threads(key); +} + +int task_mbox_get(kmbox_t mbox, struct k_msg *msg, int32_t timeout) +{ + struct k_mbox_msg *rx_msg = (struct k_mbox_msg *)msg; + + return _error_to_rc(k_mbox_get(mbox, rx_msg, rx_msg->_rx_data, + _ticks_to_ms(timeout))); +} + +void task_mbox_data_get(struct k_msg *msg) +{ + struct k_mbox_msg *rx_msg = (struct k_mbox_msg *)msg; + + /* handle old-style request to discard message data */ + if (rx_msg->size == 0) { + rx_msg->_rx_data = NULL; + } + + k_mbox_data_get(rx_msg, rx_msg->_rx_data); +} + +int task_mbox_data_block_get(struct k_msg *msg, 
struct k_block *block, + kmemory_pool_t pool_id, int32_t timeout) +{ + struct k_mbox_msg *rx_msg = (struct k_mbox_msg *)msg; + + return _error_to_rc(k_mbox_data_block_get(rx_msg, pool_id, block, + _ticks_to_ms(timeout))); +} diff --git a/kernel/unified/mem_map.c b/kernel/unified/mem_map.c new file mode 100644 index 00000000000..9e7e6fb6636 --- /dev/null +++ b/kernel/unified/mem_map.c @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +extern struct k_mem_map _k_mem_map_ptr_start[]; +extern struct k_mem_map _k_mem_map_ptr_end[]; + +/** + * @brief Initialize kernel memory map subsystem. + * + * Perform any initialization of memory maps that wasn't done at build time. + * Currently this just involves creating the list of free blocks for each map. + * + * @return N/A + */ +static void create_free_list(struct k_mem_map *map) +{ + char *p; + int j; + + map->free_list = NULL; + p = map->buffer; + + for (j = 0; j < map->num_blocks; j++) { + *(char **)p = map->free_list; + map->free_list = p; + p += map->block_size; + } +} + +/** + * @brief Complete initialization of statically defined memory maps. + * + * Perform any initialization that wasn't done at build time. + * + * @return N/A + */ +void _k_mem_map_init(void) +{ + struct k_mem_map *map; + + for (map = _k_mem_map_ptr_start; map < _k_mem_map_ptr_end; map++) { + create_free_list(map); + } +} + +/** + * @brief Initialize a memory map. + * + * Initializes the memory map and creates its list of free blocks. + * + * @param map Address of memory map. + * @param num_blocks Number of blocks. + * @param block_size Size of each block, in bytes. + * @param buffer Pointer to buffer used for the blocks. + * + * @return N/A + */ +void k_mem_map_init(struct k_mem_map *map, int num_blocks, int block_size, + void *buffer) +{ + map->num_blocks = num_blocks; + map->block_size = block_size; + map->buffer = buffer; + map->num_used = 0; + create_free_list(map); + sys_dlist_init(&map->wait_q); + SYS_TRACING_OBJ_INIT(mem_map, map); +} + +/** + * @brief Allocate a memory map block. + * + * Takes a block from the list of unused blocks. + * + * @param map Pointer to memory map object. + * @param mem Pointer to area to receive block address. + * @param timeout Maximum time (nanoseconds) to wait for allocation to complete. + * Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long as + * necessary. 
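A usage sketch for a runtime-initialized memory map; block count and size are arbitrary, and the buffer is declared as pointers because the free list threads a pointer through each unused block:

    #include <kernel.h>

    #define BLK_SIZE 32     /* each block must be able to hold a pointer */
    #define NUM_BLKS 4

    static struct k_mem_map my_map;
    static void *map_buffer[(NUM_BLKS * BLK_SIZE) / sizeof(void *)];

    void mem_map_example(void)
    {
        void *block;

        k_mem_map_init(&my_map, NUM_BLKS, BLK_SIZE, map_buffer);

        if (k_mem_map_alloc(&my_map, &block, K_NO_WAIT) == 0) {
            /* ... use the BLK_SIZE-byte block ... */
            k_mem_map_free(&my_map, &block);
        }
    }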
+ * + * @return 0 if successful, -ENOMEM if failed immediately, -EAGAIN if timed out + */ +int k_mem_map_alloc(struct k_mem_map *map, void **mem, int32_t timeout) +{ + unsigned int key = irq_lock(); + int result; + + if (map->free_list != NULL) { + /* take a free block */ + *mem = map->free_list; + map->free_list = *(char **)(map->free_list); + map->num_used++; + result = 0; + } else if (timeout == K_NO_WAIT) { + /* don't wait for a free block to become available */ + *mem = NULL; + result = -ENOMEM; + } else { + /* wait for a free block or timeout */ + _pend_current_thread(&map->wait_q, timeout); + result = _Swap(key); + if (result == 0) { + *mem = _current->swap_data; + } + return result; + } + + irq_unlock(key); + + return result; +} + +/** + * @brief Free a memory map block. + * + * Gives block to a waiting thread if there is one, otherwise returns it to + * the list of unused blocks. + * + * @param map Pointer to memory map object. + * @param mem Pointer to area to containing block address. + * + * @return N/A + */ +void k_mem_map_free(struct k_mem_map *map, void **mem) +{ + int key = irq_lock(); + struct tcs *pending_thread = _unpend_first_thread(&map->wait_q); + + if (pending_thread) { + _set_thread_return_value_with_data(pending_thread, 0, *mem); + _timeout_abort(pending_thread); + _ready_thread(pending_thread); + if (_must_switch_threads()) { + _Swap(key); + return; + } + } else { + **(char ***)mem = map->free_list; + map->free_list = *(char **)mem; + map->num_used--; + } + + irq_unlock(key); +} diff --git a/kernel/unified/mem_pool.c b/kernel/unified/mem_pool.c new file mode 100644 index 00000000000..0f400749ba8 --- /dev/null +++ b/kernel/unified/mem_pool.c @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @brief Memory pools. + */ + +#include +#include +#include +#include +#include +#include +#include + +void k_mem_pool_init(struct k_mem_pool *mem, int max_block_size, + int num_max_blocks) +{ +} + +int k_mem_pool_alloc(k_mem_pool_t id, struct k_block *block, int size, + int32_t timeout) +{ + return 0; +} + +void k_mem_pool_free(struct k_block *block) +{ +} + +void k_mem_pool_defrag(k_mem_pool_t id) +{ +} + +void *k_malloc(uint32_t size) +{ + return NULL; +} + +void k_free(void *p) +{ +} diff --git a/kernel/unified/msg_q.c b/kernel/unified/msg_q.c new file mode 100644 index 00000000000..e670593c10e --- /dev/null +++ b/kernel/unified/msg_q.c @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * @brief Message queues. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * @brief Initialize a message queue. + * + * @param q Pointer to the message queue object. + * @param msg_size Message size, in bytes. + * @param max_msgs Maximum number of messages that can be queued. + * @param buffer Pointer to memory area that holds queued messages. + * + * @return N/A + */ +void k_msgq_init(struct k_msgq *q, uint32_t msg_size, uint32_t max_msgs, + char *buffer) +{ + q->msg_size = msg_size; + q->max_msgs = max_msgs; + q->buffer_start = buffer; + q->buffer_end = buffer + (max_msgs * msg_size); + q->read_ptr = buffer; + q->write_ptr = buffer; + q->used_msgs = 0; + sys_dlist_init(&q->wait_q); + SYS_TRACING_OBJ_INIT(msgq, q); +} + +/** + * @brief Adds a message to a message queue. + * + * @param q Pointer to the message queue object. + * @param data Pointer to message data area. + * @param timeout Maximum time (nanoseconds) to wait for operation to complete. + * Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long as + * necessary. + * + * @return 0 if successful, -ENOMSG if failed immediately or after queue purge, + * -EAGAIN if timed out + */ +int k_msgq_put(struct k_msgq *q, void *data, int32_t timeout) +{ + unsigned int key = irq_lock(); + struct tcs *pending_thread; + int result; + + if (q->used_msgs < q->max_msgs) { + /* message queue isn't full */ + pending_thread = _unpend_first_thread(&q->wait_q); + if (pending_thread) { + /* give message to waiting thread */ + memcpy(pending_thread->swap_data, data, q->msg_size); + /* wake up waiting thread */ + _set_thread_return_value(pending_thread, 0); + _timeout_abort(pending_thread); + _ready_thread(pending_thread); + if (_must_switch_threads()) { + _Swap(key); + return 0; + } + } else { + /* put message in queue */ + memcpy(q->write_ptr, data, q->msg_size); + q->write_ptr += q->msg_size; + if (q->write_ptr == q->buffer_end) { + q->write_ptr = q->buffer_start; + } + q->used_msgs++; + } + result = 0; + } else if (timeout == K_NO_WAIT) { + /* don't wait for message space to become available */ + result = -ENOMSG; + } else { + /* wait for put message success, failure, or timeout */ + _pend_current_thread(&q->wait_q, timeout); + _current->swap_data = data; + return _Swap(key); + } + + irq_unlock(key); + + return result; +} + +/** + * @brief Removes a message from a message queue. + * + * @param q Pointer to the message queue object. + * @param data Pointer to message data area. + * @param timeout Maximum time (nanoseconds) to wait for operation to complete. + * Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long as + * necessary. 
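A minimal producer/consumer sketch over a runtime-initialized queue; the message type and queue depth are assumptions for illustration:

    #include <stdint.h>
    #include <kernel.h>

    struct sensor_reading {
        uint32_t id;
        int32_t value;
    };

    #define QUEUE_DEPTH 10

    static struct k_msgq my_msgq;
    static char msgq_buffer[QUEUE_DEPTH * sizeof(struct sensor_reading)];

    void msgq_example(void)
    {
        struct sensor_reading in = { .id = 1, .value = 42 };
        struct sensor_reading out;

        k_msgq_init(&my_msgq, sizeof(struct sensor_reading),
                    QUEUE_DEPTH, msgq_buffer);

        /* lands in the ring buffer since no reader is waiting */
        (void)k_msgq_put(&my_msgq, &in, K_NO_WAIT);

        /* copies the oldest queued message into 'out' */
        if (k_msgq_get(&my_msgq, &out, K_NO_WAIT) == 0) {
            /* out.id == 1, out.value == 42 */
        }
    }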
+ * + * @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out + */ +int k_msgq_get(struct k_msgq *q, void *data, int32_t timeout) +{ + unsigned int key = irq_lock(); + struct tcs *pending_thread; + int result; + + if (q->used_msgs > 0) { + /* take first available message from queue */ + memcpy(data, q->read_ptr, q->msg_size); + q->read_ptr += q->msg_size; + if (q->read_ptr == q->buffer_end) { + q->read_ptr = q->buffer_start; + } + q->used_msgs--; + + /* handle first thread waiting to write (if any) */ + pending_thread = _unpend_first_thread(&q->wait_q); + if (pending_thread) { + /* add thread's message to queue */ + memcpy(q->write_ptr, pending_thread->swap_data, + q->msg_size); + q->write_ptr += q->msg_size; + if (q->write_ptr == q->buffer_end) { + q->write_ptr = q->buffer_start; + } + q->used_msgs++; + + /* wake up waiting thread */ + _set_thread_return_value(pending_thread, 0); + _timeout_abort(pending_thread); + _ready_thread(pending_thread); + if (_must_switch_threads()) { + _Swap(key); + return 0; + } + } + result = 0; + } else if (timeout == K_NO_WAIT) { + /* don't wait for a message to become available */ + result = -ENOMSG; + } else { + /* wait for get message success or timeout */ + _pend_current_thread(&q->wait_q, timeout); + _current->swap_data = data; + return _Swap(key); + } + + irq_unlock(key); + + return result; +} + +/** + * @brief Purge contents of a message queue. + * + * Discards all messages currently in the message queue, and cancels + * any "add message" operations initiated by waiting threads. + * + * @param q Pointer to the message queue object. + * + * @return N/A + */ +void k_msgq_purge(struct k_msgq *q) +{ + unsigned int key = irq_lock(); + + if (q->used_msgs) { + /* wake up any threads that are waiting to write */ + while (1) { + struct tcs *pending_thread = + _unpend_first_thread(&q->wait_q); + + if (pending_thread == NULL) { + break; + } + _set_thread_return_value(pending_thread, -ENOMSG); + _timeout_abort(pending_thread); + _ready_thread(pending_thread); + } + + q->used_msgs = 0; + q->read_ptr = q->write_ptr; + + if (_must_switch_threads()) { + _Swap(key); + return; + } + } else { + /* queue is empty, so no need to do anything ... */ + } + + irq_unlock(key); +} diff --git a/kernel/unified/mutex.c b/kernel/unified/mutex.c new file mode 100644 index 00000000000..d2826c525fb --- /dev/null +++ b/kernel/unified/mutex.c @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file @brief mutex kernel services + * + * This module contains routines for handling mutex locking and unlocking. + * + * Mutexes implement a priority inheritance algorithm that boosts the priority + * level of the owning thread to match the priority level of the highest + * priority thread waiting on the mutex. + * + * Each mutex that contributes to priority inheritance must be released in the + * reverse order in which is was acquired. 
Furthermore each subsequent mutex + * that contributes to raising the owning thread's priority level must be + * acquired at a point after the most recent "bumping" of the priority level. + * + * For example, if thread A has two mutexes contributing to the raising of its + * priority level, the second mutex M2 must be acquired by thread A after + * thread A's priority level was bumped due to owning the first mutex M1. + * When releasing the mutex, thread A must release M2 before it releases M1. + * Failure to follow this nested model may result in threads running at + * unexpected priority levels (too high, or too low). + */ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_OBJECT_MONITOR +#define RECORD_STATE_CHANGE(mutex) \ + do { (mutex)->num_lock_state_changes++; } while ((0)) +#define RECORD_CONFLICT(mutex) \ + do { (mutex)->num_conflicts++; } while ((0)) +#else +#define RECORD_STATE_CHANGE(mutex) do { } while ((0)) +#define RECORD_CONFLICT(mutex) do { } while ((0)) +#endif + +#ifdef CONFIG_OBJECT_MONITOR +#define INIT_OBJECT_MONITOR(mutex) do { \ + mutex->num_lock_state_changes = 0; \ + mutex->num_conflicts = 0; \ + } while ((0)) +#else +#define INIT_OBJECT_MONITOR(mutex) do { } while ((0)) +#endif + +#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS +#define INIT_KERNEL_TRACING(mutex) do { \ + mutex->__next = NULL; \ + } while ((0)) +#else +#define INIT_KERNEL_TRACING(mutex) do { } while ((0)) +#endif + +void k_mutex_init(struct k_mutex *mutex) +{ + mutex->owner = NULL; + mutex->lock_count = 0; + + /* initialized upon first use */ + /* mutex->owner_orig_prio = 0; */ + + sys_dlist_init(&mutex->wait_q); + + INIT_OBJECT_MONITOR(mutex); + INIT_KERNEL_TRACING(mutex); +} + +static int new_prio_for_inheritance(int target, int limit) +{ + int new_prio = _is_prio_higher(target, limit) ? target : limit; + + new_prio = _get_new_prio_with_ceiling(new_prio); + + return new_prio; +} + +static void adjust_owner_prio(struct k_mutex *mutex, int new_prio) +{ + if (mutex->owner->prio != new_prio) { + + K_DEBUG("%p (ready (y/n): %c) prio changed to %d (was %d)\n", + mutex->owner, _is_thread_ready(mutex->owner) ? + 'y' : 'n', + new_prio, mutex->owner->prio); + + _thread_priority_set(mutex->owner, new_prio); + } +} + +int k_mutex_lock(struct k_mutex *mutex, int32_t timeout) +{ + int new_prio, key; + + k_sched_lock(); + + if (likely(mutex->lock_count == 0 || mutex->owner == _current)) { + + RECORD_STATE_CHANGE(); + + mutex->owner_orig_prio = mutex->lock_count == 0 ? + _current->prio : + mutex->owner_orig_prio; + + mutex->lock_count++; + mutex->owner = _current; + + K_DEBUG("%p took mutex %p, count: %d, orig prio: %d\n", + _current, mutex, mutex->lock_count, + mutex->owner_orig_prio); + + k_sched_unlock(); + + return 0; + } + + RECORD_CONFLICT(); + + if (unlikely(timeout == K_NO_WAIT)) { + k_sched_unlock(); + return -EBUSY; + } + +#if 0 + if (_is_prio_higher(_current->prio, mutex->owner->prio)) { + new_prio = _current->prio; + } + new_prio = _get_new_prio_with_ceiling(new_prio); +#endif + new_prio = new_prio_for_inheritance(_current->prio, mutex->owner->prio); + + key = irq_lock(); + + K_DEBUG("adjusting prio up on mutex %p\n", mutex); + + adjust_owner_prio(mutex, new_prio); + + _pend_current_thread(&mutex->wait_q, timeout); + + int got_mutex = _Swap(key); + + K_DEBUG("on mutex %p got_mutex value: %d\n", mutex, got_mutex); + + K_DEBUG("%p got mutex %p (y/n): %c\n", _current, mutex, + got_mutex ? 
'y' : 'n'); + + if (got_mutex == 0) { + k_sched_unlock(); + return 0; + } + + /* timed out */ + + K_DEBUG("%p timeout on mutex %p\n", _current, mutex); + + struct tcs *waiter = (struct tcs *)sys_dlist_peek_head(&mutex->wait_q); + + new_prio = mutex->owner_orig_prio; + new_prio = waiter ? new_prio_for_inheritance(waiter->prio, new_prio) : + new_prio; + + K_DEBUG("adjusting prio down on mutex %p\n", mutex); + + key = irq_lock(); + adjust_owner_prio(mutex, new_prio); + irq_unlock(key); + + k_sched_unlock(); + + return -EAGAIN; +} + +void k_mutex_unlock(struct k_mutex *mutex) +{ + int key; + + __ASSERT(mutex->owner == _current, ""); + + k_sched_lock(); + + RECORD_STATE_CHANGE(); + + mutex->lock_count--; + + K_DEBUG("mutex %p lock_count: %d\n", mutex, mutex->lock_count); + + if (mutex->lock_count != 0) { + k_sched_unlock(); + return; + } + + key = irq_lock(); + + adjust_owner_prio(mutex, mutex->owner_orig_prio); + + struct tcs *new_owner = _unpend_first_thread(&mutex->wait_q); + + K_DEBUG("new owner of mutex %p: %p (prio: %d)\n", + mutex, new_owner, new_owner ? new_owner->prio : -1000); + + if (new_owner) { + _timeout_abort(new_owner); + _ready_thread(new_owner); + + irq_unlock(key); + + _set_thread_return_value(new_owner, 0); + + /* + * new owner is already of higher or equal prio than first + * waiter since the wait queue is priority-based: no need to + * ajust its priority + */ + mutex->owner = new_owner; + mutex->lock_count++; + mutex->owner_orig_prio = new_owner->prio; + } else { + irq_unlock(key); + mutex->owner = NULL; + } + + k_sched_unlock(); +} diff --git a/kernel/unified/pipes.c b/kernel/unified/pipes.c new file mode 100644 index 00000000000..f43deb69099 --- /dev/null +++ b/kernel/unified/pipes.c @@ -0,0 +1,693 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * + * @brief Pipes + */ + +#include +#include +#include +#include +#include +#include +#include + +struct k_pipe_desc { + unsigned char *buffer; /* Position in src/dest buffer */ + size_t bytes_to_xfer; /* # bytes left to transfer */ +#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0) + struct k_mem_block *block; /* Pointer to memory block */ + struct k_mem_block copy_block; /* For backwards compatibility */ + struct k_sem *sem; /* Semaphore to give if async */ +#endif +}; + +struct k_pipe_async { + struct tcs_base thread; /* Dummy thread object */ + struct k_pipe_desc desc; /* Pipe message descriptor */ +}; + +#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0) +/* Array of asynchronous message descriptors */ +static struct k_pipe_async __noinit async_msg[CONFIG_NUM_PIPE_ASYNC_MSGS]; + +/* stack of unused asynchronous message descriptors */ +K_STACK_DEFINE(pipe_async_msgs, CONFIG_NUM_PIPE_ASYNC_MSGS); + +/** + * @brief Create pool of asynchronous pipe message descriptors + * + * A dummy thread requires minimal initialization since it never gets to + * execute. The K_DUMMY flag is sufficient to distinguish a dummy thread + * from a real one. 
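For the mutex API defined earlier in this patch, a usage sketch of recursive locking with a bounded wait; the 100 ms figure is arbitrary:

    #include <kernel.h>

    static struct k_mutex my_mutex;

    void mutex_example(void)
    {
        k_mutex_init(&my_mutex);

        /* bounded wait: returns -EBUSY (K_NO_WAIT) or -EAGAIN (timeout)
         * when the lock cannot be taken
         */
        if (k_mutex_lock(&my_mutex, 100) != 0) {
            return;
        }

        /* the owner may lock again; lock_count goes to 2 */
        (void)k_mutex_lock(&my_mutex, K_NO_WAIT);

        /* ... critical section ... */

        /* releases must balance the locks, innermost first */
        k_mutex_unlock(&my_mutex);
        k_mutex_unlock(&my_mutex);
    }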
The dummy threads are *not* added to the kernel's list of + * known threads. + * + * Once initialized, the address of each descriptor is added to a stack that + * governs access to them. + * + * @return N/A + */ +void _k_pipes_init(void) +{ + for (int i = 0; i < CONFIG_NUM_PIPE_ASYNC_MSGS; i++) { + async_msg[i].thread.flags = K_DUMMY; + async_msg[i].thread.swap_data = &async_msg[i].desc; + k_stack_push(pipe_async_msgs, (uint32_t)&async_msg[i]); + } +} + +void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size) +{ + pipe->buffer = buffer; + pipe->size = size; + pipe->bytes_used = 0; + pipe->read_index = 0; + pipe->write_index = 0; + sys_dlist_init(&pipe->wait_q.writers); + sys_dlist_init(&pipe->wait_q.readers); + SYS_TRACING_OBJ_INIT(pipe, pipe); +} + +/** + * @brief Allocate an asynchronous message descriptor + * + * @param async Address of area to hold the descriptor pointer + * + * @return N/A + */ +static void _pipe_async_alloc(struct k_pipe_async **async) +{ + k_stack_pop(pipe_async_msgs, (uint32_t *)async, K_FOREVER); +} + +/** + * @brief Free an asynchronous message descriptor + * + * @param async Descriptor pointer + * + * @return N/A + */ +static void _pipe_async_free(struct k_pipe_async *async) +{ + k_stack_push(pipe_async_msgs, (uint32_t)async); +} + +/** + * @brief Finish an asynchronous operation + * + * The asynchronous operation is finished with the scheduler locked to prevent + * the called routines from scheduling a new thread. + * + * @return N/A + */ + +static void _pipe_async_finish(struct k_pipe_async *async_desc) +{ + k_mem_pool_free(async_desc->desc.block); + + if (async_desc->desc.sem != NULL) { + k_sem_give(async_desc->desc.sem); + } + + _pipe_async_free(async_desc); +} +#endif + +/** + * @brief Copy bytes from @a src to @a dest + * + * @return Number of bytes copied + */ +static size_t _pipe_xfer(unsigned char *dest, size_t dest_size, + const unsigned char *src, size_t src_size) +{ + size_t num_bytes = min(dest_size, src_size); + const unsigned char *end = src + num_bytes; + + while (src != end) { + *dest = *src; + dest++; + src++; + } + + return num_bytes; +} + +/** + * @brief Put data from @a src into the pipe's circular buffer + * + * Modifies the following fields in @a pipe: + * buffer, bytes_used, write_index + * + * @return Number of bytes written to the pipe's circular buffer + */ +static size_t _pipe_buffer_put(struct k_pipe *pipe, + const unsigned char *src, size_t src_size) +{ + size_t bytes_copied; + size_t run_length; + size_t num_bytes_written = 0; + int i; + + + for (i = 0; i < 2; i++) { + run_length = min(pipe->size - pipe->bytes_used, + pipe->size - pipe->write_index); + + bytes_copied = _pipe_xfer(pipe->buffer + pipe->write_index, + run_length, + src + num_bytes_written, + src_size - num_bytes_written); + + num_bytes_written += bytes_copied; + pipe->bytes_used += bytes_copied; + pipe->write_index += bytes_copied; + if (pipe->write_index == pipe->size) { + pipe->write_index = 0; + } + } + + return num_bytes_written; +} + +/** + * @brief Get data from the pipe's circular buffer + * + * Modifies the following fields in @a pipe: + * bytes_used, read_index + * + * @return Number of bytes read from the pipe's circular buffer + */ +static size_t _pipe_buffer_get(struct k_pipe *pipe, + unsigned char *dest, size_t dest_size) +{ + size_t bytes_copied; + size_t run_length; + size_t num_bytes_read = 0; + int i; + + for (i = 0; i < 2; i++) { + run_length = min(pipe->bytes_used, + pipe->size - pipe->read_index); + + bytes_copied = 
_pipe_xfer(dest + num_bytes_read, + dest_size - num_bytes_read, + pipe->buffer + pipe->read_index, + run_length); + + num_bytes_read += bytes_copied; + pipe->bytes_used -= bytes_copied; + pipe->read_index += bytes_copied; + if (pipe->read_index == pipe->size) { + pipe->read_index = 0; + } + } + + return num_bytes_read; +} + +/** + * @brief Prepare a working set of readers/writers + * + * Prepare a list of "working threads" into/from which the data + * will be directly copied. This list is useful as it is used to ... + * + * 1. avoid double copying + * 2. minimize interrupt latency as interrupts are unlocked + * while copying data + * 3. ensure a timeout can not make the request impossible to satisfy + * + * The list is populated with previously pended threads that will be ready to + * run after the pipe call is complete. + * + * Important things to remember when reading from the pipe ... + * 1. If there are writers int @a wait_q, then the pipe's buffer is full. + * 2. Conversely if the pipe's buffer is not full, there are no writers. + * 3. The amount of available data in the pipe is the sum the bytes used in + * the pipe (@a pipe_space) and all the requests from the waiting writers. + * 4. Since data is read from the pipe's buffer first, the working set must + * include writers that will (try to) re-fill the pipe's buffer afterwards. + * + * Important things to remember when writing to the pipe ... + * 1. If there are readers in @a wait_q, then the pipe's buffer is empty. + * 2. Conversely if the pipe's buffer is not empty, then there are no readers. + * 3. The amount of space available in the pipe is the sum of the bytes unused + * in the pipe (@a pipe_space) and all the requests from the waiting readers. + * + * @return false if request is unsatisfiable, otherwise true + */ +static bool _pipe_xfer_prepare(sys_dlist_t *xfer_list, + struct k_thread **waiter, + _wait_q_t *wait_q, + size_t pipe_space, + size_t bytes_to_xfer, + size_t min_xfer, + int32_t timeout) +{ + sys_dnode_t *node; + struct k_thread *thread; + struct k_pipe_desc *desc; + size_t num_bytes = 0; + + if (timeout == K_NO_WAIT) { + for (node = sys_dlist_peek_head(wait_q); node != NULL; + node = sys_dlist_peek_next(wait_q, node)) { + thread = (struct k_thread *)node; + desc = (struct k_pipe_desc *)thread->swap_data; + + num_bytes += desc->bytes_to_xfer; + + if (num_bytes >= bytes_to_xfer) { + break; + } + } + + if (num_bytes + pipe_space < min_xfer) { + return false; + } + } + + /* + * Either @a timeout is not K_NO_WAIT (so the thread may pend) or + * the entire request can be satisfied. Generate the working list. + */ + + sys_dlist_init(xfer_list); + num_bytes = 0; + + while ((thread = (struct k_thread *) sys_dlist_peek_head(wait_q))) { + desc = (struct k_pipe_desc *)thread->swap_data; + num_bytes += desc->bytes_to_xfer; + + if (num_bytes > bytes_to_xfer) { + /* + * This request can not be fully satisfied. + * Do not remove it from the wait_q. + * Do not abort its timeout (if applicable). + * Do not add it to the transfer list + */ + break; + } + + /* + * This request can be fully satisfied. + * Remove it from the wait_q. + * Abort its timeout. + * Add it to the transfer list. + */ + _unpend_thread(thread); + _timeout_abort(thread); + sys_dlist_append(xfer_list, &thread->k_q_node); + } + + *waiter = (num_bytes > bytes_to_xfer) ? 
thread : NULL; + + return true; +} + +/** + * @brief Determine the correct return code + * + * Bytes Xferred No Wait Wait + * >= Minimum 0 0 + * < Minimum -EIO* -EAGAIN + * + * * The "-EIO No Wait" case was already checked when the "working set" + * was created in _pipe_xfer_prepare(). + * + * @return See table above + */ +static int _pipe_return_code(size_t min_xfer, size_t bytes_remaining, + size_t bytes_requested) +{ + if (bytes_requested - bytes_remaining >= min_xfer) { + /* + * At least the minimum number of requested + * bytes have been transferred. + */ + return 0; + } + + return -EAGAIN; +} + +/** + * @brief Ready a pipe thread + * + * If the pipe thread is a real thread, then add it to the ready queue. + * If it is a dummy thread, then finish the asynchronous work. + * + * @return N/A + */ +static void _pipe_thread_ready(struct k_thread *thread) +{ + unsigned int key; + +#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0) + if (thread->flags & K_DUMMY) { + _pipe_async_finish((struct k_pipe_async *)thread); + return; + } +#endif + + key = irq_lock(); + _ready_thread(thread); + irq_unlock(key); +} + +/** + * @brief Internal API used to send data to a pipe + */ +int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc, + unsigned char *data, size_t bytes_to_write, + size_t *bytes_written, size_t min_xfer, + int32_t timeout) +{ + struct k_thread *reader; + struct k_pipe_desc *desc; + sys_dlist_t xfer_list; + unsigned int key; + size_t num_bytes_written = 0; + size_t bytes_copied; + +#if (CONFIG_NUM_PIPE_ASYNC_MSGS == 0) + ARG_UNUSED(async_desc); +#endif + + key = irq_lock(); + + /* + * Create a list of "working readers" into which the data will be + * directly copied. + */ + + if (!_pipe_xfer_prepare(&xfer_list, &reader, &pipe->wait_q.readers, + pipe->size - pipe->bytes_used, bytes_to_write, + min_xfer, timeout)) { + irq_unlock(key); + *bytes_written = 0; + return -EIO; + } + + k_sched_lock(); + irq_unlock(key); + + /* + * 1. 'xfer_list' currently contains a list of reader threads that can + * have their read requests fulfilled by the current call. + * 2. 'reader' if not NULL points to a thread on the reader wait_q + * that can get some of its requested data. + * 3. Interrupts are unlocked but the scheduler is locked to allow + * ticks to be delivered but no scheduling to occur + * 4. If 'reader' times out while we are copying data, not only do we + * still have a pointer to it, but it can not execute until this call + * is complete so it is still safe to copy data to it. + */ + + struct k_thread *thread = (struct k_thread *) + sys_dlist_get(&xfer_list); + while (thread) { + desc = (struct k_pipe_desc *)thread->swap_data; + bytes_copied = _pipe_xfer(desc->buffer, desc->bytes_to_xfer, + data + num_bytes_written, + bytes_to_write - num_bytes_written); + + num_bytes_written += bytes_copied; + desc->buffer += bytes_copied; + desc->bytes_to_xfer -= bytes_copied; + + /* The thread's read request has been satisfied. Ready it. */ + key = irq_lock(); + _ready_thread(thread); + irq_unlock(key); + + thread = (struct k_thread *)sys_dlist_get(&xfer_list); + } + + /* + * Copy any data to the reader that we left on the wait_q. + * It is possible no data will be copied. 
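To make the return-code rule concrete, a small worked case with values chosen purely for illustration:

    /* A write of 100 bytes with min_xfer = 60 that manages to move
     * 75 bytes before timing out:
     *
     *   bytes_requested = 100, bytes_remaining = 25, min_xfer = 60
     *   100 - 25 = 75 >= 60  =>  _pipe_return_code() yields 0
     *
     * Had only 40 bytes moved (bytes_remaining = 60), then
     * 100 - 60 = 40 < 60 and the call would report -EAGAIN instead.
     */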
+ */ + if (reader) { + desc = (struct k_pipe_desc *)reader->swap_data; + bytes_copied = _pipe_xfer(desc->buffer, desc->bytes_to_xfer, + data + num_bytes_written, + bytes_to_write - num_bytes_written); + + num_bytes_written += bytes_copied; + desc->buffer += bytes_copied; + desc->bytes_to_xfer -= bytes_copied; + } + + /* + * As much data as possible has been directly copied to any waiting + * readers. Add as much as possible to the pipe's circular buffer. + */ + + num_bytes_written += + _pipe_buffer_put(pipe, data + num_bytes_written, + bytes_to_write - num_bytes_written); + + if (num_bytes_written == bytes_to_write) { + *bytes_written = num_bytes_written; +#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0) + if (async_desc != NULL) { + _pipe_async_finish(async_desc); + } +#endif + k_sched_unlock(); + return 0; + } + + /* Not all data was copied. */ + +#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0) + if (async_desc != NULL) { + /* + * Lock interrupts and unlock the scheduler before + * manipulating the writers wait_q. + */ + key = irq_lock(); + _sched_unlock_no_reschedule(); + _pend_thread((struct tcs *) &async_desc->thread, + &pipe->wait_q.writers, K_FOREVER); + _reschedule_threads(key); + return 0; + } +#endif + + struct k_pipe_desc pipe_desc; + + pipe_desc.buffer = data + num_bytes_written; + pipe_desc.bytes_to_xfer = bytes_to_write - num_bytes_written; + + if (timeout != K_NO_WAIT) { + _current->swap_data = &pipe_desc; + /* + * Lock interrupts and unlock the scheduler before + * manipulating the writers wait_q. + */ + key = irq_lock(); + _sched_unlock_no_reschedule(); + _pend_current_thread(&pipe->wait_q.writers, timeout); + _Swap(key); + } else { + k_sched_unlock(); + } + + *bytes_written = bytes_to_write - pipe_desc.bytes_to_xfer; + + return _pipe_return_code(min_xfer, pipe_desc.bytes_to_xfer, + bytes_to_write); +} + +int k_pipe_get(struct k_pipe *pipe, void *buffer, size_t bytes_to_read, + size_t *bytes_read, size_t min_xfer, int32_t timeout) +{ + struct k_thread *writer; + struct k_pipe_desc *desc; + sys_dlist_t xfer_list; + unsigned int key; + size_t num_bytes_read = 0; + size_t bytes_copied; + + __ASSERT(min_xfer <= bytes_to_read, ""); + __ASSERT(bytes_read != NULL, ""); + + key = irq_lock(); + + /* + * Create a list of "working readers" into which the data will be + * directly copied. + */ + + if (!_pipe_xfer_prepare(&xfer_list, &writer, &pipe->wait_q.writers, + pipe->bytes_used, bytes_to_read, + min_xfer, timeout)) { + irq_unlock(key); + *bytes_read = 0; + return -EIO; + } + + k_sched_lock(); + irq_unlock(key); + + num_bytes_read = _pipe_buffer_get(pipe, buffer, bytes_to_read); + + /* + * 1. 'xfer_list' currently contains a list of writer threads that can + * have their write requests fulfilled by the current call. + * 2. 'writer' if not NULL points to a thread on the writer wait_q + * that can post some of its requested data. + * 3. Data will be copied from each writer's buffer to either the + * reader's buffer and/or to the pipe's circular buffer. + * 4. Interrupts are unlocked but the scheduler is locked to allow + * ticks to be delivered but no scheduling to occur + * 5. If 'writer' times out while we are copying data, not only do we + * still have a pointer to it, but it can not execute until this + * call is complete so it is still safe to copy data from it. 
+ */ + + struct k_thread *thread = (struct k_thread *) + sys_dlist_get(&xfer_list); + while (thread && (num_bytes_read < bytes_to_read)) { + desc = (struct k_pipe_desc *)thread->swap_data; + bytes_copied = _pipe_xfer(buffer + num_bytes_read, + bytes_to_read - num_bytes_read, + desc->buffer, desc->bytes_to_xfer); + + num_bytes_read += bytes_copied; + desc->buffer += bytes_copied; + desc->bytes_to_xfer -= bytes_copied; + + /* + * It is expected that the write request will be satisfied. + * However, if the read request was satisfied before the + * write request was satisfied, then the write request must + * finish later when writing to the pipe's circular buffer. + */ + if (num_bytes_read == bytes_to_read) { + break; + } + _pipe_thread_ready(thread); + + thread = (struct k_thread *)sys_dlist_get(&xfer_list); + } + + if (writer && (num_bytes_read < bytes_to_read)) { + desc = (struct k_pipe_desc *)writer->swap_data; + bytes_copied = _pipe_xfer(buffer + num_bytes_read, + bytes_to_read - num_bytes_read, + desc->buffer, desc->bytes_to_xfer); + + num_bytes_read += bytes_copied; + desc->buffer += bytes_copied; + desc->bytes_to_xfer -= bytes_copied; + } + + /* + * Copy as much data as possible from the writers (if any) + * into the pipe's circular buffer. + */ + + while (thread) { + desc = (struct k_pipe_desc *)thread->swap_data; + bytes_copied = _pipe_buffer_put(pipe, desc->buffer, + desc->bytes_to_xfer); + + desc->buffer += bytes_copied; + desc->bytes_to_xfer -= bytes_copied; + + /* Write request has been satsified */ + _pipe_thread_ready(thread); + + thread = (struct k_thread *)sys_dlist_get(&xfer_list); + } + + if (writer) { + desc = (struct k_pipe_desc *)writer->swap_data; + bytes_copied = _pipe_buffer_put(pipe, desc->buffer, + desc->bytes_to_xfer); + + desc->buffer += bytes_copied; + desc->bytes_to_xfer -= bytes_copied; + } + + if (num_bytes_read == bytes_to_read) { + k_sched_unlock(); + + *bytes_read = num_bytes_read; + + return 0; + } + + /* Not all data was read. 
*/ + + struct k_pipe_desc pipe_desc; + + pipe_desc.buffer = buffer + num_bytes_read; + pipe_desc.bytes_to_xfer = bytes_to_read - num_bytes_read; + + if (timeout != K_NO_WAIT) { + _current->swap_data = &pipe_desc; + key = irq_lock(); + _sched_unlock_no_reschedule(); + _pend_current_thread(&pipe->wait_q.readers, timeout); + _Swap(key); + } else { + k_sched_unlock(); + } + + *bytes_read = bytes_to_read - pipe_desc.bytes_to_xfer; + + return _pipe_return_code(min_xfer, pipe_desc.bytes_to_xfer, + bytes_to_read); +} + +int k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write, + size_t *bytes_written, size_t min_xfer, int32_t timeout) +{ + __ASSERT(min_xfer <= bytes_to_write, ""); + __ASSERT(bytes_written != NULL, ""); + + return _k_pipe_put_internal(pipe, NULL, data, + bytes_to_write, bytes_written, + min_xfer, timeout); +} + +#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0) +void k_pipe_block_put(struct k_pipe *pipe, struct k_mem_block *block, + size_t bytes_to_write, struct k_sem *sem) +{ + struct k_pipe_async *async_desc; + size_t dummy_bytes_written; + + /* For simplicity, always allocate an asynchronous descriptor */ + _pipe_async_alloc(&async_desc); + + async_desc->desc.block = &async_desc->desc.copy_block; + async_desc->desc.copy_block = *block; + async_desc->desc.sem = sem; + async_desc->thread.prio = k_current_priority_get(); + + (void) _k_pipe_put_internal(pipe, async_desc, block->data, + block->req_size, &dummy_bytes_written, + block->req_size, K_FOREVER); +} +#endif diff --git a/kernel/unified/ring_buffer.c b/kernel/unified/ring_buffer.c new file mode 100644 index 00000000000..7304a715a4a --- /dev/null +++ b/kernel/unified/ring_buffer.c @@ -0,0 +1 @@ +#include "../nanokernel/ring_buffer.c" diff --git a/kernel/unified/sched.c b/kernel/unified/sched.c new file mode 100644 index 00000000000..683197a420e --- /dev/null +++ b/kernel/unified/sched.c @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +/* set the bit corresponding to prio in ready q bitmap */ +static void _set_ready_q_prio_bit(int prio) +{ + int bmap_index = _get_ready_q_prio_bmap_index(prio); + uint32_t *bmap = &_nanokernel.ready_q.prio_bmap[bmap_index]; + + *bmap |= _get_ready_q_prio_bit(prio); +} + +/* clear the bit corresponding to prio in ready q bitmap */ +static void _clear_ready_q_prio_bit(int prio) +{ + int bmap_index = _get_ready_q_prio_bmap_index(prio); + uint32_t *bmap = &_nanokernel.ready_q.prio_bmap[bmap_index]; + + *bmap &= ~_get_ready_q_prio_bit(prio); +} + +/* + * Add thread to the ready queue, in the slot for its priority; the thread + * must not be on a wait queue. 
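For the pipe API completed above, a usage sketch showing how min_xfer selects between "whatever fits" and "all or nothing" semantics; buffer sizes and the 100 ms timeout are assumptions:

    #include <kernel.h>

    static struct k_pipe my_pipe;
    static unsigned char pipe_buffer[64];

    void pipe_example(void)
    {
        unsigned char tx_data[32] = { 0 };
        unsigned char rx_data[32];
        size_t written, read;

        k_pipe_init(&my_pipe, pipe_buffer, sizeof(pipe_buffer));

        /* accept a partial write: min_xfer of 0 means "whatever fits" */
        (void)k_pipe_put(&my_pipe, tx_data, sizeof(tx_data),
                         &written, 0, K_NO_WAIT);

        /* require the full 32 bytes, waiting up to 100 ms for them */
        if (k_pipe_get(&my_pipe, rx_data, sizeof(rx_data),
                       &read, sizeof(rx_data), 100) == 0) {
            /* read == 32 */
        }
    }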
+ */ +void _add_thread_to_ready_q(struct tcs *thread) +{ + int q_index = _get_ready_q_q_index(thread->prio); + sys_dlist_t *q = &_nanokernel.ready_q.q[q_index]; + + _set_ready_q_prio_bit(thread->prio); + sys_dlist_append(q, &thread->k_q_node); +} + +/* remove thread from the ready queue */ +void _remove_thread_from_ready_q(struct tcs *thread) +{ + int q_index = _get_ready_q_q_index(thread->prio); + sys_dlist_t *q = &_nanokernel.ready_q.q[q_index]; + + sys_dlist_remove(&thread->k_q_node); + if (sys_dlist_is_empty(q)) { + _clear_ready_q_prio_bit(thread->prio); + } +} + +/* reschedule threads if the scheduler is not locked */ +/* not callable from ISR */ +/* must be called with interrupts locked */ +void _reschedule_threads(int key) +{ + K_DEBUG("rescheduling threads\n"); + + if (unlikely(_nanokernel.current->sched_locked > 0)) { + K_DEBUG("aborted: scheduler was locked\n"); + irq_unlock(key); + return; + } + + if (_must_switch_threads()) { + K_DEBUG("context-switching out %p\n", _current); + _Swap(key); + } else { + irq_unlock(key); + } +} + +/* application API: lock the scheduler */ +void k_sched_unlock(void) +{ + __ASSERT(_nanokernel.current->sched_locked > 0, ""); + __ASSERT(!_is_in_isr(), ""); + + int key = irq_lock(); + + atomic_dec(&_nanokernel.current->sched_locked); + + K_DEBUG("scheduler unlocked (%p:%d)\n", + _current, _current->sched_locked); + + _reschedule_threads(key); +} + +/* + * Callback for sys_dlist_insert_at() to find the correct insert point in a + * wait queue (priority-based). + */ +static int _is_wait_q_insert_point(sys_dnode_t *dnode_info, void *insert_prio) +{ + struct tcs *waitq_node = CONTAINER_OF(dnode_info, struct tcs, k_q_node); + + return _is_prio_higher((int)insert_prio, waitq_node->prio); +} + +/* convert milliseconds to ticks */ + +#define ceiling(numerator, divider) \ + (((numerator) + ((divider) - 1)) / (divider)) + +int32_t _ms_to_ticks(int32_t ms) +{ + int64_t ms_ticks_per_sec = (int64_t)ms * sys_clock_ticks_per_sec; + + return (int32_t)ceiling(ms_ticks_per_sec, MSEC_PER_SEC); +} + +/* pend the specified thread: it must *not* be in the ready queue */ +/* must be called with interrupts locked */ +void _pend_thread(struct tcs *thread, _wait_q_t *wait_q, int32_t timeout) +{ + sys_dlist_t *dlist = (sys_dlist_t *)wait_q; + + sys_dlist_insert_at(dlist, &thread->k_q_node, + _is_wait_q_insert_point, (void *)thread->prio); + + _mark_thread_as_pending(thread); + + if (timeout != K_FOREVER) { + _mark_thread_as_timing(thread); + _TIMEOUT_ADD(thread, wait_q, _ms_to_ticks(timeout)); + } +} + +/* pend the current thread */ +/* must be called with interrupts locked */ +void _pend_current_thread(_wait_q_t *wait_q, int32_t timeout) +{ + _remove_thread_from_ready_q(_current); + _pend_thread(_current, wait_q, timeout); +} + +/* find which one is the next thread to run */ +/* must be called with interrupts locked */ +struct tcs *_get_next_ready_thread(void) +{ + int prio = _get_highest_ready_prio(); + int q_index = _get_ready_q_q_index(prio); + sys_dlist_t *list = &_nanokernel.ready_q.q[q_index]; + struct k_thread *thread = (struct k_thread *)sys_dlist_peek_head(list); + + __ASSERT(thread, "no thread to run (prio: %d, queue index: %u)!\n", + prio, q_index); + + return thread; +} + +/* + * Check if there is a thread of higher prio than the current one. Should only + * be called if we already know that the current thread is preemptible. 
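As a sketch of how the scheduler lock pairs with the unlock shown above (assuming the k_sched_lock() counterpart used elsewhere in this patch), this is a lighter-weight alternative to locking interrupts when only preemption by other threads must be prevented:

    #include <kernel.h>

    static int shared_counter;

    /* thread context only: k_sched_unlock() asserts it is not in an ISR */
    void bump_counter(void)
    {
        k_sched_lock();     /* other threads cannot preempt us ...      */
        shared_counter++;   /* ... but interrupts remain enabled        */
        k_sched_unlock();   /* may reschedule if a higher-priority
                             * thread became ready in the meantime
                             */
    }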
+ */ +int __must_switch_threads(void) +{ + K_DEBUG("current prio: %d, highest prio: %d\n", + _current->prio, _get_highest_ready_prio()); + + extern void _dump_ready_q(void); + _dump_ready_q(); + + return _is_prio_higher(_get_highest_ready_prio(), _current->prio); +} + +/* application API: change a thread's priority. Not callable from ISR */ +void k_thread_priority_set(struct tcs *thread, int prio) +{ + __ASSERT(!_is_in_isr(), ""); + + int key = irq_lock(); + + _thread_priority_set(thread, prio); + _reschedule_threads(key); +} + +/* application API: find out the priority of the current thread */ +int k_current_priority_get(void) +{ + return k_thread_priority_get(_current); +} + +/* + * application API: the current thread yields control to threads of higher or + * equal priorities. This is done by remove the thread from the ready queue, + * putting it back at the end of its priority's list and invoking the + * scheduler. + */ +void k_yield(void) +{ + __ASSERT(!_is_in_isr(), ""); + + int key = irq_lock(); + + _remove_thread_from_ready_q(_current); + _add_thread_to_ready_q(_current); + + if (_current == _get_next_ready_thread()) { + irq_unlock(key); + } else { + _Swap(key); + } +} + +/* application API: put the current thread to sleep */ +void k_sleep(int32_t duration) +{ + __ASSERT(!_is_in_isr(), ""); + + K_DEBUG("thread %p for %d ns\n", _current, duration); + + /* wait of 0 ns is treated as a 'yield' */ + if (duration == 0) { + k_yield(); + return; + } + + int key = irq_lock(); + + _mark_thread_as_timing(_current); + _remove_thread_from_ready_q(_current); + _timeout_add(_current, NULL, _ms_to_ticks(duration)); + + _Swap(key); +} + +/* application API: wakeup a sleeping thread */ +void k_wakeup(k_tid_t thread) +{ + int key = irq_lock(); + + /* verify first if thread is not waiting on an object */ + if (thread->timeout.wait_q) { + irq_unlock(key); + return; + } + + if (_timeout_abort(thread) < 0) { + irq_unlock(key); + return; + } + + _ready_thread(thread); + + if (_is_in_isr()) { + irq_unlock(key); + } else { + _reschedule_threads(key); + } +} + +/* application API: get current thread ID */ +k_tid_t k_current_get(void) +{ + return _current; +} + +/* debug aid */ +void _dump_ready_q(void) +{ + K_DEBUG("bitmap: %x\n", _ready_q.prio_bmap[0]); + for (int prio = 0; prio < K_NUM_PRIORITIES; prio++) { + K_DEBUG("prio: %d, head: %p\n", + prio - CONFIG_NUM_COOP_PRIORITIES, + sys_dlist_peek_head(&_ready_q.q[prio])); + } +} diff --git a/kernel/unified/sem.c b/kernel/unified/sem.c new file mode 100644 index 00000000000..43ef1599aeb --- /dev/null +++ b/kernel/unified/sem.c @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2010-2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * + * @brief Nanokernel semaphore object. + * + * The semaphores are of the 'counting' type, i.e. each 'give' operation will + * increment the internal count by 1, if no fiber is pending on it. The 'init' + * call initializes the count to 0. 
Following multiple 'give' operations, the + * same number of 'take' operations can be performed without the calling fiber + * having to pend on the semaphore, or the calling task having to poll. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +void k_sem_init(struct k_sem *sem, unsigned int initial_count, + unsigned int limit) +{ + __ASSERT(limit != 0, "limit cannot be zero"); + + sem->count = initial_count; + sem->limit = limit; + sys_dlist_init(&sem->wait_q); + SYS_TRACING_OBJ_INIT(nano_sem, sem); +} + +void k_sem_give(struct k_sem *sem) +{ + int key = irq_lock(); + struct tcs *first_pending_thread = _unpend_first_thread(&sem->wait_q); + + if (first_pending_thread) { + _timeout_abort(first_pending_thread); + _ready_thread(first_pending_thread); + + _set_thread_return_value(first_pending_thread, 0); + + if (!_is_in_isr() && _must_switch_threads()) { + _Swap(key); + return; + } + } else { + if (likely(sem->count != sem->limit)) { + sem->count++; + } + } + + irq_unlock(key); +} + +int k_sem_take(struct k_sem *sem, int32_t timeout) +{ + __ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, ""); + + unsigned int key = irq_lock(); + + if (likely(sem->count > 0)) { + sem->count--; + irq_unlock(key); + return 0; + } + + if (timeout == K_NO_WAIT) { + irq_unlock(key); + return -EBUSY; + } + + _pend_current_thread(&sem->wait_q, timeout); + + return _Swap(key); +} diff --git a/kernel/unified/stack.c b/kernel/unified/stack.c new file mode 100644 index 00000000000..d4ec53e0227 --- /dev/null +++ b/kernel/unified/stack.c @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2010-2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @brief fixed-size stack object + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +void k_stack_init_with_buffer(struct k_stack *stack, int num_entries, + uint32_t *buffer) +{ + sys_dlist_init(&stack->wait_q); + stack->next = stack->base = buffer; + stack->top = stack->base + num_entries; + + SYS_TRACING_OBJ_INIT(k_stack, stack); +} + +void k_stack_init(struct k_stack *stack, int num_entries) +{ + k_stack_init_with_buffer(stack, num_entries, (uint32_t *)(stack + 1)); +} + +void k_stack_push(struct k_stack *stack, uint32_t data) +{ + struct k_thread *first_pending_thread; + unsigned int key; + + __ASSERT(stack->next != stack->top, "stack is full"); + + key = irq_lock(); + + first_pending_thread = _unpend_first_thread(&stack->wait_q); + + if (first_pending_thread) { + _timeout_abort(first_pending_thread); + _ready_thread(first_pending_thread); + + _set_thread_return_value_with_data(first_pending_thread, + 0, (void *)data); + + if (!_is_in_isr() && _must_switch_threads()) { + (void)_Swap(key); + return; + } + } else { + *(stack->next) = data; + stack->next++; + } + + irq_unlock(key); +} + +int k_stack_pop(struct k_stack *stack, uint32_t *data, int32_t timeout) +{ + unsigned int key; + int result; + + key = irq_lock(); + + if (likely(stack->next > stack->base)) { + stack->next--; + *data = *(stack->next); + irq_unlock(key); + return 0; + } + + if (timeout == K_NO_WAIT) { + irq_unlock(key); + return -EBUSY; + } + + _pend_current_thread(&stack->wait_q, timeout); + + result = _Swap(key); + if (result == 0) { + *data = (uint32_t)_current->swap_data; + } + return result; +} diff --git a/kernel/unified/sys_clock.c b/kernel/unified/sys_clock.c new file mode 100644 index 00000000000..f4f0b0acf04 --- /dev/null +++ b/kernel/unified/sys_clock.c @@ -0,0 +1,201 @@ +/* system clock support for nanokernel-only systems */ + +/* + * Copyright (c) 1997-2015 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +#include +#include +#include +#include +#include + +#ifdef CONFIG_SYS_CLOCK_EXISTS +int sys_clock_us_per_tick = 1000000 / sys_clock_ticks_per_sec; +int sys_clock_hw_cycles_per_tick = + CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / sys_clock_ticks_per_sec; +#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) +int sys_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC; +#endif +#else +/* don't initialize to avoid division-by-zero error */ +int sys_clock_us_per_tick; +int sys_clock_hw_cycles_per_tick; +#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) +int sys_clock_hw_cycles_per_sec; +#endif +#endif + +/* updated by timer driver for tickless, stays at 1 for non-tickless */ +int32_t _sys_idle_elapsed_ticks = 1; + +int64_t _sys_clock_tick_count; + +/** + * + * @brief Return the lower part of the current system tick count + * + * @return the current system tick count + * + */ +uint32_t sys_tick_get_32(void) +{ + return (uint32_t)_sys_clock_tick_count; +} + +/** + * + * @brief Return the current system tick count + * + * @return the current system tick count + * + */ +int64_t sys_tick_get(void) +{ + int64_t tmp_sys_clock_tick_count; + /* + * Lock the interrupts when reading _sys_clock_tick_count 64-bit + * variable. Some architectures (x86) do not handle 64-bit atomically, + * so we have to lock the timer interrupt that causes change of + * _sys_clock_tick_count + */ + unsigned int imask = irq_lock(); + + tmp_sys_clock_tick_count = _sys_clock_tick_count; + irq_unlock(imask); + return tmp_sys_clock_tick_count; +} + +/** + * + * @brief Return number of ticks since a reference time + * + * This function is meant to be used in contained fragments of code. The first + * call to it in a particular code fragment fills in a reference time variable + * which then gets passed and updated every time the function is called. From + * the second call on, the delta between the value passed to it and the current + * tick count is the return value. Since the first call is meant to only fill in + * the reference time, its return value should be discarded. + * + * Since a code fragment that wants to use sys_tick_delta() passes in its + * own reference time variable, multiple code fragments can make use of this + * function concurrently. + * + * e.g. + * uint64_t reftime; + * (void) sys_tick_delta(&reftime); /# prime it #/ + * [do stuff] + * x = sys_tick_delta(&reftime); /# how long since priming #/ + * [do more stuff] + * y = sys_tick_delta(&reftime); /# how long since [do stuff] #/ + * + * @return tick count since reference time; undefined for first invocation + * + * NOTE: We use inline function for both 64-bit and 32-bit functions. + * Compiler optimizes out 64-bit result handling in 32-bit version. + */ +static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime) +{ + int64_t delta; + int64_t saved; + + /* + * Lock the interrupts when reading _sys_clock_tick_count 64-bit + * variable. 
Some architectures (x86) do not handle 64-bit atomically, + * so we have to lock the timer interrupt that causes change of + * _sys_clock_tick_count + */ + unsigned int imask = irq_lock(); + + saved = _sys_clock_tick_count; + irq_unlock(imask); + delta = saved - (*reftime); + *reftime = saved; + + return delta; +} + +/** + * + * @brief Return number of ticks since a reference time + * + * @return tick count since reference time; undefined for first invocation + */ +int64_t sys_tick_delta(int64_t *reftime) +{ + return _nano_tick_delta(reftime); +} + + +uint32_t sys_tick_delta_32(int64_t *reftime) +{ + return (uint32_t)_nano_tick_delta(reftime); +} + +/* handle the expired timeouts in the nano timeout queue */ + +#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS) +#include + +static inline void handle_expired_timeouts(int32_t ticks) +{ + struct _timeout *head = + (struct _timeout *)sys_dlist_peek_head(&_timeout_q); + + _nanokernel.task_timeout = TICKS_UNLIMITED; + + K_DEBUG("head: %p, delta: %d\n", + head, head ? head->delta_ticks_from_prev : -2112); + + if (head) { + head->delta_ticks_from_prev -= ticks; + _timeout_handle_timeouts(); + } +} +#else + #define handle_expired_timeouts(ticks) do { } while ((0)) +#endif + +/** + * + * @brief Announce a tick to the nanokernel + * + * This function is only to be called by the system clock timer driver when a + * tick is to be announced to the nanokernel. It takes care of dequeuing the + * timers that have expired and wake up the fibers pending on them. + * + * @return N/A + */ +void _nano_sys_clock_tick_announce(int32_t ticks) +{ + unsigned int key; + + K_DEBUG("ticks: %d\n", ticks); + + key = irq_lock(); + _sys_clock_tick_count += ticks; + handle_expired_timeouts(ticks); + irq_unlock(key); +} + +/* + * Get closest nano timeouts/timers deadline expiry, (uint32_t)TICKS_UNLIMITED + * if none. + */ +uint32_t _nano_get_earliest_deadline(void) +{ + return _nano_get_earliest_timeouts_deadline(); +} diff --git a/kernel/unified/thread.c b/kernel/unified/thread.c new file mode 100644 index 00000000000..845fc31b2e3 --- /dev/null +++ b/kernel/unified/thread.c @@ -0,0 +1,459 @@ +/* + * Copyright (c) 2010-2014 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * @brief Nanokernel thread support + * + * This module provides general purpose thread support, with applies to both + * tasks or fibers. 
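+ *
+ * A minimal usage sketch of the thread API defined in this file (the stack
+ * size and priority below are illustrative assumptions, not requirements):
+ *
+ *    static char __stack my_stack[512];
+ *
+ *    static void my_entry(void *p1, void *p2, void *p3)
+ *    {
+ *            ...
+ *    }
+ *
+ *    k_tid_t tid = k_thread_spawn(my_stack, sizeof(my_stack), my_entry,
+ *                                 NULL, NULL, NULL, 7, 0, 0);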
+ */ + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +/* Legacy API */ + +int sys_execution_context_type_get(void) +{ + if (k_am_in_isr()) + return NANO_CTX_ISR; + + if (_current->prio < 0) + return NANO_CTX_FIBER; + + return NANO_CTX_TASK; +} + +/** + * + * @brief Determine if code is running at interrupt level + * + * @return 0 if invoked by a thread, or non-zero if invoked by an ISR + */ +int k_am_in_isr(void) +{ + return _IS_IN_ISR(); +} + +/** + * + * @brief Mark thread as essential to system + * + * This function tags the running fiber or task as essential to system + * operation; exceptions raised by this thread will be treated as a fatal + * system error. + * + * @return N/A + */ +void _thread_essential_set(void) +{ + _current->flags |= ESSENTIAL; +} + +/** + * + * @brief Mark thread as not essential to system + * + * This function tags the running fiber or task as not essential to system + * operation; exceptions raised by this thread may be recoverable. + * (This is the default tag for a thread.) + * + * @return N/A + */ +void _thread_essential_clear(void) +{ + _current->flags &= ~ESSENTIAL; +} + +/** + * + * @brief Is the specified thread essential? + * + * This routine indicates if the running fiber or task is an essential system + * thread. + * + * @return Non-zero if current thread is essential, zero if it is not + */ +int _is_thread_essential(void) +{ + return _current->flags & ESSENTIAL; +} + +void k_busy_wait(uint32_t usec_to_wait) +{ + /* use 64-bit math to prevent overflow when multiplying */ + uint32_t cycles_to_wait = (uint32_t)( + (uint64_t)usec_to_wait * + (uint64_t)sys_clock_hw_cycles_per_sec / + (uint64_t)USEC_PER_SEC + ); + uint32_t start_cycles = k_cycle_get_32(); + + for (;;) { + uint32_t current_cycles = k_cycle_get_32(); + + /* this handles the rollover on an unsigned 32-bit value */ + if ((current_cycles - start_cycles) >= cycles_to_wait) { + break; + } + } +} + +#ifdef CONFIG_THREAD_CUSTOM_DATA + +/** + * + * @brief Set thread's custom data + * + * This routine sets the custom data value for the current task or fiber. + * Custom data is not used by the kernel itself, and is freely available + * for the thread to use as it sees fit. + * + * @param value New to set the thread's custom data to. + * + * @return N/A + */ +void k_thread_custom_data_set(void *value) +{ + _current->custom_data = value; +} + +/** + * + * @brief Get thread's custom data + * + * This function returns the custom data value for the current task or fiber. + * + * @return current handle value + */ +void *k_thread_custom_data_get(void) +{ + return _current->custom_data; +} + +#endif /* CONFIG_THREAD_CUSTOM_DATA */ + +#if defined(CONFIG_THREAD_MONITOR) +/** + * + * @brief Thread exit routine + * + * This function is invoked when the specified thread is aborted, either + * normally or abnormally. It is called for the termination of any thread, + * (fibers and tasks). + * + * This routine must be invoked either from a fiber or from a task with + * interrupts locked to guarantee that the list of threads does not change in + * mid-operation. It cannot be called from ISR context. + * + * @return N/A + */ +void _thread_exit(struct tcs *thread) +{ + /* + * Remove thread from the list of threads. This singly linked list of + * threads maintains ALL the threads in the system: both tasks and + * fibers regardless of whether they are runnable. 
+ */ + + if (thread == _nanokernel.threads) { + _nanokernel.threads = _nanokernel.threads->next_thread; + } else { + struct tcs *prev_thread; + + prev_thread = _nanokernel.threads; + while (thread != prev_thread->next_thread) { + prev_thread = prev_thread->next_thread; + } + prev_thread->next_thread = thread->next_thread; + } +} +#endif /* CONFIG_THREAD_MONITOR */ + +/** + * + * @brief Common thread entry point function + * + * This function serves as the entry point for _all_ threads, i.e. both + * task and fibers are instantiated such that initial execution starts + * here. + * + * This routine invokes the actual task or fiber entry point function and + * passes it three arguments. It also handles graceful termination of the + * task or fiber if the entry point function ever returns. + * + * @param pEntry address of the app entry point function + * @param parameter1 1st arg to the app entry point function + * @param parameter2 2nd arg to the app entry point function + * @param parameter3 3rd arg to the app entry point function + * + * @internal + * The 'noreturn' attribute is applied to this function so that the compiler + * can dispense with generating the usual preamble that is only required for + * functions that actually return. + * + * @return Does not return + * + */ +FUNC_NORETURN void _thread_entry(void (*entry)(void *, void *, void *), + void *p1, void *p2, void *p3) +{ + entry(p1, p2, p3); + + if (_is_thread_essential()) { + _NanoFatalErrorHandler(_NANO_ERR_INVALID_TASK_EXIT, + &_default_esf); + } + + k_thread_abort(_current); + + /* + * Compiler can't tell that fiber_abort() won't return and issues a + * warning unless we explicitly tell it that control never gets this + * far. + */ + + CODE_UNREACHABLE; +} + +static void start_thread(struct tcs *thread) +{ + int key = irq_lock(); /* protect kernel queues */ + + _mark_thread_as_started(thread); + + if (_is_thread_ready(thread)) { + _add_thread_to_ready_q(thread); + if (_must_switch_threads()) { + _Swap(key); + return; + } + } + + irq_unlock(key); +} + +static void schedule_new_thread(struct k_thread *thread, int32_t delay) +{ +#ifdef CONFIG_NANO_TIMEOUTS + if (delay == 0) { + start_thread(thread); + } else { + _mark_thread_as_timing(thread); + _timeout_add(thread, NULL, _ms_to_ticks(delay)); + } +#else + ARG_UNUSED(delay); + start_thread(thread); +#endif +} + +k_tid_t k_thread_spawn(char *stack, unsigned stack_size, + void (*entry)(void *, void *, void*), + void *p1, void *p2, void *p3, + int32_t prio, uint32_t options, int32_t delay) +{ + __ASSERT(!_is_in_isr(), ""); + + struct tcs *new_thread = (struct tcs *)stack; + + _new_thread(stack, stack_size, NULL, entry, p1, p2, p3, prio, options); + + schedule_new_thread(new_thread, delay); + + return new_thread; +} + +int k_thread_cancel(k_tid_t tid) +{ + struct tcs *thread = tid; + + int key = irq_lock(); + + if (_has_thread_started(thread) || !_is_thread_timing(thread)) { + irq_unlock(key); + return -EINVAL; + } + + _timeout_abort(thread); + _thread_exit(thread); + + irq_unlock(key); + + return 0; +} + +void _k_thread_group_op(uint32_t groups, void (*func)(struct tcs *)) +{ + unsigned int key; + + __ASSERT(!_is_in_isr(), ""); + + k_sched_lock(); + + /* Invoke func() on each static thread in the specified group set. 
*/ + + _FOREACH_STATIC_THREAD(thread_init) { + if (is_in_any_group(thread_init, groups)) { + key = irq_lock(); + func(thread_init->thread); + irq_unlock(key); + } + } + + /* + * If the current thread is still in a ready state, then let the + * "unlock scheduler" code determine if any rescheduling is needed. + */ + if (_is_thread_ready(_current)) { + k_sched_unlock(); + return; + } + + /* The current thread is no longer in a ready state--reschedule. */ + key = irq_lock(); + _sched_unlock_no_reschedule(); + _Swap(key); +} + +void _k_thread_single_start(struct tcs *thread) +{ + _mark_thread_as_started(thread); + + if (_is_thread_ready(thread)) { + _add_thread_to_ready_q(thread); + } +} + +void _k_thread_single_suspend(struct tcs *thread) +{ + if (_is_thread_ready(thread)) { + _remove_thread_from_ready_q(thread); + } + + _mark_thread_as_suspended(thread); +} + +void k_thread_suspend(struct tcs *thread) +{ + unsigned int key = irq_lock(); + + _k_thread_single_suspend(thread); + + if (thread == _current) { + _Swap(key); + } else { + irq_unlock(key); + } +} + +void _k_thread_single_resume(struct tcs *thread) +{ + _mark_thread_as_not_suspended(thread); + + if (_is_thread_ready(thread)) { + _add_thread_to_ready_q(thread); + } +} + +void k_thread_resume(struct tcs *thread) +{ + unsigned int key = irq_lock(); + + _k_thread_single_resume(thread); + + _reschedule_threads(key); +} + +void _k_thread_single_abort(struct tcs *thread) +{ + if (thread->fn_abort != NULL) { + thread->fn_abort(); + } + + if (_is_thread_ready(thread)) { + _remove_thread_from_ready_q(thread); + } else { + if (_is_thread_pending(thread)) { + _unpend_thread(thread); + } + if (_is_thread_timing(thread)) { + _timeout_abort(thread); + _mark_thread_as_not_timing(thread); + } + } + _mark_thread_as_dead(thread); +} + +void _init_static_threads(void) +{ + _FOREACH_STATIC_THREAD(thread_init) { + _new_thread( + thread_init->init_stack, + thread_init->init_stack_size, + NULL, + thread_init->init_entry, + thread_init->init_p1, + thread_init->init_p2, + thread_init->init_p3, + thread_init->init_prio, + 0); + + thread_init->thread->init_data = thread_init; + } + _k_thread_group_op(K_THREAD_GROUP_EXE, _k_thread_single_start); +} + +uint32_t _k_thread_group_mask_get(struct tcs *thread) +{ + struct k_thread_static_init *thread_init = thread->init_data; + + return thread_init->init_groups; +} + +void _k_thread_group_join(uint32_t groups, struct tcs *thread) +{ + struct k_thread_static_init *thread_init = thread->init_data; + + thread_init->init_groups |= groups; +} + +void _k_thread_group_leave(uint32_t groups, struct tcs *thread) +{ + struct k_thread_static_init *thread_init = thread->init_data; + + thread_init->init_groups &= groups; +} + +/* legacy API */ + +void task_start(ktask_t task) +{ + int key = irq_lock(); + + _k_thread_single_start(task); + _reschedule_threads(key); +} diff --git a/kernel/unified/thread_abort.c b/kernel/unified/thread_abort.c new file mode 100644 index 00000000000..9ca15f938d7 --- /dev/null +++ b/kernel/unified/thread_abort.c @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * @brief Primitive for aborting a thread when an arch-specific one is not + * needed.. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +extern void _k_thread_single_abort(struct tcs *thread); + +#if !defined(CONFIG_ARCH_HAS_NANO_FIBER_ABORT) +void k_thread_abort(k_tid_t thread) +{ + unsigned int key; + + key = irq_lock(); + + _k_thread_single_abort(thread); + + if (_current == thread) { + _Swap(key); + CODE_UNREACHABLE; + } + + /* The abort handler might have altered the ready queue. */ + _reschedule_threads(key); +} +#endif + +void k_thread_abort_handler_set(void (*func)(void)) +{ + _current->fn_abort = func; +} diff --git a/kernel/unified/timer.c b/kernel/unified/timer.c new file mode 100644 index 00000000000..30be1c34bab --- /dev/null +++ b/kernel/unified/timer.c @@ -0,0 +1,326 @@ +/* + * Copyright (c) 1997-2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +/** + * @brief Timer expire handler + * + * @param t Internal timeout structure + * + * @return N/A + */ +void timer_expiration_handler(struct _timeout *t) +{ + int key = irq_lock(); + struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout); + struct tcs *first_pending_thread = _unpend_first_thread(&timer->wait_q); + + /* if the time is periodic, start it again */ + if (timer->period > 0) { + _do_timeout_add(NULL, &timer->timeout, &timer->wait_q, + timer->period); + } + + /* once timer is expired, it can return valid user data pointer */ + timer->user_data = timer->user_data_internal; + /* resume thread waiting on the timer */ + if (first_pending_thread) { + _ready_thread(first_pending_thread); + _set_thread_return_value(first_pending_thread, 0); + /* + * Since the routine is called from timer interrupt handler + * _Swap() is not invoked + */ + } + if (timer->handler) { + timer->handler(timer->handler_arg); + } + irq_unlock(key); +} + + +/** + * @brief Initialize timer structure + * + * Routine initializes timer structure parameters and assigns the user + * supplied data. 
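+ *
+ * Usage sketch together with k_timer_start()/k_timer_test() from this file
+ * (the duration value, the period of 0, the NULL handlers and the user_data
+ * pointer are illustrative assumptions):
+ *
+ *    struct k_timer my_timer;
+ *    void *data;
+ *
+ *    k_timer_init(&my_timer, user_data);
+ *    k_timer_start(&my_timer, 100, 0, NULL, NULL, NULL, NULL);
+ *    k_timer_test(&my_timer, &data, K_FOREVER);
+ *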
+ * Routine needs to be called before timer is used + * + * @param timer Pointer to the timer structure to be initialized + * @param data Pointer to user supplied data + * + * @return N/A + */ +void k_timer_init(struct k_timer *timer, void *data) +{ + timer->user_data = NULL; + timer->user_data_internal = data; + timer->period = 0; + sys_dlist_init(&timer->wait_q); + _timeout_init(&timer->timeout, timer_expiration_handler); + SYS_TRACING_OBJ_INIT(micro_timer, timer); +} + + +#if (CONFIG_NUM_DYNAMIC_TIMERS > 0) + +static struct k_timer _dynamic_timers[CONFIG_NUM_DYNAMIC_TIMERS]; +static sys_dlist_t _timer_pool; + +/* Initialize the pool of timers for dynamic timer allocation */ +void _k_dyamic_timer_init(void) +{ + int i; + int n_timers = ARRAY_SIZE(_dynamic_timers); + + sys_dlist_init(&_timer_pool); + for (i = 0; i < n_timers; i++) { + k_timer_init(&_dynamic_timers[i], NULL); + sys_dlist_append(&_timer_pool, + &_dynamic_timers[i].timeout.node); + } +} + +/** + * @brief Allocate timer + * + * Allocates a new timer timer. + * + * @return pointer to the new timer structure + */ +struct k_timer *k_timer_alloc(void) +{ + k_sched_lock(); + + /* + * This conversion works only if timeout member + * variable is the first in time structure. + */ + struct k_timer *timer = (struct k_timer *)sys_dlist_get(&_timer_pool); + + k_sched_unlock(); + return timer; +} + + +/** + * @brief Deallocate timer + * + * Deallocates timer and inserts it into the timer queue. + * @param timer Timer to free + * + * @return N/A + */ +void k_timer_free(struct k_timer *timer) +{ + k_timer_stop(timer); + k_sched_lock(); + sys_dlist_append(&_timer_pool, &timer->timeout.node); + k_sched_unlock(); +} + +/** + * + * @brief Check if the timer pool is empty + * + * @return true if the timer pool is empty, false otherwise + */ +bool k_timer_pool_is_empty(void) +{ + k_sched_lock(); + + bool is_empty = sys_dlist_is_empty(&_timer_pool); + + k_sched_unlock(); + return is_empty; +} +#endif /* (CONFIG_NUM_DYNAMIC_TIMERS > 0) */ + +/** + * + * @brief Start timer + * + * @param timer Timer structure + * @param duration Initial timer duration (ns) + * @param period Timer period (ns) + * @param sem Semaphore to signal timer expiration + * + * @return N/A + */ +void k_timer_start(struct k_timer *timer, int32_t duration, int32_t period, + void (*handler)(void *), void *handler_arg, + void (*stop_handler)(void *), void *stop_handler_arg) +{ + __ASSERT(duration >= 0 && period >= 0 && + (duration != 0 || period != 0), "invalid parameters\n"); + + unsigned int key = irq_lock(); + + if (timer->timeout.delta_ticks_from_prev != -1) { + _do_timeout_abort(&timer->timeout); + } + + timer->period = _ms_to_ticks(period); + + timer->handler = handler; + timer->handler_arg = handler_arg; + timer->stop_handler = stop_handler; + timer->stop_handler_arg = stop_handler_arg; + + _do_timeout_add(NULL, &timer->timeout, &timer->wait_q, + _ms_to_ticks(duration)); + irq_unlock(key); +} + + +/** + * + * @brief Restart timer with new parameters + * + * @param timer Timer structure + * @param duration Initial timer duration (ns) + * @param period Timer period (ns) + * + * @return N/A + */ +void k_timer_restart(struct k_timer *timer, int32_t duration, int32_t period) +{ + k_timer_start(timer, duration, period, + timer->handler, timer->handler_arg, + timer->stop_handler, timer->stop_handler_arg); +} + + +/** + * + * @brief Stop the timer + * + * @param timer Timer structure + * + * @return N/A + */ +void k_timer_stop(struct k_timer *timer) +{ + __ASSERT(!_is_in_isr(), 
""); + + int key = irq_lock(); + + _do_timeout_abort(&timer->timeout); + + irq_unlock(key); + + if (timer->stop_handler) { + timer->stop_handler(timer->stop_handler_arg); + } + + key = irq_lock(); + + struct tcs *pending_thread = _unpend_first_thread(&timer->wait_q); + + if (pending_thread) { + _set_thread_return_value(pending_thread, -ECANCELED); + _ready_thread(pending_thread); + } + + _reschedule_threads(key); +} + + +/** + * + * @brief Test the timer for expiration + * + * The routine checks if the timer is expired and returns the pointer + * to user data. Otherwise makes the thread wait for the timer expiration. + * + * @param timer Timer structure + * @param data User data pointer + * @param wait May be K_NO_WAIT or K_FOREVER + * + * @return 0 or error code + */ +int k_timer_test(struct k_timer *timer, void **user_data_ptr, int wait) +{ + int result = 0; + unsigned int key = irq_lock(); + + /* check if the timer has expired */ + if (timer->timeout.delta_ticks_from_prev == -1) { + *user_data_ptr = timer->user_data; + timer->user_data = NULL; + } else if (wait == K_NO_WAIT) { + /* if the thread should not wait, return immediately */ + *user_data_ptr = NULL; + result = -EAGAIN; + } else { + /* otherwise pend the thread */ + _pend_current_thread(&timer->wait_q, K_FOREVER); + result = _Swap(key); + key = irq_lock(); + if (result == 0) { + *user_data_ptr = timer->user_data; + timer->user_data = NULL; + } + } + + irq_unlock(key); + return result; +} + + +/** + * + * @brief Get timer remaining time + * + * @param timer Timer descriptor + * + * @return remaining time (ns) + */ + +int32_t k_timer_remaining_get(struct k_timer *timer) +{ + unsigned int key = irq_lock(); + int32_t remaining_ticks; + sys_dlist_t *timeout_q = &_nanokernel.timeout_q; + + if (timer->timeout.delta_ticks_from_prev == -1) { + remaining_ticks = 0; + } else { + /* + * As nanokernel timeouts are stored in a linked list with + * delta_ticks_from_prev, to get the actual number of ticks + * remaining for the timer, walk through the timeouts list + * and accumulate all the delta_ticks_from_prev values up to + * the timer. + */ + struct _timeout *t = + (struct _timeout *)sys_dlist_peek_head(timeout_q); + + remaining_ticks = t->delta_ticks_from_prev; + while (t != &timer->timeout) { + t = (struct _timeout *)sys_dlist_peek_next(timeout_q, + &t->node); + remaining_ticks += t->delta_ticks_from_prev; + } + } + + irq_unlock(key); + return _ticks_to_ms(remaining_ticks); +} diff --git a/kernel/unified/version.c b/kernel/unified/version.c new file mode 100644 index 00000000000..73e015edbb7 --- /dev/null +++ b/kernel/unified/version.c @@ -0,0 +1 @@ +#include "../nanokernel/version.c" diff --git a/kernel/unified/work_q.c b/kernel/unified/work_q.c new file mode 100644 index 00000000000..5bb9ddcd73b --- /dev/null +++ b/kernel/unified/work_q.c @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2016 Wind River Systems, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @file + * + * Workqueue support functions + */ + +#include +#include +#include + +static void work_q_main(void *work_q_ptr, void *p2, void *p3) +{ + struct k_work_q *work_q = work_q_ptr; + + ARG_UNUSED(p2); + ARG_UNUSED(p3); + + while (1) { + struct k_work *work; + k_work_handler_t handler; + + work = k_fifo_get(&work_q->fifo, K_FOREVER); + + handler = work->handler; + + /* Set state to idle so it can be resubmitted by handler */ + if (!atomic_test_and_set_bit(work->flags, K_WORK_STATE_IDLE)) { + handler(work); + } + + /* Make sure we don't hog up the CPU if the FIFO never (or + * very rarely) gets empty. + */ + k_yield(); + } +} + +void k_work_q_start(struct k_work_q *work_q, + const struct k_thread_config *config) +{ + k_fifo_init(&work_q->fifo); + + k_thread_spawn(config->stack, config->stack_size, + work_q_main, work_q, 0, 0, + config->prio, 0, 0); +} + +static void work_timeout(struct _timeout *t) +{ + struct k_delayed_work *w = CONTAINER_OF(t, struct k_delayed_work, + timeout); + + /* submit work to workqueue */ + k_work_submit_to_queue(w->work_q, &w->work); +} + +void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler) +{ + k_work_init(&work->work, handler); + _timeout_init(&work->timeout, work_timeout); + work->work_q = NULL; +} + +int k_delayed_work_submit_to_queue(struct k_work_q *work_q, + struct k_delayed_work *work, + int32_t timeout) +{ + int key = irq_lock(); + int err; + + /* Work cannot be active in multiple queues */ + if (work->work_q && work->work_q != work_q) { + err = -EADDRINUSE; + goto done; + } + + /* Cancel if work has been submitted */ + if (work->work_q == work_q) { + err = k_delayed_work_cancel(work); + if (err < 0) { + goto done; + } + } + + /* Attach workqueue so the timeout callback can submit it */ + work->work_q = work_q; + + if (!timeout) { + /* Submit work if no ticks is 0 */ + k_work_submit_to_queue(work_q, &work->work); + } else { + /* Add timeout */ + _do_timeout_add(NULL, &work->timeout, NULL, + _ms_to_ticks(timeout)); + } + + err = 0; + +done: + irq_unlock(key); + + return err; +} + +int k_delayed_work_cancel(struct k_delayed_work *work) +{ + int key = irq_lock(); + + if (!atomic_test_bit(work->work.flags, K_WORK_STATE_IDLE)) { + irq_unlock(key); + return -EINPROGRESS; + } + + if (!work->work_q) { + irq_unlock(key); + return -EINVAL; + } + + /* Abort timeout, if it has expired this will do nothing */ + _do_timeout_abort(&work->timeout); + + /* Detach from workqueue */ + work->work_q = NULL; + + irq_unlock(key); + + return 0; +} + +#ifdef CONFIG_SYSTEM_WORKQUEUE + +#include + +static char __stack sys_work_q_stack[CONFIG_SYSTEM_WORKQUEUE_STACK_SIZE]; + +static const struct k_thread_config sys_work_q_config = { + .stack = sys_work_q_stack, + .stack_size = sizeof(sys_work_q_stack), + .prio = CONFIG_SYSTEM_WORKQUEUE_PRIORITY, +}; + +struct k_work_q k_sys_work_q; + +static int k_sys_work_q_init(struct device *dev) +{ + ARG_UNUSED(dev); + + k_work_q_start(&k_sys_work_q, &sys_work_q_config); + + return 0; +} + +SYS_INIT(k_sys_work_q_init, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT); + +#endif diff --git a/scripts/sysgen b/scripts/sysgen index c406bf0d1ed..f5a18facf84 100755 --- a/scripts/sysgen +++ b/scripts/sysgen @@ -85,6 +85,7 @@ def get_cmdline_args(): global input_mdef_file global output_dir + global kernel_type output_dir_help='output directory for kernel_main.*, sysgen.h, etc' input_mdef_file_help='input MDEF file' @@ -171,11 +172,34 @@ def mdef_parse(): continue if (words[0] == "TASK"): - if (len(words) != 
6): - error_arg_count(line) - task_list.append((words[1], int(words[2]), words[3], - int(words[4]), words[5])) - continue + if kernel_type == 'micro': + if (len(words) != 6): + error_arg_count(line) + task_list.append((words[1], int(words[2]), words[3], + int(words[4]), words[5])) + continue + elif (kernel_type == 'unified'): + if len(words) < 6 and len(words) > 10: + error_arg_count(line) + + p1 = 0 + p2 = 0 + p3 = 0 + + if len(words) >= 7: + p1 = words[6] + if len(words) >= 8: + p2 = words[7] + if len(words) == 9: + p3 = words[8] + + abort = 0 + if len(words) == 10: + abort = words[9] + + task_list.append((words[1], int(words[2]), words[3], + int(words[4]), words[5], p1, p2, p3, abort)) + continue if (words[0] == "TASKGROUP"): if (len(words) != 2): @@ -194,10 +218,21 @@ def mdef_parse(): continue if (words[0] == "SEMA"): - if (len(words) != 2): - error_arg_count(line) - sema_list.append((words[1],)) - continue + if (kernel_type == "micro"): + if (len(words) != 2): + error_arg_count(line) + sema_list.append((words[1],)) + continue + elif (kernel_type == "unified"): + if len(words) < 2 and len(words) > 4: + error_arg_count(line) + if len(words) == 2: + sema_list.append((words[1], 0, 0xffffffff)) + elif len(words) == 3: + sema_list.append((words[1], int(words[2]), 0xffffffff)) + else: + sema_list.append((words[1], int(words[2]), int(words[3]))) + continue if (words[0] == "MUTEX"): if (len(words) != 2): @@ -270,17 +305,29 @@ def kernel_main_c_out(string): def kernel_main_c_header(): """ Generate initial portion of kernel_main.c """ - kernel_main_c_out( - kernel_main_c_filename_str + - copyright + - do_not_edit_warning + - "\n" + - "#include \n" + - "#include \n" + - "#include \n" + - "#include \n" + - "#include \n" + - "#include \n") + if kernel_type == 'micro': + kernel_main_c_out( + kernel_main_c_filename_str + + copyright + + do_not_edit_warning + + "\n" + + "#include \n" + + "#include \n" + + "#include \n" + + "#include \n" + + "#include \n" + + "#include \n") + else: + kernel_main_c_out( + kernel_main_c_filename_str + + copyright + + do_not_edit_warning + + "\n" + + "#include \n" + + "#include \n" + + "#include \n" + + "#include \n" + + "#include \n") def kernel_main_c_kargs(): @@ -337,9 +384,77 @@ def kernel_main_c_timers(): "{{NULL, &_k_timer_free.wait_q.head}, " + "(void *) &_k_timer_blocks[%d]};\n" % (num_timers - 1)) +def get_group_bitmask(group_str): -def kernel_main_c_tasks(): - """ Generate task variables """ + # create bitmask of group(s) task belongs to + group_bitmask = 0 + group_set = group_str[1:len(group_str) - 1] # drop [] surrounding groups + if (group_set != ""): + group_list = group_set.split(',') + for group in group_list: + group_bitmask |= group_dictionary[group] + + return group_bitmask + +def is_float(x): + try: + float(x) + return True + except ValueError: + return False + +def is_int(x): + try: + int(x) + return True + except ValueError: + return False + +def is_number(x): + return is_float(x) or is_int(x) + +def kernel_main_c_tasks_unified(): + global num_prios + + kernel_main_c_out("\n") + + # declare task entry points + + kernel_main_c_out("\n") + for task in task_list: + kernel_main_c_out("EXTERN_C void %s(void *, void *, void *);\n" % + task[2]) + + + # thread_init objects + + kernel_main_c_out("\n") + + for task in task_list: + name = task[0] + prio = task[1] + entry = task[2] + stack_size = task[3] + + groups = get_group_bitmask(task[4]) + + params = (task[5], task[6], task[7]) + for param in params: + if not is_number(param): + kernel_main_c_out("extern 
void *%s;\n" % (param)); + + abort = task[8] + if abort != 0 and abort != 'NULL': + kernel_main_c_out("EXTERN_C void %s(void);\n" % abort) + + kernel_main_c_out( + "K_THREAD_OBJ_DEFINE(%s, %u, %s, %s, %s, %s, %s, %d, 0x%x);\n" % + (name, int(stack_size), entry, + params[0], params[1], params[2], + abort, int(prio), int(groups))) + + +def kernel_main_c_tasks_micro(): global num_prios @@ -375,12 +490,7 @@ def kernel_main_c_tasks(): stack = "__" + task[0] + "_stack" # create bitmask of group(s) task belongs to - group_bitmask = 0 - group_set = task[4][1:len(task[4]) - 1] # drop [] surrounding groups - if (group_set != ""): - group_list = group_set.split(',') - for group in group_list: - group_bitmask |= group_dictionary[group] + group_bitmask = get_group_bitmask(task[4]) # invert bitmask to convert SYS indication to non-SYS indication # @@ -420,6 +530,15 @@ def kernel_main_c_tasks(): "struct k_task * _k_current_task = &_k_task_idle;\n") +def kernel_main_c_tasks(): + """ Generate task variables """ + + if kernel_type == 'micro': + kernel_main_c_tasks_micro() + else: + kernel_main_c_tasks_unified() + + def kernel_main_c_priorities(): """ Generate task scheduling variables """ @@ -461,6 +580,11 @@ def kernel_main_c_priorities(): def kernel_main_c_events(): """ Generate event variables """ + if kernel_type == 'micro': + event_type = 'int' + else: + event_type = 'struct k_event *' + # event descriptors # pre-defined event for timer @@ -478,9 +602,14 @@ def kernel_main_c_events(): # in other words, no declaration if handler is NULL or 0 handler = event[1].strip().lower() if handler != "null" and handler != "0": - kernel_main_c_out("extern int %s(int event);\n" % (event[1])) + kernel_main_c_out("extern int %s(%s event);\n" % + (event[1], event_type)) - kernel_main_c_out("DEFINE_EVENT(%s, %s);\n" % (event[0], event[1])) + if kernel_type == 'micro': + kernel_main_c_out("DEFINE_EVENT(%s, %s);\n" % (event[0], event[1])) + else: + kernel_main_c_out("K_EVENT_DEFINE(_k_event_obj_%s, %s);\n" % + (event[0], event[1])) def kernel_main_c_mutexes(): """ Generate mutex variables """ @@ -495,8 +624,11 @@ def kernel_main_c_mutexes(): kernel_main_c_out("\n") for mutex in mutex_list: name = mutex[0] - kernel_main_c_out("struct _k_mutex_struct _k_mutex_obj_%s = " % (name) + - "__MUTEX_DEFAULT;\n") + if kernel_type == 'micro': + kernel_main_c_out("struct _k_mutex_struct _k_mutex_obj_%s = " % + (name) + "__MUTEX_DEFAULT;\n") + else: + kernel_main_c_out("K_MUTEX_DEFINE(_k_mutex_obj_%s);\n" % (name)) def kernel_main_c_semas(): @@ -512,8 +644,14 @@ def kernel_main_c_semas(): kernel_main_c_out("\n") for semaphore in sema_list: name = semaphore[0] - kernel_main_c_out("struct _k_sem_struct _k_sem_obj_%s = " % (name) + - "__K_SEMAPHORE_DEFAULT;\n") + if kernel_type == 'micro': + kernel_main_c_out("struct _k_sem_struct _k_sem_obj_%s = " % + (name) + "__K_SEMAPHORE_DEFAULT;\n") + else: + initial_count = semaphore[1] + limit = semaphore[2] + kernel_main_c_out("K_SEM_DEFINE(_k_sem_obj_%s, %s, %s);\n" % + (name, initial_count, limit)) def kernel_main_c_fifos(): @@ -524,25 +662,30 @@ def kernel_main_c_fifos(): if (total_fifos == 0): return - # FIFO buffers - kernel_main_c_out("\n") - for fifo in fifo_list: - kernel_main_c_out( - "char __noinit __%s_buffer[%d];\n" % (fifo[0], fifo[1] * fifo[2])) + if kernel_type == 'micro': + # FIFO buffers and descriptors - # FIFO descriptors + for fifo in fifo_list: + name = fifo[0] + depth = fifo[1] + width = fifo[2] + buffer = "__" + name + "_buffer" + kernel_main_c_out("char __noinit %s[%d];\n" 
% + (buffer, depth * width)) + kernel_main_c_out( + "struct _k_fifo_struct _k_fifo_obj_%s = " % (name) + + "__K_FIFO_DEFAULT(%d, %d, %s);\n" % (depth, width, buffer)) + else: + # message queue objects - kernel_main_c_out("\n") - for fifo in fifo_list: - name = fifo[0] - depth = fifo[1] - width = fifo[2] - buffer = "__" + fifo[0] + "_buffer" - kernel_main_c_out("struct _k_fifo_struct _k_fifo_obj_%s = " % (name) + - "__K_FIFO_DEFAULT(%d, %d, %s);\n" % (depth, width, buffer)) - kernel_main_c_out("\n") + for fifo in fifo_list: + name = fifo[0] + depth = fifo[1] + width = fifo[2] + kernel_main_c_out("K_MSGQ_DEFINE(_k_fifo_obj_%s, %s, %s);\n" % + (name, depth, width)) def kernel_main_c_pipes(): @@ -557,21 +700,32 @@ def kernel_main_c_pipes(): kernel_main_c_out("\n") - for pipe in pipe_list: - kernel_main_c_out( - "char __noinit __%s_buffer[%d];\n" % (pipe[0], pipe[1])) + if kernel_type == 'micro': + for pipe in pipe_list: + kernel_main_c_out( + "char __noinit __%s_buffer[%d];\n" % (pipe[0], pipe[1])) - # pipe descriptors + # pipe descriptors - for pipe in pipe_list: - name = pipe[0] - size = pipe[1] - buffer = "__" + pipe[0] + "_buffer" - kernel_main_c_out("struct _k_pipe_struct _k_pipe_obj_%s = " % (name) + - " __K_PIPE_INITIALIZER(%d, %s);\n" % (size, buffer) + - "kpipe_t _k_pipe_ptr_%s " % (name) + - " __in_section(_k_pipe_ptr, public, pipe) =\n" + - " (kpipe_t)&_k_pipe_obj_%s;\n" % (name)) + for pipe in pipe_list: + name = pipe[0] + size = pipe[1] + buffer = "__" + pipe[0] + "_buffer" + kernel_main_c_out("struct _k_pipe_struct _k_pipe_obj_%s = " + % (name) + + " __K_PIPE_INITIALIZER(%d, %s);\n" % (size, buffer) + + "kpipe_t _k_pipe_ptr_%s " % (name) + + " __in_section(_k_pipe_ptr, public, pipe) =\n" + + " (kpipe_t)&_k_pipe_obj_%s;\n" % (name)) + + else: + # pipe objects + + for pipe in pipe_list: + name = pipe[0] + size = pipe[1] + kernel_main_c_out("K_PIPE_DEFINE(_k_pipe_obj_%s, %d);\n" % + (name, size)) def kernel_main_c_mailboxes(): @@ -582,14 +736,22 @@ def kernel_main_c_mailboxes(): if (total_mbxs == 0): return - # mailbox descriptors + kernel_main_c_out("\n") - kernel_main_c_out("\n") - for mbx in mbx_list: - name = mbx[0] - kernel_main_c_out("struct _k_mbox_struct _k_mbox_obj_%s = " % (name) + - "__K_MAILBOX_DEFAULT;\n") - kernel_main_c_out("\n") + if kernel_type == 'micro': + # mailbox descriptors + + for mbx in mbx_list: + name = mbx[0] + kernel_main_c_out( + "struct _k_mbox_struct _k_mbox_obj_%s = " % (name) + + "__K_MAILBOX_DEFAULT;\n") + else: + # mailbox objects + + for mbx in mbx_list: + name = mbx[0] + kernel_main_c_out("K_MBOX_DEFINE(_k_mbox_obj_%s);\n" % (name)) def kernel_main_c_maps(): @@ -600,29 +762,34 @@ def kernel_main_c_maps(): if (total_maps == 0): return - # memory map buffers - kernel_main_c_out("\n") - for map in map_list: - blocks = map[1] - block_size = map[2] - kernel_main_c_out("char __noinit __MAP_%s_buffer[%d];\n" % - (map[0], blocks * block_size)) + if kernel_type == 'micro': + # memory map buffers and descriptors - # memory map descriptors + for map in map_list: + name = map[0] + blocks = map[1] + block_size = map[2] + kernel_main_c_out("char __noinit __MAP_%s_buffer[%d];\n" % + (map[0], blocks * block_size)) + kernel_main_c_out( + "struct _k_mem_map_struct _k_mem_map_obj_%s = " % (name) + + "__K_MEM_MAP_INITIALIZER(%d, %d, __MAP_%s_buffer);\n" % + (blocks, block_size, map[0])) + kernel_main_c_out( + "kmemory_map_t _k_mem_map_ptr_%s " % (name) + + " __in_section(_k_mem_map_ptr, public, mem_map) =\n" + + " (kmemory_map_t)&_k_mem_map_obj_%s;\n" % (name)) + 
else: + # memory map objects - for map in map_list: - name = map[0] - blocks = map[1] - block_size = map[2] - kernel_main_c_out( - "struct _k_mem_map_struct _k_mem_map_obj_%s = " % (name) + - " __K_MEM_MAP_INITIALIZER(%d, %d, __MAP_%s_buffer);\n" % - (blocks, block_size, map[0]) + - "kmemory_map_t _k_mem_map_ptr_%s " % (name) + - " __in_section(_k_mem_map_ptr, public, mem_map) =\n" + - " (kmemory_map_t)&_k_mem_map_obj_%s;\n" % (name)) + for map in map_list: + name = map[0] + blocks = map[1] + block_size = map[2] + kernel_main_c_out("K_MEM_MAP_DEFINE(_k_mem_map_obj_%s, %s, %s);\n" % + (name, blocks, block_size)) def kernel_main_c_pools(): @@ -791,19 +958,22 @@ def kernel_main_c_generate(): global kernel_main_c_data kernel_main_c_header() - kernel_main_c_kargs() - kernel_main_c_timers() - kernel_main_c_tasks() - kernel_main_c_priorities() - kernel_main_c_events() + kernel_main_c_mutexes() kernel_main_c_semas() - kernel_main_c_fifos() - kernel_main_c_pipes() - kernel_main_c_mailboxes() + kernel_main_c_events() kernel_main_c_maps() - kernel_main_c_pools() - kernel_main_c_node_init() + kernel_main_c_fifos() + kernel_main_c_mailboxes() + kernel_main_c_tasks() + kernel_main_c_pipes() + + if kernel_type == 'micro': + kernel_main_c_kargs() + kernel_main_c_timers() + kernel_main_c_pools() + kernel_main_c_node_init() + kernel_main_c_priorities() write_file(output_dir + 'kernel_main.c', kernel_main_c_data) @@ -864,11 +1034,17 @@ sysgen_h_header_include_guard_str = \ def generate_sysgen_h_header(): global sysgen_h_data + + if kernel_type == 'micro': + kernel_api_file = "#include \n" + else: + kernel_api_file = "#include \n" + sysgen_h_data += \ sysgen_h_filename_str + \ copyright + \ do_not_edit_warning + \ - "#include \n" + \ + kernel_api_file + \ sysgen_h_header_include_guard_str + \ "\n" @@ -912,15 +1088,46 @@ def generate_sysgen_h_obj_ids(): global sysgen_h_data + if kernel_type == 'micro': + mutex_struct = '_k_mutex_struct' + mutex_type = 'kmutex_t' + sem_struct = '_k_sem_struct' + sem_type = 'ksem_t' + pipe_struct = '_k_pipe_struct' + pipe_type = 'kpipe_t' + map_struct = '_k_mem_map_struct' + map_type = 'kmemory_map_t' + fifo_struct = '_k_fifo_struct' + fifo_type = 'kfifo_t' + mbox_struct = '_k_mbox_struct' + mbox_type = 'kmbox_t' + event_type = 'kevent_t' + # add missing object types + else: + mutex_struct = 'k_mutex' + mutex_type = 'struct k_mutex *' + sem_struct = 'k_sem' + sem_type = 'struct k_sem *' + pipe_struct = 'k_pipe' + pipe_type = 'struct k_pipe *' + map_struct = 'k_mem_map' + map_type = 'struct k_mem_map *' + fifo_struct = 'k_msgq' + fifo_type = 'struct k_msgq *' + mbox_struct = 'k_mbox' + mbox_type = 'struct k_mbox *' + event_type = 'struct k_event *' + # add missing object types + # mutex object ids sysgen_h_data += "\n" for mutex in mutex_list: name = mutex[0] sysgen_h_data += \ - "extern struct _k_mutex_struct _k_mutex_obj_%s;\n" % (name) + "extern struct %s _k_mutex_obj_%s;\n" % (mutex_struct, name) sysgen_h_data += \ - "#define %s ((kmutex_t)&_k_mutex_obj_%s)\n\n" % (name, name) + "#define %s ((%s)&_k_mutex_obj_%s)\n\n" % (name, mutex_type, name) # semaphore object ids @@ -928,19 +1135,19 @@ def generate_sysgen_h_obj_ids(): for semaphore in sema_list: name = semaphore[0] sysgen_h_data += \ - "extern struct _k_sem_struct _k_sem_obj_%s;\n" % (name) + "extern struct %s _k_sem_obj_%s;\n" % (sem_struct, name) sysgen_h_data += \ - "#define %s ((ksem_t)&_k_sem_obj_%s)\n\n" % (name, name) + "#define %s ((%s)&_k_sem_obj_%s)\n\n" % (name, sem_type, name) - # fifo object ids + # fifo (aka 
message queue) object ids sysgen_h_data += "\n" for fifo in fifo_list: name = fifo[0] sysgen_h_data += \ - "extern struct _k_fifo_struct _k_fifo_obj_%s;\n" % (name) + "extern struct %s _k_fifo_obj_%s;\n" % (fifo_struct, name) sysgen_h_data += \ - "#define %s ((kfifo_t)&_k_fifo_obj_%s)\n\n" % (name, name) + "#define %s ((%s)&_k_fifo_obj_%s)\n\n" % (name, fifo_type, name) # mailbox object ids @@ -948,9 +1155,9 @@ def generate_sysgen_h_obj_ids(): for mbx in mbx_list: name = mbx[0] sysgen_h_data += \ - "extern struct _k_mbox_struct _k_mbox_obj_%s;\n" % (name) + "extern struct %s _k_mbox_obj_%s;\n" % (mbox_struct, name) sysgen_h_data += \ - "#define %s ((kmbox_t)&_k_mbox_obj_%s)\n\n" % (name, name) + "#define %s ((%s)&_k_mbox_obj_%s)\n\n" % (name, mbox_type, name) # pipe object id @@ -958,8 +1165,9 @@ def generate_sysgen_h_obj_ids(): for pipe in pipe_list: name = pipe[0]; sysgen_h_data += \ - "extern struct _k_pipe_struct _k_pipe_obj_%s;\n" % (name) + \ - "#define %s ((kpipe_t)&_k_pipe_obj_%s)\n\n" % (name, name) + "extern struct %s _k_pipe_obj_%s;\n" % (pipe_struct, name) + sysgen_h_data += \ + "#define %s ((%s)&_k_pipe_obj_%s)\n\n" % (name, pipe_type, name) # memory map object id @@ -967,27 +1175,38 @@ def generate_sysgen_h_obj_ids(): for map in map_list: name = map[0]; sysgen_h_data += \ - "extern struct _k_mem_map_struct _k_mem_map_obj_%s;\n" % (name) + \ - "#define %s ((kmemory_map_t)&_k_mem_map_obj_%s)\n" % (name, name) + "extern struct %s _k_mem_map_obj_%s;\n" % (map_struct, name) + sysgen_h_data += \ + "#define %s ((%s)&_k_mem_map_obj_%s)\n" % (name, map_type, name) # task object id sysgen_h_data += "\n" for task in task_list: name = task[0]; - sysgen_h_data += \ - "extern struct k_task _k_task_obj_%s;\n" % (name) + \ - "#define %s ((ktask_t)&_k_task_obj_%s)\n" % (name, name) + if kernel_type == 'micro': + sysgen_h_data += \ + "extern struct k_task _k_task_obj_%s;\n" % (name) + \ + "#define %s ((ktask_t)&_k_task_obj_%s)\n" % (name, name) + elif (kernel_type == 'unified'): + sysgen_h_data += \ + "extern char _k_thread_obj_%s[];\n" % (name) + \ + "#define %s ((k_tid_t)_k_thread_obj_%s)\n" % (name, name) - # event object id + # event object ids sysgen_h_data += "\n" for event in event_list: # no need to expose the irq task events if not (event[0].startswith("_TaskIrqEvt")): name = event[0]; - sysgen_h_data += \ - "extern const kevent_t %s;\n" % (name) + if kernel_type == 'micro': + sysgen_h_data += "extern const %s %s;\n" % (event_type, name) + elif (kernel_type == 'unified'): + sysgen_h_data += \ + "extern struct k_event _k_event_obj_%s;\n" % (name) + sysgen_h_data += \ + "#define %s (&_k_event_obj_%s)\n\n" % (name, name) # all other object ids @@ -1023,10 +1242,10 @@ def sysgen_h_generate(): # SYSTEM GENERATOR MAINLINE # - get_cmdline_args() mdef_parse() kernel_main_c_generate() -kernel_main_h_generate() -micro_private_types_h_generate() +if kernel_type == 'micro': + kernel_main_h_generate() + micro_private_types_h_generate() sysgen_h_generate()
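
A minimal, self-contained sketch of how the unified API introduced by this
patch fits together (semaphore plus thread spawn). The stack size, priority,
timeout value and the demo function names are illustrative assumptions only,
not part of the patch:

#include <kernel.h>

static char __stack consumer_stack[512];
static struct k_sem data_ready;

static void consumer(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (1) {
		/* k_sem_take() returns 0 on success, a negative value otherwise */
		if (k_sem_take(&data_ready, 100) == 0) {
			/* ... consume the data signalled by the producer ... */
		}
	}
}

void demo_start(void)
{
	/* binary semaphore: initial count 0, maximum count 1 */
	k_sem_init(&data_ready, 0, 1);

	/* spawn the consumer with no startup delay */
	k_thread_spawn(consumer_stack, sizeof(consumer_stack), consumer,
		       NULL, NULL, NULL, 7, 0, 0);

	/* signal the consumer that data is available */
	k_sem_give(&data_ready);
}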