unified: initial unified kernel implementation

Summary of what this includes:

    initialization:

    Copy from nano_init.c, with the following changes:

    - the main thread is the continuation of the init thread, but an idle
      thread is created as well

    - _main() initializes threads in groups and starts the EXE group

    - the ready queues are initialized

    - the main thread is marked as non-essential once the system init is
      done

    - a weak main() symbol is provided if the application does not provide a
      main() function
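
    A minimal sketch of the weak main() fallback described above (the
    GCC-style attribute shown here is an assumption; the kernel may use
    its own __weak helper macro instead):

        /* fallback used only if the application defines no main() */
        void __attribute__((weak)) main(void)
        {
            /* nothing to do; the system simply idles after init */
        }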

    scheduler:

    Not an exhaustive list, but it basically provides primitives for:

    - adding/removing a thread to/from a wait queue
    - adding/removing a thread to/from the ready queue
    - marking thread as ready
    - locking/unlocking the scheduler
      - instead of locking interrupts (see the sketch after this list)
    - getting/setting thread priority
      - checking what state (coop/preempt) a thread is currently running in
    - rescheduling threads
    - finding what thread is the next to run
    - yielding/sleeping/aborting sleep
    - finding the current thread
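
    As a sketch of the scheduler lock/unlock primitive mentioned in the
    list above (it relies on the k_sched_lock()/k_sched_unlock() pair
    added by this patch; the critical section shown is arbitrary):

        #include <kernel.h>

        static int shared_counter;

        void bump_counter(void)
        {
            k_sched_lock();      /* other threads cannot preempt us now */
            shared_counter++;    /* protected against thread-level races
                                  * only; ISRs are NOT locked out
                                  */
            k_sched_unlock();    /* may trigger an immediate reschedule */
        }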

    threads:

    - Add operations on threads, such as creating and starting them.
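
    For example, a sketch using the k_thread_spawn() call that the legacy
    fiber_start() wrapper below maps to (stack size, priority and the lack
    of a start delay are arbitrary choices):

        #include <kernel.h>

        #define MY_STACK_SIZE 512
        static char my_stack[MY_STACK_SIZE];

        static void my_entry(void *p1, void *p2, void *p3)
        {
            /* thread body */
        }

        void start_worker(void)
        {
            /* preemptible thread at priority 5, started immediately */
            k_tid_t tid = k_thread_spawn(my_stack, MY_STACK_SIZE, my_entry,
                                         NULL, NULL, NULL, 5, 0, 0);
            (void)tid;
        }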

    standardized handling of kernel object return codes:

    - Kernel objects now cause _Swap() to return the following values:
         0      => operation successful
        -EAGAIN => operation timed out
        -Exxxxx => operation failed for another reason

    - The thread's swap_data field can be used to return any additional
    information required to complete the operation, such as the actual
    result of a successful operation.
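
    A sketch of how a blocking kernel object call typically uses this
    convention, modeled on the k_fifo_get() implementation included in
    this patch (struct k_obj and k_obj_get() are illustrative names):

        #include <kernel.h>
        #include <nano_private.h>
        #include <wait_q.h>
        #include <sched.h>

        struct k_obj {
            _wait_q_t wait_q;
            /* object-specific state would go here */
        };

        void *k_obj_get(struct k_obj *obj, int32_t timeout)
        {
            unsigned int key = irq_lock();

            /* ... return immediately here if data is already available ... */

            if (timeout == K_NO_WAIT) {
                irq_unlock(key);
                return NULL;
            }

            _pend_current_thread(&obj->wait_q, timeout);

            /*
             * _Swap() returns 0 on success, -EAGAIN on timeout, -Exxxxx
             * otherwise; on success the result is passed via swap_data.
             */
            return _Swap(key) ? NULL : _current->swap_data;
        }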

    timeouts:

    - same as nano timeouts, renamed to simply 'timeouts'
    - the kernel is still tick-based, but objects take timeout values in
      ms for forward compatibility with a tickless kernel.
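
    For instance, callers now pass milliseconds directly (a sketch; the
    500 ms value is arbitrary, and _ticks_to_ms() is the helper the legacy
    wrappers use for conversion in the other direction):

        #include <kernel.h>

        extern struct k_sem my_sem;   /* initialized elsewhere */

        int wait_half_second(void)
        {
            /* timeout is expressed in ms, even though the kernel still
             * counts time in ticks internally
             */
            return k_sem_take(&my_sem, 500);
        }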

    semaphores:

      - Port of the nanokernel semaphores, which have the same basic behaviour
      as the microkernel ones. Semaphore groups are not yet implemented.

      - These semaphores are enhanced in that they accept an initial count and a
      count limit. This allows configuring them as binary semaphores, and also
      provisioning them without having to "give" the semaphore multiple times
      before using them.
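
    For example (a sketch using the initial-count/limit form of
    k_sem_init() that the legacy wrappers below rely on):

        #include <kernel.h>

        struct k_sem binary_sem;
        struct k_sem resource_sem;

        void init_sems(void)
        {
            /* binary semaphore: starts empty, count can never exceed 1 */
            k_sem_init(&binary_sem, 0, 1);

            /* counting semaphore pre-provisioned with 5 of 10 units,
             * without having to "give" it five times first
             */
            k_sem_init(&resource_sem, 5, 10);
        }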

    mutexes:

    - Straight port of the microkernel mutexes. An init function is added to
    allow defining them at runtime.
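
    A sketch of both ways of defining a mutex after this change (the
    runtime init function is assumed to be named k_mutex_init(), per the
    k_ naming rule; the 100 ms timeout is arbitrary):

        #include <kernel.h>

        K_MUTEX_DEFINE(static_mutex);       /* compile-time definition */

        struct k_mutex dynamic_mutex;       /* runtime definition */

        void use_mutexes(void)
        {
            k_mutex_init(&dynamic_mutex);

            if (k_mutex_lock(&dynamic_mutex, 100) == 0) {
                /* ... critical section ... */
                k_mutex_unlock(&dynamic_mutex);
            }
        }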

    pipes:

    - straight port

    timers:

    - amalgamation of nano and micro timers, with all functionality
      intact.

    events:

    - re-implementation, using semaphores and workqueues.
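
    A sketch of the resulting API, based on the k_event_init(),
    k_event_send() and k_event_recv() calls visible in
    kernel/unified/event.c below (the handler and timeout are
    illustrative):

        #include <kernel.h>

        static struct k_event my_event;

        /* return 0 if fully handled, non-zero to pend for k_event_recv() */
        static int my_event_handler(struct k_event *event)
        {
            return 1;
        }

        void event_demo(void)
        {
            k_event_init(&my_event, my_event_handler);

            k_event_send(&my_event);    /* delivered via the work queue */

            if (k_event_recv(&my_event, 100) == 0) {
                /* event received within 100 ms */
            }
        }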

    mailboxes:

    - straight port

    message queues:

    - straight port of microkernel FIFOs
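
    For example, a sketch using the K_MSGQ_DEFINE(), k_msgq_put() and
    k_msgq_get() calls that the legacy task_fifo_*() wrappers below map to
    (queue depth, message width and timeouts are arbitrary):

        #include <stdint.h>
        #include <kernel.h>

        /* queue of 10 messages, 4 bytes each */
        K_MSGQ_DEFINE(my_msgq, 10, 4);

        int produce(uint32_t value)
        {
            /* wait up to 100 ms for room in the queue */
            return k_msgq_put(&my_msgq, &value, 100);
        }

        int consume(uint32_t *value)
        {
            /* 0 on success, -EAGAIN on timeout */
            return k_msgq_get(&my_msgq, value, 100);
        }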

    memory maps:

    - straight port

    workqueues:

    - Basically the same, except that all APIs follow the k_ naming rule and
      the implementation uses the _timeout subsystem from the unified kernel
      directory rather than the _nano_timeout one.
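
    A sketch of deferring work under the new naming (k_work_init() and
    k_work_submit() are the calls the legacy nano_work_*() macros below
    map to):

        #include <kernel.h>

        static void my_work_handler(struct k_work *work)
        {
            /* runs in the system workqueue thread, not in the ISR */
        }

        static struct k_work my_work;

        void my_driver_init(void)
        {
            k_work_init(&my_work, my_work_handler);
        }

        void my_isr(void *arg)
        {
            /* defer the heavy lifting out of interrupt context */
            k_work_submit(&my_work);
        }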

    stacks:

    - Port of the nanokernel stacks. They can now have multiple threads
    pending on them and threads can wait with a timeout.
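
    For example (a sketch following the k_stack_init_with_buffer() and
    k_stack_push()/k_stack_pop() usage visible in the legacy wrappers
    below; buffer size and timeout are arbitrary):

        #include <stdint.h>
        #include <kernel.h>

        #define STACK_ENTRIES 16
        static uint32_t stack_buffer[STACK_ENTRIES];
        static struct k_stack my_stack;

        void init_stack(void)
        {
            k_stack_init_with_buffer(&my_stack, STACK_ENTRIES, stack_buffer);
        }

        void push_value(uint32_t value)
        {
            k_stack_push(&my_stack, value);
        }

        int pop_value(uint32_t *value)
        {
            /* several threads may pend here; wait up to 200 ms */
            return k_stack_pop(&my_stack, value, 200);
        }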

    LIFOs:

    - Straight port of the nanokernel LIFOs.

    FIFOs:

    - Straight port of the nanokernel FIFOs.
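
    For example, a sketch based on the k_fifo implementation in
    kernel/unified/fifo.c below; note that the first word of a queued item
    is used by the kernel as the list link:

        #include <stdint.h>
        #include <kernel.h>

        struct my_item {
            void *fifo_reserved;    /* first word reserved for the kernel */
            uint32_t payload;
        };

        static struct k_fifo my_fifo;
        static struct my_item item;

        void fifo_demo(void)
        {
            k_fifo_init(&my_fifo);

            item.payload = 42;
            k_fifo_put(&my_fifo, &item);

            /* wait up to 100 ms; returns NULL on timeout */
            struct my_item *rx = k_fifo_get(&my_fifo, 100);
            (void)rx;
        }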

Work by: Dmitriy Korovkin <dmitriy.korovkin@windriver.com>
         Peter Mitsis <peter.mitsis@windriver.com>
         Allan Stephens <allan.stephens@windriver.com>
         Benjamin Walsh <benjamin.walsh@windriver.com>

Change-Id: Id3cadb3694484ab2ca467889cfb029be3cd3a7d6
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
Author: Benjamin Walsh
Date:   2016-09-02 18:55:39 -04:00
Commit: 456c6daa9f
42 changed files with 7888 additions and 121 deletions

include/kernel.h (new file, 1050 lines)

File diff suppressed because it is too large.

include/legacy.h (new file, 702 lines)

@@ -0,0 +1,702 @@
/*
* Copyright (c) 2016, Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
*
* @brief Public legacy kernel APIs.
*/
#ifndef _legacy__h_
#define _legacy__h_
#include <stdint.h>
#include <errno.h>
#include <limits.h>
#include <sys_clock.h>
/* nanokernel/microkernel execution context types */
#define NANO_CTX_ISR (K_ISR)
#define NANO_CTX_FIBER (K_COOP_THREAD)
#define NANO_CTX_TASK (K_PREEMPT_THREAD)
/* timeout special values */
#define TICKS_UNLIMITED (K_FOREVER)
#define TICKS_NONE (K_NO_WAIT)
/* microkernel object return codes */
#define RC_OK 0
#define RC_FAIL 1
#define RC_TIME 2
#define RC_ALIGNMENT 3
#define RC_INCOMPLETE 4
#define ANYTASK K_ANY
/* end-of-list, mostly used for semaphore groups */
#define ENDLIST K_END
/* pipe amount of content to receive (0+, 1+, all) */
typedef enum {
_0_TO_N = 0x0,
_1_TO_N = 0x1,
_ALL_N = 0x2,
} K_PIPE_OPTION;
#define kpriority_t uint32_t
static inline int32_t _ticks_to_ms(int32_t ticks)
{
return (ticks == TICKS_UNLIMITED) ? K_FOREVER :
(MSEC_PER_SEC * (uint64_t)ticks) / sys_clock_ticks_per_sec;
}
static inline int _error_to_rc(int err)
{
return err == 0 ? RC_OK : err == -EAGAIN ? RC_TIME : RC_FAIL;
}
static inline int _error_to_rc_no_timeout(int err)
{
return err == 0 ? RC_OK : RC_FAIL;
}
/* tasks/fibers/scheduler */
#define ktask_t k_tid_t
#define nano_thread_id_t k_tid_t
typedef void (*nano_fiber_entry_t)(int i1, int i2);
typedef int nano_context_type_t;
#define sys_thread_self_get k_current_get
#define sys_thread_busy_wait k_busy_wait
extern int sys_execution_context_type_get(void);
static inline nano_thread_id_t fiber_start(char *stack, unsigned stack_size,
nano_fiber_entry_t entry,
int arg1, int arg2,
unsigned prio,
unsigned options)
{
return k_thread_spawn(stack, stack_size, (k_thread_entry_t)entry,
(void *)arg1, (void *)arg2, NULL,
K_PRIO_COOP(prio), options, 0);
}
#define fiber_fiber_start fiber_start
#define task_fiber_start fiber_start
#define fiber_config k_thread_config
#define fiber_start_config(config, entry, arg1, arg2, options) \
fiber_start(config->stack, config->stack_size, \
entry, arg1, arg2, \
config->prio, options)
#define fiber_fiber_start_config fiber_start_config
#define task_fiber_start_config fiber_start_config
static inline nano_thread_id_t
fiber_delayed_start(char *stack, unsigned int stack_size_in_bytes,
nano_fiber_entry_t entry_point, int param1,
int param2, unsigned int priority,
unsigned int options, int32_t timeout_in_ticks)
{
return k_thread_spawn(stack, stack_size_in_bytes,
(k_thread_entry_t)entry_point,
(void *)param1, (void *)param2, NULL,
K_PRIO_COOP(priority), options,
_ticks_to_ms(timeout_in_ticks));
}
#define fiber_fiber_delayed_start fiber_delayed_start
#define task_fiber_delayed_start fiber_delayed_start
#define fiber_delayed_start_cancel(fiber) k_thread_cancel((k_tid_t)fiber)
#define fiber_fiber_delayed_start_cancel fiber_delayed_start_cancel
#define task_fiber_delayed_start_cancel fiber_delayed_start_cancel
#define fiber_yield k_yield
#define fiber_abort() k_thread_abort(k_current_get())
static inline void fiber_sleep(int32_t timeout)
{
k_sleep(_ticks_to_ms(timeout));
}
#define fiber_wakeup k_wakeup
#define isr_fiber_wakeup k_wakeup
#define fiber_fiber_wakeup k_wakeup
#define task_fiber_wakeup k_wakeup
#define task_sleep fiber_sleep
#define task_yield k_yield
#define task_priority_set(task, prio) k_thread_priority_set(task, (int)prio)
#define task_entry_set(task, entry) \
k_thread_entry_set(task, (k_thread_entry_t)entry)
#define task_abort_handler_set k_thread_abort_handler_set
static inline void task_offload_to_fiber(int (*func)(), void *argp)
{
/* XXX - implement via work queue */
}
#define task_id_get k_current_get
#define task_priority_get (kpriority_t)k_current_priority_get
#define task_abort k_thread_abort
#define task_suspend k_thread_suspend
#define task_resume k_thread_resume
extern void task_start(ktask_t task);
static inline void sys_scheduler_time_slice_set(int32_t ticks,
kpriority_t prio)
{
k_sched_time_slice_set(_ticks_to_ms(ticks), (int)prio);
}
extern void _k_thread_group_op(uint32_t groups, void (*func)(struct tcs *));
static inline uint32_t task_group_mask_get(void)
{
extern uint32_t _k_thread_group_mask_get(struct tcs *thread);
return _k_thread_group_mask_get(k_current_get());
}
#define isr_task_group_mask_get task_group_mask_get
static inline void task_group_join(uint32_t groups)
{
extern void _k_thread_group_join(uint32_t groups, struct tcs *thread);
_k_thread_group_join(groups, k_current_get());
}
static inline void task_group_leave(uint32_t groups)
{
extern void _k_thread_group_leave(uint32_t groups, struct tcs *thread);
_k_thread_group_leave(groups, k_current_get());
}
static inline void task_group_start(uint32_t groups)
{
extern void _k_thread_single_start(struct tcs *thread);
return _k_thread_group_op(groups, _k_thread_single_start);
}
static inline void task_group_suspend(uint32_t groups)
{
extern void _k_thread_single_suspend(struct tcs *thread);
return _k_thread_group_op(groups, _k_thread_single_suspend);
}
static inline void task_group_resume(uint32_t groups)
{
extern void _k_thread_single_resume(struct tcs *thread);
return _k_thread_group_op(groups, _k_thread_single_resume);
}
static inline void task_group_abort(uint32_t groups)
{
extern void _k_thread_single_abort(struct tcs *thread);
return _k_thread_group_op(groups, _k_thread_single_abort);
}
#if 0
#define isr_task_id_get() task_id_get()
#define isr_task_priority_get() task_priority_get()
#endif
/* mutexes */
#define kmutex_t struct k_mutex *
static inline int task_mutex_lock(kmutex_t id, int32_t timeout)
{
return _error_to_rc(k_mutex_lock(id, _ticks_to_ms(timeout)));
}
#define task_mutex_unlock k_mutex_unlock
#define DEFINE_MUTEX(name) \
K_MUTEX_DEFINE(_k_mutex_obj_##name); \
struct k_mutex * const name = &_k_mutex_obj_##name
/* semaphores */
#define nano_sem k_sem
#define ksem_t struct k_sem *
static inline void nano_sem_init(struct nano_sem *sem)
{
k_sem_init(sem, 0, UINT_MAX);
}
#define nano_sem_give(id) k_sem_give((struct k_sem *)id)
#define nano_isr_sem_give(id) k_sem_give((struct k_sem *)id)
#define nano_fiber_sem_give(id) k_sem_give((struct k_sem *)id)
#define nano_task_sem_give(id) k_sem_give((struct k_sem *)id)
static inline int nano_sem_take(struct nano_sem *sem, int32_t timeout)
{
return k_sem_take((struct k_sem *)sem, _ticks_to_ms(timeout))
== 0 ? 1 : 0;
}
#define nano_isr_sem_take nano_sem_take
#define nano_fiber_sem_take nano_sem_take
#define nano_task_sem_take nano_sem_take
#define isr_sem_give k_sem_give
#define fiber_sem_give k_sem_give
#define task_sem_give k_sem_give
static inline int task_sem_take(ksem_t sem, int32_t timeout)
{
return _error_to_rc(k_sem_take(sem, _ticks_to_ms(timeout)));
}
#define task_sem_reset k_sem_reset
#define task_sem_count_get k_sem_count_get
typedef ksem_t *ksemg_t;
static inline ksem_t task_sem_group_take(ksemg_t group, int32_t timeout)
{
return k_sem_group_take(group, _ticks_to_ms(timeout));
}
#define task_sem_group_give k_sem_group_give
#define task_sem_group_reset k_sem_group_reset
#define DEFINE_SEMAPHORE(name) \
K_SEM_DEFINE(_k_sem_obj_##name, 0, UINT_MAX); \
struct k_sem * const name = &_k_sem_obj_##name
/* workqueues */
#define nano_work k_work
#define work_handler_t k_work_handler_t
#define nano_workqueue k_work_q
#define nano_delayed_work k_delayed_work
#define nano_work_init k_work_init
#define nano_work_submit_to_queue k_work_submit_to_queue
#define nano_workqueue_start k_work_q_start
#define nano_task_workqueue_start nano_fiber_workqueue_start
#define nano_fiber_workqueue_start nano_fiber_workqueue_start
#define nano_delayed_work_init k_delayed_work_init
static inline int nano_delayed_work_submit_to_queue(struct nano_workqueue *wq,
struct nano_delayed_work *work,
int ticks)
{
return k_delayed_work_submit_to_queue(wq, work, _ticks_to_ms(ticks));
}
#define nano_delayed_work_cancel k_delayed_work_cancel
#define nano_work_submit k_work_submit
#define nano_delayed_work_submit(work, ticks) \
nano_delayed_work_submit_to_queue(&k_sys_work_q, work, ticks)
/* events */
#define kevent_t const struct k_event *
typedef int (*kevent_handler_t)(int event);
#define isr_event_send task_event_send
#define fiber_event_send task_event_send
static inline int task_event_handler_set(kevent_t legacy_event,
kevent_handler_t handler)
{
struct k_event *event = (struct k_event *)legacy_event;
if ((event->handler != NULL) && (handler != NULL)) {
/* can't overwrite an existing event handler */
return RC_FAIL;
}
event->handler = (k_event_handler_t)handler;
return RC_OK;
}
static inline int task_event_send(kevent_t legacy_event)
{
k_event_send((struct k_event *)legacy_event);
return RC_OK;
}
static inline int task_event_recv(kevent_t legacy_event, int32_t timeout)
{
return _error_to_rc(k_event_recv((struct k_event *)legacy_event,
_ticks_to_ms(timeout)));
}
#define DEFINE_EVENT(name, event_handler) \
K_EVENT_DEFINE(_k_event_obj_##name, event_handler); \
struct k_event * const name = &(_k_event_obj_##name)
/* memory maps */
#define kmemory_map_t struct k_mem_map *
static inline int task_mem_map_alloc(kmemory_map_t map, void **mptr,
int32_t timeout)
{
return _error_to_rc(k_mem_map_alloc(map, mptr, _ticks_to_ms(timeout)));
}
#define task_mem_map_free k_mem_map_free
#define task_mem_map_used_get k_mem_map_num_used_get
#define DEFINE_MEM_MAP(name, map_num_blocks, map_block_size) \
K_MEM_MAP_DEFINE(_k_mem_map_obj_##name, \
map_num_blocks, map_block_size); \
struct k_mem_map *const name = &_k_mem_map_obj_##name
/* memory pools */
#define k_block k_mem_block
#define kmemory_pool_t k_mem_pool_t
#if 0 /* unimplemented object */
static inline int task_mem_pool_alloc(struct k_block *blockptr,
kmemory_pool_t pool_id,
int reqsize, int32_t timeout)
{
return _error_to_rc(k_mem_pool_alloc(pool_id, blockptr, reqsize,
_ticks_to_ms(timeout)));
}
#define task_mem_pool_free k_mem_pool_free
#define task_mem_pool_defragment k_mem_pool_defrag
#define task_malloc k_malloc
#define task_free k_free
#endif
/* message queues */
#define kfifo_t struct k_msgq *
static inline int task_fifo_put(kfifo_t queue, void *data, int32_t timeout)
{
return _error_to_rc(k_msgq_put(queue, data, _ticks_to_ms(timeout)));
}
static inline int task_fifo_get(kfifo_t queue, void *data, int32_t timeout)
{
return _error_to_rc(k_msgq_get(queue, data, _ticks_to_ms(timeout)));
}
static inline int task_fifo_purge(kfifo_t queue)
{
k_msgq_purge(queue);
return RC_OK;
}
static inline int task_fifo_size_get(kfifo_t queue)
{
return queue->used_msgs;
}
#define DEFINE_FIFO(name, q_depth, q_width) \
K_MSGQ_DEFINE(_k_fifo_obj_##name, q_depth, q_width); \
struct k_msgq * const name = &_k_fifo_obj_##name
/* mailboxes */
#define kmbox_t struct k_mbox *
struct k_msg {
/** Mailbox ID */
kmbox_t mailbox;
/** size of message (bytes) */
uint32_t size;
/** information field, free for user */
uint32_t info;
/** pointer to message data at sender side */
void *tx_data;
/** pointer to message data at receiver */
void *rx_data;
/** for async message posting */
struct k_block tx_block;
/** sending task */
ktask_t tx_task;
/** receiving task */
ktask_t rx_task;
/** internal use only */
union {
/** for 2-steps data transfer operation */
struct k_args *transfer;
/** semaphore to signal when asynchr. call */
ksem_t sema;
} extra;
};
int task_mbox_put(kmbox_t mbox, kpriority_t prio, struct k_msg *msg,
int32_t timeout);
void task_mbox_block_put(kmbox_t mbox, kpriority_t prio, struct k_msg *msg,
ksem_t sema);
int task_mbox_get(kmbox_t mbox, struct k_msg *msg, int32_t timeout);
void task_mbox_data_get(struct k_msg *msg);
int task_mbox_data_block_get(struct k_msg *msg, struct k_block *block,
kmemory_pool_t pool_id, int32_t timeout);
#define DEFINE_MAILBOX(name) \
K_MBOX_DEFINE(_k_mbox_obj_##name); \
struct k_mbox * const name = &_k_mbox_obj_##name
/* pipes */
#define kpipe_t struct k_pipe *
static inline int task_pipe_put(kpipe_t id, void *buffer, int bytes_to_write,
int *bytes_written, K_PIPE_OPTION options,
int32_t timeout)
{
size_t min_xfer = (size_t)options;
__ASSERT((options == _0_TO_N) ||
(options == _1_TO_N) ||
(options == _ALL_N), "Invalid pipe option");
*bytes_written = 0;
if (bytes_to_write == 0) {
return RC_FAIL;
}
if ((options == _0_TO_N) && (timeout != K_NO_WAIT)) {
return RC_FAIL;
}
if (options == _ALL_N) {
min_xfer = bytes_to_write;
}
return _error_to_rc(k_pipe_put(id, buffer, bytes_to_write,
(size_t *)bytes_written, min_xfer,
_ticks_to_ms(timeout)));
}
static inline int task_pipe_get(kpipe_t id, void *buffer, int bytes_to_read,
int *bytes_read, K_PIPE_OPTION options,
int32_t timeout)
{
size_t min_xfer = (size_t)options;
__ASSERT((options == _0_TO_N) ||
(options == _1_TO_N) ||
(options == _ALL_N), "Invalid pipe option");
*bytes_read = 0;
if (bytes_to_read == 0) {
return RC_FAIL;
}
if ((options == _0_TO_N) && (timeout != K_NO_WAIT)) {
return RC_FAIL;
}
if (options == _ALL_N) {
min_xfer = bytes_to_read;
}
return _error_to_rc(k_pipe_get(id, buffer, bytes_to_read,
(size_t *)bytes_read, min_xfer,
_ticks_to_ms(timeout)));
}
static inline int task_pipe_block_put(kpipe_t id, struct k_block block,
int size, ksem_t sema)
{
if (size == 0) {
return RC_FAIL;
}
k_pipe_block_put(id, &block, size, sema);
return RC_OK;
}
#define DEFINE_PIPE(name, pipe_buffer_size) \
K_PIPE_DEFINE(_k_pipe_obj_##name, pipe_buffer_size); \
struct k_pipe * const name = &_k_pipe_obj_##name
#define nano_fifo k_fifo
#define nano_fifo_init k_fifo_init
/* nanokernel fifos */
#define nano_fifo_put k_fifo_put
#define nano_isr_fifo_put k_fifo_put
#define nano_fiber_fifo_put k_fifo_put
#define nano_task_fifo_put k_fifo_put
#define nano_fifo_put_list k_fifo_put_list
#define nano_isr_fifo_put_list k_fifo_put_list
#define nano_fiber_fifo_put_list k_fifo_put_list
#define nano_task_fifo_put_list k_fifo_put_list
#define nano_fifo_put_slist k_fifo_put_slist
#define nano_isr_fifo_put_slist k_fifo_put_slist
#define nano_fiber_fifo_put_slist k_fifo_put_slist
#define nano_task_fifo_put_slist k_fifo_put_slist
static inline void *nano_fifo_get(struct nano_fifo *fifo,
int32_t timeout_in_ticks)
{
return k_fifo_get((struct k_fifo *)fifo,
_ticks_to_ms(timeout_in_ticks));
}
#define nano_isr_fifo_get nano_fifo_get
#define nano_fiber_fifo_get nano_fifo_get
#define nano_task_fifo_get nano_fifo_get
/* nanokernel lifos */
#define nano_lifo k_lifo
#define nano_lifo_init k_lifo_init
#define nano_lifo_put k_lifo_put
#define nano_isr_lifo_put k_lifo_put
#define nano_fiber_lifo_put k_lifo_put
#define nano_task_lifo_put k_lifo_put
static inline void *nano_lifo_get(struct nano_lifo *lifo,
int32_t timeout_in_ticks)
{
return k_lifo_get((struct k_lifo *)lifo,
_ticks_to_ms(timeout_in_ticks));
}
#define nano_isr_lifo_get nano_lifo_get
#define nano_fiber_lifo_get nano_lifo_get
#define nano_task_lifo_get nano_lifo_get
/* nanokernel stacks */
#define nano_stack k_stack
static inline void nano_stack_init(struct nano_stack *stack, uint32_t *data)
{
k_stack_init_with_buffer(stack, UINT_MAX, data);
}
#define nano_stack_push k_stack_push
#define nano_isr_stack_push k_stack_push
#define nano_fiber_stack_push k_stack_push
#define nano_task_stack_push k_stack_push
static inline int nano_stack_pop(struct nano_stack *stack, uint32_t *data,
int32_t timeout_in_ticks)
{
return k_stack_pop((struct k_stack *)stack, data,
_ticks_to_ms(timeout_in_ticks)) == 0 ? 1 : 0;
}
#define nano_isr_stack_pop nano_stack_pop
#define nano_fiber_stack_pop nano_stack_pop
#define nano_task_stack_pop nano_stack_pop
/* timers */
#define CONFIG_NUM_TIMER_PACKETS CONFIG_NUM_DYNAMIC_TIMERS
#define ktimer_t struct k_timer *
#define task_timer_alloc k_timer_alloc
#define task_timer_free k_timer_free
extern void task_timer_start(ktimer_t timer, int32_t duration,
int32_t period, ksem_t sema);
static inline void task_timer_restart(ktimer_t timer, int32_t duration,
int32_t period)
{
k_timer_restart(timer, _ticks_to_ms(duration), _ticks_to_ms(period));
}
#define nano_timer k_timer
#define nano_timer_init k_timer_init
static inline void nano_timer_start(struct nano_timer *timer, int ticks)
{
k_timer_start((struct k_timer *)timer, _ticks_to_ms(ticks), 0,
NULL, NULL, NULL, NULL);
}
#define nano_isr_timer_start nano_timer_start
#define nano_fiber_timer_start nano_timer_start
#define nano_task_timer_start nano_timer_start
static inline void *nano_timer_test(struct nano_timer *timer,
int32_t timeout_in_ticks)
{
void *data;
if (k_timer_test(timer, &data, _ticks_to_ms(timeout_in_ticks)) < 0) {
return NULL;
}
return data;
}
#define nano_isr_timer_test nano_timer_test
#define nano_fiber_timer_test nano_timer_test
#define nano_task_timer_test nano_timer_test
#define task_timer_stop k_timer_stop
#define nano_isr_timer_stop k_timer_stop
#define nano_fiber_timer_stop k_timer_stop
#define nano_task_timer_stop k_timer_stop
extern int32_t _ms_to_ticks(int32_t ms);
static inline int32_t nano_timer_ticks_remain(struct nano_timer *timer)
{
return _ms_to_ticks(k_timer_remaining_get(timer));
}
extern int64_t sys_tick_get(void);
extern uint32_t sys_tick_get_32(void);
extern int64_t sys_tick_delta(int64_t *reftime);
extern uint32_t sys_tick_delta_32(int64_t *reftime);
#define sys_cycle_get_32 k_cycle_get_32
/* floating point services */
#define fiber_float_enable k_float_enable
#define task_float_enable k_float_enable
#define fiber_float_disable k_float_disable
#define task_float_disable k_float_disable
#endif /* _legacy__h_ */

@@ -0,0 +1,3 @@
CONFIG_KERNEL_V2=y
CONFIG_MICROKERNEL=y
CONFIG_INIT_STACKS=y

kernel/unified/Kconfig (new file, 248 lines)

@@ -0,0 +1,248 @@
# Kconfig - nanokernel configuration options
#
# Copyright (c) 2014-2015 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
menu "Unified Kernel Options"
config KERNEL_V2_DEBUG
bool
prompt "Kernel V2 debug help"
default n
depends on KERNEL_V2
select INIT_STACKS
config NUM_COOP_PRIORITIES
int
prompt "Kernel V2: number of coop priorities"
default 16
help
Number of cooperative priorities configured in the system. Gives access
to priorities:
K_PRIO_COOP(0) to K_PRIO_COOP(CONFIG_NUM_COOP_PRIORITIES - 1)
or seen another way, priorities:
-CONFIG_NUM_COOP_PRIORITIES to -1
This can be set to zero to disable cooperative scheduling. Cooperative
threads always preempt preemptible threads.
Each priority requires an extra 8 bytes of RAM. If there are more than
32 total priorities, an extra 4 bytes is required.
config NUM_PREEMPT_PRIORITIES
int
prompt "Kernel V2: number of preemptible priorities"
default 15
help
Number of preemptible priorities available in the system. Gives access
to priorities 0 to CONFIG_NUM_PREEMPT_PRIORITIES - 1.
This can be set to 0 to disable preemptible scheduling.
The idle thread is always installed as a preemptible thread of the
lowest priority.
Each priority requires an extra 8 bytes of RAM. If there are more than
32 total priorities, an extra 4 bytes is required.
config PRIORITY_CEILING
int
prompt "Kernel V2: priority inheritance ceiling"
default 0
config BOOT_BANNER
bool
prompt "Boot banner"
default n
select PRINTK
depends on EARLY_CONSOLE
help
This option outputs a banner to the console device during boot up. It
also embeds a date & time stamp in the kernel and in each USAP image.
config BUILD_TIMESTAMP
bool
prompt "Build Timestamp"
help
Build timestamp and add it to the boot banner.
config INT_LATENCY_BENCHMARK
bool
prompt "Interrupt latency metrics [EXPERIMENTAL]"
default n
depends on ARCH="x86"
help
This option enables the tracking of interrupt latency metrics;
the exact set of metrics being tracked is board-dependent.
Tracking begins when int_latency_init() is invoked by an application.
The metrics are displayed (and a new sampling interval is started)
each time int_latency_show() is called thereafter.
config MAIN_THREAD_PRIORITY
int
prompt "Priority of initialization/main thread"
default 0
default -1 if NUM_PREEMPT_PRIORITIES = 0
help
Priority at which the initialization thread runs, including the start
of the main() function. main() can then change its priority if desired.
config MAIN_STACK_SIZE
int
prompt "Size of stack for initialization and main thread"
default 1024
help
When the initialization is complete, the thread executing it then
executes the main() routine, so as to reuse the stack used by the
initialization, which would be wasted RAM otherwise.
After initialization is complete, the thread runs main().
config ISR_STACK_SIZE
int
prompt "ISR and initialization stack size (in bytes)"
default 2048
help
This option specifies the size of the stack used by interrupt
service routines (ISRs), and during nanokernel initialization.
config THREAD_CUSTOM_DATA
bool
prompt "Task and fiber custom data"
default n
help
This option allows each task and fiber to store 32 bits of custom data,
which can be accessed using the sys_thread_custom_data_xxx() APIs.
config NANO_TIMEOUTS
bool
prompt "Enable timeouts on nanokernel objects"
default y
depends on SYS_CLOCK_EXISTS
help
Allow fibers and tasks to wait on nanokernel objects with a timeout, by
enabling the nano_xxx_wait_timeout APIs, and allow fibers to sleep for a
period of time, by enabling the fiber_sleep API.
config NANO_TIMERS
bool
prompt "Enable nanokernel timers"
default y
depends on SYS_CLOCK_EXISTS
help
Allow fibers and tasks to wait on nanokernel timers, which can be
accessed using the nano_timer_xxx() APIs.
config NUM_DYNAMIC_TIMERS
int
prompt "Number of timers available for dynamic allocation"
default 10
depends on NANO_TIMERS
help
Number of timers available for dynamic allocation via the
k_timer_alloc()/k_timer_free() API.
config NANOKERNEL_TICKLESS_IDLE_SUPPORTED
bool
default n
help
To be selected by an architecture if it does support tickless idle in
nanokernel systems.
config ERRNO
bool
prompt "Enable errno support"
default y
help
Enable per-thread errno in the kernel. Application and library code must
include errno.h provided by the C library (libc) to use the errno
symbol. The C library must access the per-thread errno via the
_get_errno() symbol.
config NANO_WORKQUEUE
bool "Enable nano workqueue support"
default y
help
Nano workqueues allow scheduling work items to be executed in a fiber
context. Typically such work items are scheduled from ISRs, when the
work cannot be executed in interrupt context.
config SYSTEM_WORKQUEUE
bool "Start a system workqueue"
default y
depends on NANO_WORKQUEUE
help
Start a system-wide nano_workqueue that can be used by any system
component.
config SYSTEM_WORKQUEUE_STACK_SIZE
int "System workqueue stack size"
default 1024
depends on SYSTEM_WORKQUEUE
config SYSTEM_WORKQUEUE_PRIORITY
int "System workqueue priority"
default -1
depends on SYSTEM_WORKQUEUE
config NUM_MBOX_ASYNC_MSGS
int ""
default 10
help
This option specifies the total number of asynchronous mailbox
messages that can exist simultaneously, across all mailboxes
in the system.
Setting this option to 0 disables support for asynchronous
mailbox messages.
config NUM_PIPE_ASYNC_MSGS
int "Maximum number of in-flight asynchronous pipe messages"
default 10
help
This option specifies the total number of asynchronous pipe
messages that can exist simultaneously, across all pipes in
the system.
Setting this option to 0 disables support for asynchronous
pipe messages.
config ATOMIC_OPERATIONS_BUILTIN
bool
help
Use the compiler builtin functions for atomic operations. This is
the preferred method. However, support for all arches in GCC is
incomplete.
config ATOMIC_OPERATIONS_CUSTOM
bool
help
Use when there isn't support for compiler built-ins, but you have
written optimized assembly code under arch/ which implements these.
config ATOMIC_OPERATIONS_C
bool
help
Use atomic operations routines that are implemented entirely
in C by locking interrupts. Selected by architectures which either
do not have support for atomic operations in their instruction
set, or haven't been implemented yet during bring-up, and also
the compiler does not have support for the atomic __sync_* builtins.
endmenu

kernel/unified/Makefile (new file, 43 lines)

@@ -0,0 +1,43 @@
ccflags-y += -I$(srctree)/kernel/unified/include
asflags-y := ${ccflags-y}
obj-y =
obj-y += $(strip \
sys_clock.o \
thread.o \
init.o \
sem.o \
version.o \
device.o \
thread_abort.o \
)
obj-y += $(strip \
sched.o \
mutex.o \
)
obj-y += $(strip \
lifo.o \
fifo.o \
stack.o \
mem_map.o \
msg_q.o \
mailbox.o \
mem_pool.o \
event.o \
pipes.o \
)
obj-$(CONFIG_INT_LATENCY_BENCHMARK) += int_latency_bench.o
obj-$(CONFIG_STACK_CANARIES) += compiler_stack_protect.o
obj-$(CONFIG_SYS_POWER_MANAGEMENT) += idle.o
obj-$(CONFIG_NANO_TIMERS) += timer.o
obj-$(CONFIG_KERNEL_EVENT_LOGGER) += event_logger.o
obj-$(CONFIG_KERNEL_EVENT_LOGGER) += kernel_event_logger.o
obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
obj-$(CONFIG_ATOMIC_OPERATIONS_C) += atomic_c.o
obj-$(CONFIG_ERRNO) += errno.o
obj-$(CONFIG_NANO_WORKQUEUE) += work_q.o
obj-y += legacy/

@@ -0,0 +1 @@
#include "../nanokernel/atomic_c.c"

@@ -0,0 +1 @@
#include "../nanokernel/compiler_stack_protect.c"

kernel/unified/device.c (new file, 1 line)

@@ -0,0 +1 @@
#include "../nanokernel/device.c"

kernel/unified/errno.c (new file, 7 lines)

@@ -0,0 +1,7 @@
#include "../nanokernel/errno.c"
/*
* Define _k_neg_eagain for use in assembly files as errno.h is
* not assembly language safe.
*/
const int _k_neg_eagain = -EAGAIN;

kernel/unified/event.c (new file, 78 lines)

@@ -0,0 +1,78 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief kernel events.
*/
#include <kernel.h>
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <atomic.h>
#include <toolchain.h>
#include <sections.h>
void _k_event_deliver(struct k_work *work)
{
struct k_event *event = CONTAINER_OF(work, struct k_event, work_item);
while (1) {
if ((event->handler)(event) == 0) {
/* do nothing -- handler has processed the event */
} else {
/* pend the event */
k_sem_give(&event->sem);
}
if (atomic_dec(&event->send_count) == 1) {
/* have finished delivering events */
break;
}
}
}
void k_event_init(struct k_event *event, k_event_handler_t handler)
{
const struct k_work my_work_item = { NULL, _k_event_deliver, { 1 } };
event->handler = handler;
event->send_count = ATOMIC_INIT(0);
event->work_item = my_work_item;
k_sem_init(&event->sem, 0, 1);
SYS_TRACING_OBJ_INIT(event, event);
}
void k_event_send(struct k_event *event)
{
if (event->handler == K_EVT_IGNORE) {
/* ignore the event */
} else if (event->handler == K_EVT_DEFAULT) {
/* pend the event */
k_sem_give(&event->sem);
} else {
/* deliver the event */
if (atomic_inc(&event->send_count) == 0) {
/* add event's work item to system work queue */
k_work_submit_to_queue(&k_sys_work_q,
&event->work_item);
}
}
}
int k_event_recv(struct k_event *event, int32_t timeout)
{
return k_sem_take(&event->sem, timeout);
}

@@ -0,0 +1 @@
#include "../nanokernel/event_logger.c"

kernel/unified/fifo.c (new file, 133 lines)

@@ -0,0 +1,133 @@
/*
* Copyright (c) 2010-2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
*
* @brief dynamic-size FIFO queue object.
*/
#include <kernel.h>
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <sched.h>
#include <misc/slist.h>
void k_fifo_init(struct k_fifo *fifo)
{
sys_slist_init(&fifo->data_q);
sys_dlist_init(&fifo->wait_q);
SYS_TRACING_OBJ_INIT(k_fifo, fifo);
}
static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
_timeout_abort(thread);
_ready_thread(thread);
_set_thread_return_value_with_data(thread, 0, data);
}
void k_fifo_put(struct k_fifo *fifo, void *data)
{
struct k_thread *first_pending_thread;
unsigned int key;
key = irq_lock();
first_pending_thread = _unpend_first_thread(&fifo->wait_q);
if (first_pending_thread) {
prepare_thread_to_run(first_pending_thread, data);
if (!_is_in_isr() && _must_switch_threads()) {
(void)_Swap(key);
return;
}
} else {
sys_slist_append(&fifo->data_q, data);
}
irq_unlock(key);
}
void k_fifo_put_list(struct k_fifo *fifo, void *head, void *tail)
{
__ASSERT(head && tail, "invalid head or tail");
struct k_thread *first_thread, *thread;
unsigned int key;
key = irq_lock();
first_thread = _peek_first_pending_thread(&fifo->wait_q);
while (head && ((thread = _unpend_first_thread(&fifo->wait_q)))) {
prepare_thread_to_run(thread, head);
head = *(void **)head;
}
if (head) {
sys_slist_append_list(&fifo->data_q, head, tail);
}
if (first_thread) {
if (!_is_in_isr() && _must_switch_threads()) {
(void)_Swap(key);
return;
}
}
irq_unlock(key);
}
void k_fifo_put_slist(struct k_fifo *fifo, sys_slist_t *list)
{
__ASSERT(!sys_slist_is_empty(list), "list must not be empty");
/*
* note: this works as long as:
* - the slist implementation keeps the next pointer as the first
* field of the node object type
* - list->tail->next = NULL.
*/
return k_fifo_put_list(fifo, list->head, list->tail);
}
void *k_fifo_get(struct k_fifo *fifo, int32_t timeout)
{
unsigned int key;
void *data;
key = irq_lock();
if (likely(!sys_slist_is_empty(&fifo->data_q))) {
data = sys_slist_get_not_empty(&fifo->data_q);
irq_unlock(key);
return data;
}
if (timeout == K_NO_WAIT) {
irq_unlock(key);
return NULL;
}
_pend_current_thread(&fifo->wait_q, timeout);
return _Swap(key) ? NULL : _current->swap_data;
}

kernel/unified/idle.c (new file, 1 line)

@@ -0,0 +1 @@
#include "../nanokernel/idle.c"

@@ -0,0 +1,94 @@
/*
* Copyright (c) 2010, 2012, 2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Macros to generate structure member offset definitions
*
* This header contains macros to allow a nanokernel implementation to
* generate absolute symbols whose values represents the member offsets for
* various nanokernel structures. These absolute symbols are typically
* utilized by assembly source files rather than hardcoding the values in
* some local header file.
*
* WARNING: Absolute symbols can potentially be utilized by external tools --
* for example, to locate a specific field within a data structure.
* Consequently, changes made to such symbols may require modifications to the
* associated tool(s). Typically, relocating a member of a structure merely
* requires that a tool be rebuilt; however, moving a member to another
* structure (or to a new sub-structure within an existing structure) may
* require that the tool itself be modified. Likewise, deleting, renaming, or
* changing the meaning of an absolute symbol may require modifications to a
* tool.
*
* The macro "GEN_OFFSET_SYM(structure, member)" is used to generate a single
* absolute symbol. The absolute symbol will appear in the object module
* generated from the source file that utilizes the GEN_OFFSET_SYM() macro.
* Absolute symbols representing a structure member offset have the following
* form:
*
* __<structure>_<member>_OFFSET
*
* This header also defines the GEN_ABSOLUTE_SYM macro to simply define an
* absolute symbol, irrespective of whether the value represents a structure
* or offset.
*
* The following sample file illustrates the usage of the macros available
* in this file:
*
* <START of sample source file: offsets.c>
*
* #include <gen_offset.h>
* /@ include struct definitions for which offsets symbols are to be
* generated @/
*
* #include <nano_private.h>
* GEN_ABS_SYM_BEGIN (_OffsetAbsSyms) /@ the name parameter is arbitrary @/
* /@ tNANO structure member offsets @/
*
* GEN_OFFSET_SYM (tNANO, fiber);
* GEN_OFFSET_SYM (tNANO, task);
* GEN_OFFSET_SYM (tNANO, current);
* GEN_OFFSET_SYM (tNANO, nested);
* GEN_OFFSET_SYM (tNANO, common_isp);
*
* GEN_ABSOLUTE_SYM (__tNANO_SIZEOF, sizeof(tNANO));
*
* GEN_ABS_SYM_END
* <END of sample source file: offsets.c>
*
* Compiling the sample offsets.c results in the following symbols in offsets.o:
*
* $ nm offsets.o
* 00000010 A __tNANO_common_isp_OFFSET
* 00000008 A __tNANO_current_OFFSET
* 0000000c A __tNANO_nested_OFFSET
* 00000000 A __tNANO_fiber_OFFSET
* 00000004 A __tNANO_task_OFFSET
*/
#ifndef _GEN_OFFSET_H
#define _GEN_OFFSET_H
#include <toolchain.h>
#include <stddef.h>
/* definition of the GEN_OFFSET_SYM() macros is toolchain independent */
#define GEN_OFFSET_SYM(S, M) \
GEN_ABSOLUTE_SYM(__##S##_##M##_##OFFSET, offsetof(S, M))
#endif /* _GEN_OFFSET_H */

@@ -0,0 +1,107 @@
/*
* Copyright (c) 2010-2012, 2014-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Architecture-independent private nanokernel APIs
*
* This file contains private nanokernel APIs that are not
* architecture-specific.
*/
#ifndef _NANO_INTERNAL__H_
#define _NANO_INTERNAL__H_
#ifdef CONFIG_KERNEL_V2
#define K_NUM_PRIORITIES \
(CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES + 1)
#endif
#ifndef _ASMLANGUAGE
#ifdef __cplusplus
extern "C" {
#endif
/* Early boot functions */
void _bss_zero(void);
#ifdef CONFIG_XIP
void _data_copy(void);
#else
static inline void _data_copy(void)
{
/* Do nothing */
}
#endif
FUNC_NORETURN void _Cstart(void);
/* helper type alias for thread control structure */
typedef struct tcs tTCS;
typedef void (*_thread_entry_t)(void *, void *, void *);
extern void _thread_entry(void (*)(void *, void *, void *),
void *, void *, void *);
extern void _new_thread(char *pStack, unsigned stackSize,
void *uk_task_ptr,
void (*pEntry)(void *, void *, void *),
void *p1, void *p2, void *p3,
int prio, unsigned options);
/* context switching and scheduling-related routines */
extern void _nano_fiber_ready(struct tcs *tcs);
extern void _nano_fiber_swap(void);
extern unsigned int _Swap(unsigned int);
/* set and clear essential fiber/task flag */
extern void _thread_essential_set(void);
extern void _thread_essential_clear(void);
/* clean up when a thread is aborted */
#if defined(CONFIG_THREAD_MONITOR)
extern void _thread_exit(struct tcs *tcs);
#else
#define _thread_exit(tcs) \
do {/* nothing */ \
} while (0)
#endif /* CONFIG_THREAD_MONITOR */
/* special nanokernel object APIs */
struct nano_lifo;
extern void *_nano_fiber_lifo_get_panic(struct nano_lifo *lifo);
#define _TASK_PENDQ_INIT(queue) do { } while (0)
#define _NANO_UNPEND_TASKS(queue) do { } while (0)
#define _TASK_NANO_UNPEND_TASKS(queue) do { } while (0)
#define _NANO_TASK_READY(tcs) do { } while (0)
#define _NANO_TIMER_TASK_READY(tcs) do { } while (0)
#define _IS_MICROKERNEL_TASK(tcs) (0)
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* _NANO_INTERNAL__H_ */

@@ -0,0 +1,66 @@
/* nano_offsets.h - nanokernel structure member offset definitions */
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <device.h>
#ifndef _NANO_OFFSETS__H_
#define _NANO_OFFSETS__H_
/*
* The final link step uses the symbol _OffsetAbsSyms to force the linkage of
* offsets.o into the ELF image.
*/
GEN_ABS_SYM_BEGIN(_OffsetAbsSyms)
/* arch-agnostic tNANO structure member offsets */
GEN_OFFSET_SYM(tNANO, current);
#if defined(CONFIG_THREAD_MONITOR)
GEN_OFFSET_SYM(tNANO, threads);
#endif
#ifdef CONFIG_FP_SHARING
GEN_OFFSET_SYM(tNANO, current_fp);
#endif
/* size of the entire tNANO structure */
GEN_ABSOLUTE_SYM(__tNANO_SIZEOF, sizeof(tNANO));
/* arch-agnostic struct tcs structure member offsets */
GEN_OFFSET_SYM(tTCS, prio);
GEN_OFFSET_SYM(tTCS, flags);
GEN_OFFSET_SYM(tTCS, coopReg); /* start of coop register set */
GEN_OFFSET_SYM(tTCS, preempReg); /* start of prempt register set */
#if defined(CONFIG_THREAD_MONITOR)
GEN_OFFSET_SYM(tTCS, next_thread);
#endif
GEN_OFFSET_SYM(tTCS, sched_locked);
/* size of the entire struct tcs structure */
GEN_ABSOLUTE_SYM(__tTCS_SIZEOF, sizeof(tTCS));
/* size of the device structure. Used by linker scripts */
GEN_ABSOLUTE_SYM(__DEVICE_STR_SIZEOF, sizeof(struct device));
#endif /* _NANO_OFFSETS__H_ */

@@ -0,0 +1,350 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _sched__h_
#define _sched__h_
#include <kernel.h>
#include <nano_private.h>
#include <atomic.h>
#include <misc/dlist.h>
extern k_tid_t const _main_thread;
extern k_tid_t const _idle_thread;
extern void _add_thread_to_ready_q(struct tcs *t);
extern void _remove_thread_from_ready_q(struct tcs *t);
extern void _reschedule_threads(int key);
extern void k_sched_unlock(void);
extern void _pend_thread(struct tcs *thread,
_wait_q_t *wait_q, int32_t timeout);
extern void _pend_current_thread(_wait_q_t *wait_q, int32_t timeout);
extern struct tcs *_get_next_ready_thread(void);
extern int __must_switch_threads(void);
extern void k_thread_priority_set(struct tcs *thread, int32_t priority);
extern int k_current_priority_get(void);
extern int32_t _ms_to_ticks(int32_t ms);
/*
* The _is_prio_higher family: I created this because higher priorities are
* lower numerically and I always found somewhat confusing seeing, e.g.:
*
* if (t1.prio < t2.prio) /# is t1's priority higher than t2's priority ? #/
*
* in code. And the fact that most of the time that kind of code has this
* exact comment warrants a function where it is embedded in the name.
*
* IMHO, feel free to remove them and do the comparison directly if this feels
* like overkill.
*/
static inline int _is_prio1_higher_than_prio2(int prio1, int prio2)
{
return prio1 < prio2;
}
static inline int _is_prio_higher(int prio, int test_prio)
{
return _is_prio1_higher_than_prio2(prio, test_prio);
}
static inline int _is_t1_higher_prio_than_t2(struct tcs *t1, struct tcs *t2)
{
return _is_prio1_higher_than_prio2(t1->prio, t2->prio);
}
static inline int _is_higher_prio_than_current(struct tcs *thread)
{
return _is_t1_higher_prio_than_t2(thread, _nanokernel.current);
}
/* is thread currently cooperative ? */
static inline int _is_coop(struct tcs *thread)
{
return thread->prio < 0;
}
/* is thread currently preemptible ? */
static inline int _is_preempt(struct tcs *thread)
{
return !_is_coop(thread);
}
/* is current thread preemptible and we are not running in ISR context */
static inline int _is_current_execution_context_preemptible(void)
{
return !_is_in_isr() && _is_preempt(_nanokernel.current);
}
/* find out if priority is under priority inheritance ceiling */
static inline int _is_under_prio_ceiling(int prio)
{
return prio >= CONFIG_PRIORITY_CEILING;
}
/*
* Find out what priority to set a thread to taking the prio ceiling into
* consideration.
*/
static inline int _get_new_prio_with_ceiling(int prio)
{
return _is_under_prio_ceiling(prio) ? prio : CONFIG_PRIORITY_CEILING;
}
/* find out the prio bitmap index for a given prio */
static inline int _get_ready_q_prio_bmap_index(int prio)
{
return (prio + CONFIG_NUM_COOP_PRIORITIES) >> 5;
}
/* find out the prio bit for a given prio */
static inline int _get_ready_q_prio_bit(int prio)
{
return (1 << ((prio + CONFIG_NUM_COOP_PRIORITIES) & 0x1f));
}
/* find out the ready queue array index for a given prio */
static inline int _get_ready_q_q_index(int prio)
{
return prio + CONFIG_NUM_COOP_PRIORITIES;
}
#if (K_NUM_PRIORITIES > 32)
#error not supported yet
#endif
/* find out the currently highest priority where a thread is ready to run */
/* interrupts must be locked */
static inline int _get_highest_ready_prio(void)
{
uint32_t ready = _nanokernel.ready_q.prio_bmap[0];
return find_lsb_set(ready) - 1 - CONFIG_NUM_COOP_PRIORITIES;
}
/*
* Checks if current thread must be context-switched out. The caller must
* already know that the execution context is a thread.
*/
static inline int _must_switch_threads(void)
{
return _is_preempt(_current) && __must_switch_threads();
}
/*
* Application API.
*
* lock the scheduler: prevents another thread from preempting the current one
* except if the current thread does an operation that causes it to pend
*
* Can be called recursively.
*/
static inline void k_sched_lock(void)
{
__ASSERT(!_is_in_isr(), "");
atomic_inc(&_nanokernel.current->sched_locked);
K_DEBUG("scheduler locked (%p:%d)\n",
_current, _current->sched_locked);
}
/**
* @brief Unlock the scheduler but do NOT reschedule
*
* It is incumbent upon the caller to ensure that the reschedule occurs
* sometime after the scheduler is unlocked.
*/
static inline void _sched_unlock_no_reschedule(void)
{
__ASSERT(!_is_in_isr(), "");
atomic_dec(&_nanokernel.current->sched_locked);
}
static inline void _set_thread_states(struct k_thread *thread, uint32_t states)
{
thread->flags |= states;
}
static inline void _reset_thread_states(struct k_thread *thread,
uint32_t states)
{
thread->flags &= ~states;
}
/* mark a thread as being suspended */
static inline void _mark_thread_as_suspended(struct tcs *thread)
{
thread->flags |= K_SUSPENDED;
}
/* mark a thread as not being suspended */
static inline void _mark_thread_as_not_suspended(struct tcs *thread)
{
thread->flags &= ~K_SUSPENDED;
}
/* mark a thread as being in the timer queue */
static inline void _mark_thread_as_timing(struct tcs *thread)
{
thread->flags |= K_TIMING;
}
/* mark a thread as not being in the timer queue */
static inline void _mark_thread_as_not_timing(struct tcs *thread)
{
thread->flags &= ~K_TIMING;
}
/* check if a thread is on the timer queue */
static inline int _is_thread_timing(struct tcs *thread)
{
return !!(thread->flags & K_TIMING);
}
static inline int _has_thread_started(struct tcs *thread)
{
return !(thread->flags & K_PRESTART);
}
/* check if a thread is ready */
static inline int _is_thread_ready(struct tcs *thread)
{
return (thread->flags & K_EXECUTION_MASK) == K_READY;
}
/* mark a thread as pending in its TCS */
static inline void _mark_thread_as_pending(struct tcs *thread)
{
thread->flags |= K_PENDING;
}
/* mark a thread as not pending in its TCS */
static inline void _mark_thread_as_not_pending(struct tcs *thread)
{
thread->flags &= ~K_PENDING;
}
/* check if a thread is pending */
static inline int _is_thread_pending(struct tcs *thread)
{
return !!(thread->flags & K_PENDING);
}
/*
* Mark the thread as not being in the timer queue. If this makes it ready,
* then add it to the ready queue according to its priority.
*/
/* must be called with interrupts locked */
static inline void _ready_thread(struct tcs *thread)
{
__ASSERT(_is_prio_higher(thread->prio, K_LOWEST_THREAD_PRIO) ||
((thread->prio == K_LOWEST_THREAD_PRIO) &&
(thread == _idle_thread)),
"thread %p prio too low (is %d, cannot be lower than %d)",
thread, thread->prio,
thread == _idle_thread ? K_LOWEST_THREAD_PRIO :
K_LOWEST_APPLICATION_THREAD_PRIO);
__ASSERT(!_is_prio_higher(thread->prio, K_HIGHEST_THREAD_PRIO),
"thread %p prio too high (id %d, cannot be higher than %d)",
thread, thread->prio, K_HIGHEST_THREAD_PRIO);
/* K_PRESTART is needed to handle the start-with-delay case */
_reset_thread_states(thread, K_TIMING|K_PRESTART);
if (_is_thread_ready(thread)) {
_add_thread_to_ready_q(thread);
}
}
/**
* @brief Mark a thread as started
*
* This routine must be called with interrupts locked.
*/
static inline void _mark_thread_as_started(struct tcs *thread)
{
thread->flags &= ~K_PRESTART;
}
/**
* @brief Mark thread as dead
*
* This routine must be called with interrupts locked.
*/
static inline void _mark_thread_as_dead(struct tcs *thread)
{
thread->flags |= K_DEAD;
}
/*
* Application API.
*
* Get a thread's priority. Note that it might have changed by the time this
* function returns.
*/
static inline int32_t k_thread_priority_get(struct tcs *thread)
{
return thread->prio;
}
/*
* Set a thread's priority. If the thread is ready, place it in the correct
* queue.
*/
/* must be called with interrupts locked */
static inline void _thread_priority_set(struct tcs *thread, int prio)
{
if (_is_thread_ready(thread)) {
_remove_thread_from_ready_q(thread);
thread->prio = prio;
_add_thread_to_ready_q(thread);
} else {
thread->prio = prio;
}
}
/* peek at the first thread pending on a wait queue, without unpending it */
static inline struct k_thread *_peek_first_pending_thread(_wait_q_t *wait_q)
{
return (struct k_thread *)sys_dlist_peek_head(wait_q);
}
/* unpend the first thread from a wait queue */
static inline struct tcs *_unpend_first_thread(_wait_q_t *wait_q)
{
struct k_thread *thread = (struct k_thread *)sys_dlist_get(wait_q);
if (thread) {
_mark_thread_as_not_pending(thread);
}
return thread;
}
/* Unpend a thread from the wait queue it is on. Thread must be pending. */
/* must be called with interrupts locked */
static inline void _unpend_thread(struct k_thread *thread)
{
__ASSERT(thread->flags & K_PENDING, "");
sys_dlist_remove(&thread->k_q_node);
_mark_thread_as_not_pending(thread);
}
#endif /* _sched__h_ */

@@ -0,0 +1,295 @@
/** @file
* @brief timeout queue for fibers on nanokernel objects
*
* This file is meant to be included by nanokernel/include/wait_q.h only
*/
/*
* Copyright (c) 2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _kernel_nanokernel_include_timeout_q__h_
#define _kernel_nanokernel_include_timeout_q__h_
#include <misc/dlist.h>
#ifdef __cplusplus
extern "C" {
#endif
static inline int _do_timeout_abort(struct _timeout *t);
static inline void _do_timeout_add(struct tcs *tcs,
struct _timeout *t,
_wait_q_t *wait_q,
int32_t timeout);
#if defined(CONFIG_NANO_TIMEOUTS)
/* initialize the nano timeouts part of TCS when enabled in the kernel */
static inline void _timeout_init(struct _timeout *t, _timeout_func_t func)
{
/*
* Must be initialized here and when dequeueing a timeout so that code
* not dealing with timeouts does not have to handle this, such as when
* waiting forever on a semaphore.
*/
t->delta_ticks_from_prev = -1;
/*
* Must be initialized here so that the _fiber_wakeup family of APIs can
* verify the fiber is not on a wait queue before aborting a timeout.
*/
t->wait_q = NULL;
/*
* Must be initialized here, so the _timeout_handle_one_timeout()
* routine can check if there is a fiber waiting on this timeout
*/
t->tcs = NULL;
/*
* Function must be initialized before being potentially called.
*/
t->func = func;
/*
* These are initialized when enqueing on the timeout queue:
*
* tcs->timeout.node.next
* tcs->timeout.node.prev
*/
}
static inline void _timeout_tcs_init(struct tcs *tcs)
{
_timeout_init(&tcs->timeout, NULL);
}
/*
* XXX - backwards compatibility until the arch part is updated to call
* _timeout_tcs_init()
*/
static inline void _nano_timeout_tcs_init(struct tcs *tcs)
{
_timeout_tcs_init(tcs);
}
/**
* @brief Remove the thread from nanokernel object wait queue
*
* If a thread waits on a nanokernel object with timeout,
* remove the thread from the wait queue
*
* @param tcs Waiting thread
* @param t nano timer
*
* @return N/A
*/
static inline void _timeout_object_dequeue(struct tcs *tcs, struct _timeout *t)
{
if (t->wait_q) {
_timeout_remove_tcs_from_wait_q(tcs);
}
}
/* abort a timeout for a specified fiber */
static inline int _timeout_abort(struct tcs *tcs)
{
return _do_timeout_abort(&tcs->timeout);
}
/* put a fiber on the timeout queue and record its wait queue */
static inline void _timeout_add(struct tcs *tcs, _wait_q_t *wait_q,
int32_t timeout)
{
_do_timeout_add(tcs, &tcs->timeout, wait_q, timeout);
}
#else
#define _timeout_object_dequeue(tcs, t) do { } while (0)
#endif /* CONFIG_NANO_TIMEOUTS */
/*
* Handle one expired timeout.
* This removes the fiber from the timeout queue head, and also removes it
* from the wait queue it is on if waiting for an object. In that case, it
* also sets the return value to 0/NULL.
*/
/* must be called with interrupts locked */
static inline struct _timeout *_timeout_handle_one_timeout(
sys_dlist_t *timeout_q)
{
struct _timeout *t = (void *)sys_dlist_get(timeout_q);
struct tcs *tcs = t->tcs;
K_DEBUG("timeout %p\n", t);
if (tcs != NULL) {
_timeout_object_dequeue(tcs, t);
_ready_thread(tcs);
} else if (t->func) {
t->func(t);
}
/*
* Note: t->func() may add timeout again. Make sure that
* delta_ticks_from_prev is set to -1 only if timeout is
* still expired (delta_ticks_from_prev == 0)
*/
if (t->delta_ticks_from_prev == 0) {
t->delta_ticks_from_prev = -1;
}
return (struct _timeout *)sys_dlist_peek_head(timeout_q);
}
/* loop over all expired timeouts and handle them one by one */
/* must be called with interrupts locked */
static inline void _timeout_handle_timeouts(void)
{
sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
struct _timeout *next;
next = (struct _timeout *)sys_dlist_peek_head(timeout_q);
while (next && next->delta_ticks_from_prev == 0) {
next = _timeout_handle_one_timeout(timeout_q);
}
}
/**
*
* @brief abort a timeout
*
* @param t Timeout to abort
*
* @return 0 in success and -1 if the timer has expired
*/
static inline int _do_timeout_abort(struct _timeout *t)
{
sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
if (-1 == t->delta_ticks_from_prev) {
return -1;
}
if (!sys_dlist_is_tail(timeout_q, &t->node)) {
struct _timeout *next =
(struct _timeout *)sys_dlist_peek_next(timeout_q,
&t->node);
next->delta_ticks_from_prev += t->delta_ticks_from_prev;
}
sys_dlist_remove(&t->node);
t->delta_ticks_from_prev = -1;
return 0;
}
static inline int _nano_timer_timeout_abort(struct _timeout *t)
{
return _do_timeout_abort(t);
}
/*
* callback for sys_dlist_insert_at():
*
* Returns 1 if the timeout to insert is lower than or equal to the next
* timeout in the queue, signifying that it should be inserted before the
* next one. Returns 0 if it is greater.
*
* If it is greater, the timeout to insert is decremented by the next timeout,
* since the timeout queue is a delta queue. If it is lower or equal, the
* timeout of the insert point is decremented to update its delta queue value,
* since the current timeout will be inserted before it.
*/
static int _timeout_insert_point_test(sys_dnode_t *test, void *timeout)
{
struct _timeout *t = (void *)test;
int32_t *timeout_to_insert = timeout;
if (*timeout_to_insert > t->delta_ticks_from_prev) {
*timeout_to_insert -= t->delta_ticks_from_prev;
return 0;
}
t->delta_ticks_from_prev -= *timeout_to_insert;
return 1;
}
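/*
 * Illustration only (not part of this file): a minimal standalone sketch of
 * the delta encoding that the insert-point test above maintains. Absolute
 * expiries of 5, 8 and 12 ticks end up stored as deltas of 5, 3 and 4, so
 * the tick handler only ever has to decrement the head entry.
 */
#include <stdio.h>

int main(void)
{
	int absolute[] = { 5, 8, 12 };	/* expiries, in ticks from now */
	int delta[3];
	int prev = 0;

	for (int i = 0; i < 3; i++) {
		delta[i] = absolute[i] - prev;	/* ticks relative to previous entry */
		prev = absolute[i];
	}

	printf("%d %d %d\n", delta[0], delta[1], delta[2]);	/* prints: 5 3 4 */
	return 0;
}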
/**
*
* @brief Put timeout on the timeout queue, record waiting fiber and wait queue
*
* @param tcs Fiber waiting on a timeout
* @param t Timeout structure to be added to the nanokernel queue
* @param wait_q Nanokernel object wait queue
* @param timeout Timeout in ticks
*
* @return N/A
*/
static inline void _do_timeout_add(struct tcs *tcs, struct _timeout *t,
_wait_q_t *wait_q, int32_t timeout)
{
K_DEBUG("thread %p on wait_q %p, for timeout: %d\n",
tcs, wait_q, timeout);
sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
K_DEBUG("timeout_q %p before: head: %p, tail: %p\n",
&_nanokernel.timeout_q,
sys_dlist_peek_head(&_nanokernel.timeout_q),
_nanokernel.timeout_q.tail);
K_DEBUG("timeout %p before: next: %p, prev: %p\n",
t, t->node.next, t->node.prev);
t->tcs = tcs;
t->delta_ticks_from_prev = timeout;
t->wait_q = (sys_dlist_t *)wait_q;
sys_dlist_insert_at(timeout_q, (void *)t,
_timeout_insert_point_test,
&t->delta_ticks_from_prev);
K_DEBUG("timeout_q %p after: head: %p, tail: %p\n",
&_nanokernel.timeout_q,
sys_dlist_peek_head(&_nanokernel.timeout_q),
_nanokernel.timeout_q.tail);
K_DEBUG("timeout %p after: next: %p, prev: %p\n",
t, t->node.next, t->node.prev);
}
static inline void _nano_timer_timeout_add(struct _timeout *t,
_wait_q_t *wait_q,
int32_t timeout)
{
_do_timeout_add(NULL, t, wait_q, timeout);
}
/* find the closest deadline in the timeout queue */
static inline uint32_t _nano_get_earliest_timeouts_deadline(void)
{
sys_dlist_t *q = &_nanokernel.timeout_q;
struct _timeout *t =
(struct _timeout *)sys_dlist_peek_head(q);
return t ? min((uint32_t)t->delta_ticks_from_prev,
(uint32_t)_nanokernel.task_timeout)
: (uint32_t)_nanokernel.task_timeout;
}
#ifdef __cplusplus
}
#endif
#endif /* _kernel_nanokernel_include_timeout_q__h_ */


@ -0,0 +1,134 @@
/* wait queue for multiple fibers on nanokernel objects */
/*
* Copyright (c) 2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _kernel_nanokernel_include_wait_q__h_
#define _kernel_nanokernel_include_wait_q__h_
#include <nano_private.h>
#ifdef CONFIG_KERNEL_V2
#include <misc/dlist.h>
#include <sched.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
#if 0
/* reset a wait queue, call during operation */
static inline void _nano_wait_q_reset(struct _nano_queue *wait_q)
{
sys_dlist_init((sys_dlist_t *)wait_q);
}
/* initialize a wait queue: call only during object initialization */
static inline void _nano_wait_q_init(struct _nano_queue *wait_q)
{
_nano_wait_q_reset(wait_q);
}
/*
* Remove first fiber from a wait queue and put it on the ready queue, knowing
* that the wait queue is not empty.
*/
static inline
struct tcs *_nano_wait_q_remove_no_check(struct _nano_queue *wait_q)
{
struct tcs *tcs = (struct tcs *)sys_dlist_get((sys_dlist_t *)wait_q);
_ready_thread(tcs);
return tcs;
}
/*
* Remove first fiber from a wait queue and put it on the ready queue.
* Abort and return NULL if the wait queue is empty.
*/
static inline struct tcs *_nano_wait_q_remove(struct _nano_queue *wait_q)
{
return _nano_wait_q_remove_no_check(wait_q);
}
/* put current fiber on specified wait queue */
static inline void _nano_wait_q_put(struct _nano_queue *wait_q)
{
/* unused */
}
#endif
#if defined(CONFIG_NANO_TIMEOUTS)
static inline void _timeout_remove_tcs_from_wait_q(struct tcs *tcs)
{
_unpend_thread(tcs);
tcs->timeout.wait_q = NULL;
}
#include <timeout_q.h>
#define _TIMEOUT_TICK_GET() sys_tick_get()
#define _TIMEOUT_ADD(thread, pq, ticks) \
do { \
if ((ticks) != TICKS_UNLIMITED) { \
_timeout_add(thread, pq, ticks); \
} \
} while (0)
#define _TIMEOUT_SET_TASK_TIMEOUT(ticks) \
_nanokernel.task_timeout = (ticks)
#define _TIMEOUT_UPDATE(timeout, limit, cur_ticks) \
do { \
if ((timeout) != TICKS_UNLIMITED) { \
(timeout) = (int32_t)((limit) - (cur_ticks)); \
} \
} while (0)
#elif defined(CONFIG_NANO_TIMERS)
#include <timeout_q.h>
#define _timeout_tcs_init(tcs) do { } while ((0))
#define _timeout_abort(tcs) do { } while ((0))
#define _TIMEOUT_TICK_GET() 0
#define _TIMEOUT_ADD(thread, pq, ticks) do { } while (0)
#define _TIMEOUT_SET_TASK_TIMEOUT(ticks) do { } while ((0))
#define _TIMEOUT_UPDATE(timeout, limit, cur_ticks) do { } while (0)
#else
#define _timeout_tcs_init(tcs) do { } while ((0))
#define _timeout_abort(tcs) do { } while ((0))
#define _nano_get_earliest_timeouts_deadline() \
((uint32_t)TICKS_UNLIMITED)
#define _TIMEOUT_TICK_GET() 0
#define _TIMEOUT_ADD(thread, pq, ticks) do { } while (0)
#define _TIMEOUT_SET_TASK_TIMEOUT(ticks) do { } while ((0))
#define _TIMEOUT_UPDATE(timeout, limit, cur_ticks) do { } while (0)
#endif
#define _NANO_OBJECT_WAIT(queue, data, timeout, key) \
do { \
_TIMEOUT_SET_TASK_TIMEOUT(timeout); \
nano_cpu_atomic_idle(key); \
key = irq_lock(); \
} while (0)
#ifdef __cplusplus
}
#endif
#endif /* _kernel_nanokernel_include_wait_q__h_ */

kernel/unified/init.c

@ -0,0 +1,383 @@
/*
* Copyright (c) 2010-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Nanokernel initialization module
*
* This module contains routines that are used to initialize the nanokernel.
*/
#include <offsets.h>
#include <kernel.h>
#include <misc/printk.h>
#include <drivers/rand32.h>
#include <sections.h>
#include <toolchain.h>
#include <nano_private.h>
#include <device.h>
#include <init.h>
#include <linker-defs.h>
#include <sched.h>
/* kernel build timestamp items */
#define BUILD_TIMESTAMP "BUILD: " __DATE__ " " __TIME__
#ifdef CONFIG_BUILD_TIMESTAMP
const char * const build_timestamp = BUILD_TIMESTAMP;
#endif
/* boot banner items */
#define BOOT_BANNER "BOOTING ZEPHYR OS"
#if !defined(CONFIG_BOOT_BANNER)
#define PRINT_BOOT_BANNER() do { } while (0)
#elif !defined(CONFIG_BUILD_TIMESTAMP)
#define PRINT_BOOT_BANNER() printk("***** " BOOT_BANNER " *****\n")
#else
#define PRINT_BOOT_BANNER() \
printk("***** " BOOT_BANNER " - %s *****\n", build_timestamp)
#endif
/* boot time measurement items */
#ifdef CONFIG_BOOT_TIME_MEASUREMENT
uint64_t __noinit __start_tsc; /* timestamp when kernel starts */
uint64_t __noinit __main_tsc; /* timestamp when main task starts */
uint64_t __noinit __idle_tsc; /* timestamp when CPU goes idle */
#endif
/* random number generator items */
#if defined(CONFIG_TEST_RANDOM_GENERATOR) || \
defined(CONFIG_CUSTOM_RANDOM_GENERATOR)
#define RAND32_INIT() sys_rand32_init()
#else
#define RAND32_INIT()
#endif
/* init/main and idle threads */
#define IDLE_STACK_SIZE 256
#if CONFIG_MAIN_STACK_SIZE & (STACK_ALIGN - 1)
#error "MAIN_STACK_SIZE must be a multiple of the stack alignment"
#endif
#if IDLE_STACK_SIZE & (STACK_ALIGN - 1)
#error "IDLE_STACK_SIZE must be a multiple of the stack alignment"
#endif
static char __noinit __stack main_stack[CONFIG_MAIN_STACK_SIZE];
static char __noinit __stack idle_stack[IDLE_STACK_SIZE];
k_tid_t const _main_thread = (k_tid_t)main_stack;
k_tid_t const _idle_thread = (k_tid_t)idle_stack;
/*
* storage space for the interrupt stack
*
* Note: This area is used as the system stack during nanokernel initialization,
* since the nanokernel hasn't yet set up its own stack areas. The dual
* purposing of this area is safe since interrupts are disabled until the
* nanokernel context switches to the background (or idle) task.
*/
#if CONFIG_ISR_STACK_SIZE & (STACK_ALIGN - 1)
#error "ISR_STACK_SIZE must be a multiple of the stack alignment"
#endif
char __noinit __stack _interrupt_stack[CONFIG_ISR_STACK_SIZE];
#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
#include <misc/dlist.h>
#define initialize_timeouts() do { \
sys_dlist_init(&_nanokernel.timeout_q); \
_nanokernel.task_timeout = TICKS_UNLIMITED; \
} while ((0))
#else
#define initialize_timeouts() do { } while ((0))
#endif
/**
*
* @brief Clear BSS
*
* This routine clears the BSS region, so all bytes are 0.
*
* @return N/A
*/
void _bss_zero(void)
{
uint32_t *pos = (uint32_t *)&__bss_start;
for ( ; pos < (uint32_t *)&__bss_end; pos++) {
*pos = 0;
}
}
#ifdef CONFIG_XIP
/**
*
* @brief Copy the data section from ROM to RAM
*
* This routine copies the data section from ROM to RAM.
*
* @return N/A
*/
void _data_copy(void)
{
uint32_t *pROM, *pRAM;
pROM = (uint32_t *)&__data_rom_start;
pRAM = (uint32_t *)&__data_ram_start;
for ( ; pRAM < (uint32_t *)&__data_ram_end; pROM++, pRAM++) {
*pRAM = *pROM;
}
}
#endif
/**
*
* @brief Mainline for nanokernel's background task
*
* This routine completes kernel initialization by invoking the remaining
* init functions, then invokes application's main() routine.
*
* @return N/A
*/
static void _main(void *unused1, void *unused2, void *unused3)
{
ARG_UNUSED(unused1);
ARG_UNUSED(unused2);
ARG_UNUSED(unused3);
_sys_device_do_config_level(_SYS_INIT_LEVEL_SECONDARY);
_sys_device_do_config_level(_SYS_INIT_LEVEL_NANOKERNEL);
_sys_device_do_config_level(_SYS_INIT_LEVEL_APPLICATION);
#ifdef CONFIG_CPLUSPLUS
/* Process the .ctors and .init_array sections */
extern void __do_global_ctors_aux(void);
extern void __do_init_array_aux(void);
__do_global_ctors_aux();
__do_init_array_aux();
#endif
_init_static_threads();
_main_thread->flags &= ~ESSENTIAL;
extern void main(void);
main();
}
void __weak main(void)
{
/* NOP default main() if the application does not provide one. */
}
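/*
 * Hypothetical application code (not part of this file): because main() above
 * is a weak symbol, an application overrides it simply by defining its own.
 */
#include <misc/printk.h>

void main(void)
{
	printk("application main() running as the main thread\n");
}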
static void idle(void *unused1, void *unused2, void *unused3)
{
ARG_UNUSED(unused1);
ARG_UNUSED(unused2);
ARG_UNUSED(unused3);
for (;;) {
nano_cpu_idle();
if (_is_coop(_current)) {
k_yield();
}
}
}
/**
*
* @brief Initializes nanokernel data structures
*
* This routine initializes various nanokernel data structures, including
* the background (or idle) task and any architecture-specific initialization.
*
* Note that all fields of "_nanokernel" are set to zero on entry, which may
* be all the initialization many of them require.
*
* @return N/A
*/
static void nano_init(struct tcs *dummy_thread)
{
/*
* Initialize the current execution thread to permit a level of
* debugging output if an exception should happen during nanokernel
* initialization. However, don't waste effort initializing the
* fields of the dummy thread beyond those needed to identify it as a
* dummy thread.
*/
_current = dummy_thread;
/*
* Do not insert dummy execution context in the list of fibers, so
* that it does not get scheduled back in once context-switched out.
*/
dummy_thread->flags = ESSENTIAL;
dummy_thread->prio = K_PRIO_COOP(0);
/* _nanokernel.ready_q is all zeroes */
/*
* The interrupt library needs to be initialized early since a series
* of handlers are installed into the interrupt table to catch
* spurious interrupts. This must be performed before other nanokernel
subsystems install bona fide handlers, or before hardware device
* drivers are initialized.
*/
_IntLibInit();
/* ready the init/main and idle threads */
for (int ii = 0; ii < K_NUM_PRIORITIES; ii++) {
sys_dlist_init(&_nanokernel.ready_q.q[ii]);
}
_new_thread(main_stack, CONFIG_MAIN_STACK_SIZE, NULL,
_main, NULL, NULL, NULL,
CONFIG_MAIN_THREAD_PRIORITY, ESSENTIAL);
_mark_thread_as_started(_main_thread);
_add_thread_to_ready_q(_main_thread);
_new_thread(idle_stack, IDLE_STACK_SIZE, NULL,
idle, NULL, NULL, NULL,
K_LOWEST_THREAD_PRIO, ESSENTIAL);
_mark_thread_as_started(_idle_thread);
_add_thread_to_ready_q(_idle_thread);
initialize_timeouts();
/* perform any architecture-specific initialization */
nanoArchInit();
/* handle any kernel objects that require run-time initialization */
_k_mem_map_init();
_k_mbox_init();
_k_dyamic_timer_init();
_k_pipes_init();
}
#ifdef CONFIG_STACK_CANARIES
/**
*
* @brief Initialize the kernel's stack canary
*
* This macro initializes the kernel's stack canary global variable,
* __stack_chk_guard, with a random value.
*
* INTERNAL
* Depending upon the compiler, modifying __stack_chk_guard directly at runtime
* may generate a build error. In-line assembly is used as a workaround.
*/
extern void *__stack_chk_guard;
#if defined(CONFIG_X86)
#define _MOVE_INSTR "movl "
#elif defined(CONFIG_ARM)
#define _MOVE_INSTR "str "
#elif defined(CONFIG_ARC)
#define _MOVE_INSTR "st "
#else
#error "Unknown Architecture type"
#endif /* CONFIG_X86 */
#define STACK_CANARY_INIT() \
do { \
register void *tmp; \
tmp = (void *)sys_rand32_get(); \
__asm__ volatile(_MOVE_INSTR "%1, %0;\n\t" \
: "=m"(__stack_chk_guard) \
: "r"(tmp)); \
} while (0)
#else /* !CONFIG_STACK_CANARIES */
#define STACK_CANARY_INIT()
#endif /* CONFIG_STACK_CANARIES */
/**
*
* @brief Initialize nanokernel
*
* This routine is invoked when the system is ready to run C code. The
* processor must be running in 32-bit mode, and the BSS must have been
* cleared/zeroed.
*
* @return Does not return
*/
FUNC_NORETURN void _Cstart(void)
{
/* floating point operations are NOT performed during nanokernel init */
char dummyTCS[__tTCS_NOFLOAT_SIZEOF];
/*
* Initialize nanokernel data structures. This step includes
* initializing the interrupt subsystem, which must be performed
* before the hardware initialization phase.
*/
nano_init((struct tcs *)&dummyTCS);
/* perform basic hardware initialization */
_sys_device_do_config_level(_SYS_INIT_LEVEL_PRIMARY);
/*
* Initialize random number generator
* As a platform may implement it in hardware, it has to be
* initialized after rest of hardware initialization and
* before stack canaries that use it
*/
RAND32_INIT();
/* initialize stack canaries */
STACK_CANARY_INIT();
/* display boot banner */
PRINT_BOOT_BANNER();
/*
* Context switch to main task (entry function is _main()): the
* current fake thread is not on a wait queue or ready queue, so it
* will never be rescheduled in.
*/
_Swap(irq_lock());
/*
* Compiler can't tell that the above routines won't return and issues
* a warning unless we explicitly tell it that control never gets this
* far.
*/
CODE_UNREACHABLE;
}


@ -0,0 +1 @@
#include "../nanokernel/int_latency_bench.c"


@ -0,0 +1 @@
#include "../nanokernel/kernel_event_logger.c"


@ -0,0 +1,9 @@
ccflags-y += -I$(srctree)/kernel/unified/include
asflags-y := ${ccflags-y}
obj-y =
obj-y += $(strip \
)
obj-$(CONFIG_NANO_TIMERS) += timer_legacy.o


@ -0,0 +1,37 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <kernel.h>
void task_timer_start(ktimer_t timer, int32_t duration,
int32_t period, ksem_t sema)
{
if (duration < 0 || period < 0 || (duration == 0 && period == 0)) {
int key = irq_lock();
if (timer->timeout.delta_ticks_from_prev != -1) {
k_timer_stop(timer);
}
irq_unlock(key);
return;
}
k_timer_start(timer, _ticks_to_ms(duration),
_ticks_to_ms(period),
(void(*)(void *))k_sem_give, sema, NULL, NULL);
}
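/*
 * Hypothetical usage sketch (not part of this file): the legacy wrapper above
 * treats a non-positive duration and period as a request to stop the timer.
 * 'timer' and 'sema' are assumed to come from the legacy allocation APIs.
 */
static void legacy_timer_example(ktimer_t timer, ksem_t sema)
{
	/* one-shot: give 'sema' once, 100 ticks from now */
	task_timer_start(timer, 100, 0, sema);

	/* duration == 0 && period == 0: stops the timer if it is running */
	task_timer_start(timer, 0, 0, sema);
}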

kernel/unified/lifo.c

@ -0,0 +1,88 @@
/*
* Copyright (c) 2010-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file
*
* @brief dynamic-size LIFO queue object
*/
#include <kernel.h>
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <sched.h>
void k_lifo_init(struct k_lifo *lifo)
{
lifo->list = (void *)0;
sys_dlist_init(&lifo->wait_q);
SYS_TRACING_OBJ_INIT(k_lifo, lifo);
}
void k_lifo_put(struct k_lifo *lifo, void *data)
{
struct k_thread *first_pending_thread;
unsigned int key;
key = irq_lock();
first_pending_thread = _unpend_first_thread(&lifo->wait_q);
if (first_pending_thread) {
_timeout_abort(first_pending_thread);
_ready_thread(first_pending_thread);
_set_thread_return_value_with_data(first_pending_thread,
0, data);
if (!_is_in_isr() && _must_switch_threads()) {
(void)_Swap(key);
return;
}
} else {
*(void **)data = lifo->list;
lifo->list = data;
}
irq_unlock(key);
}
void *k_lifo_get(struct k_lifo *lifo, int32_t timeout)
{
unsigned int key;
void *data;
key = irq_lock();
if (likely(lifo->list)) {
data = lifo->list;
lifo->list = *(void **)data;
irq_unlock(key);
return data;
}
if (timeout == K_NO_WAIT) {
irq_unlock(key);
return NULL;
}
_pend_current_thread(&lifo->wait_q, timeout);
return _Swap(key) ? NULL : _current->swap_data;
}
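/*
 * Hypothetical usage sketch (not part of this file): as k_lifo_put() above
 * shows, the first word of every queued item is reserved for use as the
 * LIFO's link field.
 */
#include <kernel.h>

struct my_item {
	void *lifo_reserved;	/* first word: used internally by the LIFO */
	int payload;
};

static struct k_lifo my_lifo;
static struct my_item item = { .payload = 42 };

static void lifo_example(void)
{
	k_lifo_init(&my_lifo);
	k_lifo_put(&my_lifo, &item);

	/* returns &item; on an empty LIFO, K_NO_WAIT returns NULL immediately */
	struct my_item *got = k_lifo_get(&my_lifo, K_NO_WAIT);

	(void)got;
}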

kernel/unified/mailbox.c

@ -0,0 +1,651 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @brief Mailboxes.
*/
#include <kernel.h>
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <toolchain.h>
#include <sections.h>
#include <string.h>
#include <wait_q.h>
#include <misc/dlist.h>
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/* asynchronous message descriptor type */
struct k_mbox_async {
struct tcs_base thread; /* dummy thread object */
struct k_mbox_msg tx_msg; /* transmit message descriptor */
};
/* array of asynchronous message descriptors */
static struct k_mbox_async __noinit async_msg[CONFIG_NUM_MBOX_ASYNC_MSGS];
/* stack of unused asynchronous message descriptors */
K_STACK_DEFINE(async_msg_free, CONFIG_NUM_MBOX_ASYNC_MSGS);
/**
* @brief Create pool of asynchronous message descriptors.
*
* A dummy thread requires minimal initialization, since it never actually
* gets to execute. The K_DUMMY flag is sufficient to distinguish a dummy
* thread from a real one. The threads are *not* added to the kernel's list of
* known threads.
*
* Once initialized, the address of each descriptor is added to a stack
* that governs access to them.
*
* @return N/A
*/
void _k_mbox_init(void)
{
int i;
for (i = 0; i < CONFIG_NUM_MBOX_ASYNC_MSGS; i++) {
async_msg[i].thread.flags = K_DUMMY;
k_stack_push(async_msg_free, (uint32_t)&async_msg[i]);
}
}
/**
* @brief Allocate an asynchronous message descriptor.
*
* @param async Address of area to hold the descriptor pointer.
*
* @return N/A.
*/
static inline void _mbox_async_alloc(struct k_mbox_async **async)
{
k_stack_pop(async_msg_free, (uint32_t *)async, K_FOREVER);
}
/**
* @brief Free an asynchronous message descriptor.
*
* @param async Descriptor pointer.
*/
static inline void _mbox_async_free(struct k_mbox_async *async)
{
k_stack_push(async_msg_free, (uint32_t)async);
}
#endif
/**
* @brief Initialize a mailbox.
*
* @return N/A
*/
void k_mbox_init(struct k_mbox *mbox_ptr)
{
sys_dlist_init(&mbox_ptr->tx_msg_queue);
sys_dlist_init(&mbox_ptr->rx_msg_queue);
SYS_TRACING_OBJ_INIT(mbox, mbox_ptr);
}
/**
* @brief Check compatibility of sender's and receiver's message descriptors.
*
* Compares sender's and receiver's message descriptors to see if they are
* compatible. If so, the descriptor fields are updated to reflect that a
* match has occurred.
*
* @param tx_msg Pointer to transmit message descriptor.
* @param rx_msg Pointer to receive message descriptor.
*
* @return 0 if successfully matched, otherwise -1.
*/
static int _mbox_message_match(struct k_mbox_msg *tx_msg,
struct k_mbox_msg *rx_msg)
{
uint32_t temp_info;
if (((tx_msg->tx_target_thread == (k_tid_t)K_ANY) ||
(tx_msg->tx_target_thread == rx_msg->tx_target_thread)) &&
((rx_msg->rx_source_thread == (k_tid_t)K_ANY) ||
(rx_msg->rx_source_thread == tx_msg->rx_source_thread))) {
/* update thread identifier fields for both descriptors */
rx_msg->rx_source_thread = tx_msg->rx_source_thread;
tx_msg->tx_target_thread = rx_msg->tx_target_thread;
/* update application info fields for both descriptors */
temp_info = rx_msg->info;
rx_msg->info = tx_msg->info;
tx_msg->info = temp_info;
/* update data size field for receiver only */
if (rx_msg->size > tx_msg->size) {
rx_msg->size = tx_msg->size;
}
/* update data location fields for receiver only */
rx_msg->tx_data = tx_msg->tx_data;
rx_msg->tx_block = tx_msg->tx_block;
if (rx_msg->tx_data != NULL) {
rx_msg->tx_block.pool_id = NULL;
} else if (rx_msg->tx_block.pool_id != NULL) {
rx_msg->tx_data = rx_msg->tx_block.data;
}
/* update syncing thread field for receiver only */
rx_msg->_syncing_thread = tx_msg->_syncing_thread;
return 0;
}
return -1;
}
/**
* @brief Dispose of received message.
*
* Releases any memory pool block still associated with the message,
* then notifies the sender that message processing is complete.
*
* @param rx_msg Pointer to receive message descriptor.
*
* @return N/A
*/
static void _mbox_message_dispose(struct k_mbox_msg *rx_msg)
{
struct tcs *sending_thread;
struct k_mbox_msg *tx_msg;
unsigned int key;
/* do nothing if message was disposed of when it was received */
if (rx_msg->_syncing_thread == NULL) {
return;
}
/* release sender's memory pool block */
if (rx_msg->tx_block.pool_id != NULL) {
#if 0
/* NEED TO WAIT FOR MEMORY POOL SUPPORT */
k_mem_pool_free(&rx_msg->tx_block);
#endif
rx_msg->tx_block.pool_id = NULL;
}
/* recover sender info */
sending_thread = rx_msg->_syncing_thread;
rx_msg->_syncing_thread = NULL;
tx_msg = (struct k_mbox_msg *)sending_thread->swap_data;
/* update data size field for sender */
tx_msg->size = rx_msg->size;
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/*
* asynchronous send: free asynchronous message descriptor +
* dummy thread pair, then give semaphore (if needed)
*/
if (sending_thread->flags & K_DUMMY) {
struct k_sem *async_sem = tx_msg->_async_sem;
_mbox_async_free((struct k_mbox_async *)sending_thread);
if (async_sem != NULL) {
k_sem_give(async_sem);
}
return;
}
#endif
/* synchronous send: wake up sending thread */
key = irq_lock();
_set_thread_return_value(sending_thread, 0);
_mark_thread_as_not_pending(sending_thread);
_ready_thread(sending_thread);
_reschedule_threads(key);
}
/**
* @brief Send a mailbox message.
*
* Helper routine that handles both synchronous and asynchronous sends.
*
* @param mbox Pointer to the mailbox object.
* @param tx_msg Pointer to transmit message descriptor.
* @param timeout Maximum time (milliseconds) to wait for the message to be
* received (although not necessarily completely processed).
* Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long
* as necessary.
*
* @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out
*/
static int _mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
int32_t timeout)
{
struct tcs *sending_thread;
struct tcs *receiving_thread;
struct k_mbox_msg *rx_msg;
sys_dnode_t *wait_q_item;
unsigned int key;
/* save sender id so it can be used during message matching */
tx_msg->rx_source_thread = _current;
/* finish readying sending thread (actual or dummy) for send */
sending_thread = tx_msg->_syncing_thread;
sending_thread->swap_data = tx_msg;
/* search mailbox's rx queue for a compatible receiver */
key = irq_lock();
SYS_DLIST_FOR_EACH_NODE(&mbox->rx_msg_queue, wait_q_item) {
receiving_thread = (struct tcs *)wait_q_item;
rx_msg = (struct k_mbox_msg *)receiving_thread->swap_data;
if (_mbox_message_match(tx_msg, rx_msg) == 0) {
/* take receiver out of rx queue */
_unpend_thread(receiving_thread);
_timeout_abort(receiving_thread);
/* ready receiver for execution */
_set_thread_return_value(receiving_thread, 0);
_ready_thread(receiving_thread);
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/*
* asynchronous send: swap out current thread
* if receiver has priority, otherwise let it continue
*
* note: dummy sending thread sits (unqueued)
* until the receiver consumes the message
*/
if (sending_thread->flags & K_DUMMY) {
_reschedule_threads(key);
return 0;
}
#endif
/*
* synchronous send: pend current thread (unqueued)
* until the receiver consumes the message
*/
_remove_thread_from_ready_q(_current);
_mark_thread_as_pending(_current);
return _Swap(key);
}
}
/* didn't find a matching receiver: don't wait for one */
if (timeout == K_NO_WAIT) {
irq_unlock(key);
return -ENOMSG;
}
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/* asynchronous send: dummy thread waits on tx queue for receiver */
if (sending_thread->flags & K_DUMMY) {
_pend_thread(sending_thread, &mbox->tx_msg_queue, K_FOREVER);
irq_unlock(key);
return 0;
}
#endif
/* synchronous send: sender waits on tx queue for receiver or timeout */
_pend_current_thread(&mbox->tx_msg_queue, timeout);
return _Swap(key);
}
/**
* @brief Send a mailbox message in a synchronous manner.
*
* Sends a message to a mailbox and waits for a receiver to process it.
* The message data may be in a buffer, in a memory pool block, or non-existent
* (i.e. empty message).
*
* @param mbox Pointer to the mailbox object.
* @param tx_msg Pointer to transmit message descriptor.
* @param timeout Maximum time (milliseconds) to wait for the message to be
* received (although not necessarily completely processed).
* Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long
* as necessary.
*
* @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out
*/
int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, int32_t timeout)
{
/* configure things for a synchronous send, then send the message */
tx_msg->_syncing_thread = _current;
return _mbox_message_put(mbox, tx_msg, timeout);
}
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/**
* @brief Send a mailbox message in an asynchronous manner.
*
* Sends a message to a mailbox without waiting for a receiver to process it.
* The message data may be in a buffer, in a memory pool block, or non-existent
* (i.e. an empty message). Optionally, the specified semaphore will be given
* by the mailbox when the message has been both received and disposed of
* by the receiver.
*
* @param mbox Pointer to the mailbox object.
* @param tx_msg Pointer to transmit message descriptor.
* @param sem Semaphore identifier, or NULL if none specified.
*
* @return N/A
*/
void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
struct k_sem *sem)
{
struct k_mbox_async *async;
/*
* allocate an asynchronous message descriptor, configure both parts,
* then send the message asynchronously
*/
_mbox_async_alloc(&async);
async->thread.prio = _current->prio;
async->tx_msg = *tx_msg;
async->tx_msg._syncing_thread = (struct tcs *)&async->thread;
async->tx_msg._async_sem = sem;
_mbox_message_put(mbox, &async->tx_msg, K_FOREVER);
}
#endif
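/*
 * Hypothetical usage sketch (not part of this file): a fire-and-forget send.
 * 'done_sem' is assumed to be an initialized semaphore; it is given once a
 * receiver has consumed and disposed of the message.
 */
static void mbox_send_async(struct k_mbox *mbox, struct k_sem *done_sem)
{
	static char data[16];	/* must outlive the call, since it returns at once */
	struct k_mbox_msg msg;

	msg.info = 0;
	msg.size = sizeof(data);
	msg.tx_data = data;
	msg.tx_block.pool_id = NULL;	/* data is in a buffer, not a pool block */
	msg.tx_target_thread = K_ANY;	/* any receiver may take the message */

	k_mbox_async_put(mbox, &msg, done_sem);
}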
/**
* @brief Retrieve mailbox message data into a buffer.
*
* Completes the processing of a received message by retrieving its data
* into a buffer, then disposing of the message.
*
* Alternatively, this routine can be used to dispose of a received message
* without retrieving its data.
*
* @param rx_msg Pointer to receive message descriptor.
* @param buffer Pointer to buffer to receive data. (Use NULL to discard data.)
*
* @return N/A
*/
void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
{
/* handle case where data is to be discarded */
if (buffer == NULL) {
rx_msg->size = 0;
_mbox_message_dispose(rx_msg);
return;
}
/* copy message data to buffer, then dispose of message */
if ((rx_msg->tx_data != NULL) && (rx_msg->size > 0)) {
memcpy(buffer, rx_msg->tx_data, rx_msg->size);
}
_mbox_message_dispose(rx_msg);
}
/**
* @brief Retrieve mailbox message data into a memory pool block.
*
* Completes the processing of a received message by retrieving its data
* into a memory pool block, then disposing of the message. The memory pool
* block that results from successful retrieval must be returned to the pool
* once the data has been processed, even in cases where zero bytes of data
* are retrieved.
*
* Alternatively, this routine can be used to dispose of a received message
* without retrieving its data. In this case there is no need to return a
* memory pool block to the pool.
*
* This routine allocates a new memory pool block for the data only if the
* data is not already in one. If a new block cannot be allocated, the routine
* returns a failure code and the received message is left unchanged. This
* permits the caller to reattempt data retrieval at a later time or to dispose
* of the received message without retrieving its data.
*
* @param rx_msg Pointer to receive message descriptor.
* @param pool Memory pool identifier. (Use NULL to discard data.)
* @param block Pointer to area to hold memory pool block info.
* @param timeout Maximum time (milliseconds) to wait for a memory pool block.
* Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long as
* necessary.
*
* @return 0 if successful, -ENOMEM if failed immediately, -EAGAIN if timed out
*/
int k_mbox_data_block_get(struct k_mbox_msg *rx_msg, k_mem_pool_t pool,
struct k_mem_block *block, int32_t timeout)
{
int result;
/* handle case where data is to be discarded */
if (pool == NULL) {
rx_msg->size = 0;
_mbox_message_dispose(rx_msg);
return 0;
}
/* handle case where data is already in a memory pool block */
if (rx_msg->tx_block.pool_id != NULL) {
/* give ownership of the block to receiver */
*block = rx_msg->tx_block;
rx_msg->tx_block.pool_id = NULL;
/* now dispose of message */
_mbox_message_dispose(rx_msg);
return 0;
}
/* allocate memory pool block (even when message size is 0!) */
result = k_mem_pool_alloc(pool, block, rx_msg->size, timeout);
if (result != 0) {
return result;
}
/* retrieve non-block data into new block, then dispose of message */
k_mbox_data_get(rx_msg, block->data);
return 0;
}
/**
* @brief Handle immediate consumption of received mailbox message data.
*
* Checks to see if received message data should be kept for later retrieval,
* or if the data should be consumed immediately and the message disposed of.
*
* The data is consumed immediately in either of the following cases:
* 1) The receiver requested immediate retrieval by supplying a buffer
* to receive the data.
* 2) There is no data to be retrieved. (i.e. Data size is 0 bytes.)
*
* @param rx_msg Pointer to receive message descriptor.
* @param buffer Pointer to buffer to receive data.
*
* @return 0
*/
static int _mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer)
{
if (buffer != NULL) {
/* retrieve data now, then dispose of message */
k_mbox_data_get(rx_msg, buffer);
} else if (rx_msg->size == 0) {
/* there is no data to get, so just dispose of message */
_mbox_message_dispose(rx_msg);
} else {
/* keep message around for later data retrieval */
}
return 0;
}
/**
* @brief Receive a mailbox message.
*
* Receives a message from a mailbox, then optionally retrieves its data
* and disposes of the message.
*
* @param mbox Pointer to the mailbox object.
* @param rx_msg Pointer to receive message descriptor.
* @param buffer Pointer to buffer to receive data.
* (Use NULL to defer data retrieval and message disposal until later.)
* @param timeout Maximum time (milliseconds) to wait for a message.
* Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long as
* necessary.
*
* @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out
*/
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
int32_t timeout)
{
struct tcs *sending_thread;
struct k_mbox_msg *tx_msg;
sys_dnode_t *wait_q_item;
unsigned int key;
int result;
/* save receiver id so it can be used during message matching */
rx_msg->tx_target_thread = _current;
/* search mailbox's tx queue for a compatible sender */
key = irq_lock();
SYS_DLIST_FOR_EACH_NODE(&mbox->tx_msg_queue, wait_q_item) {
sending_thread = (struct tcs *)wait_q_item;
tx_msg = (struct k_mbox_msg *)sending_thread->swap_data;
if (_mbox_message_match(tx_msg, rx_msg) == 0) {
/* take sender out of mailbox's tx queue */
_unpend_thread(sending_thread);
_timeout_abort(sending_thread);
irq_unlock(key);
/* consume message data immediately, if needed */
return _mbox_message_data_check(rx_msg, buffer);
}
}
/* didn't find a matching sender */
if (timeout == K_NO_WAIT) {
/* don't wait for a matching sender to appear */
irq_unlock(key);
return -ENOMSG;
}
/* wait until a matching sender appears or a timeout occurs */
_pend_current_thread(&mbox->rx_msg_queue, timeout);
_current->swap_data = rx_msg;
result = _Swap(key);
/* consume message data immediately, if needed */
if (result == 0) {
result = _mbox_message_data_check(rx_msg, buffer);
}
return result;
}
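/*
 * Hypothetical usage sketch (not part of this file): a synchronous
 * send/receive pair. Field usage follows the matching code above; the thread
 * entry points and data values are illustrative only.
 */
#include <kernel.h>

static struct k_mbox my_mbox;

static void mbox_example_init(void)
{
	/* done once, before either thread uses the mailbox */
	k_mbox_init(&my_mbox);
}

static void producer(void)
{
	char data[4] = "abc";
	struct k_mbox_msg msg;

	msg.info = 0;
	msg.size = sizeof(data);
	msg.tx_data = data;
	msg.tx_block.pool_id = NULL;	/* data lives in a buffer, not a pool block */
	msg.tx_target_thread = K_ANY;	/* any receiver may take the message */

	/* blocks until a receiver consumes the message */
	k_mbox_put(&my_mbox, &msg, K_FOREVER);
}

static void consumer(void)
{
	char buffer[4];
	struct k_mbox_msg msg;

	msg.info = 0;
	msg.size = sizeof(buffer);	/* maximum amount of data accepted */
	msg.rx_source_thread = K_ANY;	/* accept a message from any sender */

	/* retrieves the data directly into 'buffer' and disposes of the message */
	k_mbox_get(&my_mbox, &msg, buffer, K_FOREVER);
}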
int task_mbox_put(kmbox_t mbox, kpriority_t prio, struct k_msg *msg,
int32_t timeout)
{
struct k_mbox_msg *tx_msg = (struct k_mbox_msg *)msg;
kpriority_t curr_prio;
unsigned int key;
int result;
/* handle old-style request to send an empty message */
if (tx_msg->size == 0) {
tx_msg->tx_block.pool_id = NULL;
}
/* handle sending message of current thread priority */
curr_prio = _current->prio;
if (prio == curr_prio) {
return _error_to_rc(k_mbox_put(mbox, tx_msg,
_ticks_to_ms(timeout)));
}
/* handle sending message of a different thread priority */
key = irq_lock();
_thread_priority_set(_current, prio);
_reschedule_threads(key);
result = _error_to_rc(k_mbox_put(mbox, tx_msg, _ticks_to_ms(timeout)));
key = irq_lock();
_thread_priority_set(_current, curr_prio);
_reschedule_threads(key);
return result;
}
void task_mbox_block_put(kmbox_t mbox, kpriority_t prio, struct k_msg *msg,
ksem_t sema)
{
struct k_mbox_msg *tx_msg = (struct k_mbox_msg *)msg;
kpriority_t curr_prio;
unsigned int key;
/* handle sending message of current thread priority */
curr_prio = _current->prio;
if (prio == curr_prio) {
k_mbox_async_put(mbox, tx_msg, sema);
return;
}
/* handle sending message of a different thread priority */
key = irq_lock();
_thread_priority_set(_current, prio);
_reschedule_threads(key);
k_mbox_async_put(mbox, tx_msg, sema);
key = irq_lock();
_thread_priority_set(_current, curr_prio);
_reschedule_threads(key);
}
int task_mbox_get(kmbox_t mbox, struct k_msg *msg, int32_t timeout)
{
struct k_mbox_msg *rx_msg = (struct k_mbox_msg *)msg;
return _error_to_rc(k_mbox_get(mbox, rx_msg, rx_msg->_rx_data,
_ticks_to_ms(timeout)));
}
void task_mbox_data_get(struct k_msg *msg)
{
struct k_mbox_msg *rx_msg = (struct k_mbox_msg *)msg;
/* handle old-style request to discard message data */
if (rx_msg->size == 0) {
rx_msg->_rx_data = NULL;
}
k_mbox_data_get(rx_msg, rx_msg->_rx_data);
}
int task_mbox_data_block_get(struct k_msg *msg, struct k_block *block,
kmemory_pool_t pool_id, int32_t timeout)
{
struct k_mbox_msg *rx_msg = (struct k_mbox_msg *)msg;
return _error_to_rc(k_mbox_data_block_get(rx_msg, pool_id, block,
_ticks_to_ms(timeout)));
}

kernel/unified/mem_map.c

@ -0,0 +1,166 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <kernel.h>
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <misc/dlist.h>
#include <sched.h>
extern struct k_mem_map _k_mem_map_ptr_start[];
extern struct k_mem_map _k_mem_map_ptr_end[];
/**
* @brief Initialize kernel memory map subsystem.
*
* Perform any initialization of memory maps that wasn't done at build time.
* Currently this just involves creating the list of free blocks for each map.
*
* @return N/A
*/
static void create_free_list(struct k_mem_map *map)
{
char *p;
int j;
map->free_list = NULL;
p = map->buffer;
for (j = 0; j < map->num_blocks; j++) {
*(char **)p = map->free_list;
map->free_list = p;
p += map->block_size;
}
}
/**
* @brief Complete initialization of statically defined memory maps.
*
* Perform any initialization that wasn't done at build time.
*
* @return N/A
*/
void _k_mem_map_init(void)
{
struct k_mem_map *map;
for (map = _k_mem_map_ptr_start; map < _k_mem_map_ptr_end; map++) {
create_free_list(map);
}
}
/**
* @brief Initialize a memory map.
*
* Initializes the memory map and creates its list of free blocks.
*
* @param map Address of memory map.
* @param num_blocks Number of blocks.
* @param block_size Size of each block, in bytes.
* @param buffer Pointer to buffer used for the blocks.
*
* @return N/A
*/
void k_mem_map_init(struct k_mem_map *map, int num_blocks, int block_size,
void *buffer)
{
map->num_blocks = num_blocks;
map->block_size = block_size;
map->buffer = buffer;
map->num_used = 0;
create_free_list(map);
sys_dlist_init(&map->wait_q);
SYS_TRACING_OBJ_INIT(mem_map, map);
}
/**
* @brief Allocate a memory map block.
*
* Takes a block from the list of unused blocks.
*
* @param map Pointer to memory map object.
* @param mem Pointer to area to receive block address.
* @param timeout Maximum time (milliseconds) to wait for allocation to complete.
* Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long as
* necessary.
*
* @return 0 if successful, -ENOMEM if failed immediately, -EAGAIN if timed out
*/
int k_mem_map_alloc(struct k_mem_map *map, void **mem, int32_t timeout)
{
unsigned int key = irq_lock();
int result;
if (map->free_list != NULL) {
/* take a free block */
*mem = map->free_list;
map->free_list = *(char **)(map->free_list);
map->num_used++;
result = 0;
} else if (timeout == K_NO_WAIT) {
/* don't wait for a free block to become available */
*mem = NULL;
result = -ENOMEM;
} else {
/* wait for a free block or timeout */
_pend_current_thread(&map->wait_q, timeout);
result = _Swap(key);
if (result == 0) {
*mem = _current->swap_data;
}
return result;
}
irq_unlock(key);
return result;
}
/**
* @brief Free a memory map block.
*
* Gives block to a waiting thread if there is one, otherwise returns it to
* the list of unused blocks.
*
* @param map Pointer to memory map object.
* @param mem Pointer to area containing the block address.
*
* @return N/A
*/
void k_mem_map_free(struct k_mem_map *map, void **mem)
{
int key = irq_lock();
struct tcs *pending_thread = _unpend_first_thread(&map->wait_q);
if (pending_thread) {
_set_thread_return_value_with_data(pending_thread, 0, *mem);
_timeout_abort(pending_thread);
_ready_thread(pending_thread);
if (_must_switch_threads()) {
_Swap(key);
return;
}
} else {
**(char ***)mem = map->free_list;
map->free_list = *(char **)mem;
map->num_used--;
}
irq_unlock(key);
}
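/*
 * Hypothetical usage sketch (not part of this file): a runtime-initialized
 * map of four 32-byte blocks.
 */
#include <kernel.h>

static char map_buffer[4 * 32];
static struct k_mem_map my_map;

static void mem_map_example(void)
{
	void *block;

	k_mem_map_init(&my_map, 4, 32, map_buffer);

	if (k_mem_map_alloc(&my_map, &block, K_NO_WAIT) == 0) {
		/* ... use the 32-byte block ... */
		k_mem_map_free(&my_map, &block);
	}
}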

kernel/unified/mem_pool.c

@ -0,0 +1,55 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @brief Memory pools.
*/
#include <kernel.h>
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <misc/dlist.h>
void k_mem_pool_init(struct k_mem_pool *mem, int max_block_size,
int num_max_blocks)
{
}
int k_mem_pool_alloc(k_mem_pool_t id, struct k_block *block, int size,
int32_t timeout)
{
return 0;
}
void k_mem_pool_free(struct k_block *block)
{
}
void k_mem_pool_defrag(k_mem_pool_t id)
{
}
void *k_malloc(uint32_t size)
{
return NULL;
}
void k_free(void *p)
{
}

kernel/unified/msg_q.c

@ -0,0 +1,216 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Message queues.
*/
#include <kernel.h>
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <toolchain.h>
#include <sections.h>
#include <string.h>
#include <wait_q.h>
#include <misc/dlist.h>
/**
* @brief Initialize a message queue.
*
* @param q Pointer to the message queue object.
* @param msg_size Message size, in bytes.
* @param max_msgs Maximum number of messages that can be queued.
* @param buffer Pointer to memory area that holds queued messages.
*
* @return N/A
*/
void k_msgq_init(struct k_msgq *q, uint32_t msg_size, uint32_t max_msgs,
char *buffer)
{
q->msg_size = msg_size;
q->max_msgs = max_msgs;
q->buffer_start = buffer;
q->buffer_end = buffer + (max_msgs * msg_size);
q->read_ptr = buffer;
q->write_ptr = buffer;
q->used_msgs = 0;
sys_dlist_init(&q->wait_q);
SYS_TRACING_OBJ_INIT(msgq, q);
}
/**
* @brief Adds a message to a message queue.
*
* @param q Pointer to the message queue object.
* @param data Pointer to message data area.
* @param timeout Maximum time (milliseconds) to wait for operation to complete.
* Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long as
* necessary.
*
* @return 0 if successful, -ENOMSG if failed immediately or after queue purge,
* -EAGAIN if timed out
*/
int k_msgq_put(struct k_msgq *q, void *data, int32_t timeout)
{
unsigned int key = irq_lock();
struct tcs *pending_thread;
int result;
if (q->used_msgs < q->max_msgs) {
/* message queue isn't full */
pending_thread = _unpend_first_thread(&q->wait_q);
if (pending_thread) {
/* give message to waiting thread */
memcpy(pending_thread->swap_data, data, q->msg_size);
/* wake up waiting thread */
_set_thread_return_value(pending_thread, 0);
_timeout_abort(pending_thread);
_ready_thread(pending_thread);
if (_must_switch_threads()) {
_Swap(key);
return 0;
}
} else {
/* put message in queue */
memcpy(q->write_ptr, data, q->msg_size);
q->write_ptr += q->msg_size;
if (q->write_ptr == q->buffer_end) {
q->write_ptr = q->buffer_start;
}
q->used_msgs++;
}
result = 0;
} else if (timeout == K_NO_WAIT) {
/* don't wait for message space to become available */
result = -ENOMSG;
} else {
/* wait for put message success, failure, or timeout */
_pend_current_thread(&q->wait_q, timeout);
_current->swap_data = data;
return _Swap(key);
}
irq_unlock(key);
return result;
}
/**
* @brief Removes a message from a message queue.
*
* @param q Pointer to the message queue object.
* @param data Pointer to message data area.
* @param timeout Maximum time (milliseconds) to wait for operation to complete.
* Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long as
* necessary.
*
* @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out
*/
int k_msgq_get(struct k_msgq *q, void *data, int32_t timeout)
{
unsigned int key = irq_lock();
struct tcs *pending_thread;
int result;
if (q->used_msgs > 0) {
/* take first available message from queue */
memcpy(data, q->read_ptr, q->msg_size);
q->read_ptr += q->msg_size;
if (q->read_ptr == q->buffer_end) {
q->read_ptr = q->buffer_start;
}
q->used_msgs--;
/* handle first thread waiting to write (if any) */
pending_thread = _unpend_first_thread(&q->wait_q);
if (pending_thread) {
/* add thread's message to queue */
memcpy(q->write_ptr, pending_thread->swap_data,
q->msg_size);
q->write_ptr += q->msg_size;
if (q->write_ptr == q->buffer_end) {
q->write_ptr = q->buffer_start;
}
q->used_msgs++;
/* wake up waiting thread */
_set_thread_return_value(pending_thread, 0);
_timeout_abort(pending_thread);
_ready_thread(pending_thread);
if (_must_switch_threads()) {
_Swap(key);
return 0;
}
}
result = 0;
} else if (timeout == K_NO_WAIT) {
/* don't wait for a message to become available */
result = -ENOMSG;
} else {
/* wait for get message success or timeout */
_pend_current_thread(&q->wait_q, timeout);
_current->swap_data = data;
return _Swap(key);
}
irq_unlock(key);
return result;
}
/**
* @brief Purge contents of a message queue.
*
* Discards all messages currently in the message queue, and cancels
* any "add message" operations initiated by waiting threads.
*
* @param q Pointer to the message queue object.
*
* @return N/A
*/
void k_msgq_purge(struct k_msgq *q)
{
unsigned int key = irq_lock();
if (q->used_msgs) {
/* wake up any threads that are waiting to write */
while (1) {
struct tcs *pending_thread =
_unpend_first_thread(&q->wait_q);
if (pending_thread == NULL) {
break;
}
_set_thread_return_value(pending_thread, -ENOMSG);
_timeout_abort(pending_thread);
_ready_thread(pending_thread);
}
q->used_msgs = 0;
q->read_ptr = q->write_ptr;
if (_must_switch_threads()) {
_Swap(key);
return;
}
} else {
/* queue is empty, so no need to do anything ... */
}
irq_unlock(key);
}
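/*
 * Hypothetical usage sketch (not part of this file): a runtime-initialized
 * queue of ten 4-byte messages.
 */
#include <kernel.h>

static char msgq_buffer[10 * sizeof(uint32_t)];
static struct k_msgq my_msgq;

static void msgq_example(void)
{
	uint32_t tx = 123, rx;

	k_msgq_init(&my_msgq, sizeof(uint32_t), 10, msgq_buffer);

	k_msgq_put(&my_msgq, &tx, K_NO_WAIT);	/* queue has room: returns 0 */
	k_msgq_get(&my_msgq, &rx, K_FOREVER);	/* rx now holds 123 */
}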

kernel/unified/mutex.c

@ -0,0 +1,242 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file @brief mutex kernel services
*
* This module contains routines for handling mutex locking and unlocking.
*
* Mutexes implement a priority inheritance algorithm that boosts the priority
* level of the owning thread to match the priority level of the highest
* priority thread waiting on the mutex.
*
* Each mutex that contributes to priority inheritance must be released in the
* reverse order in which it was acquired. Furthermore, each subsequent mutex
* that contributes to raising the owning thread's priority level must be
* acquired at a point after the most recent "bumping" of the priority level.
*
* For example, if thread A has two mutexes contributing to the raising of its
* priority level, the second mutex M2 must be acquired by thread A after
* thread A's priority level was bumped due to owning the first mutex M1.
* When releasing the mutex, thread A must release M2 before it releases M1.
* Failure to follow this nested model may result in threads running at
* unexpected priority levels (too high, or too low).
*/
#include <kernel.h>
#include <nano_private.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <misc/dlist.h>
#include <errno.h>
#ifdef CONFIG_OBJECT_MONITOR
#define RECORD_STATE_CHANGE(mutex) \
do { (mutex)->num_lock_state_changes++; } while ((0))
#define RECORD_CONFLICT(mutex) \
do { (mutex)->num_conflicts++; } while ((0))
#else
#define RECORD_STATE_CHANGE(mutex) do { } while ((0))
#define RECORD_CONFLICT(mutex) do { } while ((0))
#endif
#ifdef CONFIG_OBJECT_MONITOR
#define INIT_OBJECT_MONITOR(mutex) do { \
mutex->num_lock_state_changes = 0; \
mutex->num_conflicts = 0; \
} while ((0))
#else
#define INIT_OBJECT_MONITOR(mutex) do { } while ((0))
#endif
#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
#define INIT_KERNEL_TRACING(mutex) do { \
mutex->__next = NULL; \
} while ((0))
#else
#define INIT_KERNEL_TRACING(mutex) do { } while ((0))
#endif
void k_mutex_init(struct k_mutex *mutex)
{
mutex->owner = NULL;
mutex->lock_count = 0;
/* initialized upon first use */
/* mutex->owner_orig_prio = 0; */
sys_dlist_init(&mutex->wait_q);
INIT_OBJECT_MONITOR(mutex);
INIT_KERNEL_TRACING(mutex);
}
static int new_prio_for_inheritance(int target, int limit)
{
int new_prio = _is_prio_higher(target, limit) ? target : limit;
new_prio = _get_new_prio_with_ceiling(new_prio);
return new_prio;
}
static void adjust_owner_prio(struct k_mutex *mutex, int new_prio)
{
if (mutex->owner->prio != new_prio) {
K_DEBUG("%p (ready (y/n): %c) prio changed to %d (was %d)\n",
mutex->owner, _is_thread_ready(mutex->owner) ?
'y' : 'n',
new_prio, mutex->owner->prio);
_thread_priority_set(mutex->owner, new_prio);
}
}
int k_mutex_lock(struct k_mutex *mutex, int32_t timeout)
{
int new_prio, key;
k_sched_lock();
if (likely(mutex->lock_count == 0 || mutex->owner == _current)) {
RECORD_STATE_CHANGE(mutex);
mutex->owner_orig_prio = mutex->lock_count == 0 ?
_current->prio :
mutex->owner_orig_prio;
mutex->lock_count++;
mutex->owner = _current;
K_DEBUG("%p took mutex %p, count: %d, orig prio: %d\n",
_current, mutex, mutex->lock_count,
mutex->owner_orig_prio);
k_sched_unlock();
return 0;
}
RECORD_CONFLICT(mutex);
if (unlikely(timeout == K_NO_WAIT)) {
k_sched_unlock();
return -EBUSY;
}
#if 0
if (_is_prio_higher(_current->prio, mutex->owner->prio)) {
new_prio = _current->prio;
}
new_prio = _get_new_prio_with_ceiling(new_prio);
#endif
new_prio = new_prio_for_inheritance(_current->prio, mutex->owner->prio);
key = irq_lock();
K_DEBUG("adjusting prio up on mutex %p\n", mutex);
adjust_owner_prio(mutex, new_prio);
_pend_current_thread(&mutex->wait_q, timeout);
int got_mutex = _Swap(key);
K_DEBUG("on mutex %p got_mutex value: %d\n", mutex, got_mutex);
K_DEBUG("%p got mutex %p (y/n): %c\n", _current, mutex,
got_mutex ? 'y' : 'n');
if (got_mutex == 0) {
k_sched_unlock();
return 0;
}
/* timed out */
K_DEBUG("%p timeout on mutex %p\n", _current, mutex);
struct tcs *waiter = (struct tcs *)sys_dlist_peek_head(&mutex->wait_q);
new_prio = mutex->owner_orig_prio;
new_prio = waiter ? new_prio_for_inheritance(waiter->prio, new_prio) :
new_prio;
K_DEBUG("adjusting prio down on mutex %p\n", mutex);
key = irq_lock();
adjust_owner_prio(mutex, new_prio);
irq_unlock(key);
k_sched_unlock();
return -EAGAIN;
}
void k_mutex_unlock(struct k_mutex *mutex)
{
int key;
__ASSERT(mutex->owner == _current, "");
k_sched_lock();
RECORD_STATE_CHANGE(mutex);
mutex->lock_count--;
K_DEBUG("mutex %p lock_count: %d\n", mutex, mutex->lock_count);
if (mutex->lock_count != 0) {
k_sched_unlock();
return;
}
key = irq_lock();
adjust_owner_prio(mutex, mutex->owner_orig_prio);
struct tcs *new_owner = _unpend_first_thread(&mutex->wait_q);
K_DEBUG("new owner of mutex %p: %p (prio: %d)\n",
mutex, new_owner, new_owner ? new_owner->prio : -1000);
if (new_owner) {
_timeout_abort(new_owner);
_ready_thread(new_owner);
irq_unlock(key);
_set_thread_return_value(new_owner, 0);
/*
* new owner is already of higher or equal prio than first
* waiter since the wait queue is priority-based: no need to
* adjust its priority
*/
mutex->owner = new_owner;
mutex->lock_count++;
mutex->owner_orig_prio = new_owner->prio;
} else {
irq_unlock(key);
mutex->owner = NULL;
}
k_sched_unlock();
}
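/*
 * Hypothetical usage sketch (not part of this file): nested locks released in
 * reverse acquisition order, as the priority inheritance rules above require.
 */
#include <kernel.h>

static struct k_mutex m1, m2;

static void mutex_example(void)
{
	k_mutex_init(&m1);
	k_mutex_init(&m2);

	k_mutex_lock(&m1, K_FOREVER);
	k_mutex_lock(&m2, K_FOREVER);	/* acquired after m1 ... */

	/* ... critical section ... */

	k_mutex_unlock(&m2);		/* ... so released before m1 */
	k_mutex_unlock(&m1);
}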

kernel/unified/pipes.c

@ -0,0 +1,693 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
*
* @brief Pipes
*/
#include <kernel.h>
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <misc/dlist.h>
struct k_pipe_desc {
unsigned char *buffer; /* Position in src/dest buffer */
size_t bytes_to_xfer; /* # bytes left to transfer */
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
struct k_mem_block *block; /* Pointer to memory block */
struct k_mem_block copy_block; /* For backwards compatibility */
struct k_sem *sem; /* Semaphore to give if async */
#endif
};
struct k_pipe_async {
struct tcs_base thread; /* Dummy thread object */
struct k_pipe_desc desc; /* Pipe message descriptor */
};
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
/* Array of asynchronous message descriptors */
static struct k_pipe_async __noinit async_msg[CONFIG_NUM_PIPE_ASYNC_MSGS];
/* stack of unused asynchronous message descriptors */
K_STACK_DEFINE(pipe_async_msgs, CONFIG_NUM_PIPE_ASYNC_MSGS);
/**
* @brief Create pool of asynchronous pipe message descriptors
*
* A dummy thread requires minimal initialization since it never gets to
* execute. The K_DUMMY flag is sufficient to distinguish a dummy thread
* from a real one. The dummy threads are *not* added to the kernel's list of
* known threads.
*
* Once initialized, the address of each descriptor is added to a stack that
* governs access to them.
*
* @return N/A
*/
void _k_pipes_init(void)
{
for (int i = 0; i < CONFIG_NUM_PIPE_ASYNC_MSGS; i++) {
async_msg[i].thread.flags = K_DUMMY;
async_msg[i].thread.swap_data = &async_msg[i].desc;
k_stack_push(pipe_async_msgs, (uint32_t)&async_msg[i]);
}
}
void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size)
{
pipe->buffer = buffer;
pipe->size = size;
pipe->bytes_used = 0;
pipe->read_index = 0;
pipe->write_index = 0;
sys_dlist_init(&pipe->wait_q.writers);
sys_dlist_init(&pipe->wait_q.readers);
SYS_TRACING_OBJ_INIT(pipe, pipe);
}
/**
* @brief Allocate an asynchronous message descriptor
*
* @param async Address of area to hold the descriptor pointer
*
* @return N/A
*/
static void _pipe_async_alloc(struct k_pipe_async **async)
{
k_stack_pop(pipe_async_msgs, (uint32_t *)async, K_FOREVER);
}
/**
* @brief Free an asynchronous message descriptor
*
* @param async Descriptor pointer
*
* @return N/A
*/
static void _pipe_async_free(struct k_pipe_async *async)
{
k_stack_push(pipe_async_msgs, (uint32_t)async);
}
/**
* @brief Finish an asynchronous operation
*
* The asynchronous operation is finished with the scheduler locked to prevent
* the called routines from scheduling a new thread.
*
* @return N/A
*/
static void _pipe_async_finish(struct k_pipe_async *async_desc)
{
k_mem_pool_free(async_desc->desc.block);
if (async_desc->desc.sem != NULL) {
k_sem_give(async_desc->desc.sem);
}
_pipe_async_free(async_desc);
}
#endif
/**
* @brief Copy bytes from @a src to @a dest
*
* @return Number of bytes copied
*/
static size_t _pipe_xfer(unsigned char *dest, size_t dest_size,
const unsigned char *src, size_t src_size)
{
size_t num_bytes = min(dest_size, src_size);
const unsigned char *end = src + num_bytes;
while (src != end) {
*dest = *src;
dest++;
src++;
}
return num_bytes;
}
/**
* @brief Put data from @a src into the pipe's circular buffer
*
* Modifies the following fields in @a pipe:
* buffer, bytes_used, write_index
*
* @return Number of bytes written to the pipe's circular buffer
*/
static size_t _pipe_buffer_put(struct k_pipe *pipe,
const unsigned char *src, size_t src_size)
{
size_t bytes_copied;
size_t run_length;
size_t num_bytes_written = 0;
int i;
for (i = 0; i < 2; i++) {
run_length = min(pipe->size - pipe->bytes_used,
pipe->size - pipe->write_index);
bytes_copied = _pipe_xfer(pipe->buffer + pipe->write_index,
run_length,
src + num_bytes_written,
src_size - num_bytes_written);
num_bytes_written += bytes_copied;
pipe->bytes_used += bytes_copied;
pipe->write_index += bytes_copied;
if (pipe->write_index == pipe->size) {
pipe->write_index = 0;
}
}
return num_bytes_written;
}
/**
* @brief Get data from the pipe's circular buffer
*
* Modifies the following fields in @a pipe:
* bytes_used, read_index
*
* @return Number of bytes read from the pipe's circular buffer
*/
static size_t _pipe_buffer_get(struct k_pipe *pipe,
unsigned char *dest, size_t dest_size)
{
size_t bytes_copied;
size_t run_length;
size_t num_bytes_read = 0;
int i;
for (i = 0; i < 2; i++) {
run_length = min(pipe->bytes_used,
pipe->size - pipe->read_index);
bytes_copied = _pipe_xfer(dest + num_bytes_read,
dest_size - num_bytes_read,
pipe->buffer + pipe->read_index,
run_length);
num_bytes_read += bytes_copied;
pipe->bytes_used -= bytes_copied;
pipe->read_index += bytes_copied;
if (pipe->read_index == pipe->size) {
pipe->read_index = 0;
}
}
return num_bytes_read;
}
/**
* @brief Prepare a working set of readers/writers
*
* Prepare a list of "working threads" into/from which the data
* will be directly copied. This list is used to ...
*
* 1. avoid double copying
* 2. minimize interrupt latency as interrupts are unlocked
* while copying data
* 3. ensure a timeout can not make the request impossible to satisfy
*
* The list is populated with previously pended threads that will be ready to
* run after the pipe call is complete.
*
* Important things to remember when reading from the pipe ...
* 1. If there are writers in @a wait_q, then the pipe's buffer is full.
* 2. Conversely if the pipe's buffer is not full, there are no writers.
* 3. The amount of available data in the pipe is the sum of the bytes used in
* the pipe (@a pipe_space) and all the requests from the waiting writers.
* 4. Since data is read from the pipe's buffer first, the working set must
* include writers that will (try to) re-fill the pipe's buffer afterwards.
*
* Important things to remember when writing to the pipe ...
* 1. If there are readers in @a wait_q, then the pipe's buffer is empty.
* 2. Conversely if the pipe's buffer is not empty, then there are no readers.
* 3. The amount of space available in the pipe is the sum of the bytes unused
* in the pipe (@a pipe_space) and all the requests from the waiting readers.
*
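* Worked example (illustrative numbers, reading with K_NO_WAIT): with 10 bytes
* already in the pipe's buffer (pipe_space == 10) and two pended writers
* wanting to transfer 8 and 6 bytes, a read of up to 20 bytes with
* min_xfer == 15 is satisfiable (8 + 6 + 10 >= 15); both writers are added to
* the working set since neither pushes the running total past 20 bytes.
*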
* @return false if request is unsatisfiable, otherwise true
*/
static bool _pipe_xfer_prepare(sys_dlist_t *xfer_list,
struct k_thread **waiter,
_wait_q_t *wait_q,
size_t pipe_space,
size_t bytes_to_xfer,
size_t min_xfer,
int32_t timeout)
{
sys_dnode_t *node;
struct k_thread *thread;
struct k_pipe_desc *desc;
size_t num_bytes = 0;
if (timeout == K_NO_WAIT) {
for (node = sys_dlist_peek_head(wait_q); node != NULL;
node = sys_dlist_peek_next(wait_q, node)) {
thread = (struct k_thread *)node;
desc = (struct k_pipe_desc *)thread->swap_data;
num_bytes += desc->bytes_to_xfer;
if (num_bytes >= bytes_to_xfer) {
break;
}
}
if (num_bytes + pipe_space < min_xfer) {
return false;
}
}
/*
* Either @a timeout is not K_NO_WAIT (so the thread may pend) or
* the entire request can be satisfied. Generate the working list.
*/
sys_dlist_init(xfer_list);
num_bytes = 0;
while ((thread = (struct k_thread *) sys_dlist_peek_head(wait_q))) {
desc = (struct k_pipe_desc *)thread->swap_data;
num_bytes += desc->bytes_to_xfer;
if (num_bytes > bytes_to_xfer) {
/*
* This request can not be fully satisfied.
* Do not remove it from the wait_q.
* Do not abort its timeout (if applicable).
* Do not add it to the transfer list
*/
break;
}
/*
* This request can be fully satisfied.
* Remove it from the wait_q.
* Abort its timeout.
* Add it to the transfer list.
*/
_unpend_thread(thread);
_timeout_abort(thread);
sys_dlist_append(xfer_list, &thread->k_q_node);
}
*waiter = (num_bytes > bytes_to_xfer) ? thread : NULL;
return true;
}
/**
* @brief Determine the correct return code
*
* Bytes Xferred No Wait Wait
* >= Minimum 0 0
* < Minimum -EIO* -EAGAIN
*
* * The "-EIO No Wait" case was already checked when the "working set"
* was created in _pipe_xfer_prepare().
*
* @return See table above
*/
static int _pipe_return_code(size_t min_xfer, size_t bytes_remaining,
size_t bytes_requested)
{
if (bytes_requested - bytes_remaining >= min_xfer) {
/*
* At least the minimum number of requested
* bytes have been transferred.
*/
return 0;
}
return -EAGAIN;
}
/**
* @brief Ready a pipe thread
*
* If the pipe thread is a real thread, then add it to the ready queue.
* If it is a dummy thread, then finish the asynchronous work.
*
* @return N/A
*/
static void _pipe_thread_ready(struct k_thread *thread)
{
unsigned int key;
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
if (thread->flags & K_DUMMY) {
_pipe_async_finish((struct k_pipe_async *)thread);
return;
}
#endif
key = irq_lock();
_ready_thread(thread);
irq_unlock(key);
}
/**
* @brief Internal API used to send data to a pipe
*/
int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
unsigned char *data, size_t bytes_to_write,
size_t *bytes_written, size_t min_xfer,
int32_t timeout)
{
struct k_thread *reader;
struct k_pipe_desc *desc;
sys_dlist_t xfer_list;
unsigned int key;
size_t num_bytes_written = 0;
size_t bytes_copied;
#if (CONFIG_NUM_PIPE_ASYNC_MSGS == 0)
ARG_UNUSED(async_desc);
#endif
key = irq_lock();
/*
* Create a list of "working readers" into which the data will be
* directly copied.
*/
if (!_pipe_xfer_prepare(&xfer_list, &reader, &pipe->wait_q.readers,
pipe->size - pipe->bytes_used, bytes_to_write,
min_xfer, timeout)) {
irq_unlock(key);
*bytes_written = 0;
return -EIO;
}
k_sched_lock();
irq_unlock(key);
/*
* 1. 'xfer_list' currently contains a list of reader threads that can
* have their read requests fulfilled by the current call.
* 2. 'reader' if not NULL points to a thread on the reader wait_q
* that can get some of its requested data.
* 3. Interrupts are unlocked but the scheduler is locked to allow
* ticks to be delivered but no scheduling to occur
* 4. If 'reader' times out while we are copying data, not only do we
* still have a pointer to it, but it can not execute until this call
* is complete so it is still safe to copy data to it.
*/
struct k_thread *thread = (struct k_thread *)
sys_dlist_get(&xfer_list);
while (thread) {
desc = (struct k_pipe_desc *)thread->swap_data;
bytes_copied = _pipe_xfer(desc->buffer, desc->bytes_to_xfer,
data + num_bytes_written,
bytes_to_write - num_bytes_written);
num_bytes_written += bytes_copied;
desc->buffer += bytes_copied;
desc->bytes_to_xfer -= bytes_copied;
/* The thread's read request has been satisfied. Ready it. */
key = irq_lock();
_ready_thread(thread);
irq_unlock(key);
thread = (struct k_thread *)sys_dlist_get(&xfer_list);
}
/*
* Copy any data to the reader that we left on the wait_q.
* It is possible no data will be copied.
*/
if (reader) {
desc = (struct k_pipe_desc *)reader->swap_data;
bytes_copied = _pipe_xfer(desc->buffer, desc->bytes_to_xfer,
data + num_bytes_written,
bytes_to_write - num_bytes_written);
num_bytes_written += bytes_copied;
desc->buffer += bytes_copied;
desc->bytes_to_xfer -= bytes_copied;
}
/*
* As much data as possible has been directly copied to any waiting
* readers. Add as much as possible to the pipe's circular buffer.
*/
num_bytes_written +=
_pipe_buffer_put(pipe, data + num_bytes_written,
bytes_to_write - num_bytes_written);
if (num_bytes_written == bytes_to_write) {
*bytes_written = num_bytes_written;
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
if (async_desc != NULL) {
_pipe_async_finish(async_desc);
}
#endif
k_sched_unlock();
return 0;
}
/* Not all data was copied. */
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
if (async_desc != NULL) {
/*
* Lock interrupts and unlock the scheduler before
* manipulating the writers wait_q.
*/
key = irq_lock();
_sched_unlock_no_reschedule();
_pend_thread((struct tcs *) &async_desc->thread,
&pipe->wait_q.writers, K_FOREVER);
_reschedule_threads(key);
return 0;
}
#endif
struct k_pipe_desc pipe_desc;
pipe_desc.buffer = data + num_bytes_written;
pipe_desc.bytes_to_xfer = bytes_to_write - num_bytes_written;
if (timeout != K_NO_WAIT) {
_current->swap_data = &pipe_desc;
/*
* Lock interrupts and unlock the scheduler before
* manipulating the writers wait_q.
*/
key = irq_lock();
_sched_unlock_no_reschedule();
_pend_current_thread(&pipe->wait_q.writers, timeout);
_Swap(key);
} else {
k_sched_unlock();
}
*bytes_written = bytes_to_write - pipe_desc.bytes_to_xfer;
return _pipe_return_code(min_xfer, pipe_desc.bytes_to_xfer,
bytes_to_write);
}
int k_pipe_get(struct k_pipe *pipe, void *buffer, size_t bytes_to_read,
size_t *bytes_read, size_t min_xfer, int32_t timeout)
{
struct k_thread *writer;
struct k_pipe_desc *desc;
sys_dlist_t xfer_list;
unsigned int key;
size_t num_bytes_read = 0;
size_t bytes_copied;
__ASSERT(min_xfer <= bytes_to_read, "");
__ASSERT(bytes_read != NULL, "");
key = irq_lock();
/*
* Create a list of "working writers" from which the data will be
* directly copied.
*/
if (!_pipe_xfer_prepare(&xfer_list, &writer, &pipe->wait_q.writers,
pipe->bytes_used, bytes_to_read,
min_xfer, timeout)) {
irq_unlock(key);
*bytes_read = 0;
return -EIO;
}
k_sched_lock();
irq_unlock(key);
num_bytes_read = _pipe_buffer_get(pipe, buffer, bytes_to_read);
/*
* 1. 'xfer_list' currently contains a list of writer threads that can
* have their write requests fulfilled by the current call.
* 2. 'writer' if not NULL points to a thread on the writer wait_q
* that can post some of its requested data.
* 3. Data will be copied from each writer's buffer to either the
* reader's buffer and/or to the pipe's circular buffer.
* 4. Interrupts are unlocked but the scheduler is locked to allow
* ticks to be delivered but no scheduling to occur
* 5. If 'writer' times out while we are copying data, not only do we
* still have a pointer to it, but it can not execute until this
* call is complete so it is still safe to copy data from it.
*/
struct k_thread *thread = (struct k_thread *)
sys_dlist_get(&xfer_list);
while (thread && (num_bytes_read < bytes_to_read)) {
desc = (struct k_pipe_desc *)thread->swap_data;
bytes_copied = _pipe_xfer(buffer + num_bytes_read,
bytes_to_read - num_bytes_read,
desc->buffer, desc->bytes_to_xfer);
num_bytes_read += bytes_copied;
desc->buffer += bytes_copied;
desc->bytes_to_xfer -= bytes_copied;
/*
* It is expected that the write request will be satisfied.
* However, if the read request was satisfied before the
* write request was satisfied, then the write request must
* finish later when writing to the pipe's circular buffer.
*/
if (num_bytes_read == bytes_to_read) {
break;
}
_pipe_thread_ready(thread);
thread = (struct k_thread *)sys_dlist_get(&xfer_list);
}
if (writer && (num_bytes_read < bytes_to_read)) {
desc = (struct k_pipe_desc *)writer->swap_data;
bytes_copied = _pipe_xfer(buffer + num_bytes_read,
bytes_to_read - num_bytes_read,
desc->buffer, desc->bytes_to_xfer);
num_bytes_read += bytes_copied;
desc->buffer += bytes_copied;
desc->bytes_to_xfer -= bytes_copied;
}
/*
* Copy as much data as possible from the writers (if any)
* into the pipe's circular buffer.
*/
while (thread) {
desc = (struct k_pipe_desc *)thread->swap_data;
bytes_copied = _pipe_buffer_put(pipe, desc->buffer,
desc->bytes_to_xfer);
desc->buffer += bytes_copied;
desc->bytes_to_xfer -= bytes_copied;
/* Write request has been satisfied */
_pipe_thread_ready(thread);
thread = (struct k_thread *)sys_dlist_get(&xfer_list);
}
if (writer) {
desc = (struct k_pipe_desc *)writer->swap_data;
bytes_copied = _pipe_buffer_put(pipe, desc->buffer,
desc->bytes_to_xfer);
desc->buffer += bytes_copied;
desc->bytes_to_xfer -= bytes_copied;
}
if (num_bytes_read == bytes_to_read) {
k_sched_unlock();
*bytes_read = num_bytes_read;
return 0;
}
/* Not all data was read. */
struct k_pipe_desc pipe_desc;
pipe_desc.buffer = buffer + num_bytes_read;
pipe_desc.bytes_to_xfer = bytes_to_read - num_bytes_read;
if (timeout != K_NO_WAIT) {
_current->swap_data = &pipe_desc;
key = irq_lock();
_sched_unlock_no_reschedule();
_pend_current_thread(&pipe->wait_q.readers, timeout);
_Swap(key);
} else {
k_sched_unlock();
}
*bytes_read = bytes_to_read - pipe_desc.bytes_to_xfer;
return _pipe_return_code(min_xfer, pipe_desc.bytes_to_xfer,
bytes_to_read);
}
int k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write,
size_t *bytes_written, size_t min_xfer, int32_t timeout)
{
__ASSERT(min_xfer <= bytes_to_write, "");
__ASSERT(bytes_written != NULL, "");
return _k_pipe_put_internal(pipe, NULL, data,
bytes_to_write, bytes_written,
min_xfer, timeout);
}
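/*
 * Illustrative usage sketch (the buffer size, message and timeouts below are
 * arbitrary):
 *
 *   static unsigned char ring_buffer[64];
 *   static struct k_pipe my_pipe;
 *   static char msg[] = "hello";
 *
 *   k_pipe_init(&my_pipe, ring_buffer, sizeof(ring_buffer));
 *
 *   size_t written;
 *   k_pipe_put(&my_pipe, msg, sizeof(msg), &written, sizeof(msg), 100);
 *
 *   unsigned char in[sizeof(msg)];
 *   size_t read;
 *   k_pipe_get(&my_pipe, in, sizeof(in), &read, 1, K_FOREVER);
 */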
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
void k_pipe_block_put(struct k_pipe *pipe, struct k_mem_block *block,
size_t bytes_to_write, struct k_sem *sem)
{
struct k_pipe_async *async_desc;
size_t dummy_bytes_written;
/* For simplicity, always allocate an asynchronous descriptor */
_pipe_async_alloc(&async_desc);
async_desc->desc.block = &async_desc->desc.copy_block;
async_desc->desc.copy_block = *block;
async_desc->desc.sem = sem;
async_desc->thread.prio = k_current_priority_get();
(void) _k_pipe_put_internal(pipe, async_desc, block->data,
block->req_size, &dummy_bytes_written,
block->req_size, K_FOREVER);
}
#endif

kernel/unified/ring_buffer.c Normal file

@ -0,0 +1 @@
#include "../nanokernel/ring_buffer.c"

kernel/unified/sched.c Normal file

@ -0,0 +1,282 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <kernel.h>
#include <nano_private.h>
#include <atomic.h>
#include <sched.h>
#include <wait_q.h>
/* set the bit corresponding to prio in ready q bitmap */
static void _set_ready_q_prio_bit(int prio)
{
int bmap_index = _get_ready_q_prio_bmap_index(prio);
uint32_t *bmap = &_nanokernel.ready_q.prio_bmap[bmap_index];
*bmap |= _get_ready_q_prio_bit(prio);
}
/* clear the bit corresponding to prio in ready q bitmap */
static void _clear_ready_q_prio_bit(int prio)
{
int bmap_index = _get_ready_q_prio_bmap_index(prio);
uint32_t *bmap = &_nanokernel.ready_q.prio_bmap[bmap_index];
*bmap &= ~_get_ready_q_prio_bit(prio);
}
/*
* Add thread to the ready queue, in the slot for its priority; the thread
* must not be on a wait queue.
*/
void _add_thread_to_ready_q(struct tcs *thread)
{
int q_index = _get_ready_q_q_index(thread->prio);
sys_dlist_t *q = &_nanokernel.ready_q.q[q_index];
_set_ready_q_prio_bit(thread->prio);
sys_dlist_append(q, &thread->k_q_node);
}
/* remove thread from the ready queue */
void _remove_thread_from_ready_q(struct tcs *thread)
{
int q_index = _get_ready_q_q_index(thread->prio);
sys_dlist_t *q = &_nanokernel.ready_q.q[q_index];
sys_dlist_remove(&thread->k_q_node);
if (sys_dlist_is_empty(q)) {
_clear_ready_q_prio_bit(thread->prio);
}
}
/* reschedule threads if the scheduler is not locked */
/* not callable from ISR */
/* must be called with interrupts locked */
void _reschedule_threads(int key)
{
K_DEBUG("rescheduling threads\n");
if (unlikely(_nanokernel.current->sched_locked > 0)) {
K_DEBUG("aborted: scheduler was locked\n");
irq_unlock(key);
return;
}
if (_must_switch_threads()) {
K_DEBUG("context-switching out %p\n", _current);
_Swap(key);
} else {
irq_unlock(key);
}
}
/* application API: unlock the scheduler */
void k_sched_unlock(void)
{
__ASSERT(_nanokernel.current->sched_locked > 0, "");
__ASSERT(!_is_in_isr(), "");
int key = irq_lock();
atomic_dec(&_nanokernel.current->sched_locked);
K_DEBUG("scheduler unlocked (%p:%d)\n",
_current, _current->sched_locked);
_reschedule_threads(key);
}
/*
* Callback for sys_dlist_insert_at() to find the correct insert point in a
* wait queue (priority-based).
*/
static int _is_wait_q_insert_point(sys_dnode_t *dnode_info, void *insert_prio)
{
struct tcs *waitq_node = CONTAINER_OF(dnode_info, struct tcs, k_q_node);
return _is_prio_higher((int)insert_prio, waitq_node->prio);
}
/* convert milliseconds to ticks */
#define ceiling(numerator, divider) \
(((numerator) + ((divider) - 1)) / (divider))
int32_t _ms_to_ticks(int32_t ms)
{
int64_t ms_ticks_per_sec = (int64_t)ms * sys_clock_ticks_per_sec;
return (int32_t)ceiling(ms_ticks_per_sec, MSEC_PER_SEC);
}
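/*
 * Worked example (assuming sys_clock_ticks_per_sec == 100):
 *   _ms_to_ticks(15) == ceiling(15 * 100, 1000) == 2 ticks
 * i.e. a duration is always rounded up so that a timeout never expires early.
 */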
/* pend the specified thread: it must *not* be in the ready queue */
/* must be called with interrupts locked */
void _pend_thread(struct tcs *thread, _wait_q_t *wait_q, int32_t timeout)
{
sys_dlist_t *dlist = (sys_dlist_t *)wait_q;
sys_dlist_insert_at(dlist, &thread->k_q_node,
_is_wait_q_insert_point, (void *)thread->prio);
_mark_thread_as_pending(thread);
if (timeout != K_FOREVER) {
_mark_thread_as_timing(thread);
_TIMEOUT_ADD(thread, wait_q, _ms_to_ticks(timeout));
}
}
/* pend the current thread */
/* must be called with interrupts locked */
void _pend_current_thread(_wait_q_t *wait_q, int32_t timeout)
{
_remove_thread_from_ready_q(_current);
_pend_thread(_current, wait_q, timeout);
}
/* find which one is the next thread to run */
/* must be called with interrupts locked */
struct tcs *_get_next_ready_thread(void)
{
int prio = _get_highest_ready_prio();
int q_index = _get_ready_q_q_index(prio);
sys_dlist_t *list = &_nanokernel.ready_q.q[q_index];
struct k_thread *thread = (struct k_thread *)sys_dlist_peek_head(list);
__ASSERT(thread, "no thread to run (prio: %d, queue index: %u)!\n",
prio, q_index);
return thread;
}
/*
* Check if there is a thread of higher prio than the current one. Should only
* be called if we already know that the current thread is preemptible.
*/
int __must_switch_threads(void)
{
K_DEBUG("current prio: %d, highest prio: %d\n",
_current->prio, _get_highest_ready_prio());
extern void _dump_ready_q(void);
_dump_ready_q();
return _is_prio_higher(_get_highest_ready_prio(), _current->prio);
}
/* application API: change a thread's priority. Not callable from ISR */
void k_thread_priority_set(struct tcs *thread, int prio)
{
__ASSERT(!_is_in_isr(), "");
int key = irq_lock();
_thread_priority_set(thread, prio);
_reschedule_threads(key);
}
/* application API: find out the priority of the current thread */
int k_current_priority_get(void)
{
return k_thread_priority_get(_current);
}
/*
* application API: the current thread yields control to threads of higher or
* equal priorities. This is done by removing the thread from the ready queue,
* putting it back at the end of its priority's list and invoking the
* scheduler.
*/
void k_yield(void)
{
__ASSERT(!_is_in_isr(), "");
int key = irq_lock();
_remove_thread_from_ready_q(_current);
_add_thread_to_ready_q(_current);
if (_current == _get_next_ready_thread()) {
irq_unlock(key);
} else {
_Swap(key);
}
}
/* application API: put the current thread to sleep */
void k_sleep(int32_t duration)
{
__ASSERT(!_is_in_isr(), "");
K_DEBUG("thread %p for %d ns\n", _current, duration);
/* wait of 0 ns is treated as a 'yield' */
if (duration == 0) {
k_yield();
return;
}
int key = irq_lock();
_mark_thread_as_timing(_current);
_remove_thread_from_ready_q(_current);
_timeout_add(_current, NULL, _ms_to_ticks(duration));
_Swap(key);
}
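/*
 * e.g. k_sleep(100) suspends the calling thread for roughly 100 ms: the
 * duration is converted with _ms_to_ticks() and rounded up to whole ticks.
 */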
/* application API: wakeup a sleeping thread */
void k_wakeup(k_tid_t thread)
{
int key = irq_lock();
/* do not wake a thread that is pending on an object */
if (thread->timeout.wait_q) {
irq_unlock(key);
return;
}
if (_timeout_abort(thread) < 0) {
irq_unlock(key);
return;
}
_ready_thread(thread);
if (_is_in_isr()) {
irq_unlock(key);
} else {
_reschedule_threads(key);
}
}
/* application API: get current thread ID */
k_tid_t k_current_get(void)
{
return _current;
}
/* debug aid */
void _dump_ready_q(void)
{
K_DEBUG("bitmap: %x\n", _ready_q.prio_bmap[0]);
for (int prio = 0; prio < K_NUM_PRIORITIES; prio++) {
K_DEBUG("prio: %d, head: %p\n",
prio - CONFIG_NUM_COOP_PRIORITIES,
sys_dlist_peek_head(&_ready_q.q[prio]));
}
}

kernel/unified/sem.c Normal file

@ -0,0 +1,93 @@
/*
* Copyright (c) 2010-2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
*
* @brief Nanokernel semaphore object.
*
* The semaphores are of the 'counting' type, i.e. each 'give' operation will
* increment the internal count by 1, if no thread is pending on it, up to the
* configured limit. The 'init' call sets the count to the supplied initial
* value. Following multiple 'give' operations, the same number of 'take'
* operations can be performed without the calling thread having to pend on
* the semaphore.
*/
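/*
 * Illustrative usage sketch ('my_sem' and the 10 ms timeout are arbitrary):
 *
 *   struct k_sem my_sem;
 *
 *   k_sem_init(&my_sem, 0, 1);              binary semaphore, initially unavailable
 *
 *   if (k_sem_take(&my_sem, 10) == 0) {     wait up to 10 ms
 *       ...
 *   }
 *
 *   k_sem_give(&my_sem);                    from another thread or an ISR
 */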
#include <kernel.h>
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <misc/dlist.h>
#include <sched.h>
void k_sem_init(struct k_sem *sem, unsigned int initial_count,
unsigned int limit)
{
__ASSERT(limit != 0, "limit cannot be zero");
sem->count = initial_count;
sem->limit = limit;
sys_dlist_init(&sem->wait_q);
SYS_TRACING_OBJ_INIT(nano_sem, sem);
}
void k_sem_give(struct k_sem *sem)
{
int key = irq_lock();
struct tcs *first_pending_thread = _unpend_first_thread(&sem->wait_q);
if (first_pending_thread) {
_timeout_abort(first_pending_thread);
_ready_thread(first_pending_thread);
_set_thread_return_value(first_pending_thread, 0);
if (!_is_in_isr() && _must_switch_threads()) {
_Swap(key);
return;
}
} else {
if (likely(sem->count != sem->limit)) {
sem->count++;
}
}
irq_unlock(key);
}
int k_sem_take(struct k_sem *sem, int32_t timeout)
{
__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");
unsigned int key = irq_lock();
if (likely(sem->count > 0)) {
sem->count--;
irq_unlock(key);
return 0;
}
if (timeout == K_NO_WAIT) {
irq_unlock(key);
return -EBUSY;
}
_pend_current_thread(&sem->wait_q, timeout);
return _Swap(key);
}

kernel/unified/stack.c Normal file

@ -0,0 +1,101 @@
/*
* Copyright (c) 2010-2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @brief fixed-size stack object
*/
#include <kernel.h>
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <toolchain.h>
#include <sections.h>
#include <sched.h>
#include <wait_q.h>
#include <misc/__assert.h>
void k_stack_init_with_buffer(struct k_stack *stack, int num_entries,
uint32_t *buffer)
{
sys_dlist_init(&stack->wait_q);
stack->next = stack->base = buffer;
stack->top = stack->base + num_entries;
SYS_TRACING_OBJ_INIT(k_stack, stack);
}
void k_stack_init(struct k_stack *stack, int num_entries)
{
k_stack_init_with_buffer(stack, num_entries, (uint32_t *)(stack + 1));
}
void k_stack_push(struct k_stack *stack, uint32_t data)
{
struct k_thread *first_pending_thread;
unsigned int key;
__ASSERT(stack->next != stack->top, "stack is full");
key = irq_lock();
first_pending_thread = _unpend_first_thread(&stack->wait_q);
if (first_pending_thread) {
_timeout_abort(first_pending_thread);
_ready_thread(first_pending_thread);
_set_thread_return_value_with_data(first_pending_thread,
0, (void *)data);
if (!_is_in_isr() && _must_switch_threads()) {
(void)_Swap(key);
return;
}
} else {
*(stack->next) = data;
stack->next++;
}
irq_unlock(key);
}
int k_stack_pop(struct k_stack *stack, uint32_t *data, int32_t timeout)
{
unsigned int key;
int result;
key = irq_lock();
if (likely(stack->next > stack->base)) {
stack->next--;
*data = *(stack->next);
irq_unlock(key);
return 0;
}
if (timeout == K_NO_WAIT) {
irq_unlock(key);
return -EBUSY;
}
_pend_current_thread(&stack->wait_q, timeout);
result = _Swap(key);
if (result == 0) {
*data = (uint32_t)_current->swap_data;
}
return result;
}
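/*
 * Illustrative usage sketch ('my_stack' and its depth are arbitrary):
 *
 *   uint32_t stack_entries[8];
 *   struct k_stack my_stack;
 *   uint32_t value;
 *
 *   k_stack_init_with_buffer(&my_stack, 8, stack_entries);
 *   k_stack_push(&my_stack, 0x1234);
 *   k_stack_pop(&my_stack, &value, K_FOREVER);      value == 0x1234
 */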

kernel/unified/sys_clock.c Normal file

@ -0,0 +1,201 @@
/* system clock support for nanokernel-only systems */
/*
* Copyright (c) 1997-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nano_private.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <drivers/system_timer.h>
#ifdef CONFIG_SYS_CLOCK_EXISTS
int sys_clock_us_per_tick = 1000000 / sys_clock_ticks_per_sec;
int sys_clock_hw_cycles_per_tick =
CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / sys_clock_ticks_per_sec;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#endif
#else
/* don't initialize to avoid division-by-zero error */
int sys_clock_us_per_tick;
int sys_clock_hw_cycles_per_tick;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec;
#endif
#endif
/* updated by timer driver for tickless, stays at 1 for non-tickless */
int32_t _sys_idle_elapsed_ticks = 1;
int64_t _sys_clock_tick_count;
/**
*
* @brief Return the lower part of the current system tick count
*
* @return the lower 32 bits of the current system tick count
*
*/
uint32_t sys_tick_get_32(void)
{
return (uint32_t)_sys_clock_tick_count;
}
/**
*
* @brief Return the current system tick count
*
* @return the current system tick count
*
*/
int64_t sys_tick_get(void)
{
int64_t tmp_sys_clock_tick_count;
/*
* Lock the interrupts when reading _sys_clock_tick_count 64-bit
* variable. Some architectures (x86) do not handle 64-bit atomically,
* so we have to lock the timer interrupt that causes change of
* _sys_clock_tick_count
*/
unsigned int imask = irq_lock();
tmp_sys_clock_tick_count = _sys_clock_tick_count;
irq_unlock(imask);
return tmp_sys_clock_tick_count;
}
/**
*
* @brief Return number of ticks since a reference time
*
* This function is meant to be used in contained fragments of code. The first
* call to it in a particular code fragment fills in a reference time variable
* which then gets passed and updated every time the function is called. From
* the second call on, the delta between the value passed to it and the current
* tick count is the return value. Since the first call is meant to only fill in
* the reference time, its return value should be discarded.
*
* Since a code fragment that wants to use sys_tick_delta() passes in its
* own reference time variable, multiple code fragments can make use of this
* function concurrently.
*
* e.g.
* uint64_t reftime;
* (void) sys_tick_delta(&reftime); /# prime it #/
* [do stuff]
* x = sys_tick_delta(&reftime); /# how long since priming #/
* [do more stuff]
* y = sys_tick_delta(&reftime); /# how long since [do stuff] #/
*
* @return tick count since reference time; undefined for first invocation
*
* NOTE: We use inline function for both 64-bit and 32-bit functions.
* Compiler optimizes out 64-bit result handling in 32-bit version.
*/
static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime)
{
int64_t delta;
int64_t saved;
/*
* Lock the interrupts when reading _sys_clock_tick_count 64-bit
* variable. Some architectures (x86) do not handle 64-bit atomically,
* so we have to lock the timer interrupt that causes change of
* _sys_clock_tick_count
*/
unsigned int imask = irq_lock();
saved = _sys_clock_tick_count;
irq_unlock(imask);
delta = saved - (*reftime);
*reftime = saved;
return delta;
}
/**
*
* @brief Return number of ticks since a reference time
*
* @return tick count since reference time; undefined for first invocation
*/
int64_t sys_tick_delta(int64_t *reftime)
{
return _nano_tick_delta(reftime);
}
uint32_t sys_tick_delta_32(int64_t *reftime)
{
return (uint32_t)_nano_tick_delta(reftime);
}
/* handle the expired timeouts in the nano timeout queue */
#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
#include <wait_q.h>
static inline void handle_expired_timeouts(int32_t ticks)
{
struct _timeout *head =
(struct _timeout *)sys_dlist_peek_head(&_timeout_q);
_nanokernel.task_timeout = TICKS_UNLIMITED;
K_DEBUG("head: %p, delta: %d\n",
head, head ? head->delta_ticks_from_prev : -2112);
if (head) {
head->delta_ticks_from_prev -= ticks;
_timeout_handle_timeouts();
}
}
#else
#define handle_expired_timeouts(ticks) do { } while ((0))
#endif
/**
*
* @brief Announce a tick to the nanokernel
*
* This function is only to be called by the system clock timer driver when a
* tick is to be announced to the nanokernel. It takes care of dequeuing the
* timers that have expired and waking up the fibers pending on them.
*
* @return N/A
*/
void _nano_sys_clock_tick_announce(int32_t ticks)
{
unsigned int key;
K_DEBUG("ticks: %d\n", ticks);
key = irq_lock();
_sys_clock_tick_count += ticks;
handle_expired_timeouts(ticks);
irq_unlock(key);
}
/*
* Get closest nano timeouts/timers deadline expiry, (uint32_t)TICKS_UNLIMITED
* if none.
*/
uint32_t _nano_get_earliest_deadline(void)
{
return _nano_get_earliest_timeouts_deadline();
}

kernel/unified/thread.c Normal file

@ -0,0 +1,459 @@
/*
* Copyright (c) 2010-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Nanokernel thread support
*
* This module provides general purpose thread support, which applies to both
* tasks and fibers.
*/
#include <kernel.h>
#include <toolchain.h>
#include <sections.h>
#include <nano_private.h>
#include <misc/printk.h>
#include <sys_clock.h>
#include <drivers/system_timer.h>
#include <sched.h>
#include <wait_q.h>
/* Legacy API */
int sys_execution_context_type_get(void)
{
if (k_am_in_isr())
return NANO_CTX_ISR;
if (_current->prio < 0)
return NANO_CTX_FIBER;
return NANO_CTX_TASK;
}
/**
*
* @brief Determine if code is running at interrupt level
*
* @return 0 if invoked by a thread, or non-zero if invoked by an ISR
*/
int k_am_in_isr(void)
{
return _IS_IN_ISR();
}
/**
*
* @brief Mark thread as essential to system
*
* This function tags the running fiber or task as essential to system
* operation; exceptions raised by this thread will be treated as a fatal
* system error.
*
* @return N/A
*/
void _thread_essential_set(void)
{
_current->flags |= ESSENTIAL;
}
/**
*
* @brief Mark thread as not essential to system
*
* This function tags the running fiber or task as not essential to system
* operation; exceptions raised by this thread may be recoverable.
* (This is the default tag for a thread.)
*
* @return N/A
*/
void _thread_essential_clear(void)
{
_current->flags &= ~ESSENTIAL;
}
/**
*
* @brief Is the specified thread essential?
*
* This routine indicates if the running fiber or task is an essential system
* thread.
*
* @return Non-zero if current thread is essential, zero if it is not
*/
int _is_thread_essential(void)
{
return _current->flags & ESSENTIAL;
}
void k_busy_wait(uint32_t usec_to_wait)
{
/* use 64-bit math to prevent overflow when multiplying */
uint32_t cycles_to_wait = (uint32_t)(
(uint64_t)usec_to_wait *
(uint64_t)sys_clock_hw_cycles_per_sec /
(uint64_t)USEC_PER_SEC
);
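/*
 * e.g. with a (hypothetical) 100 MHz cycle counter, k_busy_wait(50)
 * spins for 50 * 100000000 / 1000000 = 5000 cycles.
 */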
uint32_t start_cycles = k_cycle_get_32();
for (;;) {
uint32_t current_cycles = k_cycle_get_32();
/* this handles the rollover on an unsigned 32-bit value */
if ((current_cycles - start_cycles) >= cycles_to_wait) {
break;
}
}
}
#ifdef CONFIG_THREAD_CUSTOM_DATA
/**
*
* @brief Set thread's custom data
*
* This routine sets the custom data value for the current task or fiber.
* Custom data is not used by the kernel itself, and is freely available
* for the thread to use as it sees fit.
*
* @param value New value for the thread's custom data.
*
* @return N/A
*/
void k_thread_custom_data_set(void *value)
{
_current->custom_data = value;
}
/**
*
* @brief Get thread's custom data
*
* This function returns the custom data value for the current task or fiber.
*
* @return current handle value
*/
void *k_thread_custom_data_get(void)
{
return _current->custom_data;
}
#endif /* CONFIG_THREAD_CUSTOM_DATA */
#if defined(CONFIG_THREAD_MONITOR)
/**
*
* @brief Thread exit routine
*
* This function is invoked when the specified thread is aborted, either
* normally or abnormally. It is called for the termination of any thread,
* (fibers and tasks).
*
* This routine must be invoked either from a fiber or from a task with
* interrupts locked to guarantee that the list of threads does not change in
* mid-operation. It cannot be called from ISR context.
*
* @return N/A
*/
void _thread_exit(struct tcs *thread)
{
/*
* Remove thread from the list of threads. This singly linked list of
* threads maintains ALL the threads in the system: both tasks and
* fibers regardless of whether they are runnable.
*/
if (thread == _nanokernel.threads) {
_nanokernel.threads = _nanokernel.threads->next_thread;
} else {
struct tcs *prev_thread;
prev_thread = _nanokernel.threads;
while (thread != prev_thread->next_thread) {
prev_thread = prev_thread->next_thread;
}
prev_thread->next_thread = thread->next_thread;
}
}
#endif /* CONFIG_THREAD_MONITOR */
/**
*
* @brief Common thread entry point function
*
* This function serves as the entry point for _all_ threads, i.e. both
* task and fibers are instantiated such that initial execution starts
* here.
*
* This routine invokes the actual task or fiber entry point function and
* passes it three arguments. It also handles graceful termination of the
* task or fiber if the entry point function ever returns.
*
* @param pEntry address of the app entry point function
* @param parameter1 1st arg to the app entry point function
* @param parameter2 2nd arg to the app entry point function
* @param parameter3 3rd arg to the app entry point function
*
* @internal
* The 'noreturn' attribute is applied to this function so that the compiler
* can dispense with generating the usual preamble that is only required for
* functions that actually return.
*
* @return Does not return
*
*/
FUNC_NORETURN void _thread_entry(void (*entry)(void *, void *, void *),
void *p1, void *p2, void *p3)
{
entry(p1, p2, p3);
if (_is_thread_essential()) {
_NanoFatalErrorHandler(_NANO_ERR_INVALID_TASK_EXIT,
&_default_esf);
}
k_thread_abort(_current);
/*
* Compiler can't tell that k_thread_abort() won't return and issues a
* warning unless we explicitly tell it that control never gets this
* far.
*/
CODE_UNREACHABLE;
}
static void start_thread(struct tcs *thread)
{
int key = irq_lock(); /* protect kernel queues */
_mark_thread_as_started(thread);
if (_is_thread_ready(thread)) {
_add_thread_to_ready_q(thread);
if (_must_switch_threads()) {
_Swap(key);
return;
}
}
irq_unlock(key);
}
static void schedule_new_thread(struct k_thread *thread, int32_t delay)
{
#ifdef CONFIG_NANO_TIMEOUTS
if (delay == 0) {
start_thread(thread);
} else {
_mark_thread_as_timing(thread);
_timeout_add(thread, NULL, _ms_to_ticks(delay));
}
#else
ARG_UNUSED(delay);
start_thread(thread);
#endif
}
k_tid_t k_thread_spawn(char *stack, unsigned stack_size,
void (*entry)(void *, void *, void*),
void *p1, void *p2, void *p3,
int32_t prio, uint32_t options, int32_t delay)
{
__ASSERT(!_is_in_isr(), "");
struct tcs *new_thread = (struct tcs *)stack;
_new_thread(stack, stack_size, NULL, entry, p1, p2, p3, prio, options);
schedule_new_thread(new_thread, delay);
return new_thread;
}
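/*
 * Illustrative usage sketch (the stack name/size, entry point and priority
 * are arbitrary):
 *
 *   static char __stack my_stack[512];
 *
 *   static void my_entry(void *p1, void *p2, void *p3) { ... }
 *
 *   k_tid_t tid = k_thread_spawn(my_stack, sizeof(my_stack), my_entry,
 *                                NULL, NULL, NULL, 7, 0, 0);
 */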
int k_thread_cancel(k_tid_t tid)
{
struct tcs *thread = tid;
int key = irq_lock();
if (_has_thread_started(thread) || !_is_thread_timing(thread)) {
irq_unlock(key);
return -EINVAL;
}
_timeout_abort(thread);
_thread_exit(thread);
irq_unlock(key);
return 0;
}
void _k_thread_group_op(uint32_t groups, void (*func)(struct tcs *))
{
unsigned int key;
__ASSERT(!_is_in_isr(), "");
k_sched_lock();
/* Invoke func() on each static thread in the specified group set. */
_FOREACH_STATIC_THREAD(thread_init) {
if (is_in_any_group(thread_init, groups)) {
key = irq_lock();
func(thread_init->thread);
irq_unlock(key);
}
}
/*
* If the current thread is still in a ready state, then let the
* "unlock scheduler" code determine if any rescheduling is needed.
*/
if (_is_thread_ready(_current)) {
k_sched_unlock();
return;
}
/* The current thread is no longer in a ready state--reschedule. */
key = irq_lock();
_sched_unlock_no_reschedule();
_Swap(key);
}
void _k_thread_single_start(struct tcs *thread)
{
_mark_thread_as_started(thread);
if (_is_thread_ready(thread)) {
_add_thread_to_ready_q(thread);
}
}
void _k_thread_single_suspend(struct tcs *thread)
{
if (_is_thread_ready(thread)) {
_remove_thread_from_ready_q(thread);
}
_mark_thread_as_suspended(thread);
}
void k_thread_suspend(struct tcs *thread)
{
unsigned int key = irq_lock();
_k_thread_single_suspend(thread);
if (thread == _current) {
_Swap(key);
} else {
irq_unlock(key);
}
}
void _k_thread_single_resume(struct tcs *thread)
{
_mark_thread_as_not_suspended(thread);
if (_is_thread_ready(thread)) {
_add_thread_to_ready_q(thread);
}
}
void k_thread_resume(struct tcs *thread)
{
unsigned int key = irq_lock();
_k_thread_single_resume(thread);
_reschedule_threads(key);
}
void _k_thread_single_abort(struct tcs *thread)
{
if (thread->fn_abort != NULL) {
thread->fn_abort();
}
if (_is_thread_ready(thread)) {
_remove_thread_from_ready_q(thread);
} else {
if (_is_thread_pending(thread)) {
_unpend_thread(thread);
}
if (_is_thread_timing(thread)) {
_timeout_abort(thread);
_mark_thread_as_not_timing(thread);
}
}
_mark_thread_as_dead(thread);
}
void _init_static_threads(void)
{
_FOREACH_STATIC_THREAD(thread_init) {
_new_thread(
thread_init->init_stack,
thread_init->init_stack_size,
NULL,
thread_init->init_entry,
thread_init->init_p1,
thread_init->init_p2,
thread_init->init_p3,
thread_init->init_prio,
0);
thread_init->thread->init_data = thread_init;
}
_k_thread_group_op(K_THREAD_GROUP_EXE, _k_thread_single_start);
}
uint32_t _k_thread_group_mask_get(struct tcs *thread)
{
struct k_thread_static_init *thread_init = thread->init_data;
return thread_init->init_groups;
}
void _k_thread_group_join(uint32_t groups, struct tcs *thread)
{
struct k_thread_static_init *thread_init = thread->init_data;
thread_init->init_groups |= groups;
}
void _k_thread_group_leave(uint32_t groups, struct tcs *thread)
{
struct k_thread_static_init *thread_init = thread->init_data;
thread_init->init_groups &= ~groups;
}
/* legacy API */
void task_start(ktask_t task)
{
int key = irq_lock();
_k_thread_single_start(task);
_reschedule_threads(key);
}

kernel/unified/thread_abort.c Normal file

@ -0,0 +1,56 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Primitive for aborting a thread when an arch-specific one is not
* needed.
*/
#include <kernel.h>
#include <nano_private.h>
#include <nano_internal.h>
#include <string.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <sched.h>
extern void _k_thread_single_abort(struct tcs *thread);
#if !defined(CONFIG_ARCH_HAS_NANO_FIBER_ABORT)
void k_thread_abort(k_tid_t thread)
{
unsigned int key;
key = irq_lock();
_k_thread_single_abort(thread);
if (_current == thread) {
_Swap(key);
CODE_UNREACHABLE;
}
/* The abort handler might have altered the ready queue. */
_reschedule_threads(key);
}
#endif
void k_thread_abort_handler_set(void (*func)(void))
{
_current->fn_abort = func;
}

kernel/unified/timer.c Normal file

@ -0,0 +1,326 @@
/*
* Copyright (c) 1997-2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <wait_q.h>
#include <sched.h>
/**
* @brief Timer expire handler
*
* @param t Internal timeout structure
*
* @return N/A
*/
void timer_expiration_handler(struct _timeout *t)
{
int key = irq_lock();
struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
struct tcs *first_pending_thread = _unpend_first_thread(&timer->wait_q);
/* if the timer is periodic, start it again */
if (timer->period > 0) {
_do_timeout_add(NULL, &timer->timeout, &timer->wait_q,
timer->period);
}
/* once timer is expired, it can return valid user data pointer */
timer->user_data = timer->user_data_internal;
/* resume thread waiting on the timer */
if (first_pending_thread) {
_ready_thread(first_pending_thread);
_set_thread_return_value(first_pending_thread, 0);
/*
* Since the routine is called from timer interrupt handler
* _Swap() is not invoked
*/
}
if (timer->handler) {
timer->handler(timer->handler_arg);
}
irq_unlock(key);
}
/**
* @brief Initialize timer structure
*
* Routine initializes timer structure parameters and assigns the user
* supplied data.
* Routine needs to be called before timer is used
*
* @param timer Pointer to the timer structure to be initialized
* @param data Pointer to user supplied data
*
* @return N/A
*/
void k_timer_init(struct k_timer *timer, void *data)
{
timer->user_data = NULL;
timer->user_data_internal = data;
timer->period = 0;
sys_dlist_init(&timer->wait_q);
_timeout_init(&timer->timeout, timer_expiration_handler);
SYS_TRACING_OBJ_INIT(micro_timer, timer);
}
#if (CONFIG_NUM_DYNAMIC_TIMERS > 0)
static struct k_timer _dynamic_timers[CONFIG_NUM_DYNAMIC_TIMERS];
static sys_dlist_t _timer_pool;
/* Initialize the pool of timers for dynamic timer allocation */
void _k_dyamic_timer_init(void)
{
int i;
int n_timers = ARRAY_SIZE(_dynamic_timers);
sys_dlist_init(&_timer_pool);
for (i = 0; i < n_timers; i++) {
k_timer_init(&_dynamic_timers[i], NULL);
sys_dlist_append(&_timer_pool,
&_dynamic_timers[i].timeout.node);
}
}
/**
* @brief Allocate timer
*
* Allocates a new timer.
*
* @return pointer to the new timer structure
*/
struct k_timer *k_timer_alloc(void)
{
k_sched_lock();
/*
* This conversion works only if the timeout member
* is the first field in the timer structure.
*/
struct k_timer *timer = (struct k_timer *)sys_dlist_get(&_timer_pool);
k_sched_unlock();
return timer;
}
/**
* @brief Deallocate timer
*
* Deallocates the timer and returns it to the free timer pool.
* @param timer Timer to free
*
* @return N/A
*/
void k_timer_free(struct k_timer *timer)
{
k_timer_stop(timer);
k_sched_lock();
sys_dlist_append(&_timer_pool, &timer->timeout.node);
k_sched_unlock();
}
/**
*
* @brief Check if the timer pool is empty
*
* @return true if the timer pool is empty, false otherwise
*/
bool k_timer_pool_is_empty(void)
{
k_sched_lock();
bool is_empty = sys_dlist_is_empty(&_timer_pool);
k_sched_unlock();
return is_empty;
}
#endif /* (CONFIG_NUM_DYNAMIC_TIMERS > 0) */
/**
*
* @brief Start timer
*
* @param timer Timer structure
* @param duration Initial timer duration (ms)
* @param period Timer period (ms); 0 for a one-shot timer
* @param handler Handler to run on expiration, or NULL
* @param handler_arg Argument passed to @a handler
* @param stop_handler Handler to run when the timer is stopped, or NULL
* @param stop_handler_arg Argument passed to @a stop_handler
*
* @return N/A
*/
void k_timer_start(struct k_timer *timer, int32_t duration, int32_t period,
void (*handler)(void *), void *handler_arg,
void (*stop_handler)(void *), void *stop_handler_arg)
{
__ASSERT(duration >= 0 && period >= 0 &&
(duration != 0 || period != 0), "invalid parameters\n");
unsigned int key = irq_lock();
if (timer->timeout.delta_ticks_from_prev != -1) {
_do_timeout_abort(&timer->timeout);
}
timer->period = _ms_to_ticks(period);
timer->handler = handler;
timer->handler_arg = handler_arg;
timer->stop_handler = stop_handler;
timer->stop_handler_arg = stop_handler_arg;
_do_timeout_add(NULL, &timer->timeout, &timer->wait_q,
_ms_to_ticks(duration));
irq_unlock(key);
}
/**
*
* @brief Restart timer with new parameters
*
* @param timer Timer structure
* @param duration Initial timer duration (ms)
* @param period Timer period (ms)
*
* @return N/A
*/
void k_timer_restart(struct k_timer *timer, int32_t duration, int32_t period)
{
k_timer_start(timer, duration, period,
timer->handler, timer->handler_arg,
timer->stop_handler, timer->stop_handler_arg);
}
/**
*
* @brief Stop the timer
*
* @param timer Timer structure
*
* @return N/A
*/
void k_timer_stop(struct k_timer *timer)
{
__ASSERT(!_is_in_isr(), "");
int key = irq_lock();
_do_timeout_abort(&timer->timeout);
irq_unlock(key);
if (timer->stop_handler) {
timer->stop_handler(timer->stop_handler_arg);
}
key = irq_lock();
struct tcs *pending_thread = _unpend_first_thread(&timer->wait_q);
if (pending_thread) {
_set_thread_return_value(pending_thread, -ECANCELED);
_ready_thread(pending_thread);
}
_reschedule_threads(key);
}
/**
*
* @brief Test the timer for expiration
*
* The routine checks if the timer is expired and returns the pointer
* to user data. Otherwise it makes the thread wait for the timer expiration.
*
* @param timer Timer structure
* @param user_data_ptr Where to store the user data pointer
* @param wait May be K_NO_WAIT or K_FOREVER
*
* @return 0 or error code
*/
int k_timer_test(struct k_timer *timer, void **user_data_ptr, int wait)
{
int result = 0;
unsigned int key = irq_lock();
/* check if the timer has expired */
if (timer->timeout.delta_ticks_from_prev == -1) {
*user_data_ptr = timer->user_data;
timer->user_data = NULL;
} else if (wait == K_NO_WAIT) {
/* if the thread should not wait, return immediately */
*user_data_ptr = NULL;
result = -EAGAIN;
} else {
/* otherwise pend the thread */
_pend_current_thread(&timer->wait_q, K_FOREVER);
result = _Swap(key);
key = irq_lock();
if (result == 0) {
*user_data_ptr = timer->user_data;
timer->user_data = NULL;
}
}
irq_unlock(key);
return result;
}
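/*
 * Illustrative usage sketch ('my_timer', the 100 ms duration and the user
 * data are arbitrary):
 *
 *   static struct k_timer my_timer;
 *   static int my_data;
 *   void *data;
 *
 *   k_timer_init(&my_timer, &my_data);
 *   k_timer_start(&my_timer, 100, 0, NULL, NULL, NULL, NULL);   one-shot, 100 ms
 *
 *   if (k_timer_test(&my_timer, &data, K_FOREVER) == 0) {
 *       ...                                          data now points at my_data
 *   }
 */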
/**
*
* @brief Get timer remaining time
*
* @param timer Timer descriptor
*
* @return remaining time (ms)
*/
int32_t k_timer_remaining_get(struct k_timer *timer)
{
unsigned int key = irq_lock();
int32_t remaining_ticks;
sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
if (timer->timeout.delta_ticks_from_prev == -1) {
remaining_ticks = 0;
} else {
/*
* As nanokernel timeouts are stored in a linked list with
* delta_ticks_from_prev, to get the actual number of ticks
* remaining for the timer, walk through the timeouts list
* and accumulate all the delta_ticks_from_prev values up to
* the timer.
*/
struct _timeout *t =
(struct _timeout *)sys_dlist_peek_head(timeout_q);
remaining_ticks = t->delta_ticks_from_prev;
while (t != &timer->timeout) {
t = (struct _timeout *)sys_dlist_peek_next(timeout_q,
&t->node);
remaining_ticks += t->delta_ticks_from_prev;
}
}
irq_unlock(key);
return _ticks_to_ms(remaining_ticks);
}

kernel/unified/version.c Normal file

@ -0,0 +1 @@
#include "../nanokernel/version.c"

kernel/unified/work_q.c Normal file

@ -0,0 +1,171 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
*
* Workqueue support functions
*/
#include <nano_private.h>
#include <wait_q.h>
#include <errno.h>
static void work_q_main(void *work_q_ptr, void *p2, void *p3)
{
struct k_work_q *work_q = work_q_ptr;
ARG_UNUSED(p2);
ARG_UNUSED(p3);
while (1) {
struct k_work *work;
k_work_handler_t handler;
work = k_fifo_get(&work_q->fifo, K_FOREVER);
handler = work->handler;
/* Set state to idle so it can be resubmitted by handler */
if (!atomic_test_and_set_bit(work->flags, K_WORK_STATE_IDLE)) {
handler(work);
}
/* Make sure we don't hog the CPU if the FIFO never (or
* very rarely) gets empty.
*/
k_yield();
}
}
void k_work_q_start(struct k_work_q *work_q,
const struct k_thread_config *config)
{
k_fifo_init(&work_q->fifo);
k_thread_spawn(config->stack, config->stack_size,
work_q_main, work_q, 0, 0,
config->prio, 0, 0);
}
static void work_timeout(struct _timeout *t)
{
struct k_delayed_work *w = CONTAINER_OF(t, struct k_delayed_work,
timeout);
/* submit work to workqueue */
k_work_submit_to_queue(w->work_q, &w->work);
}
void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler)
{
k_work_init(&work->work, handler);
_timeout_init(&work->timeout, work_timeout);
work->work_q = NULL;
}
int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
struct k_delayed_work *work,
int32_t timeout)
{
int key = irq_lock();
int err;
/* Work cannot be active in multiple queues */
if (work->work_q && work->work_q != work_q) {
err = -EADDRINUSE;
goto done;
}
/* Cancel if work has been submitted */
if (work->work_q == work_q) {
err = k_delayed_work_cancel(work);
if (err < 0) {
goto done;
}
}
/* Attach workqueue so the timeout callback can submit it */
work->work_q = work_q;
if (!timeout) {
/* Submit the work immediately when the delay is zero */
k_work_submit_to_queue(work_q, &work->work);
} else {
/* Add timeout */
_do_timeout_add(NULL, &work->timeout, NULL,
_ms_to_ticks(timeout));
}
err = 0;
done:
irq_unlock(key);
return err;
}
int k_delayed_work_cancel(struct k_delayed_work *work)
{
int key = irq_lock();
if (!atomic_test_bit(work->work.flags, K_WORK_STATE_IDLE)) {
irq_unlock(key);
return -EINPROGRESS;
}
if (!work->work_q) {
irq_unlock(key);
return -EINVAL;
}
/* Abort timeout, if it has expired this will do nothing */
_do_timeout_abort(&work->timeout);
/* Detach from workqueue */
work->work_q = NULL;
irq_unlock(key);
return 0;
}
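/*
 * Illustrative usage sketch (assumes a workqueue 'my_work_q' already started
 * with k_work_q_start(); the handler name and the 50 ms delay are arbitrary):
 *
 *   static void my_handler(struct k_work *work) { ... }
 *
 *   static struct k_delayed_work my_work;
 *
 *   k_delayed_work_init(&my_work, my_handler);
 *   k_delayed_work_submit_to_queue(&my_work_q, &my_work, 50);
 */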
#ifdef CONFIG_SYSTEM_WORKQUEUE
#include <init.h>
static char __stack sys_work_q_stack[CONFIG_SYSTEM_WORKQUEUE_STACK_SIZE];
static const struct k_thread_config sys_work_q_config = {
.stack = sys_work_q_stack,
.stack_size = sizeof(sys_work_q_stack),
.prio = CONFIG_SYSTEM_WORKQUEUE_PRIORITY,
};
struct k_work_q k_sys_work_q;
static int k_sys_work_q_init(struct device *dev)
{
ARG_UNUSED(dev);
k_work_q_start(&k_sys_work_q, &sys_work_q_config);
return 0;
}
SYS_INIT(k_sys_work_q_init, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif


@ -85,6 +85,7 @@ def get_cmdline_args():
global input_mdef_file
global output_dir
global kernel_type
output_dir_help='output directory for kernel_main.*, sysgen.h, etc'
input_mdef_file_help='input MDEF file'
@ -171,11 +172,34 @@ def mdef_parse():
continue
if (words[0] == "TASK"):
if (len(words) != 6):
error_arg_count(line)
task_list.append((words[1], int(words[2]), words[3],
int(words[4]), words[5]))
continue
if kernel_type == 'micro':
if (len(words) != 6):
error_arg_count(line)
task_list.append((words[1], int(words[2]), words[3],
int(words[4]), words[5]))
continue
elif (kernel_type == 'unified'):
if len(words) < 6 or len(words) > 10:
error_arg_count(line)
p1 = 0
p2 = 0
p3 = 0
if len(words) >= 7:
p1 = words[6]
if len(words) >= 8:
p2 = words[7]
if len(words) >= 9:
p3 = words[8]
abort = 0
if len(words) == 10:
abort = words[9]
task_list.append((words[1], int(words[2]), words[3],
int(words[4]), words[5], p1, p2, p3, abort))
continue
if (words[0] == "TASKGROUP"):
if (len(words) != 2):
@ -194,10 +218,21 @@ def mdef_parse():
continue
if (words[0] == "SEMA"):
if (len(words) != 2):
error_arg_count(line)
sema_list.append((words[1],))
continue
if (kernel_type == "micro"):
if (len(words) != 2):
error_arg_count(line)
sema_list.append((words[1],))
continue
elif (kernel_type == "unified"):
if len(words) < 2 or len(words) > 4:
error_arg_count(line)
if len(words) == 2:
sema_list.append((words[1], 0, 0xffffffff))
elif len(words) == 3:
sema_list.append((words[1], int(words[2]), 0xffffffff))
else:
sema_list.append((words[1], int(words[2]), int(words[3])))
continue
if (words[0] == "MUTEX"):
if (len(words) != 2):
@ -270,17 +305,29 @@ def kernel_main_c_out(string):
def kernel_main_c_header():
""" Generate initial portion of kernel_main.c """
kernel_main_c_out(
kernel_main_c_filename_str +
copyright +
do_not_edit_warning +
"\n" +
"#include <sysgen.h>\n" +
"#include <misc/debug/object_tracing_common.h>\n" +
"#include <micro_private_types.h>\n" +
"#include <kernel_main.h>\n" +
"#include <toolchain.h>\n" +
"#include <sections.h>\n")
if kernel_type == 'micro':
kernel_main_c_out(
kernel_main_c_filename_str +
copyright +
do_not_edit_warning +
"\n" +
"#include <sysgen.h>\n" +
"#include <misc/debug/object_tracing_common.h>\n" +
"#include <micro_private_types.h>\n" +
"#include <kernel_main.h>\n" +
"#include <toolchain.h>\n" +
"#include <sections.h>\n")
else:
kernel_main_c_out(
kernel_main_c_filename_str +
copyright +
do_not_edit_warning +
"\n" +
"#include <sysgen.h>\n" +
"#include <misc/debug/object_tracing_common.h>\n" +
"#include <kernel.h>\n" +
"#include <toolchain.h>\n" +
"#include <sections.h>\n")
def kernel_main_c_kargs():
@ -337,9 +384,77 @@ def kernel_main_c_timers():
"{{NULL, &_k_timer_free.wait_q.head}, " +
"(void *) &_k_timer_blocks[%d]};\n" % (num_timers - 1))
def get_group_bitmask(group_str):
def kernel_main_c_tasks():
""" Generate task variables """
# create bitmask of group(s) task belongs to
group_bitmask = 0
group_set = group_str[1:len(group_str) - 1] # drop [] surrounding groups
if (group_set != ""):
group_list = group_set.split(',')
for group in group_list:
group_bitmask |= group_dictionary[group]
return group_bitmask
def is_float(x):
try:
float(x)
return True
except ValueError:
return False
def is_int(x):
try:
int(x)
return True
except ValueError:
return False
def is_number(x):
return is_float(x) or is_int(x)
def kernel_main_c_tasks_unified():
global num_prios
kernel_main_c_out("\n")
# declare task entry points
kernel_main_c_out("\n")
for task in task_list:
kernel_main_c_out("EXTERN_C void %s(void *, void *, void *);\n" %
task[2])
# thread_init objects
kernel_main_c_out("\n")
for task in task_list:
name = task[0]
prio = task[1]
entry = task[2]
stack_size = task[3]
groups = get_group_bitmask(task[4])
params = (task[5], task[6], task[7])
for param in params:
if not is_number(param):
kernel_main_c_out("extern void *%s;\n" % (param));
abort = task[8]
if abort != 0 and abort != 'NULL':
kernel_main_c_out("EXTERN_C void %s(void);\n" % abort)
kernel_main_c_out(
"K_THREAD_OBJ_DEFINE(%s, %u, %s, %s, %s, %s, %s, %d, 0x%x);\n" %
(name, int(stack_size), entry,
params[0], params[1], params[2],
abort, int(prio), int(groups)))
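As a concrete illustration of the line emitted above, with a hypothetical task tuple and an assumed group bitmask of 0x1 for [EXE]:

# Illustration -- hypothetical values in the unified-kernel layout:
# (name, prio, entry, stack, groups, p1, p2, p3, abort)
task = ("MAIN", 7, "main_entry", 1024, "[EXE]", 0, 0, 0, 0)

name, prio, entry, stack_size = task[0], task[1], task[2], task[3]
groups = 0x1                  # assumed bit for [EXE] in group_dictionary
params = task[5:8]
abort = task[8]

print("K_THREAD_OBJ_DEFINE(%s, %u, %s, %s, %s, %s, %s, %d, 0x%x);" %
      (name, int(stack_size), entry,
       params[0], params[1], params[2], abort, int(prio), int(groups)))
# K_THREAD_OBJ_DEFINE(MAIN, 1024, main_entry, 0, 0, 0, 0, 7, 0x1);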
def kernel_main_c_tasks_micro():
global num_prios
@@ -375,12 +490,7 @@ def kernel_main_c_tasks():
stack = "__" + task[0] + "_stack"
# create bitmask of group(s) task belongs to
group_bitmask = 0
group_set = task[4][1:len(task[4]) - 1] # drop [] surrounding groups
if (group_set != ""):
group_list = group_set.split(',')
for group in group_list:
group_bitmask |= group_dictionary[group]
group_bitmask = get_group_bitmask(task[4])
# invert bitmask to convert SYS indication to non-SYS indication
#
@@ -420,6 +530,15 @@ def kernel_main_c_tasks():
"struct k_task * _k_current_task = &_k_task_idle;\n")
def kernel_main_c_tasks():
""" Generate task variables """
if kernel_type == 'micro':
kernel_main_c_tasks_micro()
else:
kernel_main_c_tasks_unified()
def kernel_main_c_priorities():
""" Generate task scheduling variables """
@@ -461,6 +580,11 @@ def kernel_main_c_priorities():
def kernel_main_c_events():
""" Generate event variables """
if kernel_type == 'micro':
event_type = 'int'
else:
event_type = 'struct k_event *'
# event descriptors
# pre-defined event for timer
@@ -478,9 +602,14 @@ def kernel_main_c_events():
# in other words, no declaration if handler is NULL or 0
handler = event[1].strip().lower()
if handler != "null" and handler != "0":
kernel_main_c_out("extern int %s(int event);\n" % (event[1]))
kernel_main_c_out("extern int %s(%s event);\n" %
(event[1], event_type))
kernel_main_c_out("DEFINE_EVENT(%s, %s);\n" % (event[0], event[1]))
if kernel_type == 'micro':
kernel_main_c_out("DEFINE_EVENT(%s, %s);\n" % (event[0], event[1]))
else:
kernel_main_c_out("K_EVENT_DEFINE(_k_event_obj_%s, %s);\n" %
(event[0], event[1]))
def kernel_main_c_mutexes():
""" Generate mutex variables """
@@ -495,8 +624,11 @@ def kernel_main_c_mutexes():
kernel_main_c_out("\n")
for mutex in mutex_list:
name = mutex[0]
kernel_main_c_out("struct _k_mutex_struct _k_mutex_obj_%s = " % (name) +
"__MUTEX_DEFAULT;\n")
if kernel_type == 'micro':
kernel_main_c_out("struct _k_mutex_struct _k_mutex_obj_%s = " %
(name) + "__MUTEX_DEFAULT;\n")
else:
kernel_main_c_out("K_MUTEX_DEFINE(_k_mutex_obj_%s);\n" % (name))
def kernel_main_c_semas():
@@ -512,8 +644,14 @@ def kernel_main_c_semas():
kernel_main_c_out("\n")
for semaphore in sema_list:
name = semaphore[0]
kernel_main_c_out("struct _k_sem_struct _k_sem_obj_%s = " % (name) +
"__K_SEMAPHORE_DEFAULT;\n")
if kernel_type == 'micro':
kernel_main_c_out("struct _k_sem_struct _k_sem_obj_%s = " %
(name) + "__K_SEMAPHORE_DEFAULT;\n")
else:
initial_count = semaphore[1]
limit = semaphore[2]
kernel_main_c_out("K_SEM_DEFINE(_k_sem_obj_%s, %s, %s);\n" %
(name, initial_count, limit))
def kernel_main_c_fifos():
@@ -524,25 +662,30 @@ def kernel_main_c_fifos():
if (total_fifos == 0):
return
# FIFO buffers
kernel_main_c_out("\n")
for fifo in fifo_list:
kernel_main_c_out(
"char __noinit __%s_buffer[%d];\n" % (fifo[0], fifo[1] * fifo[2]))
if kernel_type == 'micro':
# FIFO buffers and descriptors
# FIFO descriptors
for fifo in fifo_list:
name = fifo[0]
depth = fifo[1]
width = fifo[2]
buffer = "__" + name + "_buffer"
kernel_main_c_out("char __noinit %s[%d];\n" %
(buffer, depth * width))
kernel_main_c_out(
"struct _k_fifo_struct _k_fifo_obj_%s = " % (name) +
"__K_FIFO_DEFAULT(%d, %d, %s);\n" % (depth, width, buffer))
else:
# message queue objects
kernel_main_c_out("\n")
for fifo in fifo_list:
name = fifo[0]
depth = fifo[1]
width = fifo[2]
buffer = "__" + fifo[0] + "_buffer"
kernel_main_c_out("struct _k_fifo_struct _k_fifo_obj_%s = " % (name) +
"__K_FIFO_DEFAULT(%d, %d, %s);\n" % (depth, width, buffer))
kernel_main_c_out("\n")
for fifo in fifo_list:
name = fifo[0]
depth = fifo[1]
width = fifo[2]
kernel_main_c_out("K_MSGQ_DEFINE(_k_fifo_obj_%s, %s, %s);\n" %
(name, depth, width))
def kernel_main_c_pipes():
@@ -557,21 +700,32 @@ def kernel_main_c_pipes():
kernel_main_c_out("\n")
for pipe in pipe_list:
kernel_main_c_out(
"char __noinit __%s_buffer[%d];\n" % (pipe[0], pipe[1]))
if kernel_type == 'micro':
for pipe in pipe_list:
kernel_main_c_out(
"char __noinit __%s_buffer[%d];\n" % (pipe[0], pipe[1]))
# pipe descriptors
# pipe descriptors
for pipe in pipe_list:
name = pipe[0]
size = pipe[1]
buffer = "__" + pipe[0] + "_buffer"
kernel_main_c_out("struct _k_pipe_struct _k_pipe_obj_%s = " % (name) +
" __K_PIPE_INITIALIZER(%d, %s);\n" % (size, buffer) +
"kpipe_t _k_pipe_ptr_%s " % (name) +
" __in_section(_k_pipe_ptr, public, pipe) =\n" +
" (kpipe_t)&_k_pipe_obj_%s;\n" % (name))
for pipe in pipe_list:
name = pipe[0]
size = pipe[1]
buffer = "__" + pipe[0] + "_buffer"
kernel_main_c_out("struct _k_pipe_struct _k_pipe_obj_%s = "
% (name) +
" __K_PIPE_INITIALIZER(%d, %s);\n" % (size, buffer) +
"kpipe_t _k_pipe_ptr_%s " % (name) +
" __in_section(_k_pipe_ptr, public, pipe) =\n" +
" (kpipe_t)&_k_pipe_obj_%s;\n" % (name))
else:
# pipe objects
for pipe in pipe_list:
name = pipe[0]
size = pipe[1]
kernel_main_c_out("K_PIPE_DEFINE(_k_pipe_obj_%s, %d);\n" %
(name, size))
def kernel_main_c_mailboxes():
@@ -582,14 +736,22 @@ def kernel_main_c_mailboxes():
if (total_mbxs == 0):
return
# mailbox descriptors
kernel_main_c_out("\n")
kernel_main_c_out("\n")
for mbx in mbx_list:
name = mbx[0]
kernel_main_c_out("struct _k_mbox_struct _k_mbox_obj_%s = " % (name) +
"__K_MAILBOX_DEFAULT;\n")
kernel_main_c_out("\n")
if kernel_type == 'micro':
# mailbox descriptors
for mbx in mbx_list:
name = mbx[0]
kernel_main_c_out(
"struct _k_mbox_struct _k_mbox_obj_%s = " % (name) +
"__K_MAILBOX_DEFAULT;\n")
else:
# mailbox objects
for mbx in mbx_list:
name = mbx[0]
kernel_main_c_out("K_MBOX_DEFINE(_k_mbox_obj_%s);\n" % (name))
def kernel_main_c_maps():
@@ -600,29 +762,34 @@ def kernel_main_c_maps():
if (total_maps == 0):
return
# memory map buffers
kernel_main_c_out("\n")
for map in map_list:
blocks = map[1]
block_size = map[2]
kernel_main_c_out("char __noinit __MAP_%s_buffer[%d];\n" %
(map[0], blocks * block_size))
if kernel_type == 'micro':
# memory map buffers and descriptors
# memory map descriptors
for map in map_list:
name = map[0]
blocks = map[1]
block_size = map[2]
kernel_main_c_out("char __noinit __MAP_%s_buffer[%d];\n" %
(map[0], blocks * block_size))
kernel_main_c_out(
"struct _k_mem_map_struct _k_mem_map_obj_%s = " % (name) +
"__K_MEM_MAP_INITIALIZER(%d, %d, __MAP_%s_buffer);\n" %
(blocks, block_size, map[0]))
kernel_main_c_out(
"kmemory_map_t _k_mem_map_ptr_%s " % (name) +
" __in_section(_k_mem_map_ptr, public, mem_map) =\n" +
" (kmemory_map_t)&_k_mem_map_obj_%s;\n" % (name))
else:
# memory map objects
for map in map_list:
name = map[0]
blocks = map[1]
block_size = map[2]
kernel_main_c_out(
"struct _k_mem_map_struct _k_mem_map_obj_%s = " % (name) +
" __K_MEM_MAP_INITIALIZER(%d, %d, __MAP_%s_buffer);\n" %
(blocks, block_size, map[0]) +
"kmemory_map_t _k_mem_map_ptr_%s " % (name) +
" __in_section(_k_mem_map_ptr, public, mem_map) =\n" +
" (kmemory_map_t)&_k_mem_map_obj_%s;\n" % (name))
for map in map_list:
name = map[0]
blocks = map[1]
block_size = map[2]
kernel_main_c_out("K_MEM_MAP_DEFINE(_k_mem_map_obj_%s, %s, %s);\n" %
(name, blocks, block_size))
def kernel_main_c_pools():
@@ -791,19 +958,22 @@ def kernel_main_c_generate():
global kernel_main_c_data
kernel_main_c_header()
kernel_main_c_kargs()
kernel_main_c_timers()
kernel_main_c_tasks()
kernel_main_c_priorities()
kernel_main_c_events()
kernel_main_c_mutexes()
kernel_main_c_semas()
kernel_main_c_fifos()
kernel_main_c_pipes()
kernel_main_c_mailboxes()
kernel_main_c_events()
kernel_main_c_maps()
kernel_main_c_pools()
kernel_main_c_node_init()
kernel_main_c_fifos()
kernel_main_c_mailboxes()
kernel_main_c_tasks()
kernel_main_c_pipes()
if kernel_type == 'micro':
kernel_main_c_kargs()
kernel_main_c_timers()
kernel_main_c_pools()
kernel_main_c_node_init()
kernel_main_c_priorities()
write_file(output_dir + 'kernel_main.c', kernel_main_c_data)
@@ -864,11 +1034,17 @@ sysgen_h_header_include_guard_str = \
def generate_sysgen_h_header():
global sysgen_h_data
if kernel_type == 'micro':
kernel_api_file = "#include <microkernel.h>\n"
else:
kernel_api_file = "#include <kernel.h>\n"
sysgen_h_data += \
sysgen_h_filename_str + \
copyright + \
do_not_edit_warning + \
"#include <microkernel.h>\n" + \
kernel_api_file + \
sysgen_h_header_include_guard_str + \
"\n"
@@ -912,15 +1088,46 @@ def generate_sysgen_h_obj_ids():
global sysgen_h_data
if kernel_type == 'micro':
mutex_struct = '_k_mutex_struct'
mutex_type = 'kmutex_t'
sem_struct = '_k_sem_struct'
sem_type = 'ksem_t'
pipe_struct = '_k_pipe_struct'
pipe_type = 'kpipe_t'
map_struct = '_k_mem_map_struct'
map_type = 'kmemory_map_t'
fifo_struct = '_k_fifo_struct'
fifo_type = 'kfifo_t'
mbox_struct = '_k_mbox_struct'
mbox_type = 'kmbox_t'
event_type = 'kevent_t'
# add missing object types
else:
mutex_struct = 'k_mutex'
mutex_type = 'struct k_mutex *'
sem_struct = 'k_sem'
sem_type = 'struct k_sem *'
pipe_struct = 'k_pipe'
pipe_type = 'struct k_pipe *'
map_struct = 'k_mem_map'
map_type = 'struct k_mem_map *'
fifo_struct = 'k_msgq'
fifo_type = 'struct k_msgq *'
mbox_struct = 'k_mbox'
mbox_type = 'struct k_mbox *'
event_type = 'struct k_event *'
# add missing object types
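These struct/type pairs feed the extern declarations and object-id macros generated below; for illustration, a hypothetical unified-kernel semaphore named MY_SEM would come out as:

# Illustration -- "MY_SEM" is a hypothetical name; every object class
# below follows the same extern + #define pattern, only the struct name
# and pointer type change.
sem_struct, sem_type, name = "k_sem", "struct k_sem *", "MY_SEM"

print("extern struct %s _k_sem_obj_%s;" % (sem_struct, name))
print("#define %s ((%s)&_k_sem_obj_%s)" % (name, sem_type, name))
# extern struct k_sem _k_sem_obj_MY_SEM;
# #define MY_SEM ((struct k_sem *)&_k_sem_obj_MY_SEM)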
# mutex object ids
sysgen_h_data += "\n"
for mutex in mutex_list:
name = mutex[0]
sysgen_h_data += \
"extern struct _k_mutex_struct _k_mutex_obj_%s;\n" % (name)
"extern struct %s _k_mutex_obj_%s;\n" % (mutex_struct, name)
sysgen_h_data += \
"#define %s ((kmutex_t)&_k_mutex_obj_%s)\n\n" % (name, name)
"#define %s ((%s)&_k_mutex_obj_%s)\n\n" % (name, mutex_type, name)
# semaphore object ids
@@ -928,19 +1135,19 @@ def generate_sysgen_h_obj_ids():
for semaphore in sema_list:
name = semaphore[0]
sysgen_h_data += \
"extern struct _k_sem_struct _k_sem_obj_%s;\n" % (name)
"extern struct %s _k_sem_obj_%s;\n" % (sem_struct, name)
sysgen_h_data += \
"#define %s ((ksem_t)&_k_sem_obj_%s)\n\n" % (name, name)
"#define %s ((%s)&_k_sem_obj_%s)\n\n" % (name, sem_type, name)
# fifo object ids
# fifo (aka message queue) object ids
sysgen_h_data += "\n"
for fifo in fifo_list:
name = fifo[0]
sysgen_h_data += \
"extern struct _k_fifo_struct _k_fifo_obj_%s;\n" % (name)
"extern struct %s _k_fifo_obj_%s;\n" % (fifo_struct, name)
sysgen_h_data += \
"#define %s ((kfifo_t)&_k_fifo_obj_%s)\n\n" % (name, name)
"#define %s ((%s)&_k_fifo_obj_%s)\n\n" % (name, fifo_type, name)
# mailbox object ids
@@ -948,9 +1155,9 @@ def generate_sysgen_h_obj_ids():
for mbx in mbx_list:
name = mbx[0]
sysgen_h_data += \
"extern struct _k_mbox_struct _k_mbox_obj_%s;\n" % (name)
"extern struct %s _k_mbox_obj_%s;\n" % (mbox_struct, name)
sysgen_h_data += \
"#define %s ((kmbox_t)&_k_mbox_obj_%s)\n\n" % (name, name)
"#define %s ((%s)&_k_mbox_obj_%s)\n\n" % (name, mbox_type, name)
# pipe object id
@@ -958,8 +1165,9 @@ def generate_sysgen_h_obj_ids():
for pipe in pipe_list:
name = pipe[0];
sysgen_h_data += \
"extern struct _k_pipe_struct _k_pipe_obj_%s;\n" % (name) + \
"#define %s ((kpipe_t)&_k_pipe_obj_%s)\n\n" % (name, name)
"extern struct %s _k_pipe_obj_%s;\n" % (pipe_struct, name)
sysgen_h_data += \
"#define %s ((%s)&_k_pipe_obj_%s)\n\n" % (name, pipe_type, name)
# memory map object id
@@ -967,27 +1175,38 @@ def generate_sysgen_h_obj_ids():
for map in map_list:
name = map[0];
sysgen_h_data += \
"extern struct _k_mem_map_struct _k_mem_map_obj_%s;\n" % (name) + \
"#define %s ((kmemory_map_t)&_k_mem_map_obj_%s)\n" % (name, name)
"extern struct %s _k_mem_map_obj_%s;\n" % (map_struct, name)
sysgen_h_data += \
"#define %s ((%s)&_k_mem_map_obj_%s)\n" % (name, map_type, name)
# task object id
sysgen_h_data += "\n"
for task in task_list:
name = task[0];
sysgen_h_data += \
"extern struct k_task _k_task_obj_%s;\n" % (name) + \
"#define %s ((ktask_t)&_k_task_obj_%s)\n" % (name, name)
if kernel_type == 'micro':
sysgen_h_data += \
"extern struct k_task _k_task_obj_%s;\n" % (name) + \
"#define %s ((ktask_t)&_k_task_obj_%s)\n" % (name, name)
elif (kernel_type == 'unified'):
sysgen_h_data += \
"extern char _k_thread_obj_%s[];\n" % (name) + \
"#define %s ((k_tid_t)_k_thread_obj_%s)\n" % (name, name)
# event object id
# event object ids
sysgen_h_data += "\n"
for event in event_list:
# no need to expose the irq task events
if not (event[0].startswith("_TaskIrqEvt")):
name = event[0];
sysgen_h_data += \
"extern const kevent_t %s;\n" % (name)
if kernel_type == 'micro':
sysgen_h_data += "extern const %s %s;\n" % (event_type, name)
elif (kernel_type == 'unified'):
sysgen_h_data += \
"extern struct k_event _k_event_obj_%s;\n" % (name)
sysgen_h_data += \
"#define %s (&_k_event_obj_%s)\n\n" % (name, name)
# all other object ids
@@ -1023,10 +1242,10 @@ def sysgen_h_generate():
# SYSTEM GENERATOR MAINLINE
#
get_cmdline_args()
mdef_parse()
kernel_main_c_generate()
kernel_main_h_generate()
micro_private_types_h_generate()
if kernel_type == 'micro':
kernel_main_h_generate()
micro_private_types_h_generate()
sysgen_h_generate()