include: remove old kernel definitions

C++ support moved from nanokernel.h to kernel.h.

Change-Id: I5e1631941e26f4ab3f311b680267b743bab15e40
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Andrew Boie 2016-11-07 09:01:19 -08:00
commit e004dec958
23 changed files with 57 additions and 4223 deletions


@@ -409,24 +409,8 @@ extern void _arch_irq_enable(unsigned int irq);
extern void _arch_irq_disable(unsigned int irq);
#ifdef CONFIG_FP_SHARING
#ifdef CONFIG_KERNEL_V2
extern void k_float_enable(k_tid_t thread_id, unsigned int options);
extern void k_float_disable(k_tid_t thread_id);
#else
/**
* @brief Enable floating point hardware resources sharing
* Dynamically enable/disable the capability of a thread to share floating
* point hardware resources. The same "floating point" options accepted by
* fiber_fiber_start() are accepted by these APIs (i.e. K_FP_REGS, K_SSE_REGS).
*/
extern void fiber_float_enable(nano_thread_id_t thread_id,
unsigned int options);
extern void task_float_enable(nano_thread_id_t thread_id,
unsigned int options);
extern void fiber_float_disable(nano_thread_id_t thread_id);
extern void task_float_disable(nano_thread_id_t thread_id);
#endif /* CONFIG_KERNEL_V2 */
#endif /* CONFIG_FP_SHARING */
#include <stddef.h> /* for size_t */
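
Migration note: with the CONFIG_KERNEL_V2 guard gone, only the unified k_float_enable()/k_float_disable() pair remains. A minimal sketch of the surviving API, assuming k_current_get() and the K_FP_REGS option from the unified kernel headers (the wrapper names here are illustrative, not part of this change):

#include <kernel.h>

/* Enable FP register sharing for the calling thread; replaces the
 * deleted fiber_float_enable()/task_float_enable() pair. Only
 * meaningful on CONFIG_FP_SHARING builds. */
static void fp_enable_self(void)
{
	k_float_enable(k_current_get(), K_FP_REGS);
}

static void fp_disable_self(void)
{
	k_float_disable(k_current_get());
}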


@@ -611,14 +611,7 @@ int device_busy_check(struct device *chk_dev);
 * Synchronous calls API
 */
#ifdef CONFIG_KERNEL_V2
#include <kernel.h>
#else
#include <nanokernel.h>
#ifdef CONFIG_MICROKERNEL
#include <microkernel.h>
#endif
#endif
#include <stdbool.h>
/**


@@ -55,10 +55,6 @@ extern void _timer_idle_enter(int32_t ticks);
extern void _timer_idle_exit(void);
#endif /* CONFIG_TICKLESS_IDLE */
#ifndef CONFIG_KERNEL_V2
extern uint32_t _nano_get_earliest_deadline(void);
#endif /* CONFIG_KERNEL_V2 */
extern void _nano_sys_clock_tick_announce(int32_t ticks);
extern int sys_clock_device_ctrl(struct device *device,
@@ -74,32 +70,6 @@ extern int sys_clock_device_ctrl(struct device *device,
#endif
extern int32_t _sys_idle_elapsed_ticks;
#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_MICROKERNEL)
extern void (*_do_sys_clock_tick_announce)(kevent_t);
#define _sys_clock_tick_announce() _do_sys_clock_tick_announce(TICK_EVENT)
/**
* @brief Account for the tick due to the timer interrupt
*
* @return N/A
*/
static inline void _sys_clock_final_tick_announce(void)
{
/*
* Ticks are announced at interrupt level but processed later in
* the kernel server fiber. Increment '_sys_idle_elapsed_ticks' as
* some ticks may have previously been announced by _timer_idle_exit()
* (if tickless idle is enabled) but not yet processed.
*/
_sys_idle_elapsed_ticks++;
/* If no ticks were previously announced, announce the tick now. */
if (_sys_idle_elapsed_ticks == 1) {
_sys_clock_tick_announce();
}
}
#else
#define _sys_clock_tick_announce() \
	_nano_sys_clock_tick_announce(_sys_idle_elapsed_ticks)
@@ -117,7 +87,6 @@ static inline void _sys_clock_final_tick_announce(void)
	_sys_idle_elapsed_ticks = 1;
	_sys_clock_tick_announce();
}
#endif
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus


@@ -2378,4 +2378,59 @@ extern void _timer_expiration_handler(struct _timeout *t);
}
#endif
#if defined(CONFIG_CPLUSPLUS) && defined(__cplusplus)
/*
* Define new and delete operators.
* At this moment, the operators do nothing since objects are supposed
* to be statically allocated.
*/
inline void operator delete(void *ptr)
{
(void)ptr;
}
inline void operator delete[](void *ptr)
{
(void)ptr;
}
inline void *operator new(size_t size)
{
(void)size;
return NULL;
}
inline void *operator new[](size_t size)
{
(void)size;
return NULL;
}
/* Placement versions of operator new and delete */
inline void operator delete(void *ptr1, void *ptr2)
{
(void)ptr1;
(void)ptr2;
}
inline void operator delete[](void *ptr1, void *ptr2)
{
(void)ptr1;
(void)ptr2;
}
inline void *operator new(size_t size, void *ptr)
{
(void)size;
return ptr;
}
inline void *operator new[](size_t size, void *ptr)
{
(void)size;
return ptr;
}
#endif /* defined(CONFIG_CPLUSPLUS) && defined(__cplusplus) */
#endif /* _kernel__h_ */
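
The operators above are the C++ support relocated from nanokernel.h: heap new returns NULL and delete is a no-op, so objects must live in static storage and be constructed with the placement forms. A minimal sketch under those assumptions (Counter and counter_mem are illustrative; __aligned is assumed from toolchain.h):

/* C++ translation unit that includes kernel.h */
#include <kernel.h>

class Counter {
public:
	Counter() : value(0) { }
	int value;
};

/* static storage for the object; plain 'new Counter' would yield NULL */
static char counter_mem[sizeof(Counter)] __aligned(4);

static Counter *counter_init(void)
{
	/* placement new returns counter_mem, constructing the object
	 * in place inside the static buffer */
	return new (counter_mem) Counter();
}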


@@ -1382,6 +1382,7 @@ static inline int nano_delayed_work_submit_to_queue(struct nano_workqueue *wq,
 */
#define nano_work_submit k_work_submit
#if CONFIG_SYS_CLOCK_EXISTS
/**
 * @brief Submit a delayed work item to the system workqueue.
 *
@@ -1395,7 +1396,7 @@ static inline int nano_delayed_work_submit_to_queue(struct nano_workqueue *wq,
 */
#define nano_delayed_work_submit(work, ticks) \
	nano_delayed_work_submit_to_queue(&k_sys_work_q, work, ticks)
#endif
/* events */
#define kevent_t const struct k_alert *
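
The hunk above fences the delayed-work shim behind CONFIG_SYS_CLOCK_EXISTS, since delayed submission needs a system clock. A minimal sketch of the shim in use, assuming the legacy nano_delayed_work type alias from this compatibility layer and an already-initialized work item:

static struct nano_delayed_work flush_work;	/* assumed legacy alias */

static void schedule_flush(void)
{
	/* expands to nano_delayed_work_submit_to_queue(&k_sys_work_q, ...) */
	nano_delayed_work_submit(&flush_work, 10);	/* run in 10 ticks */
}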


@@ -19,53 +19,6 @@
#ifndef _MICROKERNEL_H
#define _MICROKERNEL_H
#ifdef __cplusplus
extern "C" {
#endif
/* nanokernel and generic kernel public APIs */
#include <nanokernel.h>
#if !defined(CONFIG_KERNEL_V2)
/*
* microkernel private APIs that are exposed via the public API
*
* THESE ITEMS SHOULD NOT BE REFERENCED EXCEPT BY THE KERNEL ITSELF!
*/
#define _USE_CURRENT_SEM (-1)
/* end of private APIs */
/**
* @brief Microkernel Public APIs
* @defgroup microkernel_services Microkernel Services
* @{
*/
#include <microkernel/base_api.h>
#include <microkernel/task.h>
#include <microkernel/ticks.h>
#include <microkernel/memory_map.h>
#include <microkernel/mutex.h>
#include <microkernel/mailbox.h>
#include <microkernel/fifo.h>
#include <microkernel/semaphore.h>
#include <microkernel/event.h>
#include <microkernel/memory_pool.h>
#include <microkernel/pipe.h>
/**
* @}
*/
#endif /* !CONFIG_KERNEL_V2 */
#ifdef __cplusplus
}
#endif
#endif /* _MICROKERNEL_H */


@@ -1,274 +0,0 @@
/* microkernel/base_api.h */
/*
* Copyright (c) 1997-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _BASE_API_H
#define _BASE_API_H
#include <stdbool.h>
#include <stdint.h>
#include <toolchain.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef int32_t ktask_t;
typedef uint32_t ktask_group_t;
typedef uint32_t kmutex_t;
typedef uint32_t kmemory_map_t;
typedef uint32_t kfifo_t;
typedef uint32_t kmbox_t;
typedef uint32_t kpipe_t;
typedef int32_t ksem_t;
typedef ksem_t *ksemg_t;
typedef uint32_t ktimer_t;
typedef uint32_t kpriority_t;
typedef uint32_t kmemory_pool_t;
typedef unsigned int kevent_t;
typedef uint32_t kirq_t;
typedef int (*kevent_handler_t)(int event);
#define RC_OK 0
#define RC_FAIL 1
#define RC_TIME 2
#define RC_ALIGNMENT 3
#define RC_INCOMPLETE 4
/** for mail sender or receiver parameter */
#define ANYTASK (-1)
/** this value terminates a semaphore list */
#define ENDLIST (-1)
struct k_args;
struct k_block {
kmemory_pool_t pool_id;
void *address_in_pool;
void *pointer_to_data;
uint32_t req_size;
};
struct k_msg {
/** Mailbox ID */
kmbox_t mailbox;
/** size of message (bytes) */
uint32_t size;
/** information field, free for user */
uint32_t info;
/** pointer to message data at sender side */
void *tx_data;
/** pointer to message data at receiver */
void *rx_data;
/** for async message posting */
struct k_block tx_block;
/** sending task */
ktask_t tx_task;
/** receiving task */
ktask_t rx_task;
/** internal use only */
union {
/** for 2-steps data transfer operation */
struct k_args *transfer;
/** semaphore to signal when asynchr. call */
ksem_t sema;
} extra;
};
/* Task control block */
struct k_task {
struct k_task *next;
struct k_task *prev;
kpriority_t priority;
ktask_t id;
uint32_t state;
uint32_t group;
void (*fn_start)(void);
char *workspace;
int worksize;
void (*fn_abort)(void);
struct k_args *args;
#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
struct k_task *__next;
#endif
};
/**
* @cond internal
*/
struct _k_mbox_struct {
struct k_args *writers;
struct k_args *readers;
int count;
#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
struct _k_mbox_struct *__next;
#endif
};
struct _k_mutex_struct {
ktask_t owner;
kpriority_t current_owner_priority;
kpriority_t original_owner_priority;
int level;
struct k_args *waiters;
int count;
int num_conflicts;
#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
struct _k_mutex_struct *__next;
#endif
};
/*
* Semaphore structure. Must be aligned on a 4-byte boundary, since this is what
* the microkernel server's command stack processing requires.
*/
struct _k_sem_struct {
struct k_args *waiters;
int level;
int count;
#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
struct _k_sem_struct *__next;
#endif
} __aligned(4);
struct _k_fifo_struct {
int Nelms;
int element_size;
char *base;
char *end_point;
char *enqueue_point;
char *dequeue_point;
struct k_args *waiters;
int num_used;
int high_watermark;
int count;
#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
struct _k_fifo_struct *__next;
#endif
};
/* Pipe-related structures */
#define MAXNBR_PIPE_MARKERS 10 /* 1==disable parallel transfers */
struct _k_pipe_marker {
unsigned char *pointer; /* NULL == non valid marker == free */
int size;
bool buffer_xfer_busy;
int prev; /* -1 == no predecessor */
int next; /* -1 == no successor */
};
struct _k_pipe_marker_list {
int num_markers; /* Only used if STORE_NBR_MARKERS is defined */
int first_marker;
int last_marker;
int post_wrap_around_marker; /* -1 means no post wrap around markers */
struct _k_pipe_marker markers[MAXNBR_PIPE_MARKERS];
};
typedef enum {
BUFF_EMPTY, /* buffer is empty, disregarding the pending data Xfers
* (reads) still finishing up
*/
BUFF_FULL, /* buffer is full, disregarding the pending data Xfers
* (writes) still finishing up
*/
BUFF_OTHER
} _K_PIPE_BUFF_STATE;
struct _k_pipe_desc {
int buffer_size;
unsigned char *begin_ptr;
unsigned char *write_ptr;
unsigned char *read_ptr;
unsigned char *write_guard; /* can be NULL --> invalid */
unsigned char *read_guard; /* can be NULL --> invalid */
int free_space_count;
int free_space_post_wrap_around;
int num_pending_reads;
int available_data_count;
int available_data_post_wrap_around; /* AWA == After Wrap Around */
int num_pending_writes;
bool wrap_around_write;
bool wrap_around_read;
_K_PIPE_BUFF_STATE buffer_state;
struct _k_pipe_marker_list write_markers;
struct _k_pipe_marker_list read_markers;
unsigned char *end_ptr;
unsigned char *original_end_ptr;
};
struct _k_pipe_struct {
int buffer_size; /* size in bytes, must be first for sysgen */
char *Buffer; /* pointer to statically allocated buffer */
struct k_args *writers;
struct k_args *readers;
struct _k_pipe_desc desc;
int count;
#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
struct _k_pipe_struct *__next;
#endif
};
/* Memory map related structure */
struct _k_mem_map_struct {
int Nelms;
int element_size;
char *base;
char *free;
struct k_args *waiters;
int num_used;
int high_watermark;
int count;
#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
struct _k_mem_map_struct *__next;
#endif
};
/*
* Event structure. Must be aligned on a 4-byte boundary, since this is what
* the microkernel server's command stack processing requires.
*/
struct _k_event_struct {
int status;
kevent_handler_t func;
struct k_args *waiter;
int count;
#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
struct _k_event_struct *__next;
#endif
} __aligned(4);
/**
* @endcond
*/
typedef enum {
_0_TO_N = 0x00000001,
_1_TO_N = 0x00000002,
_ALL_N = 0x00000004
} K_PIPE_OPTION;
#ifdef __cplusplus
}
#endif
#endif /* _BASE_API_H */


@@ -1,48 +0,0 @@
/* command_packet.h - command packet header file */
/*
* Copyright (c) 2012-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @brief Microkernel command packet library
*
* A command packet is an opaque data structure that maps to a k_args without
* exposing its innards. This library allows a subsystem that needs to define
* a command packet the ability to do so.
*/
#ifndef _COMMAND_PACKET_H
#define _COMMAND_PACKET_H
#ifdef __cplusplus
extern "C" {
#endif
#include <microkernel/base_api.h>
/* define size of command packet (without exposing its internal structure) */
#define CMD_PKT_SIZE_IN_WORDS (19)
/* define command packet type */
typedef uint32_t cmdPkt_t[CMD_PKT_SIZE_IN_WORDS];
#ifdef __cplusplus
}
#endif
#endif /* _COMMAND_PACKET_H */


@@ -1,153 +0,0 @@
/*
* Copyright (c) 1997-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Event header file.
*/
#ifndef EVENT_H
#define EVENT_H
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Microkernel Events
* @defgroup microkernel_event Microkernel Events
* @ingroup microkernel_services
* @{
*/
#include <microkernel/base_api.h>
/** Well-known events. */
extern const kevent_t TICK_EVENT;
/**
*
* @brief Signal an event from an ISR.
*
* This routine does @em not validate the specified event number.
*
* @param event Event to signal.
*
* @return N/A
*/
extern void isr_event_send(kevent_t event);
/**
*
* @brief Signal an event from a fiber.
*
* This routine does @em not validate the specified event number.
*
* @param event Event to signal.
*
* @return N/A
*/
extern void fiber_event_send(kevent_t event);
/**
*
* @brief Set event handler request.
*
* This routine specifies the event handler that runs in the context of the
* microkernel server fiber when the associated event is signaled. Specifying
* a non-NULL handler installs a new handler, while specifying a NULL event
* handler removes the existing event handler.
*
* A new event handler cannot be installed if one already exists for that event.
* The old handler must be removed first. However, the NULL event handler can be
* replaced with itself.
*
* @param event Event upon which to register.
* @param handler Function pointer to handler.
*
* @retval RC_FAIL If an event handler exists or the event number is invalid.
* @retval RC_OK Otherwise.
*/
extern int task_event_handler_set(kevent_t event, kevent_handler_t handler);
/**
*
* @brief Signal an event request.
*
* This routine signals the specified event from a task. If an event handler
* is installed for that event, it will run. If no event handler is installed,
* any task waiting on the event is released.
*
* @param event Event to signal.
*
* @retval RC_FAIL If the event number is invalid.
* @retval RC_OK Otherwise.
*/
extern int task_event_send(kevent_t event);
/**
*
* @brief Test for an event request with timeout.
*
* This routine tests an event to see if it has been signaled.
*
* @param event Event to test.
* @param timeout Determines the action to take when the event has not yet
* been signaled.
* For TICKS_NONE, return immediately.
* For TICKS_UNLIMITED, wait as long as necessary.
* Otherwise, wait up to the specified number of ticks before
* timing out.
*
* @retval RC_OK Successfully received signaled event
* @retval RC_TIME Timed out while waiting for signaled event
* @retval RC_FAIL Failed to immediately receive signaled event when
* timeout = TICKS_NONE
*/
extern int task_event_recv(kevent_t event, int32_t timeout);
/**
* @}
*/
#define _K_EVENT_INITIALIZER(handler) \
{ \
.status = 0, \
.func = (kevent_handler_t)handler, \
.waiter = NULL, \
.count = 0, \
}
/**
* @brief Define a private microkernel event
*
* This declares and initializes a private event. The new event
* can be passed to the microkernel event functions.
*
* @param name Name of the event
* @param handler Function to handle the event (can be NULL)
*/
#define DEFINE_EVENT(name, handler) \
struct _k_event_struct _k_event_obj_##name \
__in_section(_k_event_list, event, name) = \
_K_EVENT_INITIALIZER(handler); \
const kevent_t name = (kevent_t)&_k_event_obj_##name;
#ifdef __cplusplus
}
#endif
#endif /* EVENT_H */
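
For reviewers tracking what is being deleted: a minimal sketch of this microkernel event API in use, built only from the declarations above (flush_event and the producer/consumer split are illustrative):

#include <microkernel.h>

DEFINE_EVENT(flush_event, NULL);	/* NULL handler: waiting tasks released */

static void producer_isr(void)
{
	isr_event_send(flush_event);
}

static void consumer(void)
{
	if (task_event_recv(flush_event, TICKS_UNLIMITED) == RC_OK) {
		/* event was signaled */
	}
}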


@@ -1,153 +0,0 @@
/*
* Copyright (c) 1997-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
*
* @brief Microkernel FIFO header file.
*
*/
/**
* @brief Microkernel FIFOs
* @defgroup microkernel_fifo Microkernel FIFOs
* @ingroup microkernel_services
* @{
*/
#ifndef FIFO_H
#define FIFO_H
#include <sections.h>
/* externs */
#ifdef __cplusplus
extern "C" {
#endif
/**
* @cond internal
*/
extern int _task_fifo_ioctl(kfifo_t queue, int op);
/**
* @brief Initializer for microkernel FIFO
*/
#define __K_FIFO_DEFAULT(depth, width, buffer) \
{ \
.Nelms = depth,\
.element_size = width,\
.base = buffer,\
.end_point = (buffer + (depth * width)),\
.enqueue_point = buffer,\
.dequeue_point = buffer,\
.waiters = NULL,\
.num_used = 0,\
.high_watermark = 0,\
.count = 0,\
}
/**
* @endcond
*/
/**
* @brief FIFO enqueue request.
*
* This routine adds an item to the FIFO queue. When the FIFO is full,
* the routine will wait either for space to become available, or until the
* specified time limit is reached.
*
* @param queue FIFO queue.
* @param data Pointer to data to add to queue.
* @param timeout Determines the action to take when the FIFO is full.
* For TICKS_NONE, return immediately.
* For TICKS_UNLIMITED, wait as long as necessary.
* Otherwise, wait up to the specified number of ticks before timing out.
*
* @retval RC_OK Successfully added item to FIFO.
* @retval RC_TIME Timed out while waiting to add item to FIFO.
* @retval RC_FAIL Failed to immediately add item to FIFO when
* @a timeout = TICKS_NONE.
* @sa TICKS_NONE, TICKS_UNLIMITED
*/
extern int task_fifo_put(kfifo_t queue, void *data, int32_t timeout);
/**
* @brief FIFO dequeue request.
*
* This routine fetches the oldest item from the FIFO queue. When the FIFO is found empty,
* the routine will wait either until an item is added to the FIFO queue or until
* the specified time limit is reached.
*
* @param queue FIFO queue.
* @param data Pointer to storage location of the FIFO entry.
* @param timeout Affects the action to take when the FIFO is empty.
* For TICKS_NONE, return immediately.
* For TICKS_UNLIMITED, wait as long as necessary.
* Otherwise wait up to the specified number of ticks before timing out.
*
* @retval RC_OK Successfully fetched item from FIFO.
* @retval RC_TIME Timed out while waiting to fetch item from FIFO.
* @retval RC_FAIL Failed to immediately fetch item from FIFO when
* @a timeout = TICKS_NONE.
* @sa TICKS_NONE, TICKS_UNLIMITED
*/
extern int task_fifo_get(kfifo_t queue, void *data, int32_t timeout);
/**
* @brief Query the number of FIFO entries.
*
* @param q FIFO queue.
*
* @return # of FIFO entries on query.
*/
#define task_fifo_size_get(q) _task_fifo_ioctl(q, 0)
/**
* @brief Purge the FIFO of all its entries.
*
* @return RC_OK on purge.
*/
#define task_fifo_purge(q) _task_fifo_ioctl(q, 1)
/**
* @brief Define a private microkernel FIFO.
*
* This declares and initializes a private FIFO. The new FIFO
* can be passed to the microkernel FIFO functions.
*
* @param name Name of the FIFO.
* @param depth Depth of the FIFO.
* @param width Width of the FIFO.
*/
#define DEFINE_FIFO(name, depth, width) \
static char __noinit __##name##_buffer[(depth * width)]; \
struct _k_fifo_struct _k_fifo_obj_##name = \
__K_FIFO_DEFAULT(depth, width, __##name##_buffer); \
const kfifo_t name = (kfifo_t)&_k_fifo_obj_##name;
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* FIFO_H */
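
Likewise, a minimal sketch of the deleted FIFO API, using only the declarations above (names and sizes are illustrative):

#include <microkernel.h>

DEFINE_FIFO(sample_fifo, 8, sizeof(int));	/* 8 int-sized entries */

static void producer(void)
{
	int sample = 42;

	(void)task_fifo_put(sample_fifo, &sample, TICKS_NONE);
}

static void consumer(void)
{
	int sample;

	if (task_fifo_get(sample_fifo, &sample, TICKS_UNLIMITED) == RC_OK) {
		/* sample now holds the oldest entry */
	}
}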


@@ -1,176 +0,0 @@
/*
* Copyright (c) 1997-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file mailbox.h
* @brief Microkernel mailbox header file
*/
#ifndef _MAILBOX_H
#define _MAILBOX_H
/**
* @brief Microkernel Mailboxes
* @defgroup microkernel_mailbox Microkernel Mailboxes
* @ingroup microkernel_services
* @{
*/
/* externs */
#ifdef __cplusplus
extern "C" {
#endif
/**
* @cond internal
*/
extern void _task_mbox_block_put(kmbox_t mbox,
kpriority_t prio,
struct k_msg *M,
ksem_t sem);
extern void _task_mbox_data_get(struct k_msg *M);
/**
* @brief Initializer for microkernel mailbox
*/
#define __K_MAILBOX_DEFAULT \
{ \
.writers = NULL, \
.readers = NULL, \
.count = 0, \
}
/**
* @endcond
*/
/**
* @brief Send a message to a mailbox.
*
* This routine sends a message to a mailbox and looks for a matching receiver.
*
* @param mbox Mailbox.
* @param prio Priority of data transfer.
* @param M Pointer to message to send.
* @param timeout Determines the action to take when there is no waiting receiver.
* For TICKS_NONE, return immediately.
* For TICKS_UNLIMITED, wait as long as necessary.
* Otherwise, wait up to the specified number of ticks before timing out.
*
* @return RC_OK Successfully delivered message.
* @return RC_TIME Timed out while waiting to deliver message.
* @return RC_FAIL Failed to immediately deliver message when
* @a timeout = TICKS_NONE.
* @sa TICKS_NONE, TICKS_UNLIMITED
*
*/
extern int task_mbox_put(kmbox_t mbox, kpriority_t prio,
struct k_msg *M, int32_t timeout);
/**
* @brief Get @b struct @b k_msg message header structure information from
* a mailbox and wait with timeout.
*
* @param mbox Mailbox.
* @param M Pointer to message.
* @param timeout Determines the action to take when there is no waiting receiver.
* For TICKS_NONE, return immediately.
* For TICKS_UNLIMITED, wait as long as necessary.
* Otherwise, wait up to the specified number of ticks before timing out.
*
* @return RC_OK Successfully received message.
* @return RC_TIME Timed out while waiting to receive message.
* @return RC_FAIL Failed to immediately receive message when
* @a timeout = TICKS_NONE.
* @sa TICKS_NONE, TICKS_UNLIMITED
*/
extern int task_mbox_get(kmbox_t mbox, struct k_msg *M, int32_t timeout);
/**
* @brief Send a message asynchronously to a mailbox.
*
* This routine sends a message to a mailbox and does not wait for a matching
* receiver. No exchange header is returned to the sender. When the data
* has been transferred to the receiver, the semaphore signaling is performed.
*
* @param b Mailbox to which to send message.
* @param p Priority of data transfer.
* @param m Pointer to message to send.
* @param s Semaphore to signal when transfer is complete.
*
* @return N/A
*/
#define task_mbox_block_put(b, p, m, s) _task_mbox_block_put(b, p, m, s)
/**
* @brief Get message data.
*
* Call this routine for one of two reasons:
* 1. To transfer data when the call to @a task_mbox_get() yields an existing
* field in the @b struct @b k_msg header structure.
* 2. To wake up and release a transmitting task currently blocked from calling
* @b task_mbox_put[wait|wait_timeout]().
*
* @param m Message from which to get data.
*
* @return N/A
*/
#define task_mbox_data_get(m) _task_mbox_data_get(m)
/**
* @brief Retrieve message data into a block, with time-limited waiting.
*
* @param M Message from which to get data.
* @param block Block.
* @param pool_id Memory pool name.
* @param timeout Determines the action to take when no waiting sender exists.
* For TICKS_NONE, return immediately.
* For TICKS_UNLIMITED, wait as long as necessary.
* Otherwise, wait up to the specified number of ticks before timing out.
*
* @retval RC_OK Successful retrieval of message data.
* @retval RC_TIME Timed out while waiting to receive message data.
* @retval RC_FAIL Failed to immediately receive message data when
* @a timeout = TICKS_NONE.
* @sa TICKS_NONE, TICKS_UNLIMITED
*/
extern int task_mbox_data_block_get(struct k_msg *M, struct k_block *block,
kmemory_pool_t pool_id, int32_t timeout);
/**
* @brief Define a private microkernel mailbox.
*
* This routine declares and initializes a private mailbox. The new mailbox
* can be passed to the microkernel mailbox functions.
*
* @param name Name of the mailbox
*/
#define DEFINE_MAILBOX(name) \
struct _k_mbox_struct _k_mbox_obj_##name = __K_MAILBOX_DEFAULT; \
const kmbox_t name = (kmbox_t)&_k_mbox_obj_##name;
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* _MAILBOX_H */
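
A minimal sketch of the deleted mailbox API in use, built from the struct k_msg fields defined in base_api.h above (names, priority, and buffer sizes are illustrative):

#include <microkernel.h>

DEFINE_MAILBOX(data_mbox);

static void sender(void)
{
	char payload[16] = "hello";
	struct k_msg msg = {
		.mailbox = data_mbox,
		.size = sizeof(payload),
		.tx_data = payload,
		.rx_task = ANYTASK,	/* any task may receive */
	};

	(void)task_mbox_put(data_mbox, 1, &msg, TICKS_UNLIMITED);
}

static void receiver(void)
{
	char buffer[16];
	struct k_msg msg = {
		.mailbox = data_mbox,
		.size = sizeof(buffer),
		.rx_data = buffer,
		.tx_task = ANYTASK,	/* accept from any sender */
	};

	if (task_mbox_get(data_mbox, &msg, TICKS_UNLIMITED) == RC_OK) {
		/* buffer now holds up to msg.size bytes of message data */
	}
}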


@@ -1,128 +0,0 @@
/*
* Copyright (c) 1997-2012, 2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* @file
* @brief Memory map kernel services.
*/
/**
* @brief Microkernel Memory Maps
* @defgroup microkernel_memorymap Microkernel Memory Maps
* @ingroup microkernel_services
* @{
*/
#ifndef _MEMORY_MAP_H
#define _MEMORY_MAP_H
#ifdef __cplusplus
extern "C" {
#endif
#include <sections.h>
/**
* @cond internal
*/
extern void _task_mem_map_free(kmemory_map_t mmap, void **mptr);
/**
* @brief Initialize a memory map struct
*
* @param blocks Number of blocks.
* @param block_size Block Size (in bytes).
*/
#define __K_MEM_MAP_INITIALIZER(blocks, block_size, buffer) \
{ \
.Nelms = blocks, \
.element_size = block_size, \
.base = buffer, \
}
/**
* @endcond
*/
/**
* @brief Read the number of used blocks in a memory map.
*
* This routine returns the number of blocks in use for the memory map.
*
* @param map Memory map name.
*
* @return Number of used blocks.
*/
extern int task_mem_map_used_get(kmemory_map_t map);
/**
* @brief Return memory map block.
*
* This routine returns a block to the specified memory map.
*
* @param m Memory map name.
* @param p Memory block address.
*
* @return N/A
*/
#define task_mem_map_free(m, p) _task_mem_map_free(m, p)
/**
* @brief Allocate memory map block.
*
* This routine allocates a block from memory map @a mmap, and saves the
* block's address in the area indicated by @a mptr. When no block is available,
* the routine waits until either one can be allocated, or until the specified
* time limit is reached.
*
* @param mmap Memory map name.
* @param mptr Pointer to memory block address area.
* @param timeout Determines the action to take when the memory map is exhausted.
* For TICKS_NONE, return immediately.
* For TICKS_UNLIMITED, wait as long as necessary.
* Otherwise, wait up to the specified number of ticks before timing out.
*
* @retval RC_OK Successfully allocated memory block.
* @retval RC_TIME Timed out while waiting for memory block.
* @retval RC_FAIL Failed to immediately allocate memory block when
* @a timeout = TICKS_NONE.
* @sa TICKS_NONE, TICKS_UNLIMITED
*/
extern int task_mem_map_alloc(kmemory_map_t mmap, void **mptr, int32_t timeout);
/**
* @brief Define a private microkernel memory map.
*
* @param name Memory map name.
* @param blocks Number of blocks.
* @param block_size Size of each block, in bytes.
*/
#define DEFINE_MEM_MAP(name, blocks, block_size) \
char __noinit __mem_map_buffer_##name[(blocks * block_size)]; \
struct _k_mem_map_struct _k_mem_map_obj_##name = \
__K_MEM_MAP_INITIALIZER(blocks, block_size, \
__mem_map_buffer_##name); \
const kmemory_map_t name \
__in_section(_k_mem_map_ptr, private, mem_map) = \
(kmemory_map_t)&_k_mem_map_obj_##name;
#ifdef __cplusplus
}
#endif
#endif /* _MEMORY_MAP_H */
/**
* @}
*/
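
A minimal sketch of the deleted memory map API, using the DEFINE_MEM_MAP and alloc/free calls declared above (name and sizes are illustrative):

#include <microkernel.h>

DEFINE_MEM_MAP(frame_map, 4, 64);	/* 4 blocks of 64 bytes each */

static void use_one_block(void)
{
	void *block;

	if (task_mem_map_alloc(frame_map, &block, TICKS_NONE) == RC_OK) {
		/* ... use the 64-byte block ... */
		task_mem_map_free(frame_map, &block);
	}
}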


@@ -1,126 +0,0 @@
/*
* Copyright (c) 1997-2012, 2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Memory Pools
*/
#ifndef _MEMORY_POOL_H
#define _MEMORY_POOL_H
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Memory Pools
* @defgroup microkernel_memorypool Microkernel Memory Pools
* @ingroup microkernel_services
* @{
*/
/**
* @brief Return memory pool block.
*
* This routine returns a block to the memory pool from which it was allocated.
*
* @param b Pointer to block descriptor.
*
* @return N/A
*/
extern void task_mem_pool_free(struct k_block *b);
/**
* @brief Defragment memory pool.
*
* This routine concatenates unused blocks that can be merged in memory pool
* @a p.
*
* Doing a full defragmentation of a memory pool before allocating a set
* of blocks may be more efficient than having the pool do an implicit
* partial defragmentation each time a block is allocated.
*
* @param p Memory pool name.
*
* @return N/A
*/
extern void task_mem_pool_defragment(kmemory_pool_t p);
/**
* @brief Allocate memory pool block.
*
* This routine allocates a block of at least @a reqsize bytes from memory pool
* @a pool_id, and saves its information in block descriptor @a blockptr. When no
* such block is available, the routine waits either until one can be allocated,
* or until the specified time limit is reached.
*
* @param blockptr Pointer to block descriptor.
* @param pool_id Memory pool name.
* @param reqsize Requested block size, in bytes.
* @param timeout Determines the action to take when the memory pool is exhausted.
* For TICKS_NONE, return immediately.
* For TICKS_UNLIMITED, wait as long as necessary.
* Otherwise, wait up to the specified number of ticks before timing out.
*
* @retval RC_OK Successfully allocated memory block
* @retval RC_TIME Timed out while waiting for memory block
* @retval RC_FAIL Failed to immediately allocate memory block when
* @a timeout = TICKS_NONE
* @sa TICKS_NONE, TICKS_UNLIMITED
*/
extern int task_mem_pool_alloc(struct k_block *blockptr, kmemory_pool_t pool_id,
int reqsize, int32_t timeout);
/**
* @brief Allocate memory
*
* This routine provides traditional malloc semantics and is a wrapper on top
* of microkernel pool alloc API.
* It returns an aligned memory address which points to the start of a memory
* block of at least \p size bytes.
* This memory comes from heap memory pool, consequently the app should
* specify its intention to use a heap pool via the HEAP_SIZE keyword in
* MDEF file, if it uses this API.
* When not enough free memory is available in the heap pool, it returns NULL
*
* @param size Size of memory requested by the caller.
*
* @retval address of the block if successful otherwise returns NULL
*/
extern void *task_malloc(uint32_t size);
/**
* @brief Free memory allocated through task_malloc
*
* This routine provides traditional free semantics and is intended to free
* memory allocated using task_malloc API.
*
* @param ptr pointer to be freed
*
* @return NA
*/
extern void task_free(void *ptr);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* _MEMORY_POOL_H */
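
A minimal sketch of the deleted pool API. Pools themselves were declared in the project MDEF file (and task_malloc() needed the HEAP_SIZE keyword there, as the header notes), so pool_id here is a hypothetical MDEF-defined name:

#include <microkernel.h>

static void use_pool_block(kmemory_pool_t pool_id)
{
	struct k_block block;

	if (task_mem_pool_alloc(&block, pool_id, 256, TICKS_NONE) == RC_OK) {
		/* block.pointer_to_data addresses at least 256 bytes */
		task_mem_pool_free(&block);
	}
}

static void use_heap(void)
{
	void *mem = task_malloc(128);	/* requires HEAP_SIZE in the MDEF */

	if (mem != NULL) {
		task_free(mem);
	}
}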


@@ -1,109 +0,0 @@
/*
* Copyright (c) 1997-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Microkernel mutex header file
*/
/**
* @brief Microkernel Mutexes
* @defgroup microkernel_mutex Microkernel Mutexes
* @ingroup microkernel_services
* @{
*/
#ifndef MUTEX_H
#define MUTEX_H
#ifdef __cplusplus
extern "C" {
#endif
#include <microkernel/base_api.h>
extern void _task_mutex_unlock(kmutex_t mutex);
/*
* Initializer for mutexes
*/
#define __MUTEX_DEFAULT \
{ \
.owner = ANYTASK, \
.current_owner_priority = 64, \
.original_owner_priority = 64, \
.level = 0, \
.waiters = NULL, \
.count = 0, \
.num_conflicts = 0, \
}
/**
* @brief Lock mutex.
*
* This routine locks mutex @a mutex. When the mutex is locked by another task,
* the routine will either wait until it becomes available, or until a specified
* time limit is reached.
*
* A task is permitted to lock a mutex it has already locked; in such a case,
* this routine immediately succeeds.
*
* @param mutex Mutex name.
* @param timeout Determine the action to take when the mutex is already locked.
* For TICKS_NONE, return immediately.
* For TICKS_UNLIMITED, wait as long as necessary.
* Otherwise, wait up to the specified number of ticks before timing out.
*
* @retval RC_OK Successfully locked mutex.
* @retval RC_TIME Timed out while waiting for mutex.
* @retval RC_FAIL Failed to immediately lock mutex when
* @a timeout = TICKS_NONE.
* @sa TICKS_NONE, TICKS_UNLIMITED
*/
extern int task_mutex_lock(kmutex_t mutex, int32_t timeout);
/**
* @brief Unlock mutex.
*
* This routine unlocks mutex @a m. The mutex must already be locked by the
* requesting task.
*
* The mutex cannot be claimed by another task until it has been unlocked by
* the requesting task as many times as it was locked by that task.
*
* @param m Mutex name.
*
* @return N/A
*/
#define task_mutex_unlock(m) _task_mutex_unlock(m)
/**
* @brief Define a private mutex.
*
* @param name Mutex name.
*/
#define DEFINE_MUTEX(name) \
struct _k_mutex_struct _k_mutex_obj_##name = __MUTEX_DEFAULT; \
const kmutex_t name = (kmutex_t)&_k_mutex_obj_##name;
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* MUTEX_H */
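
A minimal sketch of the deleted mutex API in use (data_mutex is illustrative):

#include <microkernel.h>

DEFINE_MUTEX(data_mutex);

static void critical_section(void)
{
	if (task_mutex_lock(data_mutex, TICKS_UNLIMITED) == RC_OK) {
		/* ... exclusive access to shared data ... */
		task_mutex_unlock(data_mutex);
	}
}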


@@ -1,158 +0,0 @@
/* microkernel/pipe.h */
/*
* Copyright (c) 1997-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PIPE_H
#define PIPE_H
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Microkernel Pipes
* @defgroup microkernel_pipe Microkernel Pipes
* @ingroup microkernel_services
* @{
*/
#include <sections.h>
/**
* @cond internal
*/
/**
* @internal
* @brief Initialize a pipe struct.
*
* @param size Size of pipe buffer.
* @param buffer Pointer to the buffer.
* @endinternal
*/
#define __K_PIPE_INITIALIZER(size, buffer) \
{ \
.buffer_size = size, \
.Buffer = buffer, \
}
/**
* @endcond
*/
/**
* @brief Pipe write request.
*
* Attempt to write data from a memory-buffer area to the
* specified pipe with a timeout option.
*
* @param id Pipe ID.
* @param buffer Buffer.
* @param bytes_to_write Number of bytes to write.
* @param bytes_written Pointer to number of bytes written.
* @param options Pipe options.
* @param timeout Determines the action to take when the pipe is already full.
* For TICKS_NONE, return immediately.
* For TICKS_UNLIMITED, wait as long as necessary.
* Otherwise, wait up to the specified number of ticks before timing out.
*
* @retval RC_OK Successfully wrote data to pipe.
* @retval RC_ALIGNMENT Data is improperly aligned.
* @retval RC_INCOMPLETE Only some of the data was written to the pipe when
* @a options = _ALL_N.
* @retval RC_TIME Timed out while waiting to write to pipe.
* @retval RC_FAIL Failed to immediately write to pipe when
* @a timeout = TICKS_NONE
* @sa TICKS_NONE, TICKS_UNLIMITED
*/
extern int task_pipe_put(kpipe_t id, void *buffer, int bytes_to_write,
int *bytes_written, K_PIPE_OPTION options, int32_t timeout);
/**
* @brief Pipe read request.
*
* Attempt to read data into a memory buffer area from the
* specified pipe with a timeout option.
*
* @param id Pipe ID.
* @param buffer Buffer.
* @param bytes_to_read Number of bytes to read.
* @param bytes_read Pointer to number of bytes read.
* @param options Pipe options.
* @param timeout Determines the action to take when the pipe is already full.
* For TICKS_NONE, return immediately.
* For TICKS_UNLIMITED, wait as long as necessary.
* Otherwise, wait up to the specified number of ticks before timing out.
*
* @retval RC_OK Successfully read data from pipe.
* @retval RC_ALIGNMENT Data is improperly aligned.
* @retval RC_INCOMPLETE Only some of the data was read from the pipe when
* @a options = _ALL_N.
* @retval RC_TIME Timed out waiting to read from pipe.
* @retval RC_FAIL Failed to immediately read from pipe when
* @a timeout = TICKS_NONE.
* @sa TICKS_NONE, TICKS_UNLIMITED
*/
extern int task_pipe_get(kpipe_t id, void *buffer, int bytes_to_read,
int *bytes_read, K_PIPE_OPTION options, int32_t timeout);
extern int _task_pipe_block_put(kpipe_t id,
struct k_block block,
int size,
ksem_t sema);
/**
* @brief Asynchronous pipe write request.
*
* This routine attempts to write data from a memory pool block to the
* specified pipe. (Note that partial transfers and timeouts are not
* supported, unlike the case for synchronous write requests.)
*
* @param id Pipe ID.
* @param block Block.
* @param size Size of data to be transferred.
* @param sema Semaphore ID.
*
* @return RC_OK, RC_FAIL, or RC_ALIGNMENT
*/
#define task_pipe_block_put(id, block, size, sema) \
_task_pipe_block_put(id, block, size, sema)
/**
* @brief Define a private microkernel pipe.
*
* @param name Name of the pipe.
* @param size Size of the pipe buffer, in bytes.
*/
#define DEFINE_PIPE(name, size) \
char __noinit __pipe_buffer_##name[size]; \
struct _k_pipe_struct _k_pipe_obj_##name = \
__K_PIPE_INITIALIZER(size, __pipe_buffer_##name); \
const kpipe_t name \
__in_section(_k_pipe_ptr, private, pipe) = \
(kpipe_t)&_k_pipe_obj_##name;
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* PIPE_H */
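
A minimal sketch of the deleted pipe API, using the _ALL_N option from base_api.h above (name and sizes are illustrative):

#include <microkernel.h>

DEFINE_PIPE(byte_pipe, 256);	/* 256-byte pipe buffer */

static void stream_out(void)
{
	char data[32] = { 0 };
	int written;

	/* _ALL_N: succeed only if the whole buffer is transferred */
	(void)task_pipe_put(byte_pipe, data, sizeof(data), &written,
			    _ALL_N, TICKS_UNLIMITED);
}

static void stream_in(void)
{
	char data[32];
	int read_count;

	(void)task_pipe_get(byte_pipe, data, sizeof(data), &read_count,
			    _ALL_N, TICKS_UNLIMITED);
}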


@@ -1,197 +0,0 @@
/*
* Copyright (c) 1997-2010, 2012-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
*
* @brief Microkernel semaphore header file.
*/
#ifndef _SEMAPHORE_H
#define _SEMAPHORE_H
/**
* @brief Microkernel Semaphores
* @defgroup microkernel_semaphore Microkernel Semaphores
* @ingroup microkernel_services
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <microkernel/base_api.h>
extern void _k_sem_struct_value_update(int n, struct _k_sem_struct *S);
/**
* @brief Initializer for a semaphore.
*/
#define __K_SEMAPHORE_DEFAULT \
{ \
.waiters = NULL, \
.level = 0, \
.count = 0, \
}
/**
*
* @brief Give semaphore from an ISR.
*
* This routine gives semaphore @a sema from an ISR, rather than a task.
*
* @param sema Semaphore name.
*
* @return N/A
*/
extern void isr_sem_give(ksem_t sema);
/**
*
* @brief Give semaphore from a fiber.
*
* This routine gives semaphore @a sema from a fiber, rather than a task.
*
* @param sema Semaphore name.
*
* @return N/A
*/
extern void fiber_sem_give(ksem_t sema);
/**
*
* @brief Give semaphore.
*
* This routine gives semaphore @a sema.
*
* @param sema Semaphore name.
*
* @return N/A
*/
extern void task_sem_give(ksem_t sema);
/**
*
* @brief Give a group of semaphores.
*
* This routine gives each semaphore in a semaphore group @a semagroup.
* This method is faster than giving the semaphores individually, and
* ensures that all the semaphores are given before any waiting tasks run.
*
* @param semagroup Array of semaphore names - terminated by ENDLIST.
*
* @return N/A
*/
extern void task_sem_group_give(ksemg_t semagroup);
/**
*
* @brief Read a semaphore's count.
*
* This routine reads the current count of the semaphore @a sema.
*
* @param sema Semaphore name.
*
* @return Semaphore count.
*/
extern int task_sem_count_get(ksem_t sema);
/**
*
* @brief Reset the semaphore's count.
*
* This routine resets the count of the semaphore @a sema to zero.
*
* @param sema Semaphore name.
*
* @return N/A
*/
extern void task_sem_reset(ksem_t sema);
/**
*
* @brief Reset a group of semaphores.
*
* This routine resets the count for each semaphore in the semaphore group
* @a semagroup to zero. This method is faster than resetting the semaphores
* individually.
*
* @param semagroup Array of semaphore names - terminated by ENDLIST.
*
* @return N/A
*/
extern void task_sem_group_reset(ksemg_t semagroup);
/**
*
* @brief Take a semaphore or fail.
*
* This routine takes the semaphore @a sema. If the semaphore's count is
* zero the routine immediately returns a failure indication.
*
* @param sema Semaphore name.
* @param timeout Determines the action to take when the semaphore is unavailable.
* For TICKS_NONE, return immediately.
* For TICKS_UNLIMITED, wait as long as necessary.
* Otherwise, wait up to the specified number of ticks before timing out.
*
* @retval RC_OK Successfully took semaphore
* @retval RC_TIME Timed out while waiting for semaphore
* @retval RC_FAIL Failed to immediately take semaphore when
* @a timeout = TICKS_NONE
*
* @sa TICKS_NONE, TICKS_UNLIMITED must be added.
*/
extern int task_sem_take(ksem_t sema, int32_t timeout);
/**
*
* @brief Wait for a semaphore from the semaphore group.
*
* This routine waits for the @a timeout ticks to take a semaphore from the
* semaphore group @a group.
*
* @param group Array of semaphore names - terminated by ENDLIST.
* @param timeout Determines the action to take when the semaphore is unavailable.
* For TICKS_NONE, return immediately.
* For TICKS_UNLIMITED, wait as long as necessary.
* Otherwise, wait up to the specified number of ticks before timing out.
*
* @retval sema Name of the semaphore that was taken if successful.
* @retval ENDLIST Otherwise.
*
* @sa TICKS_NONE, TICKS_UNLIMITED must be added.
*/
extern ksem_t task_sem_group_take(ksemg_t group, int32_t timeout);
/**
* @brief Define a private microkernel semaphore
*
* @param name Semaphore name.
*/
#define DEFINE_SEMAPHORE(name) \
struct _k_sem_struct _k_sem_obj_##name = __K_SEMAPHORE_DEFAULT; \
const ksem_t name = (ksem_t)&_k_sem_obj_##name;
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* _SEMAPHORE_H */
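
A minimal sketch of the deleted semaphore API in use (ready_sem is illustrative):

#include <microkernel.h>

DEFINE_SEMAPHORE(ready_sem);

static void signaler(void)
{
	task_sem_give(ready_sem);
}

static void waiter(void)
{
	if (task_sem_take(ready_sem, TICKS_UNLIMITED) == RC_OK) {
		/* semaphore obtained */
	}
}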


@@ -1,337 +0,0 @@
/* microkernel/task.h */
/*
* Copyright (c) 1997-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TASK_H
#define TASK_H
/**
* @brief Microkernel Tasks
* @defgroup microkernel_task Microkernel Tasks
* @ingroup microkernel_services
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <sections.h>
/*
* The following task groups are reserved for system use.
* sysgen automatically generates corresponding TASKGROUPs with reserved
* GROUPIDs
* sysgen must be updated if any changes are made to the reserved groups.
*/
#define EXE_GROUP 1 /* TASKGROUP EXE */
#define USR_GROUP 2 /* TASKGROUP SYS */
#define FPU_GROUP 4 /* TASKGROUP FPU */
/**
* @cond internal
*/
/**
* @brief Initialize a struct k_task given parameters.
*
* @param ident Numeric identifier of this task object.
* @param priority Priority of task.
* @param state State of task.
* @param groups Groups this task belongs to.
* @param fn_start Entry function.
* @param workspace Pointer to workspace (aka, stack).
* @param worksize Size of workspace.
* @param fn_abort Abort function.
*/
#define __K_TASK_INITIALIZER(ident, priority, state, groups, \
fn_start, workspace, worksize, fn_abort) \
{ \
NULL, NULL, priority, ident, state, ((groups) ^ SYS), \
fn_start, workspace, worksize, fn_abort, NULL, \
}
extern struct k_task _k_task_list[];
extern void _task_ioctl(ktask_t, int);
extern void _task_group_ioctl(ktask_group_t, int);
/**
* @endcond
*/
/**
* @brief Yield the CPU to another task.
*
* This routine yields the processor to the next-equal priority runnable
* task. With task_yield(), the effect of round-robin scheduling is
* possible. When no task of equal priority is runnable, no task switch
* occurs, and the calling task resumes execution.
*
* @return N/A
*/
extern void task_yield(void);
/**
* @brief Set the priority of a task.
*
* This routine changes the priority of the specified task.
*
* The call has immediate effect. When the calling task no longer is the
* highest-priority runnable task, a task switch occurs.
*
* Priority can be assigned in the range 0 to 62, where 0 is the
* highest priority.
*
* @param task Task whose priority is to be set.
* @param prio New priority.
*
* @return N/A
*/
extern void task_priority_set(ktask_t task, kpriority_t prio);
/**
* @brief Set the entry point of a task.
*
* This routine sets the entry point of a task to a given routine. It is
* needed only when an entry point differs from what is set in the project
* file. In order to have any effect, it must be called before task_start(),
* and it cannot work with members of the EXE group or with any group that
* starts automatically on application loading.
*
* The routine is executed when the task is started.
*
* @param task Task to operate on.
* @param func Entry point.
*
* @return N/A
*/
extern void task_entry_set(ktask_t task, void (*func)(void));
/**
* @brief Install an abort handler.
*
* This routine installs an abort handler for the calling task.
*
* The abort handler runs when the calling task is aborted by a _TaskAbort()
* or task_group_abort() call.
*
* Each call to task_abort_handler_set() replaces the previously-installed
* handler.
*
* To remove an abort handler, set the parameter to NULL as below:
* task_abort_handler_set (NULL)
*
* @param func Abort handler.
*
* @return N/A
*/
extern void task_abort_handler_set(void (*func)(void));
/**
* @brief Issue a custom call from within the microkernel server fiber.
*
* This routine issues a request to execute a function from within the context
* of the microkernel server fiber.
*
* @param func Function to call from within the microkernel server fiber.
* @param argp Argument to pass to custom function.
*
* @return return value from custom @a func call
*/
extern int task_offload_to_fiber(int (*func)(), void *argp);
/*
* Operations supported by _task_ioctl() and _task_group_ioctl()
*/
#define TASK_START 0
#define TASK_ABORT 1
#define TASK_SUSPEND 2
#define TASK_RESUME 3
#define TASK_BLOCK 4
#define TASK_UNBLOCK 5
#define TASK_GROUP_START 0
#define TASK_GROUP_ABORT 1
#define TASK_GROUP_SUSPEND 2
#define TASK_GROUP_RESUME 3
#define TASK_GROUP_BLOCK 4
#define TASK_GROUP_UNBLOCK 5
/**
* @brief Gets task identifier
*
* @return identifier for current task
*/
extern ktask_t task_id_get(void);
/**
* @brief Gets task priority
*
* @return priority of current task
*/
extern kpriority_t task_priority_get(void);
/**
* @brief Start a task
* @param t Task to start
* @return N/A
*/
#define task_start(t) _task_ioctl(t, TASK_START)
/**
* @brief Abort a task
*
* @param t Task to abort
*
* @return N/A
*/
#define task_abort(t) _task_ioctl(t, TASK_ABORT)
/**
* @brief Suspend a task
*
* @param t Task to suspend
*
* @return N/A
*/
#define task_suspend(t) _task_ioctl(t, TASK_SUSPEND)
/**
* @brief Resume a task
*
* @param t Task to resume
*
* @return N/A
*/
#define task_resume(t) _task_ioctl(t, TASK_RESUME)
/**
* @brief Get task groups for task
*
* @return task groups associated with current task
*/
extern uint32_t task_group_mask_get(void);
/**
* @brief Add task to task group(s)
*
* @param groups Task Groups
*
* @return N/A
*/
extern void task_group_join(uint32_t groups);
/**
* @brief Remove task from task group(s)
*
* @param groups Task Groups
*
* @return N/A
*/
extern void task_group_leave(uint32_t groups);
/**
* @brief Start a task group
*
* @param g Task group to start
*
* @return N/A
*/
#define task_group_start(g) _task_group_ioctl(g, TASK_GROUP_START)
/**
* @brief Abort a task group
*
* @param g Task group to abort
*
* @return N/A
*/
#define task_group_abort(g) _task_group_ioctl(g, TASK_GROUP_ABORT)
/**
* @brief Suspend a task group
*
* @param g Task group to suspend
*
* @return N/A
*/
#define task_group_suspend(g) _task_group_ioctl(g, TASK_GROUP_SUSPEND)
/**
* @brief Resume a task group
*
* @param g Task group to resume
*
* @return N/A
*/
#define task_group_resume(g) _task_group_ioctl(g, TASK_GROUP_RESUME)
/**
* @brief Get task identifier
*
* @return identifier for current task
*/
#define isr_task_id_get() task_id_get()
/**
* @brief Get task priority
*
* @return priority of current task
*/
#define isr_task_priority_get() task_priority_get()
/**
* @brief Get task groups for task
*
* @return task groups associated with current task
*/
#define isr_task_group_mask_get() task_group_mask_get()
/**
* @brief Define a private microkernel task.
*
* This declares and initializes a private task. The new task
* can be passed to the microkernel task functions.
*
* @param name Name of the task.
* @param priority Priority of task.
* @param entry Entry function.
* @param stack_size Size of stack (in bytes)
* @param groups Groups this task belongs to.
*/
#define DEFINE_TASK(name, priority, entry, stack_size, groups) \
extern void entry(void); \
char __noinit __stack __stack_##name[stack_size]; \
struct k_task _k_task_obj_##name \
__in_section(_k_task_list, private, task) = \
__K_TASK_INITIALIZER( \
(ktask_t)&_k_task_obj_##name, \
priority, 0x00000001, (uint32_t)(groups), \
entry, &__stack_##name[0], stack_size, NULL); \
const ktask_t name \
__in_section(_k_task_ptr, private, task) = \
(ktask_t)&_k_task_obj_##name;
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* TASK_H */
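
A minimal sketch of the deleted task API in use; the priority, stack size, and group value 0 (no reserved groups) are illustrative:

#include <microkernel.h>

void worker_main(void)
{
	for (;;) {
		/* ... do work ... */
		task_yield();
	}
}

DEFINE_TASK(worker, 7, worker_main, 1024, 0);	/* prio 7, 1 KiB stack */

static void launch(void)
{
	task_start(worker);	/* expands to _task_ioctl(worker, TASK_START) */
}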


@@ -1,200 +0,0 @@
/* microkernel/ticks.h - microkernel tick header file */
/*
* Copyright (c) 1997-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TICKS_H
#define TICKS_H
/**
* @brief Microkernel Timers
* @defgroup microkernel_timer Microkernel Timers
* @ingroup microkernel_services
* @{
*/
#include <microkernel.h>
#include <sys_clock.h>
/* externs */
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Set time-slicing period and scope
*
* This routine controls how task time slicing is performed by the task
* scheduler; it specifies the maximum time slice length (in ticks) and
* the highest priority task level for which time slicing is performed.
*
* To enable time slicing, a non-zero time slice length must be specified.
* The task scheduler then ensures that no executing task runs for more than
* the specified number of ticks before giving other tasks of that priority
* a chance to execute. (However, any task whose priority is higher than the
* specified task priority level is exempted, and may execute as long as
* desired without being pre-empted due to time slicing.)
*
* Time slicing limits only the maximum amount of time a task may continuously
* execute. Once the scheduler selects a task for execution, there is no minimum
* guaranteed time the task will execute before tasks of greater or equal
* priority are scheduled.
*
* When the currently-executing task is the only one of that priority eligible
* for execution, this routine has no effect; the task is immediately rescheduled
* after the slice period expires.
*
* To disable timeslicing, call the API with both parameters set to zero.
*
* @return N/A
*/
extern void sys_scheduler_time_slice_set(int32_t t, kpriority_t p);
/**
* @brief Allocate a timer and return its object identifier.
*
* @return timer identifier
*/
extern ktimer_t task_timer_alloc(void);
/**
* @brief Deallocate a timer
*
* This routine frees the resources associated with the timer. If a timer was
* started, it has to be stopped using task_timer_stop() before it can be freed.
*
* @param timer Timer to deallocate.
*
* @return N/A
*/
extern void task_timer_free(ktimer_t timer);
/**
*
* @brief Start or restart the specified low-resolution timer
*
* This routine starts or restarts the specified low-resolution timer.
*
* Signals the semaphore after a specified number of ticks set by
* @a duration expires. The timer repeats the expiration/signal cycle
* each time @a period ticks elapses.
*
* Setting @a period to 0 stops the timer at the end of the initial delay.
* If either @a duration or @a period is passed an invalid value (@a duration <= 0,
* @a period < 0), this kernel API acts like a task_timer_stop(): if the
* allocated timer was still running (from a previous call), it will be
* cancelled; if not, nothing will happen.
*
* @param timer Timer to start.
* @param duration Initial delay in ticks.
* @param period Repetition interval in ticks.
* @param sema Semaphore to signal.
*
* @return N/A
*/
extern void task_timer_start(ktimer_t timer,
int32_t duration,
int32_t period,
ksem_t sema);
/**
*
* @brief Restart a timer
*
* This routine restarts the timer specified by @a timer. The timer must
* have previously been started by a call to task_timer_start().
*
* @param timer Timer to restart.
 * @param duration Initial delay in ticks.
 * @param period Repetition interval in ticks.
*
* @return N/A
*/
static inline void task_timer_restart(ktimer_t timer, int32_t duration,
int32_t period)
{
task_timer_start(timer, duration, period, _USE_CURRENT_SEM);
}
/**
* @brief Stop a timer
*
* This routine stops the specified timer. If the timer period has already
* elapsed, the call has no effect.
*
* @param timer Timer to stop.
*
* @return N/A
*/
extern void task_timer_stop(ktimer_t timer);
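/*
 * Lifecycle sketch (illustrative, not part of the original header): a task
 * allocates a timer, arms it to signal a semaphore after 100 ticks and every
 * 50 ticks thereafter, waits for a few expirations, then stops and frees the
 * timer. MY_SEM is an assumed semaphore defined elsewhere (e.g. in the
 * project MDEF), the tick counts are arbitrary, and task_sem_take() with a
 * timeout argument is assumed to be available in this kernel generation.
 *
 *	ktimer_t timer = task_timer_alloc();
 *
 *	task_timer_start(timer, 100, 50, MY_SEM);
 *	for (int i = 0; i < 5; i++) {
 *		task_sem_take(MY_SEM, TICKS_UNLIMITED);
 *	}
 *	task_timer_stop(timer);		(a started timer must be stopped...)
 *	task_timer_free(timer);		(...before it can be freed)
 */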
/**
*
* @brief Sleep for a number of ticks
*
* This routine suspends the calling task for the specified number of timer
* ticks. When the suspension expires, the task is rescheduled by priority.
*
* @param ticks Number of ticks for which to sleep.
*
* @return N/A
*/
static inline void task_sleep(int32_t ticks)
{
extern void (*_do_task_sleep)(int32_t ticks);
_do_task_sleep(ticks);
}
/**
*
* @brief Read the processor workload
*
* This routine returns the workload as a number ranging from 0 to 1000.
*
* Each unit equals 0.1% of the time the idle task was not scheduled by the
* microkernel during the period set by sys_workload_time_slice_set().
*
* IMPORTANT: This workload monitor ignores any time spent servicing ISRs and
* fibers! Thus, a system with no meaningful task work to do may spend
* up to 100% of its time servicing ISRs and fibers, yet it will report 0%
* workload because the microkernel always selects the idle task.
*
* @return workload
*/
extern int task_workload_get(void);
/**
*
* @brief Set workload period
*
* This routine specifies the workload measuring period for task_workload_get().
*
 * @param t Workload measurement period, in ticks
*
* @return N/A
*/
extern void sys_workload_time_slice_set(int32_t t);
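/*
 * Measurement sketch (illustrative, not part of the original header):
 * sample the workload over 100-tick windows; the window length is an
 * arbitrary example choice.
 *
 *	sys_workload_time_slice_set(100);	(measure over 100 ticks)
 *	task_sleep(100);			(let one full period elapse)
 *	int load = task_workload_get();		(0..1000, units of 0.1 percent)
 */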
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* TICKS_H */

View file

@ -24,33 +24,6 @@
#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
#if !defined(CONFIG_KERNEL_V2)
#include <nanokernel.h>
extern struct nano_fifo *_trace_list_nano_fifo;
extern struct nano_lifo *_trace_list_nano_lifo;
extern struct nano_sem *_trace_list_nano_sem;
extern struct nano_timer *_trace_list_nano_timer;
extern struct nano_stack *_trace_list_nano_stack;
extern struct ring_buf *_trace_list_sys_ring_buf;
#ifdef CONFIG_MICROKERNEL
#include <microkernel/base_api.h>
#include <micro_private_types.h>
extern struct _k_mbox_struct *_trace_list_micro_mbox;
extern struct _k_mutex_struct *_trace_list_micro_mutex;
extern struct _k_sem_struct *_trace_list_micro_sem;
extern struct _k_fifo_struct *_trace_list_micro_fifo;
extern struct _k_pipe_struct *_trace_list_micro_pipe;
extern struct pool_struct *_trace_list_micro_mem_pool;
extern struct _k_mem_map_struct *_trace_list_micro_mem_map;
extern struct _k_event_struct *_trace_list_micro_event;
extern struct k_timer *_trace_list_micro_timer;
extern struct k_task *_trace_list_micro_task;
#endif /*CONFIG_MICROKERNEL*/
#else
#include <kernel.h>
extern struct k_timer *_trace_list_k_timer;
extern struct k_mem_slab *_trace_list_k_mem_slab;
@ -67,8 +40,6 @@ extern struct k_pipe *_trace_list_k_pipe;
extern struct ring_buf *_trace_list_sys_ring_buf;
#endif /*CONFIG_KERNEL_V2*/
/**
 * @def SYS_TRACING_HEAD
 *

View file

@ -113,54 +113,9 @@
} \
while (0)
#if !defined(CONFIG_KERNEL_V2)
/*
* Define list variables for all object types.
*
* This is ugly, since these list variables are redefined by every .c file
* that drags in this include file (explicitly or implicitly). Fortunately,
* the linker doesn't seem to mind seeing these duplicate definitions ...
*/
#include <nanokernel.h>
struct nano_fifo *_trace_list_nano_fifo;
struct nano_lifo *_trace_list_nano_lifo;
struct nano_sem *_trace_list_nano_sem;
struct nano_timer *_trace_list_nano_timer;
struct nano_stack *_trace_list_nano_stack;
struct ring_buf *_trace_list_sys_ring_buf;
#ifdef CONFIG_MICROKERNEL
#include <microkernel/base_api.h>
struct _k_mbox_struct *_trace_list_micro_mbox;
struct _k_mutex_struct *_trace_list_micro_mutex;
struct _k_sem_struct *_trace_list_micro_sem;
struct _k_fifo_struct *_trace_list_micro_fifo;
struct _k_pipe_struct *_trace_list_micro_pipe;
struct pool_struct *_trace_list_micro_mem_pool;
struct _k_mem_map_struct *_trace_list_micro_mem_map;
struct _k_event_struct *_trace_list_micro_event;
struct k_timer *_trace_list_micro_timer;
struct k_task *_trace_list_micro_task;
#endif /*CONFIG_MICROKERNEL*/
#else
/*
* Define list variables for object types that don't do it in a .c file.
*
* This is ugly, since these list variables are redefined by every .c file
* that drags in this include file (explicitly or implicitly). Fortunately,
* the linker doesn't seem to mind seeing these duplicate definitions ...
*/
struct ring_buf;
struct ring_buf *_trace_list_sys_ring_buf;
#endif /*CONFIG_KERNEL_V2*/
#endif /*CONFIG_DEBUG_TRACING_KERNEL_OBJECTS*/
#endif /*_OBJECT_TRACING_COMMON_H_*/

View file

@ -24,211 +24,6 @@
#ifndef _misc_nano_work__h_
#define _misc_nano_work__h_
#ifdef CONFIG_KERNEL_V2
#include <kernel.h>
#else
#include <nanokernel.h>
#include <atomic.h>
#include <misc/__assert.h>
#ifdef __cplusplus
extern "C" {
#endif
struct nano_work;
typedef void (*work_handler_t)(struct nano_work *);
/**
 * A workqueue is a fiber that executes @ref nano_work items that are
 * queued to it. This is useful for drivers that need to schedule, from
 * ISR context, the execution of code that might sleep. The actual fiber
 * identifier is not stored in the structure in order to save space.
*/
struct nano_workqueue {
struct nano_fifo fifo;
};
/**
* @brief Work flags.
*/
enum {
NANO_WORK_STATE_PENDING, /* Work item pending state */
NANO_WORK_NUM_FLAGS, /* Number of flags - must be last */
};
/**
* @brief An item which can be scheduled on a @ref nano_workqueue.
*/
struct nano_work {
void *_reserved; /* Used by nano_fifo implementation. */
work_handler_t handler;
ATOMIC_DEFINE(flags, NANO_WORK_NUM_FLAGS);
};
/**
* @brief Initialize work item
*/
static inline void nano_work_init(struct nano_work *work,
work_handler_t handler)
{
atomic_clear_bit(work->flags, NANO_WORK_STATE_PENDING);
work->handler = handler;
}
/**
* @brief Submit a work item to a workqueue.
*
 * This procedure schedules a work item to be processed.
 * If the work item has already been submitted and is still pending
 * execution, this call is a no-op. In that case the work item must not be
 * modified externally (e.g. by the caller of this function), since that
 * could cause it to be processed in a corrupted state.
*
 * @param wq Workqueue to which the work item is submitted
 * @param work Work item to submit
*
* @return N/A
*/
static inline void nano_work_submit_to_queue(struct nano_workqueue *wq,
struct nano_work *work)
{
if (!atomic_test_and_set_bit(work->flags, NANO_WORK_STATE_PENDING)) {
nano_fifo_put(&wq->fifo, work);
}
}
/**
* @brief Check if work item is pending.
*/
static inline int nano_work_pending(struct nano_work *work)
{
return atomic_test_bit(work->flags, NANO_WORK_STATE_PENDING);
}
/**
* @brief Start a new workqueue. Call this from fiber context.
*/
extern void nano_fiber_workqueue_start(struct nano_workqueue *wq,
const struct fiber_config *config);
/**
* @brief Start a new workqueue. Call this from task context.
*/
extern void nano_task_workqueue_start(struct nano_workqueue *wq,
const struct fiber_config *config);
/**
* @brief Start a new workqueue. This routine can be called from either
* fiber or task context.
*/
extern void nano_workqueue_start(struct nano_workqueue *wq,
const struct fiber_config *config);
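/*
 * Usage sketch (illustrative, not part of the original header): define a
 * handler, start a private workqueue from task context, then submit an item
 * to it (e.g. from an ISR). The stack size, priority, and the fiber_config
 * field names shown here are assumptions for the example.
 *
 *	static char wq_stack[512];
 *	static const struct fiber_config wq_cfg = {
 *		.stack = wq_stack,
 *		.stack_size = sizeof(wq_stack),
 *		.prio = 7,
 *	};
 *	static struct nano_workqueue my_wq;
 *	static struct nano_work my_work;
 *
 *	static void my_handler(struct nano_work *work)
 *	{
 *		printk("work item processed\n");
 *	}
 *
 *	nano_task_workqueue_start(&my_wq, &wq_cfg);
 *	nano_work_init(&my_work, my_handler);
 *	nano_work_submit_to_queue(&my_wq, &my_work);
 */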
#if defined(CONFIG_NANO_TIMEOUTS)
/*
* @brief An item which can be scheduled on a @ref nano_workqueue with a
* delay.
*/
struct nano_delayed_work {
struct nano_work work;
struct _nano_timeout timeout;
struct nano_workqueue *wq;
};
/**
* @brief Initialize delayed work
*/
void nano_delayed_work_init(struct nano_delayed_work *work,
work_handler_t handler);
/**
* @brief Submit a delayed work item to a workqueue.
*
* This procedure schedules a work item to be processed after a delay.
* Once the delay has passed, the work item is submitted to the work queue:
* at this point, it is no longer possible to cancel it. Once the work item's
* handler is about to be executed, the work is considered complete and can be
* resubmitted.
*
 * Care must be taken if the handler blocks or yields, as there is no
 * implicit mutual exclusion mechanism. Such usage is not recommended; if
 * mutual exclusion is necessary, it must be provided explicitly between
 * the submitter and the handler.
*
* @param wq Workqueue to schedule the work item
* @param work Delayed work item
* @param ticks Ticks to wait before scheduling the work item
*
* @return 0 in case of success or negative value in case of error.
*/
int nano_delayed_work_submit_to_queue(struct nano_workqueue *wq,
struct nano_delayed_work *work,
int ticks);
/**
* @brief Cancel a delayed work item
*
 * This procedure cancels a scheduled work item. If the work item has
 * completed or is idle, the call has no effect. The only case where
 * cancellation can fail is after the work item has been submitted to the
 * workqueue but before its handler has run.
*
* @param work Delayed work item to be canceled
*
* @return 0 in case of success or negative value in case of error.
*/
int nano_delayed_work_cancel(struct nano_delayed_work *work);
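/*
 * Delay/cancel sketch (illustrative, not part of the original header):
 * schedule an item on the my_wq workqueue from the sketch above to run
 * after 50 ticks, then cancel it before the delay elapses; all names and
 * the tick count are assumptions carried over from that example.
 *
 *	static struct nano_delayed_work dwork;
 *
 *	nano_delayed_work_init(&dwork, my_handler);
 *	if (nano_delayed_work_submit_to_queue(&my_wq, &dwork, 50) == 0) {
 *		...
 *		nano_delayed_work_cancel(&dwork);	(returns 0 on success)
 *	}
 */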
#endif /* CONFIG_NANO_TIMEOUTS */
#if defined(CONFIG_SYSTEM_WORKQUEUE)
extern struct nano_workqueue sys_workqueue;
/*
* @brief Submit a work item to the system workqueue.
*
* @ref nano_work_submit_to_queue
*
 * When using the system workqueue it is not recommended to block or yield
 * in the handler: since its fiber is shared system-wide, doing so may
 * cause unexpected behavior.
*/
static inline void nano_work_submit(struct nano_work *work)
{
nano_work_submit_to_queue(&sys_workqueue, work);
}
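/*
 * Sketch (illustrative, not part of the original header): submitting the
 * my_work item from the earlier example to the shared system workqueue
 * instead of a private one; the item is assumed to have been initialized
 * with nano_work_init() beforehand.
 *
 *	nano_work_submit(&my_work);
 */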
#if defined(CONFIG_NANO_TIMEOUTS)
/*
* @brief Submit a delayed work item to the system workqueue.
*
* @ref nano_delayed_work_submit_to_queue
*
 * When using the system workqueue it is not recommended to block or yield
 * in the handler: since its fiber is shared system-wide, doing so may
 * cause unexpected behavior.
*/
static inline int nano_delayed_work_submit(struct nano_delayed_work *work,
int ticks)
{
return nano_delayed_work_submit_to_queue(&sys_workqueue, work, ticks);
}
#endif /* CONFIG_NANO_TIMEOUTS */
#endif /* CONFIG_SYSTEM_WORKQUEUE */
#ifdef __cplusplus
}
#endif
#endif /* CONFIG_KERNEL_V2 */
#endif /* _misc_nano_work__h_ */

File diff suppressed because it is too large

View file

@ -17,15 +17,7 @@
#ifndef _ZEPHYR__H
#define _ZEPHYR__H
#ifdef CONFIG_KERNEL_V2
#include <kernel.h>
#elif CONFIG_MICROKERNEL
#include <microkernel.h>
#elif CONFIG_NANOKERNEL
#include <nanokernel.h>
#else
#error "unknown kernel type!"
#endif
#ifdef CONFIG_MDEF
#include <sysgen.h>