microkernel: remove kernel service dispatch table

This change removes the internal number-to-function mapping
of microkernel services. Instead, function pointers are used
to specify which service to use.

This is in preparation for private kernel objects. Before this,
only kernel objects that are defined in MDEF files would have
their corresponding functions included in the final binary, via
sysgen populating an array of number-to-function mappings. This
causes an issue when a certain type of object is defined only in
source code, and never in an MDEF file. The corresponding
mappings would be deleted, and the functions would never be
included in the binary. For example, if no mutexes are defined
in an MDEF file, the _k_mutex_*() functions would not be
included.

With this change, any usage of private kernel objects will hint
to the linker that those functions are needed, and should not be
removed from the final binary.

Change-Id: If48864abcd6471bcb7964ec00fe668bcabe3239b
Signed-off-by: Daniel Leung <daniel.leung@intel.com>
This commit is contained in:
Daniel Leung 2015-07-25 15:31:09 -07:00 committed by Anas Nashif
commit 74c8852d05
22 changed files with 266 additions and 396 deletions

View file

@ -74,7 +74,7 @@ void _TaskAbort(void)
if (_ScbIsInThreadMode()) { if (_ScbIsInThreadMode()) {
_task_ioctl(_k_current_task->Ident, taskAbortCode); _task_ioctl(_k_current_task->Ident, taskAbortCode);
} else { } else {
cmd_packet.Comm = TSKOP; cmd_packet.Comm = _K_SVC_TASK_OP;
cmd_packet.Args.g1.task = _k_current_task->Ident; cmd_packet.Args.g1.task = _k_current_task->Ident;
cmd_packet.Args.g1.opt = taskAbortCode; cmd_packet.Args.g1.opt = taskAbortCode;
cmd_packet.alloc = false; cmd_packet.alloc = false;

View file

@ -128,6 +128,18 @@ extern void _k_sem_wait_request(struct k_args *);
*/ */
extern void _k_sem_wait_reply(struct k_args *); extern void _k_sem_wait_reply(struct k_args *);
/**
*
* @brief Reply to a semaphore wait request with timeout.
*
* @param A Pointer to a k_args structure.
*
* @return N/A
*
* @sa _k_sem_wait_reply
*/
extern void _k_sem_wait_reply_timeout(struct k_args *A);
/** /**
* *
* @brief Handle semaphore group wait request * @brief Handle semaphore group wait request
@ -227,11 +239,14 @@ extern void _k_sem_inquiry(struct k_args *);
extern void _k_mutex_lock_request(struct k_args *); extern void _k_mutex_lock_request(struct k_args *);
extern void _k_mutex_lock_reply(struct k_args *); extern void _k_mutex_lock_reply(struct k_args *);
extern void _k_mutex_lock_reply_timeout(struct k_args *);
extern void _k_mutex_unlock(struct k_args *); extern void _k_mutex_unlock(struct k_args *);
extern void _k_fifo_enque_request(struct k_args *); extern void _k_fifo_enque_request(struct k_args *);
extern void _k_fifo_enque_reply(struct k_args *); extern void _k_fifo_enque_reply(struct k_args *);
extern void _k_fifo_enque_reply_timeout(struct k_args *);
extern void _k_fifo_deque_request(struct k_args *); extern void _k_fifo_deque_request(struct k_args *);
extern void _k_fifo_deque_reply(struct k_args *); extern void _k_fifo_deque_reply(struct k_args *);
extern void _k_fifo_deque_reply_timeout(struct k_args *);
extern void _k_fifo_ioctl(struct k_args *); extern void _k_fifo_ioctl(struct k_args *);
extern void _k_mbox_send_request(struct k_args *); extern void _k_mbox_send_request(struct k_args *);
extern void _k_mbox_send_reply(struct k_args *); extern void _k_mbox_send_reply(struct k_args *);

View file

@ -55,91 +55,88 @@ struct k_timer {
/* Kernel server command codes */ /* Kernel server command codes */
typedef enum { #define _K_SVC_UNDEFINED (NULL)
NOP,
MVD_REQ, #define _K_SVC_BLOCK_WAITERS_GET _k_block_waiters_get
MVD_VOID, /* obsolete now */ #define _K_SVC_DEFRAG _k_defrag
RAWDATA, #define _K_SVC_MOVEDATA_REQ _k_movedata_request
OFFLOAD, #define _K_SVC_NOP _k_nop
READWL, #define _K_SVC_OFFLOAD_TO_FIBER _k_offload_to_fiber
SIGNALS, #define _K_SVC_TIME_ELAPSE _k_time_elapse
SIGNALM, #define _K_SVC_WORKLOAD_GET _k_workload_get
RESETS,
RESETM, #define _K_SVC_EVENT_HANDLER_SET _k_event_handler_set
WAITSREQ, #define _K_SVC_EVENT_SIGNAL _k_event_signal
WAITSRPL, #define _K_SVC_EVENT_TEST _k_event_test
WAITSTMO, #define _K_SVC_EVENT_TEST_TIMEOUT _k_event_test_timeout
WAITMANY,
WAITMREQ, #define _K_SVC_SEM_INQUIRY _k_sem_inquiry
WAITMRDY, #define _K_SVC_SEM_SIGNAL _k_sem_signal
WAITMCAN, #define _K_SVC_SEM_RESET _k_sem_reset
WAITMACC, #define _K_SVC_SEM_WAIT_REQUEST _k_sem_wait_request
WAITMEND, #define _K_SVC_SEM_WAIT_REPLY _k_sem_wait_reply
WAITMTMO, #define _K_SVC_SEM_WAIT_REPLY_TIMEOUT _k_sem_wait_reply_timeout
INQSEMA, #define _K_SVC_SEM_GROUP_SIGNAL _k_sem_group_signal
LOCK_REQ, #define _K_SVC_SEM_GROUP_RESET _k_sem_group_reset
LOCK_RPL, #define _K_SVC_SEM_GROUP_WAIT _k_sem_group_wait
LOCK_TMO, #define _K_SVC_SEM_GROUP_WAIT_ANY _k_sem_group_wait_any
UNLOCK, #define _K_SVC_SEM_GROUP_WAIT_ACCEPT _k_sem_group_wait_accept
ENQ_REQ, #define _K_SVC_SEM_GROUP_WAIT_CANCEL _k_sem_group_wait_cancel
ENQ_RPL, #define _K_SVC_SEM_GROUP_WAIT_READY _k_sem_group_ready
ENQ_TMO, #define _K_SVC_SEM_GROUP_WAIT_REQUEST _k_sem_group_wait_request
DEQ_REQ, #define _K_SVC_SEM_GROUP_WAIT_TIMEOUT _k_sem_group_wait_timeout
DEQ_RPL,
DEQ_TMO, #define _K_SVC_MUTEX_LOCK_REQUEST _k_mutex_lock_request
QUEUE, #define _K_SVC_MUTEX_LOCK_REPLY _k_mutex_lock_reply
SEND_REQ, #define _K_SVC_MUTEX_LOCK_REPLY_TIMEOUT _k_mutex_lock_reply_timeout
SEND_TMO, #define _K_SVC_MUTEX_UNLOCK _k_mutex_unlock
SEND_ACK,
SEND_DATA, #define _K_SVC_FIFO_ENQUE_REQUEST _k_fifo_enque_request
RECV_REQ, #define _K_SVC_FIFO_ENQUE_REPLY _k_fifo_enque_reply
RECV_TMO, #define _K_SVC_FIFO_ENQUE_REPLY_TIMEOUT _k_fifo_enque_reply_timeout
RECV_ACK, #define _K_SVC_FIFO_DEQUE_REQUEST _k_fifo_deque_request
RECV_DATA, #define _K_SVC_FIFO_DEQUE_REPLY _k_fifo_deque_reply
ELAPSE, #define _K_SVC_FIFO_DEQUE_REPLY_TIMEOUT _k_fifo_deque_reply_timeout
SLEEP, #define _K_SVC_FIFO_IOCTL _k_fifo_ioctl
WAKEUP,
TSKOP, #define _K_SVC_MBOX_SEND_REQUEST _k_mbox_send_request
GRPOP, #define _K_SVC_MBOX_SEND_REPLY _k_mbox_send_reply
SPRIO, #define _K_SVC_MBOX_SEND_ACK _k_mbox_send_ack
YIELD, #define _K_SVC_MBOX_SEND_DATA _k_mbox_send_data
ALLOC, #define _K_SVC_MBOX_RECEIVE_REQUEST _k_mbox_receive_request
DEALLOC, #define _K_SVC_MBOX_RECEIVE_REPLY _k_mbox_receive_reply
TALLOC, #define _K_SVC_MBOX_RECEIVE_ACK _k_mbox_receive_ack
TDEALLOC, #define _K_SVC_MBOX_RECEIVE_DATA _k_mbox_receive_data
TSTART,
TSTOP, #define _K_SVC_TASK_SLEEP _k_task_sleep
ALLOCTMO, #define _K_SVC_TASK_WAKEUP _k_task_wakeup
REMREPLY, #define _K_SVC_TASK_OP _k_task_op
DEBUG_REQ, #define _K_SVC_TASK_GROUP_OP _k_task_group_op
DEBUG_ACK, #define _K_SVC_TASK_PRIORITY_SET _k_task_priority_set
EVENTENABLE, /* obsolete now */ #define _K_SVC_TASK_YIELD _k_task_yield
EVENTTEST,
EVENTHANDLER, #define _K_SVC_MEM_MAP_ALLOC _k_mem_map_alloc
EVENTSIGNAL, #define _K_SVC_MEM_MAP_ALLOC_TIMEOUT _k_mem_map_alloc_timeout
GET_BLOCK, #define _K_SVC_MEM_MAP_DEALLOC _k_mem_map_dealloc
REL_BLOCK,
GET_BLOCK_WAIT, #define _K_SVC_TIMER_ALLOC _k_timer_alloc
GTBLTMO, #define _K_SVC_TIMER_DEALLOC _k_timer_dealloc
POOL_DEFRAG, #define _K_SVC_TIMER_START _k_timer_start
MVDSND_REQ, #define _K_SVC_TIMER_STOP _k_timer_stop
MVDRCV_REQ,
MVDSND_ACK, #define _K_SVC_MEM_POOL_BLOCK_GET _k_mem_pool_block_get
MVDRCV_ACK, #define _K_SVC_MEM_POOL_BLOCK_GET_TIMEOUT_HANDLE _k_mem_pool_block_get_timeout_handle
MEMCPY_REQ, #define _K_SVC_MEM_POOL_BLOCK_RELEASE _k_mem_pool_block_release
MEMCPY_RPL,
PIPE_PUT_REQUEST, #define _K_SVC_PIPE_PUT_REQUEST _k_pipe_put_request
PIPE_PUT_TIMEOUT, #define _K_SVC_PIPE_PUT_TIMEOUT _k_pipe_put_timeout
PIPE_PUT_REPLY, #define _K_SVC_PIPE_PUT_REPLY _k_pipe_put_reply
PIPE_PUT_ACK, #define _K_SVC_PIPE_PUT_ACK _k_pipe_put_ack
PIPE_GET_REQUEST, #define _K_SVC_PIPE_GET_REQUEST _k_pipe_get_request
PIPE_GET_TIMEOUT, #define _K_SVC_PIPE_GET_TIMEOUT _k_pipe_get_timeout
PIPE_GET_REPLY, #define _K_SVC_PIPE_GET_REPLY _k_pipe_get_reply
PIPE_GET_ACK, #define _K_SVC_PIPE_GET_ACK _k_pipe_get_ack
PIPE_MOVEDATA_ACK, #define _K_SVC_PIPE_MOVEDATA_ACK _k_pipe_movedata_ack
EVENT_TMO,
UNDEFINED = -1
} K_COMM;
/* Task queue header */ /* Task queue header */
@ -460,7 +457,7 @@ struct k_args {
/* 'alloc' is true if k_args is allocated via GETARGS() */ /* 'alloc' is true if k_args is allocated via GETARGS() */
bool alloc __aligned(4); bool alloc __aligned(4);
K_COMM Comm __aligned(4); void (*Comm)(struct k_args *);
/* /*
* Ctxt needs to be aligned to avoid "unaligned write" exception on ARM * Ctxt needs to be aligned to avoid "unaligned write" exception on ARM

View file

@ -77,7 +77,7 @@ int task_event_set_handler(kevent_t event, kevent_handler_t handler)
{ {
struct k_args A; struct k_args A;
A.Comm = EVENTHANDLER; A.Comm = _K_SVC_EVENT_HANDLER_SET;
A.Args.e1.event = event; A.Args.e1.event = event;
A.Args.e1.func = handler; A.Args.e1.func = handler;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
@ -128,7 +128,7 @@ void _k_event_test(struct k_args *A)
if (A->Time.ticks == TICKS_UNLIMITED) { if (A->Time.ticks == TICKS_UNLIMITED) {
A->Time.timer = NULL; A->Time.timer = NULL;
} else { } else {
A->Comm = EVENT_TMO; A->Comm = _K_SVC_EVENT_TEST_TIMEOUT;
_k_timeout_alloc(A); _k_timeout_alloc(A);
} }
#endif #endif
@ -151,7 +151,7 @@ int _task_event_recv(kevent_t event, int32_t time)
{ {
struct k_args A; struct k_args A;
A.Comm = EVENTTEST; A.Comm = _K_SVC_EVENT_TEST;
A.Args.e1.event = event; A.Args.e1.event = event;
A.Time.ticks = time; A.Time.ticks = time;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
@ -186,7 +186,7 @@ void _k_do_event_signal(kevent_t event)
#ifdef CONFIG_SYS_CLOCK_EXISTS #ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.timer != NULL) { if (A->Time.timer != NULL) {
_k_timeout_free(A->Time.timer); _k_timeout_free(A->Time.timer);
A->Comm = NOP; A->Comm = _K_SVC_NOP;
} }
#endif #endif
A->Time.rcode = RC_OK; A->Time.rcode = RC_OK;
@ -223,7 +223,7 @@ int task_event_send(kevent_t event)
{ {
struct k_args A; struct k_args A;
A.Comm = EVENTSIGNAL; A.Comm = _K_SVC_EVENT_SIGNAL;
A.Args.e1.event = event; A.Args.e1.event = event;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
return A.Time.rcode; return A.Time.rcode;

View file

@ -55,7 +55,7 @@ void _k_fifo_enque_reply(struct k_args *A)
#ifdef CONFIG_SYS_CLOCK_EXISTS #ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.timer) if (A->Time.timer)
FREETIMER(A->Time.timer); FREETIMER(A->Time.timer);
if (unlikely(A->Comm == ENQ_TMO)) { if (unlikely(A->Comm == _K_SVC_FIFO_ENQUE_REPLY_TIMEOUT)) {
REMOVE_ELM(A); REMOVE_ELM(A);
A->Time.rcode = RC_TIME; A->Time.rcode = RC_TIME;
} else { } else {
@ -68,6 +68,21 @@ void _k_fifo_enque_reply(struct k_args *A)
_k_state_bit_reset(A->Ctxt.proc, TF_ENQU); _k_state_bit_reset(A->Ctxt.proc, TF_ENQU);
} }
/**
*
* @brief Finish performing an incomplete FIFO enqueue request with timeout.
*
* @param A Pointer to a k_args structure
*
* @return N/A
*
* @sa _k_fifo_enque_reply
*/
void _k_fifo_enque_reply_timeout(struct k_args *A)
{
_k_fifo_enque_reply(A);
}
/** /**
* *
* @brief Perform a FIFO enqueue request * @brief Perform a FIFO enqueue request
@ -96,7 +111,7 @@ void _k_fifo_enque_request(struct k_args *A)
#ifdef CONFIG_SYS_CLOCK_EXISTS #ifdef CONFIG_SYS_CLOCK_EXISTS
if (W->Time.timer) { if (W->Time.timer) {
_k_timeout_cancel(W); _k_timeout_cancel(W);
W->Comm = DEQ_RPL; W->Comm = _K_SVC_FIFO_DEQUE_REPLY;
} else { } else {
#endif #endif
W->Time.rcode = RC_OK; W->Time.rcode = RC_OK;
@ -134,7 +149,7 @@ void _k_fifo_enque_request(struct k_args *A)
if (A->Time.ticks == TICKS_UNLIMITED) if (A->Time.ticks == TICKS_UNLIMITED)
A->Time.timer = NULL; A->Time.timer = NULL;
else { else {
A->Comm = ENQ_TMO; A->Comm = _K_SVC_FIFO_ENQUE_REPLY_TIMEOUT;
_k_timeout_alloc(A); _k_timeout_alloc(A);
} }
#endif #endif
@ -151,7 +166,7 @@ int _task_fifo_put(kfifo_t queue, /* FIFO queue */
{ {
struct k_args A; struct k_args A;
A.Comm = ENQ_REQ; A.Comm = _K_SVC_FIFO_ENQUE_REQUEST;
A.Time.ticks = time; A.Time.ticks = time;
A.Args.q1.data = (char *)data; A.Args.q1.data = (char *)data;
A.Args.q1.queue = queue; A.Args.q1.queue = queue;
@ -172,7 +187,7 @@ void _k_fifo_deque_reply(struct k_args *A)
#ifdef CONFIG_SYS_CLOCK_EXISTS #ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.timer) if (A->Time.timer)
FREETIMER(A->Time.timer); FREETIMER(A->Time.timer);
if (unlikely(A->Comm == DEQ_TMO)) { if (unlikely(A->Comm == _K_SVC_FIFO_DEQUE_REPLY_TIMEOUT)) {
REMOVE_ELM(A); REMOVE_ELM(A);
A->Time.rcode = RC_TIME; A->Time.rcode = RC_TIME;
} else { } else {
@ -185,6 +200,21 @@ void _k_fifo_deque_reply(struct k_args *A)
_k_state_bit_reset(A->Ctxt.proc, TF_DEQU); _k_state_bit_reset(A->Ctxt.proc, TF_DEQU);
} }
/**
*
* @brief Finish performing an incomplete FIFO dequeue request with timeout.
*
* @param A Pointer to a k_args structure.
*
* @return N/A
*
* @sa _k_fifo_deque_reply
*/
void _k_fifo_deque_reply_timeout(struct k_args *A)
{
_k_fifo_deque_reply(A);
}
/** /**
* *
* @brief Perform FIFO dequeue request * @brief Perform FIFO dequeue request
@ -229,7 +259,7 @@ void _k_fifo_deque_request(struct k_args *A)
#ifdef CONFIG_SYS_CLOCK_EXISTS #ifdef CONFIG_SYS_CLOCK_EXISTS
if (W->Time.timer) { if (W->Time.timer) {
_k_timeout_cancel(W); _k_timeout_cancel(W);
W->Comm = ENQ_RPL; W->Comm = _K_SVC_FIFO_ENQUE_REPLY;
} else { } else {
#endif #endif
W->Time.rcode = RC_OK; W->Time.rcode = RC_OK;
@ -253,7 +283,7 @@ void _k_fifo_deque_request(struct k_args *A)
if (A->Time.ticks == TICKS_UNLIMITED) if (A->Time.ticks == TICKS_UNLIMITED)
A->Time.timer = NULL; A->Time.timer = NULL;
else { else {
A->Comm = DEQ_TMO; A->Comm = _K_SVC_FIFO_DEQUE_REPLY_TIMEOUT;
_k_timeout_alloc(A); _k_timeout_alloc(A);
} }
#endif #endif
@ -281,7 +311,7 @@ int _task_fifo_get(kfifo_t queue, void *data, int32_t time)
{ {
struct k_args A; struct k_args A;
A.Comm = DEQ_REQ; A.Comm = _K_SVC_FIFO_DEQUE_REQUEST;
A.Time.ticks = time; A.Time.ticks = time;
A.Args.q1.data = (char *)data; A.Args.q1.data = (char *)data;
A.Args.q1.queue = queue; A.Args.q1.queue = queue;
@ -314,7 +344,7 @@ void _k_fifo_ioctl(struct k_args *A)
#ifdef CONFIG_SYS_CLOCK_EXISTS #ifdef CONFIG_SYS_CLOCK_EXISTS
if (likely(X->Time.timer)) { if (likely(X->Time.timer)) {
_k_timeout_cancel(X); _k_timeout_cancel(X);
X->Comm = ENQ_RPL; X->Comm = _K_SVC_FIFO_ENQUE_REPLY;
} else { } else {
#endif #endif
X->Time.rcode = RC_FAIL; X->Time.rcode = RC_FAIL;
@ -347,7 +377,7 @@ int _task_fifo_ioctl(kfifo_t queue, int op)
{ {
struct k_args A; struct k_args A;
A.Comm = QUEUE; A.Comm = _K_SVC_FIFO_IOCTL;
A.Args.q1.queue = queue; A.Args.q1.queue = queue;
A.Args.q1.size = op; A.Args.q1.size = op;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);

View file

@ -250,7 +250,7 @@ int task_workload_get(void)
{ {
struct k_args A; struct k_args A;
A.Comm = READWL; A.Comm = _K_SVC_WORKLOAD_GET;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
return A.Args.u1.rval; return A.Args.u1.rval;
} }

View file

@ -139,20 +139,20 @@ static bool prepare_transfer(struct k_args *move,
* (this is shared code, irrespective of the value of 'move') * (this is shared code, irrespective of the value of 'move')
*/ */
__ASSERT_NO_MSG(NULL == reader->Forw); __ASSERT_NO_MSG(NULL == reader->Forw);
reader->Comm = RECV_ACK; reader->Comm = _K_SVC_MBOX_RECEIVE_ACK;
reader->Time.rcode = RC_OK; reader->Time.rcode = RC_OK;
__ASSERT_NO_MSG(NULL == writer->Forw); __ASSERT_NO_MSG(NULL == writer->Forw);
writer->alloc = true; writer->alloc = true;
writer->Comm = SEND_ACK; writer->Comm = _K_SVC_MBOX_SEND_ACK;
writer->Time.rcode = RC_OK; writer->Time.rcode = RC_OK;
if (move) { if (move) {
/* { move != NULL, which means full data exchange } */ /* { move != NULL, which means full data exchange } */
bool all_data_present = true; bool all_data_present = true;
move->Comm = MVD_REQ; move->Comm = _K_SVC_MOVEDATA_REQ;
/* /*
* transfer the data with the highest * transfer the data with the highest
* priority of reader and writer * priority of reader and writer
@ -237,7 +237,7 @@ void _k_mbox_send_ack(struct k_args *pCopyWriter)
#ifndef NO_KARG_CLEAR #ifndef NO_KARG_CLEAR
memset(&A, 0xfd, sizeof(struct k_args)); memset(&A, 0xfd, sizeof(struct k_args));
#endif #endif
A.Comm = SIGNALS; A.Comm = _K_SVC_SEM_SIGNAL;
A.Args.s1.sema = pCopyWriter->Args.m1.mess.extra.sema; A.Args.s1.sema = pCopyWriter->Args.m1.mess.extra.sema;
_k_sem_signal(&A); _k_sem_signal(&A);
} }
@ -253,7 +253,7 @@ void _k_mbox_send_ack(struct k_args *pCopyWriter)
* special value to tell if block should be * special value to tell if block should be
* freed or not * freed or not
*/ */
pCopyWriter->Comm = REL_BLOCK; pCopyWriter->Comm = _K_SVC_MEM_POOL_BLOCK_RELEASE;
pCopyWriter->Args.p1.poolid = pCopyWriter->Args.p1.poolid =
pCopyWriter->Args.m1.mess.tx_block.poolid; pCopyWriter->Args.m1.mess.tx_block.poolid;
pCopyWriter->Args.p1.rep_poolptr = pCopyWriter->Args.p1.rep_poolptr =
@ -300,7 +300,7 @@ void _k_mbox_send_reply(struct k_args *pCopyWriter)
FREETIMER(pCopyWriter->Time.timer); FREETIMER(pCopyWriter->Time.timer);
REMOVE_ELM(pCopyWriter); REMOVE_ELM(pCopyWriter);
pCopyWriter->Time.rcode = RC_TIME; pCopyWriter->Time.rcode = RC_TIME;
pCopyWriter->Comm = SEND_ACK; pCopyWriter->Comm = _K_SVC_MBOX_SEND_ACK;
SENDARGS(pCopyWriter); SENDARGS(pCopyWriter);
} }
@ -434,7 +434,7 @@ void _k_mbox_send_request(struct k_args *Writer)
* to blindly set it rather than waste time on a comparison. * to blindly set it rather than waste time on a comparison.
*/ */
CopyWriter->Comm = SEND_TMO; CopyWriter->Comm = _K_SVC_MBOX_SEND_REPLY;
/* Put the letter into the mailbox */ /* Put the letter into the mailbox */
INSERT_ELM(MailBox->Writers, CopyWriter); INSERT_ELM(MailBox->Writers, CopyWriter);
@ -456,7 +456,7 @@ void _k_mbox_send_request(struct k_args *Writer)
* This is a no-wait operation. * This is a no-wait operation.
* Notify the sender of failure. * Notify the sender of failure.
*/ */
CopyWriter->Comm = SEND_ACK; CopyWriter->Comm = _K_SVC_MBOX_SEND_ACK;
CopyWriter->Time.rcode = RC_FAIL; CopyWriter->Time.rcode = RC_FAIL;
SENDARGS(CopyWriter); SENDARGS(CopyWriter);
} }
@ -496,7 +496,7 @@ int _task_mbox_put(kmbox_t mbox,
M->mailbox = mbox; M->mailbox = mbox;
A.Prio = prio; A.Prio = prio;
A.Comm = SEND_REQ; A.Comm = _K_SVC_MBOX_SEND_REQUEST;
A.Time.ticks = time; A.Time.ticks = time;
A.Args.m1.mess = *M; A.Args.m1.mess = *M;
@ -546,7 +546,7 @@ void _k_mbox_receive_reply(struct k_args *pCopyReader)
FREETIMER(pCopyReader->Time.timer); FREETIMER(pCopyReader->Time.timer);
REMOVE_ELM(pCopyReader); REMOVE_ELM(pCopyReader);
pCopyReader->Time.rcode = RC_TIME; pCopyReader->Time.rcode = RC_TIME;
pCopyReader->Comm = RECV_ACK; pCopyReader->Comm = _K_SVC_MBOX_RECEIVE_ACK;
SENDARGS(pCopyReader); SENDARGS(pCopyReader);
#endif #endif
} }
@ -648,7 +648,7 @@ void _k_mbox_receive_request(struct k_args *Reader)
* to blindly set it rather than waste time on a comparison. * to blindly set it rather than waste time on a comparison.
*/ */
CopyReader->Comm = RECV_TMO; CopyReader->Comm = _K_SVC_MBOX_RECEIVE_REPLY;
/* Put the letter into the mailbox */ /* Put the letter into the mailbox */
INSERT_ELM(MailBox->Readers, CopyReader); INSERT_ELM(MailBox->Readers, CopyReader);
@ -670,7 +670,7 @@ void _k_mbox_receive_request(struct k_args *Reader)
* This is a no-wait operation. * This is a no-wait operation.
* Notify the receiver of failure. * Notify the receiver of failure.
*/ */
CopyReader->Comm = RECV_ACK; CopyReader->Comm = _K_SVC_MBOX_RECEIVE_ACK;
CopyReader->Time.rcode = RC_FAIL; CopyReader->Time.rcode = RC_FAIL;
SENDARGS(CopyReader); SENDARGS(CopyReader);
} }
@ -693,7 +693,7 @@ int _task_mbox_get(kmbox_t mbox,
*/ */
A.Prio = _k_current_task->Prio; A.Prio = _k_current_task->Prio;
A.Comm = RECV_REQ; A.Comm = _K_SVC_MBOX_RECEIVE_REQUEST;
A.Time.ticks = time; A.Time.ticks = time;
A.Args.m1.mess = *M; A.Args.m1.mess = *M;
@ -729,7 +729,7 @@ void _task_mbox_put_async(kmbox_t mbox,
A.Time.timer = NULL; A.Time.timer = NULL;
#endif #endif
A.Prio = prio; A.Prio = prio;
A.Comm = SEND_REQ; A.Comm = _K_SVC_MBOX_SEND_REQUEST;
A.Args.m1.mess = *M; A.Args.m1.mess = *M;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
} }
@ -754,7 +754,7 @@ void _k_mbox_receive_data(struct k_args *Starter)
CopyStarter->Ctxt.args = Starter; CopyStarter->Ctxt.args = Starter;
MoveD = CopyStarter->Args.m1.mess.extra.transfer; MoveD = CopyStarter->Args.m1.mess.extra.transfer;
CopyStarter->Comm = RECV_ACK; CopyStarter->Comm = _K_SVC_MBOX_RECEIVE_ACK;
CopyStarter->Time.rcode = RC_OK; CopyStarter->Time.rcode = RC_OK;
MoveD->Args.MovedReq.Extra.Setup.ContRcv = CopyStarter; MoveD->Args.MovedReq.Extra.Setup.ContRcv = CopyStarter;
@ -793,7 +793,7 @@ void _task_mbox_data_get(struct k_msg *M)
} }
A.Args.m1.mess = *M; A.Args.m1.mess = *M;
A.Comm = RECV_DATA; A.Comm = _K_SVC_MBOX_RECEIVE_DATA;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
} }
@ -884,7 +884,7 @@ int _task_mbox_data_get_async_block(struct k_msg *message,
struct k_args A; struct k_args A;
A.Args.m1.mess = *message; A.Args.m1.mess = *message;
A.Comm = RECV_DATA; A.Comm = _K_SVC_MBOX_RECEIVE_DATA;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
return RC_OK; /* task_mbox_data_get() doesn't return anything */ return RC_OK; /* task_mbox_data_get() doesn't return anything */
@ -911,7 +911,7 @@ void _k_mbox_send_data(struct k_args *Starter)
MoveD = CopyStarter->Args.m1.mess.extra.transfer; MoveD = CopyStarter->Args.m1.mess.extra.transfer;
CopyStarter->Time.rcode = RC_OK; CopyStarter->Time.rcode = RC_OK;
CopyStarter->Comm = SEND_ACK; CopyStarter->Comm = _K_SVC_MBOX_SEND_ACK;
MoveD->Args.MovedReq.Extra.Setup.ContSnd = CopyStarter; MoveD->Args.MovedReq.Extra.Setup.ContSnd = CopyStarter;
CopyStarter->Forw = NULL; CopyStarter->Forw = NULL;

View file

@ -122,7 +122,7 @@ void _k_mem_map_alloc(struct k_args *A)
if (A->Time.ticks == TICKS_UNLIMITED) if (A->Time.ticks == TICKS_UNLIMITED)
A->Time.timer = NULL; A->Time.timer = NULL;
else { else {
A->Comm = ALLOCTMO; A->Comm = _K_SVC_MEM_MAP_ALLOC_TIMEOUT;
_k_timeout_alloc(A); _k_timeout_alloc(A);
} }
#endif #endif
@ -145,7 +145,7 @@ int _task_mem_map_alloc(kmemory_map_t mmap, void **mptr, int32_t time)
{ {
struct k_args A; struct k_args A;
A.Comm = ALLOC; A.Comm = _K_SVC_MEM_MAP_ALLOC;
A.Time.ticks = time; A.Time.ticks = time;
A.Args.a1.mmap = mmap; A.Args.a1.mmap = mmap;
A.Args.a1.mptr = mptr; A.Args.a1.mptr = mptr;
@ -178,7 +178,7 @@ void _k_mem_map_dealloc(struct k_args *A)
#ifdef CONFIG_SYS_CLOCK_EXISTS #ifdef CONFIG_SYS_CLOCK_EXISTS
if (X->Time.timer) { if (X->Time.timer) {
_k_timeout_free(X->Time.timer); _k_timeout_free(X->Time.timer);
X->Comm = NOP; X->Comm = _K_SVC_NOP;
} }
#endif #endif
X->Time.rcode = RC_OK; X->Time.rcode = RC_OK;
@ -209,7 +209,7 @@ void _task_mem_map_free(kmemory_map_t mmap, void **mptr)
{ {
struct k_args A; struct k_args A;
A.Comm = DEALLOC; A.Comm = _K_SVC_MEM_MAP_DEALLOC;
A.Args.a1.mmap = mmap; A.Args.a1.mmap = mmap;
A.Args.a1.mptr = mptr; A.Args.a1.mptr = mptr;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);

View file

@ -218,7 +218,7 @@ void _k_defrag(struct k_args *A)
*/ */
GETARGS(NewGet); GETARGS(NewGet);
*NewGet = *A; *NewGet = *A;
NewGet->Comm = GET_BLOCK_WAIT; NewGet->Comm = _K_SVC_BLOCK_WAITERS_GET;
TO_ALIST(&_k_command_stack, NewGet); /*push on command stack */ TO_ALIST(&_k_command_stack, NewGet); /*push on command stack */
} }
} }
@ -237,7 +237,7 @@ void task_mem_pool_defragment(kmemory_pool_t Pid /* pool to defragment */
{ {
struct k_args A; struct k_args A;
A.Comm = POOL_DEFRAG; A.Comm = _K_SVC_DEFRAG;
A.Args.p1.poolid = Pid; A.Args.p1.poolid = Pid;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
} }
@ -545,7 +545,7 @@ void _k_mem_pool_block_get(struct k_args *A)
if (A->Time.ticks == TICKS_UNLIMITED) { if (A->Time.ticks == TICKS_UNLIMITED) {
A->Time.timer = NULL; A->Time.timer = NULL;
} else { } else {
A->Comm = GTBLTMO; A->Comm = _K_SVC_MEM_POOL_BLOCK_GET_TIMEOUT_HANDLE;
_k_timeout_alloc(A); _k_timeout_alloc(A);
} }
#endif #endif
@ -574,7 +574,7 @@ int _task_mem_pool_alloc(struct k_block *blockptr, /* ptr to requested block */
struct k_args A; struct k_args A;
A.Comm = GET_BLOCK; A.Comm = _K_SVC_MEM_POOL_BLOCK_GET;
A.Time.ticks = time; A.Time.ticks = time;
A.Args.p1.poolid = poolid; A.Args.p1.poolid = poolid;
A.Args.p1.req_size = reqsize; A.Args.p1.req_size = reqsize;
@ -646,7 +646,7 @@ void _k_mem_pool_block_release(struct k_args *A)
*/ */
GETARGS(NewGet); GETARGS(NewGet);
*NewGet = *A; *NewGet = *A;
NewGet->Comm = GET_BLOCK_WAIT; NewGet->Comm = _K_SVC_BLOCK_WAITERS_GET;
TO_ALIST(&_k_command_stack, NewGet); /* push on command stack */ TO_ALIST(&_k_command_stack, NewGet); /* push on command stack */
} }
if (A->alloc) { if (A->alloc) {
@ -676,7 +676,7 @@ void task_mem_pool_free(struct k_block *blockptr /* pointer to block to free */
{ {
struct k_args A; struct k_args A;
A.Comm = REL_BLOCK; A.Comm = _K_SVC_MEM_POOL_BLOCK_RELEASE;
A.Args.p1.poolid = blockptr->poolid; A.Args.p1.poolid = blockptr->poolid;
A.Args.p1.req_size = blockptr->req_size; A.Args.p1.req_size = blockptr->req_size;
A.Args.p1.rep_poolptr = blockptr->address_in_pool; A.Args.p1.rep_poolptr = blockptr->address_in_pool;

View file

@ -58,7 +58,7 @@
#include <nano_private.h> #include <nano_private.h>
/** /**
* @brief Reply to a mutex lock request (LOCK_TMO, LOCK_RPL) * @brief Reply to a mutex lock request.
* *
* This routine replies to a mutex lock request. This will occur if either * This routine replies to a mutex lock request. This will occur if either
* the waiting task times out or acquires the mutex lock. * the waiting task times out or acquires the mutex lock.
@ -82,7 +82,7 @@ void _k_mutex_lock_reply(
FREETIMER(A->Time.timer); FREETIMER(A->Time.timer);
} }
if (A->Comm == LOCK_TMO) {/* Timeout case */ if (A->Comm == _K_SVC_MUTEX_LOCK_REPLY_TIMEOUT) {/* Timeout case */
REMOVE_ELM(A); REMOVE_ELM(A);
A->Time.rcode = RC_TIME; A->Time.rcode = RC_TIME;
@ -122,7 +122,7 @@ void _k_mutex_lock_reply(
if (Mutex->OwnerCurrentPrio != newPriority) { if (Mutex->OwnerCurrentPrio != newPriority) {
GETARGS(PrioChanger); GETARGS(PrioChanger);
PrioChanger->alloc = true; PrioChanger->alloc = true;
PrioChanger->Comm = SPRIO; PrioChanger->Comm = _K_SVC_TASK_PRIORITY_SET;
PrioChanger->Prio = newPriority; PrioChanger->Prio = newPriority;
PrioChanger->Args.g1.task = Mutex->Owner; PrioChanger->Args.g1.task = Mutex->Owner;
PrioChanger->Args.g1.prio = newPriority; PrioChanger->Args.g1.prio = newPriority;
@ -140,6 +140,21 @@ void _k_mutex_lock_reply(
_k_state_bit_reset(A->Ctxt.proc, TF_LOCK); _k_state_bit_reset(A->Ctxt.proc, TF_LOCK);
} }
/**
* @brief Reply to a mutex lock request with timeout.
*
* This routine replies to a mutex lock request. This will occur if either
* the waiting task times out or acquires the mutex lock.
*
* @param A Pointer to a k_args structure.
*
* @return N/A
*/
void _k_mutex_lock_reply_timeout(struct k_args *A)
{
_k_mutex_lock_reply(A);
}
/** /**
* @brief Process a mutex lock request * @brief Process a mutex lock request
* *
@ -222,7 +237,7 @@ void _k_mutex_lock_request(struct k_args *A /* pointer to mutex lock
* Prepare to call _k_mutex_lock_reply() should * Prepare to call _k_mutex_lock_reply() should
* the request time out. * the request time out.
*/ */
A->Comm = LOCK_TMO; A->Comm = _K_SVC_MUTEX_LOCK_REPLY_TIMEOUT;
_k_timeout_alloc(A); _k_timeout_alloc(A);
} }
#endif #endif
@ -243,7 +258,7 @@ void _k_mutex_lock_request(struct k_args *A /* pointer to mutex lock
GETARGS(PrioBooster); GETARGS(PrioBooster);
PrioBooster->alloc = true; PrioBooster->alloc = true;
PrioBooster->Comm = SPRIO; PrioBooster->Comm = _K_SVC_TASK_PRIORITY_SET;
PrioBooster->Prio = BoostedPrio; PrioBooster->Prio = BoostedPrio;
PrioBooster->Args.g1.task = Mutex->Owner; PrioBooster->Args.g1.task = Mutex->Owner;
PrioBooster->Args.g1.prio = BoostedPrio; PrioBooster->Args.g1.prio = BoostedPrio;
@ -278,7 +293,7 @@ int _task_mutex_lock(
{ {
struct k_args A; /* argument packet */ struct k_args A; /* argument packet */
A.Comm = LOCK_REQ; A.Comm = _K_SVC_MUTEX_LOCK_REQUEST;
A.Time.ticks = time; A.Time.ticks = time;
A.Args.l1.mutex = mutex; A.Args.l1.mutex = mutex;
A.Args.l1.task = _k_current_task->Ident; A.Args.l1.task = _k_current_task->Ident;
@ -330,7 +345,7 @@ void _k_mutex_unlock(struct k_args *A /* pointer to mutex unlock
GETARGS(PrioDowner); GETARGS(PrioDowner);
PrioDowner->alloc = true; PrioDowner->alloc = true;
PrioDowner->Comm = SPRIO; PrioDowner->Comm = _K_SVC_TASK_PRIORITY_SET;
PrioDowner->Prio = Mutex->OwnerOriginalPrio; PrioDowner->Prio = Mutex->OwnerOriginalPrio;
PrioDowner->Args.g1.task = Mutex->Owner; PrioDowner->Args.g1.task = Mutex->Owner;
PrioDowner->Args.g1.prio = Mutex->OwnerOriginalPrio; PrioDowner->Args.g1.prio = Mutex->OwnerOriginalPrio;
@ -358,7 +373,7 @@ void _k_mutex_unlock(struct k_args *A /* pointer to mutex unlock
* send a reply with a return code of RC_OK. * send a reply with a return code of RC_OK.
*/ */
_k_timeout_cancel(X); _k_timeout_cancel(X);
X->Comm = LOCK_RPL; X->Comm = _K_SVC_MUTEX_LOCK_REPLY;
} else { } else {
#endif #endif
/* /*
@ -392,7 +407,7 @@ void _task_mutex_unlock(kmutex_t mutex /* mutex to unlock */
{ {
struct k_args A; /* argument packet */ struct k_args A; /* argument packet */
A.Comm = UNLOCK; A.Comm = _K_SVC_MUTEX_UNLOCK;
A.Args.l1.mutex = mutex; A.Args.l1.mutex = mutex;
A.Args.l1.task = _k_current_task->Ident; A.Args.l1.task = _k_current_task->Ident;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);

View file

@ -69,6 +69,6 @@ void _task_nop(void)
{ {
struct k_args A; struct k_args A;
A.Comm = NOP; A.Comm = _K_SVC_NOP;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
} }

View file

@ -65,7 +65,7 @@ int task_offload_to_fiber(int (*func)(), void *argp)
{ {
struct k_args A; struct k_args A;
A.Comm = OFFLOAD; A.Comm = _K_SVC_OFFLOAD_TO_FIBER;
A.Args.u1.func = func; A.Args.u1.func = func;
A.Args.u1.argp = argp; A.Args.u1.argp = argp;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);

View file

@ -92,7 +92,7 @@ int _task_pipe_get(kpipe_t Id, void *pBuffer,
} }
A.Prio = _k_current_task->Prio; A.Prio = _k_current_task->Prio;
A.Comm = PIPE_GET_REQUEST; A.Comm = _K_SVC_PIPE_GET_REQUEST;
A.Time.ticks = TimeOut; A.Time.ticks = TimeOut;
A.Args.pipe_req.ReqInfo.pipe.id = Id; A.Args.pipe_req.ReqInfo.pipe.id = Id;
@ -144,7 +144,7 @@ int _task_pipe_put(kpipe_t Id, void *pBuffer,
} }
A.Prio = _k_current_task->Prio; A.Prio = _k_current_task->Prio;
A.Comm = PIPE_PUT_REQUEST; A.Comm = _K_SVC_PIPE_PUT_REQUEST;
A.Time.ticks = TimeOut; A.Time.ticks = TimeOut;
A.Args.pipe_req.ReqInfo.pipe.id = Id; A.Args.pipe_req.ReqInfo.pipe.id = Id;
@ -189,7 +189,7 @@ int _task_pipe_put_async(kpipe_t Id, struct k_block Block,
} }
A.Prio = _k_current_task->Prio; A.Prio = _k_current_task->Prio;
A.Comm = PIPE_PUT_REQUEST; A.Comm = _K_SVC_PIPE_PUT_REQUEST;
A.Time.ticks = TICKS_UNLIMITED; A.Time.ticks = TICKS_UNLIMITED;
/* same behavior in flow as a blocking call w/o a timeout */ /* same behavior in flow as a blocking call w/o a timeout */

View file

@ -148,7 +148,7 @@ void _k_pipe_get_request(struct k_args *RequestOrig)
* PIPE_GET_TIMEOUT microkernel command to the packet even though it * PIPE_GET_TIMEOUT microkernel command to the packet even though it
* is only useful to the finite timeout case. * is only useful to the finite timeout case.
*/ */
RequestProc->Comm = PIPE_GET_TIMEOUT; RequestProc->Comm = _K_SVC_PIPE_GET_TIMEOUT;
if (_TIME_B == _k_pipe_time_type_get(&RequestProc->Args)) { if (_TIME_B == _k_pipe_time_type_get(&RequestProc->Args)) {
/* /*
* The writer specified TICKS_UNLIMITED, so NULL the timer. * The writer specified TICKS_UNLIMITED, so NULL the timer.
@ -180,7 +180,7 @@ void _k_pipe_get_request(struct k_args *RequestOrig)
__ASSERT_NO_MSG(XFER_IDLE == __ASSERT_NO_MSG(XFER_IDLE ==
RequestProc->Args.pipe_xfer_req.status); RequestProc->Args.pipe_xfer_req.status);
__ASSERT_NO_MSG(0 == RequestProc->Args.pipe_xfer_req.iSizeXferred); __ASSERT_NO_MSG(0 == RequestProc->Args.pipe_xfer_req.iSizeXferred);
RequestProc->Comm = PIPE_GET_REPLY; RequestProc->Comm = _K_SVC_PIPE_GET_REPLY;
_k_pipe_get_reply(RequestProc); _k_pipe_get_reply(RequestProc);
} }
return; return;
@ -225,7 +225,7 @@ void _k_pipe_get_reply(struct k_args *ReqProc)
struct k_args *ReqOrig = ReqProc->Ctxt.args; struct k_args *ReqOrig = ReqProc->Ctxt.args;
PIPE_REQUEST_STATUS status; PIPE_REQUEST_STATUS status;
ReqOrig->Comm = PIPE_GET_ACK; ReqOrig->Comm = _K_SVC_PIPE_GET_ACK;
/* determine return value */ /* determine return value */

View file

@ -167,7 +167,7 @@ void _k_pipe_put_request(struct k_args *RequestOrig)
* PIPE_PUT_TIMEOUT microkernel command to the packet even though it * PIPE_PUT_TIMEOUT microkernel command to the packet even though it
* is only useful to the finite timeout case. * is only useful to the finite timeout case.
*/ */
RequestProc->Comm = PIPE_PUT_TIMEOUT; RequestProc->Comm = _K_SVC_PIPE_PUT_TIMEOUT;
if (_TIME_B == _k_pipe_time_type_get(&RequestProc->Args)) { if (_TIME_B == _k_pipe_time_type_get(&RequestProc->Args)) {
/* /*
* The writer specified TICKS_UNLIMITED; NULL the timer. * The writer specified TICKS_UNLIMITED; NULL the timer.
@ -199,7 +199,7 @@ void _k_pipe_put_request(struct k_args *RequestOrig)
__ASSERT_NO_MSG(XFER_IDLE == __ASSERT_NO_MSG(XFER_IDLE ==
RequestProc->Args.pipe_xfer_req.status); RequestProc->Args.pipe_xfer_req.status);
__ASSERT_NO_MSG(0 == RequestProc->Args.pipe_xfer_req.iSizeXferred); __ASSERT_NO_MSG(0 == RequestProc->Args.pipe_xfer_req.iSizeXferred);
RequestProc->Comm = PIPE_PUT_REPLY; RequestProc->Comm = _K_SVC_PIPE_PUT_REPLY;
_k_pipe_put_reply(RequestProc); _k_pipe_put_reply(RequestProc);
} }
return; return;
@ -245,7 +245,7 @@ void _k_pipe_put_reply(struct k_args *ReqProc)
struct k_args *ReqOrig = ReqProc->Ctxt.args; struct k_args *ReqOrig = ReqProc->Ctxt.args;
PIPE_REQUEST_STATUS status; PIPE_REQUEST_STATUS status;
ReqOrig->Comm = PIPE_PUT_ACK; ReqOrig->Comm = _K_SVC_PIPE_PUT_ACK;
/* determine return value: /* determine return value:
*/ */
@ -296,7 +296,7 @@ void _k_pipe_put_ack(struct k_args *Request)
/* invoke command to release block */ /* invoke command to release block */
blockptr = &pipe_ack->ReqType.Async.block; blockptr = &pipe_ack->ReqType.Async.block;
A.Comm = REL_BLOCK; A.Comm = _K_SVC_MEM_POOL_BLOCK_RELEASE;
A.Args.p1.poolid = blockptr->poolid; A.Args.p1.poolid = blockptr->poolid;
A.Args.p1.req_size = blockptr->req_size; A.Args.p1.req_size = blockptr->req_size;
A.Args.p1.rep_poolptr = blockptr->address_in_pool; A.Args.p1.rep_poolptr = blockptr->address_in_pool;
@ -307,7 +307,7 @@ void _k_pipe_put_ack(struct k_args *Request)
/* invoke command to signal sema */ /* invoke command to signal sema */
struct k_args A; struct k_args A;
A.Comm = SIGNALS; A.Comm = _K_SVC_SEM_SIGNAL;
A.Args.s1.sema = pipe_ack->ReqType.Async.sema; A.Args.s1.sema = pipe_ack->ReqType.Async.sema;
_k_sem_signal(&A); /* will return immediately */ _k_sem_signal(&A); /* will return immediately */
} }

View file

@ -250,7 +250,7 @@ static void setup_movedata(struct k_args *A,
struct k_args *pContSend; struct k_args *pContSend;
struct k_args *pContRecv; struct k_args *pContRecv;
A->Comm = MVD_REQ; A->Comm = _K_SVC_MOVEDATA_REQ;
A->Ctxt.proc = NULL; A->Ctxt.proc = NULL;
/* this caused problems when != NULL related to set/reset of state bits */ /* this caused problems when != NULL related to set/reset of state bits */
@ -266,14 +266,14 @@ static void setup_movedata(struct k_args *A,
GETARGS(pContRecv); GETARGS(pContRecv);
pContSend->Forw = NULL; pContSend->Forw = NULL;
pContSend->Comm = PIPE_MOVEDATA_ACK; pContSend->Comm = _K_SVC_PIPE_MOVEDATA_ACK;
pContSend->Args.pipe_xfer_ack.pPipe = pPipe; pContSend->Args.pipe_xfer_ack.pPipe = pPipe;
pContSend->Args.pipe_xfer_ack.XferType = XferType; pContSend->Args.pipe_xfer_ack.XferType = XferType;
pContSend->Args.pipe_xfer_ack.ID = XferID; pContSend->Args.pipe_xfer_ack.ID = XferID;
pContSend->Args.pipe_xfer_ack.iSize = size; pContSend->Args.pipe_xfer_ack.iSize = size;
pContRecv->Forw = NULL; pContRecv->Forw = NULL;
pContRecv->Comm = PIPE_MOVEDATA_ACK; pContRecv->Comm = _K_SVC_PIPE_MOVEDATA_ACK;
pContRecv->Args.pipe_xfer_ack.pPipe = pPipe; pContRecv->Args.pipe_xfer_ack.pPipe = pPipe;
pContRecv->Args.pipe_xfer_ack.XferType = XferType; pContRecv->Args.pipe_xfer_ack.XferType = XferType;
pContRecv->Args.pipe_xfer_ack.ID = XferID; pContRecv->Args.pipe_xfer_ack.ID = XferID;
@ -970,7 +970,7 @@ void _k_pipe_process(struct pipe_struct *pPipe, struct k_args *pNLWriter,
myfreetimer(&(pReader->Time.timer)); myfreetimer(&(pReader->Time.timer));
} }
if (0 == pReader->Args.pipe_xfer_req.iNbrPendXfers) { if (0 == pReader->Args.pipe_xfer_req.iNbrPendXfers) {
pReader->Comm = PIPE_GET_REPLY; pReader->Comm = _K_SVC_PIPE_GET_REPLY;
/* if terminated and no pending Xfers anymore, /* if terminated and no pending Xfers anymore,
we have to reply */ we have to reply */
_k_pipe_get_reply(pReader); _k_pipe_get_reply(pReader);
@ -1000,7 +1000,7 @@ void _k_pipe_process(struct pipe_struct *pPipe, struct k_args *pNLWriter,
myfreetimer(&(pWriter->Time.timer)); myfreetimer(&(pWriter->Time.timer));
} }
if (0 == pWriter->Args.pipe_xfer_req.iNbrPendXfers) { if (0 == pWriter->Args.pipe_xfer_req.iNbrPendXfers) {
pWriter->Comm = PIPE_PUT_REPLY; pWriter->Comm = _K_SVC_PIPE_PUT_REPLY;
/* if terminated and no pending Xfers anymore, /* if terminated and no pending Xfers anymore,
we have to reply */ we have to reply */
_k_pipe_put_reply(pWriter); _k_pipe_put_reply(pWriter);

View file

@ -61,9 +61,10 @@ static void signal_semaphore(int n, struct sem_struct *S)
X = A->Forw; X = A->Forw;
#ifdef CONFIG_SYS_CLOCK_EXISTS #ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Comm == WAITSREQ || A->Comm == WAITSTMO) if (A->Comm == _K_SVC_SEM_WAIT_REQUEST
|| A->Comm == _K_SVC_SEM_WAIT_REPLY_TIMEOUT)
#else #else
if (A->Comm == WAITSREQ) if (A->Comm == _K_SVC_SEM_WAIT_REQUEST)
#endif #endif
{ {
S->Level--; S->Level--;
@ -75,7 +76,7 @@ static void signal_semaphore(int n, struct sem_struct *S)
#ifdef CONFIG_SYS_CLOCK_EXISTS #ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.timer) { if (A->Time.timer) {
_k_timeout_cancel(A); _k_timeout_cancel(A);
A->Comm = WAITSRPL; A->Comm = _K_SVC_SEM_WAIT_REPLY;
} else { } else {
#endif #endif
A->Time.rcode = RC_OK; A->Time.rcode = RC_OK;
@ -83,9 +84,9 @@ static void signal_semaphore(int n, struct sem_struct *S)
#ifdef CONFIG_SYS_CLOCK_EXISTS #ifdef CONFIG_SYS_CLOCK_EXISTS
} }
#endif #endif
} else if (A->Comm == WAITMREQ) { } else if (A->Comm == _K_SVC_SEM_GROUP_WAIT_REQUEST) {
S->Level--; S->Level--;
A->Comm = WAITMRDY; A->Comm = _K_SVC_SEM_GROUP_WAIT_READY;
GETARGS(Y); GETARGS(Y);
*Y = *A; *Y = *A;
SENDARGS(Y); SENDARGS(Y);
@ -120,8 +121,9 @@ void _k_sem_group_wait_cancel(struct k_args *A)
} else { } else {
S->Waiters = X->Forw; S->Waiters = X->Forw;
} }
if (X->Comm == WAITMREQ || X->Comm == WAITMRDY) { if (X->Comm == _K_SVC_SEM_GROUP_WAIT_REQUEST
if (X->Comm == WAITMRDY) { || X->Comm == _K_SVC_SEM_GROUP_WAIT_READY) {
if (X->Comm == _K_SVC_SEM_GROUP_WAIT_READY) {
/* obtain struct k_args of waiting task */ /* obtain struct k_args of waiting task */
struct k_args *waitTaskArgs = X->Ctxt.args; struct k_args *waitTaskArgs = X->Ctxt.args;
@ -129,8 +131,9 @@ void _k_sem_group_wait_cancel(struct k_args *A)
/* /*
* Determine if the wait cancellation request is being * Determine if the wait cancellation request is being
* processed after the state of the 'Waiters' packet state * processed after the state of the 'Waiters' packet state
* has been updated to WAITMRDY, but before the WAITMRDY * has been updated to _K_SVC_SEM_GROUP_WAIT_READY, but before
* packet has been processed. This will occur if a WAITMTMO * the _K_SVC_SEM_GROUP_WAIT_READY packet has been processed.
* This will occur if a _K_SVC_SEM_GROUP_WAIT_TIMEOUT
* timer expiry occurs between the update of the packet state * timer expiry occurs between the update of the packet state
* and the processing of the WAITMRDY packet. * and the processing of the WAITMRDY packet.
*/ */
@ -174,7 +177,7 @@ void _k_sem_group_wait_accept(struct k_args *A)
} else { } else {
S->Waiters = X->Forw; S->Waiters = X->Forw;
} }
if (X->Comm == WAITMRDY) { if (X->Comm == _K_SVC_SEM_GROUP_WAIT_READY) {
_k_sem_group_wait(X); _k_sem_group_wait(X);
} else { } else {
FREEARGS(X); /* ERROR */ FREEARGS(X); /* ERROR */
@ -206,7 +209,8 @@ void _k_sem_group_wait_timeout(struct k_args *A)
GETARGS(R); GETARGS(R);
R->Prio = A->Prio; R->Prio = A->Prio;
R->Comm = R->Comm =
(K_COMM)((*L == A->Args.s1.sema) ? WAITMACC : WAITMCAN); ((*L == A->Args.s1.sema) ?
_K_SVC_SEM_GROUP_WAIT_ACCEPT : _K_SVC_SEM_GROUP_WAIT_CANCEL);
R->Ctxt.args = A; R->Ctxt.args = A;
R->Args.s1.sema = *L++; R->Args.s1.sema = *L++;
SENDARGS(R); SENDARGS(R);
@ -219,7 +223,7 @@ void _k_sem_group_ready(struct k_args *R)
if (A->Args.s1.sema == ENDLIST) { if (A->Args.s1.sema == ENDLIST) {
A->Args.s1.sema = R->Args.s1.sema; A->Args.s1.sema = R->Args.s1.sema;
A->Comm = WAITMTMO; A->Comm = _K_SVC_SEM_GROUP_WAIT_TIMEOUT;
#ifdef CONFIG_SYS_CLOCK_EXISTS #ifdef CONFIG_SYS_CLOCK_EXISTS
if (A->Time.timer) { if (A->Time.timer) {
_k_timeout_cancel(A); _k_timeout_cancel(A);
@ -236,7 +240,7 @@ void _k_sem_wait_reply(struct k_args *A)
if (A->Time.timer) { if (A->Time.timer) {
FREETIMER(A->Time.timer); FREETIMER(A->Time.timer);
} }
if (A->Comm == WAITSTMO) { if (A->Comm == _K_SVC_SEM_WAIT_REPLY_TIMEOUT) {
REMOVE_ELM(A); REMOVE_ELM(A);
A->Time.rcode = RC_TIME; A->Time.rcode = RC_TIME;
} else } else
@ -245,6 +249,11 @@ void _k_sem_wait_reply(struct k_args *A)
_k_state_bit_reset(A->Ctxt.proc, TF_SEMA); _k_state_bit_reset(A->Ctxt.proc, TF_SEMA);
} }
void _k_sem_wait_reply_timeout(struct k_args *A)
{
_k_sem_wait_reply(A);
}
void _k_sem_group_wait_request(struct k_args *A) void _k_sem_group_wait_request(struct k_args *A)
{ {
struct sem_struct *S = _k_sem_list + OBJ_INDEX(A->Args.s1.sema); struct sem_struct *S = _k_sem_list + OBJ_INDEX(A->Args.s1.sema);
@ -258,7 +267,7 @@ void _k_sem_group_wait_request(struct k_args *A)
} else { } else {
S->Waiters = X->Forw; S->Waiters = X->Forw;
} }
if (X->Comm == WAITMCAN) { if (X->Comm == _K_SVC_SEM_GROUP_WAIT_CANCEL) {
_k_sem_group_wait(X); _k_sem_group_wait(X);
} else { } else {
FREEARGS(X); /* ERROR */ FREEARGS(X); /* ERROR */
@ -296,7 +305,7 @@ void _k_sem_group_wait_any(struct k_args *A)
GETARGS(R); GETARGS(R);
R->Prio = _k_current_task->Prio; R->Prio = _k_current_task->Prio;
R->Comm = WAITMREQ; R->Comm = _K_SVC_SEM_GROUP_WAIT_REQUEST;
R->Ctxt.args = A; R->Ctxt.args = A;
R->Args.s1.sema = *L++; R->Args.s1.sema = *L++;
SENDARGS(R); SENDARGS(R);
@ -311,7 +320,7 @@ void _k_sem_group_wait_any(struct k_args *A)
if (A->Time.ticks == TICKS_UNLIMITED) { if (A->Time.ticks == TICKS_UNLIMITED) {
A->Time.timer = NULL; A->Time.timer = NULL;
} else { } else {
A->Comm = WAITMTMO; A->Comm = _K_SVC_SEM_GROUP_WAIT_TIMEOUT;
_k_timeout_alloc(A); _k_timeout_alloc(A);
} }
} }
@ -338,7 +347,7 @@ void _k_sem_wait_request(struct k_args *A)
if (A->Time.ticks == TICKS_UNLIMITED) { if (A->Time.ticks == TICKS_UNLIMITED) {
A->Time.timer = NULL; A->Time.timer = NULL;
} else { } else {
A->Comm = WAITSTMO; A->Comm = _K_SVC_SEM_WAIT_REPLY_TIMEOUT;
_k_timeout_alloc(A); _k_timeout_alloc(A);
} }
#endif #endif
@ -352,7 +361,7 @@ int _task_sem_take(ksem_t sema, int32_t time)
{ {
struct k_args A; struct k_args A;
A.Comm = WAITSREQ; A.Comm = _K_SVC_SEM_WAIT_REQUEST;
A.Time.ticks = time; A.Time.ticks = time;
A.Args.s1.sema = sema; A.Args.s1.sema = sema;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
@ -363,7 +372,7 @@ ksem_t _task_sem_group_take(ksemg_t group, int32_t time)
{ {
struct k_args A; struct k_args A;
A.Comm = WAITMANY; A.Comm = _K_SVC_SEM_GROUP_WAIT_ANY;
A.Prio = _k_current_task->Prio; A.Prio = _k_current_task->Prio;
A.Time.ticks = time; A.Time.ticks = time;
A.Args.s1.list = group; A.Args.s1.list = group;
@ -391,7 +400,7 @@ void task_sem_give(ksem_t sema)
{ {
struct k_args A; struct k_args A;
A.Comm = SIGNALS; A.Comm = _K_SVC_SEM_SIGNAL;
A.Args.s1.sema = sema; A.Args.s1.sema = sema;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
} }
@ -400,7 +409,7 @@ void task_sem_group_give(ksemg_t group)
{ {
struct k_args A; struct k_args A;
A.Comm = SIGNALM; A.Comm = _K_SVC_SEM_GROUP_SIGNAL;
A.Args.s1.list = group; A.Args.s1.list = group;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
} }
@ -418,7 +427,7 @@ void isr_sem_give(ksem_t sema, struct cmd_pkt_set *pSet)
*/ */
pCommand = (struct k_args *)_cmd_pkt_get(pSet); pCommand = (struct k_args *)_cmd_pkt_get(pSet);
pCommand->Comm = SIGNALS; pCommand->Comm = _K_SVC_SEM_SIGNAL;
pCommand->Args.s1.sema = sema; pCommand->Args.s1.sema = sema;
nano_isr_stack_push(&_k_command_stack, (uint32_t)pCommand); nano_isr_stack_push(&_k_command_stack, (uint32_t)pCommand);
@ -444,7 +453,7 @@ void task_sem_reset(ksem_t sema)
{ {
struct k_args A; struct k_args A;
A.Comm = RESETS; A.Comm = _K_SVC_SEM_RESET;
A.Args.s1.sema = sema; A.Args.s1.sema = sema;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
} }
@ -453,7 +462,7 @@ void task_sem_group_reset(ksemg_t group)
{ {
struct k_args A; struct k_args A;
A.Comm = RESETM; A.Comm = _K_SVC_SEM_GROUP_RESET;
A.Args.s1.list = group; A.Args.s1.list = group;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
} }
@ -472,7 +481,7 @@ int task_sem_count_get(ksem_t sema)
{ {
struct k_args A; struct k_args A;
A.Comm = INQSEMA; A.Comm = _K_SVC_SEM_INQUIRY;
A.Args.s1.sema = sema; A.Args.s1.sema = sema;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
return A.Time.rcode; return A.Time.rcode;

View file

@ -132,7 +132,7 @@ FUNC_NORETURN void K_swapper(int parameter1, /* not used */
_k_task_monitor_args(pArgs); _k_task_monitor_args(pArgs);
} }
#endif #endif
_k_server_dispatch_table[pArgs->Comm](pArgs); (*pArgs->Comm)(pArgs);
} }
/* check if another fiber (of equal or greater priority) /* check if another fiber (of equal or greater priority)

View file

@ -364,7 +364,7 @@ void _task_ioctl(ktask_t task, /* task on which to operate */
{ {
struct k_args A; struct k_args A;
A.Comm = TSKOP; A.Comm = _K_SVC_TASK_OP;
A.Args.g1.task = task; A.Args.g1.task = task;
A.Args.g1.opt = opt; A.Args.g1.opt = opt;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
@ -435,7 +435,7 @@ void _task_group_ioctl(ktask_group_t group, /* task group */
{ {
struct k_args A; struct k_args A;
A.Comm = GRPOP; A.Comm = _K_SVC_TASK_GROUP_OP;
A.Args.g1.group = group; A.Args.g1.group = group;
A.Args.g1.opt = opt; A.Args.g1.opt = opt;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
@ -530,7 +530,7 @@ void task_priority_set(ktask_t task, /* task whose priority is to be set */
{ {
struct k_args A; struct k_args A;
A.Comm = SPRIO; A.Comm = _K_SVC_TASK_PRIORITY_SET;
A.Args.g1.task = task; A.Args.g1.task = task;
A.Args.g1.prio = prio; A.Args.g1.prio = prio;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
@ -573,7 +573,7 @@ void task_yield(void)
{ {
struct k_args A; struct k_args A;
A.Comm = YIELD; A.Comm = _K_SVC_TASK_YIELD;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
} }

View file

@ -336,7 +336,7 @@ int64_t task_tick_delta(int64_t *reftime /* pointer to reference time */
{ {
struct k_args A; struct k_args A;
A.Comm = ELAPSE; A.Comm = _K_SVC_TIME_ELAPSE;
A.Args.c1.time1 = *reftime; A.Args.c1.time1 = *reftime;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
*reftime = A.Args.c1.time1; *reftime = A.Args.c1.time1;

View file

@ -280,7 +280,7 @@ ktimer_t task_timer_alloc(void)
{ {
struct k_args A; struct k_args A;
A.Comm = TALLOC; A.Comm = _K_SVC_TIMER_ALLOC;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
return _timer_ptr_to_id(A.Args.c1.timer); return _timer_ptr_to_id(A.Args.c1.timer);
@ -324,7 +324,7 @@ void task_timer_free(ktimer_t timer)
{ {
struct k_args A; struct k_args A;
A.Comm = TDEALLOC; A.Comm = _K_SVC_TIMER_DEALLOC;
A.Args.c1.timer = _timer_id_to_ptr(timer); A.Args.c1.timer = _timer_id_to_ptr(timer);
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
} }
@ -372,7 +372,7 @@ void _k_timer_start(struct k_args *P)
/* Track the semaphore to signal for when the timer expires. */ /* Track the semaphore to signal for when the timer expires. */
if (P->Args.c1.sema != ENDLIST) { if (P->Args.c1.sema != ENDLIST) {
T->Args->Comm = SIGNALS; T->Args->Comm = _K_SVC_SEM_SIGNAL;
T->Args->Args.s1.sema = P->Args.c1.sema; T->Args->Args.s1.sema = P->Args.c1.sema;
} }
_k_timer_enlist(T); _k_timer_enlist(T);
@ -408,7 +408,7 @@ void task_timer_start(ktimer_t timer, int32_t duration, int32_t period,
{ {
struct k_args A; struct k_args A;
A.Comm = TSTART; A.Comm = _K_SVC_TIMER_START;
A.Args.c1.timer = _timer_id_to_ptr(timer); A.Args.c1.timer = _timer_id_to_ptr(timer);
A.Args.c1.time1 = (int64_t)duration; A.Args.c1.time1 = (int64_t)duration;
A.Args.c1.time2 = period; A.Args.c1.time2 = period;
@ -433,7 +433,7 @@ void task_timer_restart(ktimer_t timer, int32_t duration, int32_t period)
{ {
struct k_args A; struct k_args A;
A.Comm = TSTART; A.Comm = _K_SVC_TIMER_START;
A.Args.c1.timer = _timer_id_to_ptr(timer); A.Args.c1.timer = _timer_id_to_ptr(timer);
A.Args.c1.time1 = (int64_t)duration; A.Args.c1.time1 = (int64_t)duration;
A.Args.c1.time2 = period; A.Args.c1.time2 = period;
@ -475,7 +475,7 @@ void task_timer_stop(ktimer_t timer)
{ {
struct k_args A; struct k_args A;
A.Comm = TSTOP; A.Comm = _K_SVC_TIMER_STOP;
A.Args.c1.timer = _timer_id_to_ptr(timer); A.Args.c1.timer = _timer_id_to_ptr(timer);
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
} }
@ -525,7 +525,7 @@ void _k_task_sleep(struct k_args *P)
T->period = 0; T->period = 0;
T->Args = P; T->Args = P;
P->Comm = WAKEUP; P->Comm = _K_SVC_TASK_WAKEUP;
P->Ctxt.proc = _k_current_task; P->Ctxt.proc = _k_current_task;
P->Time.timer = T; P->Time.timer = T;
@ -550,7 +550,7 @@ void task_sleep(int32_t ticks)
{ {
struct k_args A; struct k_args A;
A.Comm = SLEEP; A.Comm = _K_SVC_TASK_SLEEP;
A.Time.ticks = ticks; A.Time.ticks = ticks;
KERNEL_ENTRY(&A); KERNEL_ENTRY(&A);
} }

View file

@ -299,14 +299,14 @@ def kernel_main_c_kargs():
kernel_main_c_out("\n" + kernel_main_c_out("\n" +
"struct k_args _k_server_command_packets[%s] =\n" % (num_kargs) + "struct k_args _k_server_command_packets[%s] =\n" % (num_kargs) +
"{\n" + "{\n" +
" {NULL, NULL, 0, 0, (K_COMM) UNDEFINED},\n") " {NULL, NULL, 0, 0, _K_SVC_UNDEFINED},\n")
for i in range(1, num_kargs - 1): for i in range(1, num_kargs - 1):
kernel_main_c_out( kernel_main_c_out(
" {&_k_server_command_packets[%d], " % (i - 1) + " {&_k_server_command_packets[%d], " % (i - 1) +
"NULL, 0, 0, (K_COMM) UNDEFINED},\n") "NULL, 0, 0, _K_SVC_UNDEFINED},\n")
kernel_main_c_out( kernel_main_c_out(
" {&_k_server_command_packets[%d], " % (num_kargs - 2) + " {&_k_server_command_packets[%d], " % (num_kargs - 2) +
"NULL, 0, 0, (K_COMM) UNDEFINED}\n" + "NULL, 0, 0, _K_SVC_UNDEFINED}\n" +
"};\n") "};\n")
# linked list of free command packets # linked list of free command packets
@ -761,201 +761,6 @@ def kernel_main_c_pools():
kernel_main_c_out(pool_descriptors) kernel_main_c_out(pool_descriptors)
def kernel_main_c_kernel_services():
""" Generate kernel services function table """
# initialize table with info for all possible kernel services
func_table = [
"/* 0 */ _k_nop,", # required
"/* 1 */ _k_movedata_request,", # required
"/* 2 */ (kernelfunc) NULL,", # unused
"/* 3 */ (kernelfunc) NULL,", # unused
"/* 4 */ _k_offload_to_fiber,", # required
"/* 5 */ _k_workload_get,", # required
"/* 6 */ _k_sem_signal,", # depends on semaphores
"/* 7 */ _k_sem_group_signal,", # depends on semaphores
"/* 8 */ _k_sem_reset,", # depends on semaphores
"/* 9 */ _k_sem_group_reset,", # depends on semaphores
"/* 10 */ _k_sem_wait_request,", # depends on semaphores
"/* 11 */ _k_sem_wait_reply,", # depends on semaphores
"/* 12 */ _k_sem_wait_reply,", # depends on semaphores and
# timers
"/* 13 */ _k_sem_group_wait_any,", # depends on semaphores
"/* 14 */ _k_sem_group_wait_request,", # depends on semaphores
"/* 15 */ _k_sem_group_ready,", # depends on semaphores
"/* 16 */ _k_sem_group_wait_cancel,", # depends on semaphores
"/* 17 */ _k_sem_group_wait_accept,", # depends on semaphores
"/* 18 */ _k_sem_group_wait,", # depends on semaphores
"/* 19 */ _k_sem_group_wait_timeout,", # depends on semaphores
# (but not timers)
"/* 20 */ _k_sem_inquiry,", # depends on semaphores
"/* 21 */ _k_mutex_lock_request,", # depends on mutexes
"/* 22 */ _k_mutex_lock_reply,", # depends on mutexes
"/* 23 */ _k_mutex_lock_reply,", # depends on mutexes and
# timers
"/* 24 */ _k_mutex_unlock,", # depends on mutexes
"/* 25 */ _k_fifo_enque_request,", # depends on FIFOs
"/* 26 */ _k_fifo_enque_reply,", # depends on FIFOs
"/* 27 */ _k_fifo_enque_reply,", # depends on FIFOs and timers
"/* 28 */ _k_fifo_deque_request,", # depends on FIFOs
"/* 29 */ _k_fifo_deque_reply,", # depends on FIFOs
"/* 30 */ _k_fifo_deque_reply,", # depends on FIFOs and timers
"/* 31 */ _k_fifo_ioctl,", # depends on FIFOs
"/* 32 */ _k_mbox_send_request,", # depends on mailboxes
"/* 33 */ _k_mbox_send_reply,", # depends on mailboxes and
# timers
"/* 34 */ _k_mbox_send_ack,", # depends on mailboxes
"/* 35 */ _k_mbox_send_data,", # depends on mailboxes
"/* 36 */ _k_mbox_receive_request,", # depends on mailboxes
"/* 37 */ _k_mbox_receive_reply,", # depends on mailboxes and
# timers
"/* 38 */ _k_mbox_receive_ack,", # depends on mailboxes
"/* 39 */ _k_mbox_receive_data,", # depends on mailboxes
"/* 40 */ _k_time_elapse,", # depends on timers
"/* 41 */ _k_task_sleep,", # depends on timers
"/* 42 */ _k_task_wakeup,", # depends on timers
"/* 43 */ _k_task_op,", # required
"/* 44 */ _k_task_group_op,", # required
"/* 45 */ _k_task_priority_set,", # required
"/* 46 */ _k_task_yield,", # required
"/* 47 */ _k_mem_map_alloc,", # depends on memory maps
"/* 48 */ _k_mem_map_dealloc,", # depends on memory maps
"/* 49 */ _k_timer_alloc,", # depends on timers
"/* 50 */ _k_timer_dealloc,", # depends on timers
"/* 51 */ _k_timer_start,", # depends on timers
"/* 52 */ _k_timer_stop,", # depends on timers
"/* 53 */ _k_mem_map_alloc_timeout,", # depends on memory maps and
# timers
"/* 54 */ (kernelfunc) NULL,", # unused
"/* 55 */ (kernelfunc) NULL,", # unused
"/* 56 */ (kernelfunc) NULL,", # unused
"/* 57 */ (kernelfunc) NULL,", # unused
"/* 58 */ _k_event_test,", # required
"/* 59 */ _k_event_handler_set,", # required
"/* 60 */ _k_event_signal,", # required
"/* 61 */ _k_mem_pool_block_get,", # depends on memory pools
"/* 62 */ _k_mem_pool_block_release,", # depends on memory pools
"/* 63 */ _k_block_waiters_get,", # depends on memory pools
"/* 64 */ _k_mem_pool_block_get_timeout_handle,", # depends on memory pools
# and timers
"/* 65 */ _k_defrag,", # depends on memory pools
"/* 66 */ (kernelfunc) NULL,", # unused
"/* 67 */ (kernelfunc) NULL,", # unused
"/* 68 */ (kernelfunc) NULL,", # unused
"/* 69 */ (kernelfunc) NULL,", # unused
"/* 70 */ (kernelfunc) NULL,", # unused
"/* 71 */ (kernelfunc) NULL,", # unused
"/* 72 */ _k_pipe_put_request,", # depends on pipes
"/* 73 */ _k_pipe_put_timeout,", # depends on pipes and timers
"/* 74 */ _k_pipe_put_reply,", # depends on pipes
"/* 75 */ _k_pipe_put_ack,", # depends on pipes
"/* 76 */ _k_pipe_get_request,", # depends on pipes
"/* 77 */ _k_pipe_get_timeout,", # depends on pipes and timers
"/* 78 */ _k_pipe_get_reply,", # depends on pipes
"/* 79 */ _k_pipe_get_ack,", # depends on pipes
"/* 80 */ _k_pipe_movedata_ack,", # depends on pipes
"/* 81 */ _k_event_test_timeout" # depends on timers
]
# eliminate table entries for kernel services that project doesn't utilize
# (note: some entries can be eliminated for more than one reason)
if (len(sema_list) == 0):
func_table[6] = "/* 6 */ (kernelfunc) NULL,"
func_table[7] = "/* 7 */ (kernelfunc) NULL,"
func_table[8] = "/* 8 */ (kernelfunc) NULL,"
func_table[9] = "/* 9 */ (kernelfunc) NULL,"
func_table[10] = "/* 10 */ (kernelfunc) NULL,"
func_table[11] = "/* 11 */ (kernelfunc) NULL,"
func_table[12] = "/* 12 */ (kernelfunc) NULL,"
func_table[13] = "/* 13 */ (kernelfunc) NULL,"
func_table[14] = "/* 14 */ (kernelfunc) NULL,"
func_table[15] = "/* 15 */ (kernelfunc) NULL,"
func_table[16] = "/* 16 */ (kernelfunc) NULL,"
func_table[17] = "/* 17 */ (kernelfunc) NULL,"
func_table[18] = "/* 18 */ (kernelfunc) NULL,"
func_table[19] = "/* 19 */ (kernelfunc) NULL,"
func_table[20] = "/* 20 */ (kernelfunc) NULL,"
if (len(mutex_list) == 0):
func_table[21] = "/* 21 */ (kernelfunc) NULL,"
func_table[22] = "/* 22 */ (kernelfunc) NULL,"
func_table[23] = "/* 23 */ (kernelfunc) NULL,"
func_table[24] = "/* 24 */ (kernelfunc) NULL,"
if (len(fifo_list) == 0):
func_table[25] = "/* 25 */ (kernelfunc) NULL,"
func_table[26] = "/* 26 */ (kernelfunc) NULL,"
func_table[27] = "/* 27 */ (kernelfunc) NULL,"
func_table[28] = "/* 28 */ (kernelfunc) NULL,"
func_table[29] = "/* 29 */ (kernelfunc) NULL,"
func_table[30] = "/* 30 */ (kernelfunc) NULL,"
func_table[31] = "/* 31 */ (kernelfunc) NULL,"
if (len(mbx_list) == 0):
func_table[32] = "/* 32 */ (kernelfunc) NULL,"
func_table[33] = "/* 33 */ (kernelfunc) NULL,"
func_table[34] = "/* 34 */ (kernelfunc) NULL,"
func_table[35] = "/* 35 */ (kernelfunc) NULL,"
func_table[36] = "/* 36 */ (kernelfunc) NULL,"
func_table[37] = "/* 37 */ (kernelfunc) NULL,"
func_table[38] = "/* 38 */ (kernelfunc) NULL,"
func_table[39] = "/* 39 */ (kernelfunc) NULL,"
if (len(map_list) == 0):
func_table[47] = "/* 47 */ (kernelfunc) NULL,"
func_table[48] = "/* 48 */ (kernelfunc) NULL,"
func_table[53] = "/* 53 */ (kernelfunc) NULL,"
if (len(pool_list) == 0):
func_table[61] = "/* 61 */ (kernelfunc) NULL,"
func_table[62] = "/* 62 */ (kernelfunc) NULL,"
func_table[63] = "/* 63 */ (kernelfunc) NULL,"
func_table[64] = "/* 64 */ (kernelfunc) NULL,"
func_table[65] = "/* 65 */ (kernelfunc) NULL,"
if (len(pipe_list) == 0):
func_table[72] = "/* 72 */ (kernelfunc) NULL,"
func_table[73] = "/* 73 */ (kernelfunc) NULL,"
func_table[74] = "/* 74 */ (kernelfunc) NULL,"
func_table[75] = "/* 75 */ (kernelfunc) NULL,"
func_table[76] = "/* 76 */ (kernelfunc) NULL,"
func_table[77] = "/* 77 */ (kernelfunc) NULL,"
func_table[78] = "/* 78 */ (kernelfunc) NULL,"
func_table[79] = "/* 79 */ (kernelfunc) NULL,"
func_table[80] = "/* 80 */ (kernelfunc) NULL,"
if (num_timers == 0):
func_table[12] = "/* 12 */ (kernelfunc) NULL,"
func_table[23] = "/* 23 */ (kernelfunc) NULL,"
func_table[27] = "/* 27 */ (kernelfunc) NULL,"
func_table[30] = "/* 30 */ (kernelfunc) NULL,"
func_table[33] = "/* 33 */ (kernelfunc) NULL,"
func_table[37] = "/* 37 */ (kernelfunc) NULL,"
func_table[40] = "/* 40 */ (kernelfunc) NULL,"
func_table[41] = "/* 41 */ (kernelfunc) NULL,"
func_table[42] = "/* 42 */ (kernelfunc) NULL,"
func_table[49] = "/* 49 */ (kernelfunc) NULL,"
func_table[50] = "/* 50 */ (kernelfunc) NULL,"
func_table[51] = "/* 51 */ (kernelfunc) NULL,"
func_table[52] = "/* 52 */ (kernelfunc) NULL,"
func_table[53] = "/* 53 */ (kernelfunc) NULL,"
func_table[64] = "/* 64 */ (kernelfunc) NULL,"
func_table[73] = "/* 73 */ (kernelfunc) NULL,"
func_table[77] = "/* 77 */ (kernelfunc) NULL,"
func_table[81] = "/* 81 */ (kernelfunc) NULL,"
# generate function table
kernel_main_c_out("\n" +
"const kernelfunc _k_server_dispatch_table[82] =\n" +
"{\n")
for func in func_table:
kernel_main_c_out(" " + func + "\n")
kernel_main_c_out("};\n")
def kernel_main_c_node_init(): def kernel_main_c_node_init():
""" Generate node initialization routine """ """ Generate node initialization routine """
@ -1000,7 +805,6 @@ def kernel_main_c_generate():
kernel_main_c_mailboxes() kernel_main_c_mailboxes()
kernel_main_c_maps() kernel_main_c_maps()
kernel_main_c_pools() kernel_main_c_pools()
kernel_main_c_kernel_services()
kernel_main_c_node_init() kernel_main_c_node_init()
kernel_main_c_main() kernel_main_c_main()