From 74c8852d0538d77dd8d3c795fcedbccaf303c21e Mon Sep 17 00:00:00 2001 From: Daniel Leung Date: Sat, 25 Jul 2015 15:31:09 -0700 Subject: [PATCH] microkernel: remove kernel service dispatch table This change removes the internal number-to-function mapping of microkernel services. Instead, function pointers are used directly to specify which service to invoke. This is in preparation for private kernel objects. Previously, only kernel objects defined in MDEF files would have their corresponding functions included in the final binary, because sysgen populated the number-to-function mapping array from those definitions. This causes an issue when a certain type of object is defined only in source code and never in an MDEF file: the corresponding mapping entries would be removed, so the functions are never included in the binary. For example, if no mutexes are defined in an MDEF file, the _k_mutex_*() functions would not be included. With this change, any use of private kernel objects hints to the linker that those functions are needed, so they are not removed from the final binary. Change-Id: If48864abcd6471bcb7964ec00fe668bcabe3239b Signed-off-by: Daniel Leung --- arch/arm/core/task_abort.c | 2 +- kernel/microkernel/include/kernel_main.h | 15 ++ .../microkernel/include/micro_private_types.h | 169 +++++++-------- kernel/microkernel/k_event.c | 10 +- kernel/microkernel/k_fifo.c | 50 ++++- kernel/microkernel/k_idle.c | 2 +- kernel/microkernel/k_mailbox.c | 36 ++-- kernel/microkernel/k_memory_map.c | 8 +- kernel/microkernel/k_memory_pool.c | 12 +- kernel/microkernel/k_mutex.c | 33 ++- kernel/microkernel/k_nop.c | 2 +- kernel/microkernel/k_offload.c | 2 +- kernel/microkernel/k_pipe.c | 6 +- kernel/microkernel/k_pipe_get.c | 6 +- kernel/microkernel/k_pipe_put.c | 10 +- kernel/microkernel/k_pipe_xfer.c | 10 +- kernel/microkernel/k_semaphore.c | 59 ++--- kernel/microkernel/k_server.c | 2 +- kernel/microkernel/k_task.c | 8 +- kernel/microkernel/k_ticker.c | 2 +- kernel/microkernel/k_timer.c | 16 +- scripts/sysgen | 202 +----------------- 22 files changed, 266 insertions(+), 396 deletions(-) diff --git a/arch/arm/core/task_abort.c b/arch/arm/core/task_abort.c index c0c3f6d4ddd..169370483a9 100644 --- a/arch/arm/core/task_abort.c +++ b/arch/arm/core/task_abort.c @@ -74,7 +74,7 @@ void _TaskAbort(void) if (_ScbIsInThreadMode()) { _task_ioctl(_k_current_task->Ident, taskAbortCode); } else { - cmd_packet.Comm = TSKOP; + cmd_packet.Comm = _K_SVC_TASK_OP; cmd_packet.Args.g1.task = _k_current_task->Ident; cmd_packet.Args.g1.opt = taskAbortCode; cmd_packet.alloc = false; diff --git a/kernel/microkernel/include/kernel_main.h b/kernel/microkernel/include/kernel_main.h index cd78e8efdec..1d6f1503e7e 100644 --- a/kernel/microkernel/include/kernel_main.h +++ b/kernel/microkernel/include/kernel_main.h @@ -128,6 +128,18 @@ extern void _k_sem_wait_request(struct k_args *); */ extern void _k_sem_wait_reply(struct k_args *); +/** + * + * @brief Reply to a semaphore wait request with timeout. + * + * @param A Pointer to a k_args structure.
+ * + * @return N/A + * + * @sa _k_sem_wait_reply + */ +extern void _k_sem_wait_reply_timeout(struct k_args *A); + /** * * @brief Handle semaphore group wait request @@ -227,11 +239,14 @@ extern void _k_sem_inquiry(struct k_args *); extern void _k_mutex_lock_request(struct k_args *); extern void _k_mutex_lock_reply(struct k_args *); +extern void _k_mutex_lock_reply_timeout(struct k_args *); extern void _k_mutex_unlock(struct k_args *); extern void _k_fifo_enque_request(struct k_args *); extern void _k_fifo_enque_reply(struct k_args *); +extern void _k_fifo_enque_reply_timeout(struct k_args *); extern void _k_fifo_deque_request(struct k_args *); extern void _k_fifo_deque_reply(struct k_args *); +extern void _k_fifo_deque_reply_timeout(struct k_args *); extern void _k_fifo_ioctl(struct k_args *); extern void _k_mbox_send_request(struct k_args *); extern void _k_mbox_send_reply(struct k_args *); diff --git a/kernel/microkernel/include/micro_private_types.h b/kernel/microkernel/include/micro_private_types.h index 99d731a5dd9..ed79ff41b71 100644 --- a/kernel/microkernel/include/micro_private_types.h +++ b/kernel/microkernel/include/micro_private_types.h @@ -55,91 +55,88 @@ struct k_timer { /* Kernel server command codes */ -typedef enum { - NOP, - MVD_REQ, - MVD_VOID, /* obsolete now */ - RAWDATA, - OFFLOAD, - READWL, - SIGNALS, - SIGNALM, - RESETS, - RESETM, - WAITSREQ, - WAITSRPL, - WAITSTMO, - WAITMANY, - WAITMREQ, - WAITMRDY, - WAITMCAN, - WAITMACC, - WAITMEND, - WAITMTMO, - INQSEMA, - LOCK_REQ, - LOCK_RPL, - LOCK_TMO, - UNLOCK, - ENQ_REQ, - ENQ_RPL, - ENQ_TMO, - DEQ_REQ, - DEQ_RPL, - DEQ_TMO, - QUEUE, - SEND_REQ, - SEND_TMO, - SEND_ACK, - SEND_DATA, - RECV_REQ, - RECV_TMO, - RECV_ACK, - RECV_DATA, - ELAPSE, - SLEEP, - WAKEUP, - TSKOP, - GRPOP, - SPRIO, - YIELD, - ALLOC, - DEALLOC, - TALLOC, - TDEALLOC, - TSTART, - TSTOP, - ALLOCTMO, - REMREPLY, - DEBUG_REQ, - DEBUG_ACK, - EVENTENABLE, /* obsolete now */ - EVENTTEST, - EVENTHANDLER, - EVENTSIGNAL, - GET_BLOCK, - REL_BLOCK, - GET_BLOCK_WAIT, - GTBLTMO, - POOL_DEFRAG, - MVDSND_REQ, - MVDRCV_REQ, - MVDSND_ACK, - MVDRCV_ACK, - MEMCPY_REQ, - MEMCPY_RPL, - PIPE_PUT_REQUEST, - PIPE_PUT_TIMEOUT, - PIPE_PUT_REPLY, - PIPE_PUT_ACK, - PIPE_GET_REQUEST, - PIPE_GET_TIMEOUT, - PIPE_GET_REPLY, - PIPE_GET_ACK, - PIPE_MOVEDATA_ACK, - EVENT_TMO, - UNDEFINED = -1 -} K_COMM; +#define _K_SVC_UNDEFINED (NULL) + +#define _K_SVC_BLOCK_WAITERS_GET _k_block_waiters_get +#define _K_SVC_DEFRAG _k_defrag +#define _K_SVC_MOVEDATA_REQ _k_movedata_request +#define _K_SVC_NOP _k_nop +#define _K_SVC_OFFLOAD_TO_FIBER _k_offload_to_fiber +#define _K_SVC_TIME_ELAPSE _k_time_elapse +#define _K_SVC_WORKLOAD_GET _k_workload_get + +#define _K_SVC_EVENT_HANDLER_SET _k_event_handler_set +#define _K_SVC_EVENT_SIGNAL _k_event_signal +#define _K_SVC_EVENT_TEST _k_event_test +#define _K_SVC_EVENT_TEST_TIMEOUT _k_event_test_timeout + +#define _K_SVC_SEM_INQUIRY _k_sem_inquiry +#define _K_SVC_SEM_SIGNAL _k_sem_signal +#define _K_SVC_SEM_RESET _k_sem_reset +#define _K_SVC_SEM_WAIT_REQUEST _k_sem_wait_request +#define _K_SVC_SEM_WAIT_REPLY _k_sem_wait_reply +#define _K_SVC_SEM_WAIT_REPLY_TIMEOUT _k_sem_wait_reply_timeout +#define _K_SVC_SEM_GROUP_SIGNAL _k_sem_group_signal +#define _K_SVC_SEM_GROUP_RESET _k_sem_group_reset +#define _K_SVC_SEM_GROUP_WAIT _k_sem_group_wait +#define _K_SVC_SEM_GROUP_WAIT_ANY _k_sem_group_wait_any +#define _K_SVC_SEM_GROUP_WAIT_ACCEPT _k_sem_group_wait_accept +#define _K_SVC_SEM_GROUP_WAIT_CANCEL _k_sem_group_wait_cancel +#define _K_SVC_SEM_GROUP_WAIT_READY 
_k_sem_group_ready +#define _K_SVC_SEM_GROUP_WAIT_REQUEST _k_sem_group_wait_request +#define _K_SVC_SEM_GROUP_WAIT_TIMEOUT _k_sem_group_wait_timeout + +#define _K_SVC_MUTEX_LOCK_REQUEST _k_mutex_lock_request +#define _K_SVC_MUTEX_LOCK_REPLY _k_mutex_lock_reply +#define _K_SVC_MUTEX_LOCK_REPLY_TIMEOUT _k_mutex_lock_reply_timeout +#define _K_SVC_MUTEX_UNLOCK _k_mutex_unlock + +#define _K_SVC_FIFO_ENQUE_REQUEST _k_fifo_enque_request +#define _K_SVC_FIFO_ENQUE_REPLY _k_fifo_enque_reply +#define _K_SVC_FIFO_ENQUE_REPLY_TIMEOUT _k_fifo_enque_reply_timeout +#define _K_SVC_FIFO_DEQUE_REQUEST _k_fifo_deque_request +#define _K_SVC_FIFO_DEQUE_REPLY _k_fifo_deque_reply +#define _K_SVC_FIFO_DEQUE_REPLY_TIMEOUT _k_fifo_deque_reply_timeout +#define _K_SVC_FIFO_IOCTL _k_fifo_ioctl + +#define _K_SVC_MBOX_SEND_REQUEST _k_mbox_send_request +#define _K_SVC_MBOX_SEND_REPLY _k_mbox_send_reply +#define _K_SVC_MBOX_SEND_ACK _k_mbox_send_ack +#define _K_SVC_MBOX_SEND_DATA _k_mbox_send_data +#define _K_SVC_MBOX_RECEIVE_REQUEST _k_mbox_receive_request +#define _K_SVC_MBOX_RECEIVE_REPLY _k_mbox_receive_reply +#define _K_SVC_MBOX_RECEIVE_ACK _k_mbox_receive_ack +#define _K_SVC_MBOX_RECEIVE_DATA _k_mbox_receive_data + +#define _K_SVC_TASK_SLEEP _k_task_sleep +#define _K_SVC_TASK_WAKEUP _k_task_wakeup +#define _K_SVC_TASK_OP _k_task_op +#define _K_SVC_TASK_GROUP_OP _k_task_group_op +#define _K_SVC_TASK_PRIORITY_SET _k_task_priority_set +#define _K_SVC_TASK_YIELD _k_task_yield + +#define _K_SVC_MEM_MAP_ALLOC _k_mem_map_alloc +#define _K_SVC_MEM_MAP_ALLOC_TIMEOUT _k_mem_map_alloc_timeout +#define _K_SVC_MEM_MAP_DEALLOC _k_mem_map_dealloc + +#define _K_SVC_TIMER_ALLOC _k_timer_alloc +#define _K_SVC_TIMER_DEALLOC _k_timer_dealloc +#define _K_SVC_TIMER_START _k_timer_start +#define _K_SVC_TIMER_STOP _k_timer_stop + +#define _K_SVC_MEM_POOL_BLOCK_GET _k_mem_pool_block_get +#define _K_SVC_MEM_POOL_BLOCK_GET_TIMEOUT_HANDLE _k_mem_pool_block_get_timeout_handle +#define _K_SVC_MEM_POOL_BLOCK_RELEASE _k_mem_pool_block_release + +#define _K_SVC_PIPE_PUT_REQUEST _k_pipe_put_request +#define _K_SVC_PIPE_PUT_TIMEOUT _k_pipe_put_timeout +#define _K_SVC_PIPE_PUT_REPLY _k_pipe_put_reply +#define _K_SVC_PIPE_PUT_ACK _k_pipe_put_ack +#define _K_SVC_PIPE_GET_REQUEST _k_pipe_get_request +#define _K_SVC_PIPE_GET_TIMEOUT _k_pipe_get_timeout +#define _K_SVC_PIPE_GET_REPLY _k_pipe_get_reply +#define _K_SVC_PIPE_GET_ACK _k_pipe_get_ack +#define _K_SVC_PIPE_MOVEDATA_ACK _k_pipe_movedata_ack /* Task queue header */ @@ -460,7 +457,7 @@ struct k_args { /* 'alloc' is true if k_args is allocated via GETARGS() */ bool alloc __aligned(4); - K_COMM Comm __aligned(4); + void (*Comm)(struct k_args *); /* * Ctxt needs to be aligned to avoid "unaligned write" exception on ARM diff --git a/kernel/microkernel/k_event.c b/kernel/microkernel/k_event.c index 297f686a719..dc4c4073a11 100644 --- a/kernel/microkernel/k_event.c +++ b/kernel/microkernel/k_event.c @@ -77,7 +77,7 @@ int task_event_set_handler(kevent_t event, kevent_handler_t handler) { struct k_args A; - A.Comm = EVENTHANDLER; + A.Comm = _K_SVC_EVENT_HANDLER_SET; A.Args.e1.event = event; A.Args.e1.func = handler; KERNEL_ENTRY(&A); @@ -128,7 +128,7 @@ void _k_event_test(struct k_args *A) if (A->Time.ticks == TICKS_UNLIMITED) { A->Time.timer = NULL; } else { - A->Comm = EVENT_TMO; + A->Comm = _K_SVC_EVENT_TEST_TIMEOUT; _k_timeout_alloc(A); } #endif @@ -151,7 +151,7 @@ int _task_event_recv(kevent_t event, int32_t time) { struct k_args A; - A.Comm = EVENTTEST; + A.Comm = _K_SVC_EVENT_TEST; A.Args.e1.event = 
event; A.Time.ticks = time; KERNEL_ENTRY(&A); @@ -186,7 +186,7 @@ void _k_do_event_signal(kevent_t event) #ifdef CONFIG_SYS_CLOCK_EXISTS if (A->Time.timer != NULL) { _k_timeout_free(A->Time.timer); - A->Comm = NOP; + A->Comm = _K_SVC_NOP; } #endif A->Time.rcode = RC_OK; @@ -223,7 +223,7 @@ int task_event_send(kevent_t event) { struct k_args A; - A.Comm = EVENTSIGNAL; + A.Comm = _K_SVC_EVENT_SIGNAL; A.Args.e1.event = event; KERNEL_ENTRY(&A); return A.Time.rcode; diff --git a/kernel/microkernel/k_fifo.c b/kernel/microkernel/k_fifo.c index 292ef5f3313..4eb7cd60b3e 100644 --- a/kernel/microkernel/k_fifo.c +++ b/kernel/microkernel/k_fifo.c @@ -55,7 +55,7 @@ void _k_fifo_enque_reply(struct k_args *A) #ifdef CONFIG_SYS_CLOCK_EXISTS if (A->Time.timer) FREETIMER(A->Time.timer); - if (unlikely(A->Comm == ENQ_TMO)) { + if (unlikely(A->Comm == _K_SVC_FIFO_ENQUE_REPLY_TIMEOUT)) { REMOVE_ELM(A); A->Time.rcode = RC_TIME; } else { @@ -68,6 +68,21 @@ void _k_fifo_enque_reply(struct k_args *A) _k_state_bit_reset(A->Ctxt.proc, TF_ENQU); } +/** + * + * @brief Finish performing an incomplete FIFO enqueue request with timeout. + * + * @param A Pointer to a k_args structure + * + * @return N/A + * + * @sa _k_fifo_enque_reply + */ +void _k_fifo_enque_reply_timeout(struct k_args *A) +{ + _k_fifo_enque_reply(A); +} + /** * * @brief Perform a FIFO enqueue request @@ -96,7 +111,7 @@ void _k_fifo_enque_request(struct k_args *A) #ifdef CONFIG_SYS_CLOCK_EXISTS if (W->Time.timer) { _k_timeout_cancel(W); - W->Comm = DEQ_RPL; + W->Comm = _K_SVC_FIFO_DEQUE_REPLY; } else { #endif W->Time.rcode = RC_OK; @@ -134,7 +149,7 @@ void _k_fifo_enque_request(struct k_args *A) if (A->Time.ticks == TICKS_UNLIMITED) A->Time.timer = NULL; else { - A->Comm = ENQ_TMO; + A->Comm = _K_SVC_FIFO_ENQUE_REPLY_TIMEOUT; _k_timeout_alloc(A); } #endif @@ -151,7 +166,7 @@ int _task_fifo_put(kfifo_t queue, /* FIFO queue */ { struct k_args A; - A.Comm = ENQ_REQ; + A.Comm = _K_SVC_FIFO_ENQUE_REQUEST; A.Time.ticks = time; A.Args.q1.data = (char *)data; A.Args.q1.queue = queue; @@ -172,7 +187,7 @@ void _k_fifo_deque_reply(struct k_args *A) #ifdef CONFIG_SYS_CLOCK_EXISTS if (A->Time.timer) FREETIMER(A->Time.timer); - if (unlikely(A->Comm == DEQ_TMO)) { + if (unlikely(A->Comm == _K_SVC_FIFO_DEQUE_REPLY_TIMEOUT)) { REMOVE_ELM(A); A->Time.rcode = RC_TIME; } else { @@ -185,6 +200,21 @@ void _k_fifo_deque_reply(struct k_args *A) _k_state_bit_reset(A->Ctxt.proc, TF_DEQU); } +/** + * + * @brief Finish performing an incomplete FIFO dequeue request with timeout. + * + * @param A Pointer to a k_args structure. 
+ * + * @return N/A + * + * @sa _k_fifo_deque_reply + */ +void _k_fifo_deque_reply_timeout(struct k_args *A) +{ + _k_fifo_deque_reply(A); +} + /** * * @brief Perform FIFO dequeue request @@ -229,7 +259,7 @@ void _k_fifo_deque_request(struct k_args *A) #ifdef CONFIG_SYS_CLOCK_EXISTS if (W->Time.timer) { _k_timeout_cancel(W); - W->Comm = ENQ_RPL; + W->Comm = _K_SVC_FIFO_ENQUE_REPLY; } else { #endif W->Time.rcode = RC_OK; @@ -253,7 +283,7 @@ void _k_fifo_deque_request(struct k_args *A) if (A->Time.ticks == TICKS_UNLIMITED) A->Time.timer = NULL; else { - A->Comm = DEQ_TMO; + A->Comm = _K_SVC_FIFO_DEQUE_REPLY_TIMEOUT; _k_timeout_alloc(A); } #endif @@ -281,7 +311,7 @@ int _task_fifo_get(kfifo_t queue, void *data, int32_t time) { struct k_args A; - A.Comm = DEQ_REQ; + A.Comm = _K_SVC_FIFO_DEQUE_REQUEST; A.Time.ticks = time; A.Args.q1.data = (char *)data; A.Args.q1.queue = queue; @@ -314,7 +344,7 @@ void _k_fifo_ioctl(struct k_args *A) #ifdef CONFIG_SYS_CLOCK_EXISTS if (likely(X->Time.timer)) { _k_timeout_cancel(X); - X->Comm = ENQ_RPL; + X->Comm = _K_SVC_FIFO_ENQUE_REPLY; } else { #endif X->Time.rcode = RC_FAIL; @@ -347,7 +377,7 @@ int _task_fifo_ioctl(kfifo_t queue, int op) { struct k_args A; - A.Comm = QUEUE; + A.Comm = _K_SVC_FIFO_IOCTL; A.Args.q1.queue = queue; A.Args.q1.size = op; KERNEL_ENTRY(&A); diff --git a/kernel/microkernel/k_idle.c b/kernel/microkernel/k_idle.c index 3caa088f00a..639b3a469b9 100644 --- a/kernel/microkernel/k_idle.c +++ b/kernel/microkernel/k_idle.c @@ -250,7 +250,7 @@ int task_workload_get(void) { struct k_args A; - A.Comm = READWL; + A.Comm = _K_SVC_WORKLOAD_GET; KERNEL_ENTRY(&A); return A.Args.u1.rval; } diff --git a/kernel/microkernel/k_mailbox.c b/kernel/microkernel/k_mailbox.c index 0926bcfda5c..b4b86d8099f 100644 --- a/kernel/microkernel/k_mailbox.c +++ b/kernel/microkernel/k_mailbox.c @@ -139,20 +139,20 @@ static bool prepare_transfer(struct k_args *move, * (this is shared code, irrespective of the value of 'move') */ __ASSERT_NO_MSG(NULL == reader->Forw); - reader->Comm = RECV_ACK; + reader->Comm = _K_SVC_MBOX_RECEIVE_ACK; reader->Time.rcode = RC_OK; __ASSERT_NO_MSG(NULL == writer->Forw); writer->alloc = true; - writer->Comm = SEND_ACK; + writer->Comm = _K_SVC_MBOX_SEND_ACK; writer->Time.rcode = RC_OK; if (move) { /* { move != NULL, which means full data exchange } */ bool all_data_present = true; - move->Comm = MVD_REQ; + move->Comm = _K_SVC_MOVEDATA_REQ; /* * transfer the data with the highest * priority of reader and writer @@ -237,7 +237,7 @@ void _k_mbox_send_ack(struct k_args *pCopyWriter) #ifndef NO_KARG_CLEAR memset(&A, 0xfd, sizeof(struct k_args)); #endif - A.Comm = SIGNALS; + A.Comm = _K_SVC_SEM_SIGNAL; A.Args.s1.sema = pCopyWriter->Args.m1.mess.extra.sema; _k_sem_signal(&A); } @@ -253,7 +253,7 @@ void _k_mbox_send_ack(struct k_args *pCopyWriter) * special value to tell if block should be * freed or not */ - pCopyWriter->Comm = REL_BLOCK; + pCopyWriter->Comm = _K_SVC_MEM_POOL_BLOCK_RELEASE; pCopyWriter->Args.p1.poolid = pCopyWriter->Args.m1.mess.tx_block.poolid; pCopyWriter->Args.p1.rep_poolptr = @@ -300,7 +300,7 @@ void _k_mbox_send_reply(struct k_args *pCopyWriter) FREETIMER(pCopyWriter->Time.timer); REMOVE_ELM(pCopyWriter); pCopyWriter->Time.rcode = RC_TIME; - pCopyWriter->Comm = SEND_ACK; + pCopyWriter->Comm = _K_SVC_MBOX_SEND_ACK; SENDARGS(pCopyWriter); } @@ -434,7 +434,7 @@ void _k_mbox_send_request(struct k_args *Writer) * to blindly set it rather than waste time on a comparison. 
*/ - CopyWriter->Comm = SEND_TMO; + CopyWriter->Comm = _K_SVC_MBOX_SEND_REPLY; /* Put the letter into the mailbox */ INSERT_ELM(MailBox->Writers, CopyWriter); @@ -456,7 +456,7 @@ void _k_mbox_send_request(struct k_args *Writer) * This is a no-wait operation. * Notify the sender of failure. */ - CopyWriter->Comm = SEND_ACK; + CopyWriter->Comm = _K_SVC_MBOX_SEND_ACK; CopyWriter->Time.rcode = RC_FAIL; SENDARGS(CopyWriter); } @@ -496,7 +496,7 @@ int _task_mbox_put(kmbox_t mbox, M->mailbox = mbox; A.Prio = prio; - A.Comm = SEND_REQ; + A.Comm = _K_SVC_MBOX_SEND_REQUEST; A.Time.ticks = time; A.Args.m1.mess = *M; @@ -546,7 +546,7 @@ void _k_mbox_receive_reply(struct k_args *pCopyReader) FREETIMER(pCopyReader->Time.timer); REMOVE_ELM(pCopyReader); pCopyReader->Time.rcode = RC_TIME; - pCopyReader->Comm = RECV_ACK; + pCopyReader->Comm = _K_SVC_MBOX_RECEIVE_ACK; SENDARGS(pCopyReader); #endif } @@ -648,7 +648,7 @@ void _k_mbox_receive_request(struct k_args *Reader) * to blindly set it rather than waste time on a comparison. */ - CopyReader->Comm = RECV_TMO; + CopyReader->Comm = _K_SVC_MBOX_RECEIVE_REPLY; /* Put the letter into the mailbox */ INSERT_ELM(MailBox->Readers, CopyReader); @@ -670,7 +670,7 @@ void _k_mbox_receive_request(struct k_args *Reader) * This is a no-wait operation. * Notify the receiver of failure. */ - CopyReader->Comm = RECV_ACK; + CopyReader->Comm = _K_SVC_MBOX_RECEIVE_ACK; CopyReader->Time.rcode = RC_FAIL; SENDARGS(CopyReader); } @@ -693,7 +693,7 @@ int _task_mbox_get(kmbox_t mbox, */ A.Prio = _k_current_task->Prio; - A.Comm = RECV_REQ; + A.Comm = _K_SVC_MBOX_RECEIVE_REQUEST; A.Time.ticks = time; A.Args.m1.mess = *M; @@ -729,7 +729,7 @@ void _task_mbox_put_async(kmbox_t mbox, A.Time.timer = NULL; #endif A.Prio = prio; - A.Comm = SEND_REQ; + A.Comm = _K_SVC_MBOX_SEND_REQUEST; A.Args.m1.mess = *M; KERNEL_ENTRY(&A); } @@ -754,7 +754,7 @@ void _k_mbox_receive_data(struct k_args *Starter) CopyStarter->Ctxt.args = Starter; MoveD = CopyStarter->Args.m1.mess.extra.transfer; - CopyStarter->Comm = RECV_ACK; + CopyStarter->Comm = _K_SVC_MBOX_RECEIVE_ACK; CopyStarter->Time.rcode = RC_OK; MoveD->Args.MovedReq.Extra.Setup.ContRcv = CopyStarter; @@ -793,7 +793,7 @@ void _task_mbox_data_get(struct k_msg *M) } A.Args.m1.mess = *M; - A.Comm = RECV_DATA; + A.Comm = _K_SVC_MBOX_RECEIVE_DATA; KERNEL_ENTRY(&A); } @@ -884,7 +884,7 @@ int _task_mbox_data_get_async_block(struct k_msg *message, struct k_args A; A.Args.m1.mess = *message; - A.Comm = RECV_DATA; + A.Comm = _K_SVC_MBOX_RECEIVE_DATA; KERNEL_ENTRY(&A); return RC_OK; /* task_mbox_data_get() doesn't return anything */ @@ -911,7 +911,7 @@ void _k_mbox_send_data(struct k_args *Starter) MoveD = CopyStarter->Args.m1.mess.extra.transfer; CopyStarter->Time.rcode = RC_OK; - CopyStarter->Comm = SEND_ACK; + CopyStarter->Comm = _K_SVC_MBOX_SEND_ACK; MoveD->Args.MovedReq.Extra.Setup.ContSnd = CopyStarter; CopyStarter->Forw = NULL; diff --git a/kernel/microkernel/k_memory_map.c b/kernel/microkernel/k_memory_map.c index ffc9e9d8422..b2bb7f55ee6 100644 --- a/kernel/microkernel/k_memory_map.c +++ b/kernel/microkernel/k_memory_map.c @@ -122,7 +122,7 @@ void _k_mem_map_alloc(struct k_args *A) if (A->Time.ticks == TICKS_UNLIMITED) A->Time.timer = NULL; else { - A->Comm = ALLOCTMO; + A->Comm = _K_SVC_MEM_MAP_ALLOC_TIMEOUT; _k_timeout_alloc(A); } #endif @@ -145,7 +145,7 @@ int _task_mem_map_alloc(kmemory_map_t mmap, void **mptr, int32_t time) { struct k_args A; - A.Comm = ALLOC; + A.Comm = _K_SVC_MEM_MAP_ALLOC; A.Time.ticks = time; A.Args.a1.mmap = mmap; 
A.Args.a1.mptr = mptr; @@ -178,7 +178,7 @@ void _k_mem_map_dealloc(struct k_args *A) #ifdef CONFIG_SYS_CLOCK_EXISTS if (X->Time.timer) { _k_timeout_free(X->Time.timer); - X->Comm = NOP; + X->Comm = _K_SVC_NOP; } #endif X->Time.rcode = RC_OK; @@ -209,7 +209,7 @@ void _task_mem_map_free(kmemory_map_t mmap, void **mptr) { struct k_args A; - A.Comm = DEALLOC; + A.Comm = _K_SVC_MEM_MAP_DEALLOC; A.Args.a1.mmap = mmap; A.Args.a1.mptr = mptr; KERNEL_ENTRY(&A); diff --git a/kernel/microkernel/k_memory_pool.c b/kernel/microkernel/k_memory_pool.c index 3f4fb908d4f..544c8e7f223 100644 --- a/kernel/microkernel/k_memory_pool.c +++ b/kernel/microkernel/k_memory_pool.c @@ -218,7 +218,7 @@ void _k_defrag(struct k_args *A) */ GETARGS(NewGet); *NewGet = *A; - NewGet->Comm = GET_BLOCK_WAIT; + NewGet->Comm = _K_SVC_BLOCK_WAITERS_GET; TO_ALIST(&_k_command_stack, NewGet); /*push on command stack */ } } @@ -237,7 +237,7 @@ void task_mem_pool_defragment(kmemory_pool_t Pid /* pool to defragment */ { struct k_args A; - A.Comm = POOL_DEFRAG; + A.Comm = _K_SVC_DEFRAG; A.Args.p1.poolid = Pid; KERNEL_ENTRY(&A); } @@ -545,7 +545,7 @@ void _k_mem_pool_block_get(struct k_args *A) if (A->Time.ticks == TICKS_UNLIMITED) { A->Time.timer = NULL; } else { - A->Comm = GTBLTMO; + A->Comm = _K_SVC_MEM_POOL_BLOCK_GET_TIMEOUT_HANDLE; _k_timeout_alloc(A); } #endif @@ -574,7 +574,7 @@ int _task_mem_pool_alloc(struct k_block *blockptr, /* ptr to requested block */ struct k_args A; - A.Comm = GET_BLOCK; + A.Comm = _K_SVC_MEM_POOL_BLOCK_GET; A.Time.ticks = time; A.Args.p1.poolid = poolid; A.Args.p1.req_size = reqsize; @@ -646,7 +646,7 @@ void _k_mem_pool_block_release(struct k_args *A) */ GETARGS(NewGet); *NewGet = *A; - NewGet->Comm = GET_BLOCK_WAIT; + NewGet->Comm = _K_SVC_BLOCK_WAITERS_GET; TO_ALIST(&_k_command_stack, NewGet); /* push on command stack */ } if (A->alloc) { @@ -676,7 +676,7 @@ void task_mem_pool_free(struct k_block *blockptr /* pointer to block to free */ { struct k_args A; - A.Comm = REL_BLOCK; + A.Comm = _K_SVC_MEM_POOL_BLOCK_RELEASE; A.Args.p1.poolid = blockptr->poolid; A.Args.p1.req_size = blockptr->req_size; A.Args.p1.rep_poolptr = blockptr->address_in_pool; diff --git a/kernel/microkernel/k_mutex.c b/kernel/microkernel/k_mutex.c index 4ab06d37749..61d7ac8e0d0 100644 --- a/kernel/microkernel/k_mutex.c +++ b/kernel/microkernel/k_mutex.c @@ -58,7 +58,7 @@ #include /** - * @brief Reply to a mutex lock request (LOCK_TMO, LOCK_RPL) + * @brief Reply to a mutex lock request. * * This routine replies to a mutex lock request. This will occur if either * the waiting task times out or acquires the mutex lock. @@ -82,7 +82,7 @@ void _k_mutex_lock_reply( FREETIMER(A->Time.timer); } - if (A->Comm == LOCK_TMO) {/* Timeout case */ + if (A->Comm == _K_SVC_MUTEX_LOCK_REPLY_TIMEOUT) {/* Timeout case */ REMOVE_ELM(A); A->Time.rcode = RC_TIME; @@ -122,7 +122,7 @@ void _k_mutex_lock_reply( if (Mutex->OwnerCurrentPrio != newPriority) { GETARGS(PrioChanger); PrioChanger->alloc = true; - PrioChanger->Comm = SPRIO; + PrioChanger->Comm = _K_SVC_TASK_PRIORITY_SET; PrioChanger->Prio = newPriority; PrioChanger->Args.g1.task = Mutex->Owner; PrioChanger->Args.g1.prio = newPriority; @@ -140,6 +140,21 @@ void _k_mutex_lock_reply( _k_state_bit_reset(A->Ctxt.proc, TF_LOCK); } +/** + * @brief Reply to a mutex lock request with timeout. + * + * This routine replies to a mutex lock request. This will occur if either + * the waiting task times out or acquires the mutex lock. + * + * @param A Pointer to a k_args structure. 
+ * + * @return N/A + */ +void _k_mutex_lock_reply_timeout(struct k_args *A) +{ + _k_mutex_lock_reply(A); +} + /** * @brief Process a mutex lock request * @@ -222,7 +237,7 @@ void _k_mutex_lock_request(struct k_args *A /* pointer to mutex lock * Prepare to call _k_mutex_lock_reply() should * the request time out. */ - A->Comm = LOCK_TMO; + A->Comm = _K_SVC_MUTEX_LOCK_REPLY_TIMEOUT; _k_timeout_alloc(A); } #endif @@ -243,7 +258,7 @@ void _k_mutex_lock_request(struct k_args *A /* pointer to mutex lock GETARGS(PrioBooster); PrioBooster->alloc = true; - PrioBooster->Comm = SPRIO; + PrioBooster->Comm = _K_SVC_TASK_PRIORITY_SET; PrioBooster->Prio = BoostedPrio; PrioBooster->Args.g1.task = Mutex->Owner; PrioBooster->Args.g1.prio = BoostedPrio; @@ -278,7 +293,7 @@ int _task_mutex_lock( { struct k_args A; /* argument packet */ - A.Comm = LOCK_REQ; + A.Comm = _K_SVC_MUTEX_LOCK_REQUEST; A.Time.ticks = time; A.Args.l1.mutex = mutex; A.Args.l1.task = _k_current_task->Ident; @@ -330,7 +345,7 @@ void _k_mutex_unlock(struct k_args *A /* pointer to mutex unlock GETARGS(PrioDowner); PrioDowner->alloc = true; - PrioDowner->Comm = SPRIO; + PrioDowner->Comm = _K_SVC_TASK_PRIORITY_SET; PrioDowner->Prio = Mutex->OwnerOriginalPrio; PrioDowner->Args.g1.task = Mutex->Owner; PrioDowner->Args.g1.prio = Mutex->OwnerOriginalPrio; @@ -358,7 +373,7 @@ void _k_mutex_unlock(struct k_args *A /* pointer to mutex unlock * send a reply with a return code of RC_OK. */ _k_timeout_cancel(X); - X->Comm = LOCK_RPL; + X->Comm = _K_SVC_MUTEX_LOCK_REPLY; } else { #endif /* @@ -392,7 +407,7 @@ void _task_mutex_unlock(kmutex_t mutex /* mutex to unlock */ { struct k_args A; /* argument packet */ - A.Comm = UNLOCK; + A.Comm = _K_SVC_MUTEX_UNLOCK; A.Args.l1.mutex = mutex; A.Args.l1.task = _k_current_task->Ident; KERNEL_ENTRY(&A); diff --git a/kernel/microkernel/k_nop.c b/kernel/microkernel/k_nop.c index 4ecc695a90d..d4d46e37266 100644 --- a/kernel/microkernel/k_nop.c +++ b/kernel/microkernel/k_nop.c @@ -69,6 +69,6 @@ void _task_nop(void) { struct k_args A; - A.Comm = NOP; + A.Comm = _K_SVC_NOP; KERNEL_ENTRY(&A); } diff --git a/kernel/microkernel/k_offload.c b/kernel/microkernel/k_offload.c index a0203371c9d..4b4894b02ea 100644 --- a/kernel/microkernel/k_offload.c +++ b/kernel/microkernel/k_offload.c @@ -65,7 +65,7 @@ int task_offload_to_fiber(int (*func)(), void *argp) { struct k_args A; - A.Comm = OFFLOAD; + A.Comm = _K_SVC_OFFLOAD_TO_FIBER; A.Args.u1.func = func; A.Args.u1.argp = argp; KERNEL_ENTRY(&A); diff --git a/kernel/microkernel/k_pipe.c b/kernel/microkernel/k_pipe.c index 6653cee5270..f9e23753a36 100644 --- a/kernel/microkernel/k_pipe.c +++ b/kernel/microkernel/k_pipe.c @@ -92,7 +92,7 @@ int _task_pipe_get(kpipe_t Id, void *pBuffer, } A.Prio = _k_current_task->Prio; - A.Comm = PIPE_GET_REQUEST; + A.Comm = _K_SVC_PIPE_GET_REQUEST; A.Time.ticks = TimeOut; A.Args.pipe_req.ReqInfo.pipe.id = Id; @@ -144,7 +144,7 @@ int _task_pipe_put(kpipe_t Id, void *pBuffer, } A.Prio = _k_current_task->Prio; - A.Comm = PIPE_PUT_REQUEST; + A.Comm = _K_SVC_PIPE_PUT_REQUEST; A.Time.ticks = TimeOut; A.Args.pipe_req.ReqInfo.pipe.id = Id; @@ -189,7 +189,7 @@ int _task_pipe_put_async(kpipe_t Id, struct k_block Block, } A.Prio = _k_current_task->Prio; - A.Comm = PIPE_PUT_REQUEST; + A.Comm = _K_SVC_PIPE_PUT_REQUEST; A.Time.ticks = TICKS_UNLIMITED; /* same behavior in flow as a blocking call w/o a timeout */ diff --git a/kernel/microkernel/k_pipe_get.c b/kernel/microkernel/k_pipe_get.c index 4fca1e66ebb..babc0c11a37 100644 --- a/kernel/microkernel/k_pipe_get.c 
+++ b/kernel/microkernel/k_pipe_get.c @@ -148,7 +148,7 @@ void _k_pipe_get_request(struct k_args *RequestOrig) * PIPE_GET_TIMEOUT microkernel command to the packet even though it * is only useful to the finite timeout case. */ - RequestProc->Comm = PIPE_GET_TIMEOUT; + RequestProc->Comm = _K_SVC_PIPE_GET_TIMEOUT; if (_TIME_B == _k_pipe_time_type_get(&RequestProc->Args)) { /* * The writer specified TICKS_UNLIMITED, so NULL the timer. @@ -180,7 +180,7 @@ void _k_pipe_get_request(struct k_args *RequestOrig) __ASSERT_NO_MSG(XFER_IDLE == RequestProc->Args.pipe_xfer_req.status); __ASSERT_NO_MSG(0 == RequestProc->Args.pipe_xfer_req.iSizeXferred); - RequestProc->Comm = PIPE_GET_REPLY; + RequestProc->Comm = _K_SVC_PIPE_GET_REPLY; _k_pipe_get_reply(RequestProc); } return; @@ -225,7 +225,7 @@ void _k_pipe_get_reply(struct k_args *ReqProc) struct k_args *ReqOrig = ReqProc->Ctxt.args; PIPE_REQUEST_STATUS status; - ReqOrig->Comm = PIPE_GET_ACK; + ReqOrig->Comm = _K_SVC_PIPE_GET_ACK; /* determine return value */ diff --git a/kernel/microkernel/k_pipe_put.c b/kernel/microkernel/k_pipe_put.c index c560f23a214..850bf5aec50 100644 --- a/kernel/microkernel/k_pipe_put.c +++ b/kernel/microkernel/k_pipe_put.c @@ -167,7 +167,7 @@ void _k_pipe_put_request(struct k_args *RequestOrig) * PIPE_PUT_TIMEOUT microkernel command to the packet even though it * is only useful to the finite timeout case. */ - RequestProc->Comm = PIPE_PUT_TIMEOUT; + RequestProc->Comm = _K_SVC_PIPE_PUT_TIMEOUT; if (_TIME_B == _k_pipe_time_type_get(&RequestProc->Args)) { /* * The writer specified TICKS_UNLIMITED; NULL the timer. @@ -199,7 +199,7 @@ void _k_pipe_put_request(struct k_args *RequestOrig) __ASSERT_NO_MSG(XFER_IDLE == RequestProc->Args.pipe_xfer_req.status); __ASSERT_NO_MSG(0 == RequestProc->Args.pipe_xfer_req.iSizeXferred); - RequestProc->Comm = PIPE_PUT_REPLY; + RequestProc->Comm = _K_SVC_PIPE_PUT_REPLY; _k_pipe_put_reply(RequestProc); } return; @@ -245,7 +245,7 @@ void _k_pipe_put_reply(struct k_args *ReqProc) struct k_args *ReqOrig = ReqProc->Ctxt.args; PIPE_REQUEST_STATUS status; - ReqOrig->Comm = PIPE_PUT_ACK; + ReqOrig->Comm = _K_SVC_PIPE_PUT_ACK; /* determine return value: */ @@ -296,7 +296,7 @@ void _k_pipe_put_ack(struct k_args *Request) /* invoke command to release block */ blockptr = &pipe_ack->ReqType.Async.block; - A.Comm = REL_BLOCK; + A.Comm = _K_SVC_MEM_POOL_BLOCK_RELEASE; A.Args.p1.poolid = blockptr->poolid; A.Args.p1.req_size = blockptr->req_size; A.Args.p1.rep_poolptr = blockptr->address_in_pool; @@ -307,7 +307,7 @@ void _k_pipe_put_ack(struct k_args *Request) /* invoke command to signal sema */ struct k_args A; - A.Comm = SIGNALS; + A.Comm = _K_SVC_SEM_SIGNAL; A.Args.s1.sema = pipe_ack->ReqType.Async.sema; _k_sem_signal(&A); /* will return immediately */ } diff --git a/kernel/microkernel/k_pipe_xfer.c b/kernel/microkernel/k_pipe_xfer.c index 74233362de7..ca1a29b0703 100644 --- a/kernel/microkernel/k_pipe_xfer.c +++ b/kernel/microkernel/k_pipe_xfer.c @@ -250,7 +250,7 @@ static void setup_movedata(struct k_args *A, struct k_args *pContSend; struct k_args *pContRecv; - A->Comm = MVD_REQ; + A->Comm = _K_SVC_MOVEDATA_REQ; A->Ctxt.proc = NULL; /* this caused problems when != NULL related to set/reset of state bits */ @@ -266,14 +266,14 @@ static void setup_movedata(struct k_args *A, GETARGS(pContRecv); pContSend->Forw = NULL; - pContSend->Comm = PIPE_MOVEDATA_ACK; + pContSend->Comm = _K_SVC_PIPE_MOVEDATA_ACK; pContSend->Args.pipe_xfer_ack.pPipe = pPipe; pContSend->Args.pipe_xfer_ack.XferType = XferType; 
pContSend->Args.pipe_xfer_ack.ID = XferID; pContSend->Args.pipe_xfer_ack.iSize = size; pContRecv->Forw = NULL; - pContRecv->Comm = PIPE_MOVEDATA_ACK; + pContRecv->Comm = _K_SVC_PIPE_MOVEDATA_ACK; pContRecv->Args.pipe_xfer_ack.pPipe = pPipe; pContRecv->Args.pipe_xfer_ack.XferType = XferType; pContRecv->Args.pipe_xfer_ack.ID = XferID; @@ -970,7 +970,7 @@ void _k_pipe_process(struct pipe_struct *pPipe, struct k_args *pNLWriter, myfreetimer(&(pReader->Time.timer)); } if (0 == pReader->Args.pipe_xfer_req.iNbrPendXfers) { - pReader->Comm = PIPE_GET_REPLY; + pReader->Comm = _K_SVC_PIPE_GET_REPLY; /* if terminated and no pending Xfers anymore, we have to reply */ _k_pipe_get_reply(pReader); @@ -1000,7 +1000,7 @@ void _k_pipe_process(struct pipe_struct *pPipe, struct k_args *pNLWriter, myfreetimer(&(pWriter->Time.timer)); } if (0 == pWriter->Args.pipe_xfer_req.iNbrPendXfers) { - pWriter->Comm = PIPE_PUT_REPLY; + pWriter->Comm = _K_SVC_PIPE_PUT_REPLY; /* if terminated and no pending Xfers anymore, we have to reply */ _k_pipe_put_reply(pWriter); diff --git a/kernel/microkernel/k_semaphore.c b/kernel/microkernel/k_semaphore.c index 65071540e8d..66e35de8007 100644 --- a/kernel/microkernel/k_semaphore.c +++ b/kernel/microkernel/k_semaphore.c @@ -61,9 +61,10 @@ static void signal_semaphore(int n, struct sem_struct *S) X = A->Forw; #ifdef CONFIG_SYS_CLOCK_EXISTS - if (A->Comm == WAITSREQ || A->Comm == WAITSTMO) + if (A->Comm == _K_SVC_SEM_WAIT_REQUEST + || A->Comm == _K_SVC_SEM_WAIT_REPLY_TIMEOUT) #else - if (A->Comm == WAITSREQ) + if (A->Comm == _K_SVC_SEM_WAIT_REQUEST) #endif { S->Level--; @@ -75,7 +76,7 @@ static void signal_semaphore(int n, struct sem_struct *S) #ifdef CONFIG_SYS_CLOCK_EXISTS if (A->Time.timer) { _k_timeout_cancel(A); - A->Comm = WAITSRPL; + A->Comm = _K_SVC_SEM_WAIT_REPLY; } else { #endif A->Time.rcode = RC_OK; @@ -83,9 +84,9 @@ static void signal_semaphore(int n, struct sem_struct *S) #ifdef CONFIG_SYS_CLOCK_EXISTS } #endif - } else if (A->Comm == WAITMREQ) { + } else if (A->Comm == _K_SVC_SEM_GROUP_WAIT_REQUEST) { S->Level--; - A->Comm = WAITMRDY; + A->Comm = _K_SVC_SEM_GROUP_WAIT_READY; GETARGS(Y); *Y = *A; SENDARGS(Y); @@ -120,8 +121,9 @@ void _k_sem_group_wait_cancel(struct k_args *A) } else { S->Waiters = X->Forw; } - if (X->Comm == WAITMREQ || X->Comm == WAITMRDY) { - if (X->Comm == WAITMRDY) { + if (X->Comm == _K_SVC_SEM_GROUP_WAIT_REQUEST + || X->Comm == _K_SVC_SEM_GROUP_WAIT_READY) { + if (X->Comm == _K_SVC_SEM_GROUP_WAIT_READY) { /* obtain struct k_args of waiting task */ struct k_args *waitTaskArgs = X->Ctxt.args; @@ -129,8 +131,9 @@ void _k_sem_group_wait_cancel(struct k_args *A) /* * Determine if the wait cancellation request is being * processed after the state of the 'Waiters' packet state - * has been updated to WAITMRDY, but before the WAITMRDY - * packet has been processed. This will occur if a WAITMTMO + * has been updated to _K_SVC_SEM_GROUP_WAIT_READY, but before + * the _K_SVC_SEM_GROUP_WAIT_READY packet has been processed. + * This will occur if a _K_SVC_SEM_GROUP_WAIT_TIMEOUT * timer expiry occurs between the update of the packet state * and the processing of the WAITMRDY packet. */ @@ -174,7 +177,7 @@ void _k_sem_group_wait_accept(struct k_args *A) } else { S->Waiters = X->Forw; } - if (X->Comm == WAITMRDY) { + if (X->Comm == _K_SVC_SEM_GROUP_WAIT_READY) { _k_sem_group_wait(X); } else { FREEARGS(X); /* ERROR */ @@ -206,7 +209,8 @@ void _k_sem_group_wait_timeout(struct k_args *A) GETARGS(R); R->Prio = A->Prio; R->Comm = - (K_COMM)((*L == A->Args.s1.sema) ? 
WAITMACC : WAITMCAN); + ((*L == A->Args.s1.sema) ? + _K_SVC_SEM_GROUP_WAIT_ACCEPT : _K_SVC_SEM_GROUP_WAIT_CANCEL); R->Ctxt.args = A; R->Args.s1.sema = *L++; SENDARGS(R); @@ -219,7 +223,7 @@ void _k_sem_group_ready(struct k_args *R) if (A->Args.s1.sema == ENDLIST) { A->Args.s1.sema = R->Args.s1.sema; - A->Comm = WAITMTMO; + A->Comm = _K_SVC_SEM_GROUP_WAIT_TIMEOUT; #ifdef CONFIG_SYS_CLOCK_EXISTS if (A->Time.timer) { _k_timeout_cancel(A); @@ -236,7 +240,7 @@ void _k_sem_wait_reply(struct k_args *A) if (A->Time.timer) { FREETIMER(A->Time.timer); } - if (A->Comm == WAITSTMO) { + if (A->Comm == _K_SVC_SEM_WAIT_REPLY_TIMEOUT) { REMOVE_ELM(A); A->Time.rcode = RC_TIME; } else @@ -245,6 +249,11 @@ void _k_sem_wait_reply(struct k_args *A) _k_state_bit_reset(A->Ctxt.proc, TF_SEMA); } +void _k_sem_wait_reply_timeout(struct k_args *A) +{ + _k_sem_wait_reply(A); +} + void _k_sem_group_wait_request(struct k_args *A) { struct sem_struct *S = _k_sem_list + OBJ_INDEX(A->Args.s1.sema); @@ -258,7 +267,7 @@ void _k_sem_group_wait_request(struct k_args *A) } else { S->Waiters = X->Forw; } - if (X->Comm == WAITMCAN) { + if (X->Comm == _K_SVC_SEM_GROUP_WAIT_CANCEL) { _k_sem_group_wait(X); } else { FREEARGS(X); /* ERROR */ @@ -296,7 +305,7 @@ void _k_sem_group_wait_any(struct k_args *A) GETARGS(R); R->Prio = _k_current_task->Prio; - R->Comm = WAITMREQ; + R->Comm = _K_SVC_SEM_GROUP_WAIT_REQUEST; R->Ctxt.args = A; R->Args.s1.sema = *L++; SENDARGS(R); @@ -311,7 +320,7 @@ void _k_sem_group_wait_any(struct k_args *A) if (A->Time.ticks == TICKS_UNLIMITED) { A->Time.timer = NULL; } else { - A->Comm = WAITMTMO; + A->Comm = _K_SVC_SEM_GROUP_WAIT_TIMEOUT; _k_timeout_alloc(A); } } @@ -338,7 +347,7 @@ void _k_sem_wait_request(struct k_args *A) if (A->Time.ticks == TICKS_UNLIMITED) { A->Time.timer = NULL; } else { - A->Comm = WAITSTMO; + A->Comm = _K_SVC_SEM_WAIT_REPLY_TIMEOUT; _k_timeout_alloc(A); } #endif @@ -352,7 +361,7 @@ int _task_sem_take(ksem_t sema, int32_t time) { struct k_args A; - A.Comm = WAITSREQ; + A.Comm = _K_SVC_SEM_WAIT_REQUEST; A.Time.ticks = time; A.Args.s1.sema = sema; KERNEL_ENTRY(&A); @@ -363,7 +372,7 @@ ksem_t _task_sem_group_take(ksemg_t group, int32_t time) { struct k_args A; - A.Comm = WAITMANY; + A.Comm = _K_SVC_SEM_GROUP_WAIT_ANY; A.Prio = _k_current_task->Prio; A.Time.ticks = time; A.Args.s1.list = group; @@ -391,7 +400,7 @@ void task_sem_give(ksem_t sema) { struct k_args A; - A.Comm = SIGNALS; + A.Comm = _K_SVC_SEM_SIGNAL; A.Args.s1.sema = sema; KERNEL_ENTRY(&A); } @@ -400,7 +409,7 @@ void task_sem_group_give(ksemg_t group) { struct k_args A; - A.Comm = SIGNALM; + A.Comm = _K_SVC_SEM_GROUP_SIGNAL; A.Args.s1.list = group; KERNEL_ENTRY(&A); } @@ -418,7 +427,7 @@ void isr_sem_give(ksem_t sema, struct cmd_pkt_set *pSet) */ pCommand = (struct k_args *)_cmd_pkt_get(pSet); - pCommand->Comm = SIGNALS; + pCommand->Comm = _K_SVC_SEM_SIGNAL; pCommand->Args.s1.sema = sema; nano_isr_stack_push(&_k_command_stack, (uint32_t)pCommand); @@ -444,7 +453,7 @@ void task_sem_reset(ksem_t sema) { struct k_args A; - A.Comm = RESETS; + A.Comm = _K_SVC_SEM_RESET; A.Args.s1.sema = sema; KERNEL_ENTRY(&A); } @@ -453,7 +462,7 @@ void task_sem_group_reset(ksemg_t group) { struct k_args A; - A.Comm = RESETM; + A.Comm = _K_SVC_SEM_GROUP_RESET; A.Args.s1.list = group; KERNEL_ENTRY(&A); } @@ -472,7 +481,7 @@ int task_sem_count_get(ksem_t sema) { struct k_args A; - A.Comm = INQSEMA; + A.Comm = _K_SVC_SEM_INQUIRY; A.Args.s1.sema = sema; KERNEL_ENTRY(&A); return A.Time.rcode; diff --git a/kernel/microkernel/k_server.c 
b/kernel/microkernel/k_server.c index 93086bcd9dd..5bd02189a61 100644 --- a/kernel/microkernel/k_server.c +++ b/kernel/microkernel/k_server.c @@ -132,7 +132,7 @@ FUNC_NORETURN void K_swapper(int parameter1, /* not used */ _k_task_monitor_args(pArgs); } #endif - _k_server_dispatch_table[pArgs->Comm](pArgs); + (*pArgs->Comm)(pArgs); } /* check if another fiber (of equal or greater priority) diff --git a/kernel/microkernel/k_task.c b/kernel/microkernel/k_task.c index a2cd18025bf..57ebfc81914 100644 --- a/kernel/microkernel/k_task.c +++ b/kernel/microkernel/k_task.c @@ -364,7 +364,7 @@ void _task_ioctl(ktask_t task, /* task on which to operate */ { struct k_args A; - A.Comm = TSKOP; + A.Comm = _K_SVC_TASK_OP; A.Args.g1.task = task; A.Args.g1.opt = opt; KERNEL_ENTRY(&A); @@ -435,7 +435,7 @@ void _task_group_ioctl(ktask_group_t group, /* task group */ { struct k_args A; - A.Comm = GRPOP; + A.Comm = _K_SVC_TASK_GROUP_OP; A.Args.g1.group = group; A.Args.g1.opt = opt; KERNEL_ENTRY(&A); @@ -530,7 +530,7 @@ void task_priority_set(ktask_t task, /* task whose priority is to be set */ { struct k_args A; - A.Comm = SPRIO; + A.Comm = _K_SVC_TASK_PRIORITY_SET; A.Args.g1.task = task; A.Args.g1.prio = prio; KERNEL_ENTRY(&A); @@ -573,7 +573,7 @@ void task_yield(void) { struct k_args A; - A.Comm = YIELD; + A.Comm = _K_SVC_TASK_YIELD; KERNEL_ENTRY(&A); } diff --git a/kernel/microkernel/k_ticker.c b/kernel/microkernel/k_ticker.c index 7d73b164271..284470430c3 100644 --- a/kernel/microkernel/k_ticker.c +++ b/kernel/microkernel/k_ticker.c @@ -336,7 +336,7 @@ int64_t task_tick_delta(int64_t *reftime /* pointer to reference time */ { struct k_args A; - A.Comm = ELAPSE; + A.Comm = _K_SVC_TIME_ELAPSE; A.Args.c1.time1 = *reftime; KERNEL_ENTRY(&A); *reftime = A.Args.c1.time1; diff --git a/kernel/microkernel/k_timer.c b/kernel/microkernel/k_timer.c index 8f2729ed07d..29715ec8be7 100644 --- a/kernel/microkernel/k_timer.c +++ b/kernel/microkernel/k_timer.c @@ -280,7 +280,7 @@ ktimer_t task_timer_alloc(void) { struct k_args A; - A.Comm = TALLOC; + A.Comm = _K_SVC_TIMER_ALLOC; KERNEL_ENTRY(&A); return _timer_ptr_to_id(A.Args.c1.timer); @@ -324,7 +324,7 @@ void task_timer_free(ktimer_t timer) { struct k_args A; - A.Comm = TDEALLOC; + A.Comm = _K_SVC_TIMER_DEALLOC; A.Args.c1.timer = _timer_id_to_ptr(timer); KERNEL_ENTRY(&A); } @@ -372,7 +372,7 @@ void _k_timer_start(struct k_args *P) /* Track the semaphore to signal for when the timer expires. 
*/ if (P->Args.c1.sema != ENDLIST) { - T->Args->Comm = SIGNALS; + T->Args->Comm = _K_SVC_SEM_SIGNAL; T->Args->Args.s1.sema = P->Args.c1.sema; } _k_timer_enlist(T); @@ -408,7 +408,7 @@ void task_timer_start(ktimer_t timer, int32_t duration, int32_t period, { struct k_args A; - A.Comm = TSTART; + A.Comm = _K_SVC_TIMER_START; A.Args.c1.timer = _timer_id_to_ptr(timer); A.Args.c1.time1 = (int64_t)duration; A.Args.c1.time2 = period; @@ -433,7 +433,7 @@ void task_timer_restart(ktimer_t timer, int32_t duration, int32_t period) { struct k_args A; - A.Comm = TSTART; + A.Comm = _K_SVC_TIMER_START; A.Args.c1.timer = _timer_id_to_ptr(timer); A.Args.c1.time1 = (int64_t)duration; A.Args.c1.time2 = period; @@ -475,7 +475,7 @@ void task_timer_stop(ktimer_t timer) { struct k_args A; - A.Comm = TSTOP; + A.Comm = _K_SVC_TIMER_STOP; A.Args.c1.timer = _timer_id_to_ptr(timer); KERNEL_ENTRY(&A); } @@ -525,7 +525,7 @@ void _k_task_sleep(struct k_args *P) T->period = 0; T->Args = P; - P->Comm = WAKEUP; + P->Comm = _K_SVC_TASK_WAKEUP; P->Ctxt.proc = _k_current_task; P->Time.timer = T; @@ -550,7 +550,7 @@ void task_sleep(int32_t ticks) { struct k_args A; - A.Comm = SLEEP; + A.Comm = _K_SVC_TASK_SLEEP; A.Time.ticks = ticks; KERNEL_ENTRY(&A); } diff --git a/scripts/sysgen b/scripts/sysgen index 370a6b2c5ea..aebcc27757f 100755 --- a/scripts/sysgen +++ b/scripts/sysgen @@ -299,14 +299,14 @@ def kernel_main_c_kargs(): kernel_main_c_out("\n" + "struct k_args _k_server_command_packets[%s] =\n" % (num_kargs) + "{\n" + - " {NULL, NULL, 0, 0, (K_COMM) UNDEFINED},\n") + " {NULL, NULL, 0, 0, _K_SVC_UNDEFINED},\n") for i in range(1, num_kargs - 1): kernel_main_c_out( " {&_k_server_command_packets[%d], " % (i - 1) + - "NULL, 0, 0, (K_COMM) UNDEFINED},\n") + "NULL, 0, 0, _K_SVC_UNDEFINED},\n") kernel_main_c_out( " {&_k_server_command_packets[%d], " % (num_kargs - 2) + - "NULL, 0, 0, (K_COMM) UNDEFINED}\n" + + "NULL, 0, 0, _K_SVC_UNDEFINED}\n" + "};\n") # linked list of free command packets @@ -761,201 +761,6 @@ def kernel_main_c_pools(): kernel_main_c_out(pool_descriptors) -def kernel_main_c_kernel_services(): - """ Generate kernel services function table """ - - # initialize table with info for all possible kernel services - - func_table = [ -"/* 0 */ _k_nop,", # required -"/* 1 */ _k_movedata_request,", # required -"/* 2 */ (kernelfunc) NULL,", # unused -"/* 3 */ (kernelfunc) NULL,", # unused -"/* 4 */ _k_offload_to_fiber,", # required -"/* 5 */ _k_workload_get,", # required -"/* 6 */ _k_sem_signal,", # depends on semaphores -"/* 7 */ _k_sem_group_signal,", # depends on semaphores -"/* 8 */ _k_sem_reset,", # depends on semaphores -"/* 9 */ _k_sem_group_reset,", # depends on semaphores -"/* 10 */ _k_sem_wait_request,", # depends on semaphores -"/* 11 */ _k_sem_wait_reply,", # depends on semaphores -"/* 12 */ _k_sem_wait_reply,", # depends on semaphores and - # timers -"/* 13 */ _k_sem_group_wait_any,", # depends on semaphores -"/* 14 */ _k_sem_group_wait_request,", # depends on semaphores -"/* 15 */ _k_sem_group_ready,", # depends on semaphores -"/* 16 */ _k_sem_group_wait_cancel,", # depends on semaphores -"/* 17 */ _k_sem_group_wait_accept,", # depends on semaphores -"/* 18 */ _k_sem_group_wait,", # depends on semaphores -"/* 19 */ _k_sem_group_wait_timeout,", # depends on semaphores - # (but not timers) -"/* 20 */ _k_sem_inquiry,", # depends on semaphores -"/* 21 */ _k_mutex_lock_request,", # depends on mutexes -"/* 22 */ _k_mutex_lock_reply,", # depends on mutexes -"/* 23 */ _k_mutex_lock_reply,", # depends on mutexes and - # 
timers -"/* 24 */ _k_mutex_unlock,", # depends on mutexes -"/* 25 */ _k_fifo_enque_request,", # depends on FIFOs -"/* 26 */ _k_fifo_enque_reply,", # depends on FIFOs -"/* 27 */ _k_fifo_enque_reply,", # depends on FIFOs and timers -"/* 28 */ _k_fifo_deque_request,", # depends on FIFOs -"/* 29 */ _k_fifo_deque_reply,", # depends on FIFOs -"/* 30 */ _k_fifo_deque_reply,", # depends on FIFOs and timers -"/* 31 */ _k_fifo_ioctl,", # depends on FIFOs -"/* 32 */ _k_mbox_send_request,", # depends on mailboxes -"/* 33 */ _k_mbox_send_reply,", # depends on mailboxes and - # timers -"/* 34 */ _k_mbox_send_ack,", # depends on mailboxes -"/* 35 */ _k_mbox_send_data,", # depends on mailboxes -"/* 36 */ _k_mbox_receive_request,", # depends on mailboxes -"/* 37 */ _k_mbox_receive_reply,", # depends on mailboxes and - # timers -"/* 38 */ _k_mbox_receive_ack,", # depends on mailboxes -"/* 39 */ _k_mbox_receive_data,", # depends on mailboxes -"/* 40 */ _k_time_elapse,", # depends on timers -"/* 41 */ _k_task_sleep,", # depends on timers -"/* 42 */ _k_task_wakeup,", # depends on timers -"/* 43 */ _k_task_op,", # required -"/* 44 */ _k_task_group_op,", # required -"/* 45 */ _k_task_priority_set,", # required -"/* 46 */ _k_task_yield,", # required -"/* 47 */ _k_mem_map_alloc,", # depends on memory maps -"/* 48 */ _k_mem_map_dealloc,", # depends on memory maps -"/* 49 */ _k_timer_alloc,", # depends on timers -"/* 50 */ _k_timer_dealloc,", # depends on timers -"/* 51 */ _k_timer_start,", # depends on timers -"/* 52 */ _k_timer_stop,", # depends on timers -"/* 53 */ _k_mem_map_alloc_timeout,", # depends on memory maps and - # timers -"/* 54 */ (kernelfunc) NULL,", # unused -"/* 55 */ (kernelfunc) NULL,", # unused -"/* 56 */ (kernelfunc) NULL,", # unused -"/* 57 */ (kernelfunc) NULL,", # unused -"/* 58 */ _k_event_test,", # required -"/* 59 */ _k_event_handler_set,", # required -"/* 60 */ _k_event_signal,", # required -"/* 61 */ _k_mem_pool_block_get,", # depends on memory pools -"/* 62 */ _k_mem_pool_block_release,", # depends on memory pools -"/* 63 */ _k_block_waiters_get,", # depends on memory pools -"/* 64 */ _k_mem_pool_block_get_timeout_handle,", # depends on memory pools - # and timers -"/* 65 */ _k_defrag,", # depends on memory pools -"/* 66 */ (kernelfunc) NULL,", # unused -"/* 67 */ (kernelfunc) NULL,", # unused -"/* 68 */ (kernelfunc) NULL,", # unused -"/* 69 */ (kernelfunc) NULL,", # unused -"/* 70 */ (kernelfunc) NULL,", # unused -"/* 71 */ (kernelfunc) NULL,", # unused -"/* 72 */ _k_pipe_put_request,", # depends on pipes -"/* 73 */ _k_pipe_put_timeout,", # depends on pipes and timers -"/* 74 */ _k_pipe_put_reply,", # depends on pipes -"/* 75 */ _k_pipe_put_ack,", # depends on pipes -"/* 76 */ _k_pipe_get_request,", # depends on pipes -"/* 77 */ _k_pipe_get_timeout,", # depends on pipes and timers -"/* 78 */ _k_pipe_get_reply,", # depends on pipes -"/* 79 */ _k_pipe_get_ack,", # depends on pipes -"/* 80 */ _k_pipe_movedata_ack,", # depends on pipes -"/* 81 */ _k_event_test_timeout" # depends on timers - ] - - # eliminate table entries for kernel services that project doesn't utilize - # (note: some entries can be eliminated for more than one reason) - - if (len(sema_list) == 0): - func_table[6] = "/* 6 */ (kernelfunc) NULL," - func_table[7] = "/* 7 */ (kernelfunc) NULL," - func_table[8] = "/* 8 */ (kernelfunc) NULL," - func_table[9] = "/* 9 */ (kernelfunc) NULL," - func_table[10] = "/* 10 */ (kernelfunc) NULL," - func_table[11] = "/* 11 */ (kernelfunc) NULL," - func_table[12] = "/* 12 */ (kernelfunc) 
NULL," - func_table[13] = "/* 13 */ (kernelfunc) NULL," - func_table[14] = "/* 14 */ (kernelfunc) NULL," - func_table[15] = "/* 15 */ (kernelfunc) NULL," - func_table[16] = "/* 16 */ (kernelfunc) NULL," - func_table[17] = "/* 17 */ (kernelfunc) NULL," - func_table[18] = "/* 18 */ (kernelfunc) NULL," - func_table[19] = "/* 19 */ (kernelfunc) NULL," - func_table[20] = "/* 20 */ (kernelfunc) NULL," - - if (len(mutex_list) == 0): - func_table[21] = "/* 21 */ (kernelfunc) NULL," - func_table[22] = "/* 22 */ (kernelfunc) NULL," - func_table[23] = "/* 23 */ (kernelfunc) NULL," - func_table[24] = "/* 24 */ (kernelfunc) NULL," - - if (len(fifo_list) == 0): - func_table[25] = "/* 25 */ (kernelfunc) NULL," - func_table[26] = "/* 26 */ (kernelfunc) NULL," - func_table[27] = "/* 27 */ (kernelfunc) NULL," - func_table[28] = "/* 28 */ (kernelfunc) NULL," - func_table[29] = "/* 29 */ (kernelfunc) NULL," - func_table[30] = "/* 30 */ (kernelfunc) NULL," - func_table[31] = "/* 31 */ (kernelfunc) NULL," - - if (len(mbx_list) == 0): - func_table[32] = "/* 32 */ (kernelfunc) NULL," - func_table[33] = "/* 33 */ (kernelfunc) NULL," - func_table[34] = "/* 34 */ (kernelfunc) NULL," - func_table[35] = "/* 35 */ (kernelfunc) NULL," - func_table[36] = "/* 36 */ (kernelfunc) NULL," - func_table[37] = "/* 37 */ (kernelfunc) NULL," - func_table[38] = "/* 38 */ (kernelfunc) NULL," - func_table[39] = "/* 39 */ (kernelfunc) NULL," - - if (len(map_list) == 0): - func_table[47] = "/* 47 */ (kernelfunc) NULL," - func_table[48] = "/* 48 */ (kernelfunc) NULL," - func_table[53] = "/* 53 */ (kernelfunc) NULL," - - if (len(pool_list) == 0): - func_table[61] = "/* 61 */ (kernelfunc) NULL," - func_table[62] = "/* 62 */ (kernelfunc) NULL," - func_table[63] = "/* 63 */ (kernelfunc) NULL," - func_table[64] = "/* 64 */ (kernelfunc) NULL," - func_table[65] = "/* 65 */ (kernelfunc) NULL," - - if (len(pipe_list) == 0): - func_table[72] = "/* 72 */ (kernelfunc) NULL," - func_table[73] = "/* 73 */ (kernelfunc) NULL," - func_table[74] = "/* 74 */ (kernelfunc) NULL," - func_table[75] = "/* 75 */ (kernelfunc) NULL," - func_table[76] = "/* 76 */ (kernelfunc) NULL," - func_table[77] = "/* 77 */ (kernelfunc) NULL," - func_table[78] = "/* 78 */ (kernelfunc) NULL," - func_table[79] = "/* 79 */ (kernelfunc) NULL," - func_table[80] = "/* 80 */ (kernelfunc) NULL," - - if (num_timers == 0): - func_table[12] = "/* 12 */ (kernelfunc) NULL," - func_table[23] = "/* 23 */ (kernelfunc) NULL," - func_table[27] = "/* 27 */ (kernelfunc) NULL," - func_table[30] = "/* 30 */ (kernelfunc) NULL," - func_table[33] = "/* 33 */ (kernelfunc) NULL," - func_table[37] = "/* 37 */ (kernelfunc) NULL," - func_table[40] = "/* 40 */ (kernelfunc) NULL," - func_table[41] = "/* 41 */ (kernelfunc) NULL," - func_table[42] = "/* 42 */ (kernelfunc) NULL," - func_table[49] = "/* 49 */ (kernelfunc) NULL," - func_table[50] = "/* 50 */ (kernelfunc) NULL," - func_table[51] = "/* 51 */ (kernelfunc) NULL," - func_table[52] = "/* 52 */ (kernelfunc) NULL," - func_table[53] = "/* 53 */ (kernelfunc) NULL," - func_table[64] = "/* 64 */ (kernelfunc) NULL," - func_table[73] = "/* 73 */ (kernelfunc) NULL," - func_table[77] = "/* 77 */ (kernelfunc) NULL," - func_table[81] = "/* 81 */ (kernelfunc) NULL," - - # generate function table - - kernel_main_c_out("\n" + - "const kernelfunc _k_server_dispatch_table[82] =\n" + - "{\n") - for func in func_table: - kernel_main_c_out(" " + func + "\n") - kernel_main_c_out("};\n") - - def kernel_main_c_node_init(): """ Generate node initialization routine """ @@ 
-1000,7 +805,6 @@ def kernel_main_c_generate(): kernel_main_c_mailboxes() kernel_main_c_maps() kernel_main_c_pools() - kernel_main_c_kernel_services() kernel_main_c_node_init() kernel_main_c_main()
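
For reference, the shape of the new dispatch described in the commit message can be summarized with a minimal, self-contained sketch; this is not Zephyr code, and the names _k_nop_example, server_dispatch, and the data field are illustrative stand-ins for the real k_args packet and server loop:

    #include <stdio.h>

    struct k_args;
    typedef void (*kernelfunc)(struct k_args *);

    struct k_args {
        kernelfunc Comm;  /* after this patch: handler pointer (was a K_COMM enum index) */
        int data;         /* illustrative stand-in for the real Args union */
    };

    /* example service handler; handlers that are never referenced can now
     * be dropped by the linker instead of being pulled in via the table */
    static void _k_nop_example(struct k_args *A)
    {
        printf("nop packet, data=%d\n", A->data);
    }

    /* what the kernel server's dispatch reduces to after this patch */
    static void server_dispatch(struct k_args *A)
    {
        (*A->Comm)(A);    /* was: _k_server_dispatch_table[A->Comm](A) */
    }

    int main(void)
    {
        struct k_args A = { .Comm = _k_nop_example, .data = 42 };

        server_dispatch(&A);
        return 0;
    }

Because every A.Comm assignment in the patch now references the handler symbol directly (e.g. _K_SVC_MUTEX_LOCK_REQUEST expands to _k_mutex_lock_request), each use creates a concrete symbol reference that keeps the handler in the final binary even when sysgen no longer emits _k_server_dispatch_table; that is the linker "hint" the commit message refers to.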