From 13d583dc06d7a8d778e2401ec3b6ac240b64c5c9 Mon Sep 17 00:00:00 2001 From: Yonattan Louise Date: Mon, 24 Aug 2015 10:48:18 -0500 Subject: [PATCH] Rename microkernel struct field 'Args' to 'args'. Rename field 'Args' of the struct: - 'k_proc' in the file include/microkernel/base_api.h - 'k_args' in the file kernel/microkernel/include/micro_private_types.h Change-Id: I5847a2f1e9c7dd34dea37857b4fadeb37ced489b Signed-off-by: Yonattan Louise --- arch/arm/core/task_abort.c | 6 +- include/microkernel/base_api.h | 2 +- .../microkernel/include/micro_private_types.h | 6 +- kernel/microkernel/k_command_packet.c | 2 +- kernel/microkernel/k_event.c | 20 +- kernel/microkernel/k_fifo.c | 28 +-- kernel/microkernel/k_idle.c | 6 +- kernel/microkernel/k_mailbox.c | 194 +++++++++--------- kernel/microkernel/k_memory_map.c | 24 +-- kernel/microkernel/k_memory_pool.c | 44 ++-- kernel/microkernel/k_move_data.c | 2 +- kernel/microkernel/k_mutex.c | 34 +-- kernel/microkernel/k_offload.c | 8 +- kernel/microkernel/k_pipe.c | 36 ++-- kernel/microkernel/k_pipe_get.c | 64 +++--- kernel/microkernel/k_pipe_put.c | 90 ++++---- kernel/microkernel/k_pipe_util.c | 8 +- kernel/microkernel/k_pipe_xfer.c | 158 +++++++------- kernel/microkernel/k_semaphore.c | 66 +++--- kernel/microkernel/k_task.c | 24 +-- kernel/microkernel/k_task_monitor.c | 8 +- kernel/microkernel/k_ticker.c | 10 +- kernel/microkernel/k_timer.c | 42 ++-- 23 files changed, 441 insertions(+), 441 deletions(-) diff --git a/arch/arm/core/task_abort.c b/arch/arm/core/task_abort.c index 6ba8c8e7e89..54f6aca5ae0 100644 --- a/arch/arm/core/task_abort.c +++ b/arch/arm/core/task_abort.c @@ -75,10 +75,10 @@ void _TaskAbort(void) _task_ioctl(_k_current_task->id, taskAbortCode); } else { cmd_packet.Comm = _K_SVC_TASK_OP; - cmd_packet.Args.g1.task = _k_current_task->id; - cmd_packet.Args.g1.opt = taskAbortCode; + cmd_packet.args.g1.task = _k_current_task->id; + cmd_packet.args.g1.opt = taskAbortCode; cmd_packet.alloc = false; - _k_current_task->Args = &cmd_packet; + _k_current_task->args = &cmd_packet; nano_isr_stack_push(&_k_command_stack, (uint32_t) &cmd_packet); _ScbPendsvSet(); } diff --git a/include/microkernel/base_api.h b/include/microkernel/base_api.h index 2dcae94a5ba..fb07cf06aa9 100644 --- a/include/microkernel/base_api.h +++ b/include/microkernel/base_api.h @@ -116,7 +116,7 @@ struct k_task { char *workspace; int worksize; void (*fn_abort)(void); - struct k_args *Args; + struct k_args *args; }; struct _k_mbox_struct { diff --git a/kernel/microkernel/include/micro_private_types.h b/kernel/microkernel/include/micro_private_types.h index 44fb63f05ef..5cece9622f4 100644 --- a/kernel/microkernel/include/micro_private_types.h +++ b/kernel/microkernel/include/micro_private_types.h @@ -50,7 +50,7 @@ struct k_timer { struct k_timer *prev; int32_t duration; int32_t period; - struct k_args *Args; + struct k_args *args; }; /* Kernel server command codes */ @@ -370,7 +370,7 @@ union k_args_args { * The size of the k_args structure must be equivalent to ... * CMD_PKT_SIZE_IN_WORDS * sizeof(uint32_t) * To this end the entire structure is packed. This ensures that the compiler - * aligns 'Args' to a 4-byte boundary. If left unpacked, then some compilers + * aligns 'args' to a 4-byte boundary. If left unpacked, then some compilers * may provide an extra 4 bytes of padding to align it to an 8-byte boundary, * thereby violating the previously stated equivalence. 
*/ @@ -393,7 +393,7 @@ struct k_args { struct k_timer *timer; int rcode; } Time; - K_ARGS_ARGS Args; + K_ARGS_ARGS args; } __packed; /* ---------------------------------------------------------------------- */ diff --git a/kernel/microkernel/k_command_packet.c b/kernel/microkernel/k_command_packet.c index 294ae8b48da..1910f3f6f7d 100644 --- a/kernel/microkernel/k_command_packet.c +++ b/kernel/microkernel/k_command_packet.c @@ -103,6 +103,6 @@ cmdPkt_t *_cmd_pkt_get( void _k_task_call(struct k_args *cmd_packet) { cmd_packet->alloc = false; - _k_current_task->Args = cmd_packet; + _k_current_task->args = cmd_packet; nano_task_stack_push(&_k_command_stack, (uint32_t)cmd_packet); } diff --git a/kernel/microkernel/k_event.c b/kernel/microkernel/k_event.c index fd30dbb850c..45e2c080ce9 100644 --- a/kernel/microkernel/k_event.c +++ b/kernel/microkernel/k_event.c @@ -48,11 +48,11 @@ extern struct evstr _k_event_list[]; */ void _k_event_handler_set(struct k_args *A) { - kevent_t event = A->Args.e1.event; + kevent_t event = A->args.e1.event; struct evstr *E = _k_event_list + event; if (E->func != NULL) { - if (likely(A->Args.e1.func == NULL)) { + if (likely(A->args.e1.func == NULL)) { /* uninstall handler */ E->func = NULL; A->Time.rcode = RC_OK; @@ -62,7 +62,7 @@ void _k_event_handler_set(struct k_args *A) } } else { /* install handler */ - E->func = A->Args.e1.func; + E->func = A->args.e1.func; E->status = 0; A->Time.rcode = RC_OK; } @@ -73,8 +73,8 @@ int task_event_handler_set(kevent_t event, kevent_handler_t handler) struct k_args A; A.Comm = _K_SVC_EVENT_HANDLER_SET; - A.Args.e1.event = event; - A.Args.e1.func = handler; + A.args.e1.event = event; + A.args.e1.func = handler; KERNEL_ENTRY(&A); return A.Time.rcode; } @@ -87,7 +87,7 @@ int task_event_handler_set(kevent_t event, kevent_handler_t handler) */ void _k_event_test_timeout(struct k_args *A) { - kevent_t event = A->Args.e1.event; + kevent_t event = A->args.e1.event; struct evstr *E = _k_event_list + event; FREETIMER(A->Time.timer); @@ -104,7 +104,7 @@ void _k_event_test_timeout(struct k_args *A) */ void _k_event_test(struct k_args *A) { - kevent_t event = A->Args.e1.event; + kevent_t event = A->args.e1.event; struct evstr *E = _k_event_list + event; if (E->status) { /* the next event can be received */ @@ -142,7 +142,7 @@ int _task_event_recv(kevent_t event, int32_t time) struct k_args A; A.Comm = _K_SVC_EVENT_TEST; - A.Args.e1.event = event; + A.args.e1.event = event; A.Time.ticks = time; KERNEL_ENTRY(&A); return A.Time.rcode; @@ -199,7 +199,7 @@ void _k_do_event_signal(kevent_t event) */ void _k_event_signal(struct k_args *A) { - kevent_t event = A->Args.e1.event; + kevent_t event = A->args.e1.event; _k_do_event_signal(event); A->Time.rcode = RC_OK; } @@ -209,7 +209,7 @@ int task_event_send(kevent_t event) struct k_args A; A.Comm = _K_SVC_EVENT_SIGNAL; - A.Args.e1.event = event; + A.args.e1.event = event; KERNEL_ENTRY(&A); return A.Time.rcode; } diff --git a/kernel/microkernel/k_fifo.c b/kernel/microkernel/k_fifo.c index 5abfe1edd77..b9c921ae7fe 100644 --- a/kernel/microkernel/k_fifo.c +++ b/kernel/microkernel/k_fifo.c @@ -96,16 +96,16 @@ void _k_fifo_enque_request(struct k_args *A) int Qid, n, w; char *p, *q; /* Ski char->uint32_t ??? 
*/ - Qid = A->Args.q1.queue; + Qid = A->args.q1.queue; Q = (struct _k_fifo_struct *)Qid; w = OCTET_TO_SIZEOFUNIT(Q->Esize); - q = A->Args.q1.data; + q = A->args.q1.data; n = Q->Nused; if (n < Q->Nelms) { W = Q->Waiters; if (W) { Q->Waiters = W->next; - p = W->Args.q1.data; + p = W->args.q1.data; memcpy(p, q, w); #ifdef CONFIG_SYS_CLOCK_EXISTS @@ -168,8 +168,8 @@ int _task_fifo_put(kfifo_t queue, /* FIFO queue */ A.Comm = _K_SVC_FIFO_ENQUE_REQUEST; A.Time.ticks = time; - A.Args.q1.data = (char *)data; - A.Args.q1.queue = queue; + A.args.q1.data = (char *)data; + A.args.q1.queue = queue; KERNEL_ENTRY(&A); @@ -228,10 +228,10 @@ void _k_fifo_deque_request(struct k_args *A) int Qid, n, w; char *p, *q; /* idem */ - Qid = A->Args.q1.queue; + Qid = A->args.q1.queue; Q = (struct _k_fifo_struct *)Qid; w = OCTET_TO_SIZEOFUNIT(Q->Esize); - p = A->Args.q1.data; + p = A->args.q1.data; n = Q->Nused; if (n) { q = Q->Deqp; @@ -247,7 +247,7 @@ void _k_fifo_deque_request(struct k_args *A) if (W) { Q->Waiters = W->next; p = Q->Enqp; - q = W->Args.q1.data; + q = W->args.q1.data; w = OCTET_TO_SIZEOFUNIT(Q->Esize); memcpy(p, q, w); p = (char *)((int)p + w); @@ -313,8 +313,8 @@ int _task_fifo_get(kfifo_t queue, void *data, int32_t time) A.Comm = _K_SVC_FIFO_DEQUE_REQUEST; A.Time.ticks = time; - A.Args.q1.data = (char *)data; - A.Args.q1.queue = queue; + A.args.q1.data = (char *)data; + A.args.q1.queue = queue; KERNEL_ENTRY(&A); @@ -333,9 +333,9 @@ void _k_fifo_ioctl(struct k_args *A) struct _k_fifo_struct *Q; int Qid; - Qid = A->Args.q1.queue; + Qid = A->args.q1.queue; Q = (struct _k_fifo_struct *)Qid; - if (A->Args.q1.size) { + if (A->args.q1.size) { if (Q->Nused) { struct k_args *X; @@ -378,8 +378,8 @@ int _task_fifo_ioctl(kfifo_t queue, int op) struct k_args A; A.Comm = _K_SVC_FIFO_IOCTL; - A.Args.q1.queue = queue; - A.Args.q1.size = op; + A.args.q1.queue = queue; + A.args.q1.size = op; KERNEL_ENTRY(&A); return A.Time.rcode; } diff --git a/kernel/microkernel/k_idle.c b/kernel/microkernel/k_idle.c index aa66ca4ad1f..fb00ae62103 100644 --- a/kernel/microkernel/k_idle.c +++ b/kernel/microkernel/k_idle.c @@ -217,12 +217,12 @@ void _k_workload_get(struct k_args *P) iret = 0; } - P->Args.u1.rval = iret; + P->args.u1.rval = iret; } #else void _k_workload_get(struct k_args *P) { - P->Args.u1.rval = 0; + P->args.u1.rval = 0; } #endif /* CONFIG_WORKLOAD_MONITOR */ @@ -250,7 +250,7 @@ int task_workload_get(void) A.Comm = _K_SVC_WORKLOAD_GET; KERNEL_ENTRY(&A); - return A.Args.u1.rval; + return A.args.u1.rval; } /** diff --git a/kernel/microkernel/k_mailbox.c b/kernel/microkernel/k_mailbox.c index f5245c650a0..73f08b98d7b 100644 --- a/kernel/microkernel/k_mailbox.c +++ b/kernel/microkernel/k_mailbox.c @@ -76,35 +76,35 @@ static void copy_packet(struct k_args **out, struct k_args *in) */ static int match(struct k_args *Reader, struct k_args *Writer) { - if ((Reader->Args.m1.mess.tx_task == ANYTASK || - Reader->Args.m1.mess.tx_task == Writer->Args.m1.mess.tx_task) && - (Writer->Args.m1.mess.rx_task == ANYTASK || - Writer->Args.m1.mess.rx_task == Reader->Args.m1.mess.rx_task)) { - if (!ISASYNCMSG(&(Writer->Args.m1.mess))) { + if ((Reader->args.m1.mess.tx_task == ANYTASK || + Reader->args.m1.mess.tx_task == Writer->args.m1.mess.tx_task) && + (Writer->args.m1.mess.rx_task == ANYTASK || + Writer->args.m1.mess.rx_task == Reader->args.m1.mess.rx_task)) { + if (!ISASYNCMSG(&(Writer->args.m1.mess))) { int32_t info; - Reader->Args.m1.mess.tx_task = - Writer->Args.m1.mess.tx_task; + Reader->args.m1.mess.tx_task = + 
Writer->args.m1.mess.tx_task; - Writer->Args.m1.mess.rx_task = - Reader->Args.m1.mess.rx_task; + Writer->args.m1.mess.rx_task = + Reader->args.m1.mess.rx_task; - info = Reader->Args.m1.mess.info; - Reader->Args.m1.mess.info = Writer->Args.m1.mess.info; - Writer->Args.m1.mess.info = info; + info = Reader->args.m1.mess.info; + Reader->args.m1.mess.info = Writer->args.m1.mess.info; + Writer->args.m1.mess.info = info; } else { - Reader->Args.m1.mess.tx_task = - Writer->Args.m1.mess.tx_task; - Reader->Args.m1.mess.tx_data = NULL; - Reader->Args.m1.mess.tx_block = - Writer->Args.m1.mess.tx_block; - Reader->Args.m1.mess.info = Writer->Args.m1.mess.info; + Reader->args.m1.mess.tx_task = + Writer->args.m1.mess.tx_task; + Reader->args.m1.mess.tx_data = NULL; + Reader->args.m1.mess.tx_block = + Writer->args.m1.mess.tx_block; + Reader->args.m1.mess.info = Writer->args.m1.mess.info; } - if (Reader->Args.m1.mess.size > Writer->Args.m1.mess.size) { - Reader->Args.m1.mess.size = Writer->Args.m1.mess.size; + if (Reader->args.m1.mess.size > Writer->args.m1.mess.size) { + Reader->args.m1.mess.size = Writer->args.m1.mess.size; } else { - Writer->Args.m1.mess.size = Reader->Args.m1.mess.size; + Writer->args.m1.mess.size = Reader->args.m1.mess.size; } /* @@ -112,12 +112,12 @@ static int match(struct k_args *Reader, struct k_args *Writer) * the -1 will not be returned when there is a match. */ - __ASSERT_NO_MSG(Writer->Args.m1.mess.size == - Reader->Args.m1.mess.size); + __ASSERT_NO_MSG(Writer->args.m1.mess.size == + Reader->args.m1.mess.size); - __ASSERT_NO_MSG((uint32_t)(-1) != Reader->Args.m1.mess.size); + __ASSERT_NO_MSG((uint32_t)(-1) != Reader->args.m1.mess.size); - return Reader->Args.m1.mess.size; + return Reader->args.m1.mess.size; } return -1; /* There was no match */ @@ -159,44 +159,44 @@ static bool prepare_transfer(struct k_args *move, */ move->priority = max(writer->priority, reader->priority); move->Ctxt.task = NULL; - move->Args.MovedReq.Action = + move->args.MovedReq.Action = (MovedAction)(MVDACT_SNDACK | MVDACT_RCVACK); - move->Args.MovedReq.iTotalSize = writer->Args.m1.mess.size; - move->Args.MovedReq.Extra.Setup.ContSnd = NULL; - move->Args.MovedReq.Extra.Setup.ContRcv = NULL; + move->args.MovedReq.iTotalSize = writer->args.m1.mess.size; + move->args.MovedReq.Extra.Setup.ContSnd = NULL; + move->args.MovedReq.Extra.Setup.ContRcv = NULL; /* reader: */ - if (reader->Args.m1.mess.rx_data == NULL) { + if (reader->args.m1.mess.rx_data == NULL) { all_data_present = false; - __ASSERT_NO_MSG(0 == reader->Args.m1.mess.extra + __ASSERT_NO_MSG(0 == reader->args.m1.mess.extra .transfer); /* == extra.sema */ - reader->Args.m1.mess.extra.transfer = move; + reader->args.m1.mess.extra.transfer = move; /*SENDARGS(reader); */ } else { - move->Args.MovedReq.destination = - reader->Args.m1.mess.rx_data; - writer->Args.m1.mess.rx_data = - reader->Args.m1.mess.rx_data; + move->args.MovedReq.destination = + reader->args.m1.mess.rx_data; + writer->args.m1.mess.rx_data = + reader->args.m1.mess.rx_data; /* chain the reader */ - move->Args.MovedReq.Extra.Setup.ContRcv = reader; + move->args.MovedReq.Extra.Setup.ContRcv = reader; } /* writer: */ - if (ISASYNCMSG(&(writer->Args.m1.mess))) { - move->Args.MovedReq.source = - writer->Args.m1.mess.tx_block.pointer_to_data; - reader->Args.m1.mess.tx_block = - writer->Args.m1.mess.tx_block; + if (ISASYNCMSG(&(writer->args.m1.mess))) { + move->args.MovedReq.source = + writer->args.m1.mess.tx_block.pointer_to_data; + reader->args.m1.mess.tx_block = + 
writer->args.m1.mess.tx_block; } else { - __ASSERT_NO_MSG(NULL != writer->Args.m1.mess.tx_data); - move->Args.MovedReq.source = - writer->Args.m1.mess.tx_data; - reader->Args.m1.mess.tx_data = - writer->Args.m1.mess.tx_data; + __ASSERT_NO_MSG(NULL != writer->args.m1.mess.tx_data); + move->args.MovedReq.source = + writer->args.m1.mess.tx_data; + reader->args.m1.mess.tx_data = + writer->args.m1.mess.tx_data; } /* chain the writer */ - move->Args.MovedReq.Extra.Setup.ContSnd = writer; + move->args.MovedReq.Extra.Setup.ContSnd = writer; return all_data_present; } else { @@ -212,8 +212,8 @@ static bool prepare_transfer(struct k_args *move, */ static void transfer(struct k_args *pMvdReq) { - __ASSERT_NO_MSG(NULL != pMvdReq->Args.MovedReq.source); - __ASSERT_NO_MSG(NULL != pMvdReq->Args.MovedReq.destination); + __ASSERT_NO_MSG(NULL != pMvdReq->args.MovedReq.source); + __ASSERT_NO_MSG(NULL != pMvdReq->args.MovedReq.destination); _k_movedata_request(pMvdReq); FREEARGS(pMvdReq); @@ -226,8 +226,8 @@ static void transfer(struct k_args *pMvdReq) */ void _k_mbox_send_ack(struct k_args *pCopyWriter) { - if (ISASYNCMSG(&(pCopyWriter->Args.m1.mess))) { - if (pCopyWriter->Args.m1.mess.extra.sema) { + if (ISASYNCMSG(&(pCopyWriter->args.m1.mess))) { + if (pCopyWriter->args.m1.mess.extra.sema) { /* * Signal the semaphore. Alternatively, this could * be done using the continuation mechanism. @@ -238,7 +238,7 @@ void _k_mbox_send_ack(struct k_args *pCopyWriter) memset(&A, 0xfd, sizeof(struct k_args)); #endif A.Comm = _K_SVC_SEM_SIGNAL; - A.Args.s1.sema = pCopyWriter->Args.m1.mess.extra.sema; + A.args.s1.sema = pCopyWriter->args.m1.mess.extra.sema; _k_sem_signal(&A); } @@ -248,22 +248,22 @@ void _k_mbox_send_ack(struct k_args *pCopyWriter) */ if ((uint32_t)(-1) != - pCopyWriter->Args.m1.mess.tx_block.pool_id) { + pCopyWriter->args.m1.mess.tx_block.pool_id) { /* * special value to tell if block should be * freed or not */ pCopyWriter->Comm = _K_SVC_MEM_POOL_BLOCK_RELEASE; - pCopyWriter->Args.p1.pool_id = - pCopyWriter->Args.m1.mess.tx_block.pool_id; - pCopyWriter->Args.p1.rep_poolptr = - pCopyWriter->Args.m1.mess.tx_block + pCopyWriter->args.p1.pool_id = + pCopyWriter->args.m1.mess.tx_block.pool_id; + pCopyWriter->args.p1.rep_poolptr = + pCopyWriter->args.m1.mess.tx_block .address_in_pool; - pCopyWriter->Args.p1.rep_dataptr = - pCopyWriter->Args.m1.mess.tx_block + pCopyWriter->args.p1.rep_dataptr = + pCopyWriter->args.m1.mess.tx_block .pointer_to_data; - pCopyWriter->Args.p1.req_size = - pCopyWriter->Args.m1.mess.tx_block.req_size; + pCopyWriter->args.p1.req_size = + pCopyWriter->args.m1.mess.tx_block.req_size; SENDARGS(pCopyWriter); return; } else { @@ -282,7 +282,7 @@ void _k_mbox_send_ack(struct k_args *pCopyWriter) Starter = pCopyWriter->Ctxt.args; Starter->Time.rcode = pCopyWriter->Time.rcode; - Starter->Args.m1.mess = pCopyWriter->Args.m1.mess; + Starter->args.m1.mess = pCopyWriter->args.m1.mess; _k_state_bit_reset(Starter->Ctxt.task, TF_SEND | TF_SENDDATA); FREEARGS(pCopyWriter); @@ -312,14 +312,14 @@ void _k_mbox_send_reply(struct k_args *pCopyWriter) */ void _k_mbox_send_request(struct k_args *Writer) { - kmbox_t MailBoxId = Writer->Args.m1.mess.mailbox; + kmbox_t MailBoxId = Writer->args.m1.mess.mailbox; struct _k_mbox_struct *MailBox; struct k_args *CopyReader; struct k_args *CopyWriter; struct k_args *temp; bool bAsync; - bAsync = ISASYNCMSG(&Writer->Args.m1.mess); + bAsync = ISASYNCMSG(&Writer->args.m1.mess); struct k_task *sender = NULL; @@ -498,11 +498,11 @@ int _task_mbox_put(kmbox_t mbox, 
A.priority = prio; A.Comm = _K_SVC_MBOX_SEND_REQUEST; A.Time.ticks = time; - A.Args.m1.mess = *M; + A.args.m1.mess = *M; KERNEL_ENTRY(&A); - *M = A.Args.m1.mess; + *M = A.args.m1.mess; return A.Time.rcode; } @@ -527,7 +527,7 @@ void _k_mbox_receive_ack(struct k_args *pCopyReader) Starter->Time.rcode = pCopyReader->Time.rcode; /* And copy the message information from the received packet. */ - Starter->Args.m1.mess = pCopyReader->Args.m1.mess; + Starter->args.m1.mess = pCopyReader->args.m1.mess; /* Reschedule the sender task */ _k_state_bit_reset(Starter->Ctxt.task, TF_RECV | TF_RECVDATA); @@ -558,7 +558,7 @@ void _k_mbox_receive_reply(struct k_args *pCopyReader) */ void _k_mbox_receive_request(struct k_args *Reader) { - kmbox_t MailBoxId = Reader->Args.m1.mess.mailbox; + kmbox_t MailBoxId = Reader->args.m1.mess.mailbox; struct _k_mbox_struct *MailBox; struct k_args *CopyWriter; struct k_args *temp; @@ -695,10 +695,10 @@ int _task_mbox_get(kmbox_t mbox, A.priority = _k_current_task->priority; A.Comm = _K_SVC_MBOX_RECEIVE_REQUEST; A.Time.ticks = time; - A.Args.m1.mess = *M; + A.args.m1.mess = *M; KERNEL_ENTRY(&A); - *M = A.Args.m1.mess; + *M = A.args.m1.mess; return A.Time.rcode; } @@ -730,7 +730,7 @@ void _task_mbox_block_put(kmbox_t mbox, #endif A.priority = prio; A.Comm = _K_SVC_MBOX_SEND_REQUEST; - A.Args.m1.mess = *M; + A.args.m1.mess = *M; KERNEL_ENTRY(&A); } @@ -753,26 +753,26 @@ void _k_mbox_receive_data(struct k_args *Starter) memcpy(CopyStarter, Starter, sizeof(struct k_args)); CopyStarter->Ctxt.args = Starter; - MoveD = CopyStarter->Args.m1.mess.extra.transfer; + MoveD = CopyStarter->args.m1.mess.extra.transfer; CopyStarter->Comm = _K_SVC_MBOX_RECEIVE_ACK; CopyStarter->Time.rcode = RC_OK; - MoveD->Args.MovedReq.Extra.Setup.ContRcv = CopyStarter; + MoveD->args.MovedReq.Extra.Setup.ContRcv = CopyStarter; CopyStarter->next = NULL; - MoveD->Args.MovedReq.destination = CopyStarter->Args.m1.mess.rx_data; + MoveD->args.MovedReq.destination = CopyStarter->args.m1.mess.rx_data; - MoveD->Args.MovedReq.iTotalSize = CopyStarter->Args.m1.mess.size; + MoveD->args.MovedReq.iTotalSize = CopyStarter->args.m1.mess.size; - Writer = MoveD->Args.MovedReq.Extra.Setup.ContSnd; + Writer = MoveD->args.MovedReq.Extra.Setup.ContSnd; if (Writer != NULL) { - if (ISASYNCMSG(&(Writer->Args.m1.mess))) { - CopyStarter->Args.m1.mess.tx_block = - Writer->Args.m1.mess.tx_block; + if (ISASYNCMSG(&(Writer->args.m1.mess))) { + CopyStarter->args.m1.mess.tx_block = + Writer->args.m1.mess.tx_block; } else { - Writer->Args.m1.mess.rx_data = - CopyStarter->Args.m1.mess.rx_data; - CopyStarter->Args.m1.mess.tx_data = - Writer->Args.m1.mess.tx_data; + Writer->args.m1.mess.rx_data = + CopyStarter->args.m1.mess.rx_data; + CopyStarter->args.m1.mess.tx_data = + Writer->args.m1.mess.tx_data; } transfer(MoveD); /* and MoveD will be cleared as well */ } @@ -792,7 +792,7 @@ void _task_mbox_data_get(struct k_msg *M) return; } - A.Args.m1.mess = *M; + A.args.m1.mess = *M; A.Comm = _K_SVC_MBOX_RECEIVE_DATA; KERNEL_ENTRY(&A); @@ -840,11 +840,11 @@ int _task_mbox_data_block_get(struct k_msg *message, * SEND_ACK is processed, change its [pool_id] to -1. 
*/ - Writer = MoveD->Args.MovedReq.Extra.Setup.ContSnd; + Writer = MoveD->args.MovedReq.Extra.Setup.ContSnd; __ASSERT_NO_MSG(NULL != Writer); __ASSERT_NO_MSG(NULL == Writer->next); - Writer->Args.m1.mess.tx_block.pool_id = (uint32_t)(-1); + Writer->args.m1.mess.tx_block.pool_id = (uint32_t)(-1); nano_task_stack_push(&_k_command_stack, (uint32_t)Writer); #ifdef ACTIV_ASSERTS @@ -855,7 +855,7 @@ int _task_mbox_data_block_get(struct k_msg *message, * for continuation on receive. */ - Dummy = MoveD->Args.MovedReq.Extra.Setup.ContRcv; + Dummy = MoveD->args.MovedReq.Extra.Setup.ContRcv; __ASSERT_NO_MSG(NULL == Dummy); #endif @@ -883,7 +883,7 @@ int _task_mbox_data_block_get(struct k_msg *message, */ struct k_args A; - A.Args.m1.mess = *message; + A.args.m1.mess = *message; A.Comm = _K_SVC_MBOX_RECEIVE_DATA; KERNEL_ENTRY(&A); @@ -908,21 +908,21 @@ void _k_mbox_send_data(struct k_args *Starter) memcpy(CopyStarter, Starter, sizeof(struct k_args)); CopyStarter->Ctxt.args = Starter; - MoveD = CopyStarter->Args.m1.mess.extra.transfer; + MoveD = CopyStarter->args.m1.mess.extra.transfer; CopyStarter->Time.rcode = RC_OK; CopyStarter->Comm = _K_SVC_MBOX_SEND_ACK; - MoveD->Args.MovedReq.Extra.Setup.ContSnd = CopyStarter; + MoveD->args.MovedReq.Extra.Setup.ContSnd = CopyStarter; CopyStarter->next = NULL; - MoveD->Args.MovedReq.source = CopyStarter->Args.m1.mess.rx_data; + MoveD->args.MovedReq.source = CopyStarter->args.m1.mess.rx_data; - Reader = MoveD->Args.MovedReq.Extra.Setup.ContRcv; + Reader = MoveD->args.MovedReq.Extra.Setup.ContRcv; if (Reader != NULL) { - Reader->Args.m1.mess.rx_data = - CopyStarter->Args.m1.mess.rx_data; - CopyStarter->Args.m1.mess.tx_data = - Reader->Args.m1.mess.tx_data; + Reader->args.m1.mess.rx_data = + CopyStarter->args.m1.mess.rx_data; + CopyStarter->args.m1.mess.tx_data = + Reader->args.m1.mess.tx_data; transfer(MoveD); /* and MoveD will be cleared as well */ } diff --git a/kernel/microkernel/k_memory_map.c b/kernel/microkernel/k_memory_map.c index bc6e8c0a452..1f4b9cbe4ca 100644 --- a/kernel/microkernel/k_memory_map.c +++ b/kernel/microkernel/k_memory_map.c @@ -103,10 +103,10 @@ void _k_mem_map_alloc_timeout(struct k_args *A) void _k_mem_map_alloc(struct k_args *A) { struct _k_mem_map_struct *M = - (struct _k_mem_map_struct *)(A->Args.a1.mmap); + (struct _k_mem_map_struct *)(A->args.a1.mmap); if (M->Free != NULL) { - *(A->Args.a1.mptr) = M->Free; + *(A->args.a1.mptr) = M->Free; M->Free = *(char **)(M->Free); M->Nused++; @@ -120,7 +120,7 @@ void _k_mem_map_alloc(struct k_args *A) return; } - *(A->Args.a1.mptr) = NULL; + *(A->args.a1.mptr) = NULL; if (likely(A->Time.ticks != TICKS_NONE)) { A->priority = _k_current_task->priority; @@ -156,8 +156,8 @@ int _task_mem_map_alloc(kmemory_map_t mmap, void **mptr, int32_t time) A.Comm = _K_SVC_MEM_MAP_ALLOC; A.Time.ticks = time; - A.Args.a1.mmap = mmap; - A.Args.a1.mptr = mptr; + A.args.a1.mmap = mmap; + A.args.a1.mptr = mptr; KERNEL_ENTRY(&A); return A.Time.rcode; } @@ -172,17 +172,17 @@ int _task_mem_map_alloc(kmemory_map_t mmap, void **mptr, int32_t time) void _k_mem_map_dealloc(struct k_args *A) { struct _k_mem_map_struct *M = - (struct _k_mem_map_struct *)(A->Args.a1.mmap); + (struct _k_mem_map_struct *)(A->args.a1.mmap); struct k_args *X; - **(char ***)(A->Args.a1.mptr) = M->Free; - M->Free = *(char **)(A->Args.a1.mptr); - *(A->Args.a1.mptr) = NULL; + **(char ***)(A->args.a1.mptr) = M->Free; + M->Free = *(char **)(A->args.a1.mptr); + *(A->args.a1.mptr) = NULL; X = M->Waiters; if (X) { M->Waiters = X->next; - *(X->Args.a1.mptr) = 
M->Free; + *(X->args.a1.mptr) = M->Free; M->Free = *(char **)(M->Free); #ifdef CONFIG_SYS_CLOCK_EXISTS @@ -220,8 +220,8 @@ void _task_mem_map_free(kmemory_map_t mmap, void **mptr) struct k_args A; A.Comm = _K_SVC_MEM_MAP_DEALLOC; - A.Args.a1.mmap = mmap; - A.Args.a1.mptr = mptr; + A.args.a1.mmap = mmap; + A.args.a1.mptr = mptr; KERNEL_ENTRY(&A); } diff --git a/kernel/microkernel/k_memory_pool.c b/kernel/microkernel/k_memory_pool.c index 126cb329c6b..35351a23a10 100644 --- a/kernel/microkernel/k_memory_pool.c +++ b/kernel/microkernel/k_memory_pool.c @@ -199,7 +199,7 @@ static void defrag(struct pool_struct *P, void _k_defrag(struct k_args *A) { - struct pool_struct *P = _k_mem_pool_list + OBJ_INDEX(A->Args.p1.pool_id); + struct pool_struct *P = _k_mem_pool_list + OBJ_INDEX(A->args.p1.pool_id); defrag(P, P->nr_of_frags - 1, /* start from smallest blocks */ @@ -238,7 +238,7 @@ void task_mem_pool_defragment(kmemory_pool_t Pid /* pool to defragment */ struct k_args A; A.Comm = _K_SVC_DEFRAG; - A.Args.p1.pool_id = Pid; + A.args.p1.pool_id = Pid; KERNEL_ENTRY(&A); } @@ -422,7 +422,7 @@ static char *get_block_recusive(struct pool_struct *P, int index, int startindex void _k_block_waiters_get(struct k_args *A) { - struct pool_struct *P = _k_mem_pool_list + OBJ_INDEX(A->Args.p1.pool_id); + struct pool_struct *P = _k_mem_pool_list + OBJ_INDEX(A->args.p1.pool_id); char *found_block; struct k_args *curr_task, *prev_task; int start_size, offset; @@ -436,7 +436,7 @@ void _k_block_waiters_get(struct k_args *A) /* calculate size & offset */ start_size = P->minblock_size; offset = P->nr_of_frags - 1; - while (curr_task->Args.p1.req_size > start_size) { + while (curr_task->args.p1.req_size > start_size) { start_size = start_size << 2; /* try one larger */ offset--; } @@ -448,8 +448,8 @@ void _k_block_waiters_get(struct k_args *A) /* if success : remove task from list and reschedule */ if (found_block != NULL) { /* return found block */ - curr_task->Args.p1.rep_poolptr = found_block; - curr_task->Args.p1.rep_dataptr = found_block; + curr_task->args.p1.rep_poolptr = found_block; + curr_task->args.p1.rep_dataptr = found_block; /* reschedule task */ @@ -502,7 +502,7 @@ void _k_mem_pool_block_get_timeout_handle(struct k_args *A) void _k_mem_pool_block_get(struct k_args *A) { - struct pool_struct *P = _k_mem_pool_list + OBJ_INDEX(A->Args.p1.pool_id); + struct pool_struct *P = _k_mem_pool_list + OBJ_INDEX(A->args.p1.pool_id); char *found_block; int start_size; @@ -512,7 +512,7 @@ void _k_mem_pool_block_get(struct k_args *A) start_size = P->minblock_size; offset = P->nr_of_frags - 1; - while (A->Args.p1.req_size > start_size) { + while (A->args.p1.req_size > start_size) { start_size = start_size << 2; /*try one larger */ offset--; } @@ -524,15 +524,15 @@ void _k_mem_pool_block_get(struct k_args *A) get_block_recusive(P, offset, offset); /* allocate and fragment blocks */ if (found_block != NULL) { - A->Args.p1.rep_poolptr = found_block; - A->Args.p1.rep_dataptr = found_block; + A->args.p1.rep_poolptr = found_block; + A->args.p1.rep_dataptr = found_block; A->Time.rcode = RC_OK; return; /* return found block */ } if (likely( (A->Time.ticks != TICKS_NONE) && - (A->Args.p1.req_size <= + (A->args.p1.req_size <= P->maxblock_size))) {/* timeout? 
but not block to large */ A->priority = _k_current_task->priority; A->Ctxt.task = _k_current_task; @@ -576,14 +576,14 @@ int _task_mem_pool_alloc(struct k_block *blockptr, /* ptr to requested block */ A.Comm = _K_SVC_MEM_POOL_BLOCK_GET; A.Time.ticks = time; - A.Args.p1.pool_id = pool_id; - A.Args.p1.req_size = reqsize; + A.args.p1.pool_id = pool_id; + A.args.p1.req_size = reqsize; KERNEL_ENTRY(&A); blockptr->pool_id = pool_id; - blockptr->address_in_pool = A.Args.p1.rep_poolptr; - blockptr->pointer_to_data = A.Args.p1.rep_dataptr; + blockptr->address_in_pool = A.args.p1.rep_poolptr; + blockptr->pointer_to_data = A.args.p1.rep_dataptr; blockptr->req_size = reqsize; return A.Time.rcode; @@ -608,7 +608,7 @@ void _k_mem_pool_block_release(struct k_args *A) int start_size, offset; int i, j; - Pid = A->Args.p1.pool_id; + Pid = A->args.p1.pool_id; P = _k_mem_pool_list + OBJ_INDEX(Pid); @@ -617,7 +617,7 @@ void _k_mem_pool_block_release(struct k_args *A) start_size = P->minblock_size; offset = P->nr_of_frags - 1; - while (A->Args.p1.req_size > start_size) { + while (A->args.p1.req_size > start_size) { start_size = start_size << 2; /* try one larger */ offset--; } @@ -631,7 +631,7 @@ void _k_mem_pool_block_release(struct k_args *A) while ((j < block->nr_of_entries) && ((blockstat = block->blocktable + j)->mem_blocks != 0)) { for (i = 0; i < 4; i++) { - if (A->Args.p1.rep_poolptr == + if (A->args.p1.rep_poolptr == (blockstat->mem_blocks + (OCTET_TO_SIZEOFUNIT(i * block->block_size)))) { /* we've found the right pointer, so free it */ @@ -677,9 +677,9 @@ void task_mem_pool_free(struct k_block *blockptr /* pointer to block to free */ struct k_args A; A.Comm = _K_SVC_MEM_POOL_BLOCK_RELEASE; - A.Args.p1.pool_id = blockptr->pool_id; - A.Args.p1.req_size = blockptr->req_size; - A.Args.p1.rep_poolptr = blockptr->address_in_pool; - A.Args.p1.rep_dataptr = blockptr->pointer_to_data; + A.args.p1.pool_id = blockptr->pool_id; + A.args.p1.req_size = blockptr->req_size; + A.args.p1.rep_poolptr = blockptr->address_in_pool; + A.args.p1.rep_dataptr = blockptr->pointer_to_data; KERNEL_ENTRY(&A); } diff --git a/kernel/microkernel/k_move_data.c b/kernel/microkernel/k_move_data.c index 2465db45e5c..a743297285e 100644 --- a/kernel/microkernel/k_move_data.c +++ b/kernel/microkernel/k_move_data.c @@ -83,7 +83,7 @@ void _k_movedata_request(struct k_args *Req) { struct moved_req *ReqArgs; - ReqArgs = &(Req->Args.MovedReq); + ReqArgs = &(Req->args.MovedReq); __ASSERT_NO_MSG(0 == (ReqArgs->iTotalSize % diff --git a/kernel/microkernel/k_mutex.c b/kernel/microkernel/k_mutex.c index 1a3fa194b17..d2da6d5ebd0 100644 --- a/kernel/microkernel/k_mutex.c +++ b/kernel/microkernel/k_mutex.c @@ -87,7 +87,7 @@ void _k_mutex_lock_reply( REMOVE_ELM(A); A->Time.rcode = RC_TIME; - MutexId = A->Args.l1.mutex; + MutexId = A->args.l1.mutex; Mutex = (struct _k_mutex_struct *)MutexId; FirstWaiter = Mutex->Waiters; @@ -124,8 +124,8 @@ void _k_mutex_lock_reply( PrioChanger->alloc = true; PrioChanger->Comm = _K_SVC_TASK_PRIORITY_SET; PrioChanger->priority = newPriority; - PrioChanger->Args.g1.task = Mutex->Owner; - PrioChanger->Args.g1.prio = newPriority; + PrioChanger->args.g1.task = Mutex->Owner; + PrioChanger->args.g1.prio = newPriority; SENDARGS(PrioChanger); Mutex->OwnerCurrentPrio = newPriority; } @@ -175,17 +175,17 @@ void _k_mutex_lock_request(struct k_args *A /* pointer to mutex lock struct k_args *PrioBooster; /* used to change a task's priority level */ kpriority_t BoostedPrio; /* new "boosted" priority level */ - MutexId = 
A->Args.l1.mutex; + MutexId = A->args.l1.mutex; Mutex = (struct _k_mutex_struct *)MutexId; - if (Mutex->Level == 0 || Mutex->Owner == A->Args.l1.task) { + if (Mutex->Level == 0 || Mutex->Owner == A->args.l1.task) { /* The mutex is either unowned or this is a nested lock. */ #ifdef CONFIG_OBJECT_MONITOR Mutex->Count++; #endif - Mutex->Owner = A->Args.l1.task; + Mutex->Owner = A->args.l1.task; /* * Assign the task's priority directly if the requesting @@ -260,8 +260,8 @@ void _k_mutex_lock_request(struct k_args *A /* pointer to mutex lock PrioBooster->alloc = true; PrioBooster->Comm = _K_SVC_TASK_PRIORITY_SET; PrioBooster->priority = BoostedPrio; - PrioBooster->Args.g1.task = Mutex->Owner; - PrioBooster->Args.g1.prio = BoostedPrio; + PrioBooster->args.g1.task = Mutex->Owner; + PrioBooster->args.g1.prio = BoostedPrio; SENDARGS(PrioBooster); Mutex->OwnerCurrentPrio = BoostedPrio; } @@ -295,8 +295,8 @@ int _task_mutex_lock( A.Comm = _K_SVC_MUTEX_LOCK_REQUEST; A.Time.ticks = time; - A.Args.l1.mutex = mutex; - A.Args.l1.task = _k_current_task->id; + A.args.l1.mutex = mutex; + A.args.l1.task = _k_current_task->id; KERNEL_ENTRY(&A); return A.Time.rcode; } @@ -321,9 +321,9 @@ void _k_mutex_unlock(struct k_args *A /* pointer to mutex unlock int MutexId; /* mutex ID obtained from unlock request */ struct k_args *PrioDowner; /* used to change a task's priority level */ - MutexId = A->Args.l1.mutex; + MutexId = A->args.l1.mutex; Mutex = (struct _k_mutex_struct *)MutexId; - if (Mutex->Owner == A->Args.l1.task && --(Mutex->Level) == 0) { + if (Mutex->Owner == A->args.l1.task && --(Mutex->Level) == 0) { /* * The requesting task owns the mutex and all locks * have been released. @@ -347,8 +347,8 @@ void _k_mutex_unlock(struct k_args *A /* pointer to mutex unlock PrioDowner->alloc = true; PrioDowner->Comm = _K_SVC_TASK_PRIORITY_SET; PrioDowner->priority = Mutex->OwnerOriginalPrio; - PrioDowner->Args.g1.task = Mutex->Owner; - PrioDowner->Args.g1.prio = Mutex->OwnerOriginalPrio; + PrioDowner->args.g1.task = Mutex->Owner; + PrioDowner->args.g1.prio = Mutex->OwnerOriginalPrio; SENDARGS(PrioDowner); } @@ -361,7 +361,7 @@ void _k_mutex_unlock(struct k_args *A /* pointer to mutex unlock */ Mutex->Waiters = X->next; - Mutex->Owner = X->Args.l1.task; + Mutex->Owner = X->args.l1.task; Mutex->Level = 1; Mutex->OwnerCurrentPrio = X->priority; Mutex->OwnerOriginalPrio = X->priority; @@ -408,8 +408,8 @@ void _task_mutex_unlock(kmutex_t mutex /* mutex to unlock */ struct k_args A; /* argument packet */ A.Comm = _K_SVC_MUTEX_UNLOCK; - A.Args.l1.mutex = mutex; - A.Args.l1.task = _k_current_task->id; + A.args.l1.mutex = mutex; + A.args.l1.task = _k_current_task->id; KERNEL_ENTRY(&A); } diff --git a/kernel/microkernel/k_offload.c b/kernel/microkernel/k_offload.c index 5baead15704..33b1b531acd 100644 --- a/kernel/microkernel/k_offload.c +++ b/kernel/microkernel/k_offload.c @@ -45,7 +45,7 @@ void _k_offload_to_fiber(struct k_args *A) { - A->Args.u1.rval = (*A->Args.u1.func)(A->Args.u1.argp); + A->args.u1.rval = (*A->args.u1.func)(A->args.u1.argp); } /** @@ -66,8 +66,8 @@ int task_offload_to_fiber(int (*func)(), void *argp) struct k_args A; A.Comm = _K_SVC_OFFLOAD_TO_FIBER; - A.Args.u1.func = func; - A.Args.u1.argp = argp; + A.args.u1.func = func; + A.args.u1.argp = argp; KERNEL_ENTRY(&A); - return A.Args.u1.rval; + return A.args.u1.rval; } diff --git a/kernel/microkernel/k_pipe.c b/kernel/microkernel/k_pipe.c index b8dbf94283d..5c473812765 100644 --- a/kernel/microkernel/k_pipe.c +++ b/kernel/microkernel/k_pipe.c @@ -100,16 
+100,16 @@ int _task_pipe_get(kpipe_t Id, void *pBuffer, A.Comm = _K_SVC_PIPE_GET_REQUEST; A.Time.ticks = TimeOut; - A.Args.pipe_req.ReqInfo.pipe.id = Id; - A.Args.pipe_req.ReqType.Sync.iSizeTotal = iNbrBytesToRead; - A.Args.pipe_req.ReqType.Sync.pData = pBuffer; + A.args.pipe_req.ReqInfo.pipe.id = Id; + A.args.pipe_req.ReqType.Sync.iSizeTotal = iNbrBytesToRead; + A.args.pipe_req.ReqType.Sync.pData = pBuffer; - _k_pipe_option_set(&A.Args, Option); - _k_pipe_request_type_set(&A.Args, _SYNCREQ); + _k_pipe_option_set(&A.args, Option); + _k_pipe_request_type_set(&A.args, _SYNCREQ); KERNEL_ENTRY(&A); - *piNbrBytesRead = A.Args.pipe_ack.iSizeXferred; + *piNbrBytesRead = A.args.pipe_ack.iSizeXferred; return A.Time.rcode; } @@ -152,16 +152,16 @@ int _task_pipe_put(kpipe_t Id, void *pBuffer, A.Comm = _K_SVC_PIPE_PUT_REQUEST; A.Time.ticks = TimeOut; - A.Args.pipe_req.ReqInfo.pipe.id = Id; - A.Args.pipe_req.ReqType.Sync.iSizeTotal = iNbrBytesToWrite; - A.Args.pipe_req.ReqType.Sync.pData = pBuffer; + A.args.pipe_req.ReqInfo.pipe.id = Id; + A.args.pipe_req.ReqType.Sync.iSizeTotal = iNbrBytesToWrite; + A.args.pipe_req.ReqType.Sync.pData = pBuffer; - _k_pipe_option_set(&A.Args, Option); - _k_pipe_request_type_set(&A.Args, _SYNCREQ); + _k_pipe_option_set(&A.args, Option); + _k_pipe_request_type_set(&A.args, _SYNCREQ); KERNEL_ENTRY(&A); - *piNbrBytesWritten = A.Args.pipe_ack.iSizeXferred; + *piNbrBytesWritten = A.args.pipe_ack.iSizeXferred; return A.Time.rcode; } @@ -198,13 +198,13 @@ int _task_pipe_block_put(kpipe_t Id, struct k_block Block, A.Time.ticks = TICKS_UNLIMITED; /* same behavior in flow as a blocking call w/o a timeout */ - A.Args.pipe_req.ReqInfo.pipe.id = Id; - A.Args.pipe_req.ReqType.Async.block = Block; - A.Args.pipe_req.ReqType.Async.iSizeTotal = iSize2Xfer; - A.Args.pipe_req.ReqType.Async.sema = Sema; + A.args.pipe_req.ReqInfo.pipe.id = Id; + A.args.pipe_req.ReqType.Async.block = Block; + A.args.pipe_req.ReqType.Async.iSizeTotal = iSize2Xfer; + A.args.pipe_req.ReqType.Async.sema = Sema; - _k_pipe_request_type_set(&A.Args, _ASYNCREQ); - _k_pipe_option_set(&A.Args, _ALL_N); /* force ALL_N */ + _k_pipe_request_type_set(&A.args, _ASYNCREQ); + _k_pipe_option_set(&A.args, _ALL_N); /* force ALL_N */ KERNEL_ENTRY(&A); return RC_OK; diff --git a/kernel/microkernel/k_pipe_get.c b/kernel/microkernel/k_pipe_get.c index a2766551cc7..16713711c53 100644 --- a/kernel/microkernel/k_pipe_get.c +++ b/kernel/microkernel/k_pipe_get.c @@ -48,7 +48,7 @@ void _k_pipe_get_request(struct k_args *RequestOrig) struct k_args *Request; struct k_args *RequestProc; - kpipe_t pipeId = RequestOrig->Args.pipe_req.ReqInfo.pipe.id; + kpipe_t pipeId = RequestOrig->args.pipe_req.ReqInfo.pipe.id; /* If it's a poster, then don't deschedule the task */ @@ -67,36 +67,36 @@ void _k_pipe_get_request(struct k_args *RequestOrig) */ mycopypacket(&RequestProc, Request); - RequestProc->Args.pipe_xfer_req.ReqInfo.pipe.ptr = + RequestProc->args.pipe_xfer_req.ReqInfo.pipe.ptr = (struct _k_pipe_struct *)pipeId; - switch (_k_pipe_request_type_get(&RequestProc->Args)) { + switch (_k_pipe_request_type_get(&RequestProc->args)) { case _SYNCREQ: - RequestProc->Args.pipe_xfer_req.pData = - Request->Args.pipe_req.ReqType.Sync.pData; - RequestProc->Args.pipe_xfer_req.iSizeTotal = - Request->Args.pipe_req.ReqType.Sync.iSizeTotal; + RequestProc->args.pipe_xfer_req.pData = + Request->args.pipe_req.ReqType.Sync.pData; + RequestProc->args.pipe_xfer_req.iSizeTotal = + Request->args.pipe_req.ReqType.Sync.iSizeTotal; break; default: break; } - 
RequestProc->Args.pipe_xfer_req.status = XFER_IDLE; - RequestProc->Args.pipe_xfer_req.iNbrPendXfers = 0; - RequestProc->Args.pipe_xfer_req.iSizeXferred = 0; + RequestProc->args.pipe_xfer_req.status = XFER_IDLE; + RequestProc->args.pipe_xfer_req.iNbrPendXfers = 0; + RequestProc->args.pipe_xfer_req.iSizeXferred = 0; RequestProc->next = NULL; RequestProc->Head = NULL; switch (RequestProc->Time.ticks) { case TICKS_NONE: - _k_pipe_time_type_set(&RequestProc->Args, _TIME_NB); + _k_pipe_time_type_set(&RequestProc->args, _TIME_NB); break; case TICKS_UNLIMITED: - _k_pipe_time_type_set(&RequestProc->Args, _TIME_B); + _k_pipe_time_type_set(&RequestProc->args, _TIME_B); break; default: - _k_pipe_time_type_set(&RequestProc->Args, _TIME_BT); + _k_pipe_time_type_set(&RequestProc->args, _TIME_BT); break; } @@ -104,7 +104,7 @@ void _k_pipe_get_request(struct k_args *RequestOrig) struct _k_pipe_struct *pPipe; - pPipe = RequestProc->Args.pipe_xfer_req.ReqInfo.pipe.ptr; + pPipe = RequestProc->args.pipe_xfer_req.ReqInfo.pipe.ptr; do { int iData2ReadFromWriters; @@ -128,7 +128,7 @@ void _k_pipe_get_request(struct k_args *RequestOrig) RequestProc->Time.ticks = ticks; /* check if request was processed */ - if (TERM_XXX & RequestProc->Args.pipe_xfer_req.status) { + if (TERM_XXX & RequestProc->args.pipe_xfer_req.status) { RequestProc->Time.timer = NULL; /* not really required */ return; /* not listed anymore --> completely processed */ } @@ -140,7 +140,7 @@ void _k_pipe_get_request(struct k_args *RequestOrig) * processing on the request */ - if (_TIME_NB != _k_pipe_time_type_get(&RequestProc->Args)) { + if (_TIME_NB != _k_pipe_time_type_get(&RequestProc->args)) { /* call is blocking */ INSERT_ELM(pPipe->Readers, RequestProc); /* @@ -149,7 +149,7 @@ void _k_pipe_get_request(struct k_args *RequestOrig) * is only useful to the finite timeout case. */ RequestProc->Comm = _K_SVC_PIPE_GET_TIMEOUT; - if (_TIME_B == _k_pipe_time_type_get(&RequestProc->Args)) { + if (_TIME_B == _k_pipe_time_type_get(&RequestProc->args)) { /* * The writer specified TICKS_UNLIMITED, so NULL the timer. 
*/ @@ -158,7 +158,7 @@ void _k_pipe_get_request(struct k_args *RequestOrig) } else { /* { TIME_BT } */ #ifdef CANCEL_TIMERS - if (RequestProc->Args.pipe_xfer_req.iSizeXferred != 0) { + if (RequestProc->args.pipe_xfer_req.iSizeXferred != 0) { RequestProc->Time.timer = NULL; } else #endif @@ -174,12 +174,12 @@ void _k_pipe_get_request(struct k_args *RequestOrig) */ RequestProc->Time.timer = NULL; - if (XFER_BUSY == RequestProc->Args.pipe_xfer_req.status) { + if (XFER_BUSY == RequestProc->args.pipe_xfer_req.status) { INSERT_ELM(pPipe->Readers, RequestProc); } else { __ASSERT_NO_MSG(XFER_IDLE == - RequestProc->Args.pipe_xfer_req.status); - __ASSERT_NO_MSG(0 == RequestProc->Args.pipe_xfer_req.iSizeXferred); + RequestProc->args.pipe_xfer_req.status); + __ASSERT_NO_MSG(0 == RequestProc->args.pipe_xfer_req.iSizeXferred); RequestProc->Comm = _K_SVC_PIPE_GET_REPLY; _k_pipe_get_reply(RequestProc); } @@ -199,10 +199,10 @@ void _k_pipe_get_timeout(struct k_args *ReqProc) __ASSERT_NO_MSG(NULL != ReqProc->Time.timer); myfreetimer(&(ReqProc->Time.timer)); - _k_pipe_request_status_set(&ReqProc->Args.pipe_xfer_req, TERM_TMO); + _k_pipe_request_status_set(&ReqProc->args.pipe_xfer_req, TERM_TMO); DeListWaiter(ReqProc); - if (0 == ReqProc->Args.pipe_xfer_req.iNbrPendXfers) { + if (0 == ReqProc->args.pipe_xfer_req.iNbrPendXfers) { _k_pipe_get_reply(ReqProc); } } @@ -217,7 +217,7 @@ void _k_pipe_get_timeout(struct k_args *ReqProc) void _k_pipe_get_reply(struct k_args *ReqProc) { __ASSERT_NO_MSG( - (0 == ReqProc->Args.pipe_xfer_req.iNbrPendXfers) /* no pending Xfers */ + (0 == ReqProc->args.pipe_xfer_req.iNbrPendXfers) /* no pending Xfers */ && (NULL == ReqProc->Time.timer) /* no pending timer */ && (NULL == ReqProc->Head)); /* not in list */ @@ -229,17 +229,17 @@ void _k_pipe_get_reply(struct k_args *ReqProc) /* determine return value */ - status = ReqProc->Args.pipe_xfer_req.status; + status = ReqProc->args.pipe_xfer_req.status; if (TERM_TMO == status) { ReqOrig->Time.rcode = RC_TIME; } else if ((TERM_XXX | XFER_IDLE) & status) { - K_PIPE_OPTION Option = _k_pipe_option_get(&ReqProc->Args); + K_PIPE_OPTION Option = _k_pipe_option_get(&ReqProc->args); - if (likely(ReqProc->Args.pipe_xfer_req.iSizeXferred == - ReqProc->Args.pipe_xfer_req.iSizeTotal)) { + if (likely(ReqProc->args.pipe_xfer_req.iSizeXferred == + ReqProc->args.pipe_xfer_req.iSizeTotal)) { /* All data has been transferred */ ReqOrig->Time.rcode = RC_OK; - } else if (ReqProc->Args.pipe_xfer_req.iSizeXferred != 0) { + } else if (ReqProc->args.pipe_xfer_req.iSizeXferred != 0) { /* Some but not all data has been transferred */ ReqOrig->Time.rcode = (Option == _ALL_N) ? 
RC_INCOMPLETE : RC_OK; @@ -252,8 +252,8 @@ void _k_pipe_get_reply(struct k_args *ReqProc) __ASSERT_NO_MSG(1 == 0); /* should not come here */ } - ReqOrig->Args.pipe_ack.iSizeXferred = - ReqProc->Args.pipe_xfer_req.iSizeXferred; + ReqOrig->args.pipe_ack.iSizeXferred = + ReqProc->args.pipe_xfer_req.iSizeXferred; SENDARGS(ReqOrig); FREEARGS(ReqProc); @@ -272,7 +272,7 @@ void _k_pipe_get_ack(struct k_args *Request) LocalReq = Request->Ctxt.args; LocalReq->Time.rcode = Request->Time.rcode; - LocalReq->Args.pipe_ack = Request->Args.pipe_ack; + LocalReq->args.pipe_ack = Request->args.pipe_ack; /* Reschedule the sender task */ diff --git a/kernel/microkernel/k_pipe_put.c b/kernel/microkernel/k_pipe_put.c index cd44937a776..84f88613392 100644 --- a/kernel/microkernel/k_pipe_put.c +++ b/kernel/microkernel/k_pipe_put.c @@ -49,11 +49,11 @@ void _k_pipe_put_request(struct k_args *RequestOrig) struct k_args *Request; struct k_args *RequestProc; - kpipe_t pipeId = RequestOrig->Args.pipe_req.ReqInfo.pipe.id; + kpipe_t pipeId = RequestOrig->args.pipe_req.ReqInfo.pipe.id; bool bAsync; - if (_ASYNCREQ == _k_pipe_request_type_get(&RequestOrig->Args)) { + if (_ASYNCREQ == _k_pipe_request_type_get(&RequestOrig->args)) { bAsync = true; } else { bAsync = false; @@ -79,41 +79,41 @@ void _k_pipe_put_request(struct k_args *RequestOrig) */ mycopypacket(&RequestProc, Request); - RequestProc->Args.pipe_xfer_req.ReqInfo.pipe.ptr = + RequestProc->args.pipe_xfer_req.ReqInfo.pipe.ptr = (struct _k_pipe_struct *)pipeId; - switch (_k_pipe_request_type_get(&RequestProc->Args)) { + switch (_k_pipe_request_type_get(&RequestProc->args)) { case _SYNCREQ: - RequestProc->Args.pipe_xfer_req.pData = - Request->Args.pipe_req.ReqType.Sync.pData; - RequestProc->Args.pipe_xfer_req.iSizeTotal = - Request->Args.pipe_req.ReqType.Sync.iSizeTotal; + RequestProc->args.pipe_xfer_req.pData = + Request->args.pipe_req.ReqType.Sync.pData; + RequestProc->args.pipe_xfer_req.iSizeTotal = + Request->args.pipe_req.ReqType.Sync.iSizeTotal; break; case _ASYNCREQ: - RequestProc->Args.pipe_xfer_req.pData = - Request->Args.pipe_req.ReqType.Async.block.pointer_to_data; - RequestProc->Args.pipe_xfer_req.iSizeTotal = - Request->Args.pipe_req.ReqType.Async.iSizeTotal; + RequestProc->args.pipe_xfer_req.pData = + Request->args.pipe_req.ReqType.Async.block.pointer_to_data; + RequestProc->args.pipe_xfer_req.iSizeTotal = + Request->args.pipe_req.ReqType.Async.iSizeTotal; break; default: break; } - RequestProc->Args.pipe_xfer_req.status = XFER_IDLE; - RequestProc->Args.pipe_xfer_req.iNbrPendXfers = 0; - RequestProc->Args.pipe_xfer_req.iSizeXferred = 0; + RequestProc->args.pipe_xfer_req.status = XFER_IDLE; + RequestProc->args.pipe_xfer_req.iNbrPendXfers = 0; + RequestProc->args.pipe_xfer_req.iSizeXferred = 0; RequestProc->next = NULL; RequestProc->Head = NULL; switch (RequestProc->Time.ticks) { case TICKS_NONE: - _k_pipe_time_type_set(&RequestProc->Args, _TIME_NB); + _k_pipe_time_type_set(&RequestProc->args, _TIME_NB); break; case TICKS_UNLIMITED: - _k_pipe_time_type_set(&RequestProc->Args, _TIME_B); + _k_pipe_time_type_set(&RequestProc->args, _TIME_B); break; default: - _k_pipe_time_type_set(&RequestProc->Args, _TIME_BT); + _k_pipe_time_type_set(&RequestProc->args, _TIME_BT); break; } @@ -121,7 +121,7 @@ void _k_pipe_put_request(struct k_args *RequestOrig) struct _k_pipe_struct *pPipe; - pPipe = RequestProc->Args.pipe_xfer_req.ReqInfo.pipe.ptr; + pPipe = RequestProc->args.pipe_xfer_req.ReqInfo.pipe.ptr; do { int iSpace2WriteinReaders; @@ -147,7 +147,7 @@ void 
_k_pipe_put_request(struct k_args *RequestOrig) /* check if request was processed */ - if (TERM_XXX & RequestProc->Args.pipe_xfer_req.status) { + if (TERM_XXX & RequestProc->args.pipe_xfer_req.status) { RequestProc->Time.timer = NULL; /* not really required */ return; /* not listed anymore --> completely processed */ } @@ -159,7 +159,7 @@ void _k_pipe_put_request(struct k_args *RequestOrig) */ if (_TIME_NB != - _k_pipe_time_type_get(&RequestProc->Args)) { + _k_pipe_time_type_get(&RequestProc->args)) { /* call is blocking */ INSERT_ELM(pPipe->Writers, RequestProc); /* @@ -168,7 +168,7 @@ void _k_pipe_put_request(struct k_args *RequestOrig) * is only useful to the finite timeout case. */ RequestProc->Comm = _K_SVC_PIPE_PUT_TIMEOUT; - if (_TIME_B == _k_pipe_time_type_get(&RequestProc->Args)) { + if (_TIME_B == _k_pipe_time_type_get(&RequestProc->args)) { /* * The writer specified TICKS_UNLIMITED; NULL the timer. */ @@ -177,7 +177,7 @@ void _k_pipe_put_request(struct k_args *RequestOrig) } else { /* { TIME_BT } */ #ifdef CANCEL_TIMERS - if (RequestProc->Args.pipe_xfer_req.iSizeXferred != 0) { + if (RequestProc->args.pipe_xfer_req.iSizeXferred != 0) { RequestProc->Time.timer = NULL; } else #endif @@ -193,12 +193,12 @@ void _k_pipe_put_request(struct k_args *RequestOrig) */ RequestProc->Time.timer = NULL; - if (XFER_BUSY == RequestProc->Args.pipe_xfer_req.status) { + if (XFER_BUSY == RequestProc->args.pipe_xfer_req.status) { INSERT_ELM(pPipe->Writers, RequestProc); } else { __ASSERT_NO_MSG(XFER_IDLE == - RequestProc->Args.pipe_xfer_req.status); - __ASSERT_NO_MSG(0 == RequestProc->Args.pipe_xfer_req.iSizeXferred); + RequestProc->args.pipe_xfer_req.status); + __ASSERT_NO_MSG(0 == RequestProc->args.pipe_xfer_req.iSizeXferred); RequestProc->Comm = _K_SVC_PIPE_PUT_REPLY; _k_pipe_put_reply(RequestProc); } @@ -218,10 +218,10 @@ void _k_pipe_put_timeout(struct k_args *ReqProc) __ASSERT_NO_MSG(NULL != ReqProc->Time.timer); myfreetimer(&(ReqProc->Time.timer)); - _k_pipe_request_status_set(&ReqProc->Args.pipe_xfer_req, TERM_TMO); + _k_pipe_request_status_set(&ReqProc->args.pipe_xfer_req, TERM_TMO); DeListWaiter(ReqProc); - if (0 == ReqProc->Args.pipe_xfer_req.iNbrPendXfers) { + if (0 == ReqProc->args.pipe_xfer_req.iNbrPendXfers) { _k_pipe_put_reply(ReqProc); } } @@ -236,7 +236,7 @@ void _k_pipe_put_timeout(struct k_args *ReqProc) void _k_pipe_put_reply(struct k_args *ReqProc) { __ASSERT_NO_MSG( - 0 == ReqProc->Args.pipe_xfer_req.iNbrPendXfers /* no pending Xfers */ + 0 == ReqProc->args.pipe_xfer_req.iNbrPendXfers /* no pending Xfers */ && NULL == ReqProc->Time.timer /* no pending timer */ && NULL == ReqProc->Head); /* not in list */ @@ -249,17 +249,17 @@ void _k_pipe_put_reply(struct k_args *ReqProc) /* determine return value: */ - status = ReqProc->Args.pipe_xfer_req.status; + status = ReqProc->args.pipe_xfer_req.status; if (unlikely(TERM_TMO == status)) { ReqOrig->Time.rcode = RC_TIME; } else if ((TERM_XXX | XFER_IDLE) & status) { - K_PIPE_OPTION Option = _k_pipe_option_get(&ReqProc->Args); + K_PIPE_OPTION Option = _k_pipe_option_get(&ReqProc->args); - if (likely(ReqProc->Args.pipe_xfer_req.iSizeXferred == - ReqProc->Args.pipe_xfer_req.iSizeTotal)) { + if (likely(ReqProc->args.pipe_xfer_req.iSizeXferred == + ReqProc->args.pipe_xfer_req.iSizeTotal)) { /* All data has been transferred */ ReqOrig->Time.rcode = RC_OK; - } else if (ReqProc->Args.pipe_xfer_req.iSizeXferred != 0) { + } else if (ReqProc->args.pipe_xfer_req.iSizeXferred != 0) { /* Some but not all data has been transferred */ ReqOrig->Time.rcode 
= (Option == _ALL_N) ? RC_INCOMPLETE : RC_OK; } else { @@ -270,9 +270,9 @@ void _k_pipe_put_reply(struct k_args *ReqProc) /* unknown (invalid) status */ __ASSERT_NO_MSG(1 == 0); /* should not come here */ } - if (_ASYNCREQ != _k_pipe_request_type_get(&ReqOrig->Args)) { - ReqOrig->Args.pipe_ack.iSizeXferred = - ReqProc->Args.pipe_xfer_req.iSizeXferred; + if (_ASYNCREQ != _k_pipe_request_type_get(&ReqOrig->args)) { + ReqOrig->args.pipe_ack.iSizeXferred = + ReqProc->args.pipe_xfer_req.iSizeXferred; } SENDARGS(ReqOrig); @@ -289,18 +289,18 @@ void _k_pipe_put_reply(struct k_args *ReqProc) void _k_pipe_put_ack(struct k_args *Request) { - if (_ASYNCREQ == _k_pipe_request_type_get(&Request->Args)) { - struct _pipe_ack_arg *pipe_ack = &Request->Args.pipe_ack; + if (_ASYNCREQ == _k_pipe_request_type_get(&Request->args)) { + struct _pipe_ack_arg *pipe_ack = &Request->args.pipe_ack; struct k_args A; struct k_block *blockptr; /* invoke command to release block */ blockptr = &pipe_ack->ReqType.Async.block; A.Comm = _K_SVC_MEM_POOL_BLOCK_RELEASE; - A.Args.p1.pool_id = blockptr->pool_id; - A.Args.p1.req_size = blockptr->req_size; - A.Args.p1.rep_poolptr = blockptr->address_in_pool; - A.Args.p1.rep_dataptr = blockptr->pointer_to_data; + A.args.p1.pool_id = blockptr->pool_id; + A.args.p1.req_size = blockptr->req_size; + A.args.p1.rep_poolptr = blockptr->address_in_pool; + A.args.p1.rep_dataptr = blockptr->pointer_to_data; _k_mem_pool_block_release(&A); /* will return immediately */ if ((ksem_t)NULL != pipe_ack->ReqType.Async.sema) { @@ -308,7 +308,7 @@ void _k_pipe_put_ack(struct k_args *Request) struct k_args A; A.Comm = _K_SVC_SEM_SIGNAL; - A.Args.s1.sema = pipe_ack->ReqType.Async.sema; + A.args.s1.sema = pipe_ack->ReqType.Async.sema; _k_sem_signal(&A); /* will return immediately */ } } else { @@ -317,7 +317,7 @@ void _k_pipe_put_ack(struct k_args *Request) LocalReq = Request->Ctxt.args; LocalReq->Time.rcode = Request->Time.rcode; - LocalReq->Args.pipe_ack = Request->Args.pipe_ack; + LocalReq->args.pipe_ack = Request->args.pipe_ack; _k_state_bit_reset(LocalReq->Ctxt.task, TF_SEND | TF_SENDDATA); } diff --git a/kernel/microkernel/k_pipe_util.c b/kernel/microkernel/k_pipe_util.c index 1fc1b32276c..afd7f647f1a 100644 --- a/kernel/microkernel/k_pipe_util.c +++ b/kernel/microkernel/k_pipe_util.c @@ -71,8 +71,8 @@ int CalcFreeReaderSpace(struct k_args *pReaderList) if (pReaderList) { struct k_args *pReader = pReaderList; while (pReader != NULL) { - iSize += (pReader->Args.pipe_xfer_req.iSizeTotal - - pReader->Args.pipe_xfer_req.iSizeXferred); + iSize += (pReader->args.pipe_xfer_req.iSizeTotal - + pReader->args.pipe_xfer_req.iSizeXferred); pReader = pReader->next; } } @@ -86,8 +86,8 @@ int CalcAvailWriterData(struct k_args *pWriterList) if (pWriterList) { struct k_args *pWriter = pWriterList; while (pWriter != NULL) { - iSize += (pWriter->Args.pipe_xfer_req.iSizeTotal - - pWriter->Args.pipe_xfer_req.iSizeXferred); + iSize += (pWriter->args.pipe_xfer_req.iSizeTotal - + pWriter->args.pipe_xfer_req.iSizeXferred); pWriter = pWriter->next; } } diff --git a/kernel/microkernel/k_pipe_xfer.c b/kernel/microkernel/k_pipe_xfer.c index 50015b1d318..1f202c014e5 100644 --- a/kernel/microkernel/k_pipe_xfer.c +++ b/kernel/microkernel/k_pipe_xfer.c @@ -63,7 +63,7 @@ possibly copy the remaining data void _k_pipe_movedata_ack(struct k_args *pEOXfer) { - struct _pipe_xfer_ack_arg *pipe_xfer_ack = &pEOXfer->Args.pipe_xfer_ack; + struct _pipe_xfer_ack_arg *pipe_xfer_ack = &pEOXfer->args.pipe_xfer_ack; switch (pipe_xfer_ack->XferType) 
{
 	case XFER_W2B: /* Writer to Buffer */
@@ -72,7 +72,7 @@ void _k_pipe_movedata_ack(struct k_args *pEOXfer)
 		if (pWriter) {
 			/* Xfer from Writer finished */
 			struct _pipe_xfer_req_arg *pipe_write_req =
-				&pipe_xfer_ack->pWriter->Args.pipe_xfer_req;
+				&pipe_xfer_ack->pWriter->args.pipe_xfer_req;
 
 			--pipe_write_req->iNbrPendXfers;
 			if (0 == pipe_write_req->iNbrPendXfers) {
@@ -112,7 +112,7 @@ void _k_pipe_movedata_ack(struct k_args *pEOXfer)
 		if (pReader) {
 			/* Xfer to Reader finished */
 			struct _pipe_xfer_req_arg *pipe_read_req =
-				&pipe_xfer_ack->pReader->Args.pipe_xfer_req;
+				&pipe_xfer_ack->pReader->args.pipe_xfer_req;
 
 			--pipe_read_req->iNbrPendXfers;
 			if (0 == pipe_read_req->iNbrPendXfers) {
@@ -152,7 +152,7 @@ void _k_pipe_movedata_ack(struct k_args *pEOXfer)
 		if (pWriter) {
 			/* Transfer from writer finished */
 			struct _pipe_xfer_req_arg *pipe_write_req =
-				&pipe_xfer_ack->pWriter->Args.pipe_xfer_req;
+				&pipe_xfer_ack->pWriter->args.pipe_xfer_req;
 
 			--pipe_write_req->iNbrPendXfers;
 			if (0 == pipe_write_req->iNbrPendXfers) {
@@ -174,7 +174,7 @@ void _k_pipe_movedata_ack(struct k_args *pEOXfer)
 
 		/* Transfer to Reader finished */
 		struct _pipe_xfer_req_arg *pipe_read_req =
-			&pipe_xfer_ack->pReader->Args.pipe_xfer_req;
+			&pipe_xfer_ack->pReader->args.pipe_xfer_req;
 
 		--pipe_read_req->iNbrPendXfers;
 		if (0 == pipe_read_req->iNbrPendXfers) {
@@ -255,10 +255,10 @@ static void setup_movedata(struct k_args *A,
 	A->Ctxt.task = NULL;
 	/* this caused problems when != NULL related to set/reset of state bits */
 
-	A->Args.MovedReq.Action = (MovedAction)(MVDACT_SNDACK | MVDACT_RCVACK);
-	A->Args.MovedReq.source = source;
-	A->Args.MovedReq.destination = destination;
-	A->Args.MovedReq.iTotalSize = size;
+	A->args.MovedReq.Action = (MovedAction)(MVDACT_SNDACK | MVDACT_RCVACK);
+	A->args.MovedReq.source = source;
+	A->args.MovedReq.destination = destination;
+	A->args.MovedReq.iTotalSize = size;
 
 	/* continuation packet */
 
@@ -267,17 +267,17 @@ static void setup_movedata(struct k_args *A,
 	pContSend->next = NULL;
 	pContSend->Comm = _K_SVC_PIPE_MOVEDATA_ACK;
-	pContSend->Args.pipe_xfer_ack.pPipe = pPipe;
-	pContSend->Args.pipe_xfer_ack.XferType = XferType;
-	pContSend->Args.pipe_xfer_ack.ID = XferID;
-	pContSend->Args.pipe_xfer_ack.iSize = size;
+	pContSend->args.pipe_xfer_ack.pPipe = pPipe;
+	pContSend->args.pipe_xfer_ack.XferType = XferType;
+	pContSend->args.pipe_xfer_ack.ID = XferID;
+	pContSend->args.pipe_xfer_ack.iSize = size;
 
 	pContRecv->next = NULL;
 	pContRecv->Comm = _K_SVC_PIPE_MOVEDATA_ACK;
-	pContRecv->Args.pipe_xfer_ack.pPipe = pPipe;
-	pContRecv->Args.pipe_xfer_ack.XferType = XferType;
-	pContRecv->Args.pipe_xfer_ack.ID = XferID;
-	pContRecv->Args.pipe_xfer_ack.iSize = size;
+	pContRecv->args.pipe_xfer_ack.pPipe = pPipe;
+	pContRecv->args.pipe_xfer_ack.XferType = XferType;
+	pContRecv->args.pipe_xfer_ack.ID = XferID;
+	pContRecv->args.pipe_xfer_ack.iSize = size;
 
 	A->priority = move_priority_compute(pWriter, pReader);
 	pContSend->priority = A->priority;
@@ -287,30 +287,30 @@ static void setup_movedata(struct k_args *A,
 	case XFER_W2B: /* Writer to Buffer */
 	{
 		__ASSERT_NO_MSG(NULL == pReader);
-		pContSend->Args.pipe_xfer_ack.pWriter = pWriter;
-		pContRecv->Args.pipe_xfer_ack.pWriter = NULL;
+		pContSend->args.pipe_xfer_ack.pWriter = pWriter;
+		pContRecv->args.pipe_xfer_ack.pWriter = NULL;
 		break;
 	}
 	case XFER_B2R:
 	{
 		__ASSERT_NO_MSG(NULL == pWriter);
-		pContSend->Args.pipe_xfer_ack.pReader = NULL;
-		pContRecv->Args.pipe_xfer_ack.pReader = pReader;
+		pContSend->args.pipe_xfer_ack.pReader = NULL;
+		pContRecv->args.pipe_xfer_ack.pReader = pReader;
 		break;
 	}
 	case XFER_W2R:
 	{
 		__ASSERT_NO_MSG(NULL != pWriter && NULL != pReader);
-		pContSend->Args.pipe_xfer_ack.pWriter = pWriter;
-		pContSend->Args.pipe_xfer_ack.pReader = NULL;
-		pContRecv->Args.pipe_xfer_ack.pWriter = NULL;
-		pContRecv->Args.pipe_xfer_ack.pReader = pReader;
+		pContSend->args.pipe_xfer_ack.pWriter = pWriter;
+		pContSend->args.pipe_xfer_ack.pReader = NULL;
+		pContRecv->args.pipe_xfer_ack.pWriter = NULL;
+		pContRecv->args.pipe_xfer_ack.pReader = pReader;
 		break;
 	}
 	default:
 		__ASSERT_NO_MSG(1 == 0); /* we should not come here */
 	}
 
-	A->Args.MovedReq.Extra.Setup.ContSnd = pContSend;
-	A->Args.MovedReq.Extra.Setup.ContRcv = pContRecv;
+	A->args.MovedReq.Extra.Setup.ContSnd = pContSend;
+	A->args.MovedReq.Extra.Setup.ContRcv = pContRecv;
 
 	/*
 	 * (possible optimisation)
@@ -330,11 +330,11 @@ static int ReaderInProgressIsBlocked(struct _k_pipe_struct *pPipe,
 
 	/* first condition: request cannot wait any longer: must be -
 	 * (non-blocked) or a finite timed wait with a killed timer */
-	TimeType = _k_pipe_time_type_get(&pReader->Args);
-	option = _k_pipe_option_get(&pReader->Args);
+	TimeType = _k_pipe_time_type_get(&pReader->args);
+	option = _k_pipe_option_get(&pReader->args);
 	if (((_TIME_B == TimeType) && (_ALL_N == option)) ||
 	    ((_TIME_B == TimeType) && (_X_TO_N & option) &&
-	     !(pReader->Args.pipe_xfer_req.iSizeXferred))
+	     !(pReader->args.pipe_xfer_req.iSizeXferred))
 #ifdef CANCEL_TIMERS
 	    || ((_TIME_BT == TimeType) && pReader->Time.timer)
 #endif
@@ -355,8 +355,8 @@ static int ReaderInProgressIsBlocked(struct _k_pipe_struct *pPipe,
 	/* third condition: */
 
 	iSizeSpaceInReader =
-		pReader->Args.pipe_xfer_req.iSizeTotal -
-		pReader->Args.pipe_xfer_req.iSizeXferred;
+		pReader->args.pipe_xfer_req.iSizeTotal -
+		pReader->args.pipe_xfer_req.iSizeXferred;
 	BuffGetAvailDataTotal(&pPipe->desc, &iAvailBufferData);
 	if (iAvailBufferData >= iSizeSpaceInReader) {
 		return 0;
@@ -376,11 +376,11 @@ static int WriterInProgressIsBlocked(struct _k_pipe_struct *pPipe,
 
 	/* first condition: request cannot wait any longer: must be -
 	 * (non-blocked) or a finite timed wait with a killed timer */
-	TimeType = _k_pipe_time_type_get(&pWriter->Args);
-	option = _k_pipe_option_get(&pWriter->Args);
+	TimeType = _k_pipe_time_type_get(&pWriter->args);
+	option = _k_pipe_option_get(&pWriter->args);
 	if (((_TIME_B == TimeType) && (_ALL_N == option)) ||
 	    ((_TIME_B == TimeType) && (_X_TO_N & option) &&
-	     !(pWriter->Args.pipe_xfer_req.iSizeXferred))
+	     !(pWriter->args.pipe_xfer_req.iSizeXferred))
#ifdef CANCEL_TIMERS
 	    || ((_TIME_BT == TimeType) && pWriter->Time.timer)
#endif
@@ -401,8 +401,8 @@ static int WriterInProgressIsBlocked(struct _k_pipe_struct *pPipe,
 	/* third condition: */
 
 	iSizeDataInWriter =
-		pWriter->Args.pipe_xfer_req.iSizeTotal -
-		pWriter->Args.pipe_xfer_req.iSizeXferred;
+		pWriter->args.pipe_xfer_req.iSizeTotal -
+		pWriter->args.pipe_xfer_req.iSizeXferred;
 	BuffGetFreeSpaceTotal(&pPipe->desc, &iFreeBufferSpace);
 	if (iFreeBufferSpace >= iSizeDataInWriter) {
 		return 0;
@@ -438,7 +438,7 @@ static void pipe_read(struct _k_pipe_struct *pPipe, struct k_args *pNewReader)
 	__ASSERT_NO_MSG((pPipe->Readers == pNewReader) ||
 			(NULL == pPipe->Readers) || (NULL == pNewReader));
 
-	pipe_read_req = &pReader->Args.pipe_xfer_req;
+	pipe_read_req = &pReader->args.pipe_xfer_req;
 
 	do {
 		iSize = min(pPipe->desc.iAvailDataCont,
@@ -507,7 +507,7 @@ static void pipe_write(struct _k_pipe_struct *pPipe, struct k_args *pNewWriter)
 	__ASSERT_NO_MSG(!((pPipe->Writers != pNewWriter) &&
			  (NULL != pPipe->Writers) && (NULL != pNewWriter)));
 
-	pipe_write_req = &pWriter->Args.pipe_xfer_req;
+	pipe_write_req = &pWriter->args.pipe_xfer_req;
 
 	do {
 		iSize = min((numIterations == 2) ? pPipe->desc.iFreeSpaceCont
@@ -611,8 +611,8 @@ static void pipe_read_write(
 			(NULL == pPipe->Readers) || (NULL == pNewReader));
 
 	/* Preparation */
-	pipe_write_req = &pWriter->Args.pipe_xfer_req;
-	pipe_read_req = &pReader->Args.pipe_xfer_req;
+	pipe_write_req = &pWriter->args.pipe_xfer_req;
+	pipe_read_req = &pReader->args.pipe_xfer_req;
 
 	/* Calculate iT1, iT2 and iT3 */
 	int iFreeSpaceReader =
@@ -660,7 +660,7 @@ static void pipe_read_write(
 	if (iT2 != 0) {
 		struct k_args *Moved_req;
 
-		__ASSERT_NO_MSG(TERM_SATISFIED != pReader->Args.pipe_xfer_req.status);
+		__ASSERT_NO_MSG(TERM_SATISFIED != pReader->args.pipe_xfer_req.status);
 
 		GETARGS(Moved_req);
 		setup_movedata(Moved_req, pPipe, XFER_W2R, pWriter, pReader,
@@ -679,7 +679,7 @@ static void pipe_read_write(
 
 	/* T3 transfer */
 	if (iT3 != 0) {
-		__ASSERT_NO_MSG(TERM_SATISFIED != pWriter->Args.pipe_xfer_req.status);
+		__ASSERT_NO_MSG(TERM_SATISFIED != pWriter->args.pipe_xfer_req.status);
 		pipe_write(pPipe, pWriter);
 	}
 }
@@ -709,12 +709,12 @@ void _k_pipe_process(struct _k_pipe_struct *pPipe, struct k_args *pNLWriter,
 		if (pReader != pNLReader) {
 			pNextReader = pPipe->Readers;
 			if (NULL == pNextReader) {
-				if (!(TERM_XXX & pNLReader->Args.pipe_xfer_req.status))
+				if (!(TERM_XXX & pNLReader->args.pipe_xfer_req.status))
 					pNextReader = pNLReader;
 			}
 		} else {
 			/* we already used the extra non-listed Reader */
-			if (TERM_XXX & pReader->Args.pipe_xfer_req.status) {
+			if (TERM_XXX & pReader->args.pipe_xfer_req.status) {
 				pNextReader = NULL;
 			} else {
 				pNextReader = pReader; /* == pNLReader */
@@ -730,12 +730,12 @@ void _k_pipe_process(struct _k_pipe_struct *pPipe, struct k_args *pNLWriter,
 		if (pWriter != pNLWriter) {
 			pNextWriter = pPipe->Writers;
 			if (NULL == pNextWriter) {
-				if (!(TERM_XXX & pNLWriter->Args.pipe_xfer_req.status))
+				if (!(TERM_XXX & pNLWriter->args.pipe_xfer_req.status))
 					pNextWriter = pNLWriter;
 			}
 		} else {
 			/* we already used the extra non-listed Writer */
-			if (TERM_XXX & pWriter->Args.pipe_xfer_req.status) {
+			if (TERM_XXX & pWriter->args.pipe_xfer_req.status) {
 				pNextWriter = NULL;
 			} else {
 				pNextWriter = pWriter;
@@ -758,9 +758,9 @@ void _k_pipe_process(struct _k_pipe_struct *pPipe, struct k_args *pNLWriter,
 		pWriter = pNextWriter;
 
 		if (pWriter) {
-			if (_ALL_N == _k_pipe_option_get(&pWriter->Args) &&
-			    (pWriter->Args.pipe_xfer_req.iSizeXferred == 0) &&
-			    _TIME_B != _k_pipe_time_type_get(&pWriter->Args)) {
+			if (_ALL_N == _k_pipe_option_get(&pWriter->args) &&
+			    (pWriter->args.pipe_xfer_req.iSizeXferred == 0) &&
+			    _TIME_B != _k_pipe_time_type_get(&pWriter->args)) {
 				/* investigate if there is a problem for
 				 * his request to be satisfied
 				 */
@@ -772,14 +772,14 @@ void _k_pipe_process(struct _k_pipe_struct *pPipe, struct k_args *pNLWriter,
 				iSpace2WriteinReaders = CalcFreeReaderSpace(pPipe->Readers);
 				if (pNLReader)
 					iSpace2WriteinReaders +=
-						(pNLReader->Args.pipe_xfer_req.iSizeTotal -
-						 pNLReader->Args.pipe_xfer_req.iSizeXferred);
+						(pNLReader->args.pipe_xfer_req.iSizeTotal -
+						 pNLReader->args.pipe_xfer_req.iSizeXferred);
 				BuffGetFreeSpaceTotal(&pPipe->desc, &iFreeBufferSpace);
 				iTotalSpace2Write = iFreeBufferSpace + iSpace2WriteinReaders;
 				iSizeDataInWriter =
-					pWriter->Args.pipe_xfer_req.iSizeTotal -
-					pWriter->Args.pipe_xfer_req.iSizeXferred;
+					pWriter->args.pipe_xfer_req.iSizeTotal -
+					pWriter->args.pipe_xfer_req.iSizeXferred;
 				if (iSizeDataInWriter > iTotalSpace2Write) {
 					bALLNWriterNoGo = true;
@@ -787,9 +787,9 @@ void _k_pipe_process(struct _k_pipe_struct *pPipe, struct k_args *pNLWriter,
 			}
 		}
 		if (pReader) {
-			if (_ALL_N == _k_pipe_option_get(&pReader->Args) &&
-			    (pReader->Args.pipe_xfer_req.iSizeXferred == 0) &&
-			    _TIME_B != _k_pipe_time_type_get(&pReader->Args)) {
+			if (_ALL_N == _k_pipe_option_get(&pReader->args) &&
+			    (pReader->args.pipe_xfer_req.iSizeXferred == 0) &&
+			    _TIME_B != _k_pipe_time_type_get(&pReader->args)) {
 				/* investigate if there is a problem for
 				 * his request to be satisfied
 				 */
@@ -801,13 +801,13 @@ void _k_pipe_process(struct _k_pipe_struct *pPipe, struct k_args *pNLWriter,
 				iData2ReadFromWriters = CalcAvailWriterData(pPipe->Writers);
 				if (pNLWriter)
 					iData2ReadFromWriters +=
-						(pNLWriter->Args.pipe_xfer_req.iSizeTotal -
-						 pNLWriter->Args.pipe_xfer_req.iSizeXferred);
+						(pNLWriter->args.pipe_xfer_req.iSizeTotal -
+						 pNLWriter->args.pipe_xfer_req.iSizeXferred);
 				BuffGetAvailDataTotal(&pPipe->desc, &iAvailBufferData);
 				iTotalData2Read = iAvailBufferData + iData2ReadFromWriters;
 				iSizeFreeSpaceInReader =
-					pReader->Args.pipe_xfer_req.iSizeTotal -
-					pReader->Args.pipe_xfer_req.iSizeXferred;
+					pReader->args.pipe_xfer_req.iSizeTotal -
+					pReader->args.pipe_xfer_req.iSizeXferred;
 
 				if (iSizeFreeSpaceInReader > iTotalData2Read) {
 					bALLNReaderNoGo = true;
@@ -835,7 +835,7 @@ void _k_pipe_process(struct _k_pipe_struct *pPipe, struct k_args *pNLWriter,
 			} else {
#ifdef FORCE_XFER_ON_STALL
 				if (pReader && (_TIME_NB !=
-						_k_pipe_time_type_get(&pWriter->Args))) {
+						_k_pipe_time_type_get(&pWriter->args))) {
 					/* force transfer
 					   (we make exception for non-blocked writer) */
 					pipe_read_write(pPipe, pWriter, pReader);
@@ -858,7 +858,7 @@ void _k_pipe_process(struct _k_pipe_struct *pPipe, struct k_args *pNLWriter,
 			} else {
#ifdef FORCE_XFER_ON_STALL
 				if (pWriter && (_TIME_NB !=
-						_k_pipe_time_type_get(&pReader->Args))) {
+						_k_pipe_time_type_get(&pReader->args))) {
 					/* force transfer
 					   (we make exception for non-blocked reader) */
 					pipe_read_write(pPipe, pWriter, pReader);
@@ -927,8 +927,8 @@ void _k_pipe_process(struct _k_pipe_struct *pPipe, struct k_args *pNLWriter,
 	   processing is really blocked (for some reason) */
 
 	if (pReader && pWriter) {
-		__ASSERT_NO_MSG(!(TERM_XXX & pReader->Args.pipe_xfer_req.status) &&
-				!(TERM_XXX & pWriter->Args.pipe_xfer_req.status));
+		__ASSERT_NO_MSG(!(TERM_XXX & pReader->args.pipe_xfer_req.status) &&
+				!(TERM_XXX & pWriter->args.pipe_xfer_req.status));
 		/* this could be possible when data Xfer operations
 		   are jammed (out of data Xfer resources e.g.)
 		 */
@@ -949,19 +949,19 @@ void _k_pipe_process(struct _k_pipe_struct *pPipe, struct k_args *pNLWriter,
 		 */
 		;
 	} else if (pReader) {
-		__ASSERT_NO_MSG(!(TERM_XXX & pReader->Args.pipe_xfer_req.status));
+		__ASSERT_NO_MSG(!(TERM_XXX & pReader->args.pipe_xfer_req.status));
 
 		/* check if this lonely reader is really blocked, then we will
 		   delist him (if he was listed uberhaupt) == EMERGENCY BREAK */
 		if (ReaderInProgressIsBlocked(pPipe, pReader)) {
-			if (_X_TO_N & _k_pipe_option_get(&pReader->Args) &&
-			    (pReader->Args.pipe_xfer_req.iSizeXferred != 0)) {
-				_k_pipe_request_status_set(&pReader->Args.pipe_xfer_req,
+			if (_X_TO_N & _k_pipe_option_get(&pReader->args) &&
+			    (pReader->args.pipe_xfer_req.iSizeXferred != 0)) {
+				_k_pipe_request_status_set(&pReader->args.pipe_xfer_req,
 							   TERM_SATISFIED);
 			} else {
 				/* in all other cases: forced termination */
-				_k_pipe_request_status_set(&pReader->Args.pipe_xfer_req,
+				_k_pipe_request_status_set(&pReader->args.pipe_xfer_req,
 							   TERM_FORCED);
 			}
 
@@ -969,7 +969,7 @@ void _k_pipe_process(struct _k_pipe_struct *pPipe, struct k_args *pNLWriter,
 				DeListWaiter(pReader);
 				myfreetimer(&(pReader->Time.timer));
 			}
-			if (0 == pReader->Args.pipe_xfer_req.iNbrPendXfers) {
+			if (0 == pReader->args.pipe_xfer_req.iNbrPendXfers) {
 				pReader->Comm = _K_SVC_PIPE_GET_REPLY;
 				/* if terminated and no pending Xfers anymore,
 				   we have to reply */
@@ -980,18 +980,18 @@ void _k_pipe_process(struct _k_pipe_struct *pPipe, struct k_args *pNLWriter,
 			 * later on) */
 		}
 	} else if (pWriter) {
-		__ASSERT_NO_MSG(!(TERM_SATISFIED & pWriter->Args.pipe_xfer_req.status));
+		__ASSERT_NO_MSG(!(TERM_SATISFIED & pWriter->args.pipe_xfer_req.status));
 
 		/* check if this lonely Writer is really blocked, then we will
 		   delist him (if he was listed uberhaupt) == EMERGENCY BREAK */
 		if (WriterInProgressIsBlocked(pPipe, pWriter)) {
-			if (_X_TO_N & _k_pipe_option_get(&pWriter->Args) &&
-			    (pWriter->Args.pipe_xfer_req.iSizeXferred != 0)) {
-				_k_pipe_request_status_set(&pWriter->Args.pipe_xfer_req,
+			if (_X_TO_N & _k_pipe_option_get(&pWriter->args) &&
+			    (pWriter->args.pipe_xfer_req.iSizeXferred != 0)) {
+				_k_pipe_request_status_set(&pWriter->args.pipe_xfer_req,
 							   TERM_SATISFIED);
 			} else {
 				/* in all other cases: forced termination */
-				_k_pipe_request_status_set(&pWriter->Args.pipe_xfer_req,
+				_k_pipe_request_status_set(&pWriter->args.pipe_xfer_req,
 							   TERM_FORCED);
 			}
 
@@ -999,7 +999,7 @@ void _k_pipe_process(struct _k_pipe_struct *pPipe, struct k_args *pNLWriter,
 				DeListWaiter(pWriter);
 				myfreetimer(&(pWriter->Time.timer));
 			}
-			if (0 == pWriter->Args.pipe_xfer_req.iNbrPendXfers) {
+			if (0 == pWriter->args.pipe_xfer_req.iNbrPendXfers) {
 				pWriter->Comm = _K_SVC_PIPE_PUT_REPLY;
 				/* if terminated and no pending Xfers anymore,
 				   we have to reply */
@@ -1019,7 +1019,7 @@ void _k_pipe_process(struct _k_pipe_struct *pPipe, struct k_args *pNLWriter,
 
#ifdef CANCEL_TIMERS
 	if (pReader) {
-		if (pReader->Args.pipe_xfer_req.iSizeXferred != 0) {
+		if (pReader->args.pipe_xfer_req.iSizeXferred != 0) {
 			if (pReader->Head) {
 				myfreetimer(&(pReader->Time.timer));
 				/* do not delist however */
@@ -1027,7 +1027,7 @@ void _k_pipe_process(struct _k_pipe_struct *pPipe, struct k_args *pNLWriter,
 		}
 	}
 	if (pWriter) {
-		if (pWriter->Args.pipe_xfer_req.iSizeXferred != 0) {
+		if (pWriter->args.pipe_xfer_req.iSizeXferred != 0) {
 			if (pWriter->Head) {
 				myfreetimer(&(pWriter->Time.timer));
 				/* do not delist however */
diff --git a/kernel/microkernel/k_semaphore.c b/kernel/microkernel/k_semaphore.c
index cf240131c14..5f74ca8523f 100644
--- a/kernel/microkernel/k_semaphore.c
+++ b/kernel/microkernel/k_semaphore.c
@@ -103,14 +103,14 @@ void _k_sem_group_wait(struct k_args *R)
 	struct k_args *A = R->Ctxt.args;
 
 	FREEARGS(R);
-	if (--(A->Args.s1.nsem) == 0) {
+	if (--(A->args.s1.nsem) == 0) {
 		_k_state_bit_reset(A->Ctxt.task, TF_LIST);
 	}
 }
 
 void _k_sem_group_wait_cancel(struct k_args *A)
 {
-	struct _k_sem_struct *S = (struct _k_sem_struct *)A->Args.s1.sema;
+	struct _k_sem_struct *S = (struct _k_sem_struct *)A->args.s1.sema;
 	struct k_args *X = S->Waiters;
 	struct k_args *Y = NULL;
@@ -137,9 +137,9 @@ void _k_sem_group_wait_cancel(struct k_args *A)
 			 * timer expiry occurs between the update of the packet state
 			 * and the processing of the WAITMRDY packet.
 			 */
-			if (unlikely(waitTaskArgs->Args.s1.sema ==
+			if (unlikely(waitTaskArgs->args.s1.sema ==
 				ENDLIST)) {
-				waitTaskArgs->Args.s1.sema = A->Args.s1.sema;
+				waitTaskArgs->args.s1.sema = A->args.s1.sema;
 			} else {
 				signal_semaphore(1, S);
 			}
@@ -166,7 +166,7 @@ void _k_sem_group_wait_cancel(struct k_args *A)
 
 void _k_sem_group_wait_accept(struct k_args *A)
 {
-	struct _k_sem_struct *S = (struct _k_sem_struct *)A->Args.s1.sema;
+	struct _k_sem_struct *S = (struct _k_sem_struct *)A->args.s1.sema;
 	struct k_args *X = S->Waiters;
 	struct k_args *Y = NULL;
@@ -202,17 +202,17 @@ void _k_sem_group_wait_timeout(struct k_args *A)
 	}
#endif
 
-	L = A->Args.s1.list;
+	L = A->args.s1.list;
 	while (*L != ENDLIST) {
 		struct k_args *R;
 
 		GETARGS(R);
 		R->priority = A->priority;
 		R->Comm =
-			((*L == A->Args.s1.sema) ?
+			((*L == A->args.s1.sema) ?
 			 _K_SVC_SEM_GROUP_WAIT_ACCEPT : _K_SVC_SEM_GROUP_WAIT_CANCEL);
 		R->Ctxt.args = A;
-		R->Args.s1.sema = *L++;
+		R->args.s1.sema = *L++;
 		SENDARGS(R);
 	}
 }
@@ -221,8 +221,8 @@ void _k_sem_group_ready(struct k_args *R)
 {
 	struct k_args *A = R->Ctxt.args;
 
-	if (A->Args.s1.sema == ENDLIST) {
-		A->Args.s1.sema = R->Args.s1.sema;
+	if (A->args.s1.sema == ENDLIST) {
+		A->args.s1.sema = R->args.s1.sema;
 		A->Comm = _K_SVC_SEM_GROUP_WAIT_TIMEOUT;
#ifdef CONFIG_SYS_CLOCK_EXISTS
 		if (A->Time.timer) {
@@ -256,7 +256,7 @@ void _k_sem_wait_reply_timeout(struct k_args *A)
 
 void _k_sem_group_wait_request(struct k_args *A)
 {
-	struct _k_sem_struct *S = (struct _k_sem_struct *)A->Args.s1.sema;
+	struct _k_sem_struct *S = (struct _k_sem_struct *)A->args.s1.sema;
 	struct k_args *X = S->Waiters;
 	struct k_args *Y = NULL;
@@ -292,9 +292,9 @@ void _k_sem_group_wait_any(struct k_args *A)
 {
 	ksem_t *L;
 
-	L = A->Args.s1.list;
-	A->Args.s1.sema = ENDLIST;
-	A->Args.s1.nsem = 0;
+	L = A->args.s1.list;
+	A->args.s1.sema = ENDLIST;
+	A->args.s1.nsem = 0;
 
 	if (*L == ENDLIST) {
 		return;
@@ -307,9 +307,9 @@ void _k_sem_group_wait_any(struct k_args *A)
 		R->priority = _k_current_task->priority;
 		R->Comm = _K_SVC_SEM_GROUP_WAIT_REQUEST;
 		R->Ctxt.args = A;
-		R->Args.s1.sema = *L++;
+		R->args.s1.sema = *L++;
 		SENDARGS(R);
-		(A->Args.s1.nsem)++;
+		(A->args.s1.nsem)++;
 	}
 
 	A->Ctxt.task = _k_current_task;
@@ -332,7 +332,7 @@ void _k_sem_wait_request(struct k_args *A)
 	struct _k_sem_struct *S;
 	uint32_t Sid;
 
-	Sid = A->Args.s1.sema;
+	Sid = A->args.s1.sema;
 	S = (struct _k_sem_struct *)Sid;
 
 	if (S->Level) {
@@ -363,7 +363,7 @@ int _task_sem_take(ksem_t sema, int32_t time)
 
 	A.Comm = _K_SVC_SEM_WAIT_REQUEST;
 	A.Time.ticks = time;
-	A.Args.s1.sema = sema;
+	A.args.s1.sema = sema;
 	KERNEL_ENTRY(&A);
 	return A.Time.rcode;
 }
@@ -375,14 +375,14 @@ ksem_t _task_sem_group_take(ksemg_t group, int32_t time)
 
 	A.Comm = _K_SVC_SEM_GROUP_WAIT_ANY;
 	A.priority = _k_current_task->priority;
 	A.Time.ticks = time;
-	A.Args.s1.list = group;
+	A.args.s1.list = group;
 	KERNEL_ENTRY(&A);
-	return A.Args.s1.sema;
+	return A.args.s1.sema;
 }
 
 void _k_sem_signal(struct k_args *A)
 {
-	uint32_t Sid = A->Args.s1.sema;
+	uint32_t Sid = A->args.s1.sema;
 	struct _k_sem_struct *S = (struct _k_sem_struct *)Sid;
 
 	signal_semaphore(1, S);
@@ -390,9 +390,9 @@ void _k_sem_signal(struct k_args *A)
 
 void _k_sem_group_signal(struct k_args *A)
 {
-	ksem_t *L = A->Args.s1.list;
+	ksem_t *L = A->args.s1.list;
 
-	while ((A->Args.s1.sema = *L++) != ENDLIST) {
+	while ((A->args.s1.sema = *L++) != ENDLIST) {
 		_k_sem_signal(A);
 	}
 }
@@ -402,7 +402,7 @@ void task_sem_give(ksem_t sema)
 	struct k_args A;
 
 	A.Comm = _K_SVC_SEM_SIGNAL;
-	A.Args.s1.sema = sema;
+	A.args.s1.sema = sema;
 	KERNEL_ENTRY(&A);
 }
 
@@ -411,7 +411,7 @@ void task_sem_group_give(ksemg_t group)
 	struct k_args A;
 
 	A.Comm = _K_SVC_SEM_GROUP_SIGNAL;
-	A.Args.s1.list = group;
+	A.args.s1.list = group;
 	KERNEL_ENTRY(&A);
 }
 
@@ -429,14 +429,14 @@ void isr_sem_give(ksem_t sema, struct cmd_pkt_set *pSet)
 
 	pCommand = (struct k_args *)_cmd_pkt_get(pSet);
 
 	pCommand->Comm = _K_SVC_SEM_SIGNAL;
-	pCommand->Args.s1.sema = sema;
+	pCommand->args.s1.sema = sema;
 
 	nano_isr_stack_push(&_k_command_stack, (uint32_t)pCommand);
 }
 
 void _k_sem_reset(struct k_args *A)
 {
-	uint32_t Sid = A->Args.s1.sema;
+	uint32_t Sid = A->args.s1.sema;
 	struct _k_sem_struct *S = (struct _k_sem_struct *)Sid;
 
 	S->Level = 0;
@@ -444,9 +444,9 @@ void _k_sem_reset(struct k_args *A)
 
 void _k_sem_group_reset(struct k_args *A)
 {
-	ksem_t *L = A->Args.s1.list;
+	ksem_t *L = A->args.s1.list;
 
-	while ((A->Args.s1.sema = *L++) != ENDLIST) {
+	while ((A->args.s1.sema = *L++) != ENDLIST) {
 		_k_sem_reset(A);
 	}
 }
@@ -456,7 +456,7 @@ void task_sem_reset(ksem_t sema)
 	struct k_args A;
 
 	A.Comm = _K_SVC_SEM_RESET;
-	A.Args.s1.sema = sema;
+	A.args.s1.sema = sema;
 	KERNEL_ENTRY(&A);
 }
 
@@ -465,7 +465,7 @@ void task_sem_group_reset(ksemg_t group)
 	struct k_args A;
 
 	A.Comm = _K_SVC_SEM_GROUP_RESET;
-	A.Args.s1.list = group;
+	A.args.s1.list = group;
 	KERNEL_ENTRY(&A);
 }
 
@@ -474,7 +474,7 @@ void _k_sem_inquiry(struct k_args *A)
 	struct _k_sem_struct *S;
 	uint32_t Sid;
 
-	Sid = A->Args.s1.sema;
+	Sid = A->args.s1.sema;
 	S = (struct _k_sem_struct *)Sid;
 	A->Time.rcode = S->Level;
 }
@@ -484,7 +484,7 @@ int task_sem_count_get(ksem_t sema)
 	struct k_args A;
 
 	A.Comm = _K_SVC_SEM_INQUIRY;
-	A.Args.s1.sema = sema;
+	A.args.s1.sema = sema;
 	KERNEL_ENTRY(&A);
 	return A.Time.rcode;
 }
diff --git a/kernel/microkernel/k_task.c b/kernel/microkernel/k_task.c
index aed50e4e76a..f0ee7d76275 100644
--- a/kernel/microkernel/k_task.c
+++ b/kernel/microkernel/k_task.c
@@ -328,10 +328,10 @@ void task_abort_handler_set(void (*func)(void) /* abort handler */
 
 void _k_task_op(struct k_args *A)
 {
-	ktask_t Tid = A->Args.g1.task;
+	ktask_t Tid = A->args.g1.task;
 	struct k_task *X = (struct k_task *)Tid;
 
-	switch (A->Args.g1.opt) {
+	switch (A->args.g1.opt) {
 	case TASK_START:
 		start_task(X, X->fn_start);
 		break;
@@ -367,8 +367,8 @@ void _task_ioctl(ktask_t task, /* task on which to operate */
 	struct k_args A;
 
 	A.Comm = _K_SVC_TASK_OP;
-	A.Args.g1.task = task;
-	A.Args.g1.opt = opt;
+	A.args.g1.task = task;
+	A.args.g1.opt = opt;
 	KERNEL_ENTRY(&A);
 }
 
@@ -385,8 +385,8 @@ void _task_ioctl(ktask_t task, /* task on which to operate */
 
 void _k_task_group_op(struct k_args *A)
 {
-	ktask_group_t grp = A->Args.g1.group;
-	int opt = A->Args.g1.opt;
+	ktask_group_t grp = A->args.g1.group;
+	int opt = A->args.g1.opt;
 	struct k_task *X;
 
#ifdef CONFIG_TASK_DEBUG
@@ -437,8 +437,8 @@ void _task_group_ioctl(ktask_group_t group, /* task group */
 	struct k_args A;
 
 	A.Comm = _K_SVC_TASK_GROUP_OP;
-	A.Args.g1.group = group;
-	A.Args.g1.opt = opt;
+	A.args.g1.group = group;
+	A.args.g1.opt = opt;
 	KERNEL_ENTRY(&A);
 }
 
@@ -499,11 +499,11 @@ kpriority_t task_priority_get(void)
 
 void _k_task_priority_set(struct k_args *A)
 {
-	ktask_t Tid = A->Args.g1.task;
+	ktask_t Tid = A->args.g1.task;
 	struct k_task *X = (struct k_task *)Tid;
 
 	_k_state_bit_set(X, TF_PRIO);
-	X->priority = A->Args.g1.prio;
+	X->priority = A->args.g1.prio;
 	_k_state_bit_reset(X, TF_PRIO);
 
 	if (A->alloc)
@@ -532,8 +532,8 @@ void task_priority_set(ktask_t task, /* task whose priority is to be set */
 	struct k_args A;
 
 	A.Comm = _K_SVC_TASK_PRIORITY_SET;
-	A.Args.g1.task = task;
-	A.Args.g1.prio = prio;
+	A.args.g1.task = task;
+	A.args.g1.prio = prio;
 	KERNEL_ENTRY(&A);
 }
 
diff --git a/kernel/microkernel/k_task_monitor.c b/kernel/microkernel/k_task_monitor.c
index 959a37c1a5a..d26b8254b5e 100644
--- a/kernel/microkernel/k_task_monitor.c
+++ b/kernel/microkernel/k_task_monitor.c
@@ -110,13 +110,13 @@ void _k_task_monitor_args(struct k_args *A)
 
 void _k_task_monitor_read(struct k_args *A)
 {
-	A->Args.z4.nrec = k_monitor_nrec;
-	if (A->Args.z4.rind < k_monitor_nrec) {
-		int i = K_monitor_wind - k_monitor_nrec + A->Args.z4.rind;
+	A->args.z4.nrec = k_monitor_nrec;
+	if (A->args.z4.rind < k_monitor_nrec) {
+		int i = K_monitor_wind - k_monitor_nrec + A->args.z4.rind;
 
 		if (i < 0) {
 			i += k_monitor_capacity;
 		}
-		A->Args.z4.mrec = k_monitor_buff[i];
+		A->args.z4.mrec = k_monitor_buff[i];
 	}
 }
 
diff --git a/kernel/microkernel/k_ticker.c b/kernel/microkernel/k_ticker.c
index 277f09f3451..b2ec58556d1 100644
--- a/kernel/microkernel/k_ticker.c
+++ b/kernel/microkernel/k_ticker.c
@@ -238,8 +238,8 @@ void _k_time_elapse(struct k_args *P)
 {
 	int64_t now = task_tick_get();
 
-	P->Args.c1.time2 = now - P->Args.c1.time1;
-	P->Args.c1.time1 = now;
+	P->args.c1.time2 = now - P->args.c1.time1;
+	P->args.c1.time1 = now;
 }
 
 int64_t task_tick_delta(int64_t *reftime /* pointer to reference time */
@@ -248,8 +248,8 @@ int64_t task_tick_delta(int64_t *reftime /* pointer to reference time */
 	struct k_args A;
 
 	A.Comm = _K_SVC_TIME_ELAPSE;
-	A.Args.c1.time1 = *reftime;
+	A.args.c1.time1 = *reftime;
 	KERNEL_ENTRY(&A);
-	*reftime = A.Args.c1.time1;
-	return A.Args.c1.time2;
+	*reftime = A.args.c1.time1;
+	return A.args.c1.time2;
 }
diff --git a/kernel/microkernel/k_timer.c b/kernel/microkernel/k_timer.c
index b7a17129ad7..6a13ccef07b 100644
--- a/kernel/microkernel/k_timer.c
+++ b/kernel/microkernel/k_timer.c
@@ -115,7 +115,7 @@ void _k_timeout_alloc(struct k_args *P)
 	GETTIMER(T);
 	T->duration = P->Time.ticks;
 	T->period = 0;
-	T->Args = P;
+	T->args = P;
 	_k_timer_enlist(T);
 	P->Time.timer = T;
 }
@@ -203,7 +203,7 @@ void _k_timer_list_update(int ticks)
 			} else {
 				T->duration = -1;
 			}
-			TO_ALIST(&_k_command_stack, T->Args);
+			TO_ALIST(&_k_command_stack, T->args);
 
 			ticks = 0; /* don't decrement duration for subsequent timer(s) */
 		}
@@ -227,10 +227,10 @@ void _k_timer_alloc(struct k_args *P)
 	struct k_args *A;
 
 	GETTIMER(T);
-	P->Args.c1.timer = T;
+	P->args.c1.timer = T;
 
 	GETARGS(A);
-	T->Args = A;
+	T->args = A;
 	T->duration = -1; /* -1 indicates that timer is disabled */
 }
 
@@ -248,7 +248,7 @@ ktimer_t task_timer_alloc(void)
 
 	A.Comm = _K_SVC_TIMER_ALLOC;
 	KERNEL_ENTRY(&A);
-	return (ktimer_t)A.Args.c1.timer;
+	return (ktimer_t)A.args.c1.timer;
 }
 
 /**
@@ -263,8 +263,8 @@ ktimer_t task_timer_alloc(void)
 
 void _k_timer_dealloc(struct k_args *P)
 {
-	struct k_timer *T = P->Args.c1.timer;
-	struct k_args *A = T->Args;
+	struct k_timer *T = P->args.c1.timer;
+	struct k_args *A = T->args;
 
 	if (T->duration != -1)
 		_k_timer_delist(T);
@@ -290,7 +290,7 @@ void task_timer_free(ktimer_t timer)
 	struct k_args A;
 
 	A.Comm = _K_SVC_TIMER_DEALLOC;
-	A.Args.c1.timer = (struct k_timer *)timer;
+	A.args.c1.timer = (struct k_timer *)timer;
 	KERNEL_ENTRY(&A);
 }
 
@@ -308,14 +308,14 @@ void task_timer_free(ktimer_t timer)
 
 void _k_timer_start(struct k_args *P)
 {
-	struct k_timer *T = P->Args.c1.timer; /* ptr to the timer to start */
+	struct k_timer *T = P->args.c1.timer; /* ptr to the timer to start */
 
 	if (T->duration != -1) {
 		/* Stop the timer if it is active */
 		_k_timer_delist(T);
 	}
 
-	T->duration = (int32_t)P->Args.c1.time1; /* Set the initial delay */
-	T->period = P->Args.c1.time2; /* Set the period */
+	T->duration = (int32_t)P->args.c1.time1; /* Set the initial delay */
+	T->period = P->args.c1.time2; /* Set the period */
 
 	/*
 	 * Either the initial delay and/or the period is invalid. Mark
@@ -327,9 +327,9 @@ void _k_timer_start(struct k_args *P)
 	}
 
 	/* Track the semaphore to signal for when the timer expires. */
-	if (P->Args.c1.sema != _USE_CURRENT_SEM) {
-		T->Args->Comm = _K_SVC_SEM_SIGNAL;
-		T->Args->Args.s1.sema = P->Args.c1.sema;
+	if (P->args.c1.sema != _USE_CURRENT_SEM) {
+		T->args->Comm = _K_SVC_SEM_SIGNAL;
+		T->args->args.s1.sema = P->args.c1.sema;
 	}
 	_k_timer_enlist(T);
 }
@@ -365,10 +365,10 @@ void task_timer_start(ktimer_t timer, int32_t duration, int32_t period,
 	struct k_args A;
 
 	A.Comm = _K_SVC_TIMER_START;
-	A.Args.c1.timer = (struct k_timer *)timer;
-	A.Args.c1.time1 = (int64_t)duration;
-	A.Args.c1.time2 = period;
-	A.Args.c1.sema = sema;
+	A.args.c1.timer = (struct k_timer *)timer;
+	A.args.c1.time1 = (int64_t)duration;
+	A.args.c1.time2 = period;
+	A.args.c1.sema = sema;
 	KERNEL_ENTRY(&A);
 }
 
@@ -384,7 +384,7 @@ void task_timer_start(ktimer_t timer, int32_t duration, int32_t period,
 
 void _k_timer_stop(struct k_args *P)
 {
-	struct k_timer *T = P->Args.c1.timer;
+	struct k_timer *T = P->args.c1.timer;
 
 	if (T->duration != -1)
 		_k_timer_delist(T);
@@ -407,7 +407,7 @@ void task_timer_stop(ktimer_t timer)
 	struct k_args A;
 
 	A.Comm = _K_SVC_TIMER_STOP;
-	A.Args.c1.timer = (struct k_timer *)timer;
+	A.args.c1.timer = (struct k_timer *)timer;
 	KERNEL_ENTRY(&A);
 }
 
@@ -454,7 +454,7 @@ void _k_task_sleep(struct k_args *P)
 	GETTIMER(T);
 	T->duration = P->Time.ticks;
 	T->period = 0;
-	T->Args = P;
+	T->args = P;
 
 	P->Comm = _K_SVC_TASK_WAKEUP;
 	P->Ctxt.task = _k_current_task;