checkpatch: warning - long_line

Change-Id: I7dd5645db1de00ab4bf2ca3c7a8bae906e8d9e54
Signed-off-by: Dan Kalowsky <daniel.kalowsky@intel.com>
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Authored by Dan Kalowsky on 2015-10-20 09:42:34 -07:00; committed by Anas Nashif
commit 827f6ba7dc
12 changed files with 137 additions and 83 deletions
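The fix is mechanical throughout: declarations, calls, and expressions that checkpatch flagged for exceeding the 80-column limit are wrapped onto continuation lines, typically one parameter or operand per line. A minimal sketch of the pattern on a hypothetical prototype (not taken from this commit):

struct example_desc;

/* before: one declaration past 80 columns trips the long_line warning */
void example_get_avail_data(struct example_desc *desc, int *total_ptr, int *count_ptr, int *post_wrap_count_ptr);

/* after: the same declaration wrapped, one parameter per continuation line */
void example_get_avail_data(struct example_desc *desc,
                            int *total_ptr,
                            int *count_ptr,
                            int *post_wrap_count_ptr);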

@@ -122,7 +122,7 @@ static inline sys_dnode_t *sys_dlist_peek_head(sys_dlist_t *list)
*/
static inline sys_dnode_t *sys_dlist_peek_next(sys_dlist_t *list,
sys_dnode_t *node)
sys_dnode_t *node)
{
return node == list->tail ? NULL : node->next;
}

@@ -25,15 +25,21 @@ extern "C" {
#include <micro_private_types.h>
void BuffInit(unsigned char *pBuffer, int *piBuffSize, struct _k_pipe_desc *desc);
void BuffInit(unsigned char *pBuffer,
int *piBuffSize,
struct _k_pipe_desc *desc);
void BuffGetFreeSpaceTotal(struct _k_pipe_desc *desc, int *piTotalFreeSpace);
void BuffGetFreeSpace(struct _k_pipe_desc *desc, int *piTotalFreeSpace,
int *free_space_count_ptr, int *free_space_post_wrap_around_ptr);
void BuffGetFreeSpace(struct _k_pipe_desc *desc,
int *piTotalFreeSpace,
int *free_space_count_ptr,
int *free_space_post_wrap_around_ptr);
void BuffGetAvailDataTotal(struct _k_pipe_desc *desc, int *piAvailDataTotal);
void BuffGetAvailData(struct _k_pipe_desc *desc, int *piAvailDataTotal,
int *available_data_count_ptr, int *available_data_post_wrap_around_ptr);
void BuffGetAvailData(struct _k_pipe_desc *desc,
int *piAvailDataTotal,
int *available_data_count_ptr,
int *available_data_post_wrap_around_ptr);
int BuffEmpty(struct _k_pipe_desc *desc);
int BuffFull(struct _k_pipe_desc *desc);

@@ -47,8 +47,8 @@ static uint32_t __noinit _k_server_command_stack_storage
#endif
struct nano_stack _k_command_stack = {NULL,
_k_server_command_stack_storage,
_k_server_command_stack_storage};
_k_server_command_stack_storage,
_k_server_command_stack_storage};
extern void _k_server(int unused1, int unused2);

@@ -612,13 +612,15 @@ void _k_mem_pool_block_release(struct k_args *A)
if (P->waiters != NULL) {
struct k_args *NewGet;
/*
* get new command packet that calls the function
* that reallocate blocks for the waiting tasks
* get new command packet that calls
* the function that reallocate blocks
* for the waiting tasks
*/
GETARGS(NewGet);
*NewGet = *A;
NewGet->Comm = _K_SVC_BLOCK_WAITERS_GET;
TO_ALIST(&_k_command_stack, NewGet); /* push on command stack */
/* push on command stack */
TO_ALIST(&_k_command_stack, NewGet);
}
if (A->alloc) {
FREEARGS(A);

@@ -47,7 +47,9 @@
#define CHECK_BUFFER_POINTER(data_ptr) \
__ASSERT_NO_MSG(desc->begin_ptr <= data_ptr && data_ptr < desc->end_ptr)
static void pipe_intrusion_check(struct _k_pipe_desc *desc, unsigned char *begin_ptr, int size);
static void pipe_intrusion_check(struct _k_pipe_desc *desc,
unsigned char *begin_ptr,
int size);
/**
* Markers
@@ -71,7 +73,8 @@ static int MarkerFindFree(struct _k_pipe_marker markers[])
}
static void MarkerLinkToListAfter(struct _k_pipe_marker markers[],
int iMarker, int iNewMarker)
int iMarker,
int iNewMarker)
{
int iNextMarker; /* index of next marker in original list */
@@ -94,7 +97,9 @@ static void MarkerLinkToListAfter(struct _k_pipe_marker markers[],
}
static int MarkerAddLast(struct _k_pipe_marker_list *pMarkerList,
unsigned char *pointer, int size, bool buffer_xfer_busy)
unsigned char *pointer,
int size,
bool buffer_xfer_busy)
{
int i = MarkerFindFree(pMarkerList->markers);
@@ -128,8 +133,10 @@ static int MarkerAddLast(struct _k_pipe_marker_list *pMarkerList,
return i;
}
static void MarkerUnlinkFromList(struct _k_pipe_marker markers[], int iMarker,
int *piPredecessor, int *piSuccessor)
static void MarkerUnlinkFromList(struct _k_pipe_marker markers[],
int iMarker,
int *piPredecessor,
int *piSuccessor)
{
int iNextMarker = markers[iMarker].next;
int iPrevMarker = markers[iMarker].prev;
@@ -160,7 +167,8 @@ static void MarkerDelete(struct _k_pipe_marker_list *pMarkerList, int index)
__ASSERT_NO_MSG(-1 != i);
pMarkerList->markers[i].pointer = NULL;
MarkerUnlinkFromList(pMarkerList->markers, i, &iPredecessor, &iSuccessor);
MarkerUnlinkFromList(pMarkerList->markers, i,
&iPredecessor, &iSuccessor);
/* update first/last info */
if (i == pMarkerList->last_marker) {
@@ -216,15 +224,13 @@ static void MarkersClear(struct _k_pipe_marker_list *pMarkerList)
* (**) for this, the complete markers table needs to be investigated
*/
/**/
/*
* This function will see if one or more 'areas' in the buffer can be made
* available (either for writing xor reading).
* Note: such a series of areas starts from the beginning.
*/
static int ScanMarkers(struct _k_pipe_marker_list *pMarkerList,
int *piSizeBWA, int *piSizeAWA, int *piNbrPendingXfers)
int *piSizeBWA, int *piSizeAWA, int *piNbrPendingXfers)
{
struct _k_pipe_marker *pM;
bool bMarkersAreNowAWA;
@@ -271,7 +277,8 @@ static int ScanMarkers(struct _k_pipe_marker_list *pMarkerList,
__ASSERT_NO_MSG(index == pMarkerList->first_marker);
if (bMarkersAreNowAWA) {
pMarkerList->post_wrap_around_marker = pMarkerList->first_marker;
pMarkerList->post_wrap_around_marker =
pMarkerList->first_marker;
}
#ifdef STORE_NBR_MARKERS
@@ -289,7 +296,9 @@ static int ScanMarkers(struct _k_pipe_marker_list *pMarkerList,
* General
*/
void BuffInit(unsigned char *pBuffer, int *piBuffSize, struct _k_pipe_desc *desc)
void BuffInit(unsigned char *pBuffer,
int *piBuffSize,
struct _k_pipe_desc *desc)
{
desc->begin_ptr = pBuffer;
@@ -297,7 +306,8 @@ void BuffInit(unsigned char *pBuffer, int *piBuffSize, struct _k_pipe_desc *desc
/* reset all pointers */
desc->end_ptr = desc->begin_ptr + OCTET_TO_SIZEOFUNIT(desc->buffer_size);
desc->end_ptr = desc->begin_ptr +
OCTET_TO_SIZEOFUNIT(desc->buffer_size);
desc->original_end_ptr = desc->end_ptr;
/* assumed it is allowed */
@@ -336,8 +346,10 @@ int CalcFreeSpace(struct _k_pipe_desc *desc, int *free_space_count_ptr,
*/
if (BUFF_EMPTY == desc->buffer_state) {
*free_space_count_ptr = SIZEOFUNIT_TO_OCTET(desc->end_ptr - pStart);
*free_space_post_wrap_around_ptr = SIZEOFUNIT_TO_OCTET(pStop - desc->begin_ptr);
*free_space_count_ptr =
SIZEOFUNIT_TO_OCTET(desc->end_ptr - pStart);
*free_space_post_wrap_around_ptr =
SIZEOFUNIT_TO_OCTET(pStop - desc->begin_ptr);
return (*free_space_count_ptr + *free_space_post_wrap_around_ptr);
/* this sum equals end_ptr-begin_ptr */
}
@@ -352,21 +364,26 @@ int CalcFreeSpace(struct _k_pipe_desc *desc, int *free_space_count_ptr,
*free_space_count_ptr = SIZEOFUNIT_TO_OCTET(pStop - pStart);
*free_space_post_wrap_around_ptr = 0;
} else {
*free_space_count_ptr = SIZEOFUNIT_TO_OCTET(desc->end_ptr - pStart);
*free_space_post_wrap_around_ptr = SIZEOFUNIT_TO_OCTET(pStop - desc->begin_ptr);
*free_space_count_ptr =
SIZEOFUNIT_TO_OCTET(desc->end_ptr - pStart);
*free_space_post_wrap_around_ptr =
SIZEOFUNIT_TO_OCTET(pStop - desc->begin_ptr);
}
return (*free_space_count_ptr + *free_space_post_wrap_around_ptr);
}
void BuffGetFreeSpace(struct _k_pipe_desc *desc, int *piFreeSpaceTotal,
int *free_space_count_ptr, int *free_space_post_wrap_around_ptr)
void BuffGetFreeSpace(struct _k_pipe_desc *desc,
int *piFreeSpaceTotal,
int *free_space_count_ptr,
int *free_space_post_wrap_around_ptr)
{
int free_space_count;
int free_space_post_wrap_around;
int iFreeSpaceTotal;
iFreeSpaceTotal =
CalcFreeSpace(desc, &free_space_count, &free_space_post_wrap_around);
CalcFreeSpace(desc, &free_space_count,
&free_space_post_wrap_around);
__ASSERT_NO_MSG(free_space_count == desc->free_space_count);
__ASSERT_NO_MSG(free_space_post_wrap_around == desc->free_space_post_wrap_around);
*piFreeSpaceTotal = iFreeSpaceTotal;
@@ -407,8 +424,10 @@ int CalcAvailData(struct _k_pipe_desc *desc, int *available_data_count_ptr,
*/
if (BUFF_FULL == desc->buffer_state) {
*available_data_count_ptr = SIZEOFUNIT_TO_OCTET(desc->end_ptr - pStart);
*available_data_post_wrap_around_ptr = SIZEOFUNIT_TO_OCTET(pStop - desc->begin_ptr);
*available_data_count_ptr =
SIZEOFUNIT_TO_OCTET(desc->end_ptr - pStart);
*available_data_post_wrap_around_ptr =
SIZEOFUNIT_TO_OCTET(pStop - desc->begin_ptr);
return (*available_data_count_ptr + *available_data_post_wrap_around_ptr);
/* this sum equals end_ptr-begin_ptr */
}
@@ -423,26 +442,31 @@ int CalcAvailData(struct _k_pipe_desc *desc, int *available_data_count_ptr,
*available_data_count_ptr = SIZEOFUNIT_TO_OCTET(pStop - pStart);
*available_data_post_wrap_around_ptr = 0;
} else {
*available_data_count_ptr = SIZEOFUNIT_TO_OCTET(desc->end_ptr - pStart);
*available_data_post_wrap_around_ptr = SIZEOFUNIT_TO_OCTET(pStop - desc->begin_ptr);
*available_data_count_ptr =
SIZEOFUNIT_TO_OCTET(desc->end_ptr - pStart);
*available_data_post_wrap_around_ptr =
SIZEOFUNIT_TO_OCTET(pStop - desc->begin_ptr);
}
return (*available_data_count_ptr + *available_data_post_wrap_around_ptr);
}
void BuffGetAvailData(struct _k_pipe_desc *desc, int *piAvailDataTotal,
int *available_data_count_ptr, int *available_data_post_wrap_around_ptr)
void BuffGetAvailData(struct _k_pipe_desc *desc,
int *piAvailDataTotal,
int *available_data_count_ptr,
int *available_data_post_wrap_around_ptr)
{
int available_data_count;
int available_data_post_wrap_around;
int iAvailDataTotal;
iAvailDataTotal =
CalcAvailData(desc, &available_data_count, &available_data_post_wrap_around);
iAvailDataTotal = CalcAvailData(desc, &available_data_count,
&available_data_post_wrap_around);
__ASSERT_NO_MSG(available_data_count == desc->available_data_count);
__ASSERT_NO_MSG(available_data_post_wrap_around == desc->available_data_post_wrap_around);
*piAvailDataTotal = iAvailDataTotal;
*available_data_count_ptr = desc->available_data_count;
*available_data_post_wrap_around_ptr = desc->available_data_post_wrap_around;
*available_data_post_wrap_around_ptr =
desc->available_data_post_wrap_around;
}
void BuffGetAvailDataTotal(struct _k_pipe_desc *desc, int *piAvailDataTotal)
@@ -484,10 +508,11 @@ static int AsyncEnQRegstr(struct _k_pipe_desc *desc, int size)
desc->read_guard = desc->write_ptr;
}
__ASSERT_NO_MSG(desc->write_markers.markers
[desc->write_markers.first_marker].pointer ==
[desc->write_markers.first_marker].pointer ==
desc->read_guard);
/* post_wrap_around_marker changes? */
if (-1 == desc->write_markers.post_wrap_around_marker && desc->wrap_around_write) {
if (-1 == desc->write_markers.post_wrap_around_marker &&
desc->wrap_around_write) {
desc->write_markers.post_wrap_around_marker = i;
}
}
@@ -500,9 +525,9 @@ static void AsyncEnQFinished(struct _k_pipe_desc *desc, int iTransferID)
if (desc->write_markers.first_marker == iTransferID) {
int iNewFirstMarker = ScanMarkers(&desc->write_markers,
&desc->available_data_count,
&desc->available_data_post_wrap_around,
&desc->num_pending_writes);
&desc->available_data_count,
&desc->available_data_post_wrap_around,
&desc->num_pending_writes);
if (-1 != iNewFirstMarker) {
desc->read_guard =
desc->write_markers.markers[iNewFirstMarker].pointer;
@@ -594,10 +619,11 @@ static int AsyncDeQRegstr(struct _k_pipe_desc *desc, int size)
desc->write_guard = desc->read_ptr;
}
__ASSERT_NO_MSG(desc->read_markers.markers
[desc->read_markers.first_marker].pointer ==
[desc->read_markers.first_marker].pointer ==
desc->write_guard);
/* post_wrap_around_marker changes? */
if (-1 == desc->read_markers.post_wrap_around_marker && desc->wrap_around_read) {
if (-1 == desc->read_markers.post_wrap_around_marker &&
desc->wrap_around_read) {
desc->read_markers.post_wrap_around_marker = i;
}
}
@@ -610,12 +636,12 @@ static void AsyncDeQFinished(struct _k_pipe_desc *desc, int iTransferID)
if (desc->read_markers.first_marker == iTransferID) {
int iNewFirstMarker = ScanMarkers(&desc->read_markers,
&desc->free_space_count,
&desc->free_space_post_wrap_around,
&desc->num_pending_reads);
&desc->free_space_count,
&desc->free_space_post_wrap_around,
&desc->num_pending_reads);
if (-1 != iNewFirstMarker) {
desc->write_guard =
desc->read_markers.markers[iNewFirstMarker].pointer;
desc->read_markers.markers[iNewFirstMarker].pointer;
} else {
desc->write_guard = NULL;
}
@@ -654,7 +680,8 @@ int BuffDeQA(struct _k_pipe_desc *desc, int size, unsigned char **ppRead,
desc->read_ptr += OCTET_TO_SIZEOFUNIT(size);
if (desc->end_ptr == desc->read_ptr) {
desc->read_ptr = desc->begin_ptr;
desc->available_data_count = desc->available_data_post_wrap_around;
desc->available_data_count =
desc->available_data_post_wrap_around;
desc->available_data_post_wrap_around = 0;
desc->wrap_around_write = false;
desc->wrap_around_read = true;
@@ -729,7 +756,9 @@ static bool AreasCheck4Intrusion(unsigned char *pBegin1, int iSize1,
}
}
static void pipe_intrusion_check(struct _k_pipe_desc *desc, unsigned char *begin_ptr, int size)
static void pipe_intrusion_check(struct _k_pipe_desc *desc,
unsigned char *begin_ptr,
int size)
{
/*
* check possible collision with all existing data areas,
@@ -759,7 +788,10 @@ static void pipe_intrusion_check(struct _k_pipe_desc *desc, unsigned char *begin
pM = &(pMarkerList->markers[index]);
if (0 != AreasCheck4Intrusion(begin_ptr, size, pM->pointer, pM->size)) {
if (0 != AreasCheck4Intrusion(begin_ptr,
size,
pM->pointer,
pM->size)) {
__ASSERT_NO_MSG(1 == 0);
}
index = pM->next;
@@ -785,7 +817,10 @@ static void pipe_intrusion_check(struct _k_pipe_desc *desc, unsigned char *begin
pM = &(pMarkerList->markers[index]);
if (0 != AreasCheck4Intrusion(begin_ptr, size, pM->pointer, pM->size)) {
if (0 != AreasCheck4Intrusion(begin_ptr,
size,
pM->pointer,
pM->size)) {
__ASSERT_NO_MSG(1 == 0);
}
index = pM->next;

@@ -96,7 +96,8 @@ void _k_pipe_get_request(struct k_args *RequestOrig)
iData2ReadFromWriters = CalcAvailWriterData(pipe_ptr->writers);
iAvailBufferData =
pipe_ptr->desc.available_data_count + pipe_ptr->desc.available_data_post_wrap_around;
pipe_ptr->desc.available_data_count +
pipe_ptr->desc.available_data_post_wrap_around;
iTotalData2Read =
iAvailBufferData + iData2ReadFromWriters;

@@ -113,7 +113,8 @@ void _k_pipe_put_request(struct k_args *RequestOrig)
iSpace2WriteinReaders = CalcFreeReaderSpace(pipe_ptr->readers);
iFreeBufferSpace =
pipe_ptr->desc.free_space_count + pipe_ptr->desc.free_space_post_wrap_around;
pipe_ptr->desc.free_space_count +
pipe_ptr->desc.free_space_post_wrap_around;
iTotalSpace2Write =
iFreeBufferSpace + iSpace2WriteinReaders;

@@ -619,14 +619,14 @@ static void pipe_read_write(struct _k_pipe_struct *pipe_ptr,
pipe_read_req = &reader_ptr->args.pipe_xfer_req;
/* Calculate iT1, iT2 and iT3 */
int iFreeSpaceReader =
(pipe_read_req->total_size - pipe_read_req->xferred_size);
int iAvailDataWriter =
(pipe_write_req->total_size - pipe_write_req->xferred_size);
int iFreeSpaceBuffer =
(pipe_ptr->desc.free_space_count + pipe_ptr->desc.free_space_post_wrap_around);
int iAvailDataBuffer =
(pipe_ptr->desc.available_data_count + pipe_ptr->desc.available_data_post_wrap_around);
int iFreeSpaceReader = (pipe_read_req->total_size -
pipe_read_req->xferred_size);
int iAvailDataWriter = (pipe_write_req->total_size -
pipe_write_req->xferred_size);
int iFreeSpaceBuffer = (pipe_ptr->desc.free_space_count +
pipe_ptr->desc.free_space_post_wrap_around);
int iAvailDataBuffer = (pipe_ptr->desc.available_data_count +
pipe_ptr->desc.available_data_post_wrap_around);
iT1 = min(iFreeSpaceReader, iAvailDataBuffer);
@@ -667,7 +667,8 @@ static void pipe_read_write(struct _k_pipe_struct *pipe_ptr,
__ASSERT_NO_MSG(TERM_SATISFIED != reader_ptr->args.pipe_xfer_req.status);
GETARGS(Moved_req);
setup_movedata(Moved_req, pipe_ptr, XFER_W2R, writer_ptr, reader_ptr,
setup_movedata(Moved_req, pipe_ptr, XFER_W2R,
writer_ptr, reader_ptr,
(char *)(pipe_read_req->data_ptr) +
OCTET_TO_SIZEOFUNIT(pipe_read_req->xferred_size),
(char *)(pipe_write_req->data_ptr) +
@@ -775,7 +776,8 @@ void _k_pipe_process(struct _k_pipe_struct *pipe_ptr, struct k_args *pNLWriter,
int iFreeBufferSpace;
int iTotalSpace2Write;
iSpace2WriteinReaders = CalcFreeReaderSpace(pipe_ptr->readers);
iSpace2WriteinReaders =
CalcFreeReaderSpace(pipe_ptr->readers);
if (pNLReader)
iSpace2WriteinReaders +=
(pNLReader->args.pipe_xfer_req.total_size -
@@ -931,17 +933,17 @@ void _k_pipe_process(struct _k_pipe_struct *pipe_ptr, struct k_args *pNLWriter,
/* in the sequel, we will:
* 1. check the hypothesis that an existing reader_ptr/writer_ptr is
* not completed
* not completed
* 2. check if we can force the termination of a X_TO_N request when
* some data transfer took place
* some data transfer took place
* 3. check if we have to cancel a timer when the (first) data has been
* Xferred
* Xferred
* 4. Check if we have to kick out a queued request because its
* processing is really blocked (for some reason)
* processing is really blocked (for some reason)
*/
if (reader_ptr && writer_ptr) {
__ASSERT_NO_MSG(!(TERM_XXX & reader_ptr->args.pipe_xfer_req.status) &&
!(TERM_XXX & writer_ptr->args.pipe_xfer_req.status));
!(TERM_XXX & writer_ptr->args.pipe_xfer_req.status));
/*
* this could be possible when data Xfer operations are jammed
* (out of data Xfer resources e.g.)

@@ -68,7 +68,8 @@ void _k_state_bit_reset(struct k_task *X, uint32_t bits)
X->next = NULL;
H->tail->next = X;
H->tail = X;
_k_task_priority_bitmap[X->priority >> 5] |= (1 << (X->priority & 0x1F));
_k_task_priority_bitmap[X->priority >> 5] |=
(1 << (X->priority & 0x1F));
}
#ifdef CONFIG_TASK_MONITOR
@@ -127,7 +128,8 @@ void _k_state_bit_set(struct k_task *task_ptr, uint32_t bits)
volatile
#endif
#endif
struct k_tqhd *task_queue = _k_task_priority_list + task_ptr->priority;
struct k_tqhd *task_queue = _k_task_priority_list +
task_ptr->priority;
struct k_task *cur_task = (struct k_task *)(&task_queue->head);
/*
@@ -149,7 +151,8 @@ void _k_state_bit_set(struct k_task *task_ptr, uint32_t bits)
* runnable, then clear that bit in the global priority bit map.
*/
if (task_queue->head == NULL) {
_k_task_priority_bitmap[task_ptr->priority >> 5] &= ~(1 << (task_ptr->priority & 0x1F));
_k_task_priority_bitmap[task_ptr->priority >> 5] &=
~(1 << (task_ptr->priority & 0x1F));
}
}

@@ -92,7 +92,8 @@ static inline void _nano_timeout_abort(struct tcs *tcs)
if (!sys_dlist_is_tail(timeout_q, &t->node)) {
struct _nano_timeout *next =
(struct _nano_timeout *)sys_dlist_peek_next(timeout_q, &t->node);
(struct _nano_timeout *)sys_dlist_peek_next(timeout_q,
&t->node);
next->delta_ticks_from_prev += t->delta_ticks_from_prev;
}
sys_dlist_remove(&t->node);
@@ -127,8 +128,8 @@ static int _nano_timeout_insert_point_test(sys_dnode_t *test, void *timeout)
/* put a fiber on the timeout queue and record its wait queue */
static inline void _nano_timeout_add(struct tcs *tcs,
struct _nano_queue *wait_q,
int32_t timeout)
struct _nano_queue *wait_q,
int32_t timeout)
{
sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
struct _nano_timeout *t = &tcs->nano_timeout;

@@ -197,16 +197,16 @@ FUNC_ALIAS(fiber_delayed_start, fiber_fiber_delayed_start, void *);
FUNC_ALIAS(fiber_delayed_start, task_fiber_delayed_start, void *);
void *fiber_delayed_start(char *stack, unsigned int stack_size_in_bytes,
nano_fiber_entry_t entry_point, int param1,
int param2, unsigned int priority,
unsigned int options, int32_t timeout_in_ticks)
nano_fiber_entry_t entry_point, int param1,
int param2, unsigned int priority,
unsigned int options, int32_t timeout_in_ticks)
{
unsigned int key;
struct tcs *tcs;
tcs = (struct tcs *)stack;
_new_thread(stack, stack_size_in_bytes, (_thread_entry_t)entry_point,
(void *)param1, (void *)param2, (void *)0, priority, options);
(void *)param1, (void *)param2, (void *)0, priority, options);
key = irq_lock();

@@ -110,7 +110,9 @@ void _sys_profiler_context_switch(void)
* switch during the process.
*/
_sys_event_logger_put_non_preemptible(&sys_profiler_logger,
PROFILER_CONTEXT_SWITCH_EVENT_ID, data, ARRAY_SIZE(data));
PROFILER_CONTEXT_SWITCH_EVENT_ID,
data,
ARRAY_SIZE(data));
}
}
@@ -145,7 +147,8 @@ void _sys_profiler_exit_sleep(void)
uint32_t data[3];
data[0] = nano_tick_get_32();
data[1] = (nano_cycle_get_32() - _sys_profiler_sleep_start_time) / sys_clock_hw_cycles_per_tick;
data[1] = (nano_cycle_get_32() - _sys_profiler_sleep_start_time) /
sys_clock_hw_cycles_per_tick;
/* register the cause of exiting sleep mode */
data[2] = _sys_current_irq_key_get();