checkpatch: warning - line_spacing
Change-Id: I2276676142deea21cf8079449ce153f2fb887a8e
Signed-off-by: Dan Kalowsky <daniel.kalowsky@intel.com>
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
parent e45956c9a0
commit 890cc2f1ef
16 changed files with 54 additions and 26 deletions
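Nearly every hunk below addresses the same checkpatch "line_spacing" complaint, "Missing a blank line after declarations", by inserting a blank line between the last local declaration and the first statement; a few hunks instead rejoin declarations that had been split across lines or rewrap an over-long comment. As a minimal, self-contained sketch of the pattern (illustrative names only, not code from this tree):

#include <stdio.h>

struct cfg {
	int irq;
};

/*
 * Before the fix, checkpatch would flag the printf() line with
 * "WARNING: Missing a blank line after declarations", because the
 * statement directly follows the local declaration.
 */
static void show_irq(const struct cfg *c)
{
	int irq = c->irq;

	printf("irq=%d\n", irq);	/* the blank line above is the fix */
}

int main(void)
{
	struct cfg c = { .irq = 17 };

	show_irq(&c);
	return 0;
}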
@@ -767,9 +767,9 @@ static inline void nanoArchInit(void)
 {
 	extern void *__isr___SpuriousIntHandler;
 	extern void *_dummy_spurious_interrupt;
-	extern void _ExcEnt(void);
 	extern void *_dummy_exception_vector_stub;
 	extern char _interrupt_stack[CONFIG_ISR_STACK_SIZE];
+	extern void _ExcEnt(void);

 	_nanokernel.nested = 0;

@@ -290,11 +290,11 @@ static int _i2c_dw_setup(struct device *dev)
 {
 	struct i2c_dw_dev_config * const dw = dev->driver_data;
 	struct i2c_dw_rom_config const * const rom = dev->config->config_info;
-	volatile struct i2c_dw_registers * const regs =
-		(struct i2c_dw_registers *)rom->base_address;
 	uint32_t value = 0;
 	union ic_con_register ic_con;
 	int rc = DEV_OK;
+	volatile struct i2c_dw_registers * const regs =
+		(struct i2c_dw_registers *)rom->base_address;

 	ic_con.raw = 0;

@@ -462,6 +462,7 @@ IRQ_CONNECT_STATIC(spi_intel_irq_port_0, CONFIG_SPI_INTEL_PORT_0_IRQ,
 void spi_config_0_irq(struct device *dev)
 {
 	struct spi_intel_config *config = dev->config->config_info;
+
 	IRQ_CONFIG(spi_intel_irq_port_0, config->irq, 0);
 }

@@ -505,6 +506,7 @@ IRQ_CONNECT_STATIC(spi_intel_irq_port_1, CONFIG_SPI_INTEL_PORT_1_IRQ,
 void spi_config_1_irq(struct device *dev)
 {
 	struct spi_intel_config *config = dev->config->config_info;
+
 	IRQ_CONFIG(spi_intel_irq_port_1, config->irq, 0);
 }

@@ -391,6 +391,7 @@ static void sysTickTicklessIdleInit(void)
 	/* enable counter, disable interrupt and set clock src to system clock
 	 */
 	union __stcsr stcsr = {.bit = {1, 0, 1, 0, 0, 0} };

 	volatile uint32_t dummy; /* used to help determine the 'skew time' */
+
 	/* store the default reload value (which has already been set) */
@@ -283,13 +283,11 @@ union pci_dev {
 		/* offset 04: */

 #ifdef _BIG_ENDIAN
 		uint32_t status : 16; /* device status */
-		uint32_t command
-			: 16; /* device command register */
+		uint32_t command : 16; /* device command register */
 #else
-		uint32_t command
-			: 16; /* device command register */
-		uint32_t status : 16; /* device status */
+		uint32_t command : 16; /* device command register */
+		uint32_t status : 16; /* device status */
 #endif

 		/* offset 08: */
@@ -103,6 +103,7 @@ static inline int spi_configure(struct device *dev,
 				struct spi_config *config, void *user_data)
 {
 	struct spi_driver_api *api = (struct spi_driver_api *)dev->driver_api;
+
 	return api->configure(dev, config, user_data);
 }

@@ -141,6 +142,7 @@ inline int spi_slave_select(struct device *dev, uint32_t slave)
 static inline int spi_read(struct device *dev, uint8_t *buf, uint32_t len)
 {
 	struct spi_driver_api *api = (struct spi_driver_api *)dev->driver_api;
+
 	return api->transceive(dev, NULL, 0, buf, len);
 }

@@ -155,6 +157,7 @@ static inline int spi_read(struct device *dev, uint8_t *buf, uint32_t len)
 static inline int spi_write(struct device *dev, uint8_t *buf, uint32_t len)
 {
 	struct spi_driver_api *api = (struct spi_driver_api *)dev->driver_api;
+
 	return api->transceive(dev, buf, len, NULL, 0);
 }

@@ -176,6 +179,7 @@ static inline int spi_transceive(struct device *dev,
 			  uint8_t *rx_buf, uint32_t rx_buf_len)
 {
 	struct spi_driver_api *api = (struct spi_driver_api *)dev->driver_api;
+
 	return api->transceive(dev, tx_buf, tx_buf_len, rx_buf, rx_buf_len);
 }

@@ -188,6 +192,7 @@ static inline int spi_transceive(struct device *dev,
 static inline int spi_suspend(struct device *dev)
 {
 	struct spi_driver_api *api = (struct spi_driver_api *)dev->driver_api;
+
 	return api->suspend(dev);
 }

@@ -200,6 +205,7 @@ static inline int spi_suspend(struct device *dev)
 static inline int spi_resume(struct device *dev)
 {
 	struct spi_driver_api *api = (struct spi_driver_api *)dev->driver_api;
+
 	return api->resume(dev);
 }

@@ -185,6 +185,7 @@ void _k_do_event_signal(kevent_t event)
 void _k_event_signal(struct k_args *A)
 {
 	kevent_t event = A->args.e1.event;
+
 	_k_do_event_signal(event);
 	A->Time.rcode = RC_OK;
 }
@@ -72,6 +72,7 @@ static void workload_loop(void)

 	while (++_k_workload_i != _k_workload_n1) {
 		unsigned int s_iCountDummyProc = 0;
+
 		while (64 != s_iCountDummyProc++) { /* 64 == 2^6 */
 			x >>= y;
 			x <<= y;
@@ -413,6 +414,7 @@ int _k_kernel_idle(void)
 	/* record timestamp when idling begins */

 	extern uint64_t __idle_tsc;
+
 	__idle_tsc = _NanoTscRead();
 #endif

@@ -139,8 +139,8 @@ static bool prepare_transfer(struct k_args *move,

 	if (move) {
 		/* { move != NULL, which means full data exchange } */
-
 		bool all_data_present = true;
+
 		move->Comm = _K_SVC_MOVEDATA_REQ;
 		/*
 		 * transfer the data with the highest
@@ -872,6 +872,7 @@ int _task_mbox_data_block_get(struct k_msg *message,
 	 */

 	struct k_args A;
+
 	A.args.m1.mess = *message;
 	A.Comm = _K_SVC_MBOX_RECEIVE_DATA;
 	KERNEL_ENTRY(&A);
@@ -205,6 +205,7 @@ void _k_pipe_get_reply(struct k_args *ReqProc)

 	struct k_args *ReqOrig = ReqProc->Ctxt.args;
 	PIPE_REQUEST_STATUS status;
+
 	ReqOrig->Comm = _K_SVC_PIPE_GET_ACK;

 	/* determine return value */
@@ -56,6 +56,7 @@ int CalcFreeReaderSpace(struct k_args *pReaderList)

 	if (pReaderList) {
 		struct k_args *reader_ptr = pReaderList;
+
 		while (reader_ptr != NULL) {
 			size += (reader_ptr->args.pipe_xfer_req.total_size -
 				 reader_ptr->args.pipe_xfer_req.xferred_size);
@@ -71,6 +72,7 @@ int CalcAvailWriterData(struct k_args *pWriterList)

 	if (pWriterList) {
 		struct k_args *writer_ptr = pWriterList;
+
 		while (writer_ptr != NULL) {
 			size += (writer_ptr->args.pipe_xfer_req.total_size -
 				 writer_ptr->args.pipe_xfer_req.xferred_size);
@@ -99,6 +99,7 @@ void _k_task_monitor_read(struct k_args *A)
 	A->args.z4.nrec = k_monitor_nrec;
 	if (A->args.z4.rind < k_monitor_nrec) {
 		int i = K_monitor_wind - k_monitor_nrec + A->args.z4.rind;
+
 		if (i < 0) {
 			i += k_monitor_capacity;
 		}
@@ -36,6 +36,7 @@ void _sys_device_do_config_level(int level)

 	for (info = config_levels[level]; info < config_levels[level+1]; info++) {
 		struct device_config *device = info->config;
+
 		device->init(info);
 	}
 }
@@ -115,6 +115,7 @@ void _fifo_put_non_preemptible(struct nano_fifo *fifo, void *data)
 	fifo->stat++;
 	if (fifo->stat <= 0) {
 		struct tcs *tcs = _nano_wait_q_remove_no_check(&fifo->wait_q);
+
 		_nano_timeout_abort(tcs);
 		fiberRtnValueSet(tcs, (unsigned int)data);
 	} else {
@@ -133,6 +134,7 @@ void nano_task_fifo_put(struct nano_fifo *fifo, void *data)
 	fifo->stat++;
 	if (fifo->stat <= 0) {
 		struct tcs *tcs = _nano_wait_q_remove_no_check(&fifo->wait_q);
+
 		_nano_timeout_abort(tcs);
 		fiberRtnValueSet(tcs, (unsigned int)data);
 		_Swap(imask);
@@ -71,6 +71,7 @@ int64_t nano_tick_get(void)
 	 * _nano_ticks
 	 */
 	unsigned int imask = irq_lock();
+
 	tmp_nano_ticks = _nano_ticks;
 	irq_unlock(imask);
 	return tmp_nano_ticks;
@@ -116,6 +117,7 @@ static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime)
 	 * _nano_ticks
 	 */
 	unsigned int imask = irq_lock();
+
 	saved = _nano_ticks;
 	irq_unlock(imask);
 	delta = saved - (*reftime);
@@ -171,6 +173,7 @@ static inline void handle_expired_nano_timers(int ticks)
 	while (_nano_timer_list && (!_nano_timer_list->ticks)) {
 		struct nano_timer *expired = _nano_timer_list;
 		struct nano_lifo *lifo = &expired->lifo;
+
 		_nano_timer_list = expired->link;
 		nano_isr_lifo_put(lifo, expired->userData);
 	}
@@ -72,8 +72,11 @@ void _sys_profiler_context_switch(void)
 {
 	extern tNANO _nanokernel;
 	uint32_t data[2];

 	extern void _sys_event_logger_put_non_preemptible(
-		struct event_logger *logger, uint16_t event_id, uint32_t *event_data,
+		struct event_logger *logger,
+		uint16_t event_id,
+		uint32_t *event_data,
 		uint8_t data_size);
+
 	/* if the profiler has not been initialized, we do nothing */
@@ -86,21 +89,25 @@ void _sys_profiler_context_switch(void)
 	data[1] = (uint32_t)_nanokernel.current;

 	/*
-	 * The mechanism we use to log the profile events uses a sync semaphore
-	 * to inform that there are available events to be collected. The
-	 * context switch event can be triggered from a task. When we
-	 * signal a semaphore from a task and a fiber is waiting for
-	 * that semaphore, a context switch is generated immediately. Due to
-	 * the fact that we register the context switch event while the context
-	 * switch is being processed, a new context switch can be generated
-	 * before the kernel finishes processing the current context switch. We
-	 * need to prevent this because the kernel is not able to handle it.
-	 * The _sem_give_non_preemptible function does not trigger a context
-	 * switch when we signal the semaphore from any type of thread. Using
-	 * _sys_event_logger_put_non_preemptible function, that internally uses
-	 * _sem_give_non_preemptible function for signaling the sync semaphore,
-	 * allow us registering the context switch event without triggering any
-	 * new context switch during the process.
+	 * The mechanism we use to log the profile events uses a sync
+	 * semaphore to inform that there are available events to be
+	 * collected. The context switch event can be triggered from a
+	 * task. When we signal a semaphore from a task and a fiber is
+	 * waiting for that semaphore, a context switch is generated
+	 * immediately. Due to the fact that we register the context
+	 * switch event while the context switch is being processed, a
+	 * new context switch can be generated before the kernel
+	 * finishes processing the current context switch. We
+	 * need to prevent this because the kernel is not able to
+	 * handle it.
+	 *
+	 * The _sem_give_non_preemptible function does not trigger a
+	 * context switch when we signal the semaphore from any type of
+	 * thread. Using _sys_event_logger_put_non_preemptible function,
+	 * that internally uses _sem_give_non_preemptible function for
+	 * signaling the sync semaphore, allow us registering the
+	 * context switch event without triggering any new context
+	 * switch during the process.
 	 */
 	_sys_event_logger_put_non_preemptible(&sys_profiler_logger,
 		PROFILER_CONTEXT_SWITCH_EVENT_ID, data, ARRAY_SIZE(data));