unified: move code from nanokernel into unified kernel

As the unified kernel should replace the nanokernel and microkernel,
let's go ahead and move code shared between the nanokernel and unified
kernel into the unified kernel.

Change-Id: I8931efa5d67025381d5d0d9563e7c6632cece87f
Signed-off-by: Kumar Gala <kumar.gala@linaro.org>
Kumar Gala 2016-10-05 12:01:54 -05:00
commit d12d8af186
18 changed files with 1327 additions and 1325 deletions


@@ -1,362 +1 @@
/*
* Copyright (c) 2016 Intel Corporation
* Copyright (c) 2011-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file Atomic ops in pure C
*
* This module provides the atomic operators for processors
* which do not support native atomic operations.
*
* The atomic operations are guaranteed to be atomic with respect
* to interrupt service routines, and to operations performed by peer
* processors.
*
* (originally from x86's atomic.c)
*/
#include <atomic.h>
#include <toolchain.h>
#include <arch/cpu.h>
/**
*
* @brief Atomic compare-and-set primitive
*
* This routine provides the compare-and-set operator. If the original value at
* <target> equals <oldValue>, then <newValue> is stored at <target> and the
* function returns 1.
*
* If the original value at <target> does not equal <oldValue>, then the store
* is not done and the function returns 0.
*
* The reading of the original value at <target>, the comparison,
* and the write of the new value (if it occurs) all happen atomically with
* respect to both interrupts and accesses of other processors to <target>.
*
* @param target address to be tested
* @param old_value value to compare against
* @param new_value value to write if the comparison succeeds
* @return Returns 1 if <new_value> is written, 0 otherwise.
*/
int atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value)
{
unsigned int key;
int ret = 0;
key = irq_lock();
if (*target == old_value) {
*target = new_value;
ret = 1;
}
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic addition primitive
*
* This routine provides the atomic addition operator. The <value> is
* atomically added to the value at <target>, placing the result at <target>,
* and the old value from <target> is returned.
*
* @param target memory location to add to
* @param value the value to add
*
* @return The previous value from <target>
*/
atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target += value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic subtraction primitive
*
* This routine provides the atomic subtraction operator. The <value> is
* atomically subtracted from the value at <target>, placing the result at
* <target>, and the old value from <target> is returned.
*
* @param target the memory location to subtract from
* @param value the value to subtract
*
* @return The previous value from <target>
*/
atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target -= value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic increment primitive
*
* @param target memory location to increment
*
* This routine provides the atomic increment operator. The value at <target>
* is atomically incremented by 1, and the old value from <target> is returned.
*
* @return The value from <target> before the increment
*/
atomic_val_t atomic_inc(atomic_t *target)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
(*target)++;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic decrement primitive
*
* @param target memory location to decrement
*
* This routine provides the atomic decrement operator. The value at <target>
* is atomically decremented by 1, and the old value from <target> is returned.
*
* @return The value from <target> prior to the decrement
*/
atomic_val_t atomic_dec(atomic_t *target)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
(*target)--;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic get primitive
*
* @param target memory location to read from
*
* This routine provides the atomic get primitive to atomically read
* a value from <target>. It simply does an ordinary load. Note that <target>
* is expected to be aligned to a 4-byte boundary.
*
* @return The value read from <target>
*/
atomic_val_t atomic_get(const atomic_t *target)
{
return *target;
}
/**
*
* @brief Atomic get-and-set primitive
*
* This routine provides the atomic set operator. The <value> is atomically
* written at <target> and the previous value at <target> is returned.
*
* @param target the memory location to write to
* @param value the value to write
*
* @return The previous value from <target>
*/
atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target = value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic clear primitive
*
* This routine provides the atomic clear operator. The value of 0 is atomically
* written at <target> and the previous value at <target> is returned. (Hence,
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
*
* @param target the memory location to write
*
* @return The previous value from <target>
*/
atomic_val_t atomic_clear(atomic_t *target)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target = 0;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic bitwise inclusive OR primitive
*
* This routine provides the atomic bitwise inclusive OR operator. The <value>
* is atomically bitwise OR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to OR
*
* @return The previous value from <target>
*/
atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target |= value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic bitwise exclusive OR (XOR) primitive
*
* This routine provides the atomic bitwise exclusive OR operator. The <value>
* is atomically bitwise XOR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to XOR
*
* @return The previous value from <target>
*/
atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target ^= value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic bitwise AND primitive
*
* This routine provides the atomic bitwise AND operator. The <value> is
* atomically bitwise AND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to AND
*
* @return The previous value from <target>
*/
atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target &= value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic bitwise NAND primitive
*
* This routine provides the atomic bitwise NAND operator. The <value> is
* atomically bitwise NAND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to NAND
*
* @return The previous value from <target>
*/
atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target = ~(*target & value);
irq_unlock(key);
return ret;
}
#include "../unified/atomic_c.c"


@@ -1,66 +1 @@
/*
* Copyright (c) 2012-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Compiler stack protection (kernel part)
*
* This module provides functions to support compiler stack protection
* using canaries. This feature is enabled with configuration
* CONFIG_STACK_CANARIES=y.
*
* When this feature is enabled, the compiler generated code refers to
* function __stack_chk_fail and global variable __stack_chk_guard.
*/
#include <toolchain.h> /* compiler specific configurations */
#include <nano_private.h>
#include <toolchain.h>
#include <sections.h>
/**
*
* @brief Stack canary error handler
*
* This function is invoked when a stack canary error is detected.
*
* @return Does not return
*/
void FUNC_NORETURN _StackCheckHandler(void)
{
/* Stack canary error is a software fatal condition; treat it as such.
*/
_NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, &_default_esf);
}
/* Global variable */
/*
* Symbol referenced by GCC compiler generated code for canary value.
* The canary value gets initialized in _Cstart().
*/
void __noinit *__stack_chk_guard;
/**
*
* @brief Referenced by GCC compiler generated code
*
* This routine is invoked when a stack canary error is detected, indicating
* a buffer overflow or stack corruption problem.
*/
FUNC_ALIAS(_StackCheckHandler, __stack_chk_fail, void);
#include "../unified/compiler_stack_protect.c"


@@ -1,137 +1 @@
/*
* Copyright (c) 2015-2016 Intel Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <string.h>
#include <device.h>
#include <misc/util.h>
#include <atomic.h>
extern struct device __device_init_start[];
extern struct device __device_PRIMARY_start[];
extern struct device __device_SECONDARY_start[];
extern struct device __device_NANOKERNEL_start[];
extern struct device __device_MICROKERNEL_start[];
extern struct device __device_APPLICATION_start[];
extern struct device __device_init_end[];
static struct device *config_levels[] = {
__device_PRIMARY_start,
__device_SECONDARY_start,
__device_NANOKERNEL_start,
__device_MICROKERNEL_start,
__device_APPLICATION_start,
__device_init_end,
};
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
struct device_pm_ops device_pm_ops_nop = {device_pm_nop, device_pm_nop};
extern uint32_t __device_busy_start[];
extern uint32_t __device_busy_end[];
#define DEVICE_BUSY_SIZE (__device_busy_end - __device_busy_start)
#endif
/**
* @brief Execute all the device initialization functions at a given level
*
* @details Invokes the initialization routine for each device object
* created by the DEVICE_INIT() macro using the specified level.
* The linker script places the device objects in memory in the order
* they need to be invoked, with symbols indicating where one level leaves
* off and the next one begins.
*
* @param level init level to run.
*/
void _sys_device_do_config_level(int level)
{
struct device *info;
for (info = config_levels[level]; info < config_levels[level+1]; info++) {
struct device_config *device = info->config;
device->init(info);
}
}
struct device *device_get_binding(const char *name)
{
struct device *info;
for (info = __device_init_start; info != __device_init_end; info++) {
if (info->driver_api && !strcmp(name, info->config->name)) {
return info;
}
}
return NULL;
}
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
int device_pm_nop(struct device *unused_device, int unused_policy)
{
return 0;
}
int device_control_nop(struct device *unused_device,
uint32_t unused_ctrl_command, void *unused_context)
{
return 0;
}
void device_list_get(struct device **device_list, int *device_count)
{
*device_list = __device_init_start;
*device_count = __device_init_end - __device_init_start;
}
int device_any_busy_check(void)
{
int i = 0;
for (i = 0; i < DEVICE_BUSY_SIZE; i++) {
if (__device_busy_start[i] != 0) {
return -EBUSY;
}
}
return 0;
}
int device_busy_check(struct device *chk_dev)
{
if (atomic_test_bit((const atomic_t *)__device_busy_start,
(chk_dev - __device_init_start))) {
return -EBUSY;
}
return 0;
}
#endif
void device_busy_set(struct device *busy_dev)
{
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
atomic_set_bit((atomic_t *) __device_busy_start,
(busy_dev - __device_init_start));
#endif
}
void device_busy_clear(struct device *busy_dev)
{
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
atomic_clear_bit((atomic_t *) __device_busy_start,
(busy_dev - __device_init_start));
#endif
}
#include "../unified/device.c"


@@ -1,30 +1 @@
/*
* Copyright (c) 2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file
*
* @brief Per-thread errno accessor function
*
* Allow accessing the errno for the current thread without involving
* context switching.
*/
#include <nano_private.h>
int *_get_errno(void)
{
return &_nanokernel.current->errno_var;
}
#include "../unified/errno.c"


@@ -1,151 +1 @@
/*
* Copyright (c) 2015 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Event logger support.
*/
#include <misc/event_logger.h>
#include <misc/ring_buffer.h>
void sys_event_logger_init(struct event_logger *logger,
uint32_t *logger_buffer, uint32_t buffer_size)
{
sys_ring_buf_init(&logger->ring_buf, buffer_size, logger_buffer);
nano_sem_init(&(logger->sync_sema));
}
static void event_logger_put(struct event_logger *logger, uint16_t event_id,
uint32_t *event_data, uint8_t data_size,
void (*sem_give_fn)(struct nano_sem *))
{
int ret;
unsigned int key;
key = irq_lock();
ret = sys_ring_buf_put(&logger->ring_buf, event_id,
logger->ring_buf.dropped_put_count, event_data,
data_size);
if (ret == 0) {
logger->ring_buf.dropped_put_count = 0;
/* inform that there is event data available on the buffer */
sem_give_fn(&(logger->sync_sema));
}
irq_unlock(key);
}
void sys_event_logger_put(struct event_logger *logger, uint16_t event_id,
uint32_t *event_data, uint8_t data_size)
{
event_logger_put(logger, event_id, event_data, data_size, nano_sem_give);
}
/**
* @brief Send an event message to the logger with non-preemptible
* behaviour.
*
* @details Add an event message to the ring buffer and signal the sync
* semaphore using the internal function _sem_give_non_preemptible to inform
* that there are event messages available, avoiding the preemptible
* behaviour when the function is called from a task. This function
* should only be used for special cases where sys_event_logger_put()
* does not meet the needs.
*
* @param logger Pointer to the event logger used.
* @param event_id The identification of the profiler event.
* @param data Pointer to the data of the message.
* @param data_size Size of the buffer in 32-bit words.
*
* @return No return value.
*/
void _sys_event_logger_put_non_preemptible(struct event_logger *logger,
uint16_t event_id, uint32_t *event_data, uint8_t data_size)
{
extern void _sem_give_non_preemptible(struct nano_sem *sem);
event_logger_put(logger, event_id, event_data, data_size,
_sem_give_non_preemptible);
}
static int event_logger_get(struct event_logger *logger,
uint16_t *event_id, uint8_t *dropped_event_count,
uint32_t *buffer, uint8_t *buffer_size)
{
int ret;
ret = sys_ring_buf_get(&logger->ring_buf, event_id, dropped_event_count,
buffer, buffer_size);
if (likely(!ret)) {
return *buffer_size;
}
switch (ret) {
case -EMSGSIZE:
/* if the user can not retrieve the message, we increase the
* semaphore to indicate that the message remains in the buffer
*/
nano_fiber_sem_give(&(logger->sync_sema));
return -EMSGSIZE;
case -EAGAIN:
return 0;
default:
return ret;
}
}
int sys_event_logger_get(struct event_logger *logger, uint16_t *event_id,
uint8_t *dropped_event_count, uint32_t *buffer,
uint8_t *buffer_size)
{
if (nano_fiber_sem_take(&(logger->sync_sema), TICKS_NONE)) {
return event_logger_get(logger, event_id, dropped_event_count,
buffer, buffer_size);
}
return 0;
}
int sys_event_logger_get_wait(struct event_logger *logger, uint16_t *event_id,
uint8_t *dropped_event_count, uint32_t *buffer,
uint8_t *buffer_size)
{
nano_fiber_sem_take(&(logger->sync_sema), TICKS_UNLIMITED);
return event_logger_get(logger, event_id, dropped_event_count, buffer,
buffer_size);
}
#ifdef CONFIG_NANO_TIMEOUTS
int sys_event_logger_get_wait_timeout(struct event_logger *logger,
uint16_t *event_id,
uint8_t *dropped_event_count,
uint32_t *buffer, uint8_t *buffer_size,
uint32_t timeout)
{
if (nano_fiber_sem_take(&(logger->sync_sema), timeout)) {
return event_logger_get(logger, event_id, dropped_event_count,
buffer, buffer_size);
}
return 0;
}
#endif /* CONFIG_NANO_TIMEOUTS */
#include "../unified/event_logger.c"


@@ -1,235 +1 @@
/* int_latency_bench.c - interrupt latency benchmark support */
/*
* Copyright (c) 2012-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "toolchain.h"
#include "sections.h"
#include <stdint.h> /* uint32_t */
#include <limits.h> /* ULONG_MAX */
#include <misc/printk.h> /* printk */
#include <sys_clock.h>
#include <drivers/system_timer.h>
#define NB_CACHE_WARMING_DRY_RUN 7
/*
* Timestamp corresponding to when interrupts were turned off.
* A value of zero indicates interrupts are not currently locked.
*/
static uint32_t int_locked_timestamp;
/* stats tracking the minimum and maximum time when interrupts were locked */
static uint32_t int_locked_latency_min = ULONG_MAX;
static uint32_t int_locked_latency_max;
/* overhead added to intLock/intUnlock by this latency benchmark */
static uint32_t initial_start_delay;
static uint32_t nesting_delay;
static uint32_t stop_delay;
/* counter tracking intLock/intUnlock calls once interrupts are locked */
static uint32_t int_lock_unlock_nest;
/* indicate if the interrupt latency benchmark is ready to be used */
static uint32_t int_latency_bench_ready;
/* min amount of time it takes from HW interrupt generation to 'C' handler */
uint32_t _hw_irq_to_c_handler_latency = ULONG_MAX;
/**
*
* @brief Start tracking time spent with interrupts locked
*
* Calls to lock interrupts can nest, so this routine can be called numerous
* times before interrupts are unlocked
*
* @return N/A
*
*/
void _int_latency_start(void)
{
/* when interrupts are not already locked, take time stamp */
if (!int_locked_timestamp && int_latency_bench_ready) {
int_locked_timestamp = sys_cycle_get_32();
int_lock_unlock_nest = 0;
}
int_lock_unlock_nest++;
}
/**
*
* @brief Stop accumulating time spent while interrupts are locked
*
* This is only called once, when interrupts are being re-enabled
*
* @return N/A
*
*/
void _int_latency_stop(void)
{
uint32_t delta;
uint32_t delayOverhead;
uint32_t currentTime = sys_cycle_get_32();
/* ensure _int_latency_start() was invoked first */
if (int_locked_timestamp) {
/*
* time spent with interrupt lock is:
* (current time - time when interrupt got disabled first) -
* (delay when invoking start + number nested calls to intLock *
* time it takes to call intLatencyStart + intLatencyStop)
*/
delta = (currentTime - int_locked_timestamp);
/*
* Subtract the overhead introduced by the interrupt latency benchmark,
* but only when it does not exceed delta; delta can sometimes be
* smaller than the estimated overhead.
*/
delayOverhead =
(initial_start_delay +
((int_lock_unlock_nest - 1) * nesting_delay) + stop_delay);
if (delta >= delayOverhead)
delta -= delayOverhead;
/* update max */
if (delta > int_locked_latency_max)
int_locked_latency_max = delta;
/* update min */
if (delta < int_locked_latency_min)
int_locked_latency_min = delta;
/* interrupts are now enabled, get ready for next interrupt lock
*/
int_locked_timestamp = 0;
}
}
/**
*
* @brief Initialize interrupt latency benchmark
*
* @return N/A
*
*/
void int_latency_init(void)
{
uint32_t timeToReadTime;
uint32_t cacheWarming = NB_CACHE_WARMING_DRY_RUN;
int_latency_bench_ready = 1;
/*
* Measure the delay introduced by the interrupt latency benchmark a
* few times to ensure we get the best possible values. The overhead
* of invoking the latency hooks can change at runtime (e.g. cache hit
* or miss), but an estimated overhead is used to adjust the max
* interrupt latency. The overhead introduced by the benchmark is
* composed of three values: initial_start_delay, nesting_delay,
* stop_delay.
*/
while (cacheWarming) {
/* measure how much time it takes to read time */
timeToReadTime = sys_cycle_get_32();
timeToReadTime = sys_cycle_get_32() - timeToReadTime;
/* measure the time it takes to call _int_latency_start() and
* _int_latency_stop()
*/
initial_start_delay = sys_cycle_get_32();
_int_latency_start();
initial_start_delay =
sys_cycle_get_32() - initial_start_delay - timeToReadTime;
nesting_delay = sys_cycle_get_32();
_int_latency_start();
nesting_delay = sys_cycle_get_32() - nesting_delay - timeToReadTime;
stop_delay = sys_cycle_get_32();
_int_latency_stop();
stop_delay = sys_cycle_get_32() - stop_delay - timeToReadTime;
/* re-initialize globals to default values */
int_locked_latency_min = ULONG_MAX;
int_locked_latency_max = 0;
cacheWarming--;
}
}
/**
*
* @brief Dumps interrupt latency values
*
* Prints the measured interrupt latency values, then resets them so a
* new measurement interval can begin.
*
* @return N/A
*
*/
void int_latency_show(void)
{
uint32_t intHandlerLatency = 0;
if (!int_latency_bench_ready) {
printk("error: int_latency_init() has not been invoked\n");
return;
}
if (int_locked_latency_min != ULONG_MAX) {
if (_hw_irq_to_c_handler_latency == ULONG_MAX) {
intHandlerLatency = 0;
printk(" Min latency from hw interrupt up to 'C' int. "
"handler: "
"not measured\n");
} else {
intHandlerLatency = _hw_irq_to_c_handler_latency;
printk(" Min latency from hw interrupt up to 'C' int. "
"handler:"
" %d tcs = %d nsec\n",
intHandlerLatency,
SYS_CLOCK_HW_CYCLES_TO_NS(intHandlerLatency));
}
printk(" Max interrupt latency (includes hw int. to 'C' "
"handler):"
" %d tcs = %d nsec\n",
int_locked_latency_max + intHandlerLatency,
SYS_CLOCK_HW_CYCLES_TO_NS(int_locked_latency_max + intHandlerLatency));
printk(" Overhead substracted from Max int. latency:\n"
" for int. lock : %d tcs = %d nsec\n"
" each time int. lock nest: %d tcs = %d nsec\n"
" for int. unlocked : %d tcs = %d nsec\n",
initial_start_delay,
SYS_CLOCK_HW_CYCLES_TO_NS(initial_start_delay),
nesting_delay,
SYS_CLOCK_HW_CYCLES_TO_NS(nesting_delay),
stop_delay,
SYS_CLOCK_HW_CYCLES_TO_NS(stop_delay));
} else {
printk("interrupts were not locked and unlocked yet\n");
}
/*
* Start over with fresh values so that one extra-long path executed
* with interrupts disabled does not hide shorter paths with
* interrupts disabled.
*/
int_locked_latency_min = ULONG_MAX;
int_locked_latency_max = 0;
}
#include "../unified/int_latency_bench.c"


@@ -1,194 +1 @@
/*
* Copyright (c) 2015 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Kernel event logger support.
*/
#include <misc/kernel_event_logger.h>
#include <misc/util.h>
#include <init.h>
#include <nano_private.h>
#include <kernel_event_logger_arch.h>
uint32_t _sys_k_event_logger_buffer[CONFIG_KERNEL_EVENT_LOGGER_BUFFER_SIZE];
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
void *_collector_fiber;
#endif
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
uint32_t _sys_k_event_logger_sleep_start_time;
#endif
#ifdef CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
int _sys_k_event_logger_mask;
#endif
/**
* @brief Initialize the kernel event logger system.
*
* @details Initialize the ring buffer and the sync semaphore.
*
* @return No return value.
*/
static int _sys_k_event_logger_init(struct device *arg)
{
ARG_UNUSED(arg);
sys_event_logger_init(&sys_k_event_logger, _sys_k_event_logger_buffer,
CONFIG_KERNEL_EVENT_LOGGER_BUFFER_SIZE);
return 0;
}
SYS_INIT(_sys_k_event_logger_init,
NANOKERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP
sys_k_timer_func timer_func;
void sys_k_event_logger_set_timer(sys_k_timer_func func)
{
timer_func = func;
}
#endif
void sys_k_event_logger_put_timed(uint16_t event_id)
{
uint32_t data[1];
data[0] = _sys_k_get_time();
sys_event_logger_put(&sys_k_event_logger, event_id, data,
ARRAY_SIZE(data));
}
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
void _sys_k_event_logger_context_switch(void)
{
extern tNANO _nanokernel;
uint32_t data[2];
extern void _sys_event_logger_put_non_preemptible(
struct event_logger *logger,
uint16_t event_id,
uint32_t *event_data,
uint8_t data_size);
if (!sys_k_must_log_event(KERNEL_EVENT_LOGGER_CONTEXT_SWITCH_EVENT_ID)) {
return;
}
/* if the kernel event logger has not been initialized, we do nothing */
if (sys_k_event_logger.ring_buf.buf == NULL) {
return;
}
if (_collector_fiber != _nanokernel.current) {
data[0] = _sys_k_get_time();
data[1] = (uint32_t)_nanokernel.current;
/*
* The mechanism we use to log the kernel events uses a sync semaphore
* to inform that there are available events to be collected. The
* context switch event can be triggered from a task. When we
* signal a semaphore from a task and a fiber is waiting for
* that semaphore, a context switch is generated immediately. Due to
* the fact that we register the context switch event while the context
* switch is being processed, a new context switch can be generated
* before the kernel finishes processing the current context switch. We
* need to prevent this because the kernel is not able to handle it.
* The _sem_give_non_preemptible function does not trigger a context
* switch when we signal the semaphore from any type of thread. Using
* _sys_event_logger_put_non_preemptible function, that internally uses
* _sem_give_non_preemptible function for signaling the sync semaphore,
* allow us registering the context switch event without triggering any
* new context switch during the process.
*/
_sys_event_logger_put_non_preemptible(&sys_k_event_logger,
KERNEL_EVENT_LOGGER_CONTEXT_SWITCH_EVENT_ID, data,
ARRAY_SIZE(data));
}
}
void sys_k_event_logger_register_as_collector(void)
{
_collector_fiber = _nanokernel.current;
}
#endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
#ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
void _sys_k_event_logger_interrupt(void)
{
uint32_t data[2];
if (!sys_k_must_log_event(KERNEL_EVENT_LOGGER_INTERRUPT_EVENT_ID)) {
return;
}
/* if the kernel event logger has not been initialized, we do nothing */
if (sys_k_event_logger.ring_buf.buf == NULL) {
return;
}
data[0] = _sys_k_get_time();
data[1] = _sys_current_irq_key_get();
sys_k_event_logger_put(KERNEL_EVENT_LOGGER_INTERRUPT_EVENT_ID, data,
ARRAY_SIZE(data));
}
#endif /* CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT */
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
void _sys_k_event_logger_enter_sleep(void)
{
if (!sys_k_must_log_event(KERNEL_EVENT_LOGGER_SLEEP_EVENT_ID)) {
return;
}
_sys_k_event_logger_sleep_start_time = sys_cycle_get_32();
}
void _sys_k_event_logger_exit_sleep(void)
{
uint32_t data[3];
if (!sys_k_must_log_event(KERNEL_EVENT_LOGGER_SLEEP_EVENT_ID)) {
return;
}
if (_sys_k_event_logger_sleep_start_time != 0) {
data[0] = _sys_k_get_time();
data[1] = (sys_cycle_get_32() - _sys_k_event_logger_sleep_start_time)
/ sys_clock_hw_cycles_per_tick;
/* register the cause of exiting sleep mode */
data[2] = _sys_current_irq_key_get();
/*
* If _sys_k_event_logger_sleep_start_time is non-zero, the CPU was
* sleeping, so we reset it to indicate that the event was processed
* and that the next interrupt is not waking the CPU from sleep.
*/
_sys_k_event_logger_sleep_start_time = 0;
sys_k_event_logger_put(KERNEL_EVENT_LOGGER_SLEEP_EVENT_ID, data,
ARRAY_SIZE(data));
}
}
#endif /* CONFIG_KERNEL_EVENT_LOGGER_SLEEP */
#include "../unified/kernel_event_logger.c"


@@ -1,105 +1 @@
/* ring_buffer.c: Simple ring buffer API */
/*
* Copyright (c) 2015 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <misc/ring_buffer.h>
/**
* Internal data structure for a buffer header.
*
* We want all of this to fit in a single uint32_t. Every item stored in the
* ring buffer will be one of these headers plus any extra data supplied
*/
struct ring_element {
uint32_t type :16; /**< Application-specific */
uint32_t length :8; /**< length in 32-bit chunks */
uint32_t value :8; /**< Room for small integral values */
};
int sys_ring_buf_put(struct ring_buf *buf, uint16_t type, uint8_t value,
uint32_t *data, uint8_t size32)
{
uint32_t i, space, index, rc;
space = sys_ring_buf_space_get(buf);
if (space >= (size32 + 1)) {
struct ring_element *header =
(struct ring_element *)&buf->buf[buf->tail];
header->type = type;
header->length = size32;
header->value = value;
if (likely(buf->mask)) {
for (i = 0; i < size32; ++i) {
index = (i + buf->tail + 1) & buf->mask;
buf->buf[index] = data[i];
}
buf->tail = (buf->tail + size32 + 1) & buf->mask;
} else {
for (i = 0; i < size32; ++i) {
index = (i + buf->tail + 1) % buf->size;
buf->buf[index] = data[i];
}
buf->tail = (buf->tail + size32 + 1) % buf->size;
}
rc = 0;
} else {
buf->dropped_put_count++;
rc = -EMSGSIZE;
}
return rc;
}
int sys_ring_buf_get(struct ring_buf *buf, uint16_t *type, uint8_t *value,
uint32_t *data, uint8_t *size32)
{
struct ring_element *header;
uint32_t i, index;
if (sys_ring_buf_is_empty(buf)) {
return -EAGAIN;
}
header = (struct ring_element *) &buf->buf[buf->head];
if (header->length > *size32) {
*size32 = header->length;
return -EMSGSIZE;
}
*size32 = header->length;
*type = header->type;
*value = header->value;
if (likely(buf->mask)) {
for (i = 0; i < header->length; ++i) {
index = (i + buf->head + 1) & buf->mask;
data[i] = buf->buf[index];
}
buf->head = (buf->head + header->length + 1) & buf->mask;
} else {
for (i = 0; i < header->length; ++i) {
index = (i + buf->head + 1) % buf->size;
data[i] = buf->buf[index];
}
buf->head = (buf->head + header->length + 1) % buf->size;
}
return 0;
}
#include "../unified/ring_buffer.c"


@@ -1,36 +1 @@
/* version.c */
/*
* Copyright (c) 1997-2010, 2012-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include "version.h" /* generated by MAKE, at compile time */
static uint32_t kernel_version = KERNELVERSION;
/**
*
* @brief Return the kernel version of the present build
*
* The kernel version is a four-byte value, whose format is described in the
* file "kernel_version.h".
*
* @return kernel version
*/
uint32_t sys_kernel_version_get(void)
{
return kernel_version;
}
#include "../unified/version.c"


@@ -1 +1,362 @@
#include "../nanokernel/atomic_c.c"
/*
* Copyright (c) 2016 Intel Corporation
* Copyright (c) 2011-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file Atomic ops in pure C
*
* This module provides the atomic operators for processors
* which do not support native atomic operations.
*
* The atomic operations are guaranteed to be atomic with respect
* to interrupt service routines, and to operations performed by peer
* processors.
*
* (originally from x86's atomic.c)
*/
#include <atomic.h>
#include <toolchain.h>
#include <arch/cpu.h>
/**
*
* @brief Atomic compare-and-set primitive
*
* This routine provides the compare-and-set operator. If the original value at
* <target> equals <oldValue>, then <newValue> is stored at <target> and the
* function returns 1.
*
* If the original value at <target> does not equal <oldValue>, then the store
* is not done and the function returns 0.
*
* The reading of the original value at <target>, the comparison,
* and the write of the new value (if it occurs) all happen atomically with
* respect to both interrupts and accesses of other processors to <target>.
*
* @param target address to be tested
* @param old_value value to compare against
* @param new_value value to write if the comparison succeeds
* @return Returns 1 if <new_value> is written, 0 otherwise.
*/
int atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value)
{
unsigned int key;
int ret = 0;
key = irq_lock();
if (*target == old_value) {
*target = new_value;
ret = 1;
}
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic addition primitive
*
* This routine provides the atomic addition operator. The <value> is
* atomically added to the value at <target>, placing the result at <target>,
* and the old value from <target> is returned.
*
* @param target memory location to add to
* @param value the value to add
*
* @return The previous value from <target>
*/
atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target += value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic subtraction primitive
*
* This routine provides the atomic subtraction operator. The <value> is
* atomically subtracted from the value at <target>, placing the result at
* <target>, and the old value from <target> is returned.
*
* @param target the memory location to subtract from
* @param value the value to subtract
*
* @return The previous value from <target>
*/
atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target -= value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic increment primitive
*
* @param target memory location to increment
*
* This routine provides the atomic increment operator. The value at <target>
* is atomically incremented by 1, and the old value from <target> is returned.
*
* @return The value from <target> before the increment
*/
atomic_val_t atomic_inc(atomic_t *target)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
(*target)++;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic decrement primitive
*
* @param target memory location to decrement
*
* This routine provides the atomic decrement operator. The value at <target>
* is atomically decremented by 1, and the old value from <target> is returned.
*
* @return The value from <target> prior to the decrement
*/
atomic_val_t atomic_dec(atomic_t *target)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
(*target)--;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic get primitive
*
* @param target memory location to read from
*
* This routine provides the atomic get primitive to atomically read
* a value from <target>. It simply does an ordinary load. Note that <target>
* is expected to be aligned to a 4-byte boundary.
*
* @return The value read from <target>
*/
atomic_val_t atomic_get(const atomic_t *target)
{
return *target;
}
/**
*
* @brief Atomic get-and-set primitive
*
* This routine provides the atomic set operator. The <value> is atomically
* written at <target> and the previous value at <target> is returned.
*
* @param target the memory location to write to
* @param value the value to write
*
* @return The previous value from <target>
*/
atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target = value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic clear primitive
*
* This routine provides the atomic clear operator. The value of 0 is atomically
* written at <target> and the previous value at <target> is returned. (Hence,
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
*
* @param target the memory location to write
*
* @return The previous value from <target>
*/
atomic_val_t atomic_clear(atomic_t *target)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target = 0;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic bitwise inclusive OR primitive
*
* This routine provides the atomic bitwise inclusive OR operator. The <value>
* is atomically bitwise OR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to OR
*
* @return The previous value from <target>
*/
atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target |= value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic bitwise exclusive OR (XOR) primitive
*
* This routine provides the atomic bitwise exclusive OR operator. The <value>
* is atomically bitwise XOR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to XOR
*
* @return The previous value from <target>
*/
atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target ^= value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic bitwise AND primitive
*
* This routine provides the atomic bitwise AND operator. The <value> is
* atomically bitwise AND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to AND
*
* @return The previous value from <target>
*/
atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target &= value;
irq_unlock(key);
return ret;
}
/**
*
* @brief Atomic bitwise NAND primitive
*
* This routine provides the atomic bitwise NAND operator. The <value> is
* atomically bitwise NAND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to NAND
*
* @return The previous value from <target>
*/
atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
{
unsigned int key;
atomic_val_t ret;
key = irq_lock();
ret = *target;
*target = ~(*target & value);
irq_unlock(key);
return ret;
}


@@ -1 +1,66 @@
#include "../nanokernel/compiler_stack_protect.c"
/*
* Copyright (c) 2012-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Compiler stack protection (kernel part)
*
* This module provides functions to support compiler stack protection
* using canaries. This feature is enabled with configuration
* CONFIG_STACK_CANARIES=y.
*
* When this feature is enabled, the compiler generated code refers to
* function __stack_chk_fail and global variable __stack_chk_guard.
*/
#include <toolchain.h> /* compiler specific configurations */
#include <nano_private.h>
#include <toolchain.h>
#include <sections.h>
/**
*
* @brief Stack canary error handler
*
* This function is invoked when a stack canary error is detected.
*
* @return Does not return
*/
void FUNC_NORETURN _StackCheckHandler(void)
{
/* Stack canary error is a software fatal condition; treat it as such.
*/
_NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, &_default_esf);
}
/* Global variable */
/*
* Symbol referenced by GCC compiler generated code for canary value.
* The canary value gets initialized in _Cstart().
*/
void __noinit *__stack_chk_guard;
/**
*
* @brief Referenced by GCC compiler generated code
*
* This routine is invoked when a stack canary error is detected, indicating
* a buffer overflow or stack corruption problem.
*/
FUNC_ALIAS(_StackCheckHandler, __stack_chk_fail, void);


@@ -1 +1,137 @@
#include "../nanokernel/device.c"
/*
* Copyright (c) 2015-2016 Intel Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <string.h>
#include <device.h>
#include <misc/util.h>
#include <atomic.h>
extern struct device __device_init_start[];
extern struct device __device_PRIMARY_start[];
extern struct device __device_SECONDARY_start[];
extern struct device __device_NANOKERNEL_start[];
extern struct device __device_MICROKERNEL_start[];
extern struct device __device_APPLICATION_start[];
extern struct device __device_init_end[];
static struct device *config_levels[] = {
__device_PRIMARY_start,
__device_SECONDARY_start,
__device_NANOKERNEL_start,
__device_MICROKERNEL_start,
__device_APPLICATION_start,
__device_init_end,
};
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
struct device_pm_ops device_pm_ops_nop = {device_pm_nop, device_pm_nop};
extern uint32_t __device_busy_start[];
extern uint32_t __device_busy_end[];
#define DEVICE_BUSY_SIZE (__device_busy_end - __device_busy_start)
#endif
/**
* @brief Execute all the device initialization functions at a given level
*
* @details Invokes the initialization routine for each device object
* created by the DEVICE_INIT() macro using the specified level.
* The linker script places the device objects in memory in the order
* they need to be invoked, with symbols indicating where one level leaves
* off and the next one begins.
*
* @param level init level to run.
*/
void _sys_device_do_config_level(int level)
{
struct device *info;
for (info = config_levels[level]; info < config_levels[level+1]; info++) {
struct device_config *device = info->config;
device->init(info);
}
}
struct device *device_get_binding(const char *name)
{
struct device *info;
for (info = __device_init_start; info != __device_init_end; info++) {
if (info->driver_api && !strcmp(name, info->config->name)) {
return info;
}
}
return NULL;
}
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
int device_pm_nop(struct device *unused_device, int unused_policy)
{
return 0;
}
int device_control_nop(struct device *unused_device,
uint32_t unused_ctrl_command, void *unused_context)
{
return 0;
}
void device_list_get(struct device **device_list, int *device_count)
{
*device_list = __device_init_start;
*device_count = __device_init_end - __device_init_start;
}
int device_any_busy_check(void)
{
int i = 0;
for (i = 0; i < DEVICE_BUSY_SIZE; i++) {
if (__device_busy_start[i] != 0) {
return -EBUSY;
}
}
return 0;
}
int device_busy_check(struct device *chk_dev)
{
if (atomic_test_bit((const atomic_t *)__device_busy_start,
(chk_dev - __device_init_start))) {
return -EBUSY;
}
return 0;
}
#endif
void device_busy_set(struct device *busy_dev)
{
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
atomic_set_bit((atomic_t *) __device_busy_start,
(busy_dev - __device_init_start));
#endif
}
void device_busy_clear(struct device *busy_dev)
{
#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
atomic_clear_bit((atomic_t *) __device_busy_start,
(busy_dev - __device_init_start));
#endif
}


@@ -1,7 +1,38 @@
#include "../nanokernel/errno.c"
/*
* Copyright (c) 2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file
*
* @brief Per-thread errno accessor function
*
* Allow accessing the errno for the current thread without involving
* context switching.
*/
#include <nano_private.h>
#ifdef CONFIG_KERNEL_V2
/*
* Define _k_neg_eagain for use in assembly files as errno.h is
* not assembly language safe.
*/
const int _k_neg_eagain = -EAGAIN;
#endif
int *_get_errno(void)
{
return &_nanokernel.current->errno_var;
}


@@ -1 +1,151 @@
#include "../nanokernel/event_logger.c"
/*
* Copyright (c) 2015 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Event logger support.
*/
#include <misc/event_logger.h>
#include <misc/ring_buffer.h>
void sys_event_logger_init(struct event_logger *logger,
uint32_t *logger_buffer, uint32_t buffer_size)
{
sys_ring_buf_init(&logger->ring_buf, buffer_size, logger_buffer);
nano_sem_init(&(logger->sync_sema));
}
static void event_logger_put(struct event_logger *logger, uint16_t event_id,
uint32_t *event_data, uint8_t data_size,
void (*sem_give_fn)(struct nano_sem *))
{
int ret;
unsigned int key;
key = irq_lock();
ret = sys_ring_buf_put(&logger->ring_buf, event_id,
logger->ring_buf.dropped_put_count, event_data,
data_size);
if (ret == 0) {
logger->ring_buf.dropped_put_count = 0;
/* inform that there is event data available on the buffer */
sem_give_fn(&(logger->sync_sema));
}
irq_unlock(key);
}
void sys_event_logger_put(struct event_logger *logger, uint16_t event_id,
uint32_t *event_data, uint8_t data_size)
{
event_logger_put(logger, event_id, event_data, data_size, nano_sem_give);
}
/**
* @brief Send an event message to the logger with non-preemptible
* behaviour.
*
* @details Add an event message to the ring buffer and signal the sync
* semaphore using the internal function _sem_give_non_preemptible to inform
* that there are event messages available, avoiding the preemptible
* behaviour when the function is called from a task. This function
* should only be used for special cases where sys_event_logger_put()
* does not meet the needs.
*
* @param logger Pointer to the event logger used.
* @param event_id The identification of the profiler event.
* @param data Pointer to the data of the message.
* @param data_size Size of the buffer in 32-bit words.
*
* @return No return value.
*/
void _sys_event_logger_put_non_preemptible(struct event_logger *logger,
uint16_t event_id, uint32_t *event_data, uint8_t data_size)
{
extern void _sem_give_non_preemptible(struct nano_sem *sem);
event_logger_put(logger, event_id, event_data, data_size,
_sem_give_non_preemptible);
}
static int event_logger_get(struct event_logger *logger,
uint16_t *event_id, uint8_t *dropped_event_count,
uint32_t *buffer, uint8_t *buffer_size)
{
int ret;
ret = sys_ring_buf_get(&logger->ring_buf, event_id, dropped_event_count,
buffer, buffer_size);
if (likely(!ret)) {
return *buffer_size;
}
switch (ret) {
case -EMSGSIZE:
/* if the user can not retrieve the message, we increase the
* semaphore to indicate that the message remains in the buffer
*/
nano_fiber_sem_give(&(logger->sync_sema));
return -EMSGSIZE;
case -EAGAIN:
return 0;
default:
return ret;
}
}
int sys_event_logger_get(struct event_logger *logger, uint16_t *event_id,
uint8_t *dropped_event_count, uint32_t *buffer,
uint8_t *buffer_size)
{
if (nano_fiber_sem_take(&(logger->sync_sema), TICKS_NONE)) {
return event_logger_get(logger, event_id, dropped_event_count,
buffer, buffer_size);
}
return 0;
}
int sys_event_logger_get_wait(struct event_logger *logger, uint16_t *event_id,
uint8_t *dropped_event_count, uint32_t *buffer,
uint8_t *buffer_size)
{
nano_fiber_sem_take(&(logger->sync_sema), TICKS_UNLIMITED);
return event_logger_get(logger, event_id, dropped_event_count, buffer,
buffer_size);
}
#ifdef CONFIG_NANO_TIMEOUTS
int sys_event_logger_get_wait_timeout(struct event_logger *logger,
uint16_t *event_id,
uint8_t *dropped_event_count,
uint32_t *buffer, uint8_t *buffer_size,
uint32_t timeout)
{
if (nano_fiber_sem_take(&(logger->sync_sema), timeout)) {
return event_logger_get(logger, event_id, dropped_event_count,
buffer, buffer_size);
}
return 0;
}
#endif /* CONFIG_NANO_TIMEOUTS */


@@ -1 +1,235 @@
#include "../nanokernel/int_latency_bench.c"
/* int_latency_bench.c - interrupt latency benchmark support */
/*
* Copyright (c) 2012-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "toolchain.h"
#include "sections.h"
#include <stdint.h> /* uint32_t */
#include <limits.h> /* ULONG_MAX */
#include <misc/printk.h> /* printk */
#include <sys_clock.h>
#include <drivers/system_timer.h>
#define NB_CACHE_WARMING_DRY_RUN 7
/*
* Timestamp corresponding to when interrupts were turned off.
* A value of zero indicates interrupts are not currently locked.
*/
static uint32_t int_locked_timestamp;
/* stats tracking the minimum and maximum time when interrupts were locked */
static uint32_t int_locked_latency_min = ULONG_MAX;
static uint32_t int_locked_latency_max;
/* overhead added to intLock/intUnlock by this latency benchmark */
static uint32_t initial_start_delay;
static uint32_t nesting_delay;
static uint32_t stop_delay;
/* counter tracking intLock/intUnlock calls once interrupts are locked */
static uint32_t int_lock_unlock_nest;
/* indicate if the interrupt latency benchmark is ready to be used */
static uint32_t int_latency_bench_ready;
/* min amount of time it takes from HW interrupt generation to 'C' handler */
uint32_t _hw_irq_to_c_handler_latency = ULONG_MAX;
/**
*
* @brief Start tracking time spent with interrupts locked
*
* Calls to lock interrupts can nest, so this routine can be called numerous
* times before interrupts are unlocked
*
* @return N/A
*
*/
void _int_latency_start(void)
{
/* when interrupts are not already locked, take time stamp */
if (!int_locked_timestamp && int_latency_bench_ready) {
int_locked_timestamp = sys_cycle_get_32();
int_lock_unlock_nest = 0;
}
int_lock_unlock_nest++;
}
/**
*
* @brief Stop accumulating time spent while interrupts are locked
*
* This is only called once, when interrupts are being re-enabled
*
* @return N/A
*
*/
void _int_latency_stop(void)
{
uint32_t delta;
uint32_t delayOverhead;
uint32_t currentTime = sys_cycle_get_32();
/* ensure _int_latency_start() was invoked first */
if (int_locked_timestamp) {
/*
* time spent with interrupt lock is:
* (current time - time when interrupt got disabled first) -
* (delay when invoking start + number nested calls to intLock *
* time it takes to call intLatencyStart + intLatencyStop)
*/
delta = (currentTime - int_locked_timestamp);
/*
* Subtract the overhead introduced by the interrupt latency benchmark,
* but only when it does not exceed delta; delta can sometimes be
* smaller than the estimated overhead.
*/
delayOverhead =
(initial_start_delay +
((int_lock_unlock_nest - 1) * nesting_delay) + stop_delay);
if (delta >= delayOverhead)
delta -= delayOverhead;
/* update max */
if (delta > int_locked_latency_max)
int_locked_latency_max = delta;
/* update min */
if (delta < int_locked_latency_min)
int_locked_latency_min = delta;
/* interrupts are now enabled, get ready for next interrupt lock
*/
int_locked_timestamp = 0;
}
}
/**
*
* @brief Initialize interrupt latency benchmark
*
* @return N/A
*
*/
void int_latency_init(void)
{
uint32_t timeToReadTime;
uint32_t cacheWarming = NB_CACHE_WARMING_DRY_RUN;
int_latency_bench_ready = 1;
/*
* Measure the delay introduced by the interrupt latency benchmark a
* few times to ensure we get the best possible values. The overhead
* of invoking the latency hooks can change at runtime (e.g. cache hit
* or miss), but an estimated overhead is used to adjust the max
* interrupt latency. The overhead introduced by the benchmark is
* composed of three values: initial_start_delay, nesting_delay,
* stop_delay.
*/
while (cacheWarming) {
/* measure how much time it takes to read time */
timeToReadTime = sys_cycle_get_32();
timeToReadTime = sys_cycle_get_32() - timeToReadTime;
/* measure how much time calls to _int_latency_start() and
 * _int_latency_stop() take
 */
initial_start_delay = sys_cycle_get_32();
_int_latency_start();
initial_start_delay =
sys_cycle_get_32() - initial_start_delay - timeToReadTime;
nesting_delay = sys_cycle_get_32();
_int_latency_start();
nesting_delay = sys_cycle_get_32() - nesting_delay - timeToReadTime;
stop_delay = sys_cycle_get_32();
_int_latency_stop();
stop_delay = sys_cycle_get_32() - stop_delay - timeToReadTime;
/* re-initialize globals to default values */
int_locked_latency_min = ULONG_MAX;
int_locked_latency_max = 0;
cacheWarming--;
}
}
/**
*
 * @brief Dump interrupt latency values
 *
 * Prints the minimum and maximum time spent with interrupts locked, along
 * with the benchmark overhead subtracted from those measurements, then
 * resets the statistics.
*
* @return N/A
*
*/
void int_latency_show(void)
{
uint32_t intHandlerLatency = 0;
if (!int_latency_bench_ready) {
printk("error: int_latency_init() has not been invoked\n");
return;
}
if (int_locked_latency_min != ULONG_MAX) {
if (_hw_irq_to_c_handler_latency == ULONG_MAX) {
intHandlerLatency = 0;
printk(" Min latency from hw interrupt up to 'C' int. "
"handler: "
"not measured\n");
} else {
intHandlerLatency = _hw_irq_to_c_handler_latency;
printk(" Min latency from hw interrupt up to 'C' int. "
"handler:"
" %d tcs = %d nsec\n",
intHandlerLatency,
SYS_CLOCK_HW_CYCLES_TO_NS(intHandlerLatency));
}
printk(" Max interrupt latency (includes hw int. to 'C' "
"handler):"
" %d tcs = %d nsec\n",
int_locked_latency_max + intHandlerLatency,
SYS_CLOCK_HW_CYCLES_TO_NS(int_locked_latency_max + intHandlerLatency));
printk(" Overhead subtracted from Max int. latency:\n"
" for int. lock : %d tcs = %d nsec\n"
" each time int. lock nest: %d tcs = %d nsec\n"
" for int. unlocked : %d tcs = %d nsec\n",
initial_start_delay,
SYS_CLOCK_HW_CYCLES_TO_NS(initial_start_delay),
nesting_delay,
SYS_CLOCK_HW_CYCLES_TO_NS(nesting_delay),
stop_delay,
SYS_CLOCK_HW_CYCLES_TO_NS(stop_delay));
} else {
printk("interrupts were not locked and unlocked yet\n");
}
/*
 * Start over with fresh values so that one extra-long path executed with
 * interrupts disabled does not hide shorter paths with interrupts
 * disabled.
 */
int_locked_latency_min = ULONG_MAX;
int_locked_latency_max = 0;
}
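/*
 * Usage sketch (illustrative only, not part of the original file): the
 * benchmark is assumed to be driven by the irq_lock()/irq_unlock() hooks
 * when CONFIG_INT_LATENCY_BENCHMARK is enabled, so an application only
 * needs to calibrate it and dump the results. The demo function name is
 * hypothetical and the block is kept under #if 0.
 */
#if 0
void demo_int_latency(void)
{
	unsigned int key;

	int_latency_init();	/* calibrate the benchmark overhead */

	key = irq_lock();	/* _int_latency_start() runs from the hook */
	/* ... critical section under test ... */
	irq_unlock(key);	/* _int_latency_stop() runs from the hook */

	int_latency_show();	/* print min/max latency and overhead */
}
#endif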


@@ -1 +1,194 @@
#include "../nanokernel/kernel_event_logger.c"
/*
* Copyright (c) 2015 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Kernel event logger support.
*/
#include <misc/kernel_event_logger.h>
#include <misc/util.h>
#include <init.h>
#include <nano_private.h>
#include <kernel_event_logger_arch.h>
uint32_t _sys_k_event_logger_buffer[CONFIG_KERNEL_EVENT_LOGGER_BUFFER_SIZE];
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
void *_collector_fiber;
#endif
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
uint32_t _sys_k_event_logger_sleep_start_time;
#endif
#ifdef CONFIG_KERNEL_EVENT_LOGGER_DYNAMIC
int _sys_k_event_logger_mask;
#endif
/**
* @brief Initialize the kernel event logger system.
*
* @details Initialize the ring buffer and the sync semaphore.
*
* @return No return value.
*/
static int _sys_k_event_logger_init(struct device *arg)
{
ARG_UNUSED(arg);
sys_event_logger_init(&sys_k_event_logger, _sys_k_event_logger_buffer,
CONFIG_KERNEL_EVENT_LOGGER_BUFFER_SIZE);
return 0;
}
SYS_INIT(_sys_k_event_logger_init,
NANOKERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP
sys_k_timer_func timer_func;
void sys_k_event_logger_set_timer(sys_k_timer_func func)
{
timer_func = func;
}
#endif
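/*
 * Illustrative sketch (not part of the original file): with
 * CONFIG_KERNEL_EVENT_LOGGER_CUSTOM_TIMESTAMP enabled, an application can
 * supply its own timestamp source through sys_k_event_logger_set_timer().
 * The callback is assumed to return a 32-bit timestamp; the demo names
 * below are hypothetical and the block is kept under #if 0.
 */
#if 0
static uint32_t demo_timestamp_get(void)
{
	/* any monotonic 32-bit counter works, e.g. the HW cycle counter */
	return sys_cycle_get_32();
}

static void demo_install_timestamp(void)
{
	sys_k_event_logger_set_timer(demo_timestamp_get);
}
#endif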
void sys_k_event_logger_put_timed(uint16_t event_id)
{
uint32_t data[1];
data[0] = _sys_k_get_time();
sys_event_logger_put(&sys_k_event_logger, event_id, data,
ARRAY_SIZE(data));
}
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
void _sys_k_event_logger_context_switch(void)
{
extern tNANO _nanokernel;
uint32_t data[2];
extern void _sys_event_logger_put_non_preemptible(
struct event_logger *logger,
uint16_t event_id,
uint32_t *event_data,
uint8_t data_size);
if (!sys_k_must_log_event(KERNEL_EVENT_LOGGER_CONTEXT_SWITCH_EVENT_ID)) {
return;
}
/* if the kernel event logger has not been initialized, we do nothing */
if (sys_k_event_logger.ring_buf.buf == NULL) {
return;
}
if (_collector_fiber != _nanokernel.current) {
data[0] = _sys_k_get_time();
data[1] = (uint32_t)_nanokernel.current;
/*
 * The mechanism used to log kernel events relies on a sync semaphore to
 * signal that there are events available to be collected. The context
 * switch event can be triggered from a task. When we signal a semaphore
 * from a task and a fiber is waiting on that semaphore, a context switch
 * is generated immediately. Because we register the context switch event
 * while the context switch is being processed, a new context switch could
 * be generated before the kernel finishes processing the current one,
 * which the kernel cannot handle. The _sem_give_non_preemptible function
 * does not trigger a context switch when the semaphore is signaled from
 * any type of thread. Using _sys_event_logger_put_non_preemptible, which
 * internally uses _sem_give_non_preemptible to signal the sync semaphore,
 * allows the context switch event to be registered without triggering a
 * new context switch in the process.
 */
_sys_event_logger_put_non_preemptible(&sys_k_event_logger,
KERNEL_EVENT_LOGGER_CONTEXT_SWITCH_EVENT_ID, data,
ARRAY_SIZE(data));
}
}
void sys_k_event_logger_register_as_collector(void)
{
_collector_fiber = _nanokernel.current;
}
#endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
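/*
 * Illustrative sketch (not part of the original file): a collector fiber
 * registers itself so that its own context switches are not logged, then
 * drains events. The getter name and signature (sys_k_event_logger_get_wait)
 * follow the kernel event logger API of this era and are an assumption
 * here; the block is kept under #if 0.
 */
#if 0
static void demo_collector_fiber(int unused1, int unused2)
{
	uint16_t event_id;
	uint8_t dropped;
	uint32_t event_data[4];
	uint8_t data_size;

	sys_k_event_logger_register_as_collector();

	for (;;) {
		data_size = ARRAY_SIZE(event_data);
		if (sys_k_event_logger_get_wait(&event_id, &dropped,
						event_data, &data_size) > 0) {
			/* forward event_id/event_data to a console or host */
		}
	}
}
#endif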
#ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
void _sys_k_event_logger_interrupt(void)
{
uint32_t data[2];
if (!sys_k_must_log_event(KERNEL_EVENT_LOGGER_INTERRUPT_EVENT_ID)) {
return;
}
/* if the kernel event logger has not been initialized, we do nothing */
if (sys_k_event_logger.ring_buf.buf == NULL) {
return;
}
data[0] = _sys_k_get_time();
data[1] = _sys_current_irq_key_get();
sys_k_event_logger_put(KERNEL_EVENT_LOGGER_INTERRUPT_EVENT_ID, data,
ARRAY_SIZE(data));
}
#endif /* CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT */
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
void _sys_k_event_logger_enter_sleep(void)
{
if (!sys_k_must_log_event(KERNEL_EVENT_LOGGER_SLEEP_EVENT_ID)) {
return;
}
_sys_k_event_logger_sleep_start_time = sys_cycle_get_32();
}
void _sys_k_event_logger_exit_sleep(void)
{
uint32_t data[3];
if (!sys_k_must_log_event(KERNEL_EVENT_LOGGER_SLEEP_EVENT_ID)) {
return;
}
if (_sys_k_event_logger_sleep_start_time != 0) {
data[0] = _sys_k_get_time();
data[1] = (sys_cycle_get_32() - _sys_k_event_logger_sleep_start_time)
/ sys_clock_hw_cycles_per_tick;
/* register the cause of exiting sleep mode */
data[2] = _sys_current_irq_key_get();
/*
 * If _sys_k_event_logger_sleep_start_time is non-zero, the CPU was
 * sleeping, so we reset it to indicate that this event has been
 * processed and that the next interrupt is not the one waking the CPU.
 */
_sys_k_event_logger_sleep_start_time = 0;
sys_k_event_logger_put(KERNEL_EVENT_LOGGER_SLEEP_EVENT_ID, data,
ARRAY_SIZE(data));
}
}
#endif /* CONFIG_KERNEL_EVENT_LOGGER_SLEEP */

View file

@@ -1 +1,105 @@
#include "../nanokernel/ring_buffer.c"
/* ring_buffer.c: Simple ring buffer API */
/*
* Copyright (c) 2015 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <misc/ring_buffer.h>
/**
* Internal data structure for a buffer header.
*
* We want all of this to fit in a single uint32_t. Every item stored in the
 * ring buffer will be one of these headers plus any extra data supplied.
*/
struct ring_element {
uint32_t type :16; /**< Application-specific */
uint32_t length :8; /**< length in 32-bit chunks */
uint32_t value :8; /**< Room for small integral values */
};
int sys_ring_buf_put(struct ring_buf *buf, uint16_t type, uint8_t value,
uint32_t *data, uint8_t size32)
{
uint32_t i, space, index, rc;
space = sys_ring_buf_space_get(buf);
if (space >= (size32 + 1)) {
struct ring_element *header =
(struct ring_element *)&buf->buf[buf->tail];
header->type = type;
header->length = size32;
header->value = value;
if (likely(buf->mask)) {
for (i = 0; i < size32; ++i) {
index = (i + buf->tail + 1) & buf->mask;
buf->buf[index] = data[i];
}
buf->tail = (buf->tail + size32 + 1) & buf->mask;
} else {
for (i = 0; i < size32; ++i) {
index = (i + buf->tail + 1) % buf->size;
buf->buf[index] = data[i];
}
buf->tail = (buf->tail + size32 + 1) % buf->size;
}
rc = 0;
} else {
buf->dropped_put_count++;
rc = -EMSGSIZE;
}
return rc;
}
int sys_ring_buf_get(struct ring_buf *buf, uint16_t *type, uint8_t *value,
uint32_t *data, uint8_t *size32)
{
struct ring_element *header;
uint32_t i, index;
if (sys_ring_buf_is_empty(buf)) {
return -EAGAIN;
}
header = (struct ring_element *) &buf->buf[buf->head];
if (header->length > *size32) {
*size32 = header->length;
return -EMSGSIZE;
}
*size32 = header->length;
*type = header->type;
*value = header->value;
if (likely(buf->mask)) {
for (i = 0; i < header->length; ++i) {
index = (i + buf->head + 1) & buf->mask;
data[i] = buf->buf[index];
}
buf->head = (buf->head + header->length + 1) & buf->mask;
} else {
for (i = 0; i < header->length; ++i) {
index = (i + buf->head + 1) % buf->size;
data[i] = buf->buf[index];
}
buf->head = (buf->head + header->length + 1) % buf->size;
}
return 0;
}
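/*
 * Usage sketch (illustrative only, not part of the original file): a
 * power-of-two buffer lets the put/get paths above use the cheaper mask
 * arithmetic instead of the modulo fallback. The declaration macro
 * (SYS_RING_BUF_DECLARE_POW2) follows misc/ring_buffer.h of this era and
 * is an assumption here; the block is kept under #if 0.
 */
#if 0
SYS_RING_BUF_DECLARE_POW2(demo_ring_buf, 5);	/* 2^5 = 32 words of storage */

static void demo_ring_buf_usage(void)
{
	uint32_t payload[2] = { 0xdeadbeef, 0xcafef00d };
	uint32_t out[2];
	uint16_t type;
	uint8_t value;
	uint8_t size32 = ARRAY_SIZE(out);

	/* each message costs size32 + 1 words (one word for the header) */
	if (sys_ring_buf_put(&demo_ring_buf, 0x1234, 42, payload,
			     ARRAY_SIZE(payload)) == 0) {
		sys_ring_buf_get(&demo_ring_buf, &type, &value, out, &size32);
	}
}
#endif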

View file

@@ -1 +1,36 @@
#include "../nanokernel/version.c"
/* version.c */
/*
* Copyright (c) 1997-2010, 2012-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include "version.h" /* generated by MAKE, at compile time */
static uint32_t kernel_version = KERNELVERSION;
/**
*
* @brief Return the kernel version of the present build
*
* The kernel version is a four-byte value, whose format is described in the
* file "kernel_version.h".
*
* @return kernel version
*/
uint32_t sys_kernel_version_get(void)
{
return kernel_version;
}
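/*
 * Usage sketch (illustrative only, not part of the original file): the
 * 32-bit version word packs the major/minor/patchlevel bytes; the accessor
 * macros below come from kernel_version.h and are assumed here, and the
 * block is kept under #if 0.
 */
#if 0
static void demo_print_version(void)
{
	uint32_t ver = sys_kernel_version_get();

	/* printk() requires <misc/printk.h> */
	printk("kernel version %d.%d.%d\n",
	       SYS_KERNEL_VER_MAJOR(ver),
	       SYS_KERNEL_VER_MINOR(ver),
	       SYS_KERNEL_VER_PATCHLEVEL(ver));
}
#endif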