/*
 * Copyright (c) 2014 Wind River Systems, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file
 * @brief Private nanokernel definitions
 *
 * This file contains private nanokernel structure definitions and various
 * other definitions for the ARCv2 processor architecture.
 *
 * This file is also included by assembly language files which must #define
 * _ASMLANGUAGE before including this header file. Note that nanokernel
 * assembly source files obtain structure offset values via "absolute
 * symbols" in the offsets.o module.
 */
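
/*
 * Illustrative sketch (not part of this header): an assembly source file
 * would typically pull in only the macro definitions, along these lines:
 *
 *     #define _ASMLANGUAGE
 *     #include <nano_private.h>
 *
 * and would then reference structure members through the absolute offset
 * symbols generated into offsets.o rather than the C declarations below.
 */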

#ifndef _NANO_PRIVATE_H
#define _NANO_PRIVATE_H

#ifdef __cplusplus
extern "C" {
#endif

#include <toolchain.h>
#include <sections.h>
#include <arch/cpu.h>
#include <vector_table.h>

#ifndef _ASMLANGUAGE
#include <nanokernel.h>            /* public nanokernel API */
#include <../../../kernel/nanokernel/include/nano_internal.h>
#include <stdint.h>
#include <misc/util.h>
#include <misc/dlist.h>
#endif

#ifndef _ASMLANGUAGE

#ifdef CONFIG_THREAD_MONITOR
struct __thread_entry {
	_thread_entry_t pEntry;
	void *parameter1;
	void *parameter2;
	void *parameter3;
};
#endif /*CONFIG_THREAD_MONITOR*/

struct coop {
	/*
	 * Saved on the stack as part of handling a regular IRQ or by the kernel
	 * when calling the FIRQ return code.
	 */
};

struct irq_stack_frame {
	uint32_t r0;
	uint32_t r1;
	uint32_t r2;
	uint32_t r3;
	uint32_t r4;
	uint32_t r5;
	uint32_t r6;
	uint32_t r7;
	uint32_t r8;
	uint32_t r9;
	uint32_t r10;
	uint32_t r11;
	uint32_t r12;
	uint32_t r13;
	uint32_t blink;
	uint32_t lp_end;
	uint32_t lp_start;
	uint32_t lp_count;
#ifdef CONFIG_CODE_DENSITY
	/*
	 * Currently unsupported. This is where those registers are automatically
	 * pushed on the stack by the CPU when taking a regular IRQ.
	 */
	uint32_t ei_base;
	uint32_t ldi_base;
	uint32_t jli_base;
#endif
	uint32_t pc;
	uint32_t status32;
};

typedef struct irq_stack_frame tISF;

struct preempt {
	uint32_t sp; /* r28 */
};
typedef struct preempt tPreempt;

struct callee_saved {
	uint32_t r13;
	uint32_t r14;
	uint32_t r15;
	uint32_t r16;
	uint32_t r17;
	uint32_t r18;
	uint32_t r19;
	uint32_t r20;
	uint32_t r21;
	uint32_t r22;
	uint32_t r23;
	uint32_t r24;
	uint32_t r25;
	uint32_t r26;
	uint32_t fp; /* r27 */
	/* r28 is the stack pointer and saved separately */
	/* r29 is ILINK and does not need to be saved */
	uint32_t r30;
	/*
	 * No need to save r31 (blink): it is already pushed as either the pc or
	 * blink on an irq stack frame.
	 */
};
typedef struct callee_saved tCalleeSaved;

#endif /* _ASMLANGUAGE */

/* Bitmask definitions for the struct tcs->flags bit field */

#define FIBER 0x000
#define TASK 0x001         /* 1 = task, 0 = fiber */

#define INT_ACTIVE 0x002   /* 1 = execution context is interrupt handler */
#define EXC_ACTIVE 0x004   /* 1 = execution context is exception handler */
#define USE_FP 0x010       /* 1 = thread uses floating point unit */
#define PREEMPTIBLE 0x020  /* 1 = preemptible thread */
#define ESSENTIAL 0x200    /* 1 = system thread that must not abort */
#define NO_METRICS 0x400   /* 1 = _Swap() not to update task metrics */
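
/*
 * Illustrative sketch (assumed usage, not defined in this file): code holding
 * a struct tcs pointer can test these bits with ordinary mask operations,
 * for example
 *
 *     if (tcs->flags & TASK) {
 *             ... treat the thread as a task rather than a fiber ...
 *     }
 */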

/* stacks */

#define STACK_ALIGN_SIZE 4

#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
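
/*
 * For example, with STACK_ALIGN_SIZE of 4, STACK_ROUND_UP(13) yields 16 and
 * STACK_ROUND_DOWN(13) yields 12.
 */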

/*
 * Reason a thread has relinquished control: fibers can only be in the NONE
 * or COOP state, tasks can be in any one of the four.
 */
#define _CAUSE_NONE 0
#define _CAUSE_COOP 1
#define _CAUSE_RIRQ 2
#define _CAUSE_FIRQ 3

#ifndef _ASMLANGUAGE

struct tcs {
	struct tcs *link;          /* node in singly-linked list
	                            * _nanokernel.fibers
	                            */
	uint32_t flags;            /* bitmask of flags above */
	uint32_t intlock_key;      /* interrupt key when relinquishing control */
	int relinquish_cause;      /* one of the _CAUSE_xxxx definitions above */
	unsigned int return_value; /* return value from _Swap */
	int prio;                  /* fiber priority, -1 for a task */
#ifdef CONFIG_THREAD_CUSTOM_DATA
	void *custom_data;         /* available for custom use */
#endif
	struct coop coopReg;
	struct preempt preempReg;
#ifdef CONFIG_THREAD_MONITOR
	struct __thread_entry *entry; /* thread entry and parameters description */
	struct tcs *next_thread;   /* next item in list of ALL fiber+tasks */
#endif
#ifdef CONFIG_NANO_TIMEOUTS
	struct _nano_timeout nano_timeout;
#endif
#ifdef CONFIG_ERRNO
	int errno_var;
#endif
#ifdef CONFIG_ARC_STACK_CHECKING
	uint32_t stack_top;
#endif
#ifdef CONFIG_MICROKERNEL
	void *uk_task_ptr;
#endif
};

struct s_NANO {
	struct tcs *fiber;   /* singly linked list of runnable fibers */
	struct tcs *task;    /* current task the nanokernel knows about */
	struct tcs *current; /* currently scheduled thread (fiber or task) */

#ifdef CONFIG_THREAD_MONITOR
	struct tcs *threads; /* singly linked list of ALL fiber+tasks */
#endif

#ifdef CONFIG_FP_SHARING
	struct tcs *current_fp; /* thread (fiber or task) that owns the FP regs */
#endif

#ifdef CONFIG_SYS_POWER_MANAGEMENT
	int32_t idle;        /* Number of ticks for kernel idling */
#endif

	char *rirq_sp;       /* regular IRQ stack pointer base */

	/*
	 * FIRQ stack pointer is installed once in the second bank's SP, so
	 * there is no need to track it in _nanokernel.
	 */

#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
	sys_dlist_t timeout_q;
	int32_t task_timeout; /* ticks the pending task wants to wait on a nano timeout */
#endif
};

typedef struct s_NANO tNANO;
extern tNANO _nanokernel;

#ifdef CONFIG_CPU_ARCV2
#include <v2/cache.h>
#include <v2/irq.h>
#endif

static ALWAYS_INLINE void nanoArchInit(void)
{
	_icache_setup();
	_irq_setup();
}

/**
 *
 * @brief Set the return value for the specified fiber (inline)
 *
 * The register used to store the return value from a function call invocation
 * is set to <value>. It is assumed that the specified <fiber> is pending, and
 * thus the fiber's thread is stored in its struct tcs structure.
 *
 * @return N/A
 */
static ALWAYS_INLINE void fiberRtnValueSet(struct tcs *fiber, unsigned int value)
{
	fiber->return_value = value;
}
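
/*
 * Illustrative sketch (assumed caller, not part of this header): a wait
 * object that wakes a pending fiber would typically do something like
 *
 *     fiberRtnValueSet(tcs, (unsigned int)data);
 *
 * before making the fiber runnable again, so that the fiber's _Swap() call
 * returns the delivered value.
 */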

/**
 *
 * @brief Indicates if kernel is handling interrupt
 *
 * @return 1 if an interrupt handler is executing, 0 otherwise
 */
static ALWAYS_INLINE int _IS_IN_ISR(void)
{
	uint32_t act = _arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_ACT);
#if CONFIG_IRQ_OFFLOAD
	/* Check if we're in a TRAP_S exception as well */
	if (_arc_v2_aux_reg_read(_ARC_V2_STATUS32) & _ARC_V2_STATUS32_AE &&
	    _ARC_V2_ECR_VECTOR(_arc_v2_aux_reg_read(_ARC_V2_ECR)) == EXC_EV_TRAP) {
		return 1;
	}
#endif
	return ((act & 0xffff) != 0);
}

/**
 *
 * @brief Indicates the interrupt number of the highest priority
 * active interrupt
 *
 * @return IRQ number
 */
static ALWAYS_INLINE int _INTERRUPT_CAUSE(void)
{
	uint32_t irq_num = _arc_v2_aux_reg_read(_ARC_V2_ICAUSE);

	return irq_num;
}

extern void nanoCpuAtomicIdle(unsigned int);
extern void _thread_entry_wrapper(void);

static inline void _IntLibInit(void)
{
	/* nothing needed; this stub exists because the kernel requires it */
}

#endif /* _ASMLANGUAGE */

#ifdef __cplusplus
}
#endif

#endif /* _NANO_PRIVATE_H */