ext qmsi: Update to QMSI 1.4 RC2

Update the built-in QMSI code to 1.4 (RC2).
The following shim drivers were updated for API or interface changes:
- aio
- counter
- i2c_ss
- rtc
- wdt

The arch- and SoC-specific power management code was also updated.
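
Most of the shim changes track a QMSI-wide rename that adds a qm_
prefix to the power-state helpers. A minimal before/after sketch
(function names taken from the diffs below):

    /* QMSI 1.3.1 */
    power_soc_set_ss_restore_flag();
    ss_power_soc_lpss_enable();

    /* QMSI 1.4 RC2 */
    qm_power_soc_set_ss_restore_flag();
    qm_ss_power_soc_lpss_enable();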

Jira: ZEP-1572

Change-Id: Ibc8fae032a39ffb2c2c997f697835bc0208fd308
Signed-off-by: Jesus Sanchez-Palencia <jesus.sanchez-palencia@intel.com>
Signed-off-by: Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
Authored by Kuo-Lang Tseng on 2017-02-01 17:12:34 -08:00; committed by Anas Nashif.
commit 23b0074012
86 changed files with 4522 additions and 1516 deletions

View file

@ -24,7 +24,7 @@ extern void _power_soc_deep_sleep_2(void);
static void _deep_sleep(enum power_states state)
{
power_soc_set_ss_restore_flag();
qm_power_soc_set_ss_restore_flag();
switch (state) {
case SYS_POWER_STATE_DEEP_SLEEP_1:
@ -63,7 +63,7 @@ void _sys_soc_set_power_state(enum power_states state)
{
switch (state) {
case SYS_POWER_STATE_CPU_LPS:
ss_power_soc_lpss_enable();
qm_ss_power_soc_lpss_enable();
enter_arc_state(ARC_SS2);
break;
case SYS_POWER_STATE_CPU_LPS_1:
@ -78,8 +78,8 @@ void _sys_soc_set_power_state(enum power_states state)
_deep_sleep(state);
break;
case SYS_POWER_STATE_DEEP_SLEEP_2:
ss_power_soc_lpss_enable();
power_soc_set_ss_restore_flag();
qm_ss_power_soc_lpss_enable();
qm_power_soc_set_ss_restore_flag();
_power_soc_deep_sleep_2();
break;
#endif
@ -94,7 +94,7 @@ void _sys_soc_power_state_post_ops(enum power_states state)
switch (state) {
case SYS_POWER_STATE_CPU_LPS:
ss_power_soc_lpss_disable();
qm_ss_power_soc_lpss_disable();
case SYS_POWER_STATE_CPU_LPS_1:
/* Expire the timer as it is disabled in SS2. */
limit = _arc_v2_aux_reg_read(_ARC_V2_TMR0_LIMIT);
@ -105,7 +105,7 @@ void _sys_soc_power_state_post_ops(enum power_states state)
__builtin_arc_seti(0);
break;
case SYS_POWER_STATE_DEEP_SLEEP_2:
ss_power_soc_lpss_disable();
qm_ss_power_soc_lpss_disable();
/* If flag is cleared it means the system entered in
* sleep state while we were in LPS. In that case, we

View file

@ -57,7 +57,7 @@ SECTION_FUNC(TEXT, _power_soc_sleep)
/* Do not link to preserve blink */
jl @save_cpu_context
j @power_soc_sleep
j @qm_power_soc_sleep
/* Does not return */
SECTION_FUNC(TEXT, _power_soc_deep_sleep)
@ -70,7 +70,7 @@ SECTION_FUNC(TEXT, _power_soc_deep_sleep)
/* Do not link to preserve blink */
jl @save_cpu_context
j @power_soc_deep_sleep
j @qm_power_soc_deep_sleep
/* Does not return */
SECTION_FUNC(TEXT, _power_soc_deep_sleep_2)

View file

@ -41,7 +41,7 @@ static void _deep_sleep(enum power_states state)
qm_x86_set_resume_vector(_power_restore_cpu_context,
*__x86_restore_info);
power_soc_set_x86_restore_flag();
qm_power_soc_set_x86_restore_flag();
switch (state) {
case SYS_POWER_STATE_DEEP_SLEEP_1:
@ -61,13 +61,13 @@ void _sys_soc_set_power_state(enum power_states state)
{
switch (state) {
case SYS_POWER_STATE_CPU_LPS:
power_cpu_c2lp();
qm_power_cpu_c2lp();
break;
case SYS_POWER_STATE_CPU_LPS_1:
power_cpu_c2();
qm_power_cpu_c2();
break;
case SYS_POWER_STATE_CPU_LPS_2:
power_cpu_c1();
qm_power_cpu_c1();
break;
#if (defined(CONFIG_SYS_POWER_DEEP_SLEEP))
case SYS_POWER_STATE_DEEP_SLEEP:

View file

@ -53,13 +53,13 @@ SECTION_FUNC(TEXT, _power_restore_cpu_context)
SECTION_FUNC(TEXT, _power_soc_sleep)
call save_cpu_context
wbinvd
call power_soc_sleep
call qm_power_soc_sleep
/* Does not return */
SECTION_FUNC(TEXT, _power_soc_deep_sleep)
call save_cpu_context
wbinvd
call power_soc_deep_sleep
call qm_power_soc_deep_sleep
/* Does not return */
/*

View file

@ -50,7 +50,7 @@ static int aio_qmsi_cmp_disable(struct device *dev, uint8_t index)
CMP_INTR_ROUTER |= (1 << index);
/* Disable comparator according to index */
config.int_en &= ~(1 << index);
config.cmp_en &= ~(1 << index);
config.power &= ~(1 << index);
config.reference &= ~(1 << index);
config.polarity &= ~(1 << index);
@ -93,7 +93,7 @@ static int aio_qmsi_cmp_configure(struct device *dev, uint8_t index,
/* The driver will not use QMSI callback mechanism */
config.callback = NULL;
/* Enable comparator */
config.int_en |= (1 << index);
config.cmp_en |= (1 << index);
config.power |= (1 << index);
if (qm_ac_set_config(&config) != 0) {
@ -139,7 +139,7 @@ static int aio_qmsi_cmp_init(struct device *dev)
config.reference = QM_SCSS_CMP->cmp_ref_sel;
config.polarity = QM_SCSS_CMP->cmp_ref_pol;
config.power = QM_SCSS_CMP->cmp_pwr;
config.int_en = QM_SCSS_CMP->cmp_en;
config.cmp_en = QM_SCSS_CMP->cmp_en;
/* Clear callback pointers */
for (i = 0; i < dev_data->num_cmp; i++) {

View file

@ -115,7 +115,7 @@ static int aon_timer_qmsi_set_alarm(struct device *dev,
int result = 0;
/* Check if timer has been started */
if (QM_AONC[QM_AONC_0].aonpt_cfg == 0) {
if (QM_AONC[QM_AONC_0]->aonpt_cfg == 0) {
return -ENOTSUP;
}
@ -144,7 +144,7 @@ static int aon_timer_qmsi_set_alarm(struct device *dev,
static uint32_t aon_timer_qmsi_get_pending_int(struct device *dev)
{
return QM_AONC[QM_AONC_0].aonpt_stat;
return QM_AONC[QM_AONC_0]->aonpt_stat;
}
static const struct counter_driver_api aon_timer_qmsi_api = {
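
QMSI 1.4 also reshapes the register maps: peripheral blocks such as
QM_AONC, QM_RTC and QM_WDT are now exposed as arrays of pointers to the
register structures instead of arrays of structures, so every shim
access moves from '.' to '->'. A minimal sketch of the pattern:

    /* QMSI 1.3.1: array of register structs. */
    uint32_t stat = QM_AONC[QM_AONC_0].aonpt_stat;

    /* QMSI 1.4: array of pointers to register structs, which also
     * lets UNIT_TEST builds redirect the pointers to mock registers
     * (an assumption based on the #ifndef UNIT_TEST guards below).
     */
    stat = QM_AONC[QM_AONC_0]->aonpt_stat;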

View file

@ -111,18 +111,6 @@ static int ss_i2c_device_ctrl(struct device *dev, uint32_t ctrl_command,
static int i2c_qmsi_ss_init(struct device *dev);
static void i2c_qmsi_ss_isr(void *arg)
{
struct device *dev = arg;
qm_ss_i2c_t instance = GET_CONTROLLER_INSTANCE(dev);
if (instance == QM_SS_I2C_0) {
qm_ss_i2c_0_isr(NULL);
} else {
qm_ss_i2c_1_isr(NULL);
}
}
#ifdef CONFIG_I2C_SS_0
static struct i2c_qmsi_ss_driver_data driver_data_0;
@ -164,13 +152,13 @@ static void i2c_qmsi_ss_config_irq_0(void)
sys_write32(mask, SCSS_REGISTER_BASE + I2C_SS_0_STOP_MASK);
/* Connect the IRQs to ISR */
IRQ_CONNECT(I2C_SS_0_ERR_VECTOR, 1, i2c_qmsi_ss_isr,
IRQ_CONNECT(I2C_SS_0_ERR_VECTOR, 1, qm_ss_i2c_0_error_isr,
DEVICE_GET(i2c_ss_0), 0);
IRQ_CONNECT(I2C_SS_0_RX_VECTOR, 1, i2c_qmsi_ss_isr,
IRQ_CONNECT(I2C_SS_0_RX_VECTOR, 1, qm_ss_i2c_0_rx_avail_isr,
DEVICE_GET(i2c_ss_0), 0);
IRQ_CONNECT(I2C_SS_0_TX_VECTOR, 1, i2c_qmsi_ss_isr,
IRQ_CONNECT(I2C_SS_0_TX_VECTOR, 1, qm_ss_i2c_0_tx_req_isr,
DEVICE_GET(i2c_ss_0), 0);
IRQ_CONNECT(I2C_SS_0_STOP_VECTOR, 1, i2c_qmsi_ss_isr,
IRQ_CONNECT(I2C_SS_0_STOP_VECTOR, 1, qm_ss_i2c_0_stop_det_isr,
DEVICE_GET(i2c_ss_0), 0);
irq_enable(I2C_SS_0_ERR_VECTOR);
@ -221,13 +209,13 @@ static void i2c_qmsi_ss_config_irq_1(void)
sys_write32(mask, SCSS_REGISTER_BASE + I2C_SS_1_STOP_MASK);
/* Connect the IRQs to ISR */
IRQ_CONNECT(I2C_SS_1_ERR_VECTOR, 1, i2c_qmsi_ss_isr,
IRQ_CONNECT(I2C_SS_1_ERR_VECTOR, 1, qm_ss_i2c_1_error_isr,
DEVICE_GET(i2c_ss_1), 0);
IRQ_CONNECT(I2C_SS_1_RX_VECTOR, 1, i2c_qmsi_ss_isr,
IRQ_CONNECT(I2C_SS_1_RX_VECTOR, 1, qm_ss_i2c_1_rx_avail_isr,
DEVICE_GET(i2c_ss_1), 0);
IRQ_CONNECT(I2C_SS_1_TX_VECTOR, 1, i2c_qmsi_ss_isr,
IRQ_CONNECT(I2C_SS_1_TX_VECTOR, 1, qm_ss_i2c_1_tx_req_isr,
DEVICE_GET(i2c_ss_1), 0);
IRQ_CONNECT(I2C_SS_1_STOP_VECTOR, 1, i2c_qmsi_ss_isr,
IRQ_CONNECT(I2C_SS_1_STOP_VECTOR, 1, qm_ss_i2c_1_stop_det_isr,
DEVICE_GET(i2c_ss_1), 0);
irq_enable(I2C_SS_1_ERR_VECTOR);

View file

@ -144,12 +144,12 @@ static int rtc_qmsi_set_alarm(struct device *dev, const uint32_t alarm_val)
static uint32_t rtc_qmsi_read(struct device *dev)
{
return QM_RTC[QM_RTC_0].rtc_ccvr;
return QM_RTC[QM_RTC_0]->rtc_ccvr;
}
static uint32_t rtc_qmsi_get_pending_int(struct device *dev)
{
return QM_RTC[QM_RTC_0].rtc_stat;
return QM_RTC[QM_RTC_0]->rtc_stat;
}
static const struct rtc_driver_api api = {

View file

@ -45,8 +45,8 @@ static void (*user_cb)(struct device *dev);
static void get_config(struct device *dev, struct wdt_config *cfg)
{
cfg->timeout = QM_WDT[QM_WDT_0].wdt_torr;
cfg->mode = ((QM_WDT[QM_WDT_0].wdt_cr & QM_WDT_CR_RMOD) >>
cfg->timeout = QM_WDT[QM_WDT_0]->wdt_torr;
cfg->mode = ((QM_WDT[QM_WDT_0]->wdt_cr & QM_WDT_CR_RMOD) >>
QM_WDT_CR_RMOD_OFFSET);
cfg->interrupt_fn = user_cb;
}

View file

@ -8,7 +8,7 @@ Microcontroller products. It currently supports the following SoCs:
- Intel® Quark™ D2000 Microcontroller
- Intel® Quark™ SE Microcontroller
The current version supported in Zephyr is QMSI 1.3.1. See:
The current version supported in Zephyr is QMSI 1.4 RC2. See:
https://github.com/quark-mcu/qmsi/releases

View file

@ -31,8 +31,6 @@
#include "clk.h"
#include <string.h>
#if (QUARK_D2000)
/* FIFO_INTERRUPT_THRESHOLD is used by qm_adc_irq_convert to set the threshold
* at which the FIFO will trigger an interrupt. */
#define FIFO_INTERRUPT_THRESHOLD (16)
@ -455,5 +453,3 @@ int qm_adc_irq_convert(const qm_adc_t adc, qm_adc_xfer_t *xfer)
return 0;
}
#endif /* QUARK_D2000 */

View file

@ -750,4 +750,22 @@ int qm_ss_adc_restore_context(const qm_ss_adc_t adc,
return 0;
}
#else
int qm_ss_adc_save_context(const qm_ss_adc_t adc,
qm_ss_adc_context_t *const ctx)
{
(void)adc;
(void)ctx;
return 0;
}
int qm_ss_adc_restore_context(const qm_ss_adc_t adc,
const qm_ss_adc_context_t *const ctx)
{
(void)adc;
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -29,48 +29,135 @@
#include "qm_aon_counters.h"
static void (*callback)(void *) = NULL;
static void *callback_data;
#if (HAS_SOC_CONTEXT_RETENTION)
#include "power_states.h"
#endif /* HAS_SOC_CONTEXT_RETENTION */
static void pt_reset(const qm_aonc_t aonc)
static void (*callback[QM_AONC_NUM])(void *) = {NULL};
static void *callback_data[QM_AONC_NUM];
#ifndef UNIT_TEST
qm_aonc_reg_t *qm_aonc[QM_AONC_NUM] = {((qm_aonc_reg_t *)QM_AONC_0_BASE),
#if (NUM_AONC_CONTROLLERS > 1)
((qm_aonc_reg_t *)QM_AONC_1_BASE)
#endif /* NUM_AONC_CONTROLLERS >1 */
};
#endif /* UNIT_TEST */
#define BUSY_CHECK(_aonc)
#if (FIX_2 || FIX_3)
/* Cannot write to clear bit twice in one RTC clock cycle. */
static void wait_single_cycle(const qm_aonc_t aonc)
{
static bool first_run = true;
uint32_t aonc_cfg;
uint32_t aonc_cfg, initial_cnt;
/* After POR, it is required to wait for one RTC clock cycle before
* asserting QM_AONPT_CTRL_RST. Note the AON counter is enabled with an
* initial value of 0 at POR.
/* Ensure the AON counter is enabled */
aonc_cfg = QM_AONC[aonc]->aonc_cfg;
QM_AONC[aonc]->aonc_cfg |= QM_AONC_ENABLE;
initial_cnt = QM_AONC[aonc]->aonc_cnt;
while (initial_cnt == QM_AONC[aonc]->aonc_cnt) {
}
QM_AONC[aonc]->aonc_cfg = aonc_cfg;
}
#endif /* (FIX_2 || FIX_3) */
#if (FIX_3)
#define CLEAR_CHECK(_aonc) \
while (QM_AONC[(_aonc)]->aonpt_ctrl & QM_AONPT_CLR) { \
} \
wait_single_cycle(_aonc);
#define RESET_CHECK(_aonc) \
while (QM_AONC[(_aonc)]->aonpt_ctrl & QM_AONPT_RST) { \
} \
wait_single_cycle(_aonc);
#else /* FIX_3 */
#define CLEAR_CHECK(_aonc) \
while (QM_AONC[(_aonc)]->aonpt_ctrl & QM_AONPT_CLR) { \
}
#define RESET_CHECK(_aonc) \
while (QM_AONC[(_aonc)]->aonpt_ctrl & QM_AONPT_RST) { \
}
#endif /* FIX_3 */
#define AONPT_CLEAR(_aonc) \
/* Clear the alarm and wait for it to complete. */ \
QM_AONC[(_aonc)]->aonpt_ctrl |= QM_AONPT_CLR; \
CLEAR_CHECK(_aonc) \
BUSY_CHECK(_aonc)
/* AONPT requires one RTC clock edge before first timer reset. */
static __inline__ void pt_reset(const qm_aonc_t aonc)
{
#if (FIX_2)
uint32_t aonc_cfg;
static bool first_run = true;
/*
* After Power on Reset, it is required to wait for one RTC clock cycle
* before asserting QM_AONPT_CTRL_RST. Note the AON counter is enabled
* with an initial value of 0 at Power on Reset.
*/
if (first_run) {
first_run = false;
/* Ensure the AON counter is enabled */
aonc_cfg = QM_AONC[aonc].aonc_cfg;
QM_AONC[aonc].aonc_cfg = BIT(0);
aonc_cfg = QM_AONC[aonc]->aonc_cfg;
QM_AONC[aonc]->aonc_cfg |= QM_AONC_ENABLE;
while (0 == QM_AONC[aonc].aonc_cnt) {
while (0 == QM_AONC[aonc]->aonc_cnt) {
}
QM_AONC[aonc].aonc_cfg = aonc_cfg;
QM_AONC[aonc]->aonc_cfg = aonc_cfg;
}
#endif /* FIX_2 */
QM_AONC[aonc].aonpt_ctrl |= BIT(1);
/* Reset the counter. */
QM_AONC[aonc]->aonpt_ctrl |= QM_AONPT_RST;
RESET_CHECK(aonc);
BUSY_CHECK(aonc);
}
/* AONPT requires one RTC clock edge before first timer reset. */
#define AONPT_RESET(_aonc) pt_reset((_aonc))
QM_ISR_DECLARE(qm_aonpt_0_isr)
{
if (callback) {
(*callback)(callback_data);
qm_aonc_t aonc;
#if (HAS_SOC_CONTEXT_RETENTION)
if (QM_SCSS_GP->gps0 & QM_GPS0_POWER_STATES_MASK) {
qm_power_soc_restore();
}
QM_AONC[0].aonpt_ctrl |= BIT(0); /* Clear pending interrupts */
#endif
/*
* Check each always-on counter for the interrupt status and call
* the callback if it has been set.
*/
for (aonc = QM_AONC_0; aonc < QM_AONC_NUM; aonc++) {
if ((QM_AONC[aonc]->aonpt_stat & QM_AONPT_INTERRUPT)) {
if (callback[aonc]) {
(*callback[aonc])(callback_data[aonc]);
}
/* Clear pending interrupt. */
AONPT_CLEAR(aonc);
}
}
QM_ISR_EOI(QM_IRQ_AONPT_0_INT_VECTOR);
}
int qm_aonc_enable(const qm_aonc_t aonc)
{
QM_CHECK(aonc < QM_AONC_NUM, -EINVAL);
QM_CHECK(aonc >= QM_AONC_0, -EINVAL);
QM_AONC[aonc].aonc_cfg = 0x1;
QM_AONC[aonc]->aonc_cfg = QM_AONC_ENABLE;
return 0;
}
@ -78,8 +165,9 @@ int qm_aonc_enable(const qm_aonc_t aonc)
int qm_aonc_disable(const qm_aonc_t aonc)
{
QM_CHECK(aonc < QM_AONC_NUM, -EINVAL);
QM_CHECK(aonc >= QM_AONC_0, -EINVAL);
QM_AONC[aonc].aonc_cfg = 0x0;
QM_AONC[aonc]->aonc_cfg = QM_AONC_DISABLE;
return 0;
}
@ -87,9 +175,10 @@ int qm_aonc_disable(const qm_aonc_t aonc)
int qm_aonc_get_value(const qm_aonc_t aonc, uint32_t *const val)
{
QM_CHECK(aonc < QM_AONC_NUM, -EINVAL);
QM_CHECK(aonc >= QM_AONC_0, -EINVAL);
QM_CHECK(val != NULL, -EINVAL);
*val = QM_AONC[aonc].aonc_cnt;
*val = QM_AONC[aonc]->aonc_cnt;
return 0;
}
@ -97,16 +186,22 @@ int qm_aonpt_set_config(const qm_aonc_t aonc,
const qm_aonpt_config_t *const cfg)
{
QM_CHECK(aonc < QM_AONC_NUM, -EINVAL);
QM_CHECK(aonc >= QM_AONC_0, -EINVAL);
QM_CHECK(cfg != NULL, -EINVAL);
QM_AONC[aonc].aonpt_cfg = cfg->count;
QM_AONC[aonc]->aonpt_cfg = cfg->count;
/* Clear pending interrupts. */
AONPT_CLEAR(aonc);
if (cfg->int_en) {
callback = cfg->callback;
callback_data = cfg->callback_data;
callback[aonc] = cfg->callback;
callback_data[aonc] = cfg->callback_data;
} else {
callback = NULL;
callback[aonc] = NULL;
}
pt_reset(aonc);
AONPT_RESET(aonc);
return 0;
}
@ -114,23 +209,21 @@ int qm_aonpt_set_config(const qm_aonc_t aonc,
int qm_aonpt_get_value(const qm_aonc_t aonc, uint32_t *const val)
{
QM_CHECK(aonc < QM_AONC_NUM, -EINVAL);
QM_CHECK(aonc >= QM_AONC_0, -EINVAL);
QM_CHECK(val != NULL, -EINVAL);
*val = QM_AONC[aonc].aonpt_cnt;
*val = QM_AONC[aonc]->aonpt_cnt;
return 0;
}
int qm_aonpt_get_status(const qm_aonc_t aonc, qm_aonpt_status_t *const status)
{
QM_CHECK(aonc < QM_AONC_NUM, -EINVAL);
QM_CHECK(aonc >= QM_AONC_0, -EINVAL);
QM_CHECK(status != NULL, -EINVAL);
#if (HAS_AONPT_BUSY_BIT)
if (QM_AON_COUNTER[aonc]->aonpt_stat & BIT(1)) {
*status = QM_AONPT_BUSY;
} else
#endif
if (QM_AONC[aonc].aonpt_stat & BIT(0)) {
if (QM_AONC[aonc]->aonpt_stat & QM_AONPT_INTERRUPT) {
*status = QM_AONPT_EXPIRED;
} else {
*status = QM_AONPT_READY;
@ -142,8 +235,13 @@ int qm_aonpt_get_status(const qm_aonc_t aonc, qm_aonpt_status_t *const status)
int qm_aonpt_clear(const qm_aonc_t aonc)
{
QM_CHECK(aonc < QM_AONC_NUM, -EINVAL);
QM_CHECK(aonc >= QM_AONC_0, -EINVAL);
QM_AONC[aonc].aonpt_ctrl |= BIT(0);
/*
* Clear pending interrupt and poll until the command has been
* completed.
*/
AONPT_CLEAR(aonc);
return 0;
}
@ -151,8 +249,65 @@ int qm_aonpt_clear(const qm_aonc_t aonc)
int qm_aonpt_reset(const qm_aonc_t aonc)
{
QM_CHECK(aonc < QM_AONC_NUM, -EINVAL);
QM_CHECK(aonc >= QM_AONC_0, -EINVAL);
pt_reset(aonc);
AONPT_RESET(aonc);
return 0;
}
#if (ENABLE_RESTORE_CONTEXT)
int qm_aonpt_save_context(const qm_aonc_t aonc, qm_aonc_context_t *const ctx)
{
QM_CHECK(aonc < QM_AONC_NUM, -EINVAL);
QM_CHECK(ctx != NULL, -EINVAL);
(void)aonc;
(void)ctx;
return 0;
}
int qm_aonpt_restore_context(const qm_aonc_t aonc,
const qm_aonc_context_t *const ctx)
{
uint32_t int_aonpt_mask;
QM_CHECK(aonc < QM_AONC_NUM, -EINVAL);
QM_CHECK(ctx != NULL, -EINVAL);
(void)aonc;
(void)ctx;
/* The interrupt router registers are sticky and retain their
* values across warm resets, so we don't need to save them.
* But for wake capable peripherals, if their interrupts are
* configured to be edge sensitive, the wake event will be lost
* by the time the interrupt controller is reconfigured, while
* the interrupt is still pending. By masking and unmasking again
* the corresponding routing register, the interrupt is forwarded
* to the core and the ISR will be serviced as expected.
*/
int_aonpt_mask = QM_INTERRUPT_ROUTER->aonpt_0_int_mask;
QM_INTERRUPT_ROUTER->aonpt_0_int_mask = 0xFFFFFFFF;
QM_INTERRUPT_ROUTER->aonpt_0_int_mask = int_aonpt_mask;
return 0;
}
#else
int qm_aonpt_save_context(const qm_aonc_t aonc, qm_aonc_context_t *const ctx)
{
(void)aonc;
(void)ctx;
return 0;
}
int qm_aonpt_restore_context(const qm_aonc_t aonc,
const qm_aonc_context_t *const ctx)
{
(void)aonc;
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -28,25 +28,31 @@
*/
#include "qm_comparator.h"
#if (HAS_SOC_CONTEXT_RETENTION)
#include "power_states.h"
#endif /* HAS_SOC_CONTEXT_RETENTION */
static void (*callback)(void *, uint32_t) = NULL;
static void *callback_data;
#define cmp_en cmp_en
#define cmp_ref_pol cmp_ref_pol
QM_ISR_DECLARE(qm_comparator_0_isr)
{
uint32_t int_status = QM_SCSS_CMP->cmp_stat_clr;
#if (HAS_SOC_CONTEXT_RETENTION)
if (QM_SCSS_GP->gps0 & QM_GPS0_POWER_STATES_MASK) {
power_soc_restore();
qm_power_soc_restore();
}
#endif
if (callback) {
(*callback)(callback_data, int_status);
}
/* Clear all pending interrupts */
/* Clear all pending interrupts. */
QM_SCSS_CMP->cmp_stat_clr = int_status;
QM_ISR_EOI(QM_IRQ_COMPARATOR_0_INT_VECTOR);
@ -56,20 +62,26 @@ int qm_ac_set_config(const qm_ac_config_t *const config)
{
QM_CHECK(config != NULL, -EINVAL);
/* Avoid interrupts while configuring the comparators.
uint32_t reference = 0;
reference = config->reference;
/*
* Avoid interrupts while configuring the comparators.
* This can happen when the polarity is changed
* compared to a previously configured interrupt. */
* compared to a previously configured interrupt.
*/
QM_SCSS_CMP->cmp_en = 0;
callback = config->callback;
callback_data = config->callback_data;
QM_SCSS_CMP->cmp_ref_sel = config->reference;
QM_SCSS_CMP->cmp_ref_sel = reference;
QM_SCSS_CMP->cmp_ref_pol = config->polarity;
QM_SCSS_CMP->cmp_pwr = config->power;
/* Clear all pending interrupts before we enable */
/* Clear all pending interrupts before we enable. */
QM_SCSS_CMP->cmp_stat_clr = 0x7FFFF;
QM_SCSS_CMP->cmp_en = config->int_en;
QM_SCSS_CMP->cmp_en = config->cmp_en;
return 0;
}

View file

@ -83,10 +83,12 @@ typedef struct dma_cfg_prv_t {
uint16_t num_blocks_per_buffer;
/*
* Number of blocks from buffer that need to be transfered (multiblock
* mode), decremented on each single block transfer callback.
* Number of block interrupts pending on the buffer currently being
* transferred. Used in multiblock continuous mode as well as multiblock
* link list mode when more than one buffer is set up. This counter is
* decremented on each block interrupt.
*/
uint16_t num_blocks_remaining;
uint16_t num_blocks_int_pending;
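	/*
	 * Illustrative example (values assumed): a buffer described by four
	 * linked-list items starts with num_blocks_int_pending = 4; each
	 * block interrupt decrements it, and when it reaches 0 the buffer
	 * callback fires and the counter is reloaded from
	 * num_blocks_per_buffer.
	 */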
/*
* In multiblock linked list mode, indicates whether transfer is linear

View file

@ -27,6 +27,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "clk.h"
#include "dma.h"
#ifndef UNIT_TEST
@ -92,10 +93,10 @@ static void qm_dma_isr_handler(const qm_dma_t dma,
/* Block completed, clear interrupt. */
int_reg->clear_block_low = BIT(channel_id);
prv_cfg->num_blocks_remaining--;
prv_cfg->num_blocks_int_pending--;
if (NULL != prv_cfg->lli_tail &&
0 == prv_cfg->num_blocks_remaining) {
0 == prv_cfg->num_blocks_int_pending) {
/*
* Linked list mode, invoke callback if this is last
* block of buffer.
@ -107,13 +108,13 @@ static void qm_dma_isr_handler(const qm_dma_t dma,
}
/* Buffer done, set for next buffer. */
prv_cfg->num_blocks_remaining =
prv_cfg->num_blocks_int_pending =
prv_cfg->num_blocks_per_buffer;
} else if (NULL == prv_cfg->lli_tail) {
QM_ASSERT(prv_cfg->num_blocks_remaining <
QM_ASSERT(prv_cfg->num_blocks_int_pending <
prv_cfg->num_blocks_per_buffer);
if (1 == prv_cfg->num_blocks_remaining) {
if (1 == prv_cfg->num_blocks_int_pending) {
/*
* Contiguous mode. We have just processed the
* next to last block, clear CFG.RELOAD so
@ -238,7 +239,7 @@ int qm_dma_init(const qm_dma_t dma)
int return_code;
/* Enable the DMA Clock */
QM_SCSS_CCU->ccu_mlayer_ahb_ctl |= QM_CCU_DMA_CLK_EN;
clk_dma_enable();
/* Disable the controller */
return_code = dma_controller_disable(dma);
@ -408,22 +409,34 @@ dma_linked_list_init(const qm_dma_multi_transfer_t *multi_transfer,
(ctrl_low & QM_DMA_CTL_L_SINC_MASK) >> QM_DMA_CTL_L_SINC_OFFSET;
qm_dma_address_increment_t destination_address_inc_type =
(ctrl_low & QM_DMA_CTL_L_DINC_MASK) >> QM_DMA_CTL_L_DINC_OFFSET;
/* Linked list node iteration variable. */
qm_dma_linked_list_item_t *lli = multi_transfer->linked_list_first;
uint32_t source_inc = 0;
uint32_t destination_inc = 0;
uint32_t i;
QM_ASSERT(source_address_inc_type == QM_DMA_ADDRESS_INCREMENT ||
source_address_inc_type == QM_DMA_ADDRESS_NO_CHANGE);
QM_ASSERT(destination_address_inc_type == QM_DMA_ADDRESS_INCREMENT ||
destination_address_inc_type == QM_DMA_ADDRESS_NO_CHANGE);
/* Source/destination address increment between consecutive blocks. */
uint32_t source_inc =
(source_address_inc_type == QM_DMA_ADDRESS_INCREMENT)
? multi_transfer->block_size
: 0;
uint32_t destination_inc =
(destination_address_inc_type == QM_DMA_ADDRESS_INCREMENT)
? multi_transfer->block_size
: 0;
/* Linked list node iteration variable. */
qm_dma_linked_list_item_t *lli = multi_transfer->linked_list_first;
uint32_t i;
/*
* Memory endpoints increment the source/destination address between
* consecutive LLIs by the block size times the transfer width in
* bytes.
*/
if (source_address_inc_type == QM_DMA_ADDRESS_INCREMENT) {
source_inc = multi_transfer->block_size *
BIT((ctrl_low & QM_DMA_CTL_L_SRC_TR_WIDTH_MASK) >>
QM_DMA_CTL_L_SRC_TR_WIDTH_OFFSET);
}
if (destination_address_inc_type == QM_DMA_ADDRESS_INCREMENT) {
destination_inc =
multi_transfer->block_size *
BIT((ctrl_low & QM_DMA_CTL_L_DST_TR_WIDTH_MASK) >>
QM_DMA_CTL_L_DST_TR_WIDTH_OFFSET);
}
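	/*
	 * Worked example (encoding assumed): a 32-bit transfer width encodes
	 * as 2 in CTL, so BIT(2) = 4 bytes per datum; with block_size = 16,
	 * consecutive LLIs then advance the address by 16 * 4 = 64 bytes.
	 */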
for (i = 0; i < multi_transfer->num_blocks; i++) {
lli->source_address = source_address;
@ -479,9 +492,8 @@ int qm_dma_multi_transfer_set_config(
if (0 == chan_reg->llp_low) {
prv_cfg->num_blocks_per_buffer =
multi_transfer_config->num_blocks;
prv_cfg->num_blocks_remaining =
multi_transfer_config->num_blocks;
}
prv_cfg->num_blocks_int_pending = multi_transfer_config->num_blocks;
switch (transfer_type) {
case QM_DMA_TYPE_MULTI_CONT:
@ -496,8 +508,18 @@ int qm_dma_multi_transfer_set_config(
multi_transfer_config->block_size);
break;
case QM_DMA_TYPE_MULTI_LL_CIRCULAR:
case QM_DMA_TYPE_MULTI_LL:
/*
* Block interrupts are not enabled in linear linked list with
* single buffer as only one client callback invocation is
* needed, which takes place on transfer callback interrupt.
*/
if (0 == chan_reg->llp_low) {
prv_cfg->num_blocks_int_pending = 0;
}
/* FALLTHROUGH - continue to common circular/linear LL code */
case QM_DMA_TYPE_MULTI_LL_CIRCULAR:
if (multi_transfer_config->linked_list_first == NULL ||
((uint32_t)multi_transfer_config->linked_list_first &
0x3) != 0) {
@ -601,8 +623,12 @@ int qm_dma_transfer_start(const qm_dma_t dma,
int_reg->mask_tfr_low = ((BIT(channel_id) << 8) | BIT(channel_id));
int_reg->mask_err_low = ((BIT(channel_id) << 8) | BIT(channel_id));
if (prv_cfg->num_blocks_per_buffer > 1) {
/* Block interrupts are only unmasked in multiblock mode. */
if (prv_cfg->num_blocks_int_pending > 0) {
/*
* Block interrupts are only unmasked in multiblock mode
* (contiguous, circular linked list or multibuffer linear
* linked list).
*/
int_reg->mask_block_low =
((BIT(channel_id) << 8) | BIT(channel_id));
}
@ -721,4 +747,21 @@ int qm_dma_restore_context(const qm_dma_t dma,
}
return 0;
}
#else
int qm_dma_save_context(const qm_dma_t dma, qm_dma_context_t *const ctx)
{
(void)dma;
(void)ctx;
return 0;
}
int qm_dma_restore_context(const qm_dma_t dma,
const qm_dma_context_t *const ctx)
{
(void)dma;
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -38,6 +38,12 @@ qm_flash_reg_t *qm_flash[QM_FLASH_NUM] = {(qm_flash_reg_t *)QM_FLASH_BASE_0};
#endif
#endif
static __inline__ bool qm_flash_check_otp_locked(const uint32_t flash_stts)
{
return (
(QM_FLASH_STTS_ROM_PROG == (flash_stts & QM_FLASH_STTS_ROM_PROG)));
}
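	/*
	 * Illustrative use (mirrors the call sites below): writes and erases
	 * that touch the OTP region first call this helper and bail out
	 * with -EACCES when the ROM programming lock is set.
	 */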
int qm_flash_set_config(const qm_flash_t flash, const qm_flash_config_t *cfg)
{
QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
@ -52,10 +58,8 @@ int qm_flash_set_config(const qm_flash_t flash, const qm_flash_config_t *cfg)
(controller->tmg_ctrl & QM_FLASH_TMG_DEF_MASK) |
(cfg->us_count | (cfg->wait_states << QM_FLASH_WAIT_STATE_OFFSET));
if (QM_FLASH_WRITE_DISABLE == cfg->write_disable) {
if (cfg->write_disable == QM_FLASH_WRITE_DISABLE) {
controller->ctrl |= QM_FLASH_WRITE_DISABLE_VAL;
} else {
controller->ctrl &= ~QM_FLASH_WRITE_DISABLE_VAL;
}
return 0;
@ -94,6 +98,11 @@ int qm_flash_word_write(const qm_flash_t flash, const qm_flash_region_t region,
#endif
case QM_FLASH_REGION_OTP:
if (qm_flash_check_otp_locked(controller->flash_stts)) {
return -EACCES;
}
p_wr_data = &controller->rom_wr_data;
p_wr_ctrl = &controller->rom_wr_ctrl;
break;
@ -144,6 +153,12 @@ int qm_flash_page_write(const qm_flash_t flash, const qm_flash_region_t region,
break;
case QM_FLASH_REGION_OTP:
/* Check if OTP locked. */
if (qm_flash_check_otp_locked(controller->flash_stts)) {
return -EACCES;
}
p_wr_data = &controller->rom_wr_data;
p_wr_ctrl = &controller->rom_wr_ctrl;
break;
@ -227,6 +242,12 @@ int qm_flash_page_update(const qm_flash_t flash, const qm_flash_region_t region,
#endif
case QM_FLASH_REGION_OTP:
/* Check if OTP locked. */
if (qm_flash_check_otp_locked(controller->flash_stts)) {
return -EACCES;
}
p_wr_data = &controller->rom_wr_data;
p_wr_ctrl = &controller->rom_wr_ctrl;
p_flash = (uint32_t *)(QM_FLASH_REGION_OTP_0_BASE +
@ -295,6 +316,12 @@ int qm_flash_page_erase(const qm_flash_t flash, const qm_flash_region_t region,
break;
case QM_FLASH_REGION_OTP:
/* Check if OTP locked. */
if (qm_flash_check_otp_locked(controller->flash_stts)) {
return -EACCES;
}
controller->rom_wr_ctrl =
(page_num << (QM_FLASH_PAGE_SIZE_BITS + WR_ADDR_OFFSET)) |
ER_REQ;
@ -317,6 +344,12 @@ int qm_flash_mass_erase(const qm_flash_t flash, const uint8_t include_rom)
/* Erase all the Flash pages */
if (include_rom) {
/* Check if OTP locked. */
if (qm_flash_check_otp_locked(controller->flash_stts)) {
return -EACCES;
}
controller->ctrl |= MASS_ERASE_INFO;
}
controller->ctrl |= MASS_ERASE;
@ -352,4 +385,21 @@ int qm_flash_restore_context(const qm_flash_t flash,
return 0;
}
#else
int qm_flash_save_context(const qm_flash_t flash, qm_flash_context_t *const ctx)
{
(void)flash;
(void)ctx;
return 0;
}
int qm_flash_restore_context(const qm_flash_t flash,
const qm_flash_context_t *const ctx)
{
(void)flash;
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -29,6 +29,7 @@
#include "qm_fpr.h"
#include "qm_interrupt.h"
#include "qm_interrupt_router.h"
static void (*callback[QM_FLASH_NUM])(void *);
static void *callback_data[QM_FLASH_NUM];
@ -153,10 +154,10 @@ int qm_fpr_set_violation_policy(const qm_fpr_viol_mode_t mode,
/* unmask interrupt */
if (flash == QM_FLASH_0) {
qm_irq_unmask(QM_IRQ_FLASH_MPR_0_INT);
QM_IR_UNMASK_INT(QM_IRQ_FLASH_MPR_0_INT);
#if (QUARK_SE)
} else {
qm_irq_unmask(QM_IRQ_FLASH_MPR_1_INT);
QM_IR_UNMASK_INT(QM_IRQ_FLASH_MPR_1_INT);
#endif
}
@ -169,10 +170,10 @@ int qm_fpr_set_violation_policy(const qm_fpr_viol_mode_t mode,
else {
/* mask interrupt */
if (flash == QM_FLASH_0) {
qm_irq_mask(QM_IRQ_FLASH_MPR_0_INT);
QM_IR_MASK_INT(QM_IRQ_FLASH_MPR_0_INT);
#if (QUARK_SE)
} else {
qm_irq_mask(QM_IRQ_FLASH_MPR_1_INT);
QM_IR_MASK_INT(QM_IRQ_FLASH_MPR_1_INT);
#endif
}
@ -229,4 +230,21 @@ int qm_fpr_restore_context(const qm_flash_t flash,
return 0;
}
#else
int qm_fpr_save_context(const qm_flash_t flash, qm_fpr_context_t *const ctx)
{
(void)flash;
(void)ctx;
return 0;
}
int qm_fpr_restore_context(const qm_flash_t flash,
const qm_fpr_context_t *const ctx)
{
(void)flash;
(void)ctx;
return 0;
}
#endif

View file

@ -28,16 +28,20 @@
*/
#include "qm_gpio.h"
#if (HAS_SOC_CONTEXT_RETENTION)
#include "power_states.h"
#endif /* HAS_SOC_CONTEXT_RETENTION */
#define ENABLE_PCLK (0x1)
#ifndef UNIT_TEST
#if (QUARK_SE)
qm_gpio_reg_t *qm_gpio[QM_GPIO_NUM] = {(qm_gpio_reg_t *)QM_GPIO_BASE,
(qm_gpio_reg_t *)QM_AON_GPIO_BASE};
#elif(QUARK_D2000)
qm_gpio_reg_t *qm_gpio[QM_GPIO_NUM] = {(qm_gpio_reg_t *)QM_GPIO_BASE};
#endif
#endif
#if (HAS_AON_GPIO)
(qm_gpio_reg_t *)QM_AON_GPIO_BASE
#endif /* HAS_AON_GPIO */
};
#endif /* UNIT_TEST */
static void (*callback[QM_GPIO_NUM])(void *, uint32_t);
static void *callback_data[QM_GPIO_NUM];
@ -48,7 +52,7 @@ static void gpio_isr(const qm_gpio_t gpio)
#if (HAS_SOC_CONTEXT_RETENTION)
if (QM_SCSS_GP->gps0 & QM_GPS0_POWER_STATES_MASK) {
power_soc_restore();
qm_power_soc_restore();
}
#endif
@ -94,6 +98,7 @@ int qm_gpio_set_config(const qm_gpio_t gpio,
controller->gpio_int_polarity = cfg->int_polarity;
controller->gpio_debounce = cfg->int_debounce;
controller->gpio_int_bothedge = cfg->int_bothedge;
controller->gpio_ls_sync |= ENABLE_PCLK;
callback[gpio] = cfg->callback;
callback_data[gpio] = cfg->callback_data;
@ -215,4 +220,21 @@ int qm_gpio_restore_context(const qm_gpio_t gpio,
return 0;
}
#else
int qm_gpio_save_context(const qm_gpio_t gpio, qm_gpio_context_t *const ctx)
{
(void)gpio;
(void)ctx;
return 0;
}
int qm_gpio_restore_context(const qm_gpio_t gpio,
const qm_gpio_context_t *const ctx)
{
(void)gpio;
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -62,11 +62,22 @@ QM_ISR_DECLARE(qm_ss_gpio_1_isr)
int qm_ss_gpio_set_config(const qm_ss_gpio_t gpio,
const qm_ss_gpio_port_config_t *const cfg)
{
uint32_t controller;
uint32_t controller, reg_ls_sync_;
QM_CHECK(gpio < QM_SS_GPIO_NUM, -EINVAL);
QM_CHECK(cfg != NULL, -EINVAL);
controller = gpio_base[gpio];
#if (HAS_SS_GPIO_CLK_ENABLE)
/*
* SS GPIO Clock gate (CLKEN) is enabled here, because it is local to
* the peripheral block and not a part of the SoC power management clock
* gating.
*/
__builtin_arc_sr(BIT(0), controller + QM_SS_GPIO_CLKEN);
#endif /* HAS_SS_GPIO_CLK_ENABLE */
__builtin_arc_sr(0xFFFFFFFF, controller + QM_SS_GPIO_INTMASK);
__builtin_arc_sr(cfg->direction, controller + QM_SS_GPIO_SWPORTA_DDR);
@ -75,8 +86,19 @@ int qm_ss_gpio_set_config(const qm_ss_gpio_t gpio,
controller + QM_SS_GPIO_INT_POLARITY);
__builtin_arc_sr(cfg->int_debounce, controller + QM_SS_GPIO_DEBOUNCE);
#if (HAS_SS_GPIO_INTERRUPT_BOTHEDGE)
__builtin_arc_sr(cfg->int_bothedge,
controller + QM_SS_GPIO_INT_BOTHEDGE);
#endif /* HAS_SS_GPIO_INTERRUPT_BOTHEDGE */
callback[gpio] = cfg->callback;
callback_data[gpio] = cfg->callback_data;
/* Synchronize the level-sensitive interrupts to pclk_intr. */
reg_ls_sync_ = __builtin_arc_lr(gpio_base[gpio] + QM_SS_GPIO_LS_SYNC);
__builtin_arc_sr(reg_ls_sync_ | BIT(0),
controller + QM_SS_GPIO_LS_SYNC);
__builtin_arc_sr(cfg->int_en, controller + QM_SS_GPIO_INTEN);
__builtin_arc_sr(~cfg->int_en, controller + QM_SS_GPIO_INTMASK);
@ -210,4 +232,22 @@ int qm_ss_gpio_restore_context(const qm_ss_gpio_t gpio,
return 0;
}
#else
int qm_ss_gpio_save_context(const qm_ss_gpio_t gpio,
qm_ss_gpio_context_t *const ctx)
{
(void)gpio;
(void)ctx;
return 0;
}
int qm_ss_gpio_restore_context(const qm_ss_gpio_t gpio,
const qm_ss_gpio_context_t *const ctx)
{
(void)gpio;
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -54,8 +54,8 @@ static volatile uint32_t i2c_write_pos[QM_I2C_NUM], i2c_read_pos[QM_I2C_NUM],
i2c_read_cmd_send[QM_I2C_NUM];
/* True if user buffers have been updated. */
static volatile bool transfer_ongoing = false;
static volatile bool transfer_ongoing;
static volatile bool first_start;
/*
* Keep track of activity if addressed.
* There is no register which keeps track of the internal state machine status,
@ -157,9 +157,9 @@ static void empty_rx_fifo(const qm_i2c_t i2c,
* If user does not update buffer when requested, fill the FIFO with dummy
* data.
*/
static void fill_tx_fifo(const qm_i2c_t i2c,
const volatile qm_i2c_transfer_t *const transfer,
qm_i2c_reg_t *const controller)
static void slave_fill_tx_fifo(const qm_i2c_t i2c,
const volatile qm_i2c_transfer_t *const transfer,
qm_i2c_reg_t *const controller)
{
while ((controller->ic_status & QM_I2C_IC_STATUS_TNF) &&
(!(controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_TX_ABRT))) {
@ -197,7 +197,11 @@ static __inline__ int handle_tx_abrt_common(qm_i2c_reg_t *const controller,
QM_ASSERT(!(controller->ic_tx_abrt_source &
QM_I2C_IC_TX_ABRT_SOURCE_ABRT_SBYTE_NORSTRT));
*status =
*status = QM_I2C_TX_ABORT;
/* Get source of TX_ABRT interrupt. */
*status |=
(controller->ic_tx_abrt_source & QM_I2C_IC_TX_ABRT_SOURCE_ALL_MASK);
/* Clear TX ABORT interrupt. */
@ -254,15 +258,77 @@ i2c_isr_slave_handler(const qm_i2c_t i2c,
/*
* Order of interrupt handling:
* - Stop interrupts
* - Start interrupts
* - RX Status interrupts
* - TX Status interrupts (RD_REQ, RX_DONE, TX_EMPTY)
* - General call (will only appear after few SCL clock cycles after
* start interrupt).
* - Stop (can appear very shortly after RX_DONE interrupt)
* - Start (can appear very shortly after a stop interrupt or RX_DONE
* interrupt)
*/
/* Stop condition detected. */
if (ic_intr_stat & QM_I2C_IC_INTR_STAT_STOP_DETECTED) {
/* Empty RX FIFO. */
empty_rx_fifo(i2c, transfer, controller);
/*
* Stop the transfer if a single transfer was requested and
* the controller has been addressed.
* Driver only knows it has been addressed if:
* - It already triggered an interrupt on TX_EMPTY or RX_FULL
* - Data was read from RX FIFO.
*/
if ((transfer->stop == true) &&
(is_addressed || (i2c_read_pos[i2c] != 0))) {
controller_disable(i2c);
}
if (transfer->callback) {
transfer->callback(
transfer->callback_data, 0, QM_I2C_STOP_DETECTED,
(transfer_ongoing) ? i2c_read_pos[i2c] : 0);
}
i2c_write_pos[i2c] = 0;
i2c_read_pos[i2c] = 0;
controller->ic_intr_mask &= ~QM_I2C_IC_INTR_MASK_TX_EMPTY;
is_addressed = false;
/* Clear stop interrupt. */
controller->ic_clr_stop_det;
/*
* Read the interrupt status again in case a start interrupt
* has been triggered in the meantime.
*/
ic_intr_stat = controller->ic_intr_stat;
first_start = true;
}
/*
* START or RESTART condition detected.
* The RESTART_DETECTED interrupt is not used as it is redundant with
* the START_DETECTED interrupt.
*/
if (ic_intr_stat & QM_I2C_IC_INTR_STAT_START_DETECTED) {
if (!first_start) {
empty_rx_fifo(i2c, transfer, controller);
}
if (transfer->callback) {
transfer->callback(
transfer->callback_data, 0, QM_I2C_START_DETECTED,
(transfer_ongoing) ? i2c_read_pos[i2c] : 0);
}
transfer_ongoing = true;
i2c_write_pos[i2c] = 0;
i2c_read_pos[i2c] = 0;
/* Clear Start detected interrupt. */
controller->ic_clr_start_det;
first_start = false;
}
/*
* Check RX status.
* Master write (TX), slave read (RX).
@ -313,7 +379,7 @@ i2c_isr_slave_handler(const qm_i2c_t i2c,
/* Track activity of controller when addressed. */
is_addressed = true;
fill_tx_fifo(i2c, transfer, controller);
slave_fill_tx_fifo(i2c, transfer, controller);
/* Enable TX EMPTY interrupts. */
controller->ic_intr_mask |= QM_I2C_IC_INTR_MASK_TX_EMPTY;
@ -332,7 +398,7 @@ i2c_isr_slave_handler(const qm_i2c_t i2c,
ic_intr_stat = controller->ic_intr_stat;
} else if (ic_intr_stat & QM_I2C_IC_INTR_STAT_TX_EMPTY) {
fill_tx_fifo(i2c, transfer, controller);
slave_fill_tx_fifo(i2c, transfer, controller);
}
/* General call detected. */
@ -359,66 +425,48 @@ i2c_isr_slave_handler(const qm_i2c_t i2c,
/* Track activity of controller when addressed. */
is_addressed = true;
}
}
/* Stop condition detected. */
if (ic_intr_stat & QM_I2C_IC_INTR_STAT_STOP_DETECTED) {
/* Empty RX FIFO. */
empty_rx_fifo(i2c, transfer, controller);
static uint32_t
master_fill_tx_fifo(const qm_i2c_t i2c,
const volatile qm_i2c_transfer_t *const transfer,
qm_i2c_reg_t *const controller)
{
uint32_t ic_data_cmd, count_tx = (QM_I2C_FIFO_SIZE - TX_TL);
uint32_t write_buffer_remaining = transfer->tx_len - i2c_write_pos[i2c];
uint32_t read_buffer_remaining = transfer->rx_len - i2c_read_pos[i2c];
while ((count_tx) && write_buffer_remaining) {
count_tx--;
write_buffer_remaining--;
/*
* Stop transfer if single transfer asked and controller has
* been addressed.
* Driver only knows it has been addressed if:
* - It already triggered an interrupt on TX_EMPTY or RX_FULL
* - Data was read from RX FIFO.
* Write command -IC_DATA_CMD[8] = 0.
* Fill IC_DATA_CMD[7:0] with the data.
*/
if ((transfer->stop == true) &&
(is_addressed || (i2c_read_pos[i2c] != 0))) {
controller_disable(i2c);
}
if (transfer->callback) {
transfer->callback(
transfer->callback_data, 0, QM_I2C_STOP_DETECTED,
(transfer_ongoing) ? i2c_read_pos[i2c] : 0);
}
i2c_write_pos[i2c] = 0;
i2c_read_pos[i2c] = 0;
controller->ic_intr_mask &= ~QM_I2C_IC_INTR_MASK_TX_EMPTY;
is_addressed = false;
/* Clear stop interrupt. */
controller->ic_clr_stop_det;
ic_data_cmd = transfer->tx[i2c_write_pos[i2c]];
/*
* Read again the interrupt status in case of a start interrupt
* has been triggered in the meantime.
* If transfer is a combined transfer, only send stop at
* end of the transfer sequence.
*/
ic_intr_stat = controller->ic_intr_stat;
}
if (transfer->stop && (write_buffer_remaining == 0) &&
(read_buffer_remaining == 0)) {
/*
* START or RESTART condition detected.
* The RESTART_DETECTED interrupt is not used as it is redundant with
* the START_DETECTED interrupt.
*/
if (ic_intr_stat & QM_I2C_IC_INTR_STAT_START_DETECTED) {
empty_rx_fifo(i2c, transfer, controller);
if (transfer->callback) {
transfer->callback(
transfer->callback_data, 0, QM_I2C_START_DETECTED,
(transfer_ongoing) ? i2c_read_pos[i2c] : 0);
ic_data_cmd |= QM_I2C_IC_DATA_CMD_STOP_BIT_CTRL;
}
transfer_ongoing = true;
i2c_write_pos[i2c] = 0;
i2c_read_pos[i2c] = 0;
/* Clear Start detected interrupt. */
controller->ic_clr_start_det;
/* Write data. */
controller->ic_data_cmd = ic_data_cmd;
i2c_write_pos[i2c]++;
/*
* TX_EMPTY INTR is autocleared when the buffer levels
* goes above the threshold.
*/
}
return write_buffer_remaining;
}
static __inline__ void
@ -426,7 +474,7 @@ i2c_isr_master_handler(const qm_i2c_t i2c,
const volatile qm_i2c_transfer_t *const transfer,
qm_i2c_reg_t *const controller)
{
uint32_t ic_data_cmd = 0, count_tx = (QM_I2C_FIFO_SIZE - TX_TL);
uint32_t count_tx;
uint32_t read_buffer_remaining = transfer->rx_len - i2c_read_pos[i2c];
uint32_t write_buffer_remaining = transfer->tx_len - i2c_write_pos[i2c];
uint32_t missing_bytes;
@ -502,35 +550,8 @@ i2c_isr_master_handler(const qm_i2c_t i2c,
}
}
while ((count_tx) && write_buffer_remaining) {
count_tx--;
write_buffer_remaining--;
/*
* Write command -IC_DATA_CMD[8] = 0.
* Fill IC_DATA_CMD[7:0] with the data.
*/
ic_data_cmd = transfer->tx[i2c_write_pos[i2c]];
/*
* If transfer is a combined transfer, only send stop at
* end of the transfer sequence.
*/
if (transfer->stop && (write_buffer_remaining == 0) &&
(read_buffer_remaining == 0)) {
ic_data_cmd |= QM_I2C_IC_DATA_CMD_STOP_BIT_CTRL;
}
/* Write data. */
controller->ic_data_cmd = ic_data_cmd;
i2c_write_pos[i2c]++;
/*
* TX_EMPTY INTR is autocleared when the buffer levels
* goes above the threshold.
*/
}
write_buffer_remaining =
master_fill_tx_fifo(i2c, transfer, controller);
/*
* If missing_bytes is not null, then that means we are already
@ -588,9 +609,35 @@ static void i2c_isr_irq_handler(const qm_i2c_t i2c)
const volatile qm_i2c_transfer_t *const transfer = i2c_transfer[i2c];
qm_i2c_reg_t *const controller = QM_I2C[i2c];
/* Check for errors. */
QM_ASSERT(!(controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_TX_OVER));
QM_ASSERT(!(controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_RX_UNDER));
/* Check TX_OVER error. */
if (controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_TX_OVER) {
/* Clear interrupt. */
controller->ic_clr_tx_over;
/* Mask interrupts. */
controller->ic_intr_mask = QM_I2C_IC_INTR_MASK_ALL;
controller_disable(i2c);
if (transfer->callback) {
transfer->callback(transfer->callback_data, -EIO,
QM_I2C_TX_OVER, i2c_write_pos[i2c]);
}
}
/* Check for RX_UNDER error. */
if (controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_RX_UNDER) {
/* Clear interrupt. */
controller->ic_clr_rx_under;
/* Mask interrupts. */
controller->ic_intr_mask = QM_I2C_IC_INTR_MASK_ALL;
controller_disable(i2c);
if (transfer->callback) {
transfer->callback(transfer->callback_data, -EIO,
QM_I2C_RX_UNDER, i2c_write_pos[i2c]);
}
}
/*
* TX ABORT interrupt.
@ -605,8 +652,21 @@ static void i2c_isr_irq_handler(const qm_i2c_t i2c)
/* Master mode. */
if (controller->ic_con & QM_I2C_IC_CON_MASTER_MODE) {
QM_ASSERT(
!(controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_RX_OVER));
/* Check for RX_OVER error. */
if (controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_RX_OVER) {
/* Clear interrupt. */
controller->ic_clr_rx_over;
/* Mask interrupts. */
controller->ic_intr_mask = QM_I2C_IC_INTR_MASK_ALL;
controller_disable(i2c);
if (transfer->callback) {
transfer->callback(transfer->callback_data,
-EIO, QM_I2C_RX_OVER,
i2c_write_pos[i2c]);
}
}
i2c_isr_master_handler(i2c, transfer, controller);
}
/* Slave mode. */
@ -617,12 +677,51 @@ static void i2c_isr_irq_handler(const qm_i2c_t i2c)
static void i2c_isr_dma_handler(const qm_i2c_t i2c)
{
const volatile qm_i2c_transfer_t *const transfer = i2c_transfer[i2c];
qm_i2c_reg_t *const controller = QM_I2C[i2c];
/* Check for errors. */
QM_ASSERT(!(controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_TX_OVER));
QM_ASSERT(!(controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_RX_UNDER));
QM_ASSERT(!(controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_RX_OVER));
if (controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_TX_OVER) {
/* Clear interrupt. */
controller->ic_clr_tx_over;
/* Mask interrupts. */
controller->ic_intr_mask = QM_I2C_IC_INTR_MASK_ALL;
controller_disable(i2c);
if (transfer->callback) {
transfer->callback(transfer->callback_data, -EIO,
QM_I2C_TX_OVER, i2c_write_pos[i2c]);
}
}
if (controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_RX_UNDER) {
/* Clear interrupt. */
controller->ic_clr_rx_under;
/* Mask interrupts. */
controller->ic_intr_mask = QM_I2C_IC_INTR_MASK_ALL;
controller_disable(i2c);
if (transfer->callback) {
transfer->callback(transfer->callback_data, -EIO,
QM_I2C_RX_UNDER, i2c_write_pos[i2c]);
}
}
if (controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_RX_OVER) {
/* Clear interrupt. */
controller->ic_clr_rx_over;
/* Mask interrupts. */
controller->ic_intr_mask = QM_I2C_IC_INTR_MASK_ALL;
controller_disable(i2c);
if (transfer->callback) {
transfer->callback(transfer->callback_data, -EIO,
QM_I2C_RX_OVER, i2c_write_pos[i2c]);
}
}
/*
* TX ABORT interrupt.
@ -658,34 +757,17 @@ QM_ISR_DECLARE(qm_i2c_1_dma_isr)
i2c_isr_dma_handler(QM_I2C_1);
QM_ISR_EOI(QM_IRQ_I2C_1_INT_VECTOR);
}
#endif
static uint32_t get_lo_cnt(uint32_t lo_time_ns)
{
return ((((clk_sys_get_ticks_per_us() >>
((QM_SCSS_CCU->ccu_periph_clk_div_ctl0 &
CLK_PERIPH_DIV_DEF_MASK) >>
QM_CCU_PERIPH_PCLK_DIV_OFFSET)) *
lo_time_ns) /
1000) -
1);
return (((get_i2c_clk_freq_in_mhz() * lo_time_ns) / 1000) - 1);
}
static uint32_t get_hi_cnt(qm_i2c_t i2c, uint32_t hi_time_ns)
{
/*
* Generated SCL HIGH period is less than the expected SCL clock HIGH
* period in the Master receiver mode.
* Summary: workaround is +1 to hcnt.
*/
return (((((clk_sys_get_ticks_per_us() >>
((QM_SCSS_CCU->ccu_periph_clk_div_ctl0 &
CLK_PERIPH_DIV_DEF_MASK) >>
QM_CCU_PERIPH_PCLK_DIV_OFFSET)) *
hi_time_ns) /
1000) -
7 - QM_I2C[i2c]->ic_fs_spklen) +
return ((((get_i2c_clk_freq_in_mhz() * hi_time_ns) / 1000) - 7 -
QM_I2C[i2c]->ic_fs_spklen) +
1);
}
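
As a worked example of the simplified count helpers (clock value
assumed, not taken from a datasheet): with a 32 MHz I2C peripheral
clock and a requested SCL low time of 1300 ns,

    lcnt = ((32 * 1300) / 1000) - 1; /* integer math: 41 - 1 = 40 */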
@ -850,7 +932,7 @@ int qm_i2c_get_status(const qm_i2c_t i2c, qm_i2c_status_t *const status)
qm_i2c_reg_t *const controller = QM_I2C[i2c];
*status = 0;
*status = QM_I2C_IDLE;
/* Check if slave or master are active. */
if (controller->ic_status & QM_I2C_IC_STATUS_BUSY_MASK) {
@ -870,16 +952,18 @@ int qm_i2c_master_write(const qm_i2c_t i2c, const uint16_t slave_addr,
{
uint8_t *d = (uint8_t *)data;
uint32_t ic_data_cmd = 0;
int ret = 0;
int rc = 0;
QM_CHECK(i2c < QM_I2C_NUM, -EINVAL);
QM_CHECK(slave_addr <= QM_I2C_IC_TAR_MASK, -EINVAL);
QM_CHECK(data != NULL, -EINVAL);
QM_CHECK(len > 0, -EINVAL);
qm_i2c_reg_t *const controller = QM_I2C[i2c];
/* Write slave address to TAR. */
controller->ic_tar = slave_addr;
controller->ic_tar &= ~QM_I2C_IC_TAR_MASK;
controller->ic_tar |= slave_addr;
/* Enable controller. */
controller_enable(i2c);
@ -912,13 +996,13 @@ int qm_i2c_master_write(const qm_i2c_t i2c, const uint16_t slave_addr,
;
if (controller->ic_tx_abrt_source & QM_I2C_IC_TX_ABRT_SOURCE_ALL_MASK) {
ret = -EIO;
rc = -EIO;
}
/* Disable controller. */
if (true == stop) {
if (controller_disable(i2c)) {
ret = -EBUSY;
rc = -EBUSY;
}
}
@ -934,7 +1018,7 @@ int qm_i2c_master_write(const qm_i2c_t i2c, const uint16_t slave_addr,
*/
controller->ic_clr_tx_abrt;
return ret;
return rc;
}
int qm_i2c_master_read(const qm_i2c_t i2c, const uint16_t slave_addr,
@ -942,16 +1026,18 @@ int qm_i2c_master_read(const qm_i2c_t i2c, const uint16_t slave_addr,
qm_i2c_status_t *const status)
{
uint8_t *d = (uint8_t *)data;
int ret = 0;
int rc = 0;
QM_CHECK(i2c < QM_I2C_NUM, -EINVAL);
QM_CHECK(slave_addr <= QM_I2C_IC_TAR_MASK, -EINVAL);
QM_CHECK(data != NULL, -EINVAL);
QM_CHECK(len > 0, -EINVAL);
qm_i2c_reg_t *const controller = QM_I2C[i2c];
/* Write slave address to TAR. */
controller->ic_tar = slave_addr;
controller->ic_tar &= ~QM_I2C_IC_TAR_MASK;
controller->ic_tar |= slave_addr;
/* Enable controller. */
controller_enable(i2c);
@ -979,7 +1065,7 @@ int qm_i2c_master_read(const qm_i2c_t i2c, const uint16_t slave_addr,
if (controller->ic_tx_abrt_source &
QM_I2C_IC_TX_ABRT_SOURCE_ALL_MASK) {
ret = -EIO;
rc = -EIO;
break;
}
/* IC_DATA_CMD[7:0] contains received data. */
@ -990,7 +1076,7 @@ int qm_i2c_master_read(const qm_i2c_t i2c, const uint16_t slave_addr,
/* Disable controller. */
if (true == stop) {
if (controller_disable(i2c)) {
ret = -EBUSY;
rc = -EBUSY;
}
}
@ -1006,7 +1092,7 @@ int qm_i2c_master_read(const qm_i2c_t i2c, const uint16_t slave_addr,
*/
controller->ic_clr_tx_abrt;
return ret;
return rc;
}
int qm_i2c_master_irq_transfer(const qm_i2c_t i2c,
@ -1015,11 +1101,13 @@ int qm_i2c_master_irq_transfer(const qm_i2c_t i2c,
{
QM_CHECK(i2c < QM_I2C_NUM, -EINVAL);
QM_CHECK(NULL != xfer, -EINVAL);
QM_CHECK(slave_addr <= QM_I2C_IC_TAR_MASK, -EINVAL);
qm_i2c_reg_t *const controller = QM_I2C[i2c];
/* Write slave address to TAR. */
controller->ic_tar = slave_addr;
controller->ic_tar &= ~QM_I2C_IC_TAR_MASK;
controller->ic_tar |= slave_addr;
i2c_write_pos[i2c] = 0;
i2c_read_pos[i2c] = 0;
@ -1045,6 +1133,9 @@ int qm_i2c_master_irq_transfer(const qm_i2c_t i2c,
/* Enable controller. */
controller_enable(i2c);
/* Start filling tx fifo. */
master_fill_tx_fifo(i2c, xfer, controller);
/* Unmask interrupts. */
controller->ic_intr_mask |=
QM_I2C_IC_INTR_MASK_RX_UNDER | QM_I2C_IC_INTR_MASK_RX_OVER |
@ -1060,6 +1151,8 @@ int qm_i2c_slave_irq_transfer(const qm_i2c_t i2c,
QM_CHECK(i2c < QM_I2C_NUM, -EINVAL);
QM_CHECK(xfer != NULL, -EINVAL);
qm_i2c_reg_t *const controller = QM_I2C[i2c];
/* Assign common properties. */
i2c_transfer[i2c] = xfer;
i2c_write_pos[i2c] = 0;
@ -1067,8 +1160,13 @@ int qm_i2c_slave_irq_transfer(const qm_i2c_t i2c,
transfer_ongoing = false;
is_addressed = false;
first_start = true;
QM_I2C[i2c]->ic_intr_mask = QM_I2C_IC_INTR_MASK_ALL;
/* Set threshold. */
controller->ic_tx_tl = TX_TL;
controller->ic_rx_tl = RX_TL;
controller->ic_intr_mask = QM_I2C_IC_INTR_MASK_ALL;
controller_enable(i2c);
@ -1078,7 +1176,7 @@ int qm_i2c_slave_irq_transfer(const qm_i2c_t i2c,
* Only TX_EMPTY must be set when needed, otherwise it will be triggered
* every time, even when it is not required to fill the TX FIFO.
*/
QM_I2C[i2c]->ic_intr_mask =
controller->ic_intr_mask =
QM_I2C_IC_INTR_MASK_RX_UNDER | QM_I2C_IC_INTR_MASK_RX_OVER |
QM_I2C_IC_INTR_MASK_RX_FULL | QM_I2C_IC_INTR_MASK_TX_ABORT |
QM_I2C_IC_INTR_MASK_RX_DONE | QM_I2C_IC_INTR_MASK_STOP_DETECTED |
@ -1439,7 +1537,7 @@ int qm_i2c_dma_channel_config(const qm_i2c_t i2c,
/* Burst length is set to half the FIFO for performance */
dma_channel_config.source_burst_length = QM_DMA_BURST_TRANS_LENGTH_8;
dma_channel_config.destination_burst_length =
QM_DMA_BURST_TRANS_LENGTH_8;
QM_DMA_BURST_TRANS_LENGTH_4;
dma_channel_config.client_callback = i2c_dma_callbacks[direction];
dma_channel_config.transfer_type = QM_DMA_TYPE_SINGLE;
@ -1478,19 +1576,20 @@ int qm_i2c_master_dma_transfer(const qm_i2c_t i2c,
QM_CHECK(0 < xfer->tx_len ? xfer->tx != NULL : 1, -EINVAL);
QM_CHECK(0 < xfer->rx_len ? xfer->rx != NULL : 1, -EINVAL);
QM_CHECK(0 == xfer->rx_len ? xfer->tx_len != 0 : 1, -EINVAL);
QM_CHECK(slave_addr <= QM_I2C_IC_TAR_MASK, -EINVAL);
/* Disable all IRQs but the TX abort one. */
QM_I2C[i2c]->ic_intr_mask = QM_I2C_IC_INTR_MASK_TX_ABORT;
/* Write slave address to TAR. */
QM_I2C[i2c]->ic_tar = slave_addr;
QM_I2C[i2c]->ic_tar &= ~QM_I2C_IC_TAR_MASK;
QM_I2C[i2c]->ic_tar |= slave_addr;
i2c_read_cmd_send[i2c] = xfer->rx_len;
i2c_transfer[i2c] = xfer;
/* Set DMA TX and RX waterlevels to half the FIFO depth for performance
reasons */
QM_I2C[i2c]->ic_dma_tdlr = (QM_I2C_FIFO_SIZE / 2);
/* Set DMA TX and RX watermark levels. */
QM_I2C[i2c]->ic_dma_tdlr = (QM_I2C_FIFO_SIZE / 8);
/* RDLR value is desired watermark-1, according to I2C datasheet section
3.17.7 */
QM_I2C[i2c]->ic_dma_rdlr = (QM_I2C_FIFO_SIZE / 2) - 1;
@ -1607,6 +1706,8 @@ int qm_i2c_save_context(const qm_i2c_t i2c, qm_i2c_context_t *const ctx)
ctx->fs_spklen = regs->ic_fs_spklen;
ctx->ic_intr_mask = regs->ic_intr_mask;
ctx->enable = regs->ic_enable;
ctx->rx_tl = regs->ic_rx_tl;
ctx->tx_tl = regs->ic_tx_tl;
return 0;
}
@ -1628,6 +1729,25 @@ int qm_i2c_restore_context(const qm_i2c_t i2c,
regs->ic_fs_spklen = ctx->fs_spklen;
regs->ic_intr_mask = ctx->ic_intr_mask;
regs->ic_enable = ctx->enable;
regs->ic_rx_tl = ctx->rx_tl;
regs->ic_tx_tl = ctx->tx_tl;
return 0;
}
#else
int qm_i2c_save_context(const qm_i2c_t i2c, qm_i2c_context_t *const ctx)
{
(void)i2c;
(void)ctx;
return 0;
}
int qm_i2c_restore_context(const qm_i2c_t i2c,
const qm_i2c_context_t *const ctx)
{
(void)i2c;
(void)ctx;
return 0;
}

View file

@ -31,8 +31,6 @@
#include "qm_ss_i2c.h"
#include "clk.h"
#define SPK_LEN_SS (1)
#define SPK_LEN_FS (2)
#define TX_TL (2)
#define RX_TL (5)
@ -40,29 +38,6 @@
#define I2C_POLL_COUNT (1000000)
#define I2C_POLL_MICROSECOND (1)
/*
* NOTE: There are a number of differences between this Sensor Subsystem I2C
* driver and the Lakemont version. The IP is not the same, the
* functionality is a subset of the features contained on the Lakemont
* version:
* 1. Fast Mode Plus is not supported
* 2. Slave mode is not supported
*
* The registers are different and the register set is compressed.
* Some noteworthy differences are:
* 1. Clock enable is contained in the QM_SS_I2C_CON register
* 2. SPKLEN is contained in the QM_SS_I2C_CON register
* 3. The high and low count values are contained within a single
* register
* 4. There is no raw interrupt status register, QM_SS_I2C_INT_STAT
* takes its place and is non-maskable
* 5. There is a reduced number of TX abrt source status bits
* 6. The QM_SS_I2C_DATA_CMD register is different and requires the
* strobe bit to be written to indicate a QM_SS_I2C_DATA_CMD
* register update. There is a push and pop mechanism for using
* the FIFO.
*/
static uint32_t i2c_base[QM_SS_I2C_NUM] = {QM_SS_I2C_0_BASE, QM_SS_I2C_1_BASE};
static volatile const qm_ss_i2c_transfer_t *i2c_transfer[QM_SS_I2C_NUM];
static volatile uint32_t i2c_write_pos[QM_SS_I2C_NUM],
@ -71,63 +46,125 @@ static volatile uint32_t i2c_write_pos[QM_SS_I2C_NUM],
static void controller_enable(const qm_ss_i2c_t i2c);
static int controller_disable(const qm_ss_i2c_t i2c);
static void qm_ss_i2c_isr_handler(const qm_ss_i2c_t i2c)
static uint32_t
i2c_fill_tx_fifo(const qm_i2c_t i2c,
const volatile qm_ss_i2c_transfer_t *const transfer,
uint32_t controller)
{
const volatile qm_ss_i2c_transfer_t *const transfer = i2c_transfer[i2c];
uint32_t controller = i2c_base[i2c], data_cmd = 0,
count_tx = (QM_SS_I2C_FIFO_SIZE - TX_TL);
qm_ss_i2c_status_t status = 0;
int rc = 0;
uint32_t read_buffer_remaining = transfer->rx_len - i2c_read_pos[i2c];
uint32_t data_cmd = 0, count_tx = (QM_SS_I2C_FIFO_SIZE - TX_TL);
uint32_t write_buffer_remaining = transfer->tx_len - i2c_write_pos[i2c];
uint32_t missing_bytes;
uint32_t read_buffer_remaining = transfer->rx_len - i2c_read_pos[i2c];
/* Check for errors */
QM_ASSERT(!(__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
QM_SS_I2C_INTR_STAT_TX_OVER));
QM_ASSERT(!(__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
QM_SS_I2C_INTR_STAT_RX_UNDER));
QM_ASSERT(!(__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
QM_SS_I2C_INTR_STAT_RX_OVER));
while ((count_tx) && write_buffer_remaining) {
count_tx--;
write_buffer_remaining--;
if ((__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
QM_SS_I2C_INTR_STAT_TX_ABRT)) {
QM_ASSERT(
!(__builtin_arc_lr(controller + QM_SS_I2C_TX_ABRT_SOURCE) &
QM_SS_I2C_TX_ABRT_SBYTE_NORSTRT));
/* write command -IC_DATA_CMD[8] = 0 */
/* fill IC_DATA_CMD[7:0] with the data */
data_cmd = QM_SS_I2C_DATA_CMD_PUSH |
i2c_transfer[i2c]->tx[i2c_write_pos[i2c]];
status =
(__builtin_arc_lr(controller + QM_SS_I2C_TX_ABRT_SOURCE) &
QM_SS_I2C_TX_ABRT_SOURCE_ALL_MASK);
/* if transfer is a combined transfer, only
* send stop at
* end of the transfer sequence */
if (i2c_transfer[i2c]->stop && (read_buffer_remaining == 0) &&
(write_buffer_remaining == 0)) {
/* clear intr */
__builtin_arc_sr(QM_SS_I2C_INTR_CLR_TX_ABRT,
controller + QM_SS_I2C_INTR_CLR);
/* mask interrupts */
__builtin_arc_sr(QM_SS_I2C_INTR_MASK_ALL,
controller + QM_SS_I2C_INTR_MASK);
rc = (status & QM_SS_I2C_TX_ABRT_USER_ABRT) ? -ECANCELED : -EIO;
controller_disable(i2c);
if (i2c_transfer[i2c]->callback) {
i2c_transfer[i2c]->callback(
i2c_transfer[i2c]->callback_data, rc, status, 0);
data_cmd |= QM_SS_I2C_DATA_CMD_STOP;
}
/* write data */
QM_SS_I2C_WRITE_DATA_CMD(controller, data_cmd);
i2c_write_pos[i2c]++;
/* TX_EMPTY INTR is autocleared when the buffer
* levels goes above the threshold
*/
}
return write_buffer_remaining;
}
static void handle_i2c_error_interrupt(const qm_ss_i2c_t i2c)
{
uint32_t controller = i2c_base[i2c];
qm_ss_i2c_status_t status = 0;
int rc = -EIO;
/* Check for TX_OVER error */
if (QM_SS_I2C_READ_INTR_STAT(controller) &
QM_SS_I2C_INTR_STAT_TX_OVER) {
status = QM_SS_I2C_TX_OVER;
/* Clear interrupt */
QM_SS_I2C_CLEAR_TX_OVER_INTR(controller);
}
/* Check for RX_UNDER error */
if (QM_SS_I2C_READ_INTR_STAT(controller) &
QM_SS_I2C_INTR_STAT_RX_UNDER) {
status = QM_SS_I2C_RX_UNDER;
/* Clear interrupt */
QM_SS_I2C_CLEAR_RX_UNDER_INTR(controller);
}
/* Check for RX_OVER error */
if (QM_SS_I2C_READ_INTR_STAT(controller) &
QM_SS_I2C_INTR_STAT_RX_OVER) {
status = QM_SS_I2C_RX_OVER;
/* Clear interrupt */
QM_SS_I2C_CLEAR_RX_OVER_INTR(controller);
}
/* Check for TX_ABRT error */
if ((QM_SS_I2C_READ_INTR_STAT(controller) &
QM_SS_I2C_INTR_STAT_TX_ABRT)) {
QM_ASSERT(!(QM_SS_I2C_READ_TX_ABRT_SOURCE(controller) &
QM_SS_I2C_TX_ABRT_SBYTE_NORSTRT));
status = QM_SS_I2C_TX_ABORT;
status |= (QM_SS_I2C_READ_TX_ABRT_SOURCE(controller) &
QM_SS_I2C_TX_ABRT_SOURCE_ALL_MASK);
/* Clear interrupt */
QM_SS_I2C_CLEAR_TX_ABRT_INTR(controller);
rc = (status & QM_SS_I2C_TX_ABRT_USER_ABRT) ? -ECANCELED : -EIO;
}
/* Mask interrupts */
QM_SS_I2C_MASK_ALL_INTERRUPTS(controller);
controller_disable(i2c);
if (i2c_transfer[i2c]->callback) {
i2c_transfer[i2c]->callback(i2c_transfer[i2c]->callback_data,
rc, status, 0);
}
}
static void handle_i2c_rx_avail_interrupt(const qm_ss_i2c_t i2c)
{
const volatile qm_ss_i2c_transfer_t *const transfer = i2c_transfer[i2c];
uint32_t controller = i2c_base[i2c];
uint32_t read_buffer_remaining = transfer->rx_len - i2c_read_pos[i2c];
/* RX read from buffer */
if ((__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
if ((QM_SS_I2C_READ_INTR_STAT(controller) &
QM_SS_I2C_INTR_STAT_RX_FULL)) {
while (read_buffer_remaining &&
(__builtin_arc_lr(controller + QM_SS_I2C_RXFLR))) {
__builtin_arc_sr(QM_SS_I2C_DATA_CMD_POP,
controller + QM_SS_I2C_DATA_CMD);
(QM_SS_I2C_READ_RXFLR(controller))) {
QM_SS_I2C_READ_RX_FIFO(controller);
/* IC_DATA_CMD[7:0] contains received data */
i2c_transfer[i2c]->rx[i2c_read_pos[i2c]] =
__builtin_arc_lr(controller + QM_SS_I2C_DATA_CMD);
QM_SS_I2C_READ_DATA_CMD(controller);
read_buffer_remaining--;
i2c_read_pos[i2c]++;
@ -135,8 +172,8 @@ static void qm_ss_i2c_isr_handler(const qm_ss_i2c_t i2c)
/* mask rx full interrupt if transfer
* complete
*/
QM_SS_REG_AUX_NAND(
(controller + QM_SS_I2C_INTR_MASK),
QM_SS_I2C_MASK_INTERRUPT(
controller,
QM_SS_I2C_INTR_MASK_RX_FULL |
QM_SS_I2C_INTR_MASK_TX_EMPTY);
@ -157,28 +194,37 @@ static void qm_ss_i2c_isr_handler(const qm_ss_i2c_t i2c)
* interrupt is generated when all the remaining
* data are received.
*/
QM_SS_REG_AUX_NAND((controller + QM_SS_I2C_TL),
QM_SS_I2C_TL_RX_TL_MASK);
QM_SS_REG_AUX_OR((controller + QM_SS_I2C_TL),
(read_buffer_remaining - 1));
QM_SS_I2C_CLEAR_RX_TL(controller);
QM_SS_I2C_WRITE_RX_TL(controller,
(read_buffer_remaining - 1));
}
/* RX_FULL INTR is autocleared when the buffer
* levels goes below the threshold
*/
}
}
if ((__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
static void handle_i2c_tx_req_interrupt(const qm_ss_i2c_t i2c)
{
const volatile qm_ss_i2c_transfer_t *const transfer = i2c_transfer[i2c];
uint32_t controller = i2c_base[i2c];
uint32_t count_tx;
uint32_t read_buffer_remaining = transfer->rx_len - i2c_read_pos[i2c];
uint32_t write_buffer_remaining = transfer->tx_len - i2c_write_pos[i2c];
uint32_t missing_bytes;
if ((QM_SS_I2C_READ_INTR_STAT(controller) &
QM_SS_I2C_INTR_STAT_TX_EMPTY)) {
if ((__builtin_arc_lr(controller + QM_SS_I2C_STATUS) &
if ((QM_SS_I2C_READ_STATUS(controller) &
QM_SS_I2C_STATUS_TFE) &&
(i2c_transfer[i2c]->tx != NULL) &&
(write_buffer_remaining == 0) &&
(read_buffer_remaining == 0)) {
QM_SS_REG_AUX_NAND((controller + QM_SS_I2C_INTR_MASK),
QM_SS_I2C_INTR_MASK_TX_EMPTY);
QM_SS_I2C_MASK_INTERRUPT(controller,
QM_SS_I2C_INTR_MASK_TX_EMPTY);
/* if this is not a combined
* transaction, disable the controller now
@ -195,34 +241,8 @@ static void qm_ss_i2c_isr_handler(const qm_ss_i2c_t i2c)
}
}
while ((count_tx) && write_buffer_remaining) {
count_tx--;
write_buffer_remaining--;
/* write command -IC_DATA_CMD[8] = 0 */
/* fill IC_DATA_CMD[7:0] with the data */
data_cmd = QM_SS_I2C_DATA_CMD_PUSH |
i2c_transfer[i2c]->tx[i2c_write_pos[i2c]];
/* if transfer is a combined transfer, only
* send stop at
* end of the transfer sequence */
if (i2c_transfer[i2c]->stop &&
(read_buffer_remaining == 0) &&
(write_buffer_remaining == 0)) {
data_cmd |= QM_SS_I2C_DATA_CMD_STOP;
}
/* write data */
__builtin_arc_sr(data_cmd,
controller + QM_SS_I2C_DATA_CMD);
i2c_write_pos[i2c]++;
/* TX_EMPTY INTR is autocleared when the buffer
* level goes above the threshold
*/
}
write_buffer_remaining =
i2c_fill_tx_fifo(i2c, i2c_transfer[i2c], controller);
/* If missing_bytes is not null, then that means we are already
* waiting for some bytes after sending read request on the
@ -233,12 +253,11 @@ static void qm_ss_i2c_isr_handler(const qm_ss_i2c_t i2c)
/* Sanity check: the number of bytes read but not yet processed
* cannot exceed the number of expected bytes. */
QM_ASSERT(__builtin_arc_lr(controller + QM_SS_I2C_RXFLR) <=
missing_bytes);
QM_ASSERT(QM_SS_I2C_READ_RXFLR(controller) <= missing_bytes);
/* count_tx is the remaining size in the fifo */
count_tx = QM_SS_I2C_FIFO_SIZE -
__builtin_arc_lr(controller + QM_SS_I2C_TXFLR);
count_tx =
QM_SS_I2C_FIFO_SIZE - QM_SS_I2C_READ_TXFLR(controller);
if (count_tx > missing_bytes) {
count_tx -= missing_bytes;
@ -258,17 +277,16 @@ static void qm_ss_i2c_isr_handler(const qm_ss_i2c_t i2c)
if (i2c_transfer[i2c]->stop &&
(i2c_read_cmd_send[i2c] == 0)) {
__builtin_arc_sr((QM_SS_I2C_DATA_CMD_CMD |
QM_SS_I2C_DATA_CMD_PUSH |
QM_SS_I2C_DATA_CMD_STOP),
controller +
QM_SS_I2C_DATA_CMD);
QM_SS_I2C_WRITE_DATA_CMD(
controller, (QM_SS_I2C_DATA_CMD_CMD |
QM_SS_I2C_DATA_CMD_PUSH |
QM_SS_I2C_DATA_CMD_STOP));
} else {
__builtin_arc_sr((QM_SS_I2C_DATA_CMD_CMD |
QM_SS_I2C_DATA_CMD_PUSH),
controller +
QM_SS_I2C_DATA_CMD);
QM_SS_I2C_WRITE_DATA_CMD(
controller, (QM_SS_I2C_DATA_CMD_CMD |
QM_SS_I2C_DATA_CMD_PUSH));
}
}
@ -276,20 +294,66 @@ static void qm_ss_i2c_isr_handler(const qm_ss_i2c_t i2c)
* empty */
if ((write_buffer_remaining == 0) &&
(read_buffer_remaining == 0)) {
QM_SS_REG_AUX_NAND((controller + QM_SS_I2C_TL),
QM_SS_I2C_TL_TX_TL_MASK);
QM_SS_I2C_CLEAR_TX_TL(controller);
}
}
}
QM_ISR_DECLARE(qm_ss_i2c_0_isr)
static void handle_i2c_stop_det_interrupt(const qm_ss_i2c_t i2c)
{
qm_ss_i2c_isr_handler(QM_SS_I2C_0);
uint32_t controller = i2c_base[i2c];
if ((QM_SS_I2C_READ_INTR_STAT(controller) & QM_SS_I2C_INTR_STAT_STOP)) {
/* Clear interrupt */
QM_SS_I2C_CLEAR_STOP_DET_INTR(controller);
if (i2c_transfer[i2c]->callback) {
i2c_transfer[i2c]->callback(
i2c_transfer[i2c]->callback_data, 0, QM_SS_I2C_IDLE,
0);
}
}
}
QM_ISR_DECLARE(qm_ss_i2c_1_isr)
QM_ISR_DECLARE(qm_ss_i2c_0_error_isr)
{
qm_ss_i2c_isr_handler(QM_SS_I2C_1);
handle_i2c_error_interrupt(QM_SS_I2C_0);
}
QM_ISR_DECLARE(qm_ss_i2c_0_rx_avail_isr)
{
handle_i2c_rx_avail_interrupt(QM_SS_I2C_0);
}
QM_ISR_DECLARE(qm_ss_i2c_0_tx_req_isr)
{
handle_i2c_tx_req_interrupt(QM_SS_I2C_0);
}
QM_ISR_DECLARE(qm_ss_i2c_0_stop_det_isr)
{
handle_i2c_stop_det_interrupt(QM_SS_I2C_0);
}
QM_ISR_DECLARE(qm_ss_i2c_1_error_isr)
{
handle_i2c_error_interrupt(QM_SS_I2C_1);
}
QM_ISR_DECLARE(qm_ss_i2c_1_rx_avail_isr)
{
handle_i2c_rx_avail_interrupt(QM_SS_I2C_1);
}
QM_ISR_DECLARE(qm_ss_i2c_1_tx_req_isr)
{
handle_i2c_tx_req_interrupt(QM_SS_I2C_1);
}
QM_ISR_DECLARE(qm_ss_i2c_1_stop_det_isr)
{
handle_i2c_stop_det_interrupt(QM_SS_I2C_1);
}
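The combined per-controller ISR is now split into four event-specific handlers. A minimal registration sketch, using the qm_ss_irq_request() calls documented in the qm_ss_isr.h changes further below:

qm_ss_irq_request(QM_SS_IRQ_I2C_0_ERROR_INT, qm_ss_i2c_0_error_isr);
qm_ss_irq_request(QM_SS_IRQ_I2C_0_RX_AVAIL_INT, qm_ss_i2c_0_rx_avail_isr);
qm_ss_irq_request(QM_SS_IRQ_I2C_0_TX_REQ_INT, qm_ss_i2c_0_tx_req_isr);
qm_ss_irq_request(QM_SS_IRQ_I2C_0_STOP_DET_INT, qm_ss_i2c_0_stop_det_isr);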
static uint32_t get_lo_cnt(uint32_t lo_time_ns)
@ -302,34 +366,31 @@ static uint32_t get_hi_cnt(qm_ss_i2c_t i2c, uint32_t hi_time_ns)
uint32_t controller = i2c_base[i2c];
return (((clk_sys_get_ticks_per_us() * hi_time_ns) / 1000) - 7 -
((__builtin_arc_lr(controller + QM_SS_I2C_CON) &
QM_SS_I2C_CON_SPKLEN_MASK) >>
QM_SS_I2C_CON_SPKLEN_OFFSET));
(QM_SS_I2C_READ_SPKLEN(controller)));
}
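For illustration only, assuming a hypothetical 32 MHz system clock (clk_sys_get_ticks_per_us() returning 32) and a spike length of 1, the high-count formula above works out as follows for a 5000 ns high period (100 kHz standard mode at 50% duty):

/* hcnt = ((32 * 5000) / 1000) - 7 - spklen = 160 - 7 - 1 = 152 */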
int qm_ss_i2c_set_config(const qm_ss_i2c_t i2c,
const qm_ss_i2c_config_t *const cfg)
{
uint32_t controller = i2c_base[i2c], lcnt = 0, hcnt = 0, full_cnt = 0,
min_lcnt = 0, lcnt_diff = 0,
con = (__builtin_arc_lr(controller + QM_SS_I2C_CON) &
QM_SS_I2C_CON_CLK_ENA);
QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);
QM_CHECK(cfg != NULL, -EINVAL);
uint32_t controller = i2c_base[i2c], lcnt = 0, hcnt = 0, min_lcnt = 0,
lcnt_diff = 0;
QM_SS_I2C_WRITE_CLKEN(controller);
/* mask all interrupts */
__builtin_arc_sr(QM_SS_I2C_INTR_MASK_ALL,
controller + QM_SS_I2C_INTR_MASK);
QM_SS_I2C_MASK_ALL_INTERRUPTS(controller);
/* disable controller */
if (controller_disable(i2c)) {
return -EBUSY;
}
/* set mode */
con |= QM_SS_I2C_CON_RESTART_EN |
/* set 7/10 bit address mode */
(cfg->address_mode << QM_SS_I2C_CON_IC_10BITADDR_OFFSET);
/* Set mode */
QM_SS_I2C_WRITE_RESTART_EN(controller);
QM_SS_I2C_WRITE_ADDRESS_MODE(controller, cfg->address_mode);
/*
* Timing generation algorithm:
@ -339,13 +400,14 @@ int qm_ss_i2c_set_config(const qm_ss_i2c_t i2c,
* timings are guaranteed as per spec.
*/
QM_SS_I2C_CLEAR_SPKLEN(controller);
QM_SS_I2C_CLEAR_SPEED(controller);
switch (cfg->speed) {
case QM_SS_I2C_SPEED_STD:
con |= QM_SS_I2C_CON_SPEED_SS |
SPK_LEN_SS << QM_SS_I2C_CON_SPKLEN_OFFSET;
__builtin_arc_sr(con, controller + QM_SS_I2C_CON);
QM_SS_I2C_WRITE_SPEED(controller, QM_SS_I2C_CON_SPEED_SS);
QM_SS_I2C_WRITE_SPKLEN(controller, QM_SS_I2C_SPK_LEN_SS);
min_lcnt = get_lo_cnt(QM_I2C_MIN_SS_NS);
lcnt = get_lo_cnt(QM_I2C_SS_50_DC_NS);
@ -353,15 +415,27 @@ int qm_ss_i2c_set_config(const qm_ss_i2c_t i2c,
break;
case QM_SS_I2C_SPEED_FAST:
con |= QM_SS_I2C_CON_SPEED_FS |
SPK_LEN_FS << QM_SS_I2C_CON_SPKLEN_OFFSET;
__builtin_arc_sr(con, controller + QM_SS_I2C_CON);
QM_SS_I2C_WRITE_SPEED(controller, QM_SS_I2C_CON_SPEED_FS);
QM_SS_I2C_WRITE_SPKLEN(controller, QM_SS_I2C_SPK_LEN_FS);
min_lcnt = get_lo_cnt(QM_I2C_MIN_FS_NS);
lcnt = get_lo_cnt(QM_I2C_FS_50_DC_NS);
hcnt = get_hi_cnt(i2c, QM_I2C_FS_50_DC_NS);
break;
#if HAS_SS_I2C_FAST_PLUS_SPEED
case QM_SS_I2C_SPEED_FAST_PLUS:
QM_SS_I2C_WRITE_SPEED(controller, QM_SS_I2C_CON_SPEED_FSP);
QM_SS_I2C_WRITE_SPKLEN(controller, QM_SS_I2C_SPK_LEN_FSP);
min_lcnt = get_lo_cnt(QM_I2C_MIN_FSP_NS);
lcnt = get_lo_cnt(QM_I2C_FSP_50_DC_NS);
hcnt = get_hi_cnt(i2c, QM_I2C_FSP_50_DC_NS);
break;
#endif /* HAS_SS_I2C_FAST_PLUS_SPEED */
}
if (hcnt > QM_SS_I2C_IC_HCNT_MAX || hcnt < QM_SS_I2C_IC_HCNT_MIN) {
@ -380,14 +454,19 @@ int qm_ss_i2c_set_config(const qm_ss_i2c_t i2c,
hcnt -= (lcnt_diff);
}
full_cnt = (lcnt & 0xFFFF) |
(hcnt & 0xFFFF) << QM_SS_I2C_SS_FS_SCL_CNT_HCNT_OFFSET;
if (QM_SS_I2C_SPEED_STD == cfg->speed) {
__builtin_arc_sr(full_cnt, controller + QM_SS_I2C_SS_SCL_CNT);
QM_SS_I2C_CLEAR_SS_SCL_HCNT(controller);
QM_SS_I2C_CLEAR_SS_SCL_LCNT(controller);
} else {
__builtin_arc_sr(full_cnt, controller + QM_SS_I2C_FS_SCL_CNT);
QM_SS_I2C_WRITE_SS_SCL_HCNT(controller, hcnt);
QM_SS_I2C_WRITE_SS_SCL_LCNT(controller, lcnt);
} else { /* Fast and fast plus modes */
QM_SS_I2C_CLEAR_FS_SCL_HCNT(controller);
QM_SS_I2C_CLEAR_FS_SCL_LCNT(controller);
QM_SS_I2C_WRITE_FS_SCL_HCNT(controller, hcnt);
QM_SS_I2C_WRITE_FS_SCL_LCNT(controller, lcnt);
}
return 0;
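A minimal configuration sketch; the 7-bit address-mode enumerator name QM_SS_I2C_7_BIT is an assumption, as it does not appear in this diff:

static int configure_bus(void)
{
	const qm_ss_i2c_config_t cfg = {
		.address_mode = QM_SS_I2C_7_BIT, /* assumed enumerator name */
		.speed = QM_SS_I2C_SPEED_STD,
	};

	return qm_ss_i2c_set_config(QM_SS_I2C_0, &cfg);
}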
@ -396,37 +475,54 @@ int qm_ss_i2c_set_config(const qm_ss_i2c_t i2c,
int qm_ss_i2c_set_speed(const qm_ss_i2c_t i2c, const qm_ss_i2c_speed_t speed,
const uint16_t lo_cnt, const uint16_t hi_cnt)
{
uint32_t full_cnt = 0, controller = i2c_base[i2c],
con = __builtin_arc_lr(controller + QM_SS_I2C_CON);
QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);
QM_CHECK(hi_cnt < QM_SS_I2C_IC_HCNT_MAX &&
lo_cnt > QM_SS_I2C_IC_HCNT_MIN,
hi_cnt > QM_SS_I2C_IC_HCNT_MIN,
-EINVAL);
QM_CHECK(lo_cnt < QM_SS_I2C_IC_LCNT_MAX &&
lo_cnt > QM_SS_I2C_IC_LCNT_MIN,
-EINVAL);
con &= ~(QM_SS_I2C_CON_SPEED_MASK | QM_SS_I2C_CON_SPKLEN_MASK);
uint32_t controller = i2c_base[i2c];
full_cnt = (lo_cnt & QM_SS_I2C_SS_FS_SCL_CNT_16BIT_MASK) |
(hi_cnt & QM_SS_I2C_SS_FS_SCL_CNT_16BIT_MASK)
<< QM_SS_I2C_SS_FS_SCL_CNT_HCNT_OFFSET;
QM_SS_I2C_CLEAR_SPKLEN(controller);
QM_SS_I2C_CLEAR_SPEED(controller);
switch (speed) {
case QM_SS_I2C_SPEED_STD:
con |= (QM_SS_I2C_CON_SPEED_SS |
(SPK_LEN_SS << QM_SS_I2C_CON_SPKLEN_OFFSET));
__builtin_arc_sr(full_cnt, controller + QM_SS_I2C_SS_SCL_CNT);
QM_SS_I2C_WRITE_SPKLEN(controller, QM_SS_I2C_SPK_LEN_SS);
QM_SS_I2C_WRITE_SPEED(controller, QM_SS_I2C_CON_SPEED_SS);
QM_SS_I2C_CLEAR_SS_SCL_HCNT(controller);
QM_SS_I2C_CLEAR_SS_SCL_LCNT(controller);
QM_SS_I2C_WRITE_SS_SCL_HCNT(controller, hi_cnt);
QM_SS_I2C_WRITE_SS_SCL_LCNT(controller, lo_cnt);
break;
case QM_SS_I2C_SPEED_FAST:
con |= (QM_SS_I2C_CON_SPEED_FS |
(SPK_LEN_FS << QM_SS_I2C_CON_SPKLEN_OFFSET));
__builtin_arc_sr(full_cnt, controller + QM_SS_I2C_FS_SCL_CNT);
break;
}
QM_SS_I2C_WRITE_SPKLEN(controller, QM_SS_I2C_SPK_LEN_FS);
QM_SS_I2C_WRITE_SPEED(controller, QM_SS_I2C_CON_SPEED_FS);
__builtin_arc_sr(con, controller + QM_SS_I2C_CON);
QM_SS_I2C_CLEAR_FS_SCL_HCNT(controller);
QM_SS_I2C_CLEAR_FS_SCL_LCNT(controller);
QM_SS_I2C_WRITE_FS_SCL_HCNT(controller, hi_cnt);
QM_SS_I2C_WRITE_FS_SCL_LCNT(controller, lo_cnt);
break;
#if HAS_SS_I2C_FAST_PLUS_SPEED
case QM_SS_I2C_SPEED_FAST_PLUS:
QM_SS_I2C_WRITE_SPKLEN(controller, QM_SS_I2C_SPK_LEN_FSP);
QM_SS_I2C_WRITE_SPEED(controller, QM_SS_I2C_CON_SPEED_FSP);
QM_SS_I2C_CLEAR_FS_SCL_HCNT(controller);
QM_SS_I2C_CLEAR_FS_SCL_LCNT(controller);
QM_SS_I2C_WRITE_FS_SCL_HCNT(controller, hi_cnt);
QM_SS_I2C_WRITE_FS_SCL_LCNT(controller, lo_cnt);
break;
#endif /* HAS_SS_I2C_FAST_PLUS_SPEED */
}
return 0;
}
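A usage sketch with illustrative counts only; real values must satisfy the QM_SS_I2C_IC_HCNT/LCNT bounds checked above:

static void set_fast_mode(void)
{
	/* Hypothetical lo/hi counts of 40 and 30 ticks. */
	qm_ss_i2c_set_speed(QM_SS_I2C_0, QM_SS_I2C_SPEED_FAST, 40, 30);
}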
@ -434,19 +530,19 @@ int qm_ss_i2c_set_speed(const qm_ss_i2c_t i2c, const qm_ss_i2c_speed_t speed,
int qm_ss_i2c_get_status(const qm_ss_i2c_t i2c,
qm_ss_i2c_status_t *const status)
{
uint32_t controller = i2c_base[i2c];
QM_CHECK(status != NULL, -EINVAL);
*status = 0;
uint32_t controller = i2c_base[i2c];
*status = QM_SS_I2C_IDLE;
/* check if slave or master are active */
if (__builtin_arc_lr(controller + QM_SS_I2C_STATUS) &
QM_SS_I2C_STATUS_BUSY_MASK) {
if (QM_SS_I2C_READ_STATUS(controller) & QM_SS_I2C_STATUS_BUSY_MASK) {
*status |= QM_SS_I2C_BUSY;
}
/* check for abort status */
*status |= (__builtin_arc_lr(controller + QM_SS_I2C_TX_ABRT_SOURCE) &
*status |= (QM_SS_I2C_READ_TX_ABRT_SOURCE(controller) &
QM_SS_I2C_TX_ABRT_SOURCE_ALL_MASK);
return 0;
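Since the status is a bitmask, a caller can poll it directly; a short sketch:

static bool bus_busy(void)
{
	qm_ss_i2c_status_t st;

	qm_ss_i2c_get_status(QM_SS_I2C_0, &st);
	return (st & QM_SS_I2C_BUSY) != 0;
}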
@ -456,21 +552,17 @@ int qm_ss_i2c_master_write(const qm_ss_i2c_t i2c, const uint16_t slave_addr,
const uint8_t *const data, uint32_t len,
const bool stop, qm_ss_i2c_status_t *const status)
{
uint8_t *d = (uint8_t *)data;
uint32_t controller = i2c_base[i2c],
con = __builtin_arc_lr(controller + QM_SS_I2C_CON),
data_cmd = 0;
int ret = 0;
QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);
QM_CHECK(data != NULL, -EINVAL);
QM_CHECK(len > 0, -EINVAL);
uint8_t *d = (uint8_t *)data;
uint32_t controller = i2c_base[i2c], data_cmd = 0;
int ret = 0;
/* write slave address to TAR */
con &= ~QM_SS_I2C_CON_TAR_SAR_MASK;
con |= (slave_addr & QM_SS_I2C_CON_TAR_SAR_10_BIT_MASK)
<< QM_SS_I2C_CON_TAR_SAR_OFFSET;
__builtin_arc_sr(con, controller + QM_SS_I2C_CON);
QM_SS_I2C_CLEAR_TAR(controller);
QM_SS_I2C_WRITE_TAR(controller, slave_addr);
/* enable controller */
controller_enable(i2c);
@ -478,7 +570,7 @@ int qm_ss_i2c_master_write(const qm_ss_i2c_t i2c, const uint16_t slave_addr,
while (len--) {
/* wait if FIFO is full */
while (!((__builtin_arc_lr(controller + QM_SS_I2C_STATUS)) &
while (!(QM_SS_I2C_READ_STATUS(controller) &
QM_SS_I2C_STATUS_TFNF))
;
@ -492,17 +584,16 @@ int qm_ss_i2c_master_write(const qm_ss_i2c_t i2c, const uint16_t slave_addr,
data_cmd |= QM_SS_I2C_DATA_CMD_STOP;
}
__builtin_arc_sr(data_cmd, controller + QM_SS_I2C_DATA_CMD);
QM_SS_I2C_WRITE_DATA_CMD(controller, data_cmd);
d++;
}
/* This is a blocking call; wait until the FIFO is empty or a TX
* abort error occurs. */
while (!(__builtin_arc_lr(controller + QM_SS_I2C_STATUS) &
QM_SS_I2C_STATUS_TFE))
while (!(QM_SS_I2C_READ_STATUS(controller) & QM_SS_I2C_STATUS_TFE))
;
if ((__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
if ((QM_SS_I2C_READ_INTR_STAT(controller) &
QM_SS_I2C_INTR_STAT_TX_ABRT)) {
ret = -EIO;
}
@ -524,8 +615,7 @@ int qm_ss_i2c_master_write(const qm_ss_i2c_t i2c, const uint16_t slave_addr,
* FIFO remains in this flushed state until the
* register IC_CLR_TX_ABRT is read.
*/
__builtin_arc_sr(QM_SS_I2C_INTR_CLR_TX_ABRT,
controller + QM_SS_I2C_INTR_CLR);
QM_SS_I2C_CLEAR_TX_ABRT_INTR(controller);
return ret;
}
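A blocking-write sketch against a hypothetical slave at address 0x52:

static int write_two_bytes(void)
{
	uint8_t tx_data[2] = {0x00, 0x2a};
	qm_ss_i2c_status_t status;

	/* Write two bytes and issue a STOP at the end of the transfer. */
	return qm_ss_i2c_master_write(QM_SS_I2C_0, 0x52, tx_data,
				      sizeof(tx_data), true, &status);
}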
@ -534,21 +624,18 @@ int qm_ss_i2c_master_read(const qm_ss_i2c_t i2c, const uint16_t slave_addr,
uint8_t *const data, uint32_t len, const bool stop,
qm_ss_i2c_status_t *const status)
{
uint32_t controller = i2c_base[i2c],
con = __builtin_arc_lr(controller + QM_SS_I2C_CON),
data_cmd = QM_SS_I2C_DATA_CMD_CMD | QM_SS_I2C_DATA_CMD_PUSH;
uint8_t *d = (uint8_t *)data;
int ret = 0;
QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);
QM_CHECK(data != NULL, -EINVAL);
QM_CHECK(len > 0, -EINVAL);
uint32_t controller = i2c_base[i2c],
data_cmd = QM_SS_I2C_DATA_CMD_CMD | QM_SS_I2C_DATA_CMD_PUSH;
uint8_t *d = (uint8_t *)data;
int ret = 0;
/* write slave address to TAR */
con &= ~QM_SS_I2C_CON_TAR_SAR_MASK;
con |= (slave_addr & QM_SS_I2C_CON_TAR_SAR_10_BIT_MASK)
<< QM_SS_I2C_CON_TAR_SAR_OFFSET;
__builtin_arc_sr(con, controller + QM_SS_I2C_CON);
QM_SS_I2C_CLEAR_TAR(controller);
QM_SS_I2C_WRITE_TAR(controller, slave_addr);
/* enable controller */
controller_enable(i2c);
@ -558,34 +645,33 @@ int qm_ss_i2c_master_read(const qm_ss_i2c_t i2c, const uint16_t slave_addr,
data_cmd |= QM_SS_I2C_DATA_CMD_STOP;
}
__builtin_arc_sr(data_cmd, controller + QM_SS_I2C_DATA_CMD);
QM_SS_I2C_WRITE_DATA_CMD(controller, data_cmd);
/* Wait while the RX FIFO is empty; break on a TX abort
* error. */
while (!(__builtin_arc_lr(controller + QM_SS_I2C_STATUS) &
while (!(QM_SS_I2C_READ_STATUS(controller) &
QM_SS_I2C_STATUS_RFNE)) {
if (__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
if (QM_SS_I2C_READ_INTR_STAT(controller) &
QM_SS_I2C_INTR_STAT_TX_ABRT) {
break;
}
}
if ((__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
if ((QM_SS_I2C_READ_INTR_STAT(controller) &
QM_SS_I2C_INTR_STAT_TX_ABRT)) {
ret = -EIO;
break;
}
__builtin_arc_sr(QM_SS_I2C_DATA_CMD_POP,
controller + QM_SS_I2C_DATA_CMD);
QM_SS_I2C_READ_RX_FIFO(controller);
/* wait until rx fifo is empty, indicating pop is complete */
while ((__builtin_arc_lr(controller + QM_SS_I2C_STATUS) &
QM_SS_I2C_STATUS_RFNE))
while (
(QM_SS_I2C_READ_STATUS(controller) & QM_SS_I2C_STATUS_RFNE))
;
/* IC_DATA_CMD[7:0] contains received data */
*d = __builtin_arc_lr(controller + QM_SS_I2C_DATA_CMD);
*d = QM_SS_I2C_READ_DATA_CMD(controller);
d++;
}
@ -606,8 +692,7 @@ int qm_ss_i2c_master_read(const qm_ss_i2c_t i2c, const uint16_t slave_addr,
* FIFO remains in this flushed state until the
* register IC_CLR_TX_ABRT is read.
*/
__builtin_arc_sr(QM_SS_I2C_INTR_CLR_TX_ABRT,
controller + QM_SS_I2C_INTR_CLR);
QM_SS_I2C_CLEAR_TX_ABRT_INTR(controller);
return ret;
}
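A combined write-then-read sketch (a typical register read): the write omits STOP, so the controller presumably issues a repeated START before the read, since RESTART_EN is set in qm_ss_i2c_set_config(). Slave address and register value are hypothetical:

static int read_reg(uint8_t *val)
{
	uint8_t reg = 0x10;
	qm_ss_i2c_status_t status;
	int rc;

	rc = qm_ss_i2c_master_write(QM_SS_I2C_0, 0x52, &reg, 1, false,
				    &status);
	if (rc) {
		return rc;
	}
	return qm_ss_i2c_master_read(QM_SS_I2C_0, 0x52, val, 1, true,
				     &status);
}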
@ -616,16 +701,14 @@ int qm_ss_i2c_master_irq_transfer(const qm_ss_i2c_t i2c,
const qm_ss_i2c_transfer_t *const xfer,
const uint16_t slave_addr)
{
uint32_t controller = i2c_base[i2c],
con = __builtin_arc_lr(controller + QM_SS_I2C_CON);
QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);
QM_CHECK(NULL != xfer, -EINVAL);
uint32_t controller = i2c_base[i2c];
/* write slave address to TAR */
con &= ~QM_SS_I2C_CON_TAR_SAR_MASK;
con |= (slave_addr & QM_SS_I2C_CON_TAR_SAR_10_BIT_MASK)
<< QM_SS_I2C_CON_TAR_SAR_OFFSET;
__builtin_arc_sr(con, controller + QM_SS_I2C_CON);
QM_SS_I2C_CLEAR_TAR(controller);
QM_SS_I2C_WRITE_TAR(controller, slave_addr);
i2c_write_pos[i2c] = 0;
i2c_read_pos[i2c] = 0;
@ -638,27 +721,27 @@ int qm_ss_i2c_master_irq_transfer(const qm_ss_i2c_t i2c,
* change the threshold value so the 'RX FULL' interrupt is
* generated once all data from the transfer is received.
*/
__builtin_arc_sr(
((TX_TL << QM_SS_I2C_TL_TX_TL_OFFSET) | (xfer->rx_len - 1)),
controller + QM_SS_I2C_TL);
} else {
__builtin_arc_sr(((TX_TL << QM_SS_I2C_TL_TX_TL_OFFSET) | RX_TL),
controller + QM_SS_I2C_TL);
}
QM_SS_I2C_CLEAR_RX_TL(controller);
QM_SS_I2C_CLEAR_TX_TL(controller);
/* mask interrupts */
__builtin_arc_sr(QM_SS_I2C_INTR_MASK_ALL,
controller + QM_SS_I2C_INTR_MASK);
QM_SS_I2C_WRITE_RX_TL(controller, (xfer->rx_len - 1));
QM_SS_I2C_WRITE_TX_TL(controller, TX_TL);
} else {
QM_SS_I2C_CLEAR_RX_TL(controller);
QM_SS_I2C_CLEAR_TX_TL(controller);
QM_SS_I2C_WRITE_RX_TL(controller, RX_TL);
QM_SS_I2C_WRITE_TX_TL(controller, TX_TL);
}
/* enable controller */
controller_enable(i2c);
/* Start filling tx fifo. */
i2c_fill_tx_fifo(i2c, xfer, controller);
/* unmask interrupts */
__builtin_arc_sr(
(QM_SS_I2C_INTR_MASK_TX_ABRT | QM_SS_I2C_INTR_MASK_TX_EMPTY |
QM_SS_I2C_INTR_MASK_TX_OVER | QM_SS_I2C_INTR_MASK_RX_FULL |
QM_SS_I2C_INTR_MASK_RX_OVER | QM_SS_I2C_INTR_MASK_RX_UNDER),
controller + QM_SS_I2C_INTR_MASK);
QM_SS_I2C_UNMASK_INTERRUPTS(controller);
return 0;
}
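An IRQ-transfer sketch; the callback signature is inferred from the call sites above and may differ from the actual header:

static void xfer_done(void *data, int rc, qm_ss_i2c_status_t status,
		      uint32_t len)
{
	/* rc is 0 on success; status carries the controller status. */
}

static uint8_t tx_buf[4];
static qm_ss_i2c_transfer_t xfer;

static void start_irq_write(void)
{
	xfer.tx = tx_buf;
	xfer.tx_len = sizeof(tx_buf);
	xfer.rx = NULL;
	xfer.rx_len = 0;
	xfer.stop = true;
	xfer.callback = xfer_done;
	xfer.callback_data = NULL;

	qm_ss_i2c_master_irq_transfer(QM_SS_I2C_0, &xfer, 0x52);
}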
@ -666,21 +749,18 @@ int qm_ss_i2c_master_irq_transfer(const qm_ss_i2c_t i2c,
static void controller_enable(const qm_ss_i2c_t i2c)
{
uint32_t controller = i2c_base[i2c];
if (!(__builtin_arc_lr(controller + QM_SS_I2C_ENABLE_STATUS) &
if (!(QM_SS_I2C_READ_ENABLE_STATUS(controller) &
QM_SS_I2C_ENABLE_STATUS_IC_EN)) {
/* enable controller */
QM_SS_REG_AUX_OR((controller + QM_SS_I2C_CON),
QM_SS_I2C_CON_ENABLE);
QM_SS_I2C_ENABLE(controller);
/* wait until controller is enabled */
while (
!(__builtin_arc_lr(controller + QM_SS_I2C_ENABLE_STATUS) &
QM_SS_I2C_ENABLE_STATUS_IC_EN))
while (!(QM_SS_I2C_READ_ENABLE_STATUS(controller) &
QM_SS_I2C_ENABLE_STATUS_IC_EN))
;
}
/* Clear all interrupt flags */
__builtin_arc_sr(QM_SS_I2C_INTR_CLR_ALL,
controller + QM_SS_I2C_INTR_CLR);
QM_SS_I2C_CLEAR_ALL_INTR(controller);
}
static int controller_disable(const qm_ss_i2c_t i2c)
@ -689,25 +769,26 @@ static int controller_disable(const qm_ss_i2c_t i2c)
int poll_count = I2C_POLL_COUNT;
/* disable controller */
QM_SS_REG_AUX_NAND((controller + QM_SS_I2C_CON), QM_SS_I2C_CON_ENABLE);
QM_SS_I2C_DISABLE(controller);
/* wait until controller is disabled */
while ((__builtin_arc_lr(controller + QM_SS_I2C_ENABLE_STATUS) &
while ((QM_SS_I2C_READ_ENABLE_STATUS(controller) &
QM_SS_I2C_ENABLE_STATUS_IC_EN) &&
poll_count--) {
clk_sys_udelay(I2C_POLL_MICROSECOND);
}
/* returns 0 if ok, meaning controller is disabled */
return (__builtin_arc_lr(controller + QM_SS_I2C_ENABLE_STATUS) &
return (QM_SS_I2C_READ_ENABLE_STATUS(controller) &
QM_SS_I2C_ENABLE_STATUS_IC_EN);
}
int qm_ss_i2c_irq_transfer_terminate(const qm_ss_i2c_t i2c)
{
uint32_t controller = i2c_base[i2c];
QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);
uint32_t controller = i2c_base[i2c];
/* Abort:
* In response to an ABORT, the controller issues a STOP and
* flushes
@ -717,7 +798,7 @@ int qm_ss_i2c_irq_transfer_terminate(const qm_ss_i2c_t i2c)
* is
* cleared automatically by hardware after the abort operation.
*/
QM_SS_REG_AUX_OR((controller + QM_SS_I2C_CON), QM_SS_I2C_CON_ABORT);
QM_SS_I2C_ABORT(controller);
return 0;
}
@ -756,4 +837,22 @@ int qm_ss_i2c_restore_context(const qm_ss_i2c_t i2c,
return 0;
}
#else
int qm_ss_i2c_save_context(const qm_ss_i2c_t i2c,
qm_ss_i2c_context_t *const ctx)
{
(void)i2c;
(void)ctx;
return 0;
}
int qm_ss_i2c_restore_context(const qm_ss_i2c_t i2c,
const qm_ss_i2c_context_t *const ctx)
{
(void)i2c;
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -37,7 +37,9 @@
* Always-on Counters.
*
* @note The always on counters are in the 32kHz clock domain. Some register
* operations take a minimum of a 32kHz clock cycle to complete.
* operations take a minimum of a 32kHz clock cycle to complete. If the
* Always-on Timer interrupt is not configured to be edge triggered,
* multiple interrupts will occur.
*
* @defgroup groupAONC Always-on Counters
* @{
@ -55,16 +57,6 @@ typedef enum {
* Timer expired. Status must be cleared with qm_aonpt_clear().
*/
QM_AONPT_EXPIRED,
#if (HAS_AONPT_BUSY_BIT)
/**
* Timer is busy. Status after an alarm clear or timer reset has
* been initiated. Status must change back to ready before any further
* timer configuration to prevent timer lockup.
* This is due to the always on counter being in the 32kHz clock
* domain.
*/
QM_AONPT_BUSY,
#endif
} qm_aonpt_status_t;
/**
@ -195,6 +187,37 @@ int qm_aonpt_clear(const qm_aonc_t aonc);
*/
int qm_aonpt_reset(const qm_aonc_t aonc);
/**
* Save the Always-on Periodic Timer context.
*
* Save the configuration of the specified AONC peripheral
* before entering sleep.
*
* @param[in] aonc AONC index.
* @param[out] ctx AONC context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_aonpt_save_context(const qm_aonc_t aonc, qm_aonc_context_t *const ctx);
/**
* Restore the Always-on Periodic Timer context.
*
* Restore the configuration of the specified AONC peripheral
* after exiting sleep.
*
* @param[in] aonc AONC index.
* @param[in] ctx AONC context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_aonpt_restore_context(const qm_aonc_t aonc,
const qm_aonc_context_t *const ctx);
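A minimal save/restore sketch around a sleep transition, assuming the AONC instance enumerator is named QM_AONC_0:

static qm_aonc_context_t aonc_ctx;

static void sleep_with_aonpt(void)
{
	qm_aonpt_save_context(QM_AONC_0, &aonc_ctx); /* QM_AONC_0 assumed */
	/* ... enter and exit the sleep state here ... */
	qm_aonpt_restore_context(QM_AONC_0, &aonc_ctx);
}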
/**
* @}
*/

View file

@ -44,16 +44,17 @@
* Analog Comparator configuration type.
*
* Each bit in the registers controls a single Analog Comparator pin.
*
* @note There is no way to control comparator interrupts using this
* configuration struct: when a comparator is enabled and powered-up, it starts
* generating interrupts when proper input conditions are met; however,
* comparator interrupts can be masked at the interrupt-routing level.
*/
typedef struct {
uint32_t int_en; /**< Interrupt enable. */
uint32_t cmp_en; /**< Comparator enable. */
uint32_t reference; /**< Reference voltage, 1b: VREF; 0b: AR_PIN. */
uint32_t polarity; /**< 0b: input>ref; 1b: input<ref */
uint32_t power; /**< 1b: Normal mode; 0b:Power-down/Shutdown mode */
#if HAS_COMPARATOR_VREF2
/**< 0b: VREF_1; 1b: VREF_2; when the reference is external. */
uint32_t ar_pad;
#endif /* HAS_COMPARATOR_VREF2 */
/**
* Transfer callback.

View file

@ -341,7 +341,6 @@ int qm_dma_transfer_mem_to_mem(const qm_dma_t dma,
const qm_dma_channel_id_t channel_id,
qm_dma_transfer_t *const transfer_config);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save DMA peripheral's context.
*
@ -372,7 +371,6 @@ int qm_dma_save_context(const qm_dma_t dma, qm_dma_context_t *const ctx);
*/
int qm_dma_restore_context(const qm_dma_t dma,
const qm_dma_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* @}

View file

@ -71,7 +71,13 @@ typedef struct {
* operations.
*/
uint8_t us_count;
qm_flash_disable_t write_disable; /**< Write disable. */
/**
* Write Disable.
*
* When this is set, only a reset will re-enable writes to Flash.
*/
qm_flash_disable_t write_disable;
} qm_flash_config_t;
/**
@ -196,7 +202,6 @@ int qm_flash_page_erase(const qm_flash_t flash, const qm_flash_region_t region,
*/
int qm_flash_mass_erase(const qm_flash_t flash, const uint8_t include_rom);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save flash context.
*
@ -229,7 +234,6 @@ int qm_flash_save_context(const qm_flash_t flash,
*/
int qm_flash_restore_context(const qm_flash_t flash,
const qm_flash_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* @}

View file

@ -151,7 +151,6 @@ int qm_fpr_set_violation_policy(const qm_fpr_viol_mode_t mode,
const qm_flash_t flash,
qm_fpr_callback_t fpr_cb, void *data);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save FPR context.
*
@ -196,7 +195,6 @@ int qm_fpr_save_context(const qm_flash_t flash, qm_fpr_context_t *const ctx);
*/
int qm_fpr_restore_context(const qm_flash_t flash,
const qm_fpr_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* @}

View file

@ -173,7 +173,6 @@ int qm_gpio_read_port(const qm_gpio_t gpio, uint32_t *const port);
*/
int qm_gpio_write_port(const qm_gpio_t gpio, const uint32_t val);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save GPIO context.
*
@ -204,8 +203,6 @@ int qm_gpio_save_context(const qm_gpio_t gpio, qm_gpio_context_t *const ctx);
*/
int qm_gpio_restore_context(const qm_gpio_t gpio,
const qm_gpio_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* @}
*/

View file

@ -449,7 +449,6 @@ int qm_i2c_slave_dma_transfer(const qm_i2c_t i2c,
*/
int qm_i2c_dma_transfer_terminate(const qm_i2c_t i2c);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save I2C context.
*
@ -482,7 +481,6 @@ int qm_i2c_save_context(const qm_i2c_t i2c, qm_i2c_context_t *const ctx);
*/
int qm_i2c_restore_context(const qm_i2c_t i2c,
const qm_i2c_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* @}

View file

@ -48,8 +48,6 @@
#endif
#if (ENABLE_RESTORE_CONTEXT)
#if (HAS_APIC) || (QM_SENSOR)
/**
* Save IRQ context.
*
@ -84,8 +82,6 @@ int qm_irq_save_context(qm_irq_context_t *const ctx);
* @retval Negative @ref errno for possible error codes.
*/
int qm_irq_restore_context(const qm_irq_context_t *const ctx);
#endif /* HAS_APIC || QM_SENSOR */
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* Interrupt driver.
@ -151,7 +147,7 @@ void qm_irq_mask(uint32_t irq);
void _qm_register_isr(uint32_t vector, qm_isr_t isr);
void _qm_irq_setup(uint32_t irq, uint16_t register_offset);
void _qm_irq_setup(uint32_t irq);
/*
* Request a given IRQ and register Interrupt Service Routine to interrupt
@ -161,17 +157,17 @@ void _qm_irq_setup(uint32_t irq, uint16_t register_offset);
* @param[in] isr ISR to register to given IRQ.
*/
#if (QM_SENSOR)
#define qm_irq_request(irq, isr) \
#define QM_IRQ_REQUEST(irq, isr) \
do { \
_qm_register_isr(irq##_VECTOR, isr); \
_qm_irq_setup(irq, irq##_MASK_OFFSET); \
_qm_irq_setup(irq); \
} while (0);
#else
#define qm_irq_request(irq, isr) \
#define QM_IRQ_REQUEST(irq, isr) \
do { \
qm_int_vector_request(irq##_VECTOR, isr); \
\
_qm_irq_setup(irq, irq##_MASK_OFFSET); \
_qm_irq_setup(irq); \
} while (0)
#endif /* QM_SENSOR */
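The rename is call-site compatible; for example, following the RTC ISR documentation below:

QM_IRQ_REQUEST(QM_IRQ_RTC_0_INT, qm_rtc_0_isr);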

View file

@ -45,7 +45,7 @@
* ISR for ADC 0 convert and calibration interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_ADC_0_CAL_INT, qm_adc_0_cal_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_ADC_0_CAL_INT, qm_adc_0_cal_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_adc_0_cal_isr);
@ -54,7 +54,7 @@ QM_ISR_DECLARE(qm_adc_0_cal_isr);
* ISR for ADC 0 change mode interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_ADC_0_PWR_INT, qm_adc_0_pwr_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_ADC_0_PWR_INT, qm_adc_0_pwr_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_adc_0_pwr_isr);
@ -64,7 +64,7 @@ QM_ISR_DECLARE(qm_adc_0_pwr_isr);
* ISR for Always-on Periodic Timer 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_AONPT_0_INT, qm_aonpt_0_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_AONPT_0_INT, qm_aonpt_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_aonpt_0_isr);
@ -73,7 +73,7 @@ QM_ISR_DECLARE(qm_aonpt_0_isr);
* ISR for Analog Comparator 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_COMPARATOR_0_INT, qm_comparator_0_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_COMPARATOR_0_INT, qm_comparator_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_comparator_0_isr);
@ -82,7 +82,7 @@ QM_ISR_DECLARE(qm_comparator_0_isr);
* ISR for DMA error interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_0_ERROR_INT, qm_dma_0_error_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_DMA_0_ERROR_INT, qm_dma_0_error_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_error_isr);
@ -91,7 +91,7 @@ QM_ISR_DECLARE(qm_dma_0_error_isr);
* ISR for DMA channel 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_0_INT_0, qm_dma_0_isr_0);
* @code QM_IRQ_REQUEST(QM_IRQ_DMA_0_INT_0, qm_dma_0_isr_0);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_0);
@ -100,7 +100,7 @@ QM_ISR_DECLARE(qm_dma_0_isr_0);
* ISR for DMA channel 1 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_0_INT_1, qm_dma_0_isr_1);
* @code QM_IRQ_REQUEST(QM_IRQ_DMA_0_INT_1, qm_dma_0_isr_1);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_1);
@ -110,7 +110,7 @@ QM_ISR_DECLARE(qm_dma_0_isr_1);
* ISR for DMA channel 2 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_0_INT_2, qm_dma_0_isr_2);
* @code QM_IRQ_REQUEST(QM_IRQ_DMA_0_INT_2, qm_dma_0_isr_2);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_2);
@ -119,7 +119,7 @@ QM_ISR_DECLARE(qm_dma_0_isr_2);
* ISR for DMA channel 3 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_0_INT_3, qm_dma_0_isr_3);
* @code QM_IRQ_REQUEST(QM_IRQ_DMA_0_INT_3, qm_dma_0_isr_3);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_3);
@ -128,7 +128,7 @@ QM_ISR_DECLARE(qm_dma_0_isr_3);
* ISR for DMA channel 4 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_0_INT_4, qm_dma_0_isr_4);
* @code QM_IRQ_REQUEST(QM_IRQ_DMA_0_INT_4, qm_dma_0_isr_4);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_4);
@ -137,7 +137,7 @@ QM_ISR_DECLARE(qm_dma_0_isr_4);
* ISR for DMA channel 5 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_0_INT_5, qm_dma_0_isr_5);
* @code QM_IRQ_REQUEST(QM_IRQ_DMA_0_INT_5, qm_dma_0_isr_5);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_5);
@ -146,7 +146,7 @@ QM_ISR_DECLARE(qm_dma_0_isr_5);
* ISR for DMA channel 6 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_0_INT_6, qm_dma_0_isr_6);
* @code QM_IRQ_REQUEST(QM_IRQ_DMA_0_INT_6, qm_dma_0_isr_6);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_6);
@ -155,7 +155,7 @@ QM_ISR_DECLARE(qm_dma_0_isr_6);
* ISR for DMA 0 channel 7 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_0_INT_7, qm_dma_0_isr_7);
* @code QM_IRQ_REQUEST(QM_IRQ_DMA_0_INT_7, qm_dma_0_isr_7);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_7);
@ -165,7 +165,7 @@ QM_ISR_DECLARE(qm_dma_0_isr_7);
* ISR for FPR 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_FLASH_MPR_0_INT, qm_flash_mpr_0_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_FLASH_MPR_0_INT, qm_flash_mpr_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_flash_mpr_0_isr);
@ -174,7 +174,7 @@ QM_ISR_DECLARE(qm_flash_mpr_0_isr);
* ISR for FPR 1 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_FLASH_MPR_1_INT, qm_flash_mpr_1_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_FLASH_MPR_1_INT, qm_flash_mpr_1_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_flash_mpr_1_isr);
@ -183,7 +183,7 @@ QM_ISR_DECLARE(qm_flash_mpr_1_isr);
* ISR for GPIO 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_GPIO_0_INT, qm_gpio_0_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_GPIO_0_INT, qm_gpio_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_gpio_0_isr);
@ -193,7 +193,7 @@ QM_ISR_DECLARE(qm_gpio_0_isr);
* ISR for AON GPIO 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_AON_GPIO_0_INT, qm_aon_gpio_0_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_AON_GPIO_0_INT, qm_aon_gpio_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_aon_gpio_0_isr);
@ -203,7 +203,7 @@ QM_ISR_DECLARE(qm_aon_gpio_0_isr);
* ISR for I2C 0 irq mode transfer interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_I2C_0_INT, qm_i2c_0_irq_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_I2C_0_INT, qm_i2c_0_irq_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_i2c_0_irq_isr);
@ -212,7 +212,7 @@ QM_ISR_DECLARE(qm_i2c_0_irq_isr);
* ISR for I2C 1 irq mode transfer interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_I2C_1_INT, qm_i2c_1_irq_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_I2C_1_INT, qm_i2c_1_irq_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_i2c_1_irq_isr);
@ -221,7 +221,7 @@ QM_ISR_DECLARE(qm_i2c_1_irq_isr);
* ISR for I2C 0 dma mode transfer interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_I2C_0_INT, qm_i2c_0_dma_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_I2C_0_INT, qm_i2c_0_dma_isr);
* @endcode if DMA based transfers are used.
*/
QM_ISR_DECLARE(qm_i2c_0_dma_isr);
@ -230,7 +230,7 @@ QM_ISR_DECLARE(qm_i2c_0_dma_isr);
* ISR for I2C 1 dma mode transfer interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_I2C_1_INT, qm_i2c_1_dma_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_I2C_1_INT, qm_i2c_1_dma_isr);
* @endcode if DMA based transfers are used.
*/
QM_ISR_DECLARE(qm_i2c_1_dma_isr);
@ -239,7 +239,7 @@ QM_ISR_DECLARE(qm_i2c_1_dma_isr);
* ISR for Mailbox interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_MAILBOX_0_INT, qm_mailbox_0_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_MAILBOX_0_INT, qm_mailbox_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_mailbox_0_isr);
@ -248,7 +248,7 @@ QM_ISR_DECLARE(qm_mailbox_0_isr);
* ISR for Memory Protection Region interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_SRAM_MPR_0_INT, qm_sram_mpr_0_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_SRAM_MPR_0_INT, qm_sram_mpr_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_sram_mpr_0_isr);
@ -263,25 +263,57 @@ QM_ISR_DECLARE(qm_sram_mpr_0_isr);
* @endcode if IRQ based transfers are used.
*
* On Quark SE, this function needs to be registered with:
* @code qm_irq_request(QM_IRQ_PIC_TIMER, qm_pic_timer_0_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_PIC_TIMER, qm_pic_timer_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_pic_timer_0_isr);
/**
* ISR for PWM 0 interrupt.
* ISR for PWM 0 Channel 0 interrupt.
* If there is only one interrupt per controller, this ISR handles
* all channel interrupts.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_PWM_0_INT, qm_pwm_0_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_PWM_0_INT, qm_pwm_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_pwm_0_isr);
QM_ISR_DECLARE(qm_pwm_0_isr_0);
#if (NUM_PWM_CONTROLLER_INTERRUPTS > 1)
/**
* ISR for PWM 0 channel 1 interrupt.
*
* This function needs to be registered with
* @code QM_IRQ_REQUEST(QM_IRQ_PWM_1, qm_pwm_0_isr_1);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_pwm_0_isr_1);
/**
* ISR for PWM 0 channel 2 interrupt.
*
* This function needs to be registered with
* @code QM_IRQ_REQUEST(QM_IRQ_PWM_2, qm_pwm_0_isr_2);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_pwm_0_isr_2);
/**
* ISR for PWM 0 channel 3 interrupt.
*
* This function needs to be registered with
* @code QM_IRQ_REQUEST(QM_IRQ_PWM_3, qm_pwm_0_isr_3);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_pwm_0_isr_3);
#endif /* NUM_PWM_CONTROLLER_INTERRUPTS > 1 */
/**
* ISR for RTC 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_RTC_0_INT, qm_rtc_0_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_RTC_0_INT, qm_rtc_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_rtc_0_isr);
@ -290,7 +322,7 @@ QM_ISR_DECLARE(qm_rtc_0_isr);
* ISR for SPI Master 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_SPI_MASTER_0_INT, qm_spi_master_0_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_SPI_MASTER_0_INT, qm_spi_master_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_spi_master_0_isr);
@ -300,17 +332,26 @@ QM_ISR_DECLARE(qm_spi_master_0_isr);
* ISR for SPI Master 1 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_SPI_MASTER_1_INT, qm_spi_master_1_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_SPI_MASTER_1_INT, qm_spi_master_1_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_spi_master_1_isr);
#endif /* QUARK_SE */
#endif /* (QUARK_SE) */
/**
* ISR for SPI Slave 0 interrupt.
*
* This function needs to be registered with
* @code QM_IRQ_REQUEST(QM_IRQ_SPI_SLAVE_0_INT, qm_spi_slave_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_spi_slave_0_isr);
/**
* ISR for UART 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_UART_0_INT, qm_uart_0_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_UART_0_INT, qm_uart_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_uart_0_isr);
@ -319,7 +360,7 @@ QM_ISR_DECLARE(qm_uart_0_isr);
* ISR for UART 1 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_UART_1_INT, qm_uart_1_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_UART_1_INT, qm_uart_1_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_uart_1_isr);
@ -328,16 +369,27 @@ QM_ISR_DECLARE(qm_uart_1_isr);
* ISR for WDT 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_WDT_0_INT, qm_wdt_0_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_WDT_0_INT, qm_wdt_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_wdt_0_isr);
#if (NUM_WDT_CONTROLLERS > 1)
/**
* ISR for WDT 1 interrupt.
*
* This function needs to be registered with
* @code QM_IRQ_REQUEST(QM_IRQ_WDT_1_INT, qm_wdt_1_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_wdt_1_isr);
#endif /* NUM_WDT_CONTROLLERS > 1 */
/**
* ISR for USB 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_USB_0_INT, qm_usb_0_isr);
* @code QM_IRQ_REQUEST(QM_IRQ_USB_0_INT, qm_usb_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_usb_0_isr);

View file

@ -32,6 +32,7 @@
#include "qm_common.h"
#include "qm_soc_regs.h"
#include "qm_mailbox_defs.h"
#if (HAS_MAILBOX)
/**
@ -47,32 +48,20 @@
typedef enum {
/** No interrupt pending nor any data to consume. */
QM_MBOX_CH_IDLE = 0,
/** Receiver has serviced the interrupt and data
/**
* Receiver has serviced the interrupt and data
* has not been consumed. */
QM_MBOX_CH_INT_ACK_DATA_PEND,
/** Receiver in polling mode and data has not been consumed. */
QM_MBOX_CH_POLLING_DATA_PEND,
/** Receiver hasn't serviced the interrupt and data
QM_MBOX_CH_DATA_PEND = QM_MBOX_CH_STS,
/**
* Receiver hasn't serviced the interrupt and data
* has not been consumed.
*/
QM_MBOX_CH_INT_NACK_DATA_PEND,
QM_MBOX_CH_INT_AND_DATA_PEND =
(QM_MBOX_CH_STS | QM_MBOX_CH_STS_CTRL_INT),
} qm_mbox_ch_status_t;
/**
* Mailbox channel identifiers
*/
typedef enum {
QM_MBOX_CH_0 = 0, /**< Channel 0. */
QM_MBOX_CH_1, /**< Channel 1. */
QM_MBOX_CH_2, /**< Channel 2. */
QM_MBOX_CH_3, /**< Channel 3. */
QM_MBOX_CH_4, /**< Channel 4. */
QM_MBOX_CH_5, /**< Channel 5. */
QM_MBOX_CH_6, /**< Channel 6. */
QM_MBOX_CH_7, /**< Channel 7. */
QM_MBOX_CH_NUM /**< Mailbox number of channels. */
} qm_mbox_ch_t;
/**
* Mailbox message payload index values.
*/
@ -84,18 +73,6 @@ typedef enum {
QM_MBOX_PAYLOAD_NUM, /**< Number of payloads. */
} qm_mbox_payload_t;
/**
* Definition of the mailbox direction of operation
* The direction of communication for each channel is configurable by the user.
* The list below describes the possible communication directions for each
* channel.
*/
typedef enum {
QM_MBOX_TO_LMT = 0, /**< Lakemont core as destination */
QM_MBOX_TO_SS, /**< Sensor Sub-System core as destination */
QM_MBOX_UNUSED
} qm_mbox_destination_t;
/**
* Definition of the mailbox mode of operation, interrupt mode or polling mode.
*/

View file

@ -90,7 +90,6 @@ int qm_mpr_set_config(const qm_mpr_id_t id, const qm_mpr_config_t *const cfg);
int qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode,
qm_mpr_callback_t callback_fn, void *data);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save MPR context.
*
@ -128,7 +127,6 @@ int qm_mpr_save_context(qm_mpr_context_t *const ctx);
* @retval Negative @ref errno for possible error codes.
*/
int qm_mpr_restore_context(const qm_mpr_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* @}

View file

@ -105,7 +105,6 @@ int qm_pic_timer_set(const uint32_t count);
*/
int qm_pic_timer_get(uint32_t *const count);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save PIC Timer peripheral's context.
*
@ -134,7 +133,7 @@ int qm_pic_timer_save_context(qm_pic_timer_context_t *const ctx);
* @retval Negative @ref errno for possible error codes.
*/
int qm_pic_timer_restore_context(const qm_pic_timer_context_t *const ctx);
#endif
/**
* @}
*/

View file

@ -163,7 +163,6 @@ int qm_pwm_start(const qm_pwm_t pwm, const qm_pwm_id_t id);
*/
int qm_pwm_stop(const qm_pwm_t pwm, const qm_pwm_id_t id);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save PWM peripheral's context.
*
@ -194,7 +193,6 @@ int qm_pwm_save_context(const qm_pwm_t pwm, qm_pwm_context_t *const ctx);
*/
int qm_pwm_restore_context(const qm_pwm_t pwm,
const qm_pwm_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* @}

View file

@ -140,6 +140,37 @@ int qm_rtc_set_alarm(const qm_rtc_t rtc, const uint32_t alarm_val);
*/
int qm_rtc_read(const qm_rtc_t rtc, uint32_t *const value);
/**
* Save RTC context.
*
* Save the configuration of the specified RTC peripheral
* before entering sleep.
*
* @param[in] rtc RTC index.
* @param[out] ctx RTC context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_rtc_save_context(const qm_rtc_t rtc, qm_rtc_context_t *const ctx);
/**
* Restore RTC context.
*
* Restore the configuration of the specified RTC peripheral
* after exiting sleep.
*
* @param[in] rtc RTC index.
* @param[in] ctx RTC context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_rtc_restore_context(const qm_rtc_t rtc,
const qm_rtc_context_t *const ctx);
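As with the other context APIs, the intended flow is save before sleep and restore after wake; a sketch assuming the RTC instance enumerator is named QM_RTC_0:

static qm_rtc_context_t rtc_ctx;

static void rtc_sleep_cycle(void)
{
	qm_rtc_save_context(QM_RTC_0, &rtc_ctx); /* QM_RTC_0 assumed */
	/* ... sleep ... */
	qm_rtc_restore_context(QM_RTC_0, &rtc_ctx);
}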
/**
* @}
*/

View file

@ -115,9 +115,11 @@ typedef enum {
* SPI status
*/
typedef enum {
QM_SPI_IDLE, /**< SPI device is not in use. */
QM_SPI_BUSY, /**< SPI device is busy. */
QM_SPI_RX_OVERFLOW /**< RX transfer has overflown. */
QM_SPI_IDLE, /**< SPI device is not in use. */
QM_SPI_BUSY, /**< SPI device is busy. */
QM_SPI_RX_OVERFLOW, /**< RX transfer has overflown. */
QM_SPI_RX_FULL, /**< Appl. Rx buffer full (slave only). */
QM_SPI_TX_EMPTY /**< Appl. Tx buffer empty (slave only). */
} qm_spi_status_t;
/**
@ -126,96 +128,20 @@ typedef enum {
typedef enum {
/**< Standard SPI mode */
QM_SPI_FRAME_FORMAT_STANDARD = 0x0,
#if HAS_QSPI
/**< Quad SPI mode */
QM_SPI_FRAME_FORMAT_QUAD = 0x2
#endif /* HAS_QSPI */
} qm_spi_frame_format_t;
#if HAS_QSPI
/**
* QM QSPI number of wait cycles
/*
* SPI update type
*
* Used by qm_spi_irq_update to know what to update, RX or TX.
* Logical OR can be used to update both RX and TX.
*/
typedef enum {
QM_SPI_QUAD_0_WAIT_CYCLES = 0x0, /**< No wait cycles */
QM_SPI_QUAD_1_WAIT_CYCLES = 0x1, /**< 1 wait cycle */
QM_SPI_QUAD_2_WAIT_CYCLES = 0x2, /**< 2 wait cycles */
QM_SPI_QUAD_3_WAIT_CYCLES = 0x3, /**< 3 wait cycles */
QM_SPI_QUAD_4_WAIT_CYCLES = 0x4, /**< 4 wait cycles */
QM_SPI_QUAD_5_WAIT_CYCLES = 0x5, /**< 5 wait cycles */
QM_SPI_QUAD_6_WAIT_CYCLES = 0x6, /**< 6 wait cycles */
QM_SPI_QUAD_7_WAIT_CYCLES = 0x7, /**< 7 wait cycles */
QM_SPI_QUAD_8_WAIT_CYCLES = 0x8, /**< 8 wait cycles */
QM_SPI_QUAD_9_WAIT_CYCLES = 0x9, /**< 9 wait cycles */
QM_SPI_QUAD_10_WAIT_CYCLES = 0xA, /**< 10 wait cycles */
QM_SPI_QUAD_11_WAIT_CYCLES = 0xB, /**< 11 wait cycles */
QM_SPI_QUAD_12_WAIT_CYCLES = 0xC, /**< 12 wait cycles */
QM_SPI_QUAD_13_WAIT_CYCLES = 0xD, /**< 13 wait cycles */
QM_SPI_QUAD_14_WAIT_CYCLES = 0xE, /**< 14 wait cycles */
QM_SPI_QUAD_15_WAIT_CYCLES = 0xF, /**< 15 wait cycles */
} qm_spi_quad_wait_cycles_t;
QM_SPI_UPDATE_RX = BIT(0), /* Update RX. */
QM_SPI_UPDATE_TX = BIT(1), /* Update TX. */
} qm_spi_update_t;
/**
* QM QSPI Instruction length
*/
typedef enum {
QM_SPI_QUAD_INST_LENGTH_0_BITS = 0x0, /**< No instruction */
QM_SPI_QUAD_INST_LENGTH_4_BITS = 0x1, /**< 4 bit instruction */
QM_SPI_QUAD_INST_LENGTH_8_BITS = 0x2, /**< 8 bit instruction */
QM_SPI_QUAD_INST_LENGTH_16_BITS = 0x3 /**< 16 bit instruction */
} qm_spi_quad_inst_length_t;
/**
* QM QSPI Address length
*/
typedef enum {
QM_SPI_QUAD_ADDR_LENGTH_0_BITS = 0x0, /**< No address */
QM_SPI_QUAD_ADDR_LENGTH_4_BITS = 0x1, /**< 4 bit address */
QM_SPI_QUAD_ADDR_LENGTH_8_BITS = 0x2, /**< 8 bit address */
QM_SPI_QUAD_ADDR_LENGTH_12_BITS = 0x3, /**< 12 bit address */
QM_SPI_QUAD_ADDR_LENGTH_16_BITS = 0x4, /**< 16 bit address */
QM_SPI_QUAD_ADDR_LENGTH_20_BITS = 0x5, /**< 20 bit address */
QM_SPI_QUAD_ADDR_LENGTH_24_BITS = 0x6, /**< 24 bit address */
QM_SPI_QUAD_ADDR_LENGTH_28_BITS = 0x7, /**< 28 bit address */
QM_SPI_QUAD_ADDR_LENGTH_32_BITS = 0x8, /**< 32 bit address */
QM_SPI_QUAD_ADDR_LENGTH_36_BITS = 0x9, /**< 36 bit address */
QM_SPI_QUAD_ADDR_LENGTH_40_BITS = 0xA, /**< 40 bit address */
QM_SPI_QUAD_ADDR_LENGTH_44_BITS = 0xB, /**< 44 bit address */
QM_SPI_QUAD_ADDR_LENGTH_48_BITS = 0xC, /**< 48 bit address */
QM_SPI_QUAD_ADDR_LENGTH_52_BITS = 0xD, /**< 52 bit address */
QM_SPI_QUAD_ADDR_LENGTH_56_BITS = 0xE, /**< 56 bit address */
QM_SPI_QUAD_ADDR_LENGTH_60_BITS = 0xF /**< 60 bit address */
} qm_spi_quad_addr_length_t;
/**
* QM QSPI Transfer type
*/
typedef enum {
/**< Both instruction and address sent in standard SPI mode */
QM_SPI_QUAD_INST_STD_ADDR_STD = 0x0,
/**
* Instruction sent in standard SPI mode
* and address sent in Quad SPI mode
*/
QM_SPI_QUAD_INST_STD_ADDR_QUAD = 0x1,
/**< Both instruction and address sent in Quad SPI mode */
QM_SPI_QUAD_INST_QUAD_ADDR_QUAD = 0x2
} qm_spi_quad_transfer_type_t;
/**
* QM QSPI Transfer Configuration
*/
typedef struct {
qm_spi_quad_wait_cycles_t
wait_cycles; /**< Wait cycles for QSPI reads */
qm_spi_quad_inst_length_t inst_length; /**< Instruction length */
qm_spi_quad_addr_length_t addr_length; /**< Address length */
qm_spi_quad_transfer_type_t trans_type; /**< QSPI Transfer type */
} qm_spi_quad_config_t;
#endif /* HAS_QSPI */
/**
/*
* SPI configuration type.
*/
typedef struct {
@ -242,19 +168,19 @@ typedef struct {
* frame are sent first.
*/
typedef struct {
void *tx; /**< Write data. */
void *rx; /**< Read data. */
uint16_t tx_len; /**< Number of data frames to write. */
uint16_t rx_len; /**< Number of data frames to read. */
#if HAS_QSPI
qm_spi_quad_config_t qspi_cfg; /**< QSPI transfer parameters */
#endif /* HAS_QSPI */
void *tx; /**< Write data. */
void *rx; /**< Read data. */
uint16_t tx_len; /**< Number of data frames to write. */
uint16_t rx_len; /**< Number of data frames to read. */
bool keep_enabled; /**< Keep device on once transfer is done. */
/**
* Transfer callback.
*
* Called after all data is transmitted/received or if the driver
* detects an error during the SPI transfer.
* For slave device it also allows the application to update
* transfer information by calling the qm_spi_irq_update function.
*
* @param[in] data The callback user data.
* @param[in] error 0 on success.
@ -282,9 +208,6 @@ typedef struct {
void *rx; /**< Read data. */
uint16_t tx_len; /**< Number of data frames to write. */
uint16_t rx_len; /**< Number of data frames to read. */
#if HAS_QSPI
qm_spi_quad_config_t qspi_cfg; /* QSPI transfer parameters */
#endif /* HAS_QSPI */
} qm_spi_transfer_t;
/**
@ -387,7 +310,35 @@ int qm_spi_transfer(const qm_spi_t spi, const qm_spi_transfer_t *const xfer,
* @retval Negative @ref errno for possible error codes.
*/
int qm_spi_irq_transfer(const qm_spi_t spi,
const qm_spi_async_transfer_t *const xfer);
volatile const qm_spi_async_transfer_t *const xfer);
/**
* Update parameters of Interrupt based transfer on SPI.
*
* Allow the application to transmit and/or receive more data over the current
* SPI communication.
* The application should call this function only from inside the registered
* callback, once notified by the driver.
* It is strongly recommended to use this function for slave-based applications
* only, as slave controllers usually do not know how many frames an external
* master will send or request before starting the communication.
* Master controllers should not use this function as it will most likely
* corrupt the transaction.
*
* @param[in] spi Which SPI to transfer to / from.
* @param[in] xfer Transfer structure includes write / read buffers, length,
* user callback function and the callback context data.
* The structure must not be NULL and must be kept valid until
* the transfer is complete.
* @param[in] update Specify if only RX has to be updated, or only TX or both.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_spi_irq_update(const qm_spi_t spi,
volatile const qm_spi_async_transfer_t *const xfer,
const qm_spi_update_t update);
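A slave-side callback sketch: on QM_SPI_RX_FULL the application swaps in a fresh RX buffer and asks the driver to keep receiving. The callback signature and the QM_SPI_SLV_0 instance name are assumptions, not taken from this diff:

static volatile qm_spi_async_transfer_t slave_xfer;
static uint8_t rx_buf[32];

static void slave_cb(void *data, int error, qm_spi_status_t status,
		     uint16_t len)
{
	if (!error && status == QM_SPI_RX_FULL) {
		slave_xfer.rx = rx_buf; /* fresh application buffer */
		slave_xfer.rx_len = sizeof(rx_buf);
		qm_spi_irq_update(QM_SPI_SLV_0, &slave_xfer,
				  QM_SPI_UPDATE_RX);
	}
}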
/**
* Configure a DMA channel with a specific transfer direction.
@ -488,45 +439,6 @@ int qm_spi_irq_transfer_terminate(const qm_spi_t spi);
*/
int qm_spi_dma_transfer_terminate(const qm_spi_t spi);
#if HAS_QSPI
/**
* Configure a QSPI enabled controller for use in XIP mode.
* Execute-In-Place (XIP) mode allows the processor to access
* external flash memory, via QSPI interface, as if it were
* memory-mapped.
* While in XIP mode, standard SPI register interface will be disabled.
* The user needs to call qm_spi_exit_xip_mode to resume normal SPI operation.
*
* @note 'inst_length' member of qm_spi_quad_config_t parameter is not
* needed for this function as XIP transfers do not require an
* instruction phase.
*
* @param[in] spi SPI controller identifier
* @param[in] wait_cycles No of wait cycles for QSPI transfer
* @param[in] addr_length Length of address for QSPI transfers
* @param[in] trans_type QSPI transfer type
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_spi_enter_xip_mode(const qm_spi_t spi,
const qm_spi_quad_config_t qspi_cfg);
/**
* Clear xip_mode flag and allow for normal operation
* of the SPI controller.
*
* @param[in] spi SPI controller identifier
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_spi_exit_xip_mode(const qm_spi_t spi);
#endif /* HAS_QSPI */
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save SPI context.
*
@ -557,7 +469,6 @@ int qm_spi_save_context(const qm_spi_t spi, qm_spi_context_t *const ctx);
*/
int qm_spi_restore_context(const qm_spi_t spi,
const qm_spi_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* @}

View file

@ -300,7 +300,6 @@ int qm_ss_adc_convert(const qm_ss_adc_t adc, qm_ss_adc_xfer_t *const xfer,
*/
int qm_ss_adc_irq_convert(const qm_ss_adc_t adc, qm_ss_adc_xfer_t *const xfer);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save SS ADC context.
*
@ -338,7 +337,6 @@ int qm_ss_adc_save_context(const qm_ss_adc_t adc,
*/
int qm_ss_adc_restore_context(const qm_ss_adc_t adc,
const qm_ss_adc_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* @}

View file

@ -178,7 +178,6 @@ int qm_ss_gpio_read_port(const qm_ss_gpio_t gpio, uint32_t *const port);
*/
int qm_ss_gpio_write_port(const qm_ss_gpio_t gpio, const uint32_t val);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save SS GPIO context.
*
@ -210,7 +209,6 @@ int qm_ss_gpio_save_context(const qm_ss_gpio_t gpio,
*/
int qm_ss_gpio_restore_context(const qm_ss_gpio_t gpio,
const qm_ss_gpio_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* @}

View file

@ -266,7 +266,6 @@ int qm_ss_i2c_master_irq_transfer(const qm_ss_i2c_t i2c,
*/
int qm_ss_i2c_irq_transfer_terminate(const qm_ss_i2c_t i2c);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save SS I2C context.
*
@ -300,7 +299,6 @@ int qm_ss_i2c_save_context(const qm_ss_i2c_t i2c,
*/
int qm_ss_i2c_restore_context(const qm_ss_i2c_t i2c,
const qm_ss_i2c_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* @}

View file

@ -62,7 +62,7 @@ QM_ISR_DECLARE(qm_ss_adc_0_error_isr);
* ISR for SS ADC 0 calibration interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_SS_IRQ_ADC_0_CAL_INT, qm_ss_adc_0_cal_isr);
* @code QM_IRQ_REQUEST(QM_SS_IRQ_ADC_0_CAL_INT, qm_ss_adc_0_cal_isr);
* @endcode if IRQ based calibration is used.
*/
QM_ISR_DECLARE(qm_ss_adc_0_cal_isr);
@ -71,7 +71,7 @@ QM_ISR_DECLARE(qm_ss_adc_0_cal_isr);
* ISR for SS ADC 0 mode change interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_SS_IRQ_ADC_0_PWR_INT, qm_ss_adc_0_pwr_isr);
* @code QM_IRQ_REQUEST(QM_SS_IRQ_ADC_0_PWR_INT, qm_ss_adc_0_pwr_isr);
* @endcode if IRQ based mode change is used.
*/
QM_ISR_DECLARE(qm_ss_adc_0_pwr_isr);
@ -98,25 +98,70 @@ QM_ISR_DECLARE(qm_ss_gpio_1_isr);
* ISR for I2C 0 error interrupt.
*
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_ERROR_INT, qm_ss_i2c_0_isr);
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_RX_AVAIL_INT, qm_ss_i2c_0_isr);
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_TX_REQ_INT, qm_ss_i2c_0_isr);
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_STOP_DET_INT, qm_ss_i2c_0_isr);
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_ERROR_INT, qm_ss_i2c_0_error_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_i2c_0_isr);
QM_ISR_DECLARE(qm_ss_i2c_0_error_isr);
/**
* ISR for I2C 0 RX data available interrupt.
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_RX_AVAIL_INT,
* qm_ss_i2c_0_rx_avail_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_i2c_0_rx_avail_isr);
/**
* ISR for I2C 0 TX data requested interrupt.
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_TX_REQ_INT, qm_ss_i2c_0_tx_req_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_i2c_0_tx_req_isr);
/**
* ISR for I2C 0 STOP detected interrupt.
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_STOP_DET_INT,
* qm_ss_i2c_0_stop_det_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_i2c_0_stop_det_isr);
/**
* ISR for I2C 1 error interrupt.
*
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_ERROR_INT, qm_ss_i2c_1_isr);
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_RX_AVAIL_INT, qm_ss_i2c_1_isr);
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_TX_REQ_INT, qm_ss_i2c_1_isr);
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_STOP_DET_INT, qm_ss_i2c_1_isr);
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_ERROR_INT, qm_ss_i2c_1_error_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_i2c_1_isr);
QM_ISR_DECLARE(qm_ss_i2c_1_error_isr);
/**
* ISR for I2C 1 RX data available interrupt.
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_RX_AVAIL_INT,
* qm_ss_i2c_1_rx_avail_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_i2c_1_rx_avail_isr);
/**
* ISR for I2C 1 TX data requested interrupt.
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_TX_REQ_INT, qm_ss_i2c_1_tx_req_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_i2c_1_tx_req_isr);
/**
* ISR for I2C 1 STOP detected interrupt.
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_STOP_DET_INT,
* qm_ss_i2c_1_stop_det_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_i2c_1_stop_det_isr);
/**
* ISR for SPI 0 error interrupt.

View file

@ -32,9 +32,6 @@
#include "qm_common.h"
#include "qm_sensor_regs.h"
#if HAS_SS_DMA
#include "qm_ss_dma.h"
#endif
/**
* SPI peripheral driver for Sensor Subsystem.
*
@ -313,58 +310,6 @@ int qm_ss_spi_transfer(const qm_ss_spi_t spi,
int qm_ss_spi_irq_transfer(const qm_ss_spi_t spi,
const qm_ss_spi_async_transfer_t *const xfer);
#if HAS_SS_DMA
/**
* Configure a DMA channel with a specific transfer direction.
*
* The user is responsible for managing the allocation of the pool of DMA
* channels provided by each DMA core to the different peripheral drivers
* that require them.
*
* Note that a SPI controller cannot use different DMA cores to manage
* transfers in different directions.
*
* This function configures DMA channel parameters that are unlikely to change
* between transfers, like transaction width, burst size, and handshake
* interface parameters. The user will likely only call this function once for
* the lifetime of an application unless the channel needs to be repurposed.
*
* Note that qm_dma_init() must first be called before configuring a channel.
*
* @param[in] spi SPI controller identifier.
* @param[in] dma_channel_direction DMA channel direction, either
* QM_DMA_MEMORY_TO_PERIPHERAL (TX transfer) or QM_DMA_PERIPHERAL_TO_MEMORY
* (RX transfer).
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
/* TODO - DR AP[2756] Revisit when DMA is added to the sensor SPI (QMSI 1.1). */
int qm_ss_spi_dma_channel_config(
const qm_ss_spi_t spi,
const qm_ss_dma_destination_target_type_t dma_channel_direction);
/**
* Perform a DMA based transfer on the SPI bus. The function will
* replenish/empty TX/RX FIFOs on DMA empty/full interrupts.
*
* @brief DMA based transfer on SPI.
*
* @param [in] spi SPI controller id
* @param [in] xfer Transfer structure includes write / read data
* and length; write, read and error callback
* functions and a callback identifier.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes
*/
/* TODO - DR AP[2756] Revisit when DMA is added to the sensor SPI (QMSI 1.1). */
int qm_ss_spi_dma_transfer(const qm_ss_spi_t spi,
const qm_ss_spi_async_transfer_t *const xfer);
#endif /* HAS_SS_QMSI_DMA */
/**
* Terminate SPI IRQ transfer.
*
@ -378,27 +323,8 @@ int qm_ss_spi_dma_transfer(const qm_ss_spi_t spi,
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_spi_transfer_terminate(const qm_ss_spi_t spi);
int qm_ss_spi_irq_transfer_terminate(const qm_ss_spi_t spi);
#if HAS_SS_QMSI_DMA
/**
* Terminate SPI DMA transfer.
*
* Terminate the current DMA SPI transfer.
* This function will trigger complete callbacks even
* if the transfer is not completed.
*
* @param[in] spi SPI module identifier.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
/* TODO - DR AP[2756] Revisit when DMA is added to the sensor SPI (QMSI 1.1). */
int qm_ss_spi_dma_transfer_terminate(const qm_ss_spi_t spi);
#endif /* HAS_SS_QMSI_DMA */
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save SS SPI context.
*
@ -430,7 +356,6 @@ int qm_ss_spi_save_context(const qm_ss_spi_t spi,
*/
int qm_ss_spi_restore_context(const qm_ss_spi_t spi,
const qm_ss_spi_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* @}

View file

@ -113,7 +113,6 @@ int qm_ss_timer_set(const qm_ss_timer_t timer, const uint32_t count);
*/
int qm_ss_timer_get(const qm_ss_timer_t timer, uint32_t *const count);
#if (ENABLE_RESTORE_CONTEXT)
/*
* Save SS TIMER context.
*
@ -145,7 +144,6 @@ int qm_ss_timer_save_context(const qm_ss_timer_t timer,
*/
int qm_ss_timer_restore_context(const qm_ss_timer_t timer,
const qm_ss_timer_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* @}

View file

@ -71,30 +71,6 @@ typedef enum {
QM_UART_LC_8O2 = 0x0f /**< 8 data bits, odd parity, 2 stop bits. */
} qm_uart_lc_t;
#if HAS_ADVANCED_UART_CONFIGURATION
/**
* UART Transmit Water Mark
* Empty trigger level in the transmit FIFO.
*/
typedef enum {
QM_UART_TX_WM_EMPTY = 0, /* FIFO empty */
QM_UART_TX_WM_TWOCHAR, /* 2 characters in the FIFO */
QM_UART_TX_WM_QUARTER, /* FIFO 1/4 full */
QM_UART_TX_WM_HALF, /* FIFO 1/2 full */
} qm_uart_tx_water_mark_t;
/**
* UART Receive Water Mark
* Trigger level in the receiver FIFO.
*/
typedef enum {
QM_UART_RX_WM_ONEBYTE = 0, /* 1 character in the FIFO */
QM_UART_RX_WM_QUARTER, /* FIFO 1/4 full */
QM_UART_RX_WM_HALF, /* FIFO 1/2 full */
QM_UART_RX_WM_TWOLESS, /* FIFO 2 less than full */
} qm_uart_rx_water_mark_t;
#endif /* HAS_ADVANCED_UART_CONFIGURATION */
/**
* UART Status type.
*/
@ -117,10 +93,6 @@ typedef struct {
qm_uart_lc_t line_control; /**< Line control (enum). */
uint32_t baud_divisor; /**< Baud Divisor. */
bool hw_fc; /**< Hardware Automatic Flow Control. */
#if HAS_ADVANCED_UART_CONFIGURATION
qm_uart_tx_water_mark_t tx_water_mark; /* UART Tx FIFO Water Mark */
qm_uart_rx_water_mark_t rx_water_mark; /* UART Rx FIFO Water Mark */
#endif /* HAS_ADVANCED_UART_CONFIGURATION */
} qm_uart_config_t;
/**
@ -424,7 +396,6 @@ int qm_uart_dma_write_terminate(const qm_uart_t uart);
*/
int qm_uart_dma_read_terminate(const qm_uart_t uart);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save UART context.
*
@ -459,7 +430,6 @@ int qm_uart_save_context(const qm_uart_t uart, qm_uart_context_t *const ctx);
*/
int qm_uart_restore_context(const qm_uart_t uart,
const qm_uart_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* @}

View file

@ -136,7 +136,6 @@ int qm_wdt_set_config(const qm_wdt_t wdt, const qm_wdt_config_t *const cfg);
*/
int qm_wdt_reload(const qm_wdt_t wdt);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save watchdog context.
*
@ -165,7 +164,6 @@ int qm_wdt_save_context(const qm_wdt_t wdt, qm_wdt_context_t *const ctx);
*/
int qm_wdt_restore_context(const qm_wdt_t wdt,
const qm_wdt_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
* @}

View file

@ -64,9 +64,19 @@ typedef enum {
SOCW_EVENT_SLEEP = 2, /**< Sleep mode entered. */
SOCW_EVENT_REGISTER = 3, /**< SOC register altered. */
SOCW_EVENT_APP = 4, /**< Application-defined event. */
SOCW_EVENT_MAX = 5 /**< End of events sentinel. */
SOCW_EVENT_FREQ = 5, /**< Frequency altered. */
SOCW_EVENT_MAX = 6 /**< End of events sentinel. */
} soc_watch_event_t;
/*
* Power profiling events for ARC Sensor states.
*
* Internally, SoC Watch processes the SS1 and SS2 states as the Halt
* and Sleep event encodings, respectively.
*/
#define SOCW_ARC_EVENT_SS1 SOCW_EVENT_HALT
#define SOCW_ARC_EVENT_SS2 SOCW_EVENT_SLEEP
/**
* Register ID enumeration.
*
@ -156,6 +166,13 @@ void soc_watch_log_event(soc_watch_event_t event_id, uintptr_t ev_data);
void soc_watch_log_app_event(soc_watch_event_t event_id, uint8_t ev_subtype,
uintptr_t ev_data);
/**
* Trigger a buffer flush via watchpoint.
* This allows applications layered on top of QMSI to trigger the transfer of
* profiler information to the host whenever required.
*/
void soc_watch_trigger_flush();
/**
* @}
*/

View file

@ -75,6 +75,7 @@ extern intr_gate_desc_t __idt_start[];
static __inline__ void idt_set_intr_gate_desc(uint32_t vector, uint32_t isr)
{
intr_gate_desc_t *desc;
idtr_t idtr;
desc = __idt_start + vector;
@ -88,6 +89,13 @@ static __inline__ void idt_set_intr_gate_desc(uint32_t vector, uint32_t isr)
p: 1
*/
desc->isr_high = (isr >> 16) & 0xFFFF;
/* The following reloads the IDTR register. If a lookaside buffer is
* being used this will invalidate it. This is required as it's possible
* for an application to change the registered ISR. */
idtr.limit = IDT_SIZE - 1;
idtr.base = (uint32_t)__idt_start;
__asm__ __volatile__("lidt %0\n\t" ::"m"(idtr));
}
/*

View file

@ -48,10 +48,6 @@ static void ss_register_irq(unsigned int vector);
#error "Unsupported / unspecified processor detected."
#endif
/* Event router base addr for LMT interrupt routing, for linear IRQ mapping */
#define INTERRUPT_ROUTER_LMT_INT_MASK_BASE \
(&QM_INTERRUPT_ROUTER->i2c_master_0_int_mask)
/* x86 CPU FLAGS.IF register field (Interrupt enable Flag, bit 9), indicating
* whether or not CPU interrupts are enabled.
*/
@ -206,15 +202,18 @@ int qm_irq_save_context(qm_irq_context_t *const ctx)
QM_CHECK(ctx != NULL, -EINVAL);
/* Start from i=1, skip reset vector. */
for (i = 1; i < QM_SS_INT_VECTOR_NUM; i++) {
__builtin_arc_sr(i, QM_SS_AUX_IRQ_SELECT);
ctx->irq_config[i - 1] =
__builtin_arc_lr(QM_SS_AUX_IRQ_PRIORITY) << 2;
ctx->irq_config[i - 1] |=
__builtin_arc_lr(QM_SS_AUX_IRQ_TRIGGER) << 1;
ctx->irq_config[i - 1] |=
__builtin_arc_lr(QM_SS_AUX_IRQ_ENABLE);
/* Interrupts from 0 to 15 are exceptions and they are ignored
* by IRQ auxiliary registers. For that reason we skip those
* values in this loop.
*/
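/* Each saved entry packs: priority << 2 | trigger << 1 | enable. */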
for (i = 0; i < (QM_SS_INT_VECTOR_NUM - QM_SS_EXCEPTION_NUM); i++) {
__builtin_arc_sr(i + QM_SS_EXCEPTION_NUM, QM_SS_AUX_IRQ_SELECT);
ctx->irq_config[i] = __builtin_arc_lr(QM_SS_AUX_IRQ_PRIORITY)
<< 2;
ctx->irq_config[i] |= __builtin_arc_lr(QM_SS_AUX_IRQ_TRIGGER)
<< 1;
ctx->irq_config[i] |= __builtin_arc_lr(QM_SS_AUX_IRQ_ENABLE);
}
status32 = __builtin_arc_lr(QM_SS_AUX_STATUS32);
@ -233,14 +232,14 @@ int qm_irq_restore_context(const qm_irq_context_t *const ctx)
QM_CHECK(ctx != NULL, -EINVAL);
/* Start from i=1, skip reset vector. */
for (i = 1; i < QM_SS_INT_VECTOR_NUM; i++) {
__builtin_arc_sr(i, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(ctx->irq_config[i - 1] >> 2,
for (i = 0; i < (QM_SS_INT_VECTOR_NUM - QM_SS_EXCEPTION_NUM); i++) {
__builtin_arc_sr(i + QM_SS_EXCEPTION_NUM, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(ctx->irq_config[i] >> 2,
QM_SS_AUX_IRQ_PRIORITY);
__builtin_arc_sr((ctx->irq_config[i - 1] >> 1) & BIT(0),
__builtin_arc_sr((ctx->irq_config[i] >> 1) & BIT(0),
QM_SS_AUX_IRQ_TRIGGER);
__builtin_arc_sr(ctx->irq_config[i - 1] & BIT(0),
__builtin_arc_sr(ctx->irq_config[i] & BIT(0),
QM_SS_AUX_IRQ_ENABLE);
}
@ -257,56 +256,36 @@ int qm_irq_restore_context(const qm_irq_context_t *const ctx)
return 0;
}
#endif /* QM_SENSOR */
#else /* !ENABLE_RESTORE_CONTEXT */
int qm_irq_save_context(qm_irq_context_t *const ctx)
{
(void)ctx;
return 0;
}
int qm_irq_restore_context(const qm_irq_context_t *const ctx)
{
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */
void _qm_irq_setup(uint32_t irq, uint16_t register_offset)
void _qm_irq_setup(uint32_t irq)
{
uint32_t *event_router_intmask;
#if (HAS_APIC)
/*
* Quark SE SOC has an APIC. Other SoCs uses a simple, fixed-vector
* non-8259 PIC that requires no configuration.
*/
ioapic_register_irq(irq, QM_IRQ_TO_VECTOR(irq));
#elif(HAS_MVIC)
mvic_register_irq(irq);
#elif(QM_SENSOR)
ss_register_irq(QM_IRQ_TO_VECTOR(irq));
#endif
/* Route peripheral interrupt to Lakemont/Sensor Subsystem */
event_router_intmask =
(uint32_t *)INTERRUPT_ROUTER_LMT_INT_MASK_BASE + register_offset;
/* On Quark D2000 and Quark SE the register for the analog comparator
* host mask has a different bit field than the other host mask
* registers. */
if (QM_IRQ_COMPARATOR_0_INT_MASK_OFFSET == register_offset) {
*event_router_intmask &= ~0x0007ffff;
#if !defined(QUARK_D2000)
} else if (QM_IRQ_MAILBOX_0_INT_MASK_OFFSET == register_offset) {
/* Masking the MAILBOX IRQ is done inside the mbox driver. */
#endif
/*
* DMA error mask uses 1 bit per DMA channel rather than the
* generic host mask.
*/
} else if (QM_IRQ_DMA_0_ERROR_INT_MASK_OFFSET == register_offset) {
#if (QM_SENSOR)
*event_router_intmask &= ~QM_IR_DMA_ERROR_SS_MASK;
#else
*event_router_intmask &= ~QM_IR_DMA_ERROR_HOST_MASK;
#endif
} else {
QM_IR_UNMASK_INTERRUPTS(*event_router_intmask);
}
#if (HAS_APIC)
ioapic_unmask_irq(irq);
#elif(HAS_MVIC)
mvic_register_irq(irq);
mvic_unmask_irq(irq);
#elif(QM_SENSOR)
ss_register_irq(QM_IRQ_TO_VECTOR(irq));
qm_ss_irq_unmask(QM_IRQ_TO_VECTOR(irq));
#endif
}
@ -316,7 +295,7 @@ void _qm_irq_setup(uint32_t irq, uint16_t register_offset)
*
* @param[in] vector Interrupt Vector number.
* @param[in] isr ISR to register to given vector. Must be a valid x86 ISR.
* If this can't be provided, qm_irq_request() or
* If this can't be provided, QM_IRQ_REQUEST() or
* qm_int_vector_request() should be used instead.
*/
void _qm_register_isr(uint32_t vector, qm_isr_t isr)

View file

@ -81,7 +81,6 @@ void qm_ss_int_vector_request(uint32_t vector, qm_ss_isr_t isr)
void qm_ss_irq_request(uint32_t irq, qm_ss_isr_t isr)
{
uint32_t *scss_intmask;
uint32_t vector = irq + (QM_SS_EXCEPTION_NUM + QM_SS_INT_TIMER_NUM);
/* Guarding the IRQ set-up */
@ -89,9 +88,5 @@ void qm_ss_irq_request(uint32_t irq, qm_ss_isr_t isr)
qm_ss_int_vector_request(vector, isr);
/* Route peripheral interrupt to Sensor Subsystem */
scss_intmask = (uint32_t *)INTERRUPT_ROUTER_SS_INT_MASK_BASE + irq;
QM_IR_UNMASK_SS_INTERRUPTS(*scss_intmask);
qm_ss_irq_unmask(vector);
}

View file

@ -30,6 +30,7 @@
#include "qm_common.h"
#include "qm_mailbox.h"
#include "qm_interrupt.h"
#include "qm_interrupt_router.h"
/**
* The Active core can be either Lakemont or the Sensor Sub-System.
@ -39,28 +40,36 @@
* Core specific mailbox #defines are grouped here to prevent
* duplication below.
*/
#if (HAS_MAILBOX_LAKEMONT_DEST)
#ifdef QM_LAKEMONT
#if HAS_MAILBOX_LAKEMONT_DEST
#if QM_LAKEMONT
#define ACTIVE_CORE_DEST QM_MBOX_TO_LMT
#define MBOX_INT_MASK QM_MBOX_LMT_INT_MASK
#define MBOX_INT_LOCK_MASK(N) QM_MBOX_LMT_INT_LOCK_MASK(N)
#define MBOX_INT_LOCK_HALT_MASK(N) QM_MBOX_LMT_INT_LOCK_HALT_MASK(N)
#define MBOX_ENABLE_INT_MASK(N) QM_MBOX_ENABLE_LMT_INT_MASK(N)
#define MBOX_DISABLE_INT_MASK(N) QM_MBOX_DISABLE_LMT_INT_MASK(N)
#define MBOX_ACTIVE_CORE_ALL_INT_MASK QM_IR_MBOX_LMT_ALL_INT_MASK
#define MBOX_INT_LOCK_MASK(N) QM_IR_MBOX_LMT_INT_LOCK_MASK(N)
#define MBOX_INT_LOCK_HALT_MASK(N) QM_IR_MBOX_LMT_INT_LOCK_HALT_MASK(N)
#define MBOX_IS_INT_MASK_EN(N) QM_IR_MBOX_IS_LMT_INT_MASK_EN(N)
#define MBOX_ENABLE_INT_MASK(N) QM_IR_MBOX_ENABLE_LMT_INT_MASK(N)
#define MBOX_DISABLE_INT_MASK(N) QM_IR_MBOX_DISABLE_LMT_INT_MASK(N)
#endif /* QM_LAKEMONT */
#endif /* HAS_MAILBOX_LAKEMONT_DEST */
#if (HAS_MAILBOX_SENSOR_SUB_SYSTEM_DEST)
#ifdef QM_SENSOR
#if HAS_MAILBOX_SENSOR_SUB_SYSTEM_DEST
#if QM_SENSOR
#define ACTIVE_CORE_DEST QM_MBOX_TO_SS
#define MBOX_INT_MASK QM_MBOX_SS_INT_MASK
#define MBOX_INT_LOCK_MASK(N) QM_MBOX_SS_INT_LOCK_HALT_MASK(N)
#define MBOX_INT_LOCK_HALT_MASK(N) QM_MBOX_SS_INT_LOCK_MASK(N)
#define MBOX_ENABLE_INT_MASK(N) QM_MBOX_ENABLE_SS_INT_MASK(N)
#define MBOX_DISABLE_INT_MASK(N) QM_MBOX_DISABLE_SS_INT_MASK(N)
#define MBOX_ACTIVE_CORE_ALL_INT_MASK QM_IR_MBOX_SS_ALL_INT_MASK
#define MBOX_INT_LOCK_MASK(N) QM_IR_MBOX_SS_INT_LOCK_HALT_MASK(N)
#define MBOX_INT_LOCK_HALT_MASK(N) QM_IR_MBOX_SS_INT_LOCK_MASK(N)
#define MBOX_IS_INT_MASK_EN(N) QM_IR_MBOX_IS_SS_INT_MASK_EN(N)
#define MBOX_ENABLE_INT_MASK(N) QM_IR_MBOX_ENABLE_SS_INT_MASK(N)
#define MBOX_DISABLE_INT_MASK(N) QM_IR_MBOX_DISABLE_SS_INT_MASK(N)
#endif /* QM_SENSOR */
#endif /* HAS_MAILBOX_SENSOR_SUB_SYSTEM_DEST */
#define MBOX_CHECK_DESTINATION(_dest) (ACTIVE_CORE_DEST == (_dest))
#define MBOX_CHECK_POLLING_MODE(_mode) (QM_MBOX_POLLING_MODE == (_mode))
static void mailbox_isr_handler(void);
/**
* Private data structure maintained by the driver
*/
@ -77,17 +86,22 @@ typedef struct {
} qm_mailbox_info_t;
/* Mailbox channels private data structures */
static qm_mailbox_info_t mailbox_devs[QM_MBOX_CH_NUM];
static qm_mailbox_info_t mailbox_devs[NUM_MAILBOXES];
QM_ISR_DECLARE(qm_mailbox_0_isr)
{
mailbox_isr_handler();
QM_ISR_EOI(QM_IRQ_MAILBOX_0_INT_VECTOR);
}
static void mailbox_isr_handler(void)
{
qm_mailbox_t *const mbox_reg = (qm_mailbox_t *)QM_MAILBOX;
uint8_t i = 0;
uint8_t mask;
uint16_t chall_sts = QM_MAILBOX->mbox_chall_sts;
mask = MBOX_INT_MASK;
mask = MBOX_ACTIVE_CORE_ALL_INT_MASK;
for (i = 0; chall_sts; i++, chall_sts >>= 2) {
if ((chall_sts & QM_MBOX_CH_STS_CTRL_INT) == 0) {
continue;
@ -105,20 +119,18 @@ QM_ISR_DECLARE(qm_mailbox_0_isr)
mbox_reg[i].ch_sts = QM_MBOX_CH_STS_CTRL_INT;
}
}
QM_ISR_EOI(QM_IRQ_MAILBOX_0_INT_VECTOR);
}
int qm_mbox_ch_set_config(const qm_mbox_ch_t mbox_ch,
const qm_mbox_config_t *const config)
{
QM_CHECK((QM_MBOX_CH_0 <= mbox_ch) && (mbox_ch < QM_MBOX_CH_NUM),
QM_CHECK((QM_MBOX_CH_0 <= mbox_ch) && (mbox_ch < NUM_MAILBOXES),
-EINVAL);
qm_mailbox_info_t *device = &mailbox_devs[mbox_ch];
/* Block interrupts while configuring MBOX */
qm_irq_mask(QM_IRQ_MAILBOX_0_INT);
QM_IR_MASK_INT(QM_IRQ_MAILBOX_0_INT);
/* Store the device destination */
device->dest = config->dest;
@ -143,6 +155,10 @@ int qm_mbox_ch_set_config(const qm_mbox_ch_t mbox_ch,
/* Note: Routing is done now, cannot be done in
* irq_request! */
MBOX_ENABLE_INT_MASK(mbox_ch);
} else {
/* The lock is set, but we need to check if the
* interrupt is routed */
QM_CHECK(MBOX_IS_INT_MASK_EN(mbox_ch), -EIO);
}
} else {
device->mode = QM_MBOX_POLLING_MODE;
@ -153,6 +169,7 @@ int qm_mbox_ch_set_config(const qm_mbox_ch_t mbox_ch,
* irq_request! */
MBOX_DISABLE_INT_MASK(mbox_ch);
}
device->callback = NULL;
device->callback_data = 0;
}
@ -165,19 +182,20 @@ int qm_mbox_ch_set_config(const qm_mbox_ch_t mbox_ch,
}
/* Set the mailbox channel to its default configuration. */
device->dest = QM_MBOX_UNUSED;
device->mode = QM_MBOX_INTERRUPT_MODE;
device->callback = NULL;
device->callback_data = 0;
}
/* UnBlock MBOX interrupts. */
qm_irq_unmask(QM_IRQ_MAILBOX_0_INT);
QM_IR_UNMASK_INT(QM_IRQ_MAILBOX_0_INT);
return 0;
}
int qm_mbox_ch_write(const qm_mbox_ch_t mbox_ch, const qm_mbox_msg_t *const msg)
{
QM_CHECK((QM_MBOX_CH_0 <= mbox_ch) && (mbox_ch < QM_MBOX_CH_NUM),
QM_CHECK((QM_MBOX_CH_0 <= mbox_ch) && (mbox_ch < NUM_MAILBOXES),
-EINVAL);
QM_CHECK(NULL != msg, -EINVAL);
qm_mailbox_t *const mbox_reg = (qm_mailbox_t *)QM_MAILBOX + mbox_ch;
@ -204,7 +222,7 @@ int qm_mbox_ch_write(const qm_mbox_ch_t mbox_ch, const qm_mbox_msg_t *const msg)
int qm_mbox_ch_read(const qm_mbox_ch_t mbox_ch, qm_mbox_msg_t *const msg)
{
QM_CHECK((QM_MBOX_CH_0 <= mbox_ch) && (mbox_ch < QM_MBOX_CH_NUM),
QM_CHECK((QM_MBOX_CH_0 <= mbox_ch) && (mbox_ch < NUM_MAILBOXES),
-EINVAL);
QM_CHECK(NULL != msg, -EINVAL);
@ -212,9 +230,8 @@ int qm_mbox_ch_read(const qm_mbox_ch_t mbox_ch, qm_mbox_msg_t *const msg)
uint32_t status = 0;
qm_mailbox_t *mbox_reg = &QM_MAILBOX->mbox[mbox_ch];
qm_mailbox_info_t *device = &mailbox_devs[mbox_ch];
if (ACTIVE_CORE_DEST == device->dest) {
if (MBOX_CHECK_DESTINATION(mailbox_devs[mbox_ch].dest)) {
status = mbox_reg->ch_sts;
/* If there is data pending consume it */
@ -227,7 +244,8 @@ int qm_mbox_ch_read(const qm_mbox_ch_t mbox_ch, qm_mbox_msg_t *const msg)
msg->data[2] = mbox_reg->ch_data[2];
msg->data[3] = mbox_reg->ch_data[3];
if (QM_MBOX_POLLING_MODE == device->mode) {
if (MBOX_CHECK_POLLING_MODE(
mailbox_devs[mbox_ch].mode)) {
/* In polling mode the interrupt status still
* needs to be cleared since we are not using
* the ISR. Note we write 1 to clear the bit.
@ -255,33 +273,11 @@ int qm_mbox_ch_read(const qm_mbox_ch_t mbox_ch, qm_mbox_msg_t *const msg)
int qm_mbox_ch_get_status(const qm_mbox_ch_t mbox_ch,
qm_mbox_ch_status_t *const status)
{
QM_CHECK((QM_MBOX_CH_0 <= mbox_ch) && (mbox_ch < QM_MBOX_CH_NUM),
QM_CHECK((QM_MBOX_CH_0 <= mbox_ch) && (mbox_ch < NUM_MAILBOXES),
-EINVAL);
QM_CHECK(NULL != status, -EINVAL);
uint32_t mbox_status = QM_MAILBOX->mbox[mbox_ch].ch_sts;
qm_mbox_ch_status_t rc = QM_MBOX_CH_IDLE;
*status = QM_MAILBOX->mbox[mbox_ch].ch_sts;
/* Check if the mailbox is in polling mode or not */
if (QM_MBOX_POLLING_MODE == mailbox_devs[mbox_ch].mode) {
/* Polling Mode
* Check if there is pending data to be consumed
*/
if (QM_MBOX_CH_STS & mbox_status) {
rc = QM_MBOX_CH_POLLING_DATA_PEND;
}
} else {
/* Interrupt Mode
* Check if there is a pending interrupt & data,
* otherwise check if there is pending data to be consumed
*/
if (QM_MBOX_STATUS_MASK == mbox_status) {
rc = QM_MBOX_CH_INT_NACK_DATA_PEND;
} else if (QM_MBOX_CH_STS == mbox_status) {
rc = QM_MBOX_CH_INT_ACK_DATA_PEND;
}
}
*status = rc;
return 0;
}

View file

@ -29,6 +29,7 @@
#include "qm_mpr.h"
#include "qm_interrupt.h"
#include "qm_interrupt_router.h"
#define ADDRESS_MASK_7_BIT (0x7F)
@ -120,7 +121,7 @@ int qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode,
callback_data = cb_data;
/* unmask interrupt */
qm_irq_unmask(QM_IRQ_SRAM_MPR_0_INT);
QM_IR_UNMASK_INT(QM_IRQ_SRAM_MPR_0_INT);
QM_IR_MASK_HALTS(QM_INTERRUPT_ROUTER->sram_mpr_0_int_mask);
}
@ -128,7 +129,7 @@ int qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode,
/* probe or reset mode */
else {
/* mask interrupt */
qm_irq_mask(QM_IRQ_SRAM_MPR_0_INT);
QM_IR_MASK_INT(QM_IRQ_SRAM_MPR_0_INT);
QM_IR_UNMASK_HALTS(QM_INTERRUPT_ROUTER->sram_mpr_0_int_mask);
@ -180,4 +181,18 @@ int qm_mpr_restore_context(const qm_mpr_context_t *const ctx)
return 0;
}
#else
int qm_mpr_save_context(qm_mpr_context_t *const ctx)
{
(void)ctx;
return 0;
}
int qm_mpr_restore_context(const qm_mpr_context_t *const ctx)
{
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -29,29 +29,89 @@
#include "qm_pwm.h"
static void (*callback[QM_PWM_NUM])(void *data, uint32_t int_status);
/* Store callback information for each PWM channel. */
static void (*callback[QM_PWM_NUM][QM_PWM_ID_NUM])(void *data,
uint32_t int_status);
static void *callback_data[QM_PWM_NUM][QM_PWM_ID_NUM];
static void *callback_data[QM_PWM_NUM];
#if (NUM_PWM_CONTROLLER_INTERRUPTS > 1)
const uint32_t pwm_isr_vectors[QM_PWM_ID_NUM] = {
QM_IRQ_PWM_0_INT_0_PRIORITY, QM_IRQ_PWM_0_INT_1_PRIORITY,
QM_IRQ_PWM_0_INT_2_PRIORITY, QM_IRQ_PWM_0_INT_3_PRIORITY};
#endif /* NUM_PWM_CONTROLLER_INTERRUPTS > 1 */
QM_ISR_DECLARE(qm_pwm_0_isr)
#ifndef UNIT_TEST
qm_pwm_reg_t *qm_pwm[QM_PWM_NUM] = {(qm_pwm_reg_t *)(QM_PWM_BASE)};
#endif /* UNIT_TEST */
#if (NUM_PWM_CONTROLLER_INTERRUPTS > 1)
/*
* If there is more than one interrupt line for PWM, use a common handler
* and clear only the corresponding IRQ.
*/
/* Interrupt service routine handler for PWM channels. */
static void pwm_isr_handler(const qm_pwm_t pwm, const qm_pwm_id_t id)
{
/* Which timers fired. */
uint32_t int_status = QM_PWM[QM_PWM_0].timersintstatus;
/* Clear timers interrupt flag. */
QM_PWM[QM_PWM_0].timerseoi;
qm_pwm_reg_t *const controller = QM_PWM[pwm];
/* Check for callback function. */
if (callback[pwm][id]) {
(callback[pwm][id])(callback_data[pwm][id], BIT(id));
}
if (callback[QM_PWM_0]) {
(*callback[QM_PWM_0])(callback_data[QM_PWM_0], int_status);
/* Clear interrupt on read. */
controller->timer[id].eoi;
QM_ISR_EOI(pwm_isr_vectors[id]);
}
QM_ISR_DECLARE(qm_pwm_0_isr_0)
{
pwm_isr_handler(QM_PWM_0, QM_PWM_ID_0);
}
QM_ISR_DECLARE(qm_pwm_0_isr_1)
{
pwm_isr_handler(QM_PWM_0, QM_PWM_ID_1);
}
QM_ISR_DECLARE(qm_pwm_0_isr_2)
{
pwm_isr_handler(QM_PWM_0, QM_PWM_ID_2);
}
QM_ISR_DECLARE(qm_pwm_0_isr_3)
{
pwm_isr_handler(QM_PWM_0, QM_PWM_ID_3);
}
#else /* NUM_PWM_CONTROLLER_INTERRUPTS > 1 */
QM_ISR_DECLARE(qm_pwm_0_isr_0)
{
qm_pwm_reg_t *const controller = QM_PWM[QM_PWM_0];
uint32_t int_status = controller->timersintstatus;
uint8_t pwm_id = 0;
for (; pwm_id < QM_PWM_ID_NUM; pwm_id++) {
if (int_status & BIT(pwm_id)) {
if (callback[QM_PWM_0][pwm_id]) {
(*callback[QM_PWM_0][pwm_id])(
callback_data[QM_PWM_0][pwm_id],
BIT(pwm_id));
controller->timer[pwm_id].eoi;
}
}
}
QM_ISR_EOI(QM_IRQ_PWM_0_INT_VECTOR);
}
#endif /* NUM_PWM_CONTROLLER_INTERRUPTS > 1 */
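/*
 * Usage sketch (illustrative; the handler name and field values are
 * hypothetical): with the per-channel callback storage above, each PWM
 * timer gets its own handler:
 *
 * static void pwm0_ch0_cb(void *data, uint32_t int_status)
 * {
 *         // int_status carries BIT(id) of the timer that fired.
 * }
 *
 * qm_pwm_config_t cfg = {
 *         .lo_count = 1000,
 *         .hi_count = 1000,
 *         .mode = QM_PWM_MODE_PWM,
 *         .mask_interrupt = false,
 *         .callback = pwm0_ch0_cb,
 *         .callback_data = NULL,
 * };
 * qm_pwm_set_config(QM_PWM_0, QM_PWM_ID_0, &cfg);
 * qm_pwm_start(QM_PWM_0, QM_PWM_ID_0);
 */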
int qm_pwm_start(const qm_pwm_t pwm, const qm_pwm_id_t id)
{
QM_CHECK(pwm < QM_PWM_NUM, -EINVAL);
QM_CHECK(id < QM_PWM_ID_NUM, -EINVAL);
QM_PWM[pwm].timer[id].controlreg |= PWM_START;
qm_pwm_reg_t *const controller = QM_PWM[pwm];
controller->timer[id].controlreg |= PWM_START;
return 0;
}
@ -61,7 +121,9 @@ int qm_pwm_stop(const qm_pwm_t pwm, const qm_pwm_id_t id)
QM_CHECK(pwm < QM_PWM_NUM, -EINVAL);
QM_CHECK(id < QM_PWM_ID_NUM, -EINVAL);
QM_PWM[pwm].timer[id].controlreg &= ~PWM_START;
qm_pwm_reg_t *const controller = QM_PWM[pwm];
controller->timer[id].controlreg &= ~PWM_START;
return 0;
}
@ -78,14 +140,15 @@ int qm_pwm_set_config(const qm_pwm_t pwm, const qm_pwm_id_t id,
QM_CHECK(cfg->mode == QM_PWM_MODE_PWM ? 0 != cfg->hi_count : 1,
-EINVAL);
QM_PWM[pwm].timer[id].loadcount = cfg->lo_count - 1;
QM_PWM[pwm].timer[id].controlreg =
qm_pwm_reg_t *const controller = QM_PWM[pwm];
controller->timer[id].loadcount = cfg->lo_count - 1;
controller->timer[id].controlreg =
(cfg->mode | (cfg->mask_interrupt << QM_PWM_INTERRUPT_MASK_OFFSET));
QM_PWM[pwm].timer_loadcount2[id] = cfg->hi_count - 1;
controller->timer_loadcount2[id] = cfg->hi_count - 1;
/* Assign user callback function. */
callback[pwm] = cfg->callback;
callback_data[pwm] = cfg->callback_data;
callback[pwm][id] = cfg->callback;
callback_data[pwm][id] = cfg->callback_data;
return 0;
}
@ -97,14 +160,15 @@ int qm_pwm_set(const qm_pwm_t pwm, const qm_pwm_id_t id,
QM_CHECK(id < QM_PWM_ID_NUM, -EINVAL);
QM_CHECK(0 < lo_count, -EINVAL);
/* If mode is PWM, hi_count must be > 0, otherwise don't care. */
QM_CHECK(((QM_PWM[pwm].timer[id].controlreg & QM_PWM_CONF_MODE_MASK) ==
QM_CHECK(((QM_PWM[pwm]->timer[id].controlreg & QM_PWM_CONF_MODE_MASK) ==
QM_PWM_MODE_PWM
? 0 < hi_count
: 1),
-EINVAL);
QM_PWM[pwm].timer[id].loadcount = lo_count - 1;
QM_PWM[pwm].timer_loadcount2[id] = hi_count - 1;
qm_pwm_reg_t *const controller = QM_PWM[pwm];
controller->timer[id].loadcount = lo_count - 1;
controller->timer_loadcount2[id] = hi_count - 1;
return 0;
}
@ -117,8 +181,9 @@ int qm_pwm_get(const qm_pwm_t pwm, const qm_pwm_id_t id,
QM_CHECK(lo_count != NULL, -EINVAL);
QM_CHECK(hi_count != NULL, -EINVAL);
*lo_count = QM_PWM[pwm].timer[id].loadcount;
*hi_count = QM_PWM[pwm].timer_loadcount2[id];
qm_pwm_reg_t *const controller = QM_PWM[pwm];
*lo_count = controller->timer[id].loadcount;
*hi_count = controller->timer_loadcount2[id];
return 0;
}
@ -129,7 +194,7 @@ int qm_pwm_save_context(const qm_pwm_t pwm, qm_pwm_context_t *const ctx)
QM_CHECK(pwm < QM_PWM_NUM, -EINVAL);
QM_CHECK(ctx != NULL, -EINVAL);
qm_pwm_reg_t *const controller = &QM_PWM[pwm];
qm_pwm_reg_t *const controller = QM_PWM[pwm];
uint8_t i;
for (i = 0; i < QM_PWM_ID_NUM; i++) {
@ -147,7 +212,7 @@ int qm_pwm_restore_context(const qm_pwm_t pwm,
QM_CHECK(pwm < QM_PWM_NUM, -EINVAL);
QM_CHECK(ctx != NULL, -EINVAL);
qm_pwm_reg_t *const controller = &QM_PWM[pwm];
qm_pwm_reg_t *const controller = QM_PWM[pwm];
uint8_t i;
for (i = 0; i < QM_PWM_ID_NUM; i++) {
@ -158,4 +223,21 @@ int qm_pwm_restore_context(const qm_pwm_t pwm,
return 0;
}
#else
int qm_pwm_save_context(const qm_pwm_t pwm, qm_pwm_context_t *const ctx)
{
(void)pwm;
(void)ctx;
return 0;
}
int qm_pwm_restore_context(const qm_pwm_t pwm,
const qm_pwm_context_t *const ctx)
{
(void)pwm;
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -29,65 +29,163 @@
#include "qm_rtc.h"
#include "clk.h"
#if (HAS_SOC_CONTEXT_RETENTION)
#include "power_states.h"
#endif /* HAS_SOC_CONTEXT_RETENTION */
static void (*callback[QM_RTC_NUM])(void *data);
static void *callback_data[QM_RTC_NUM];
#ifndef UNIT_TEST
qm_rtc_reg_t *qm_rtc[QM_RTC_NUM] = {(qm_rtc_reg_t *)(QM_RTC_BASE)};
#endif /* UNIT_TEST */
QM_ISR_DECLARE(qm_rtc_0_isr)
{
/* Disable RTC interrupt */
QM_RTC[QM_RTC_0].rtc_ccr &= ~QM_RTC_CCR_INTERRUPT_ENABLE;
QM_RTC[QM_RTC_0]->rtc_ccr &= ~QM_RTC_CCR_INTERRUPT_ENABLE;
#if (HAS_SOC_CONTEXT_RETENTION)
if (QM_SCSS_GP->gps0 & QM_GPS0_POWER_STATES_MASK) {
power_soc_restore();
qm_power_soc_restore();
}
#endif
#endif /* HAS_SOC_CONTEXT_RETENTION */
if (callback[QM_RTC_0]) {
(callback[QM_RTC_0])(callback_data[QM_RTC_0]);
}
/* clear interrupt */
QM_RTC[QM_RTC_0].rtc_eoi;
/* Clear interrupt. */
QM_RTC[QM_RTC_0]->rtc_eoi;
QM_ISR_EOI(QM_IRQ_RTC_0_INT_VECTOR);
}
int qm_rtc_set_config(const qm_rtc_t rtc, const qm_rtc_config_t *const cfg)
{
QM_CHECK(rtc < QM_RTC_NUM, -EINVAL);
QM_CHECK(cfg != NULL, -EINVAL);
QM_CHECK(rtc >= QM_RTC_0, -EINVAL);
QM_CHECK(NULL != cfg, -EINVAL);
/* set rtc divider */
clk_rtc_set_div(cfg->prescaler);
/* Disable the RTC before re-configuration. */
QM_RTC[rtc]->rtc_ccr &= ~QM_RTC_CCR_ENABLE;
QM_RTC[rtc].rtc_clr = cfg->init_val;
QM_RTC[rtc]->rtc_clr = cfg->init_val;
/* clear any pending interrupts */
QM_RTC[rtc].rtc_eoi;
/* Clear any pending interrupts. */
QM_RTC[rtc]->rtc_eoi;
callback[rtc] = cfg->callback;
callback_data[rtc] = cfg->callback_data;
/* Perform if the IP used has the prescaler feature. */
#if (HAS_RTC_PRESCALER)
/* With the RTC prescaler, the minimum value that can be set is 2. */
if (QM_RTC_MIN_PRESCALER <= cfg->prescaler) {
/* Enable RTC prescaler in CCR. */
QM_RTC[rtc]->rtc_ccr |= QM_RTC_PRESCLR_ENABLE;
QM_RTC[rtc]->rtc_cpsr = BIT(cfg->prescaler);
} else {
/* Disable RTC prescaler in CCR. */
QM_RTC[rtc]->rtc_ccr &= ~QM_RTC_PRESCLR_ENABLE;
}
#else /* HAS_RTC_PRESCALER */
clk_rtc_set_div(cfg->prescaler); /* Set RTC divider. */
#endif /* HAS_RTC_PRESCALER */
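/*
 * Worked example (assuming the usual 32.768 kHz RTC clock): a prescaler
 * value of 15 programs rtc_cpsr = BIT(15) = 32768, so the counter ticks
 * once per second.
 */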
if (cfg->alarm_en) {
callback[rtc] = cfg->callback;
callback_data[rtc] = cfg->callback_data;
qm_rtc_set_alarm(rtc, cfg->alarm_val);
} else {
/* Disable RTC interrupt */
QM_RTC[rtc].rtc_ccr &= ~QM_RTC_CCR_INTERRUPT_ENABLE;
callback[rtc] = NULL;
callback_data[rtc] = NULL;
/* Disable RTC interrupt. */
QM_RTC[rtc]->rtc_ccr &= ~QM_RTC_CCR_INTERRUPT_ENABLE;
/* Internally mask the RTC interrupt. */
QM_RTC[rtc]->rtc_ccr |= QM_RTC_CCR_INTERRUPT_MASK;
}
/* Enable the RTC upon completion. */
QM_RTC[rtc]->rtc_ccr |= QM_RTC_CCR_ENABLE;
return 0;
}
int qm_rtc_set_alarm(const qm_rtc_t rtc, const uint32_t alarm_val)
{
QM_CHECK(rtc < QM_RTC_NUM, -EINVAL);
QM_CHECK(rtc >= QM_RTC_0, -EINVAL);
/* Enable RTC interrupt */
QM_RTC[rtc].rtc_ccr |= QM_RTC_CCR_INTERRUPT_ENABLE;
/* Enable RTC interrupt. */
QM_RTC[rtc]->rtc_ccr |= QM_RTC_CCR_INTERRUPT_ENABLE;
/* Internally unmask the RTC interrupt. */
QM_RTC[rtc]->rtc_ccr &= ~QM_RTC_CCR_INTERRUPT_MASK;
/* set alarm val */
QM_RTC[rtc].rtc_cmr = alarm_val;
/* Set alarm value. */
QM_RTC[rtc]->rtc_cmr = alarm_val;
return 0;
}
int qm_rtc_read(const qm_rtc_t rtc, uint32_t *const value)
{
QM_CHECK(rtc < QM_RTC_NUM, -EINVAL);
QM_CHECK(rtc >= QM_RTC_0, -EINVAL);
QM_CHECK(NULL != value, -EINVAL);
*value = QM_RTC[rtc]->rtc_ccvr;
return 0;
}
#if (ENABLE_RESTORE_CONTEXT)
int qm_rtc_save_context(const qm_rtc_t rtc, qm_rtc_context_t *const ctx)
{
QM_CHECK(rtc < QM_RTC_NUM, -EINVAL);
QM_CHECK(ctx != NULL, -EINVAL);
(void)rtc;
(void)ctx;
return 0;
}
int qm_rtc_restore_context(const qm_rtc_t rtc,
const qm_rtc_context_t *const ctx)
{
uint32_t int_rtc_mask;
QM_CHECK(rtc < QM_RTC_NUM, -EINVAL);
QM_CHECK(ctx != NULL, -EINVAL);
(void)rtc;
(void)ctx;
/* The interrupt router registers are sticky and retain their
* values across warm resets, so we don't need to save them.
* But for wake capable peripherals, if their interrupts are
* configured to be edge sensitive, the wake event will be lost
* by the time the interrupt controller is reconfigured, while
* the interrupt is still pending. By masking and unmasking again
* the corresponding routing register, the interrupt is forwarded
* to the core and the ISR will be serviced as expected.
*/
int_rtc_mask = QM_INTERRUPT_ROUTER->rtc_0_int_mask;
QM_INTERRUPT_ROUTER->rtc_0_int_mask = 0xFFFFFFFF;
QM_INTERRUPT_ROUTER->rtc_0_int_mask = int_rtc_mask;
return 0;
}
#else
int qm_rtc_save_context(const qm_rtc_t rtc, qm_rtc_context_t *const ctx)
{
(void)rtc;
(void)ctx;
return 0;
}
int qm_rtc_restore_context(const qm_rtc_t rtc,
const qm_rtc_context_t *const ctx)
{
(void)rtc;
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -31,13 +31,27 @@
* SoC Watch - QMSI power profiler
*/
#if (SOC_WATCH_ENABLE) && (!QM_SENSOR)
#if (SOC_WATCH_ENABLE)
#include <x86intrin.h>
/*
* Header files common to LMT and ARC Sensor.
*/
#include "soc_watch.h"
#include "qm_common.h"
#include "qm_soc_regs.h"
#include "soc_watch.h"
/*
* Header files and macro defines specific to LMT and ARC Sensor.
*/
#if (!QM_SENSOR)
#include <x86intrin.h>
/* 64-bit timestamp counter. */
#define get_ticks() _rdtsc()
#else
#include "qm_sensor_regs.h"
/* The timestamp counter for the sensor subsystem is 32-bit. */
#define get_ticks() __builtin_arc_lr(QM_SS_TSC_BASE + QM_SS_TIMER_COUNT)
#endif
/*
* Define a macro for exposing some functions and other definitions
* only when unit testing. If we're not unit testing, then declare
@ -73,6 +87,7 @@ NONUTSTATIC const char *ev_strs[] = {
"STtL", /* Sleep event */
"RT1R", /* Register read event: Timestamp, reg enum, reg value*/
"UTs4", /* User event: timestamp, subtype, data value. */
"FTf", /* Frequency change event */
};
/*
@ -153,6 +168,12 @@ void mlog_byte(uint8_t byte)
#define MLOG_BYTE(b)
#endif /* !MLOG_ENABLE */
/*
* Defines for frequency related platform registers.
*/
#define SW_OSC0_CFG1 (0)
#define SW_SYS_CLK_CTL (2)
/*
* CONFIGURABLE: Set this to control the number of bytes of RAM you
* want to dedicate to event buffering. The larger the buffer,
@ -230,13 +251,52 @@ static void eb_write_char(uint8_t data)
/* Store a word in the event buffer. */
static void eb_write_uint32(uint32_t *data)
{
uint32_t *uip = (uint32_t *)&soc_watch_event_buffer
.event_data[soc_watch_event_buffer.eb_idx];
*uip = *data;
uint32_t dst_data = *data;
uint8_t byte_count = 0;
SOC_WATCH_TRACE("I%d:0x%x\n", soc_watch_event_buffer.eb_idx, *data);
soc_watch_event_buffer.eb_idx += sizeof(uint32_t);
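/* Serialize the word into the event buffer one byte at a time, LSB first. */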
while (byte_count < sizeof(uint32_t)) {
soc_watch_event_buffer
.event_data[soc_watch_event_buffer.eb_idx++] =
((dst_data & 0xFF));
dst_data = dst_data >> 8;
byte_count++;
}
}
/*
* soc_watch duplicates the implementation of the qm_irq_lock/unlock APIs
* for both the sensor subsystem and x86 in order to remove the dependency
* on qm_interrupt.h: the soc_watch driver is shared by Zephyr and
* QMSI-based applications, and Zephyr has its own interrupt handling
* implementation. Both functions are static inline, so the duplication
* has no side effects.
*/
#if (QM_SENSOR)
static inline unsigned int soc_watch_irq_lock(void)
{
unsigned int key = 0;
/*
* Store the ARC STATUS32 register fields relating to interrupts into
* the variable `key' and disable interrupt delivery to the core.
*/
__asm__ __volatile__("clri %0" : "=r"(key));
return key;
}
static inline void soc_watch_irq_unlock(unsigned int key)
{
/*
* Restore the ARC STATUS32 register fields relating to interrupts based
* on the variable `key' populated by soc_watch_irq_lock().
*/
__asm__ __volatile__("seti %0" : : "ir"(key));
}
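/*
 * Typical pairing (illustrative):
 *
 * unsigned int key = soc_watch_irq_lock();
 * ... update the shared event buffer ...
 * soc_watch_irq_unlock(key);
 */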
#else /* x86 */
/* x86 CPU FLAGS.IF register field (Interrupt enable Flag, bit 9), indicating
* whether or not CPU interrupts are enabled.
*/
@ -283,7 +343,8 @@ static inline void soc_watch_irq_unlock(unsigned int key)
*/
if (!(key & X86_FLAGS_IF)) {
/*
* Interrupts were disabled when soc_watch_irq_lock() was invoked:
* Interrupts were disabled when soc_watch_irq_lock() was
* invoked:
* do not re-enable interrupts.
*/
return;
@ -292,6 +353,7 @@ static inline void soc_watch_irq_unlock(unsigned int key)
/* Enable interrupts */
__asm__ __volatile__("sti;\n\t" : :);
}
#endif
/* Log an event with one parameter. */
void soc_watch_log_event(soc_watch_event_t event_id, uintptr_t ev_data)
@ -308,10 +370,14 @@ void soc_watch_log_app_event(soc_watch_event_t event_id, uint8_t ev_subtype,
uintptr_t ev_data)
{
static uint8_t record_rtc = 0;
const uint32_t *rtc_ctr = (uint32_t *)&QM_RTC->rtc_ccvr;
const uint32_t *rtc_ctr = (uint32_t *)&QM_RTC[QM_RTC_0]->rtc_ccvr;
const char *cp;
unsigned int irq_flag = 0;
uint64_t tsc = __builtin_ia32_rdtsc(); /* Grab hi-res timestamp */
#if (!QM_SENSOR)
uint64_t tsc = 0; /* hi-res timestamp */
#else
uint32_t tsc = 0;
#endif
uint32_t rtc_val = *rtc_ctr;
#define AVG_EVENT_SIZE 8 /* Size of a typical message in bytes. */
@ -355,6 +421,7 @@ void soc_watch_log_app_event(soc_watch_event_t event_id, uint8_t ev_subtype,
while (*++cp) {
switch (*cp) {
case 'T':
tsc = get_ticks();
eb_write_uint32((uint32_t *)(&tsc)); /* Hi-res
Timestamp */
break;
@ -381,6 +448,12 @@ void soc_watch_log_app_event(soc_watch_event_t event_id, uint8_t ev_subtype,
/* Event subtype */
eb_write_char(((uint32_t)ev_subtype) & 0xff);
break;
case 'f':
eb_write_uint32(
(uint32_t *)platform_regs[SW_OSC0_CFG1]);
eb_write_uint32(
(uint32_t *)platform_regs[SW_SYS_CLK_CTL]);
break;
default:
SOC_WATCH_TRACE(
"Unknown string char: 0x%x on string "
@ -408,4 +481,16 @@ void soc_watch_log_app_event(soc_watch_event_t event_id, uint8_t ev_subtype,
MLOG(']');
}
/*
* Trigger the Watchpoint to flush the data.
* Applications can use this API to trigger the transfer of
* profiler information to the host whenever required.
* The static function soc_watch_event_buffer_flush() is also used internally
* when the soc_watch_buffer_full flag is set and is not exposed to the
* application.
*/
void soc_watch_trigger_flush()
{
soc_watch_event_buffer_flush();
}
#endif /* SOC_WATCH_ENABLE */

View file

@ -30,8 +30,10 @@
#include "qm_spi.h"
/* SPI FIFO size defaults */
#define SPI_DEFAULT_TX_THRESHOLD (0x05)
#define SPI_DEFAULT_RX_THRESHOLD (0x05)
#define SPI_MST_DEFAULT_TX_THRESHOLD (0x05)
#define SPI_MST_DEFAULT_RX_THRESHOLD (0x05)
#define SPI_SLV_DEFAULT_TX_THRESHOLD (0x04)
#define SPI_SLV_DEFAULT_RX_THRESHOLD (0x03)
#define SPI_FIFOS_DEPTH (8)
/* SPI DMA transmit watermark level. When the number of valid data entries in
@ -87,18 +89,21 @@ typedef struct {
#ifndef UNIT_TEST
#if (QUARK_SE)
qm_spi_reg_t *qm_spi_controllers[QM_SPI_NUM] = {
(qm_spi_reg_t *)QM_SPI_MST_0_BASE, (qm_spi_reg_t *)QM_SPI_MST_1_BASE};
(qm_spi_reg_t *)QM_SPI_MST_0_BASE, (qm_spi_reg_t *)QM_SPI_MST_1_BASE,
(qm_spi_reg_t *)QM_SPI_SLV_BASE};
#elif(QUARK_D2000)
qm_spi_reg_t *qm_spi_controllers[QM_SPI_NUM] = {
(qm_spi_reg_t *)QM_SPI_MST_0_BASE};
(qm_spi_reg_t *)QM_SPI_MST_0_BASE, (qm_spi_reg_t *)QM_SPI_SLV_BASE};
#endif
#endif
static const qm_spi_async_transfer_t *spi_async_transfer[QM_SPI_NUM];
static volatile uint16_t tx_counter[QM_SPI_NUM], rx_counter[QM_SPI_NUM];
static const volatile qm_spi_async_transfer_t *spi_async_transfer[QM_SPI_NUM];
static volatile uint16_t tx_counter[QM_SPI_NUM];
static volatile uint16_t rx_counter[QM_SPI_NUM];
static uint8_t dfs[QM_SPI_NUM];
static const uint32_t tx_dummy_frame = 0;
static qm_spi_tmode_t tmode[QM_SPI_NUM];
static qm_spi_frame_format_t frf[QM_SPI_NUM];
/* DMA (memory to SPI controller) callback information. */
static dma_context_t dma_context_tx[QM_SPI_NUM];
/* DMA (SPI controller to memory) callback information. */
@ -146,16 +151,14 @@ static void wait_for_controller(const qm_spi_reg_t *const controller)
;
}
/**
* Service an RX FIFO Full interrupt
*
* @brief Interrupt based transfer on SPI.
* @param [in] spi Which SPI to transfer from.
/*
* Service a RX FIFO Full interrupt on master side.
*/
static __inline__ void handle_rx_interrupt(const qm_spi_t spi)
static __inline__ void handle_mst_rx_interrupt(const qm_spi_t spi)
{
qm_spi_reg_t *const controller = QM_SPI[spi];
const qm_spi_async_transfer_t *const transfer = spi_async_transfer[spi];
const volatile qm_spi_async_transfer_t *const transfer =
spi_async_transfer[spi];
/* Jump to the right position of RX buffer.
* If no bytes were received before, we start from the beginning,
@ -172,14 +175,18 @@ static __inline__ void handle_rx_interrupt(const qm_spi_t spi)
* requested.
*/
if (transfer->rx_len == rx_counter[spi]) {
controller->imr &=
~(QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM |
QM_SPI_IMR_RXFIM);
if (transfer->callback &&
tmode[spi] == QM_SPI_TMOD_RX) {
transfer->callback(transfer->callback_data, 0,
QM_SPI_IDLE,
transfer->rx_len);
if (tmode[spi] == QM_SPI_TMOD_RX) {
controller->imr = QM_SPI_IMR_MASK_ALL;
controller->ssienr = 0;
if (transfer->callback) {
transfer->callback(
transfer->callback_data, 0,
QM_SPI_IDLE, transfer->rx_len);
}
} else {
controller->imr &=
~(QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM |
QM_SPI_IMR_RXFIM);
}
break;
}
@ -195,15 +202,16 @@ static __inline__ void handle_rx_interrupt(const qm_spi_t spi)
}
/**
* Service an Tx FIFO Empty interrupt
* Service a TX FIFO Empty interrupt on master side.
*
* @brief Interrupt based transfer on SPI.
* @param [in] spi Which SPI to transfer to.
*/
static __inline__ void handle_tx_interrupt(const qm_spi_t spi)
static __inline__ void handle_mst_tx_interrupt(const qm_spi_t spi)
{
qm_spi_reg_t *const controller = QM_SPI[spi];
const qm_spi_async_transfer_t *const transfer = spi_async_transfer[spi];
const volatile qm_spi_async_transfer_t *const transfer =
spi_async_transfer[spi];
/* Jump to the right position of TX buffer.
* If no bytes were transmitted before, we start from the beginning,
@ -227,27 +235,149 @@ static __inline__ void handle_tx_interrupt(const qm_spi_t spi)
}
}
static void handle_spi_interrupt(const qm_spi_t spi)
/*
* Service a RX FIFO Full interrupt on slave side.
*/
static __inline__ void handle_slv_rx_interrupt(const qm_spi_t spi)
{
qm_spi_reg_t *const controller = QM_SPI[spi];
const qm_spi_async_transfer_t *transfer = spi_async_transfer[spi];
const volatile qm_spi_async_transfer_t *const transfer =
spi_async_transfer[spi];
uint8_t *rx_buffer = transfer->rx + (rx_counter[spi] * dfs[spi]);
uint16_t len = transfer->rx_len;
int32_t frames_left = 0;
while (controller->rxflr) {
if (rx_counter[spi] < len) {
read_frame(spi, rx_buffer);
rx_buffer += dfs[spi];
rx_counter[spi]++;
if (rx_counter[spi] == len) {
/* Application notification. */
if (transfer->callback) {
/*
* Application can now read received
* data. In order to receive more
* data, the application needs to
* call the update function.
*/
transfer->callback(
transfer->callback_data, 0,
QM_SPI_RX_FULL, len);
/*
* RX counter is zero if the application
* has called the update function.
*/
if (!rx_counter[spi]) {
/*
* Update transfer information.
*/
rx_buffer = transfer->rx;
len = transfer->rx_len;
} else {
break;
}
}
}
} else {
break;
}
}
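/* Lower the RX threshold so a final burst smaller than the current
 * threshold still raises the RX FIFO Full interrupt. */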
frames_left = len - rx_counter[spi];
if (frames_left > 0 && (uint32_t)frames_left <= controller->rxftlr) {
controller->rxftlr = frames_left - 1;
}
}
/*
* Service a TX FIFO Empty interrupt on slave side.
*/
static __inline__ void handle_slv_tx_interrupt(const qm_spi_t spi)
{
qm_spi_reg_t *const controller = QM_SPI[spi];
const volatile qm_spi_async_transfer_t *const transfer =
spi_async_transfer[spi];
const uint8_t *tx_buffer = transfer->tx + (tx_counter[spi] * dfs[spi]);
uint16_t len = transfer->tx_len;
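/* Free TX slots: FIFO depth minus frames already queued for TX and frames
 * pending in RX, with one slot kept spare. */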
int entries_free =
SPI_FIFOS_DEPTH - controller->txflr - controller->rxflr - 1;
while (entries_free > 0 && tx_counter[spi] < len) {
write_frame(spi, tx_buffer);
tx_buffer += dfs[spi];
entries_free--;
tx_counter[spi]++;
if (tx_counter[spi] == len) {
/* Application notification. */
if (transfer->callback) {
/*
* In order to transmit more data, the
* application needs to call the update
* function.
*/
transfer->callback(transfer->callback_data, 0,
QM_SPI_TX_EMPTY, len);
/*
* RX counter is zero if the application
* has called the update function.
*/
if (!tx_counter[spi]) {
/* Update transfer information. */
tx_buffer = transfer->tx;
len = transfer->tx_len;
}
}
}
}
if (tx_counter[spi] >= len) {
controller->txftlr = 0;
}
}
static void handle_spi_overflow_interrupt(const qm_spi_t spi)
{
qm_spi_reg_t *const controller = QM_SPI[spi];
const volatile qm_spi_async_transfer_t *transfer =
spi_async_transfer[spi];
if (transfer->callback) {
transfer->callback(transfer->callback_data, -EIO,
QM_SPI_RX_OVERFLOW, rx_counter[spi]);
}
/* Clear RX FIFO Overflow interrupt. */
controller->rxoicr;
controller->imr = QM_SPI_IMR_MASK_ALL;
controller->ssienr = 0;
}
static void handle_spi_mst_interrupt(const qm_spi_t spi)
{
qm_spi_reg_t *const controller = QM_SPI[spi];
const volatile qm_spi_async_transfer_t *transfer =
spi_async_transfer[spi];
const uint32_t int_status = controller->isr;
QM_ASSERT((int_status & (QM_SPI_ISR_TXOIS | QM_SPI_ISR_RXUIS)) == 0);
if (int_status & QM_SPI_ISR_RXOIS) {
if (transfer->callback) {
transfer->callback(transfer->callback_data, -EIO,
QM_SPI_RX_OVERFLOW, rx_counter[spi]);
}
controller->rxoicr;
controller->imr = QM_SPI_IMR_MASK_ALL;
controller->ssienr = 0;
/* RX FIFO Overflow interrupt. */
if (int_status & QM_SPI_ISR_RXOIS) {
handle_spi_overflow_interrupt(spi);
return;
}
if (int_status & QM_SPI_ISR_RXFIS) {
handle_rx_interrupt(spi);
handle_mst_rx_interrupt(spi);
}
if (transfer->rx_len == rx_counter[spi] &&
@ -267,7 +397,51 @@ static void handle_spi_interrupt(const qm_spi_t spi)
if (int_status & QM_SPI_ISR_TXEIS &&
transfer->tx_len > tx_counter[spi]) {
handle_tx_interrupt(spi);
handle_mst_tx_interrupt(spi);
}
}
static void handle_spi_slv_interrupt(const qm_spi_t spi)
{
qm_spi_reg_t *const controller = QM_SPI[spi];
const volatile qm_spi_async_transfer_t *transfer =
spi_async_transfer[spi];
const uint32_t int_status = controller->isr;
QM_ASSERT((int_status & (QM_SPI_ISR_TXOIS | QM_SPI_ISR_RXUIS)) == 0);
if (int_status & QM_SPI_ISR_RXOIS) {
/* RX FIFO Overflow interrupt. */
handle_spi_overflow_interrupt(spi);
return;
}
if (int_status & QM_SPI_ISR_RXFIS) {
/* RX FIFO Full interrupt. */
handle_slv_rx_interrupt(spi);
}
if (transfer->rx_len == rx_counter[spi] &&
transfer->tx_len == tx_counter[spi] &&
(controller->sr & QM_SPI_SR_TFE) &&
(!(controller->sr & QM_SPI_SR_BUSY) ||
tmode[spi] == QM_SPI_TMOD_RX)) {
/* End of communication. */
if (!transfer->keep_enabled) {
controller->ssienr = 0;
}
controller->imr = QM_SPI_IMR_MASK_ALL;
/* Application notification. */
if (transfer->callback) {
transfer->callback(transfer->callback_data, 0,
QM_SPI_IDLE, 0);
}
return;
}
if (int_status & QM_SPI_ISR_TXEIS) {
/* TX FIFO Empty interrupt. */
handle_slv_tx_interrupt(spi);
}
}
@ -275,6 +449,10 @@ int qm_spi_set_config(const qm_spi_t spi, const qm_spi_config_t *cfg)
{
QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
QM_CHECK(cfg, -EINVAL);
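/* The slave controller cannot be configured for EEPROM-read mode. */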
QM_CHECK(QM_SPI_SLV_0 == spi
? cfg->transfer_mode != QM_SPI_TMOD_EEPROM_READ
: 1,
-EINVAL);
if (0 != QM_SPI[spi]->ssienr) {
return -EBUSY;
@ -282,12 +460,18 @@ int qm_spi_set_config(const qm_spi_t spi, const qm_spi_config_t *cfg)
qm_spi_reg_t *const controller = QM_SPI[spi];
/* Apply the selected cfg options */
/* Apply the selected cfg options. */
controller->ctrlr0 = (cfg->frame_size << QM_SPI_CTRLR0_DFS_32_OFFSET) |
(cfg->transfer_mode << QM_SPI_CTRLR0_TMOD_OFFSET) |
(cfg->bus_mode << QM_SPI_CTRLR0_SCPOL_SCPH_OFFSET);
controller->baudr = cfg->clk_divider;
/*
* If the device is configured as a slave, an external master will
* set the baud rate.
*/
if (QM_SPI_SLV_0 != spi) {
controller->baudr = cfg->clk_divider;
}
/* Keep the current data frame size in bytes, being:
* - 1 byte for DFS set from 4 to 8 bits;
@ -298,13 +482,14 @@ int qm_spi_set_config(const qm_spi_t spi, const qm_spi_config_t *cfg)
dfs[spi] = (cfg->frame_size / 8) + 1;
tmode[spi] = cfg->transfer_mode;
frf[spi] = cfg->frame_format;
return 0;
}
int qm_spi_slave_select(const qm_spi_t spi, const qm_spi_slave_select_t ss)
{
QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
QM_CHECK((spi < QM_SPI_NUM) && (spi != QM_SPI_SLV_0), -EINVAL);
/* Check if the device reports as busy. */
if (QM_SPI[spi]->sr & QM_SPI_SR_BUSY) {
@ -347,7 +532,9 @@ int qm_spi_transfer(const qm_spi_t spi, const qm_spi_transfer_t *const xfer,
-EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX ? (xfer->rx_len == 0) : 1,
-EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_RX ? (xfer->tx_len == 0) : 1,
QM_CHECK(((tmode[spi] == QM_SPI_TMOD_RX ? (xfer->tx_len == 0) : 1) ||
(((tmode[spi] == QM_SPI_TMOD_RX) &&
(QM_SPI_FRAME_FORMAT_STANDARD != frf[spi])))),
-EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_EEPROM_READ
? (xfer->tx_len && xfer->rx_len)
@ -367,12 +554,13 @@ int qm_spi_transfer(const qm_spi_t spi, const qm_spi_transfer_t *const xfer,
controller->imr = QM_SPI_IMR_MASK_ALL;
/* If we are in RX only or EEPROM Read mode, the ctrlr1 reg holds how
* many bytes the controller solicits, minus 1. */
* many bytes the controller solicits, minus 1.
*/
if (xfer->rx_len) {
controller->ctrlr1 = xfer->rx_len - 1;
}
/* Enable SPI device */
/* Enable SPI device. */
controller->ssienr = QM_SPI_SSIENR_SSIENR;
/* Transfer is only complete when all the tx data is sent and all
@ -381,8 +569,9 @@ int qm_spi_transfer(const qm_spi_t spi, const qm_spi_transfer_t *const xfer,
uint8_t *rx_buffer = xfer->rx;
const uint8_t *tx_buffer = xfer->tx;
/* RX Only transfers need a dummy byte to be sent for starting. */
if (tmode[spi] == QM_SPI_TMOD_RX) {
/* RX Only transfers need a dummy frame to be sent for starting. */
if ((tmode[spi] == QM_SPI_TMOD_RX) &&
(QM_SPI_FRAME_FORMAT_STANDARD == frf[spi])) {
tx_buffer = (uint8_t *)&tx_dummy_frame;
i_tx = 1;
}
@ -411,98 +600,187 @@ int qm_spi_transfer(const qm_spi_t spi, const qm_spi_transfer_t *const xfer,
}
wait_for_controller(controller);
controller->ssienr = 0; /* Disable SPI Device */
/* Disable SPI Device. */
controller->ssienr = 0;
return rc;
}
int qm_spi_irq_transfer(const qm_spi_t spi,
const qm_spi_async_transfer_t *const xfer)
int qm_spi_irq_update(const qm_spi_t spi,
const volatile qm_spi_async_transfer_t *const xfer,
const qm_spi_update_t update)
{
QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
QM_CHECK(xfer, -EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX ? (xfer->rx_len == 0) : 1,
-EINVAL);
QM_CHECK(((tmode[spi] == QM_SPI_TMOD_RX ? (xfer->tx_len == 0) : 1) ||
(((tmode[spi] == QM_SPI_TMOD_RX) &&
(QM_SPI_FRAME_FORMAT_STANDARD != frf[spi])))),
-EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_EEPROM_READ
? (xfer->tx_len && xfer->rx_len)
: 1,
-EINVAL);
QM_CHECK((spi != QM_SPI_SLV_0) ||
(tmode[spi] != QM_SPI_TMOD_EEPROM_READ),
-EINVAL);
QM_CHECK(update == QM_SPI_UPDATE_TX || update == QM_SPI_UPDATE_RX ||
update == (QM_SPI_UPDATE_TX | QM_SPI_UPDATE_RX),
-EINVAL);
/* If updating only TX, then the mode shall not be RX. */
QM_CHECK((update & QM_SPI_UPDATE_TX) ? (tmode[spi] != QM_SPI_TMOD_RX)
: 1,
-EINVAL);
/* If updating only RX, then the mode shall not be TX. */
QM_CHECK((update & QM_SPI_UPDATE_RX) ? (tmode[spi] != QM_SPI_TMOD_TX)
: 1,
-EINVAL);
qm_spi_reg_t *const controller = QM_SPI[spi];
spi_async_transfer[spi] = xfer;
if (update == QM_SPI_UPDATE_RX) {
rx_counter[spi] = 0;
/* Unmask RX interrupt sources. */
controller->imr =
QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM | QM_SPI_IMR_RXFIM;
} else if (update == QM_SPI_UPDATE_TX) {
tx_counter[spi] = 0;
/* Unmask TX interrupt sources. */
controller->imr = QM_SPI_IMR_TXEIM | QM_SPI_IMR_TXOIM;
} else {
rx_counter[spi] = 0;
tx_counter[spi] = 0;
/* Unmask both TX and RX interrupt sources. */
controller->imr = QM_SPI_IMR_TXEIM | QM_SPI_IMR_TXOIM |
QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM |
QM_SPI_IMR_RXFIM;
}
return 0;
}
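/*
 * Usage sketch (illustrative; next_buf is a hypothetical application
 * buffer): from a slave RX callback, qm_spi_irq_update() hands the driver
 * a fresh buffer so reception continues without restarting the controller:
 *
 * xfer.rx = next_buf;
 * xfer.rx_len = sizeof(next_buf);
 * qm_spi_irq_update(QM_SPI_SLV_0, &xfer, QM_SPI_UPDATE_RX);
 */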
int qm_spi_irq_transfer(const qm_spi_t spi,
const volatile qm_spi_async_transfer_t *const xfer)
{
qm_spi_update_t update = 0;
QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
QM_CHECK(xfer, -EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX_RX
? (xfer->tx_len == xfer->rx_len)
: 1,
-EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX ? (xfer->rx_len == 0) : 1,
-EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_RX ? (xfer->tx_len == 0) : 1,
-EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_EEPROM_READ
? (xfer->tx_len && xfer->rx_len)
: 1,
-EINVAL);
qm_spi_reg_t *const controller = QM_SPI[spi];
/* If we are in RX only or EEPROM Read mode, the ctrlr1 reg holds how
* many bytes the controller solicits, minus 1. We also set the same
* into rxftlr, so the controller only triggers a RX_FIFO_FULL
* interrupt when all frames are available at the FIFO for consumption.
*/
if (xfer->rx_len) {
controller->ctrlr1 = xfer->rx_len - 1;
controller->rxftlr = (xfer->rx_len < SPI_FIFOS_DEPTH)
? xfer->rx_len - 1
: SPI_DEFAULT_RX_THRESHOLD;
if ((tmode[spi] == QM_SPI_TMOD_RX) ||
(tmode[spi] == QM_SPI_TMOD_TX_RX) ||
(tmode[spi] == QM_SPI_TMOD_EEPROM_READ)) {
update |= QM_SPI_UPDATE_RX;
}
if ((tmode[spi] == QM_SPI_TMOD_TX) ||
(tmode[spi] == QM_SPI_TMOD_TX_RX) ||
(tmode[spi] == QM_SPI_TMOD_EEPROM_READ)) {
update |= QM_SPI_UPDATE_TX;
}
controller->txftlr = SPI_DEFAULT_TX_THRESHOLD;
spi_async_transfer[spi] = xfer;
tx_counter[spi] = 0;
rx_counter[spi] = 0;
tx_counter[spi] = 0;
qm_spi_irq_update(spi, xfer, update);
/* Unmask interrupts */
if (tmode[spi] == QM_SPI_TMOD_TX) {
controller->imr = QM_SPI_IMR_TXEIM | QM_SPI_IMR_TXOIM;
} else if (tmode[spi] == QM_SPI_TMOD_RX) {
controller->imr =
QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM | QM_SPI_IMR_RXFIM;
controller->ssienr = QM_SPI_SSIENR_SSIENR;
write_frame(spi, (uint8_t *)&tx_dummy_frame);
if (QM_SPI_SLV_0 != spi) {
/*
* If we are in RX only or EEPROM Read mode, the ctrlr1 reg
* holds how many bytes the controller solicits, minus 1.
* We also set the same into rxftlr, so the controller only
* triggers a RX_FIFO_FULL interrupt when all frames are
* available at the FIFO for consumption.
*/
if (xfer->rx_len) {
controller->ctrlr1 = xfer->rx_len - 1;
controller->rxftlr = (xfer->rx_len < SPI_FIFOS_DEPTH)
? xfer->rx_len - 1
: SPI_MST_DEFAULT_RX_THRESHOLD;
}
controller->txftlr = SPI_MST_DEFAULT_TX_THRESHOLD;
} else {
controller->imr = QM_SPI_IMR_TXEIM | QM_SPI_IMR_TXOIM |
QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM |
QM_SPI_IMR_RXFIM;
if (xfer->rx_len) {
controller->rxftlr =
(xfer->rx_len < SPI_SLV_DEFAULT_RX_THRESHOLD)
? xfer->rx_len - 1
: SPI_SLV_DEFAULT_RX_THRESHOLD;
}
controller->txftlr = SPI_SLV_DEFAULT_TX_THRESHOLD;
if (QM_SPI_TMOD_RX != tmode[spi]) {
/* Enable MISO line. */
controller->ctrlr0 &= ~QM_SPI_CTRLR0_SLV_OE;
} else {
/* Disable MISO line. */
controller->ctrlr0 |= QM_SPI_CTRLR0_SLV_OE;
}
}
controller->ssienr = QM_SPI_SSIENR_SSIENR; /** Enable SPI Device */
/* Enable SPI controller. */
controller->ssienr = QM_SPI_SSIENR_SSIENR;
if ((QM_SPI_SLV_0 != spi && QM_SPI_TMOD_RX == tmode[spi]) &&
(QM_SPI_FRAME_FORMAT_STANDARD == frf[spi])) {
/*
* In RX only, master is required to send
* a dummy frame in order to start the
* communication.
*/
write_frame(spi, (uint8_t *)&tx_dummy_frame);
}
return 0;
}
QM_ISR_DECLARE(qm_spi_master_0_isr)
{
handle_spi_interrupt(QM_SPI_MST_0);
handle_spi_mst_interrupt(QM_SPI_MST_0);
QM_ISR_EOI(QM_IRQ_SPI_MASTER_0_INT_VECTOR);
}
#if (QUARK_SE)
QM_ISR_DECLARE(qm_spi_master_1_isr)
{
handle_spi_interrupt(QM_SPI_MST_1);
handle_spi_mst_interrupt(QM_SPI_MST_1);
QM_ISR_EOI(QM_IRQ_SPI_MASTER_1_INT_VECTOR);
}
#endif
QM_ISR_DECLARE(qm_spi_slave_0_isr)
{
handle_spi_slv_interrupt(QM_SPI_SLV_0);
QM_ISR_EOI(QM_IRQ_SPI_SLAVE_0_INT_VECTOR);
}
int qm_spi_irq_transfer_terminate(const qm_spi_t spi)
{
QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
qm_spi_reg_t *const controller = QM_SPI[spi];
const qm_spi_async_transfer_t *const transfer = spi_async_transfer[spi];
const volatile qm_spi_async_transfer_t *const transfer =
spi_async_transfer[spi];
/* Mask the interrupts */
/* Mask the interrupts. */
controller->imr = QM_SPI_IMR_MASK_ALL;
controller->ssienr = 0; /** Disable SPI device */
/* Read how many frames are still on TX queue. */
uint16_t tx_fifo_frames = controller->txflr;
/* Disable SPI device. */
controller->ssienr = 0;
if (transfer->callback) {
uint16_t len = 0;
if (tmode[spi] == QM_SPI_TMOD_TX ||
tmode[spi] == QM_SPI_TMOD_TX_RX) {
len = tx_counter[spi];
if (tmode[spi] == QM_SPI_TMOD_TX) {
if (tx_counter[spi] > tx_fifo_frames) {
len = tx_counter[spi] - tx_fifo_frames;
} else {
len = 0;
}
} else {
len = rx_counter[spi];
}
@ -531,12 +809,14 @@ static void spi_dma_callback(void *callback_context, uint32_t len,
volatile bool *cb_pending_alternate_p;
/* The DMA driver returns a pointer to a dma_context struct from which
* we find out the corresponding SPI device and transfer direction. */
* we find out the corresponding SPI device and transfer direction.
*/
dma_context_t *const dma_context_p = callback_context;
const qm_spi_t spi = dma_context_p->spi_id;
QM_ASSERT(spi < QM_SPI_NUM);
qm_spi_reg_t *const controller = QM_SPI[spi];
const qm_spi_async_transfer_t *const transfer = spi_async_transfer[spi];
const volatile qm_spi_async_transfer_t *const transfer =
spi_async_transfer[spi];
QM_ASSERT(transfer);
const uint8_t frame_size = dfs[spi];
QM_ASSERT((frame_size == 1) || (frame_size == 2) || (frame_size == 4));
@ -564,11 +844,13 @@ static void spi_dma_callback(void *callback_context, uint32_t len,
if (error_code) {
/* Transfer failed, pass to client the error code returned by
* the DMA driver. */
* the DMA driver.
*/
client_error = error_code;
} else if (false == *cb_pending_alternate_p) {
/* TX transfers invoke the callback before the TX data has been
* transmitted, we need to wait here. */
* transmitted, we need to wait here.
*/
wait_for_controller(controller);
if (frames_transfered != frames_expected) {
@ -609,7 +891,8 @@ int qm_spi_dma_channel_config(
/* Every data transfer performed by the DMA core corresponds to an SPI
* data frame, the SPI uses the number of bits determined by a previous
* qm_spi_set_config call where the frame size was specified. */
* qm_spi_set_config call where the frame size was specified.
*/
switch (dfs[spi]) {
case 1:
dma_chan_cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_8;
@ -642,7 +925,8 @@ int qm_spi_dma_channel_config(
#endif
/* The DMA burst length has to fit in the space remaining in the
* TX FIFO after the watermark level, DMATDLR. */
* TX FIFO after the watermark level, DMATDLR.
*/
dma_chan_cfg.source_burst_length = SPI_DMA_WRITE_BURST_LENGTH;
dma_chan_cfg.destination_burst_length =
SPI_DMA_WRITE_BURST_LENGTH;
@ -660,7 +944,8 @@ int qm_spi_dma_channel_config(
dma_chan_cfg.handshake_interface = DMA_HW_IF_SPI_MASTER_0_RX;
#endif
/* The DMA burst length has to match the value of the receive
* watermark level, DMARDLR + 1. */
* watermark level, DMARDLR + 1.
*/
dma_chan_cfg.source_burst_length = SPI_DMA_READ_BURST_LENGTH;
dma_chan_cfg.destination_burst_length =
SPI_DMA_READ_BURST_LENGTH;
@ -676,7 +961,8 @@ int qm_spi_dma_channel_config(
/* The DMA driver needs a pointer to the client callback function so
* that later we can identify to which SPI controller the DMA callback
* corresponds to as well as whether we are dealing with a TX or RX
* dma_context struct. */
* dma_context struct.
*/
QM_ASSERT(dma_context_p);
dma_chan_cfg.callback_context = dma_context_p;
@ -746,7 +1032,8 @@ int qm_spi_dma_transfer(const qm_spi_t spi,
}
/* In RX-only or EEPROM mode, the ctrlr1 register holds how
* many data frames the controller solicits, minus 1. */
* many data frames the controller solicits, minus 1.
*/
controller->ctrlr1 = xfer->rx_len - 1;
}
@ -763,7 +1050,8 @@ int qm_spi_dma_transfer(const qm_spi_t spi,
}
/* Transfer pointer kept to extract user callback address and transfer
* client id when DMA completes. */
* client id when DMA completes.
*/
spi_async_transfer[spi] = xfer;
/* Enable the SPI device. */
@ -791,7 +1079,8 @@ int qm_spi_dma_transfer(const qm_spi_t spi,
if (!xfer->tx_len) {
/* In RX-only mode we need to transfer an initial dummy
* byte. */
* byte.
*/
write_frame(spi, (uint8_t *)&tx_dummy_frame);
}
}
@ -812,10 +1101,11 @@ int qm_spi_dma_transfer(const qm_spi_t spi,
if (xfer->rx_len) {
/* If a RX transfer was previously started, we
* need to stop it - the SPI device will be
* disabled when handling the DMA callback. */
* disabled when handling the DMA callback.
*/
qm_spi_dma_transfer_terminate(spi);
} else {
/* Disable DMA setting and SPI controller. */
/* Disable DMA setting and SPI controller. */
controller->dmacr = 0;
controller->ssienr = 0;
}
@ -888,4 +1178,21 @@ int qm_spi_restore_context(const qm_spi_t spi,
return 0;
}
#else
int qm_spi_save_context(const qm_spi_t spi, qm_spi_context_t *const ctx)
{
(void)spi;
(void)ctx;
return 0;
}
int qm_spi_restore_context(const qm_spi_t spi,
const qm_spi_context_t *const ctx)
{
(void)spi;
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -29,12 +29,13 @@
#include "qm_ss_spi.h"
#define FIFO_SIZE (8) /* Maximum size of RX or TX FIFO */
#define FIFO_RX_W_MARK (6) /* Interrupt mark to read RX FIFO */
#define FIFO_TX_W_MARK (3) /* Interrupt mark to write TX FIFO */
#define BYTES_PER_FRAME(reg_data) \
(((reg_data & QM_SS_SPI_CTRL_DFS_MASK) >> 3) + 1)
((((reg_data & QM_SS_SPI_CTRL_DFS_MASK) >> QM_SS_SPI_CTRL_DFS_OFFS) >> \
3) + \
1)
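For reference, the added QM_SS_SPI_CTRL_DFS_OFFS shift corrects the frame-size decode: assuming the DFS field stores the frame size in bits minus one (as programmed by qm_ss_spi_set_config below), an 8-bit frame (DFS = 7) gives (7 >> 3) + 1 = 1 byte per frame, a 16-bit frame (DFS = 15) gives 2, and a 32-bit frame (DFS = 31) gives 4.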
static uint32_t base[QM_SS_SPI_NUM] = {QM_SS_SPI_0_BASE, QM_SS_SPI_1_BASE};
@ -50,9 +51,9 @@ static void spi_disable(const qm_ss_spi_t spi)
/* Disable SPI device */
QM_SS_REG_AUX_NAND(base[spi] + QM_SS_SPI_SPIEN, QM_SS_SPI_SPIEN_EN);
/* MASK all interrupts. */
__builtin_arc_sr(0, base[spi] + QM_SS_SPI_INTR_MASK);
QM_SS_SPI_INTERRUPT_MASK_WRITE(0, base[spi]);
/* Clear all interrupts */
__builtin_arc_sr(QM_SS_SPI_INTR_ALL, base[spi] + QM_SS_SPI_CLR_INTR);
QM_SS_SPI_INTERRUPT_CLEAR_WRITE(QM_SS_SPI_INTR_ALL, base[spi]);
}
static __inline__ void fifo_write(const qm_ss_spi_t spi, const void *data,
@ -62,8 +63,10 @@ static __inline__ void fifo_write(const qm_ss_spi_t spi, const void *data,
if (size == 1) {
dr = *(uint8_t *)data;
} else {
} else if (size == 2) {
dr = *(uint16_t *)data;
} else {
dr = *(uint32_t *)data;
}
dr |= QM_SS_SPI_DR_W_MASK;
@ -73,11 +76,13 @@ static __inline__ void fifo_write(const qm_ss_spi_t spi, const void *data,
static __inline__ void fifo_read(const qm_ss_spi_t spi, void *data,
uint8_t size)
{
__builtin_arc_sr(QM_SS_SPI_DR_R_MASK, base[spi] + QM_SS_SPI_DR);
QM_SS_SPI_DUMMY_WRITE(base[spi]);
if (size == 1) {
*(uint8_t *)data = __builtin_arc_lr(base[spi] + QM_SS_SPI_DR);
} else {
} else if (size == 2) {
*(uint16_t *)data = __builtin_arc_lr(base[spi] + QM_SS_SPI_DR);
} else {
*(uint32_t *)data = __builtin_arc_lr(base[spi] + QM_SS_SPI_DR);
}
}
@ -87,20 +92,28 @@ int qm_ss_spi_set_config(const qm_ss_spi_t spi,
{
QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
QM_CHECK(cfg, -EINVAL);
uint32_t ctrl = 0;
/* Configuration can be changed only when SPI is disabled */
if (0 != (__builtin_arc_lr(base[spi] + QM_SS_SPI_SPIEN) &
QM_SS_SPI_SPIEN_EN)) {
return -EBUSY;
}
uint32_t ctrl = __builtin_arc_lr(QM_SS_SPI_0_BASE + QM_SS_SPI_CTRL);
ctrl &= QM_SS_SPI_CTRL_CLK_ENA;
/* Enable clock to peripheral to allow register writes */
QM_SS_SPI_ENABLE_REG_WRITES(base[spi]);
ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
ctrl &= ~(QM_SS_SPI_CTRL_DFS_MASK | QM_SS_SPI_CTRL_TMOD_MASK |
QM_SS_SPI_CTRL_BMOD_MASK);
ctrl |= cfg->frame_size << QM_SS_SPI_CTRL_DFS_OFFS;
ctrl |= cfg->transfer_mode << QM_SS_SPI_CTRL_TMOD_OFFS;
ctrl |= cfg->bus_mode << QM_SS_SPI_CTRL_BMOD_OFFS;
__builtin_arc_sr(ctrl, base[spi] + QM_SS_SPI_CTRL);
__builtin_arc_sr(cfg->clk_divider, base[spi] + QM_SS_SPI_TIMING);
QM_SS_SPI_CTRL_WRITE(ctrl, base[spi]);
QM_SS_SPI_BAUD_RATE_WRITE(cfg->clk_divider, base[spi]);
return 0;
}
@ -115,10 +128,7 @@ int qm_ss_spi_slave_select(const qm_ss_spi_t spi,
return -EBUSY;
}
uint32_t spien = __builtin_arc_lr(base[spi] + QM_SS_SPI_SPIEN);
spien &= ~QM_SS_SPI_SPIEN_SER_MASK;
spien |= (ss << QM_SS_SPI_SPIEN_SER_OFFS);
__builtin_arc_sr(spien, base[spi] + QM_SS_SPI_SPIEN);
QM_SS_SPI_SER_WRITE(ss, base[spi]);
return 0;
}
@ -145,7 +155,7 @@ int qm_ss_spi_transfer(const qm_ss_spi_t spi,
QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
QM_CHECK(xfer, -EINVAL);
uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
uint32_t ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
QM_SS_SPI_CTRL_TMOD_OFFS);
@ -163,16 +173,16 @@ int qm_ss_spi_transfer(const qm_ss_spi_t spi,
uint8_t *rx_buffer = xfer->rx;
uint8_t *tx_buffer = xfer->tx;
int ret = 0;
uint32_t sr = 0;
/* Calculate number of bytes per frame */
uint8_t bytes = BYTES_PER_FRAME(ctrl);
/* Disable all SPI interrupts */
__builtin_arc_sr(0, base[spi] + QM_SS_SPI_INTR_MASK);
QM_SS_SPI_INTERRUPT_MASK_WRITE(0, base[spi]);
/* Set NDF (Number of Data Frames) in RX or EEPROM Read mode. (-1) */
if (tmode == QM_SS_SPI_TMOD_RX || tmode == QM_SS_SPI_TMOD_EEPROM_READ) {
ctrl &= ~QM_SS_SPI_CTRL_NDF_MASK;
ctrl |= ((xfer->rx_len - 1) << QM_SS_SPI_CTRL_NDF_OFFS) &
QM_SS_SPI_CTRL_NDF_MASK;
__builtin_arc_sr(ctrl, base[spi] + QM_SS_SPI_CTRL);
QM_SS_SPI_NDF_WRITE((xfer->rx_len - 1), base[spi]);
}
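As a worked example, a transfer with xfer->rx_len = 4 programs NDF = 3, so the controller clocks in exactly four frames before stopping.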
/* RX only transfers need a dummy frame to be sent. */
@ -181,15 +191,13 @@ int qm_ss_spi_transfer(const qm_ss_spi_t spi,
tx_cnt = 1;
}
/* Calculate number of bytes per frame (1 or 2)*/
uint8_t bytes = BYTES_PER_FRAME(ctrl);
/* Enable SPI device */
QM_SS_REG_AUX_OR(base[spi] + QM_SS_SPI_SPIEN, QM_SS_SPI_SPIEN_EN);
while (tx_cnt || rx_cnt) {
uint32_t sr = __builtin_arc_lr(base[spi] + QM_SS_SPI_SR);
sr = __builtin_arc_lr(base[spi] + QM_SS_SPI_SR);
/* Break and report error if RX FIFO has overflown */
if (__builtin_arc_lr(base[spi] + QM_SS_SPI_INTR_STAT) &
if (QM_SS_SPI_INTERRUPT_STATUS_READ(base[spi]) &
QM_SS_SPI_INTR_RXOI) {
ret = -EIO;
if (status) {
@ -227,7 +235,7 @@ int qm_ss_spi_irq_transfer(const qm_ss_spi_t spi,
QM_CHECK(xfer, -EINVAL);
/* Load and save initial control register */
uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
uint32_t ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
QM_SS_SPI_CTRL_TMOD_OFFS);
uint8_t bytes = BYTES_PER_FRAME(ctrl);
@ -236,27 +244,29 @@ int qm_ss_spi_irq_transfer(const qm_ss_spi_t spi,
: 1,
-EINVAL);
uint32_t rftlr = 0;
uint32_t tftlr = 0;
spi_async_transfer[spi] = xfer;
tx_c[spi] = xfer->tx_len;
rx_c[spi] = xfer->rx_len;
/* Set NDF (Number of Data Frames) in RX or EEPROM Read mode. (-1) */
if (tmode == QM_SS_SPI_TMOD_RX || tmode == QM_SS_SPI_TMOD_EEPROM_READ) {
ctrl &= ~QM_SS_SPI_CTRL_NDF_MASK;
ctrl |= ((xfer->rx_len - 1) << QM_SS_SPI_CTRL_NDF_OFFS) &
QM_SS_SPI_CTRL_NDF_MASK;
__builtin_arc_sr(ctrl, base[spi] + QM_SS_SPI_CTRL);
QM_SS_SPI_NDF_WRITE((xfer->rx_len - 1), base[spi]);
}
uint32_t ftlr =
rftlr =
(((FIFO_RX_W_MARK < xfer->rx_len ? FIFO_RX_W_MARK : xfer->rx_len) -
1)
<< QM_SS_SPI_FTLR_RFT_OFFS) &
QM_SS_SPI_FTLR_RFT_MASK;
__builtin_arc_sr(ftlr, base[spi] + QM_SS_SPI_FTLR);
1));
tftlr = FIFO_TX_W_MARK;
/* Set FIFO threshold levels */
QM_SS_SPI_RFTLR_WRITE(rftlr, base[spi]);
QM_SS_SPI_TFTLR_WRITE(tftlr, base[spi]);
/* Unmask all interrupts */
__builtin_arc_sr(QM_SS_SPI_INTR_ALL, base[spi] + QM_SS_SPI_INTR_MASK);
QM_SS_SPI_INTERRUPT_MASK_WRITE(QM_SS_SPI_INTR_ALL, base[spi]);
/* Enable SPI device */
QM_SS_REG_AUX_OR(base[spi] + QM_SS_SPI_SPIEN, QM_SS_SPI_SPIEN_EN);
@ -269,19 +279,21 @@ int qm_ss_spi_irq_transfer(const qm_ss_spi_t spi,
return 0;
}
int qm_ss_spi_transfer_terminate(const qm_ss_spi_t spi)
int qm_ss_spi_irq_transfer_terminate(const qm_ss_spi_t spi)
{
QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
const qm_ss_spi_async_transfer_t *const transfer =
spi_async_transfer[spi];
uint32_t len = 0;
uint32_t ctrl = 0;
uint8_t tmode = 0;
spi_disable(spi);
if (transfer->callback) {
uint32_t len = 0;
uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
QM_SS_SPI_CTRL_TMOD_OFFS);
ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
QM_SS_SPI_CTRL_TMOD_OFFS);
if (tmode == QM_SS_SPI_TMOD_TX ||
tmode == QM_SS_SPI_TMOD_TX_RX) {
len = transfer->tx_len - tx_c[spi];
@ -302,13 +314,28 @@ int qm_ss_spi_transfer_terminate(const qm_ss_spi_t spi)
static void handle_spi_err_interrupt(const qm_ss_spi_t spi)
{
uint32_t intr_stat = __builtin_arc_lr(base[spi] + QM_SS_SPI_INTR_STAT);
uint32_t intr_stat = QM_SS_SPI_INTERRUPT_STATUS_READ(base[spi]);
const qm_ss_spi_async_transfer_t *const transfer =
spi_async_transfer[spi];
spi_disable(spi);
#if HAS_SS_SPI_VERBOSE_ERROR
if ((intr_stat & QM_SS_SPI_INTR_TXOI) && transfer->callback) {
transfer->callback(transfer->callback_data, -EIO,
QM_SS_SPI_TX_OVERFLOW,
transfer->tx_len - tx_c[spi]);
}
if ((intr_stat & QM_SS_SPI_INTR_RXUI) && transfer->callback) {
transfer->callback(transfer->callback_data, -EIO,
QM_SS_SPI_RX_UNDERFLOW,
transfer->rx_len - rx_c[spi]);
}
#else /* HAS_SS_SPI_VERBOSE_ERROR */
QM_ASSERT((intr_stat & QM_SS_SPI_INTR_STAT_TXOI) == 0);
QM_ASSERT((intr_stat & QM_SS_SPI_INTR_STAT_RXUI) == 0);
#endif /* HAS_SS_SPI_VERBOSE_ERROR */
if ((intr_stat & QM_SS_SPI_INTR_RXOI) && transfer->callback) {
transfer->callback(transfer->callback_data, -EIO,
@ -319,17 +346,20 @@ static void handle_spi_err_interrupt(const qm_ss_spi_t spi)
static void handle_spi_tx_interrupt(const qm_ss_spi_t spi)
{
/* Clear Transmit FIFO Empty interrupt. */
__builtin_arc_sr(QM_SS_SPI_INTR_TXEI, base[spi] + QM_SS_SPI_CLR_INTR);
uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
/* Calculate number of bytes per frame (1 or 2)*/
uint32_t ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
/* Calculate number of bytes per frame */
uint8_t bytes = BYTES_PER_FRAME(ctrl);
uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
QM_SS_SPI_CTRL_TMOD_OFFS);
uint32_t rxflr = 0;
uint32_t txflr = 0;
int32_t cnt = 0;
const qm_ss_spi_async_transfer_t *const transfer =
spi_async_transfer[spi];
/* Clear Transmit FIFO Empty interrupt. */
QM_SS_SPI_INTERRUPT_CLEAR_WRITE(QM_SS_SPI_INTR_TXEI, base[spi]);
/* Jump to the right position of TX buffer.
* If no bytes were transmitted before, we start from the beginning,
* otherwise we jump to the next frame to be sent.
@ -347,15 +377,15 @@ static void handle_spi_tx_interrupt(const qm_ss_spi_t spi)
transfer->tx_len);
}
} else {
QM_SS_REG_AUX_NAND(base[spi] + QM_SS_SPI_INTR_MASK,
QM_SS_SPI_INTR_TXEI);
QM_SS_SPI_INTERRUPT_MASK_NAND(QM_SS_SPI_INTR_TXEI,
base[spi]);
}
return;
}
/* Make sure RX fifo does not overflow */
uint32_t rxflr = __builtin_arc_lr(base[spi] + QM_SS_SPI_RXFLR);
uint32_t txflr = __builtin_arc_lr(base[spi] + QM_SS_SPI_TXFLR);
int32_t cnt = FIFO_SIZE - rxflr - txflr - 1;
rxflr = __builtin_arc_lr(base[spi] + QM_SS_SPI_RXFLR);
txflr = __builtin_arc_lr(base[spi] + QM_SS_SPI_TXFLR);
cnt = QM_SS_SPI_FIFO_DEPTH - rxflr - txflr - 1;
while (tx_c[spi] && cnt > 0) {
fifo_write(spi, tx_buffer, bytes);
tx_buffer += bytes;
@ -366,14 +396,15 @@ static void handle_spi_tx_interrupt(const qm_ss_spi_t spi)
static void handle_spi_rx_interrupt(const qm_ss_spi_t spi)
{
/* Clear RX-FIFO FULL interrupt */
__builtin_arc_sr(QM_SS_SPI_INTR_RXFI, base[spi] + QM_SS_SPI_CLR_INTR);
uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
/* Calculate number of bytes per frame (1 or 2)*/
uint32_t ctrl = QM_SS_SPI_CTRL_READ(base[spi]);
/* Calculate number of bytes per frame */
uint8_t bytes = BYTES_PER_FRAME(ctrl);
const qm_ss_spi_async_transfer_t *const transfer =
spi_async_transfer[spi];
uint32_t new_irq_level = 0;
/* Clear RX-FIFO FULL interrupt */
QM_SS_SPI_INTERRUPT_CLEAR_WRITE(QM_SS_SPI_INTR_RXFI, base[spi]);
/*
* Jump to the right position of RX buffer.
@ -390,14 +421,11 @@ static void handle_spi_rx_interrupt(const qm_ss_spi_t spi)
rx_c[spi]--;
}
/* Set new FIFO threshold or complete transfer */
uint32_t new_irq_level =
new_irq_level =
(FIFO_RX_W_MARK < rx_c[spi] ? FIFO_RX_W_MARK : rx_c[spi]);
if (rx_c[spi]) {
new_irq_level--;
uint32_t ftlr = __builtin_arc_lr(base[spi] + QM_SS_SPI_FTLR);
ftlr &= ~QM_SS_SPI_FTLR_RFT_MASK;
ftlr |= (new_irq_level << QM_SS_SPI_FTLR_RFT_OFFS);
__builtin_arc_sr(ftlr, base[spi] + QM_SS_SPI_FTLR);
QM_SS_SPI_RFTLR_WRITE(new_irq_level, base[spi]);
} else {
spi_disable(spi);
if (transfer->callback) {
@ -462,4 +490,22 @@ int qm_ss_spi_restore_context(const qm_ss_spi_t spi,
return 0;
}
#else
int qm_ss_spi_save_context(const qm_ss_spi_t spi,
qm_ss_spi_context_t *const ctx)
{
(void)spi;
(void)ctx;
return 0;
}
int qm_ss_spi_restore_context(const qm_ss_spi_t spi,
const qm_ss_spi_context_t *const ctx)
{
(void)spi;
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -130,4 +130,18 @@ int qm_pic_timer_restore_context(const qm_pic_timer_context_t *const ctx)
return 0;
}
#else
int qm_pic_timer_save_context(qm_pic_timer_context_t *const ctx)
{
(void)ctx;
return 0;
}
int qm_pic_timer_restore_context(const qm_pic_timer_context_t *const ctx)
{
(void)ctx;
return 0;
}
#endif

View file

@ -125,4 +125,22 @@ int qm_ss_timer_restore_context(const qm_ss_timer_t timer,
return 0;
}
#else
int qm_ss_timer_save_context(const qm_ss_timer_t timer,
qm_ss_timer_context_t *const ctx)
{
(void)timer;
(void)ctx;
return 0;
}
int qm_ss_timer_restore_context(const qm_ss_timer_t timer,
const qm_ss_timer_context_t *const ctx)
{
(void)timer;
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -755,4 +755,21 @@ int qm_uart_restore_context(const qm_uart_t uart,
return 0;
}
#else
int qm_uart_save_context(const qm_uart_t uart, qm_uart_context_t *const ctx)
{
(void)uart;
(void)ctx;
return 0;
}
int qm_uart_restore_context(const qm_uart_t uart,
const qm_uart_context_t *const ctx)
{
(void)uart;
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -139,13 +139,10 @@ typedef struct {
static usb_priv_t usb_ctrl[QM_USB_NUM];
/* This helper is only used by QM_CHECKs, thus on Debug mode. */
#if DEBUG || UNIT_TEST
static bool usb_dc_ep_is_valid(const qm_usb_ep_idx_t ep)
{
return (ep < QM_USB_IN_EP_NUM + QM_USB_OUT_EP_NUM);
}
#endif
static int usb_dc_reset(const qm_usb_t usb)
{
@ -526,7 +523,9 @@ int qm_usb_ep_set_config(const qm_usb_t usb,
QM_CHECK(usb < QM_USB_NUM, -EINVAL);
QM_CHECK(usb_ctrl[usb].attached, -EINVAL);
QM_CHECK(ep_cfg, -EINVAL);
QM_CHECK(usb_dc_ep_is_valid(ep_cfg->ep), -EINVAL);
if (!usb_dc_ep_is_valid(ep_cfg->ep)) {
return -EINVAL;
}
volatile uint32_t *p_depctl;
const uint8_t ep_idx = ep_cfg->ep < QM_USB_IN_EP_NUM
@ -606,7 +605,9 @@ int qm_usb_ep_set_stall_state(const qm_usb_t usb, const qm_usb_ep_idx_t ep,
{
QM_CHECK(usb < QM_USB_NUM, -EINVAL);
QM_CHECK(usb_ctrl[usb].attached, -EINVAL);
QM_CHECK(usb_dc_ep_is_valid(ep), -EINVAL);
if (!usb_dc_ep_is_valid(ep)) {
return -EINVAL;
}
const uint8_t ep_idx = IS_IN_EP(ep) ? ep : ep - QM_USB_IN_EP_NUM;
@ -634,7 +635,9 @@ int qm_usb_ep_halt(const qm_usb_t usb, const qm_usb_ep_idx_t ep)
{
QM_CHECK(usb < QM_USB_NUM, -EINVAL);
QM_CHECK(usb_ctrl[usb].attached, -EINVAL);
QM_CHECK(usb_dc_ep_is_valid(ep), -EINVAL);
if (!usb_dc_ep_is_valid(ep)) {
return -EINVAL;
}
const uint8_t ep_idx = IS_IN_EP(ep) ? ep : ep - QM_USB_IN_EP_NUM;
volatile uint32_t *p_depctl;
@ -664,8 +667,10 @@ int qm_usb_ep_is_stalled(const qm_usb_t usb, const qm_usb_ep_idx_t ep,
{
QM_CHECK(usb < QM_USB_NUM, -EINVAL);
QM_CHECK(usb_ctrl[usb].attached, -EINVAL);
QM_CHECK(usb_dc_ep_is_valid(ep), -EINVAL);
QM_CHECK(stalled, -EINVAL);
if (!usb_dc_ep_is_valid(ep)) {
return -EINVAL;
}
volatile uint32_t *p_depctl;
@ -685,7 +690,9 @@ int qm_usb_ep_enable(const qm_usb_t usb, const qm_usb_ep_idx_t ep)
{
QM_CHECK(usb < QM_USB_NUM, -EINVAL);
QM_CHECK(usb_ctrl[usb].attached, -EINVAL);
QM_CHECK(usb_dc_ep_is_valid(ep), -EINVAL);
if (!usb_dc_ep_is_valid(ep)) {
return -EINVAL;
}
usb_ctrl[usb].ep_ctrl[ep].enabled = true;
@ -715,7 +722,9 @@ int qm_usb_ep_enable(const qm_usb_t usb, const qm_usb_ep_idx_t ep)
int qm_usb_ep_disable(const qm_usb_t usb, const qm_usb_ep_idx_t ep)
{
QM_CHECK(usb < QM_USB_NUM, -EINVAL);
QM_CHECK(usb_dc_ep_is_valid(ep), -EINVAL);
if (!usb_dc_ep_is_valid(ep)) {
return -EINVAL;
}
/* Disable EP intr then de-activate, disable and set NAK. */
if (!IS_IN_EP(ep)) {
@ -744,15 +753,20 @@ int qm_usb_ep_flush(const qm_usb_t usb, const qm_usb_ep_idx_t ep)
{
QM_CHECK(usb < QM_USB_NUM, -EINVAL);
QM_CHECK(usb_ctrl[usb].attached, -EINVAL);
QM_CHECK(usb_dc_ep_is_valid(ep), -EINVAL);
if (!usb_dc_ep_is_valid(ep)) {
return -EINVAL;
}
/*
* RX FIFO is global and cannot be flushed per EP, but it can
* be flushed through bit 4 of GRSTCTL. For now we don't flush it
* here since both FIFOs are always flushed during the Core
* Soft Reset done at usb_dc_reset(), which is called on both
* qm_usb_attach() and qm_usb_reset().
*/
QM_CHECK(IS_IN_EP(ep), -EINVAL);
if (!IS_IN_EP(ep)) {
return -EINVAL;
}
/* Each IN endpoint has dedicated Tx FIFO. */
QM_USB[usb].grstctl |= ep << QM_USB_GRSTCTL_TX_FNUM_OFFSET;
@ -775,8 +789,12 @@ int qm_usb_ep_write(const qm_usb_t usb, const qm_usb_ep_idx_t ep,
{
QM_CHECK(usb < QM_USB_NUM, -EINVAL);
QM_CHECK(usb_ctrl[usb].attached, -EINVAL);
QM_CHECK(usb_dc_ep_is_valid(ep), -EINVAL);
QM_CHECK(IS_IN_EP(ep), -EINVAL);
if (!usb_dc_ep_is_valid(ep)) {
return -EINVAL;
}
if (!IS_IN_EP(ep)) {
return -EINVAL;
}
/* Check if IN EP is enabled */
if (!usb_ctrl[usb].ep_ctrl[ep].enabled) {
@ -801,9 +819,13 @@ int qm_usb_ep_read(const qm_usb_t usb, const qm_usb_ep_idx_t ep,
{
QM_CHECK(usb < QM_USB_NUM, -EINVAL);
QM_CHECK(usb_ctrl[usb].attached, -EINVAL);
QM_CHECK(usb_dc_ep_is_valid(ep), -EINVAL);
QM_CHECK(!IS_IN_EP(ep), -EINVAL);
QM_CHECK(data, -EINVAL);
if (!usb_dc_ep_is_valid(ep)) {
return -EINVAL;
}
if (IS_IN_EP(ep)) {
return -EINVAL;
}
uint32_t i, j, data_len, bytes_to_copy;
@ -862,9 +884,13 @@ int qm_usb_ep_get_bytes_read(const qm_usb_t usb, const qm_usb_ep_idx_t ep,
{
QM_CHECK(usb < QM_USB_NUM, -EINVAL);
QM_CHECK(usb_ctrl[usb].attached, -EINVAL);
QM_CHECK(usb_dc_ep_is_valid(ep), -EINVAL);
QM_CHECK(!IS_IN_EP(ep), -EINVAL);
QM_CHECK(read_bytes, -EINVAL);
if (!usb_dc_ep_is_valid(ep)) {
return -EINVAL;
}
if (IS_IN_EP(ep)) {
return -EINVAL;
}
/* Check if OUT EP enabled. */
if (!usb_ctrl[usb].ep_ctrl[ep].enabled) {

View file

@ -31,28 +31,53 @@
#include "clk.h"
#include "soc_watch.h"
#define QM_WDT_RELOAD_VALUE (0x76)
static void (*callback[QM_WDT_NUM])(void *data);
static void *callback_data[QM_WDT_NUM];
#ifndef UNIT_TEST
qm_wdt_reg_t *qm_wdt[QM_WDT_NUM] = {((qm_wdt_reg_t *)QM_WDT_0_BASE),
#if (NUM_WDT_CONTROLLERS > 1)
((qm_wdt_reg_t *)QM_WDT_1_BASE)
#endif /* NUM_WDT_CONTROLLERS > 1 */
};
#endif /* UNIT_TEST */
QM_ISR_DECLARE(qm_wdt_0_isr)
{
if (callback[QM_WDT_0]) {
callback[QM_WDT_0](callback_data[QM_WDT_0]);
}
/* Clear the interrupt by reading. */
QM_WDT[QM_WDT_0]->wdt_eoi;
QM_ISR_EOI(QM_IRQ_WDT_0_INT_VECTOR);
}
#if (NUM_WDT_CONTROLLERS > 1)
QM_ISR_DECLARE(qm_wdt_1_isr)
{
if (NULL != callback[QM_WDT_1]) {
(callback[QM_WDT_1])(callback_data[QM_WDT_1]);
}
/* Clear the interrupt by reading. */
QM_WDT[QM_WDT_1]->wdt_eoi;
QM_ISR_EOI(QM_IRQ_WDT_1_INT_VECTOR);
}
#endif /* (NUM_WDT_CONTROLLERS > 1) */
int qm_wdt_start(const qm_wdt_t wdt)
{
QM_CHECK(wdt < QM_WDT_NUM, -EINVAL);
QM_WDT[wdt].wdt_cr |= QM_WDT_CR_WDT_ENABLE;
QM_WDT[wdt]->wdt_cr |= QM_WDT_CR_WDT_ENABLE;
#if (HAS_WDT_CLOCK_ENABLE)
clk_periph_enable(CLK_PERIPH_WDT_REGISTER | CLK_PERIPH_CLK);
QM_SCSS_PERIPHERAL->periph_cfg0 |= BIT(1);
#endif /* HAS_WDT_CLOCK_ENABLE */
qm_wdt_reload(wdt);
return 0;
}
@ -63,20 +88,31 @@ int qm_wdt_set_config(const qm_wdt_t wdt, const qm_wdt_config_t *const cfg)
QM_CHECK(cfg != NULL, -EINVAL);
QM_CHECK(cfg->timeout <= QM_WDT_TORR_TOP_MASK, -EINVAL);
qm_wdt_reload(wdt);
if (cfg->mode == QM_WDT_MODE_INTERRUPT_RESET) {
callback[wdt] = cfg->callback;
callback_data[wdt] = cfg->callback_data;
}
QM_WDT[wdt].wdt_cr &= ~QM_WDT_CR_RMOD;
QM_WDT[wdt].wdt_cr |= cfg->mode << QM_WDT_CR_RMOD_OFFSET;
QM_WDT[wdt]->wdt_cr &= ~QM_WDT_CR_RMOD;
QM_WDT[wdt]->wdt_cr |= cfg->mode << QM_WDT_CR_RMOD_OFFSET;
/* If the SoC has the C2 Pause enable bit for LMT WDT. */
#if (HAS_WDT_PAUSE)
/* Make sure WDT0 is the one requested to be configured. */
QM_CHECK(QM_WDT_0 == wdt, -EINVAL);
(QM_SCU_AON_CRU_BLOCK)->wdt0_tclk_en &= ~C2_WDT_PAUSE_EN_MASK;
(QM_SCU_AON_CRU_BLOCK)->wdt0_tclk_en |= cfg->pause_en
<< C2_WDT_PAUSE_EN_SHIFT;
#endif /* HAS_WDT_PAUSE */
/*
* Timeout range register. Select the timeout from the pre-defined
* tables. These tables can be found in the SoC databook or register
* file.
*/
QM_WDT[wdt].wdt_torr = cfg->timeout;
QM_WDT[wdt]->wdt_torr = cfg->timeout;
/* Kick the WDT to load the Timeout Period (TOP) value. */
qm_wdt_reload(wdt);
@ -88,7 +124,12 @@ int qm_wdt_reload(const qm_wdt_t wdt)
{
QM_CHECK(wdt < QM_WDT_NUM, -EINVAL);
QM_WDT[wdt].wdt_crr = QM_WDT_RELOAD_VALUE;
/*
* This register is used to restart the WDT counter. As a safety feature
* to prevent accidental restarts, the value 0x76 must be written.
* A restart also clears the WDT interrupt.
*/
QM_WDT[wdt]->wdt_crr = QM_WDT_RELOAD_VALUE;
return 0;
}
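Putting the renamed WDT API together, a minimal usage sketch (assuming qm_wdt.h is included; the timeout index and mode are illustrative):

static void wdt_warning(void *data)
{
	(void)data;
	/* First timeout expired: save state before a possible reset. */
}

void wdt_example(void)
{
	qm_wdt_config_t cfg = {
	    .timeout = 10, /* Illustrative index into the timeout table. */
	    .mode = QM_WDT_MODE_INTERRUPT_RESET,
	    .callback = wdt_warning,
	    .callback_data = NULL,
	};

	qm_wdt_set_config(QM_WDT_0, &cfg);
	qm_wdt_start(QM_WDT_0);

	while (1) {
		/* Kick before the timeout elapses; writes 0x76 to wdt_crr. */
		qm_wdt_reload(QM_WDT_0);
	}
}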
@ -99,8 +140,8 @@ int qm_wdt_save_context(const qm_wdt_t wdt, qm_wdt_context_t *const ctx)
QM_CHECK(wdt < QM_WDT_NUM, -EINVAL);
QM_CHECK(ctx != NULL, -EINVAL);
ctx->wdt_torr = QM_WDT[wdt].wdt_torr;
ctx->wdt_cr = QM_WDT[wdt].wdt_cr;
ctx->wdt_torr = QM_WDT[wdt]->wdt_torr;
ctx->wdt_cr = QM_WDT[wdt]->wdt_cr;
return 0;
}
@ -114,13 +155,30 @@ int qm_wdt_restore_context(const qm_wdt_t wdt,
/*
* TOP_INIT field has to be written before Watchdog Timer is enabled.
*/
QM_WDT[wdt].wdt_torr = ctx->wdt_torr;
QM_WDT[wdt].wdt_cr = ctx->wdt_cr;
QM_WDT[wdt]->wdt_torr = ctx->wdt_torr;
QM_WDT[wdt]->wdt_cr = ctx->wdt_cr;
/*
* Reload the WDT value to avoid interrupts firing on wake-up.
*/
QM_WDT[wdt].wdt_crr = QM_WDT_RELOAD_VALUE;
QM_WDT[wdt]->wdt_crr = QM_WDT_RELOAD_VALUE;
return 0;
}
#else
int qm_wdt_save_context(const qm_wdt_t wdt, qm_wdt_context_t *const ctx)
{
(void)wdt;
(void)ctx;
return 0;
}
int qm_wdt_restore_context(const qm_wdt_t wdt,
const qm_wdt_context_t *const ctx)
{
(void)wdt;
(void)ctx;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -31,7 +31,7 @@
#define __QM_COMMON_H__
#if (UNIT_TEST)
#define __volatile__(x)
#define __volatile__(...)
#define __asm__
#endif /* UNIT_TEST */
@ -272,7 +272,7 @@ int pico_printf(const char *format, ...);
#define QM_VER_STRINGIFY(major, minor, patch) \
QM_STRINGIFY(major) "." QM_STRINGIFY(minor) "." QM_STRINGIFY(patch)
#if (SOC_WATCH_ENABLE) && (!QM_SENSOR)
#if (SOC_WATCH_ENABLE)
/**
* Front-end macro for logging a SoC Watch event. When SOC_WATCH_ENABLE
* is not set to 1, the macro expands to nothing, there is no overhead.
@ -302,9 +302,22 @@ int pico_printf(const char *format, ...);
do { \
soc_watch_log_app_event(event, subtype, param); \
} while (0)
/**
* Front-end macro for triggering a buffer flush via soc_watch.
*
* This allows applications layered on top of QMSI to trigger the transfer of
* profiler information to the host whenever it requires.
* When SOC_WATCH_ENABLE is not set to 1,
* the macro expands to nothing, there is no overhead.
*/
#define SOC_WATCH_TRIGGER_FLUSH() \
do { \
soc_watch_trigger_flush(); \
} while (0)
#else
#define SOC_WATCH_LOG_EVENT(event, param)
#define SOC_WATCH_LOG_APP_EVENT(event, subtype, param)
#define SOC_WATCH_TRIGGER_FLUSH()
#endif
#endif /* __QM_COMMON_H__ */
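A minimal sketch of the new flush hook (the event and subtype values are illustrative placeholders):

/* At an application-defined sync point, log an event and push the
 * profiler buffer to the host. Both expand to nothing when
 * SOC_WATCH_ENABLE != 1. */
SOC_WATCH_LOG_APP_EVENT(SOCW_EVENT_APP, 0, 0);
SOC_WATCH_TRIGGER_FLUSH();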

View file

@ -232,8 +232,7 @@ int clk_sys_set_mode(const clk_sys_mode_t mode, const clk_sys_div_t div)
apply_flash_timings(sys_ticks_per_us);
/* Log any clock changes. */
SOC_WATCH_LOG_EVENT(SOCW_EVENT_REGISTER, SOCW_REG_OSC0_CFG1);
SOC_WATCH_LOG_EVENT(SOCW_EVENT_REGISTER, SOCW_REG_CCU_SYS_CLK_CTL);
SOC_WATCH_LOG_EVENT(SOCW_EVENT_FREQ, 0);
return 0;
}
@ -348,8 +347,10 @@ int clk_periph_enable(const clk_periph_t clocks)
QM_SCSS_CCU->ccu_periph_clk_gate_ctl |= clocks;
#if (HAS_SW_SOCWATCH)
SOC_WATCH_LOG_EVENT(SOCW_EVENT_REGISTER,
SOCW_REG_CCU_PERIPH_CLK_GATE_CTL);
#endif /* HAS_SW_SOCWATCH */
return 0;
}
@ -360,8 +361,10 @@ int clk_periph_disable(const clk_periph_t clocks)
QM_SCSS_CCU->ccu_periph_clk_gate_ctl &= ~clocks;
#if (HAS_SW_SOCWATCH)
SOC_WATCH_LOG_EVENT(SOCW_EVENT_REGISTER,
SOCW_REG_CCU_PERIPH_CLK_GATE_CTL);
#endif /* HAS_SW_SOCWATCH */
return 0;
}
@ -380,3 +383,30 @@ void clk_sys_udelay(uint32_t microseconds)
while (get_ticks() - tsc_start < timeout) {
}
}
int clk_dma_enable(void)
{
QM_SCSS_CCU->ccu_mlayer_ahb_ctl |= QM_CCU_DMA_CLK_EN;
return 0;
}
int clk_dma_disable(void)
{
QM_SCSS_CCU->ccu_mlayer_ahb_ctl &= ~QM_CCU_DMA_CLK_EN;
return 0;
}
/**
* Get I2C clock frequency in MHz.
*
* @return I2C clock frequency in MHz.
*/
uint32_t get_i2c_clk_freq_in_mhz(void)
{
return clk_sys_get_ticks_per_us() >>
((QM_SCSS_CCU->ccu_periph_clk_div_ctl0 &
CLK_PERIPH_DIV_DEF_MASK) >>
QM_CCU_PERIPH_PCLK_DIV_OFFSET);
}
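As a worked example: with a 32 MHz system clock (32 ticks per microsecond) and a peripheral clock divider field of 1, this returns 32 >> 1 = 16 MHz.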

View file

@ -51,7 +51,7 @@ typedef struct {
static power_context_t power_context;
void power_cpu_halt(void)
void qm_power_cpu_halt(void)
{
SOC_WATCH_LOG_EVENT(SOCW_EVENT_HALT, 0);
/*
@ -72,16 +72,16 @@ static void clear_all_pending_interrupts(void)
QM_SCSS_CMP->cmp_stat_clr = -1;
/* Clear RTC interrupts. */
QM_RTC->rtc_eoi;
QM_RTC[QM_RTC_0]->rtc_eoi;
/* Clear timers interrupt flag. */
QM_PWM->timerseoi;
QM_PWM[QM_PWM_0]->timerseoi;
/* Clear GPIO interrupts. */
QM_GPIO[QM_GPIO_0]->gpio_porta_eoi = -1;
}
void power_soc_sleep(void)
void qm_power_soc_sleep(void)
{
/* Save register values. */
power_context.ac_power_save = QM_SCSS_CMP->cmp_pwr;
@ -147,21 +147,21 @@ void power_soc_sleep(void)
clk_sys_set_mode(CLK_SYS_HYB_OSC_4MHZ, CLK_SYS_DIV_8);
/* Set the RAR to retention mode. */
rar_set_mode(RAR_RETENTION);
qm_rar_set_mode(QM_RAR_RETENTION);
/*
* If wake source is any of AON Timer, RTC, GPIO interrupt, program
* CCU_SYS_CLK_CTL.CCU_SYS_CLK_SEL to RTC Oscillator.
*/
/* Enter SoC sleep mode. */
power_cpu_halt();
qm_power_cpu_halt();
}
void power_soc_sleep_restore(void)
void qm_power_soc_sleep_restore(void)
{
/* From here on, restore the SoC to an active state. */
/* Set the RAR to normal mode. */
rar_set_mode(RAR_NORMAL);
qm_rar_set_mode(QM_RAR_NORMAL);
/*
* Since we are running below 4MHz, 0 wait states are configured.
@ -202,7 +202,7 @@ void power_soc_sleep_restore(void)
QM_SCSS_GP->gps0 &= ~QM_GPS0_POWER_STATE_SLEEP;
}
void power_soc_deep_sleep(const power_wake_event_t wake_event)
void qm_power_soc_deep_sleep(const qm_power_wake_event_t wake_event)
{
/* Save register values. */
power_context.ac_power_save = QM_SCSS_CMP->cmp_pwr;
@ -227,11 +227,11 @@ void power_soc_deep_sleep(const power_wake_event_t wake_event)
* comparator.
*/
switch (wake_event) {
case POWER_WAKE_FROM_RTC:
case QM_POWER_WAKE_FROM_RTC:
QM_SCSS_CCU->wake_mask =
SET_ALL_BITS & ~QM_CCU_WAKE_MASK_RTC_BIT;
break;
case POWER_WAKE_FROM_GPIO_COMP:
case QM_POWER_WAKE_FROM_GPIO_COMP:
default:
QM_SCSS_CCU->wake_mask = SET_ALL_BITS &
~(QM_CCU_WAKE_MASK_COMPARATOR_BIT |
@ -255,7 +255,7 @@ void power_soc_deep_sleep(const power_wake_event_t wake_event)
QM_SCSS_PMUX->pmux_slew[0] = 0;
SOC_WATCH_LOG_EVENT(SOCW_EVENT_REGISTER, SOCW_REG_PMUX_SLEW);
if (wake_event != POWER_WAKE_FROM_RTC) {
if (wake_event != QM_POWER_WAKE_FROM_RTC) {
/* Disable RTC. */
QM_SCSS_CCU->osc1_cfg0 &= ~QM_OSC1_PD;
@ -312,9 +312,9 @@ void power_soc_deep_sleep(const power_wake_event_t wake_event)
__asm__ __volatile__("nop");
/* Set the RAR to retention mode. */
rar_set_mode(RAR_RETENTION);
qm_rar_set_mode(QM_RAR_RETENTION);
if (wake_event == POWER_WAKE_FROM_RTC) {
if (wake_event == QM_POWER_WAKE_FROM_RTC) {
/* Start running on the rtc clock */
clk_sys_set_mode(CLK_SYS_RTC_OSC, CLK_SYS_DIV_1);
}
@ -323,14 +323,14 @@ void power_soc_deep_sleep(const power_wake_event_t wake_event)
clk_periph_disable(CLK_PERIPH_REGISTER | CLK_PERIPH_CLK);
/* Enter SoC deep sleep mode. */
power_cpu_halt();
qm_power_cpu_halt();
}
void power_soc_deep_sleep_restore(void)
void qm_power_soc_deep_sleep_restore(void)
{
/* We are now exiting from deep sleep mode. */
/* Set the RAR to normal mode. */
rar_set_mode(RAR_NORMAL);
qm_rar_set_mode(QM_RAR_NORMAL);
/*
* Since we are running below 4MHz, 0 wait states are configured.
@ -424,34 +424,34 @@ void power_soc_deep_sleep_restore(void)
QM_SCSS_GP->gps0 &= ~QM_GPS0_POWER_STATE_DEEP_SLEEP;
}
void power_soc_restore(void)
void qm_power_soc_restore(void)
{
/*
* If the SoC is waking from sleep or deep sleep mode then the full
* system state must be restored.
*/
if (QM_SCSS_GP->gps0 & QM_GPS0_POWER_STATE_SLEEP) {
power_soc_sleep_restore();
qm_power_soc_sleep_restore();
} else if (QM_SCSS_GP->gps0 & QM_GPS0_POWER_STATE_DEEP_SLEEP) {
power_soc_deep_sleep_restore();
qm_power_soc_deep_sleep_restore();
}
}
int rar_set_mode(const rar_state_t mode)
int qm_rar_set_mode(const qm_rar_state_t mode)
{
QM_CHECK(mode <= RAR_RETENTION, -EINVAL);
QM_CHECK(mode <= QM_RAR_RETENTION, -EINVAL);
volatile uint32_t i = 32;
volatile uint32_t reg;
switch (mode) {
case RAR_RETENTION:
case QM_RAR_RETENTION:
QM_SCSS_PMU->aon_vr |=
(QM_AON_VR_PASS_CODE | QM_AON_VR_ROK_BUF_VREG_MASK);
QM_SCSS_PMU->aon_vr |=
(QM_AON_VR_PASS_CODE | QM_AON_VR_VREG_SEL);
break;
case RAR_NORMAL:
case QM_RAR_NORMAL:
reg = QM_SCSS_PMU->aon_vr & ~QM_AON_VR_VREG_SEL;
QM_SCSS_PMU->aon_vr = QM_AON_VR_PASS_CODE | reg;
/* Wait for >= 2usec, at most 64 clock cycles. */

View file

@ -0,0 +1,100 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "qm_common.h"
#include "qm_soc_regs.h"
#include "qm_interrupt_router.h"
#include "qm_interrupt_router_regs.h"
/* Event router base addr for LMT interrupt routing, for linear IRQ mapping */
#define INTERRUPT_ROUTER_LMT_INT_MASK_BASE \
(&QM_INTERRUPT_ROUTER->i2c_master_0_int_mask)
void _qm_ir_unmask_int(uint32_t irq, uint32_t register_offset)
{
uint32_t *interrupt_router_intmask;
/* Route peripheral interrupt to Lakemont/Sensor Subsystem */
interrupt_router_intmask =
(uint32_t *)INTERRUPT_ROUTER_LMT_INT_MASK_BASE + register_offset;
if (!QM_IR_INT_LOCK_MASK(*interrupt_router_intmask)) {
switch (irq) {
case QM_IRQ_COMPARATOR_0_INT:
/*
* Comparator error mask uses 1 bit per Comparator
* rather than the generic host mask.
*/
QM_INTERRUPT_ROUTER->comparator_0_host_int_mask &=
~0x0007ffff;
break;
case QM_IRQ_DMA_0_ERROR_INT:
/*
* DMA error mask uses 1 bit per DMA channel rather than the
* generic host mask.
*/
*interrupt_router_intmask &= ~QM_IR_DMA_ERROR_HOST_MASK;
break;
default:
QM_IR_UNMASK_INTERRUPTS(*interrupt_router_intmask);
break;
}
}
}
void _qm_ir_mask_int(uint32_t irq, uint32_t register_offset)
{
uint32_t *interrupt_router_intmask;
/* Route peripheral interrupt to Lakemont/Sensor Subsystem */
interrupt_router_intmask =
(uint32_t *)INTERRUPT_ROUTER_LMT_INT_MASK_BASE + register_offset;
if (!QM_IR_INT_LOCK_MASK(*interrupt_router_intmask)) {
switch (irq) {
case QM_IRQ_COMPARATOR_0_INT:
QM_INTERRUPT_ROUTER->comparator_0_host_int_mask |=
0x0007ffff;
break;
case QM_IRQ_DMA_0_ERROR_INT:
/*
* DMA error mask uses 1 bit per DMA channel rather than the
* generic host mask.
*/
*interrupt_router_intmask |= QM_IR_DMA_ERROR_HOST_MASK;
break;
default:
QM_IR_MASK_INTERRUPTS(*interrupt_router_intmask);
break;
}
}
}

View file

@ -295,6 +295,37 @@ uint32_t clk_sys_get_ticks_per_us(void);
*/
void clk_sys_udelay(uint32_t microseconds);
/**
* Enable the DMA clock.
*
* Enable the DMA clock by setting the corresponding bit in the AHB Control
* register. By default the DMA clock is disabled.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int clk_dma_enable(void);
/**
* Disable the DMA clock.
*
* Disable the DMA clock by clearing the corresponding bit in the AHB Control
* register.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int clk_dma_disable(void);
/**
* Get I2C clock frequency in MHz.
*
* @return I2C clock frequency in MHz.
*/
uint32_t get_i2c_clk_freq_in_mhz(void);
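A minimal usage sketch for the new DMA clock hooks (the transfer in the middle is a placeholder):

clk_dma_enable(); /* The DMA clock is disabled by default. */
/* ... configure DMA channels and run transfers ... */
clk_dma_disable(); /* Gate the clock again to save power. */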
/**
* @}
*/

View file

@ -44,9 +44,10 @@
* Wake source for deep sleep mode type.
*/
typedef enum {
POWER_WAKE_FROM_GPIO_COMP, /**< Use GPIO / Comparator as wake source. */
POWER_WAKE_FROM_RTC, /**< Use RTC as wake source. */
} power_wake_event_t;
/** Use GPIO/Comparator as wake source. */
QM_POWER_WAKE_FROM_GPIO_COMP,
QM_POWER_WAKE_FROM_RTC, /**< Use RTC as wake source. */
} qm_power_wake_event_t;
/**
* Put CPU in halt state.
@ -56,7 +57,7 @@ typedef enum {
* This function can be called with interrupts disabled.
* Interrupts will be enabled before triggering the transition.
*/
void power_cpu_halt(void);
void qm_power_cpu_halt(void);
/**
* Put SoC to sleep.
@ -79,27 +80,27 @@ void power_cpu_halt(void);
* - RTC
* - Low power comparators
*/
void power_soc_sleep();
void qm_power_soc_sleep();
/**
* Put SoC to deep sleep.
*
* Enter into deep sleep mode. All clocks are gated. The Wake source for this
* function depends on the input parameter, POWER_WAKE_FROM_GPIO_COMP will
* enable waking from GPIO or comparator pins and POWER_WAKE_FROM_RTC will
* function depends on the input parameter, QM_POWER_WAKE_FROM_GPIO_COMP will
* enable waking from GPIO or comparator pins and QM_POWER_WAKE_FROM_RTC will
* enable waking from the RTC.
*
* @param[in] wake_event Select wake source for deep sleep mode.
*/
void power_soc_deep_sleep(const power_wake_event_t wake_event);
void qm_power_soc_deep_sleep(const qm_power_wake_event_t wake_event);
/**
* Restore system state after sleep or deep sleep.
*
* On wakeup, the system is restored to the previous state before
* power_soc_sleep() or power_soc_deep_sleep() was called.
* qm_power_soc_sleep() or qm_power_soc_deep_sleep() was called.
*/
void power_soc_restore(void);
void qm_power_soc_restore(void);
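A minimal sketch of the renamed sleep flow, assuming an RTC alarm has been armed elsewhere:

/* Enter deep sleep; only an RTC event can wake the SoC. */
qm_power_soc_deep_sleep(QM_POWER_WAKE_FROM_RTC);

/* On the wake path, restore the state saved before sleeping. */
qm_power_soc_restore();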
/**
* Retention alternator regulator for Quark D2000.
@ -112,9 +113,9 @@ void power_soc_restore(void);
* RAR modes type.
*/
typedef enum {
RAR_NORMAL, /**< Normal mode = 50 mA. */
RAR_RETENTION /**< Retention mode = 300 uA. */
} rar_state_t;
QM_RAR_NORMAL, /**< Normal mode = 50 mA. */
QM_RAR_RETENTION /**< Retention mode = 300 uA. */
} qm_rar_state_t;
/**
* Change operating mode of RAR.
@ -130,7 +131,7 @@ typedef enum {
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int rar_set_mode(const rar_state_t mode);
int qm_rar_set_mode(const qm_rar_state_t mode);
/**
* @}

View file

@ -0,0 +1,65 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_INTERRUPT_ROUTER_H__
#define __QM_INTERRUPT_ROUTER_H__
/**
* Quark D2000 SoC Interrupt Router.
*
* @defgroup groupQUARKD2000INTERRUPTROUTER SoC Interrupt Router (D2000)
* @{
*/
void _qm_ir_mask_int(uint32_t irq, uint32_t register_offset);
void _qm_ir_unmask_int(uint32_t irq, uint32_t register_offset);
/*
* Unmask a given IRQ in the Interrupt Router.
*
* @param[in] irq IRQ number. Must be of type QM_IRQ_XXX.
*/
#define QM_IR_UNMASK_INT(irq) \
do { \
_qm_ir_unmask_int(irq, irq##_MASK_OFFSET); \
} while (0);
/*
* Mask a given IRQ in the Interrupt Router.
*
* @param[in] irq IRQ number. Must be of type QM_IRQ_XXX.
*/
#define QM_IR_MASK_INT(irq) \
do { \
_qm_ir_mask_int(irq, irq##_MASK_OFFSET); \
} while (0);
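A brief usage sketch (the RTC IRQ name is illustrative; any QM_IRQ_XXX with a matching _MASK_OFFSET works):

QM_IR_UNMASK_INT(QM_IRQ_RTC_0_INT); /* Route the IRQ to this core. */
/* ... service interrupts ... */
QM_IR_MASK_INT(QM_IRQ_RTC_0_INT);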
/** @} */
#endif /* __QM_INTERRUPT_ROUTER_H__ */

View file

@ -47,6 +47,15 @@
/* Masks for single source halts in the Interrupt Router. */
#define QM_IR_INT_LMT_HALT_MASK BIT(16)
/**
* Interrupt Router macros to determine if the specified peripheral interrupt
* mask has been locked.
*/
#define QM_IR_LMT_INT_LOCK_HALT_MASK(_peripheral_) \
(QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(2))
#define QM_IR_LMT_INT_LOCK_MASK(_peripheral_) \
(QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(0))
/* Interrupt Router Unmask interrupts for a peripheral. */
#define QM_IR_UNMASK_LMT_INTERRUPTS(_peripheral_) \
(_peripheral_ &= ~(QM_IR_INT_LMT_MASK))
@ -76,6 +85,10 @@
#define QM_IR_UNMASK_HALTS(_peripheral_) QM_IR_UNMASK_LMT_HALTS(_peripheral_)
#define QM_IR_MASK_HALTS(_peripheral_) QM_IR_MASK_LMT_HALTS(_peripheral_)
#define QM_IR_INT_LOCK_MASK(_peripheral_) QM_IR_LMT_INT_LOCK_MASK(_peripheral_)
#define QM_IR_INT_LOCK_HALT_MASK(_peripheral_) \
QM_IR_LMT_INT_LOCK_HALT_MASK(_peripheral_)
#define QM_IR_INT_MASK QM_IR_INT_LMT_MASK
#define QM_IR_INT_HALT_MASK QM_IR_INT_LMT_HALT_MASK
#define QM_IR_GET_MASK(_peripheral_) QM_IR_GET_LMT_MASK(_peripheral_)

View file

@ -0,0 +1,205 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_PIN_FUNCTIONS_H__
#define __QM_PIN_FUNCTIONS_H__
/**
* SoC Pins definition.
*
* @defgroup group SOC_PINS
* @{
*/
#include "qm_common.h"
/*
* This file provides an abstraction layer for pin numbers and pin functions.
*/
/* Package pins to pin IDs. */
/* QFN40 package. */
#define QM_PIN_ID_QFN40_31 QM_PIN_ID_0
#define QM_PIN_ID_QFN40_32 QM_PIN_ID_1
#define QM_PIN_ID_QFN40_33 QM_PIN_ID_2
#define QM_PIN_ID_QFN40_34 QM_PIN_ID_3
#define QM_PIN_ID_QFN40_35 QM_PIN_ID_4
#define QM_PIN_ID_QFN40_36 QM_PIN_ID_5
#define QM_PIN_ID_QFN40_37 QM_PIN_ID_6
#define QM_PIN_ID_QFN40_38 QM_PIN_ID_7
#define QM_PIN_ID_QFN40_39 QM_PIN_ID_8
#define QM_PIN_ID_QFN40_11 QM_PIN_ID_9
#define QM_PIN_ID_QFN40_2 QM_PIN_ID_10
#define QM_PIN_ID_QFN40_3 QM_PIN_ID_11
#define QM_PIN_ID_QFN40_4 QM_PIN_ID_12
#define QM_PIN_ID_QFN40_5 QM_PIN_ID_13
#define QM_PIN_ID_QFN40_6 QM_PIN_ID_14
#define QM_PIN_ID_QFN40_7 QM_PIN_ID_15
#define QM_PIN_ID_QFN40_8 QM_PIN_ID_16
#define QM_PIN_ID_QFN40_9 QM_PIN_ID_17
#define QM_PIN_ID_QFN40_10 QM_PIN_ID_18
#define QM_PIN_ID_QFN40_18 QM_PIN_ID_19
#define QM_PIN_ID_QFN40_13 QM_PIN_ID_20
#define QM_PIN_ID_QFN40_14 QM_PIN_ID_21
#define QM_PIN_ID_QFN40_15 QM_PIN_ID_22
#define QM_PIN_ID_QFN40_16 QM_PIN_ID_23
#define QM_PIN_ID_QFN40_21 QM_PIN_ID_24
/* Pin function name to pin function number. */
/* Pin ID 0. */
#define QM_PIN_0_FN_GPIO_0 QM_PMUX_FN_0
#define QM_PIN_0_FN_AIN_0 QM_PMUX_FN_1
#define QM_PIN_0_FN_SPI0_M_CS_B_0 QM_PMUX_FN_2
/* Pin ID 1. */
#define QM_PIN_1_FN_GPIO_1 QM_PMUX_FN_0
#define QM_PIN_1_FN_AIN_1 QM_PMUX_FN_1
#define QM_PIN_1_FN_SPI0_M_CS_B_1 QM_PMUX_FN_2
/* Pin ID 2. */
#define QM_PIN_2_FN_GPIO_2 QM_PMUX_FN_0
#define QM_PIN_2_FN_AIN_2 QM_PMUX_FN_1
#define QM_PIN_2_FN_SPI0_M_CS_B_2 QM_PMUX_FN_2
/* Pin ID 3. */
#define QM_PIN_3_FN_GPIO_3 QM_PMUX_FN_0
#define QM_PIN_3_FN_AIN_3 QM_PMUX_FN_1
#define QM_PIN_3_FN_SPI0_M_CS_B_3 QM_PMUX_FN_2
/* Pin ID 4. */
#define QM_PIN_4_FN_GPIO_4 QM_PMUX_FN_0
#define QM_PIN_4_FN_AIN_4 QM_PMUX_FN_1
#define QM_PIN_4_FN_RTC_CLK_OUT QM_PMUX_FN_2
/* Pin ID 5. */
#define QM_PIN_5_FN_GPIO_5 QM_PMUX_FN_0
#define QM_PIN_5_FN_AIN_5 QM_PMUX_FN_1
#define QM_PIN_5_FN_SYS_CLK_OUT QM_PMUX_FN_2
/* Pin ID 6. */
#define QM_PIN_6_FN_GPIO_6 QM_PMUX_FN_0
#define QM_PIN_6_FN_AIN_6 QM_PMUX_FN_1
#define QM_PIN_6_FN_I2C0_SCL QM_PMUX_FN_2
/* Pin ID 7. */
#define QM_PIN_7_FN_GPIO_7 QM_PMUX_FN_0
#define QM_PIN_7_FN_AIN_7 QM_PMUX_FN_1
#define QM_PIN_7_FN_I2C0_SDA QM_PMUX_FN_2
/* Pin ID 8. */
#define QM_PIN_8_FN_GPIO_8 QM_PMUX_FN_0
#define QM_PIN_8_FN_AIN_8 QM_PMUX_FN_1
#define QM_PIN_8_FN_SPI_S_SCK QM_PMUX_FN_2
/* Pin ID 9. */
#define QM_PIN_9_FN_GPIO_9 QM_PMUX_FN_0
#define QM_PIN_9_FN_AIN_9 QM_PMUX_FN_1
#define QM_PIN_9_FN_SPI_S_MOSI QM_PMUX_FN_2
/* Pin ID 10. */
#define QM_PIN_10_FN_GPIO_10 QM_PMUX_FN_0
#define QM_PIN_10_FN_AIN_10 QM_PMUX_FN_1
#define QM_PIN_10_FN_SPI_S_MISO QM_PMUX_FN_2
/* Pin ID 11. */
#define QM_PIN_11_FN_GPIO_11 QM_PMUX_FN_0
#define QM_PIN_11_FN_AIN_11 QM_PMUX_FN_1
#define QM_PIN_11_FN_SPI_S_CS_B QM_PMUX_FN_2
/* Pin ID 12. */
#define QM_PIN_12_FN_GPIO_12 QM_PMUX_FN_0
#define QM_PIN_12_FN_AIN_12 QM_PMUX_FN_1
#define QM_PIN_12_FN_UART0_TXD QM_PMUX_FN_2
/* Pin ID 13. */
#define QM_PIN_13_FN_GPIO_13 QM_PMUX_FN_0
#define QM_PIN_13_FN_AIN_13 QM_PMUX_FN_1
#define QM_PIN_13_FN_UART0_RXD QM_PMUX_FN_2
/* Pin ID 14. */
#define QM_PIN_14_FN_GPIO_14 QM_PMUX_FN_0
#define QM_PIN_14_FN_AIN_14 QM_PMUX_FN_1
#define QM_PIN_14_FN_UART0_RTS QM_PMUX_FN_2
/* Pin ID 15. */
#define QM_PIN_15_FN_GPIO_15 QM_PMUX_FN_0
#define QM_PIN_15_FN_AIN_15 QM_PMUX_FN_1
#define QM_PIN_15_FN_UART0_CTS QM_PMUX_FN_2
/* Pin ID 16. */
#define QM_PIN_16_FN_GPIO_16 QM_PMUX_FN_0
#define QM_PIN_16_FN_AIN_16 QM_PMUX_FN_1
#define QM_PIN_16_FN_SPI0_M_SCK QM_PMUX_FN_2
/* Pin ID 17. */
#define QM_PIN_17_FN_GPIO_17 QM_PMUX_FN_0
#define QM_PIN_17_FN_AIN_17 QM_PMUX_FN_1
#define QM_PIN_17_FN_SPI0_M_MOSI QM_PMUX_FN_2
/* Pin ID 18. */
#define QM_PIN_18_FN_GPIO_18 QM_PMUX_FN_0
#define QM_PIN_18_FN_AIN_18 QM_PMUX_FN_1
#define QM_PIN_18_FN_SPI0_M_MISO QM_PMUX_FN_2
/* Pin ID 19. */
#define QM_PIN_19_FN_TDO QM_PMUX_FN_0
#define QM_PIN_19_FN_GPIO_19 QM_PMUX_FN_1
#define QM_PIN_19_FN_PWM_0 QM_PMUX_FN_2
/* Pin ID 20. */
#define QM_PIN_20_FN_TRST_N QM_PMUX_FN_0
#define QM_PIN_20_FN_GPIO_20 QM_PMUX_FN_1
#define QM_PIN_20_FN_UART1_TXD QM_PMUX_FN_2
/* Pin ID 21. */
#define QM_PIN_21_FN_TCK QM_PMUX_FN_0
#define QM_PIN_21_FN_GPIO_21 QM_PMUX_FN_1
#define QM_PIN_21_FN_UART1_RXD QM_PMUX_FN_2
/* Pin ID 22. */
#define QM_PIN_22_FN_TMS QM_PMUX_FN_0
#define QM_PIN_22_FN_GPIO_22 QM_PMUX_FN_1
#define QM_PIN_22_FN_UART1_RTS QM_PMUX_FN_2
/* Pin ID 23. */
#define QM_PIN_23_FN_TDI QM_PMUX_FN_0
#define QM_PIN_23_FN_GPIO_23 QM_PMUX_FN_1
#define QM_PIN_23_FN_UART1_CTS QM_PMUX_FN_2
/* Pin ID 24. */
#define QM_PIN_24_FN_GPIO_24 QM_PMUX_FN_0
#define QM_PIN_24_FN_LPD_SIG_OUT QM_PMUX_FN_1
#define QM_PIN_24_FN_PWM_1 QM_PMUX_FN_2
/**
* @}
*/
#endif /* __QM_PIN_FUNCTIONS_H__ */

View file

@ -100,7 +100,7 @@
#define QM_IRQ_I2C_0_INT_VECTOR 36
#define QM_IRQ_SPI_SLAVE_0_INT 5
#define QM_IRQ_SPI_SLAVE_0_INT_MASK_OFFSET 3
#define QM_IRQ_SPI_SLAVE_0_INT_MASK_OFFSET 5
#define QM_IRQ_SPI_SLAVE_0_INT_VECTOR 37
#define QM_IRQ_UART_1_INT 6

View file

@ -31,8 +31,9 @@
#define __REGISTERS_H__
#include "qm_common.h"
#include "qm_soc_interrupts.h"
#include "qm_interrupt_router_regs.h"
#include "qm_soc_interrupts.h"
#include "flash_layout.h"
/**
* Quark D2000 SoC Registers.
@ -85,6 +86,9 @@ qm_scss_ccu_reg_t test_scss_ccu;
#define QM_SCSS_CCU ((qm_scss_ccu_reg_t *)QM_SCSS_CCU_BASE)
#endif
/* The GPS0 register usage. */
#define QM_GPS0_BIT_FM (0) /**< Start Firmware Manager. */
/* Hybrid oscillator output select (0=Silicon, 1=Crystal) */
#define QM_OSC0_MODE_SEL BIT(3)
#define QM_OSC0_PD BIT(2)
@ -287,13 +291,28 @@ typedef struct {
aonpt_cfg; /**< Always-on periodic timer configuration register. */
} qm_aonc_reg_t;
#define qm_aonc_context_t uint8_t
#define HAS_AONPT_BUSY_BIT (0)
#define QM_AONC_ENABLE (BIT(0))
#define QM_AONC_DISABLE (~QM_AONC_ENABLE)
#define QM_AONPT_INTERRUPT (BIT(0))
#define QM_AONPT_CLR (BIT(0))
#define QM_AONPT_RST (BIT(1))
#if (UNIT_TEST)
qm_aonc_reg_t test_aonc;
#define QM_AONC ((qm_aonc_reg_t *)(&test_aonc))
qm_aonc_reg_t test_aonc_instance[QM_AONC_NUM];
qm_aonc_reg_t *test_aonc[QM_AONC_NUM];
#define QM_AONC test_aonc
#else
#define QM_AONC_BASE (0xB0800700)
#define QM_AONC ((qm_aonc_reg_t *)QM_AONC_BASE)
extern qm_aonc_reg_t *qm_aonc[QM_AONC_NUM];
#define QM_AONC_0_BASE (0xB0800700)
#define QM_AONC qm_aonc
#endif
/** @} */
@ -412,15 +431,19 @@ typedef struct {
timer_loadcount2[QM_PWM_ID_NUM]; /**< Timer Load Count 2 */
} qm_pwm_reg_t;
#define qm_pwm_context_t uint8_t
#if (UNIT_TEST)
qm_pwm_reg_t test_pwm_t;
#define QM_PWM ((qm_pwm_reg_t *)(&test_pwm_t))
qm_pwm_reg_t test_pwm_instance[QM_PWM_NUM];
qm_pwm_reg_t *test_pwm[QM_PWM_NUM];
#define QM_PWM test_pwm
#else
extern qm_pwm_reg_t *qm_pwm[QM_PWM_NUM];
/* PWM register base address. */
#define QM_PWM_BASE (0xB0000800)
/* PWM register block. */
#define QM_PWM ((qm_pwm_reg_t *)QM_PWM_BASE)
#define QM_PWM qm_pwm
#endif
#define PWM_START (1)
@ -430,6 +453,8 @@ qm_pwm_reg_t test_pwm_t;
#define QM_PWM_INTERRUPT_MASK_OFFSET (0x2)
#define NUM_PWM_CONTROLLER_INTERRUPTS (1)
/**
* Timer N Control (TimerNControlReg)
*
@ -484,16 +509,20 @@ typedef struct {
QM_RW uint32_t wdt_comp_type; /**< Component Type Register. */
} qm_wdt_reg_t;
#define qm_wdt_context_t uint8_t
#if (UNIT_TEST)
qm_wdt_reg_t test_wdt;
#define QM_WDT ((qm_wdt_reg_t *)(&test_wdt))
qm_wdt_reg_t test_wdt_instance[QM_WDT_NUM];
qm_wdt_reg_t *test_wdt[QM_WDT_NUM];
#define QM_WDT test_wdt
#else
extern qm_wdt_reg_t *qm_wdt[QM_WDT_NUM];
/* WDT register base address. */
#define QM_WDT_BASE (0xB0000000)
#define QM_WDT_0_BASE (0xB0000000)
/* WDT register block. */
#define QM_WDT ((qm_wdt_reg_t *)QM_WDT_BASE)
#define QM_WDT qm_wdt
#endif
/* Watchdog enable. */
@ -504,6 +533,18 @@ qm_wdt_reg_t test_wdt;
#define QM_WDT_CR_RMOD_OFFSET (1)
/* Watchdog Timeout Mask. */
#define QM_WDT_TORR_TOP_MASK (0xF)
/* Watchdog reload special value. */
#define QM_WDT_RELOAD_VALUE (0x76)
/* Number of WDT controllers. */
#define NUM_WDT_CONTROLLERS (1)
/* Watchdog does not have pause enable. */
#define HAS_WDT_PAUSE (0)
/* Software SoC watch required. */
#define HAS_SW_SOCWATCH (1)
/* Peripheral WDT clock enable mask. */
#define QM_WDT_CLOCK_EN_MASK (BIT(10))
/* Required to enable WDT clock on start. */
#define HAS_WDT_CLOCK_ENABLE (1)
/**
* WDT timeout table (in clock cycles):
@ -666,6 +707,8 @@ typedef struct {
QM_RW uint32_t padding[204]; /* 0x400 - 0xD0 */
} qm_uart_reg_t;
#define qm_uart_context_t uint8_t
#if (UNIT_TEST)
qm_uart_reg_t test_uart_instance;
qm_uart_reg_t *test_uart[QM_UART_NUM];
@ -687,8 +730,8 @@ extern qm_uart_reg_t *qm_uart[QM_UART_NUM];
* @{
*/
/** Number of SPI controllers (only master driver available). */
typedef enum { QM_SPI_MST_0 = 0, QM_SPI_NUM } qm_spi_t;
/** Number of SPI controllers. */
typedef enum { QM_SPI_MST_0 = 0, QM_SPI_SLV_0, QM_SPI_NUM } qm_spi_t;
/** SPI register map. */
typedef struct {
@ -724,6 +767,8 @@ typedef struct {
QM_RW uint32_t padding[0x1C4]; /* (0x800 - 0xF0) / 4 */
} qm_spi_reg_t;
#define qm_spi_context_t uint8_t
#if (UNIT_TEST)
qm_spi_reg_t test_spi;
qm_spi_reg_t *test_spi_controllers[QM_SPI_NUM];
@ -749,6 +794,7 @@ extern qm_spi_reg_t *qm_spi_controllers[QM_SPI_NUM];
#define QM_SPI_CTRLR0_TMOD_OFFSET (8)
#define QM_SPI_CTRLR0_SCPOL_SCPH_OFFSET (6)
#define QM_SPI_CTRLR0_FRF_OFFSET (4)
#define QM_SPI_CTRLR0_SLV_OE BIT(10)
/* SPI SSI Enable register */
#define QM_SPI_SSIENR_SSIENR BIT(0)
@ -808,20 +854,25 @@ typedef struct {
QM_RW uint32_t rtc_comp_version; /**< End of Interrupt Register. */
} qm_rtc_reg_t;
#define qm_rtc_context_t uint8_t
#define QM_RTC_CCR_INTERRUPT_ENABLE BIT(0)
#define QM_RTC_CCR_INTERRUPT_MASK BIT(1)
#define QM_RTC_CCR_ENABLE BIT(2)
#if (UNIT_TEST)
qm_rtc_reg_t test_rtc;
#define QM_RTC ((qm_rtc_reg_t *)(&test_rtc))
qm_rtc_reg_t test_rtc_instance[QM_RTC_NUM];
qm_rtc_reg_t *test_rtc[QM_RTC_NUM];
#define QM_RTC test_rtc
#else
extern qm_rtc_reg_t *qm_rtc[QM_RTC_NUM];
/* RTC register base address. */
#define QM_RTC_BASE (0xB0000400)
/* RTC register block. */
#define QM_RTC ((qm_rtc_reg_t *)QM_RTC_BASE)
#define QM_RTC qm_rtc
#endif
/** @} */
@ -892,6 +943,8 @@ typedef struct {
QM_RW uint32_t ic_comp_type; /**< Component Type. */
} qm_i2c_reg_t;
#define qm_i2c_context_t uint8_t
#if (UNIT_TEST)
qm_i2c_reg_t test_i2c_instance[QM_I2C_NUM];
qm_i2c_reg_t *test_i2c[QM_I2C_NUM];
@ -967,6 +1020,7 @@ extern qm_i2c_reg_t *qm_i2c[QM_I2C_NUM];
#define QM_I2C_IC_LCNT_MIN (8)
#define QM_I2C_IC_HCNT_MAX (65525)
#define QM_I2C_IC_HCNT_MIN (6)
#define QM_I2C_IC_TAR_MASK (0x3FF)
#define QM_I2C_FIFO_SIZE (16)
@ -1007,6 +1061,8 @@ typedef struct {
QM_RW uint32_t gpio_config_reg1; /**< GPIO Configuration Register 1. */
} qm_gpio_reg_t;
#define qm_gpio_context_t uint8_t
#define QM_NUM_GPIO_PINS (25)
#if (UNIT_TEST)
@ -1103,6 +1159,9 @@ qm_adc_reg_t test_adc;
* @{
*/
#define NUM_FLASH_CONTROLLERS (1)
#define HAS_FLASH_WRITE_DISABLE (1)
/** Number of Flash controllers. */
typedef enum { QM_FLASH_0 = 0, QM_FLASH_NUM } qm_flash_t;
@ -1122,6 +1181,8 @@ typedef struct {
QM_RW uint32_t mpr_vdata; /**< MPR Violation Data Value Register */
} qm_flash_reg_t;
#define qm_flash_context_t uint8_t
#define QM_FLASH_REGION_DATA_0_SIZE (0x1000)
#define QM_FLASH_REGION_DATA_0_PAGES (0x02)
@ -1194,6 +1255,11 @@ extern qm_flash_reg_t *qm_flash[QM_FLASH_NUM];
/* Flash perform mass erase. */
#define MASS_ERASE BIT(7)
/* ROM read disable for upper 4k. */
#define ROM_RD_DIS_U BIT(3)
/* ROM read disable for lower 4k. */
#define ROM_RD_DIS_L BIT(2)
#define QM_FLASH_ADDRESS_MASK (0x7FF)
/* Increment by 4 bytes each time, but there is an offset of 2, so 0x10. */
#define QM_FLASH_ADDR_INC (0x10)
@ -1204,6 +1270,8 @@ extern qm_flash_reg_t *qm_flash[QM_FLASH_NUM];
#define QM_FLASH_PAGE_SIZE_BYTES (0x800)
/* Flash page size in bits. */
#define QM_FLASH_PAGE_SIZE_BITS (11)
/* OTP ROM_PROG bit. */
#define QM_FLASH_STTS_ROM_PROG (BIT(2))
/** @} */
@ -1223,6 +1291,11 @@ typedef enum {
QM_FPR_NUM
} qm_fpr_id_t;
#define qm_fpr_context_t uint8_t
/* The addressing granularity of FPRs. */
#define QM_FPR_GRANULARITY (1024)
/** @} */
/**
@ -1246,6 +1319,11 @@ typedef struct {
QM_RW uint32_t mpr_vsts; /**< MPR_VSTS */
} qm_mpr_reg_t;
#define qm_mpr_context_t uint8_t
/* The addressing granularity of MPRs. */
#define QM_MPR_GRANULARITY (1024)
#if (UNIT_TEST)
qm_mpr_reg_t test_mpr;
@ -1288,6 +1366,8 @@ typedef struct {
QM_RW pic_timer_reg_pad_t timer_ccr; /**< Current Count Register */
} qm_pic_timer_reg_t;
#define qm_pic_timer_context_t uint8_t
#if (UNIT_TEST)
qm_pic_timer_reg_t test_pic_timer;
#define QM_PIC_TIMER ((qm_pic_timer_reg_t *)(&test_pic_timer))
@ -1371,6 +1451,8 @@ typedef struct {
QM_RW mvic_reg_pad_t ccr; /**< Timer current count. */
} qm_mvic_reg_t;
#define qm_irq_context_t uint8_t
#define QM_MVIC_REG_VER (0x01) /* MVIC version. */
#define QM_MVIC_REG_REDTBL (0x10) /* Redirection table base. */
@ -1468,6 +1550,8 @@ typedef struct {
QM_RW uint32_t dst_sg_high; /**< DSR */
} qm_dma_chan_reg_t;
#define qm_dma_context_t uint8_t
/* DMA channel control register offsets and masks. */
#define QM_DMA_CTL_L_INT_EN_MASK BIT(0)
#define QM_DMA_CTL_L_DST_TR_WIDTH_OFFSET (1)
@ -1513,6 +1597,9 @@ typedef struct {
#define QM_DMA_CFG_H_DEST_PER_OFFSET (11)
#define QM_DMA_CFG_H_DEST_PER_MASK (0xf << QM_DMA_CFG_H_DEST_PER_OFFSET)
#define QM_DMA_ENABLE_CLOCK(dma) \
(QM_SCSS_CCU->ccu_mlayer_ahb_ctl |= QM_CCU_DMA_CLK_EN)
/** DMA interrupt register map. */
typedef struct {
QM_RW uint32_t raw_tfr_low; /**< RawTfr */
@ -1609,6 +1696,8 @@ extern qm_dma_reg_t *qm_dma[QM_DMA_NUM];
/* Refer to "HARDWARE_ISSUES.rst" for fix description. */
#define FIX_1 (1)
#define FIX_2 (0)
#define FIX_3 (1)
/** @} */
@ -1621,7 +1710,10 @@ extern qm_dma_reg_t *qm_dma[QM_DMA_NUM];
uint32_t test_rom_version;
#define ROM_VERSION_ADDRESS &test_rom_version;
#else
#define ROM_VERSION_ADDRESS (0x1FFC);
#define ROM_VERSION_ADDRESS \
(BL_DATA_FLASH_REGION_BASE + \
(BL_DATA_SECTION_BASE_PAGE * QM_FLASH_PAGE_SIZE_BYTES) + \
sizeof(qm_flash_data_trim_t))
#endif
/** @} */

View file

@ -241,8 +241,7 @@ int clk_sys_set_mode(const clk_sys_mode_t mode, const clk_sys_div_t div)
apply_flash_timings(sys_ticks_per_us);
/* Log any clock changes. */
SOC_WATCH_LOG_EVENT(SOCW_EVENT_REGISTER, SOCW_REG_OSC0_CFG1);
SOC_WATCH_LOG_EVENT(SOCW_EVENT_REGISTER, SOCW_REG_CCU_SYS_CLK_CTL);
SOC_WATCH_LOG_EVENT(SOCW_EVENT_FREQ, 0);
return 0;
}
@ -339,8 +338,10 @@ int clk_periph_enable(const clk_periph_t clocks)
QM_SCSS_CCU->ccu_periph_clk_gate_ctl |= clocks;
#if (HAS_SW_SOCWATCH)
SOC_WATCH_LOG_EVENT(SOCW_EVENT_REGISTER,
SOCW_REG_CCU_PERIPH_CLK_GATE_CTL);
#endif /* HAS_SW_SOCWATCH */
return 0;
}
@ -351,9 +352,10 @@ int clk_periph_disable(const clk_periph_t clocks)
QM_SCSS_CCU->ccu_periph_clk_gate_ctl &= ~clocks;
#if (HAS_SW_SOCWATCH)
SOC_WATCH_LOG_EVENT(SOCW_EVENT_REGISTER,
SOCW_REG_CCU_PERIPH_CLK_GATE_CTL);
#endif /* HAS_SW_SOCWATCH */
return 0;
}
@ -429,3 +431,30 @@ int clk_sys_usb_disable(void)
return 0;
}
int clk_dma_enable(void)
{
QM_SCSS_CCU->ccu_mlayer_ahb_ctl |= QM_CCU_DMA_CLK_EN;
return 0;
}
int clk_dma_disable(void)
{
QM_SCSS_CCU->ccu_mlayer_ahb_ctl &= ~QM_CCU_DMA_CLK_EN;
return 0;
}
/**
* Get I2C clock frequency in MHz.
*
* @return I2C clock frequency in MHz.
*/
uint32_t get_i2c_clk_freq_in_mhz(void)
{
return clk_sys_get_ticks_per_us() >>
((QM_SCSS_CCU->ccu_periph_clk_div_ctl0 &
CLK_PERIPH_DIV_DEF_MASK) >>
QM_CCU_PERIPH_PCLK_DIV_OFFSET);
}

View file

@ -35,7 +35,7 @@
#endif
#include "soc_watch.h"
void power_soc_sleep()
void qm_power_soc_sleep()
{
/* Go to sleep */
QM_SCSS_PMU->slp_cfg &= ~QM_SCSS_SLP_CFG_LPMODE_EN;
@ -45,7 +45,7 @@ void power_soc_sleep()
QM_SCSS_PMU->pm1c |= QM_SCSS_PM1C_SLPEN;
}
void power_soc_deep_sleep()
void qm_power_soc_deep_sleep()
{
/* Switch to linear regulators.
* For low power deep sleep mode, it is a requirement that the platform
@ -74,7 +74,7 @@ extern uint32_t *__x86_restore_info;
* qm_x86_restore_context() after wake up.
*/
uint32_t sp_restore_storage;
void power_soc_sleep_restore()
void qm_power_soc_sleep_restore()
{
/*
* Save x86 restore trap address.
@ -88,10 +88,10 @@ void power_soc_sleep_restore()
qm_x86_save_context(sp_restore_storage);
/* Set restore flags. */
power_soc_set_x86_restore_flag();
qm_power_soc_set_x86_restore_flag();
/* Enter sleep. */
power_soc_sleep();
qm_power_soc_sleep();
/*
* Restore x86 execution context.
@ -102,7 +102,7 @@ void power_soc_sleep_restore()
qm_x86_restore_context(sleep_restore_trap, sp_restore_storage);
}
void power_soc_deep_sleep_restore()
void qm_power_soc_deep_sleep_restore()
{
/*
* Save x86 restore trap address.
@ -116,10 +116,10 @@ void power_soc_deep_sleep_restore()
qm_x86_save_context(sp_restore_storage);
/* Set restore flags. */
power_soc_set_x86_restore_flag();
qm_power_soc_set_x86_restore_flag();
/* Enter sleep. */
power_soc_deep_sleep();
qm_power_soc_deep_sleep();
/*
* Restore x86 execution context.
@ -130,7 +130,7 @@ void power_soc_deep_sleep_restore()
qm_x86_restore_context(deep_sleep_restore_trap, sp_restore_storage);
}
void power_sleep_wait()
void qm_power_sleep_wait()
{
/*
* Save x86 restore trap address.
@ -144,11 +144,11 @@ void power_sleep_wait()
qm_x86_save_context(sp_restore_storage);
/* Set restore flags. */
power_soc_set_x86_restore_flag();
qm_power_soc_set_x86_restore_flag();
/* Enter C2 and stay in it until sleep and wake-up. */
while (1) {
power_cpu_c2();
qm_power_cpu_c2();
}
/*
@ -160,14 +160,14 @@ void power_sleep_wait()
qm_x86_restore_context(sleep_restore_trap, sp_restore_storage);
}
void power_soc_set_x86_restore_flag(void)
void qm_power_soc_set_x86_restore_flag(void)
{
QM_SCSS_GP->gps0 |= BIT(QM_GPS0_BIT_X86_WAKEUP);
}
#endif /* ENABLE_RESTORE_CONTEXT */
#if (!QM_SENSOR)
void power_cpu_c1()
void qm_power_cpu_c1()
{
SOC_WATCH_LOG_EVENT(SOCW_EVENT_HALT, 0);
/*
@ -182,7 +182,7 @@ void power_cpu_c1()
"hlt\n\t");
}
void power_cpu_c2()
void qm_power_cpu_c2()
{
QM_SCSS_CCU->ccu_lp_clk_ctl &= ~QM_SCSS_CCU_C2_LP_EN;
SOC_WATCH_LOG_EVENT(SOCW_EVENT_REGISTER, SOCW_REG_CCU_LP_CLK_CTL);
@ -192,7 +192,7 @@ void power_cpu_c2()
QM_SCSS_PMU->p_lvl2;
}
void power_cpu_c2lp()
void qm_power_cpu_c2lp()
{
QM_SCSS_CCU->ccu_lp_clk_ctl |= QM_SCSS_CCU_C2_LP_EN;
SOC_WATCH_LOG_EVENT(SOCW_EVENT_REGISTER, SOCW_REG_CCU_LP_CLK_CTL);

View file

@ -0,0 +1,122 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "qm_common.h"
#include "qm_soc_regs.h"
#include "qm_interrupt_router.h"
#include "qm_interrupt_router_regs.h"
/* Event router base addr for LMT interrupt routing, for linear IRQ mapping */
#define INTERRUPT_ROUTER_LMT_INT_MASK_BASE \
(&QM_INTERRUPT_ROUTER->i2c_master_0_int_mask)
void _qm_ir_unmask_int(uint32_t irq, uint32_t register_offset)
{
uint32_t *interrupt_router_intmask;
/* Route peripheral interrupt to Lakemont/Sensor Subsystem */
interrupt_router_intmask =
(uint32_t *)INTERRUPT_ROUTER_LMT_INT_MASK_BASE + register_offset;
if (!QM_IR_INT_LOCK_MASK(*interrupt_router_intmask)) {
switch (irq) {
case QM_IRQ_COMPARATOR_0_INT:
/*
* Comparator mask uses 1 bit per comparator rather than the
* generic host mask.
*/
#if (QM_SENSOR)
QM_INTERRUPT_ROUTER->comparator_0_ss_int_mask &=
~0x0007ffff;
#else
QM_INTERRUPT_ROUTER->comparator_0_host_int_mask &=
~0x0007ffff;
#endif
break;
case QM_IRQ_MAILBOX_0_INT:
/* Masking MAILBOX irq is done inside mbox driver */
break;
case QM_IRQ_DMA_0_ERROR_INT:
/*
* DMA error mask uses 1 bit per DMA channel rather than the
* generic host mask.
*/
#if (QM_SENSOR)
*interrupt_router_intmask &= ~QM_IR_DMA_ERROR_SS_MASK;
#else
*interrupt_router_intmask &= ~QM_IR_DMA_ERROR_HOST_MASK;
#endif
break;
default:
QM_IR_UNMASK_INTERRUPTS(*interrupt_router_intmask);
break;
}
}
}
void _qm_ir_mask_int(uint32_t irq, uint32_t register_offset)
{
uint32_t *interrupt_router_intmask;
/* Route peripheral interrupt to Lakemont/Sensor Subsystem */
interrupt_router_intmask =
(uint32_t *)INTERRUPT_ROUTER_LMT_INT_MASK_BASE + register_offset;
if (!QM_IR_INT_LOCK_MASK(*interrupt_router_intmask)) {
switch (irq) {
case QM_IRQ_COMPARATOR_0_INT:
#if (QM_SENSOR)
QM_INTERRUPT_ROUTER->comparator_0_ss_int_mask |=
0x0007ffff;
#else
QM_INTERRUPT_ROUTER->comparator_0_host_int_mask |=
0x0007ffff;
#endif
break;
case QM_IRQ_MAILBOX_0_INT:
/* Masking MAILBOX irq is done inside mbox driver */
break;
case QM_IRQ_DMA_0_ERROR_INT:
/*
* DMA error mask uses 1 bit per DMA channel rather than the
* generic host mask.
*/
#if (QM_SENSOR)
*interrupt_router_intmask |= QM_IR_DMA_ERROR_SS_MASK;
#else
*interrupt_router_intmask |= QM_IR_DMA_ERROR_HOST_MASK;
#endif
break;
default:
QM_IR_MASK_INTERRUPTS(*interrupt_router_intmask);
break;
}
}
}

View file

@ -31,6 +31,8 @@
#include "ss_power_states.h"
#include "qm_isr.h"
#include "qm_sensor_regs.h"
#include "soc_watch.h"
#include "qm_common.h"
/* Sensor Subsystem sleep operand definition.
* Only a subset applies as internal sensor RTC
@ -51,11 +53,17 @@
* - [4] : Interrupt enable
* - [3:0] : Interrupt threshold value
*/
#define QM_SS_SLEEP_MODE_CORE_OFF (0x0)
#define QM_SS_SLEEP_MODE_CORE_OFF_TIMER_OFF (0x20)
#define QM_SS_SLEEP_MODE_CORE_TIMERS_RTC_OFF (0x60)
void ss_power_soc_lpss_enable()
#define SLEEP_INT_EN BIT(4)
#define SLEEP_TIMER_ON (0x0)
#define SLEEP_TIMER_OFF (0x20)
#define SLEEP_TIMER_RTC_OFF (0x60)
#define SS_STATE_1_TIMER_ON (SLEEP_TIMER_ON | SLEEP_INT_EN)
#define SS_STATE_1_TIMER_OFF (SLEEP_TIMER_OFF | SLEEP_INT_EN)
#define SS_STATE_2 (SLEEP_TIMER_RTC_OFF | SLEEP_INT_EN)
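A quick sanity check of how these operands compose, using only the values defined above: SS_STATE_1_TIMER_OFF is 0x20 | BIT(4) = 0x30, and SS_STATE_2 is 0x60 | BIT(4) = 0x70; with an interrupt threshold of, say, 2 OR-ed in at run time, the final SS2 sleep operand would be 0x72.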
void qm_ss_power_soc_lpss_enable()
{
uint32_t creg_mst0_ctrl = 0;
@ -78,7 +86,7 @@ void ss_power_soc_lpss_enable()
SOC_WATCH_LOG_EVENT(SOCW_EVENT_REGISTER, SOCW_REG_CCU_LP_CLK_CTL);
}
void ss_power_soc_lpss_disable()
void qm_ss_power_soc_lpss_disable()
{
uint32_t creg_mst0_ctrl = 0;
@ -105,77 +113,57 @@ void ss_power_soc_lpss_disable()
* SLEEP + sleep operand
 * __builtin_arc_sleep is not used here as it does not propagate the sleep
 * operand.
*/
void ss_power_cpu_ss1(const ss_power_cpu_ss1_mode_t mode)
void qm_ss_power_cpu_ss1(const qm_ss_power_cpu_ss1_mode_t mode)
{
/* The sensor cannot be woken up with an edge triggered
* interrupt from the RTC and the AON Counter.
* Switch to Level triggered interrupts and restore
* the setting when waking up.
*/
__builtin_arc_sr(QM_IRQ_RTC_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_LEVEL_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
uint32_t priority;
__builtin_arc_sr(QM_IRQ_AONPT_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_LEVEL_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
priority =
(__builtin_arc_lr(QM_SS_AUX_STATUS32) & QM_SS_STATUS32_E_MASK) >> 1;
SOC_WATCH_LOG_EVENT(SOCW_ARC_EVENT_SS1, 0);
/* Enter SS1 */
switch (mode) {
case SS_POWER_CPU_SS1_TIMER_OFF:
__asm__ __volatile__(
"sleep %0"
:
: "i"(QM_SS_SLEEP_MODE_CORE_OFF_TIMER_OFF));
case QM_SS_POWER_CPU_SS1_TIMER_OFF:
__asm__ __volatile__("sleep %0"
:
: "r"(SS_STATE_1_TIMER_OFF | priority)
: "memory", "cc");
break;
case SS_POWER_CPU_SS1_TIMER_ON:
case QM_SS_POWER_CPU_SS1_TIMER_ON:
default:
__asm__ __volatile__("sleep %0"
:
: "i"(QM_SS_SLEEP_MODE_CORE_OFF));
: "r"(SS_STATE_1_TIMER_ON | priority)
: "memory", "cc");
break;
}
/* Restore the RTC and AONC to edge-triggered interrupts when waking up. */
__builtin_arc_sr(QM_IRQ_RTC_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_EDGE_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
__builtin_arc_sr(QM_IRQ_AONPT_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_EDGE_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
}
/* Enter SS2 :
* SLEEP + sleep operand
* __builtin_arc_sleep is not used here as it does not propagate sleep operand.
*/
void ss_power_cpu_ss2(void)
void qm_ss_power_cpu_ss2(void)
{
/* The sensor cannot be woken up with an edge triggered
* interrupt from the RTC and the AON Counter.
* Switch to Level triggered interrupts and restore
* the setting when waking up.
*/
__builtin_arc_sr(QM_IRQ_RTC_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_LEVEL_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
uint32_t priority;
__builtin_arc_sr(QM_IRQ_AONPT_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_LEVEL_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
priority =
(__builtin_arc_lr(QM_SS_AUX_STATUS32) & QM_SS_STATUS32_E_MASK) >> 1;
SOC_WATCH_LOG_EVENT(SOCW_ARC_EVENT_SS2, 0);
/* Enter SS2 */
__asm__ __volatile__("sleep %0"
:
: "i"(QM_SS_SLEEP_MODE_CORE_TIMERS_RTC_OFF));
/* Restore the RTC and AONC to edge-triggered interrupts when waking up. */
__builtin_arc_sr(QM_IRQ_RTC_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_EDGE_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
__builtin_arc_sr(QM_IRQ_AONPT_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_EDGE_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
: "r"(SS_STATE_2 | priority)
: "memory", "cc");
}
#if (ENABLE_RESTORE_CONTEXT)
extern uint32_t arc_restore_addr;
uint32_t cpu_context[33];
void ss_power_soc_sleep_restore(void)
void qm_ss_power_soc_sleep_restore(void)
{
/*
* Save sensor restore trap address.
@ -189,10 +177,10 @@ void ss_power_soc_sleep_restore(void)
qm_ss_save_context(cpu_context);
/* Set restore flags. */
power_soc_set_ss_restore_flag();
qm_power_soc_set_ss_restore_flag();
/* Enter sleep. */
power_soc_sleep();
qm_power_soc_sleep();
/*
* Restore sensor execution context.
@ -202,7 +190,7 @@ void ss_power_soc_sleep_restore(void)
*/
qm_ss_restore_context(sleep_restore_trap, cpu_context);
}
void ss_power_soc_deep_sleep_restore(void)
void qm_ss_power_soc_deep_sleep_restore(void)
{
/*
* Save sensor restore trap address.
@ -216,10 +204,10 @@ void ss_power_soc_deep_sleep_restore(void)
qm_ss_save_context(cpu_context);
/* Set restore flags. */
power_soc_set_ss_restore_flag();
qm_power_soc_set_ss_restore_flag();
/* Enter sleep. */
power_soc_deep_sleep();
qm_power_soc_deep_sleep();
/*
* Restore sensor execution context.
@ -230,7 +218,7 @@ void ss_power_soc_deep_sleep_restore(void)
qm_ss_restore_context(deep_sleep_restore_trap, cpu_context);
}
void ss_power_sleep_wait(void)
void qm_ss_power_sleep_wait(void)
{
/*
* Save sensor restore trap address.
@ -244,11 +232,11 @@ void ss_power_sleep_wait(void)
qm_ss_save_context(cpu_context);
/* Set restore flags. */
power_soc_set_ss_restore_flag();
qm_power_soc_set_ss_restore_flag();
/* Enter SS1 and stay in it until sleep and wake-up. */
while (1) {
ss_power_cpu_ss1(SS_POWER_CPU_SS1_TIMER_ON);
qm_ss_power_cpu_ss1(QM_SS_POWER_CPU_SS1_TIMER_ON);
}
/*
@ -260,7 +248,7 @@ void ss_power_sleep_wait(void)
qm_ss_restore_context(sleep_restore_trap, cpu_context);
}
void power_soc_set_ss_restore_flag(void)
void qm_power_soc_set_ss_restore_flag(void)
{
QM_SCSS_GP->gps0 |= BIT(QM_GPS0_BIT_SENSOR_WAKEUP);
}
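A minimal sketch of how startup code might consume this flag after wake-up (register and bit names are the ones used above; the surrounding boot flow is assumed):
if (QM_SCSS_GP->gps0 & BIT(QM_GPS0_BIT_SENSOR_WAKEUP)) {
	/* Warm boot: resume the saved sensor execution context. */
} else {
	/* Cold boot: run normal initialization. */
}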

View file

@ -315,6 +315,37 @@ int clk_sys_usb_enable(void);
*/
int clk_sys_usb_disable(void);
/**
* Enable the DMA clock.
*
* Enable the DMA clock by setting the corresponding bit in the AHB Control
* register. By default the DMA clock is disabled.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int clk_dma_enable(void);
/**
* Disable the DMA clock.
*
* Disable the DMA clock by clearing the corresponding bit in the AHB Control
* register.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int clk_dma_disable(void);
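A usage sketch for the two declarations above, gating the DMA clock around a transfer (the transfer itself is elided; error handling omitted):
clk_dma_enable();
/* ... set up and run a DMA transfer ... */
clk_dma_disable();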
/**
* Get I2C clock frequency in MHz.
*
 * @return I2C clock frequency in MHz.
*/
uint32_t get_i2c_clk_freq_in_mhz(void);
/**
* @}
*/

View file

@ -41,7 +41,7 @@
* - Sleep
*
* LPSS can only be enabled from the Sensor core,
* refer to @ref ss_power_soc_lpss_enable for further details.
* refer to @ref qm_ss_power_soc_lpss_enable for further details.
*
* @defgroup groupSoCPower Quark SE SoC Power states
* @{
@ -63,7 +63,7 @@
* - AON Timer Interrupt
* - RTC Interrupt
*/
void power_soc_sleep(void);
void qm_power_soc_sleep(void);
/**
* Enter SoC deep sleep state.
@ -83,7 +83,7 @@ void power_soc_sleep(void);
*
 * This function puts the 1P8V and 3P3V regulators into linear mode.
*/
void power_soc_deep_sleep(void);
void qm_power_soc_deep_sleep(void);
#if (ENABLE_RESTORE_CONTEXT) && (!QM_SENSOR)
/**
@ -99,9 +99,9 @@ void power_soc_deep_sleep(void);
*
* This function calls qm_x86_save_context and qm_x86_restore_context
* in order to restore execution where it stopped.
* All power management transitions are done by power_soc_sleep().
* All power management transitions are done by qm_power_soc_sleep().
*/
void power_soc_sleep_restore(void);
void qm_power_soc_sleep_restore(void);
/**
* Enter SoC deep sleep state and restore after wake up.
@ -116,25 +116,25 @@ void power_soc_sleep_restore(void);
*
* This function calls qm_x86_save_context and qm_x86_restore_context
* in order to restore execution where it stopped.
* All power management transitions are done by power_soc_deep_sleep().
* All power management transitions are done by qm_power_soc_deep_sleep().
*/
void power_soc_deep_sleep_restore(void);
void qm_power_soc_deep_sleep_restore(void);
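A minimal call sketch for the restore variants (it assumes a wake source, e.g. an RTC alarm, has already been configured):
qm_power_soc_deep_sleep_restore(); /* Execution resumes here after wake-up. */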
/**
* Save context, enter x86 C2 power save state and restore after wake up.
*
* This routine is same as power_soc_sleep_restore(), just instead of
 * This routine is the same as qm_power_soc_sleep_restore(), except that
 * instead of going to sleep it enters the C2 power save state.
 * Note: this function spins in a while(1) loop until sleep is entered
 * (and exited); the power state change itself is managed by the
 * other core.
*/
void power_sleep_wait(void);
void qm_power_sleep_wait(void);
/**
 * Enable the x86 startup restore flag; see the GPS0 defines in qm_soc_regs.h.
*/
void power_soc_set_x86_restore_flag(void);
void qm_power_soc_set_x86_restore_flag(void);
#endif /* ENABLE_RESTORE_CONTEXT */
@ -164,7 +164,7 @@ void power_soc_set_x86_restore_flag(void);
* A wake event causes the Host to transition to C0.<BR>
* A wake event is a host interrupt.
*/
void power_cpu_c1(void);
void qm_power_cpu_c1(void);
/**
* Enter Host C2 state or SoC LPSS state.
@ -185,7 +185,7 @@ void power_cpu_c1(void);
* - LPSS wake events applies.
* - If the Sensor Subsystem wakes the SoC from LPSS, Host is back in C2.
*/
void power_cpu_c2(void);
void qm_power_cpu_c2(void);
/**
* Enter Host C2LP state or SoC LPSS state.
@ -207,7 +207,7 @@ void power_cpu_c2(void);
* - If the Sensor Subsystem wakes the SoC from LPSS,
* Host transitions back to C2LP.
*/
void power_cpu_c2lp(void);
void qm_power_cpu_c2lp(void);
#endif
#if (ENABLE_RESTORE_CONTEXT) && (!QM_SENSOR) && (!UNIT_TEST)
@ -240,12 +240,24 @@ void power_cpu_c2lp(void);
"lea %[stackpointer], %%eax\n\t" \
"pushfl\n\t" \
"pushal\n\t" \
"movl %%dr0, %%edx\n\t" \
"pushl %%edx\n\t" \
"movl %%dr1, %%edx\n\t" \
"pushl %%edx\n\t" \
"movl %%dr2, %%edx\n\t" \
"pushl %%edx\n\t" \
"movl %%dr3, %%edx\n\t" \
"pushl %%edx\n\t" \
"movl %%dr6, %%edx\n\t" \
"pushl %%edx\n\t" \
"movl %%dr7, %%edx\n\t" \
"pushl %%edx\n\t" \
"movl %%esp, (%%eax)\n\t" \
: /* Output operands. */ \
: /* Input operands. */ \
[stackpointer] "m"(stack_pointer) \
: /* Clobbered registers list. */ \
"eax")
"eax", "edx")
/* Restore trap. This routine recovers the stack pointer into esp and retrieves
 * the debug registers, 'idtr', EFLAGS and general-purpose registers from the
 * stack.
@ -257,6 +269,18 @@ void power_cpu_c2lp(void);
__asm__ __volatile__(#_restore_label ":\n\t" \
"lea %[stackpointer], %%eax\n\t" \
"movl (%%eax), %%esp\n\t" \
"popl %%edx\n\t" \
"movl %%edx, %%dr7\n\t" \
"popl %%edx\n\t" \
"movl %%edx, %%dr6\n\t" \
"popl %%edx\n\t" \
"movl %%edx, %%dr3\n\t" \
"popl %%edx\n\t" \
"movl %%edx, %%dr2\n\t" \
"popl %%edx\n\t" \
"movl %%edx, %%dr1\n\t" \
"popl %%edx\n\t" \
"movl %%edx, %%dr0\n\t" \
"popal\n\t" \
"popfl\n\t" \
"lidt (%%esp)\n\t" \
@ -265,7 +289,8 @@ void power_cpu_c2lp(void);
: /* Input operands. */ \
[stackpointer] "m"(stack_pointer) \
: /* Clobbered registers list. */ \
"eax")
"eax", "edx")
#else
#define qm_x86_set_resume_vector(_restore_label, shared_mem)
#define qm_x86_save_context(stack_pointer)

View file

@ -0,0 +1,71 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_INTERRUPT_ROUTER_H__
#define __QM_INTERRUPT_ROUTER_H__
/**
* Quark SE SoC Interrupt Router registers.
*
* @defgroup groupQUARKSEINTERRUPTROUTER SoC Interrupt Router (SE)
* @{
*/
void _qm_ir_mask_int(uint32_t irq, uint32_t register_offset);
void _qm_ir_unmask_int(uint32_t irq, uint32_t register_offset);
/*
* Unmask a given IRQ in the Interrupt Router.
*
* @note Not for use with sensor sub-system peripheral IRQs.
* For sensor peripherals use the macros defined in qm_interrupt_router_regs.h.
*
* @param[in] irq IRQ number. Must be of type QM_IRQ_XXX.
*/
#define QM_IR_UNMASK_INT(irq) \
do { \
_qm_ir_unmask_int(irq, irq##_MASK_OFFSET); \
} while (0);
/*
* Mask a given IRQ in the Interrupt Router.
*
* @note Not for use with sensor sub-system peripheral IRQs.
* For sensor peripherals use the macros defined in qm_interrupt_router_regs.h.
*
* @param[in] irq IRQ number. Must be of type QM_IRQ_XXX.
*/
#define QM_IR_MASK_INT(irq) \
do { \
_qm_ir_mask_int(irq, irq##_MASK_OFFSET); \
} while (0);
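A usage sketch for the two macros above (QM_IRQ_PMU_0_INT is one of the QM_IRQ_XXX identifiers defined elsewhere in this tree):
QM_IR_UNMASK_INT(QM_IRQ_PMU_0_INT); /* Route the IRQ to the active core. */
/* ... handle interrupts ... */
QM_IR_MASK_INT(QM_IRQ_PMU_0_INT);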
/** @} */
#endif /* __QM_INTERRUPT_ROUTER_H__ */

View file

@ -31,25 +31,38 @@
#define __QM_INTERRUPT_ROUTER_REGS_H__
/**
* Quark SE SoC Event Router registers.
* Quark SE SoC Interrupt Router registers.
*
* @defgroup groupQUARKSESEEVENTROUTER SoC Event Router (SE)
* @defgroup groupQUARKSEINTERRUPTROUTER SoC Interrupt Router (SE)
* @{
*/
/**
* Masks for single source interrupts in the Event Router.
* Masks for single source interrupts in the Interrupt Router.
* To enable: reg &= ~(MASK)
* To disable: reg |= MASK;
*/
#define QM_IR_INT_LMT_MASK BIT(0)
#define QM_IR_INT_SS_MASK BIT(8)
/* Masks for single source halts in the Event Router. */
/* Masks for single source halts in the Interrupt Router. */
#define QM_IR_INT_LMT_HALT_MASK BIT(16)
#define QM_IR_INT_SS_HALT_MASK BIT(24)
/* Event Router Unmask interrupts for a peripheral. */
/**
* Interrupt Router macros to determine if the specified peripheral interrupt
* mask has been locked.
*/
#define QM_IR_SS_INT_LOCK_HALT_MASK(_peripheral_) \
(QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(3))
#define QM_IR_LMT_INT_LOCK_HALT_MASK(_peripheral_) \
(QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(2))
#define QM_IR_SS_INT_LOCK_MASK(_peripheral_) \
(QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(1))
#define QM_IR_LMT_INT_LOCK_MASK(_peripheral_) \
(QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(0))
/* Interrupt Router Unmask interrupts for a peripheral. */
#define QM_IR_UNMASK_LMT_INTERRUPTS(_peripheral_) \
(_peripheral_ &= ~(QM_IR_INT_LMT_MASK))
#define QM_IR_UNMASK_SS_INTERRUPTS(_peripheral_) \
@ -81,6 +94,103 @@
#define QM_IR_GET_SS_HALT_MASK(_peripheral_) \
(_peripheral_ & QM_IR_INT_SS_HALT_MASK)
/**
* Mailbox Interrupt Mask enable/disable definitions
*
* \#defines use the channel number to determine the register and bit shift to
* use.
* The interrupt destination adds an offset to the bit shift.
*/
#define QM_IR_MBOX_ENABLE_LMT_INT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask &= \
~(BIT(N + QM_MBOX_HOST_MASK_OFFSET))
#define QM_IR_MBOX_DISABLE_LMT_INT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask |= \
(BIT(N + QM_MBOX_HOST_MASK_OFFSET))
#define QM_IR_MBOX_ENABLE_SS_INT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask &= \
~(BIT(N + QM_MBOX_SS_MASK_OFFSET))
#define QM_IR_MBOX_DISABLE_SS_INT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask |= \
(BIT(N + QM_MBOX_SS_MASK_OFFSET))
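For example, to route mailbox channel 0 interrupts to the Lakemont core by clearing its mask bit (QM_MBOX_CH_0 comes from qm_mailbox_defs.h):
QM_IR_MBOX_ENABLE_LMT_INT_MASK(QM_MBOX_CH_0);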
/**
* Mailbox Interrupt Halt Mask enable/disable definitions
*
* \#defines use the channel number to determine the register and bit shift to
* use.
* The interrupt destination adds an offset to the bit shift,
* see above for the bit position layout
*/
#define QM_IR_MBOX_ENABLE_LMT_INT_HALT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask &= \
~(BIT(N + QM_MBOX_HOST_HALT_MASK_OFFSET))
#define QM_IR_MBOX_DISABLE_LMT_INT_HALT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask |= \
(BIT(N + QM_MBOX_HOST_HALT_MASK_OFFSET))
#define QM_IR_MBOX_ENABLE_SS_INT_HALT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask &= \
~(BIT(N + QM_MBOX_SS_HALT_MASK_OFFSET))
#define QM_IR_MBOX_DISABLE_SS_INT_HALT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask |= \
(BIT(N + QM_MBOX_SS_HALT_MASK_OFFSET))
/**
* Mailbox interrupt mask definitions to return the current mask values
*/
#define QM_IR_MBOX_SS_INT_HALT_MASK \
((QM_MBOX_SS_HALT_MASK_MASK & \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask) >> \
QM_MBOX_SS_HALT_MASK_OFFSET)
#define QM_IR_MBOX_LMT_INT_HALT_MASK \
((QM_MBOX_HOST_HALT_MASK_MASK & \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask) >> \
QM_MBOX_SS_HALT_MASK_OFFSET)
#define QM_IR_MBOX_SS_ALL_INT_MASK \
((QM_MBOX_SS_MASK_MASK & QM_INTERRUPT_ROUTER->mailbox_0_int_mask) >> \
QM_MBOX_SS_MASK_OFFSET)
#define QM_IR_MBOX_LMT_ALL_INT_MASK \
(QM_MBOX_HOST_MASK_MASK & QM_INTERRUPT_ROUTER->mailbox_0_int_mask)
/**
* Mailbox interrupt macros to determine if the specified mailbox interrupt mask
* has been locked.
*/
#define QM_IR_MBOX_SS_INT_LOCK_HALT_MASK(N) \
(QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(3))
#define QM_IR_MBOX_LMT_INT_LOCK_HALT_MASK(N) \
(QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(2))
#define QM_IR_MBOX_SS_INT_LOCK_MASK(N) \
(QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(1))
#define QM_IR_MBOX_LMT_INT_LOCK_MASK(N) \
(QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(0))
/**
* Mailbox macros to check if a particular mailbox has been routed to a core.
*/
#define QM_IR_MBOX_IS_LMT_INT_MASK_EN(N) \
~(QM_IR_MBOX_LMT_ALL_INT_MASK & ((1 << (N))))
#define QM_IR_MBOX_IS_SS_INT_MASK_EN(N) \
~(QM_IR_MBOX_SS_ALL_INT_MASK & ((1 << (QM_MBOX_SS_MASK_OFFSET + (N)))))
#define QM_IR_UNMASK_COMPARATOR_LMT_INTERRUPTS(n) \
(QM_INTERRUPT_ROUTER->comparator_0_host_int_mask &= ~(BIT(n)))
#define QM_IR_MASK_COMPARATOR_LMT_INTERRUPTS(n) \
(QM_INTERRUPT_ROUTER->comparator_0_host_int_mask |= BIT(n))
#define QM_IR_UNMASK_COMPARATOR_LMT_HALTS(n) \
(QM_INTERRUPT_ROUTER->comparator_0_host_halt_int_mask &= ~(BIT(n)))
#define QM_IR_MASK_COMPARATOR_LMT_HALTS(n) \
(QM_INTERRUPT_ROUTER->comparator_0_host_halt_int_mask |= BIT(n))
#define QM_IR_UNMASK_COMPARATOR_SS_INTERRUPTS(n) \
(QM_INTERRUPT_ROUTER->comparator_0_ss_int_mask &= ~(BIT(n)))
#define QM_IR_MASK_COMPARATOR_SS_INTERRUPTS(n) \
(QM_INTERRUPT_ROUTER->comparator_0_ss_int_mask |= BIT(n))
#define QM_IR_UNMASK_COMPARATOR_SS_HALTS(n) \
(QM_INTERRUPT_ROUTER->comparator_0_ss_halt_int_mask &= ~(BIT(n)))
#define QM_IR_MASK_COMPARATOR_SS_HALTS(n) \
(QM_INTERRUPT_ROUTER->comparator_0_ss_halt_int_mask |= BIT(n))
/* Define macros for use by the active core. */
#if (QM_LAKEMONT)
#define QM_IR_UNMASK_INTERRUPTS(_peripheral_) \
@ -90,11 +200,22 @@
#define QM_IR_UNMASK_HALTS(_peripheral_) QM_IR_UNMASK_LMT_HALTS(_peripheral_)
#define QM_IR_MASK_HALTS(_peripheral_) QM_IR_MASK_LMT_HALTS(_peripheral_)
#define QM_IR_INT_LOCK_MASK(_peripheral_) QM_IR_LMT_INT_LOCK_MASK(_peripheral_)
#define QM_IR_INT_LOCK_HALT_MASK(_peripheral_) \
QM_IR_LMT_INT_LOCK_MASK(_peripheral_)
#define QM_IR_INT_MASK QM_IR_INT_LMT_MASK
#define QM_IR_INT_HALT_MASK QM_IR_INT_LMT_HALT_MASK
#define QM_IR_GET_MASK(_peripheral_) QM_IR_GET_LMT_MASK(_peripheral_)
#define QM_IR_GET_HALT_MASK(_peripheral_) QM_IR_GET_LMT_HALT_MASK(_peripheral_)
#define QM_IR_UNMASK_COMPARATOR_INTERRUPTS(n) \
QM_IR_UNMASK_COMPARATOR_LMT_INTERRUPTS(n)
#define QM_IR_MASK_COMPARATOR_INTERRUPTS(n) \
QM_IR_MASK_COMPARATOR_LMT_INTERRUPTS(n)
#define QM_IR_UNMASK_COMPARATOR_HALTS(n) QM_IR_UNMASK_COMPARATOR_LMT_HALTS(n)
#define QM_IR_MASK_COMPARATOR_HALTS(n) QM_IR_MASK_COMPARATOR_LMT_HALTS(n)
#elif(QM_SENSOR)
#define QM_IR_UNMASK_INTERRUPTS(_peripheral_) \
QM_IR_UNMASK_SS_INTERRUPTS(_peripheral_)
@ -103,10 +224,22 @@
#define QM_IR_UNMASK_HALTS(_peripheral_) QM_IR_UNMASK_SS_HALTS(_peripheral_)
#define QM_IR_MASK_HALTS(_peripheral_) QM_IR_MASK_SS_HALTS(_peripheral_)
#define QM_IR_INT_LOCK_MASK(_peripheral_) QM_IR_SS_INT_LOCK_MASK(_peripheral_)
#define QM_IR_INT_LOCK_HALT_MASK(_peripheral_) \
QM_IR_SS_INT_LOCK_MASK(_peripheral_)
#define QM_IR_INT_MASK QM_IR_INT_SS_MASK
#define QM_IR_INT_HALT_MASK QM_IR_INT_SS_HALT_MASK
#define QM_IR_GET_MASK(_peripheral_) QM_IR_GET_SS_MASK(_peripheral_)
#define QM_IR_GET_HALT_MASK(_peripheral_) QM_IR_GET_SS_HALT_MASK(_peripheral_)
#define QM_IR_UNMASK_COMPARATOR_INTERRUPTS(n) \
QM_IR_UNMASK_COMPARATOR_SS_INTERRUPTS(n)
#define QM_IR_MASK_COMPARATOR_INTERRUPTS(n) \
QM_IR_MASK_COMPARATOR_SS_INTERRUPTS(n)
#define QM_IR_UNMASK_COMPARATOR_HALTS(n) QM_IR_UNMASK_COMPARATOR_SS_HALTS(n)
#define QM_IR_MASK_COMPARATOR_HALTS(n) QM_IR_MASK_COMPARATOR_SS_HALTS(n)
#else
#error "No active core selected."
#endif
@ -202,6 +335,12 @@ qm_interrupt_router_reg_t test_interrupt_router;
#define QM_IR_DMA_ERROR_HOST_MASK (0x000000FF)
#define QM_IR_DMA_ERROR_SS_MASK (0x0000FF00)
#if (QM_LAKEMONT)
#define QM_IR_DMA_ERROR_MASK QM_IR_DMA_ERROR_HOST_MASK
#elif(QM_SENSOR)
#define QM_IR_DMA_ERROR_MASK QM_IR_DMA_ERROR_SS_MASK
#endif
/** @} */
#endif /* __QM_INTERRUPT_ROUTER_REGS_H__ */

View file

@ -0,0 +1,70 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_MAILBOX_DEFS_H__
#define __QM_MAILBOX_DEFS_H__
/**
* Mailbox definitions.
*
* @defgroup groupMBOX_DEFS Mailbox Definitions
* @{
*/
/**
* Mailbox channel identifiers
*/
typedef enum {
QM_MBOX_CH_0 = 0, /**< Channel 0. */
QM_MBOX_CH_1, /**< Channel 1. */
QM_MBOX_CH_2, /**< Channel 2. */
QM_MBOX_CH_3, /**< Channel 3. */
QM_MBOX_CH_4, /**< Channel 4. */
QM_MBOX_CH_5, /**< Channel 5. */
QM_MBOX_CH_6, /**< Channel 6. */
QM_MBOX_CH_7, /**< Channel 7. */
} qm_mbox_ch_t;
/**
 * Definition of the mailbox direction of operation.
* The direction of communication for each channel is configurable by the user.
* The list below describes the possible communication directions for each
* channel.
*/
typedef enum {
QM_MBOX_UNUSED = 0,
QM_MBOX_TO_LMT, /**< Lakemont core as destination */
QM_MBOX_TO_SS, /**< Sensor Sub-System core as destination */
} qm_mbox_destination_t;
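A short sketch pairing the two enums above, as a driver configuration might (the configuration structure itself lives in the mailbox driver and is not shown):
qm_mbox_ch_t ch = QM_MBOX_CH_0;              /* Use channel 0. */
qm_mbox_destination_t dest = QM_MBOX_TO_LMT; /* Lakemont is the receiver. */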
/**
* @}
*/
#endif /* __QM_MAILBOX_DEFS_H__ */

View file

@ -0,0 +1,461 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_PIN_FUNCTIONS_H__
#define __QM_PIN_FUNCTIONS_H__
/**
* SoC Pins definition.
*
* @defgroup group SOC_PINS
* @{
*/
#include "qm_common.h"
/*
* This file provides an abstraction layer for pin numbers and pin functions.
*/
/* Package pins to pin IDs. */
/* BGA144 package. */
#define QM_PIN_ID_BGA144_D12 QM_PIN_ID_0
#define QM_PIN_ID_BGA144_E10 QM_PIN_ID_1
#define QM_PIN_ID_BGA144_E11 QM_PIN_ID_2
#define QM_PIN_ID_BGA144_E12 QM_PIN_ID_3
#define QM_PIN_ID_BGA144_F9 QM_PIN_ID_4
#define QM_PIN_ID_BGA144_F10 QM_PIN_ID_5
#define QM_PIN_ID_BGA144_L9 QM_PIN_ID_6
#define QM_PIN_ID_BGA144_K8 QM_PIN_ID_7
#define QM_PIN_ID_BGA144_F11 QM_PIN_ID_8
#define QM_PIN_ID_BGA144_F12 QM_PIN_ID_9
#define QM_PIN_ID_BGA144_G9 QM_PIN_ID_10
#define QM_PIN_ID_BGA144_G10 QM_PIN_ID_11
#define QM_PIN_ID_BGA144_G11 QM_PIN_ID_12
#define QM_PIN_ID_BGA144_G12 QM_PIN_ID_13
#define QM_PIN_ID_BGA144_K10 QM_PIN_ID_14
#define QM_PIN_ID_BGA144_K9 QM_PIN_ID_15
#define QM_PIN_ID_BGA144_H11 QM_PIN_ID_16
#define QM_PIN_ID_BGA144_H12 QM_PIN_ID_17
#define QM_PIN_ID_BGA144_E8 QM_PIN_ID_18
#define QM_PIN_ID_BGA144_E9 QM_PIN_ID_19
#define QM_PIN_ID_BGA144_C12 QM_PIN_ID_20
#define QM_PIN_ID_BGA144_B12 QM_PIN_ID_21
#define QM_PIN_ID_BGA144_D11 QM_PIN_ID_22
#define QM_PIN_ID_BGA144_C11 QM_PIN_ID_23
#define QM_PIN_ID_BGA144_A11 QM_PIN_ID_24
#define QM_PIN_ID_BGA144_B11 QM_PIN_ID_25
#define QM_PIN_ID_BGA144_C10 QM_PIN_ID_26
#define QM_PIN_ID_BGA144_D10 QM_PIN_ID_27
#define QM_PIN_ID_BGA144_A10 QM_PIN_ID_28
#define QM_PIN_ID_BGA144_B10 QM_PIN_ID_29
#define QM_PIN_ID_BGA144_C9 QM_PIN_ID_30
#define QM_PIN_ID_BGA144_A9 QM_PIN_ID_31
#define QM_PIN_ID_BGA144_D9 QM_PIN_ID_32
#define QM_PIN_ID_BGA144_B9 QM_PIN_ID_33
#define QM_PIN_ID_BGA144_C8 QM_PIN_ID_34
#define QM_PIN_ID_BGA144_A8 QM_PIN_ID_35
#define QM_PIN_ID_BGA144_B7 QM_PIN_ID_36
#define QM_PIN_ID_BGA144_B8 QM_PIN_ID_37
#define QM_PIN_ID_BGA144_C7 QM_PIN_ID_38
#define QM_PIN_ID_BGA144_A7 QM_PIN_ID_39
#define QM_PIN_ID_BGA144_D8 QM_PIN_ID_40
#define QM_PIN_ID_BGA144_D7 QM_PIN_ID_41
#define QM_PIN_ID_BGA144_D6 QM_PIN_ID_42
#define QM_PIN_ID_BGA144_B6 QM_PIN_ID_43
#define QM_PIN_ID_BGA144_A6 QM_PIN_ID_44
#define QM_PIN_ID_BGA144_C6 QM_PIN_ID_45
#define QM_PIN_ID_BGA144_B5 QM_PIN_ID_46
#define QM_PIN_ID_BGA144_A5 QM_PIN_ID_47
#define QM_PIN_ID_BGA144_D5 QM_PIN_ID_48
#define QM_PIN_ID_BGA144_C5 QM_PIN_ID_49
#define QM_PIN_ID_BGA144_B4 QM_PIN_ID_50
#define QM_PIN_ID_BGA144_D4 QM_PIN_ID_51
#define QM_PIN_ID_BGA144_A4 QM_PIN_ID_52
#define QM_PIN_ID_BGA144_C4 QM_PIN_ID_53
#define QM_PIN_ID_BGA144_B3 QM_PIN_ID_54
#define QM_PIN_ID_BGA144_A3 QM_PIN_ID_55
#define QM_PIN_ID_BGA144_C3 QM_PIN_ID_56
#define QM_PIN_ID_BGA144_D3 QM_PIN_ID_57
#define QM_PIN_ID_BGA144_E4 QM_PIN_ID_58
#define QM_PIN_ID_BGA144_E3 QM_PIN_ID_59
#define QM_PIN_ID_BGA144_B2 QM_PIN_ID_60
#define QM_PIN_ID_BGA144_Q2 QM_PIN_ID_61
#define QM_PIN_ID_BGA144_C2 QM_PIN_ID_62
#define QM_PIN_ID_BGA144_D2 QM_PIN_ID_63
#define QM_PIN_ID_BGA144_B1 QM_PIN_ID_64
#define QM_PIN_ID_BGA144_E2 QM_PIN_ID_65
#define QM_PIN_ID_BGA144_C1 QM_PIN_ID_66
#define QM_PIN_ID_BGA144_D1 QM_PIN_ID_67
#define QM_PIN_ID_BGA144_E1 QM_PIN_ID_68
/* WLCSP144 package. */
#define QM_PIN_ID_WLCSP144_F2 QM_PIN_ID_0
#define QM_PIN_ID_WLCSP144_G4 QM_PIN_ID_1
#define QM_PIN_ID_WLCSP144_H5 QM_PIN_ID_2
#define QM_PIN_ID_WLCSP144_J6 QM_PIN_ID_3
#define QM_PIN_ID_WLCSP144_K6 QM_PIN_ID_4
#define QM_PIN_ID_WLCSP144_L6 QM_PIN_ID_5
#define QM_PIN_ID_WLCSP144_H4 QM_PIN_ID_6
#define QM_PIN_ID_WLCSP144_G3 QM_PIN_ID_7
#define QM_PIN_ID_WLCSP144_L5 QM_PIN_ID_8
#define QM_PIN_ID_WLCSP144_M5 QM_PIN_ID_9
#define QM_PIN_ID_WLCSP144_K5 QM_PIN_ID_10
#define QM_PIN_ID_WLCSP144_G1 QM_PIN_ID_11
#define QM_PIN_ID_WLCSP144_J4 QM_PIN_ID_12
#define QM_PIN_ID_WLCSP144_G2 QM_PIN_ID_13
#define QM_PIN_ID_WLCSP144_F1 QM_PIN_ID_14
#define QM_PIN_ID_WLCSP144_J5 QM_PIN_ID_15
#define QM_PIN_ID_WLCSP144_L4 QM_PIN_ID_16
#define QM_PIN_ID_WLCSP144_M4 QM_PIN_ID_17
#define QM_PIN_ID_WLCSP144_K4 QM_PIN_ID_18
#define QM_PIN_ID_WLCSP144_B2 QM_PIN_ID_19
#define QM_PIN_ID_WLCSP144_C1 QM_PIN_ID_20
#define QM_PIN_ID_WLCSP144_C2 QM_PIN_ID_21
#define QM_PIN_ID_WLCSP144_D1 QM_PIN_ID_22
#define QM_PIN_ID_WLCSP144_D2 QM_PIN_ID_23
#define QM_PIN_ID_WLCSP144_E1 QM_PIN_ID_24
#define QM_PIN_ID_WLCSP144_E2 QM_PIN_ID_25
#define QM_PIN_ID_WLCSP144_B3 QM_PIN_ID_26
#define QM_PIN_ID_WLCSP144_A3 QM_PIN_ID_27
#define QM_PIN_ID_WLCSP144_C3 QM_PIN_ID_28
#define QM_PIN_ID_WLCSP144_E3 QM_PIN_ID_29
#define QM_PIN_ID_WLCSP144_D3 QM_PIN_ID_30
#define QM_PIN_ID_WLCSP144_D4 QM_PIN_ID_31
#define QM_PIN_ID_WLCSP144_C4 QM_PIN_ID_32
#define QM_PIN_ID_WLCSP144_B4 QM_PIN_ID_33
#define QM_PIN_ID_WLCSP144_A4 QM_PIN_ID_34
#define QM_PIN_ID_WLCSP144_B5 QM_PIN_ID_35
#define QM_PIN_ID_WLCSP144_C5 QM_PIN_ID_36
#define QM_PIN_ID_WLCSP144_D5 QM_PIN_ID_37
#define QM_PIN_ID_WLCSP144_E5 QM_PIN_ID_38
#define QM_PIN_ID_WLCSP144_E4 QM_PIN_ID_39
#define QM_PIN_ID_WLCSP144_A6 QM_PIN_ID_40
#define QM_PIN_ID_WLCSP144_B6 QM_PIN_ID_41
#define QM_PIN_ID_WLCSP144_C6 QM_PIN_ID_42
#define QM_PIN_ID_WLCSP144_D6 QM_PIN_ID_43
#define QM_PIN_ID_WLCSP144_E6 QM_PIN_ID_44
#define QM_PIN_ID_WLCSP144_D7 QM_PIN_ID_45
#define QM_PIN_ID_WLCSP144_C7 QM_PIN_ID_46
#define QM_PIN_ID_WLCSP144_B7 QM_PIN_ID_47
#define QM_PIN_ID_WLCSP144_A7 QM_PIN_ID_48
#define QM_PIN_ID_WLCSP144_B8 QM_PIN_ID_49
#define QM_PIN_ID_WLCSP144_A8 QM_PIN_ID_50
#define QM_PIN_ID_WLCSP144_B9 QM_PIN_ID_51
#define QM_PIN_ID_WLCSP144_A9 QM_PIN_ID_52
#define QM_PIN_ID_WLCSP144_C9 QM_PIN_ID_53
#define QM_PIN_ID_WLCSP144_D9 QM_PIN_ID_54
#define QM_PIN_ID_WLCSP144_D8 QM_PIN_ID_55
#define QM_PIN_ID_WLCSP144_E7 QM_PIN_ID_56
#define QM_PIN_ID_WLCSP144_E9 QM_PIN_ID_57
#define QM_PIN_ID_WLCSP144_E8 QM_PIN_ID_58
#define QM_PIN_ID_WLCSP144_A10 QM_PIN_ID_59
#define QM_PIN_ID_WLCSP144_B10 QM_PIN_ID_60
#define QM_PIN_ID_WLCSP144_C10 QM_PIN_ID_61
#define QM_PIN_ID_WLCSP144_D10 QM_PIN_ID_62
#define QM_PIN_ID_WLCSP144_E10 QM_PIN_ID_63
#define QM_PIN_ID_WLCSP144_D11 QM_PIN_ID_64
#define QM_PIN_ID_WLCSP144_C11 QM_PIN_ID_65
#define QM_PIN_ID_WLCSP144_B11 QM_PIN_ID_66
#define QM_PIN_ID_WLCSP144_D12 QM_PIN_ID_67
#define QM_PIN_ID_WLCSP144_C12 QM_PIN_ID_68
/* Pin function name to pin function number. */
/* Pin ID 0. */
#define QM_PIN_0_FN_GPIO_0 QM_PMUX_FN_0
#define QM_PIN_0_FN_AIN_0 QM_PMUX_FN_1
#define QM_PIN_0_FN_SPI_S_CS_B QM_PMUX_FN_2
/* Pin ID 1. */
#define QM_PIN_1_FN_GPIO_1 QM_PMUX_FN_0
#define QM_PIN_1_FN_AIN_1 QM_PMUX_FN_1
#define QM_PIN_1_FN_SPI_S_MISO QM_PMUX_FN_2
/* Pin ID 2. */
#define QM_PIN_2_FN_GPIO_2 QM_PMUX_FN_0
#define QM_PIN_2_FN_AIN_2 QM_PMUX_FN_1
#define QM_PIN_2_FN_SPI_S_SCK QM_PMUX_FN_2
/* Pin ID 3. */
#define QM_PIN_3_FN_GPIO_3 QM_PMUX_FN_0
#define QM_PIN_3_FN_AIN_3 QM_PMUX_FN_1
#define QM_PIN_3_FN_SPI_S_MOSI QM_PMUX_FN_2
/* Pin ID 4. */
#define QM_PIN_4_FN_GPIO_4 QM_PMUX_FN_0
#define QM_PIN_4_FN_AIN_4 QM_PMUX_FN_1
/* Pin ID 5. */
#define QM_PIN_5_FN_GPIO_5 QM_PMUX_FN_0
#define QM_PIN_5_FN_AIN_5 QM_PMUX_FN_1
/* Pin ID 6. */
#define QM_PIN_6_FN_GPIO_6 QM_PMUX_FN_0
#define QM_PIN_6_FN_AIN_6 QM_PMUX_FN_1
/* Pin ID 7. */
#define QM_PIN_7_FN_GPIO_7 QM_PMUX_FN_0
#define QM_PIN_7_FN_AIN_7 QM_PMUX_FN_1
/* Pin ID 8. */
#define QM_PIN_8_FN_GPIO_SS_0 QM_PMUX_FN_0
#define QM_PIN_8_FN_AIN_8 QM_PMUX_FN_1
#define QM_PIN_8_FN_UART1_CTS QM_PMUX_FN_2
/* Pin ID 9. */
#define QM_PIN_9_FN_GPIO_SS_1 QM_PMUX_FN_0
#define QM_PIN_9_FN_AIN_9 QM_PMUX_FN_1
#define QM_PIN_9_FN_UART1_RTS QM_PMUX_FN_2
/* Pin ID 10. */
#define QM_PIN_10_FN_GPIO_SS_2 QM_PMUX_FN_0
#define QM_PIN_10_FN_AIN_10 QM_PMUX_FN_1
/* Pin ID 11. */
#define QM_PIN_11_FN_GPIO_SS_3 QM_PMUX_FN_0
#define QM_PIN_11_FN_AIN_11 QM_PMUX_FN_1
/* Pin ID 12. */
#define QM_PIN_12_FN_GPIO_SS_4 QM_PMUX_FN_0
#define QM_PIN_12_FN_AIN_12 QM_PMUX_FN_1
/* Pin ID 13. */
#define QM_PIN_13_FN_GPIO_SS_5 QM_PMUX_FN_0
#define QM_PIN_13_FN_AIN_13 QM_PMUX_FN_1
/* Pin ID 14. */
#define QM_PIN_14_FN_GPIO_SS_6 QM_PMUX_FN_0
#define QM_PIN_14_FN_AIN_14 QM_PMUX_FN_1
/* Pin ID 15. */
#define QM_PIN_15_FN_GPIO_SS_7 QM_PMUX_FN_0
#define QM_PIN_15_FN_AIN_15 QM_PMUX_FN_1
/* Pin ID 16. */
#define QM_PIN_16_FN_GPIO_SS_8 QM_PMUX_FN_0
#define QM_PIN_16_FN_AIN_16 QM_PMUX_FN_1
#define QM_PIN_16_FN_UART1_TXD QM_PMUX_FN_2
/* Pin ID 17. */
#define QM_PIN_17_FN_GPIO_SS_9 QM_PMUX_FN_0
#define QM_PIN_17_FN_AIN_17 QM_PMUX_FN_1
#define QM_PIN_17_FN_UART1_RXD QM_PMUX_FN_2
/* Pin ID 18. */
#define QM_PIN_18_FN_UART0_RXD QM_PMUX_FN_0
#define QM_PIN_18_FN_AIN_18 QM_PMUX_FN_1
/* Pin ID 19. */
#define QM_PIN_19_FN_UART0_TXD QM_PMUX_FN_0
#define QM_PIN_19_FN_GPIO_31 QM_PMUX_FN_1
/* Pin ID 20. */
#define QM_PIN_20_FN_I2C0_SCL QM_PMUX_FN_0
/* Pin ID 21. */
#define QM_PIN_21_FN_I2C0_SDA QM_PMUX_FN_0
/* Pin ID 22. */
#define QM_PIN_22_FN_I2C1_SCL QM_PMUX_FN_0
/* Pin ID 23. */
#define QM_PIN_23_FN_I2C1_SDA QM_PMUX_FN_0
/* Pin ID 24. */
#define QM_PIN_24_FN_I2C0_SS_SDA QM_PMUX_FN_0
/* Pin ID 25. */
#define QM_PIN_25_FN_I2C0_SS_SCL QM_PMUX_FN_0
/* Pin ID 26. */
#define QM_PIN_26_FN_I2C1_SS_SDA QM_PMUX_FN_0
/* Pin ID 27. */
#define QM_PIN_27_FN_I2C1_SS_SCL QM_PMUX_FN_0
/* Pin ID 28. */
#define QM_PIN_28_FN_SPI0_SS_MISO QM_PMUX_FN_0
/* Pin ID 29. */
#define QM_PIN_29_FN_SPI0_SS_MOSI QM_PMUX_FN_0
/* Pin ID 30. */
#define QM_PIN_30_FN_SPI0_SS_SCK QM_PMUX_FN_0
/* Pin ID 31. */
#define QM_PIN_31_FN_SPI0_SS_CS_B_0 QM_PMUX_FN_0
/* Pin ID 32. */
#define QM_PIN_32_FN_SPI0_SS_CS_B_1 QM_PMUX_FN_0
/* Pin ID 33. */
#define QM_PIN_33_FN_SPI0_SS_CS_B_2 QM_PMUX_FN_0
#define QM_PIN_33_FN_GPIO_29 QM_PMUX_FN_1
/* Pin ID 34. */
#define QM_PIN_34_FN_SPI0_SS_CS_B_3 QM_PMUX_FN_0
#define QM_PIN_34_FN_GPIO_30 QM_PMUX_FN_1
/* Pin ID 35. */
#define QM_PIN_35_FN_SPI1_SS_MISO QM_PMUX_FN_0
/* Pin ID 36. */
#define QM_PIN_36_FN_SPI1_SS_MOSI QM_PMUX_FN_0
/* Pin ID 37. */
#define QM_PIN_37_FN_SPI1_SS_SCK QM_PMUX_FN_0
/* Pin ID 38. */
#define QM_PIN_38_FN_SPI1_SS_CS_B_0 QM_PMUX_FN_0
/* Pin ID 39. */
#define QM_PIN_39_FN_SPI1_SS_CS_B_1 QM_PMUX_FN_0
/* Pin ID 40. */
#define QM_PIN_40_FN_SPI1_SS_CS_B_2 QM_PMUX_FN_0
#define QM_PIN_40_FN_UART0_CTS_B QM_PMUX_FN_1
/* Pin ID 41. */
#define QM_PIN_41_FN_SPI1_SS_CS_B_3 QM_PMUX_FN_0
#define QM_PIN_41_FN_UART0_RTS_B QM_PMUX_FN_1
/* Pin ID 42. */
#define QM_PIN_42_FN_GPIO_8 QM_PMUX_FN_0
#define QM_PIN_42_FN_SPI1_M_SCK QM_PMUX_FN_1
/* Pin ID 43. */
#define QM_PIN_43_FN_GPIO_9 QM_PMUX_FN_0
#define QM_PIN_43_FN_SPI1_M_MISO QM_PMUX_FN_1
/* Pin ID 44. */
#define QM_PIN_44_FN_GPIO_10 QM_PMUX_FN_0
#define QM_PIN_44_FN_SPI1_M_MOSI QM_PMUX_FN_1
/* Pin ID 45. */
#define QM_PIN_45_FN_GPIO_11 QM_PMUX_FN_0
#define QM_PIN_45_FN_SPI1_M_CS_B_0 QM_PMUX_FN_1
/* Pin ID 46. */
#define QM_PIN_46_FN_GPIO_12 QM_PMUX_FN_0
#define QM_PIN_46_FN_SPI1_M_CS_B_1 QM_PMUX_FN_1
/* Pin ID 47. */
#define QM_PIN_47_FN_GPIO_13 QM_PMUX_FN_0
#define QM_PIN_47_FN_SPI1_M_CS_B_2 QM_PMUX_FN_1
/* Pin ID 48. */
#define QM_PIN_48_FN_GPIO_14 QM_PMUX_FN_0
#define QM_PIN_48_FN_SPI1_M_CS_B_3 QM_PMUX_FN_1
/* Pin ID 49. */
#define QM_PIN_49_FN_GPIO_15 QM_PMUX_FN_0
#define QM_PIN_49_FN_I2S_RXD QM_PMUX_FN_1
/* Pin ID 50. */
#define QM_PIN_50_FN_GPIO_16 QM_PMUX_FN_0
#define QM_PIN_50_FN_I2S_RSCK QM_PMUX_FN_1
/* Pin ID 51. */
#define QM_PIN_51_FN_GPIO_17 QM_PMUX_FN_0
#define QM_PIN_51_FN_I2S_RWS QM_PMUX_FN_1
/* Pin ID 52. */
#define QM_PIN_52_FN_GPIO_18 QM_PMUX_FN_0
#define QM_PIN_52_FN_I2S_TSCK QM_PMUX_FN_1
/* Pin ID 53. */
#define QM_PIN_53_FN_GPIO_19 QM_PMUX_FN_0
#define QM_PIN_53_FN_I2S_TWS QM_PMUX_FN_1
/* Pin ID 54. */
#define QM_PIN_54_FN_GPIO_20 QM_PMUX_FN_0
#define QM_PIN_54_FN_I2S_TXD QM_PMUX_FN_1
/* Pin ID 55. */
#define QM_PIN_55_FN_GPIO_21 QM_PMUX_FN_0
#define QM_PIN_55_FN_SPI0_M_SCK QM_PMUX_FN_1
/* Pin ID 56. */
#define QM_PIN_56_FN_GPIO_22 QM_PMUX_FN_0
#define QM_PIN_56_FN_SPI0_M_MISO QM_PMUX_FN_1
/* Pin ID 57. */
#define QM_PIN_57_FN_GPIO_23 QM_PMUX_FN_0
#define QM_PIN_57_FN_SPI0_M_MOSI QM_PMUX_FN_1
/* Pin ID 58. */
#define QM_PIN_58_FN_GPIO_24 QM_PMUX_FN_0
#define QM_PIN_58_FN_SPI0_M_CS_B_0 QM_PMUX_FN_1
/* Pin ID 59. */
#define QM_PIN_59_FN_GPIO_25 QM_PMUX_FN_0
#define QM_PIN_59_FN_SPI0_M_CS_B_1 QM_PMUX_FN_1
/* Pin ID 60. */
#define QM_PIN_60_FN_GPIO_26 QM_PMUX_FN_0
#define QM_PIN_60_FN_SPI0_M_CS_B_2 QM_PMUX_FN_1
/* Pin ID 61. */
#define QM_PIN_61_FN_GPIO_27 QM_PMUX_FN_0
#define QM_PIN_61_FN_SPI0_M_CS_B_3 QM_PMUX_FN_1
/* Pin ID 62. */
#define QM_PIN_62_FN_GPIO_28 QM_PMUX_FN_0
/* Pin ID 63. */
#define QM_PIN_63_FN_GPIO_SS_10 QM_PMUX_FN_0
#define QM_PIN_63_FN_PWM_0 QM_PMUX_FN_1
/* Pin ID 64. */
#define QM_PIN_64_FN_GPIO_SS_11 QM_PMUX_FN_0
#define QM_PIN_64_FN_PWM_1 QM_PMUX_FN_1
/* Pin ID 65. */
#define QM_PIN_65_FN_GPIO_SS_12 QM_PMUX_FN_0
#define QM_PIN_65_FN_PWM_2 QM_PMUX_FN_1
/* Pin ID 66. */
#define QM_PIN_66_FN_GPIO_SS_13 QM_PMUX_FN_0
#define QM_PIN_66_FN_PWM_3 QM_PMUX_FN_1
/* Pin ID 67. */
#define QM_PIN_67_FN_GPIO_SS_14 QM_PMUX_FN_0
#define QM_PIN_67_FN_PLT_CLK_0 QM_PMUX_FN_1
/* Pin ID 68. */
#define QM_PIN_68_FN_GPIO_SS_15 QM_PMUX_FN_0
#define QM_PIN_68_FN_PLT_CLK_1 QM_PMUX_FN_1
/**
* @}
*/
#endif /* __QM_PIN_FUNCTIONS_H__ */

View file

@ -87,6 +87,11 @@ uint32_t test_sensor_aux[QM_SS_AUX_REGS_SIZE];
/* Bitwise NAND operation macro for registers in the auxiliary memory space. */
#define QM_SS_REG_AUX_NAND(reg, mask) \
(__builtin_arc_sr(__builtin_arc_lr(reg) & (~(mask)), reg))
/* Bitwise MASK and OR operation macro for registers in the auxiliary memory
* space.
*/
#define QM_SS_REG_AUX_MASK_OR(reg, mask, value) \
(__builtin_arc_sr(((__builtin_arc_lr(reg) & (~(mask))) | value), reg))
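A sketch of the read-modify-write this macro performs (register address and field values are hypothetical):
/* Clear bits [2:1] of aux register 0x100, then OR in 0x4. */
QM_SS_REG_AUX_MASK_OR(0x100, 0x6, 0x4);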
/* Sensor Subsystem status32 register. */
#define QM_SS_AUX_STATUS32 (0xA)
@ -124,7 +129,7 @@ typedef struct {
* - IRQ Trigger:BIT(1)
* - IRQ Enable:BIT(0)
*/
uint8_t irq_config[QM_SS_INT_VECTOR_NUM - 1];
uint8_t irq_config[QM_SS_INT_VECTOR_NUM - QM_SS_EXCEPTION_NUM];
} qm_irq_context_t;
/** @} */
@ -260,19 +265,24 @@ typedef struct {
#define QM_SS_I2C_CON_ENABLE BIT(0)
#define QM_SS_I2C_CON_ABORT BIT(1)
#define QM_SS_I2C_CON_ABORT_OFFSET (1)
#define QM_SS_I2C_CON_SPEED_SS BIT(3)
#define QM_SS_I2C_CON_SPEED_FS BIT(4)
#define QM_SS_I2C_CON_SPEED_FSP BIT(4)
#define QM_SS_I2C_CON_SPEED_MASK (0x18)
#define QM_SS_I2C_CON_SPEED_OFFSET (3)
#define QM_SS_I2C_CON_IC_10BITADDR BIT(5)
#define QM_SS_I2C_CON_IC_10BITADDR_OFFSET (5)
#define QM_SS_I2C_CON_IC_10BITADDR_MASK (5)
#define QM_SS_I2C_CON_IC_10BITADDR_MASK BIT(5)
#define QM_SS_I2C_CON_RESTART_EN BIT(7)
#define QM_SS_I2C_CON_RESTART_EN_OFFSET (7)
#define QM_SS_I2C_CON_TAR_SAR_OFFSET (9)
#define QM_SS_I2C_CON_TAR_SAR_MASK (0x7FE00)
#define QM_SS_I2C_CON_TAR_SAR_10_BIT_MASK (0x3FF)
#define QM_SS_I2C_CON_SPKLEN_OFFSET (22)
#define QM_SS_I2C_CON_SPKLEN_MASK (0x3FC00000)
#define QM_SS_I2C_CON_CLK_ENA BIT(31)
#define QM_SS_I2C_CON_ENABLE_ABORT_MASK (0x3)
#define QM_SS_I2C_DATA_CMD_CMD BIT(8)
#define QM_SS_I2C_DATA_CMD_STOP BIT(9)
@ -288,6 +298,8 @@ typedef struct {
#define QM_SS_I2C_INTR_STAT_TX_OVER BIT(3)
#define QM_SS_I2C_INTR_STAT_TX_EMPTY BIT(4)
#define QM_SS_I2C_INTR_STAT_TX_ABRT BIT(6)
#define QM_SS_I2C_INTR_STAT_STOP BIT(9)
#define QM_SS_I2C_INTR_STAT_START BIT(10)
#define QM_SS_I2C_INTR_MASK_ALL (0x0)
#define QM_SS_I2C_INTR_MASK_RX_UNDER BIT(0)
@ -296,13 +308,20 @@ typedef struct {
#define QM_SS_I2C_INTR_MASK_TX_OVER BIT(3)
#define QM_SS_I2C_INTR_MASK_TX_EMPTY BIT(4)
#define QM_SS_I2C_INTR_MASK_TX_ABRT BIT(6)
#define QM_SS_I2C_INTR_MASK_STOP BIT(9)
#define QM_SS_I2C_INTR_MASK_START BIT(10)
#define QM_SS_I2C_TL_TX_TL_OFFSET (16)
#define QM_SS_I2C_TL_MASK (0xFF)
#define QM_SS_I2C_TL_RX_TL_MASK (0xFF)
#define QM_SS_I2C_TL_TX_TL_MASK (0xFF0000)
#define QM_SS_I2C_INTR_CLR_ALL (0xFF)
#define QM_SS_I2C_INTR_CLR_RX_UNDER BIT(0)
#define QM_SS_I2C_INTR_CLR_RX_OVER BIT(1)
#define QM_SS_I2C_INTR_CLR_TX_OVER BIT(3)
#define QM_SS_I2C_INTR_CLR_TX_ABRT BIT(6)
#define QM_SS_I2C_INTR_CLR_STOP_DET BIT(9)
#define QM_SS_I2C_TX_ABRT_SOURCE_NAK_MASK (0x09)
#define QM_SS_I2C_TX_ABRT_SOURCE_ALL_MASK (0x1FFFF)
@ -324,6 +343,216 @@ typedef struct {
#define QM_SS_I2C_FIFO_SIZE (8)
#define QM_SS_I2C_SPK_LEN_SS (1)
#define QM_SS_I2C_SPK_LEN_FS (2)
#define QM_SS_I2C_SPK_LEN_FSP (2)
#define QM_SS_I2C_WRITE_CLKEN(controller) \
__builtin_arc_sr((__builtin_arc_lr(controller + QM_SS_I2C_CON) & \
QM_SS_I2C_CON_CLK_ENA), \
controller + QM_SS_I2C_CON)
#define QM_SS_I2C_WRITE_SPKLEN(controller, value) \
QM_SS_REG_AUX_OR((controller + QM_SS_I2C_CON), \
value << QM_SS_I2C_CON_SPKLEN_OFFSET)
#define QM_SS_I2C_WRITE_TAR(controller, value) \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_CON, \
(value & QM_SS_I2C_CON_TAR_SAR_10_BIT_MASK) \
<< QM_SS_I2C_CON_TAR_SAR_OFFSET)
#define QM_SS_I2C_WRITE_RESTART_EN(controller) \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_CON, QM_SS_I2C_CON_RESTART_EN)
#define QM_SS_I2C_WRITE_ADDRESS_MODE(controller, value)                        \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_CON, \
value << QM_SS_I2C_CON_IC_10BITADDR_OFFSET)
#define QM_SS_I2C_WRITE_SPEED(controller, value) \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_CON, value)
#define QM_SS_I2C_WRITE_DATA_CMD(controller, value) \
__builtin_arc_sr(value, controller + QM_SS_I2C_DATA_CMD)
#define QM_SS_I2C_WRITE_SS_SCL_HCNT(controller, value) \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_SS_SCL_CNT, \
(value & QM_SS_I2C_SS_FS_SCL_CNT_16BIT_MASK) \
<< QM_SS_I2C_SS_FS_SCL_CNT_HCNT_OFFSET)
#define QM_SS_I2C_WRITE_SS_SCL_LCNT(controller, value) \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_SS_SCL_CNT, \
value & QM_SS_I2C_SS_FS_SCL_CNT_16BIT_MASK)
#define QM_SS_I2C_WRITE_FS_SCL_HCNT(controller, value) \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_FS_SCL_CNT, \
(value & QM_SS_I2C_SS_FS_SCL_CNT_16BIT_MASK) \
<< QM_SS_I2C_SS_FS_SCL_CNT_HCNT_OFFSET)
#define QM_SS_I2C_WRITE_FS_SCL_LCNT(controller, value) \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_FS_SCL_CNT, \
value & QM_SS_I2C_SS_FS_SCL_CNT_16BIT_MASK)
#define QM_SS_I2C_WRITE_RAW_INTR_STAT(controller, value) \
__builtin_arc_sr(value, controller + QM_SS_I2C_INTR_STAT)
#define QM_SS_I2C_WRITE_TX_TL(controller, value) \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_TL, \
(value & QM_SS_I2C_TL_MASK) \
<< QM_SS_I2C_TL_TX_TL_OFFSET)
#define QM_SS_I2C_WRITE_RX_TL(controller, value) \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_TL, \
value & QM_SS_I2C_TL_RX_TL_MASK)
#define QM_SS_I2C_WRITE_STATUS(controller, value) \
__builtin_arc_sr(value, controller + QM_SS_I2C_STATUS)
#define QM_SS_I2C_WRITE_TXFLR(controller, value) \
__builtin_arc_sr(value, controller + QM_SS_I2C_TXFLR)
#define QM_SS_I2C_WRITE_RXFLR(controller, value) \
__builtin_arc_sr(value, controller + QM_SS_I2C_RXFLR)
#define QM_SS_I2C_WRITE_TX_ABRT_SOURCE(controller, value) \
__builtin_arc_sr(value, controller + QM_SS_I2C_TX_ABRT_SOURCE)
#define QM_SS_I2C_CLEAR_ENABLE(controller) \
QM_SS_REG_AUX_NAND(controller + QM_SS_I2C_CON, \
QM_SS_I2C_CON_ENABLE_ABORT_MASK)
#define QM_SS_I2C_CLEAR_CON(controller) \
__builtin_arc_sr(0, controller + QM_SS_I2C_CON)
#define QM_SS_I2C_CLEAR_SPKLEN(controller) \
QM_SS_REG_AUX_NAND(controller + QM_SS_I2C_CON, \
QM_SS_I2C_CON_SPKLEN_MASK)
#define QM_SS_I2C_CLEAR_TAR(controller) \
QM_SS_REG_AUX_NAND(controller + QM_SS_I2C_CON, \
QM_SS_I2C_CON_TAR_SAR_MASK)
#define QM_SS_I2C_CLEAR_SPEED(controller) \
QM_SS_REG_AUX_NAND(controller + QM_SS_I2C_CON, QM_SS_I2C_CON_SPEED_MASK)
#define QM_SS_I2C_CLEAR_DATA_CMD(controller) \
__builtin_arc_sr(0, controller + QM_SS_I2C_DATA_CMD)
#define QM_SS_I2C_CLEAR_SS_SCL_HCNT(controller) \
QM_SS_REG_AUX_NAND(controller + QM_SS_I2C_SS_SCL_CNT, \
QM_SS_I2C_SS_FS_SCL_CNT_16BIT_MASK \
<< QM_SS_I2C_SS_FS_SCL_CNT_HCNT_OFFSET)
#define QM_SS_I2C_CLEAR_SS_SCL_LCNT(controller) \
QM_SS_REG_AUX_NAND(controller + QM_SS_I2C_SS_SCL_CNT, \
QM_SS_I2C_SS_FS_SCL_CNT_16BIT_MASK)
#define QM_SS_I2C_CLEAR_FS_SCL_HCNT(controller) \
QM_SS_REG_AUX_NAND(controller + QM_SS_I2C_FS_SCL_CNT, \
QM_SS_I2C_SS_FS_SCL_CNT_16BIT_MASK \
<< QM_SS_I2C_SS_FS_SCL_CNT_HCNT_OFFSET)
#define QM_SS_I2C_CLEAR_FS_SCL_LCNT(controller) \
QM_SS_REG_AUX_NAND(controller + QM_SS_I2C_FS_SCL_CNT, \
QM_SS_I2C_SS_FS_SCL_CNT_16BIT_MASK)
#define QM_SS_I2C_CLEAR_INTR_STAT(controller) \
__builtin_arc_sr(0, controller + QM_SS_I2C_INTR_STAT)
#define QM_SS_I2C_CLEAR_INTR_MASK(controller) \
__builtin_arc_sr(0, controller + QM_SS_I2C_INTR_MASK)
#define QM_SS_I2C_CLEAR_TX_TL(controller) \
QM_SS_REG_AUX_NAND(controller + QM_SS_I2C_TL, QM_SS_I2C_TL_TX_TL_MASK)
#define QM_SS_I2C_CLEAR_RX_TL(controller) \
QM_SS_REG_AUX_NAND(controller + QM_SS_I2C_TL, QM_SS_I2C_TL_RX_TL_MASK)
#define QM_SS_I2C_CLEAR_STATUS(controller) \
__builtin_arc_sr(0, controller + QM_SS_I2C_STATUS)
#define QM_SS_I2C_CLEAR_TXFLR(controller) \
__builtin_arc_sr(0, controller + QM_SS_I2C_TXFLR)
#define QM_SS_I2C_CLEAR_RXFLR(controller) \
__builtin_arc_sr(0, controller + QM_SS_I2C_RXFLR)
#define QM_SS_I2C_CLEAR_SDA_CONFIG(controller) \
__builtin_arc_sr(0, controller + QM_SS_I2C_SDA_CONFIG)
#define QM_SS_I2C_CLEAR_TX_ABRT_SOURCE(controller) \
__builtin_arc_sr(0, controller + QM_SS_I2C_TX_ABRT_SOURCE)
#define QM_SS_I2C_CLEAR_ENABLE_STATUS(controller) \
__builtin_arc_sr(0, controller + QM_SS_I2C_ENABLE_STATUS)
#define QM_SS_I2C_READ_CON(controller) \
__builtin_arc_lr(controller + QM_SS_I2C_CON)
#define QM_SS_I2C_READ_ENABLE(controller) \
__builtin_arc_lr(controller + QM_SS_I2C_CON) & QM_SS_I2C_CON_ENABLE
#define QM_SS_I2C_READ_ABORT(controller) \
(__builtin_arc_lr(controller + QM_SS_I2C_CON) & \
QM_SS_I2C_CON_ABORT) >> \
QM_SS_I2C_CON_ABORT_OFFSET
#define QM_SS_I2C_READ_SPEED(controller) \
(__builtin_arc_lr(controller + QM_SS_I2C_CON) & \
QM_SS_I2C_CON_SPEED_MASK) >> \
QM_SS_I2C_CON_SPEED_OFFSET
#define QM_SS_I2C_READ_ADDR_MODE(controller) \
(__builtin_arc_lr(controller + QM_SS_I2C_CON) & \
QM_SS_I2C_CON_IC_10BITADDR_MASK) >> \
QM_SS_I2C_CON_IC_10BITADDR_OFFSET
#define QM_SS_I2C_READ_RESTART_EN(controller) \
(__builtin_arc_lr(controller + QM_SS_I2C_CON) & \
QM_SS_I2C_CON_RESTART_EN) >> \
QM_SS_I2C_CON_RESTART_EN_OFFSET
#define QM_SS_I2C_READ_TAR(controller) \
(__builtin_arc_lr(controller + QM_SS_I2C_CON) & \
QM_SS_I2C_CON_TAR_SAR_MASK) >> \
QM_SS_I2C_CON_TAR_SAR_OFFSET
#define QM_SS_I2C_READ_SPKLEN(controller) \
(__builtin_arc_lr(controller + QM_SS_I2C_CON) & \
QM_SS_I2C_CON_SPKLEN_MASK) >> \
QM_SS_I2C_CON_SPKLEN_OFFSET
#define QM_SS_I2C_READ_DATA_CMD(controller) \
__builtin_arc_lr(controller + QM_SS_I2C_DATA_CMD)
#define QM_SS_I2C_READ_RX_FIFO(controller) \
__builtin_arc_sr(QM_SS_I2C_DATA_CMD_POP, \
controller + QM_SS_I2C_DATA_CMD)
#define QM_SS_I2C_READ_SS_SCL_HCNT(controller) \
__builtin_arc_lr(controller + QM_SS_I2C_SS_SCL_CNT) >> \
QM_SS_I2C_SS_FS_SCL_CNT_HCNT_OFFSET
#define QM_SS_I2C_READ_SS_SCL_LCNT(controller) \
__builtin_arc_lr(controller + QM_SS_I2C_SS_SCL_CNT) & \
QM_SS_I2C_SS_FS_SCL_CNT_16BIT_MASK
#define QM_SS_I2C_READ_FS_SCL_HCNT(controller) \
__builtin_arc_lr(controller + QM_SS_I2C_FS_SCL_CNT) >> \
QM_SS_I2C_SS_FS_SCL_CNT_HCNT_OFFSET
#define QM_SS_I2C_READ_FS_SCL_LCNT(controller) \
__builtin_arc_lr(controller + QM_SS_I2C_FS_SCL_CNT) & \
QM_SS_I2C_SS_FS_SCL_CNT_16BIT_MASK
#define QM_SS_I2C_READ_INTR_STAT(controller) \
__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT)
#define QM_SS_I2C_READ_INTR_MASK(controller) \
__builtin_arc_lr(controller + QM_SS_I2C_INTR_MASK)
#define QM_SS_I2C_READ_RX_TL(controller) \
__builtin_arc_lr(controller + QM_SS_I2C_TL) & QM_SS_I2C_TL_RX_TL_MASK
#define QM_SS_I2C_READ_TX_TL(controller) \
(__builtin_arc_lr(controller + QM_SS_I2C_TL) & \
QM_SS_I2C_TL_TX_TL_MASK) >> \
QM_SS_I2C_TL_TX_TL_OFFSET
#define QM_SS_I2C_READ_STATUS(controller) \
__builtin_arc_lr(controller + QM_SS_I2C_STATUS)
#define QM_SS_I2C_READ_TXFLR(controller) \
__builtin_arc_lr(controller + QM_SS_I2C_TXFLR)
#define QM_SS_I2C_READ_RXFLR(controller) \
__builtin_arc_lr(controller + QM_SS_I2C_RXFLR)
#define QM_SS_I2C_READ_TX_ABRT_SOURCE(controller) \
__builtin_arc_lr(controller + QM_SS_I2C_TX_ABRT_SOURCE)
#define QM_SS_I2C_READ_ENABLE_STATUS(controller) \
__builtin_arc_lr(controller + QM_SS_I2C_ENABLE_STATUS)
#define QM_SS_I2C_ABORT(controller) \
QM_SS_REG_AUX_OR((controller + QM_SS_I2C_CON), QM_SS_I2C_CON_ABORT)
#define QM_SS_I2C_ENABLE(controller) \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_CON, QM_SS_I2C_CON_ENABLE)
#define QM_SS_I2C_DISABLE(controller) \
QM_SS_REG_AUX_NAND((controller + QM_SS_I2C_CON), QM_SS_I2C_CON_ENABLE)
#define QM_SS_I2C_MASK_ALL_INTERRUPTS(controller) \
__builtin_arc_sr(QM_SS_I2C_INTR_MASK_ALL, \
controller + QM_SS_I2C_INTR_MASK)
#define QM_SS_I2C_UNMASK_INTERRUPTS(controller) \
__builtin_arc_sr( \
(QM_SS_I2C_INTR_MASK_TX_ABRT | QM_SS_I2C_INTR_MASK_TX_EMPTY | \
QM_SS_I2C_INTR_MASK_TX_OVER | QM_SS_I2C_INTR_MASK_RX_FULL | \
QM_SS_I2C_INTR_MASK_RX_OVER | QM_SS_I2C_INTR_MASK_RX_UNDER), \
controller + QM_SS_I2C_INTR_MASK)
#define QM_SS_I2C_MASK_INTERRUPT(controller, value) \
QM_SS_REG_AUX_NAND(controller + QM_SS_I2C_INTR_MASK, value)
#define QM_SS_I2C_CLEAR_RX_UNDER_INTR(controller) \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_INTR_CLR, \
QM_SS_I2C_INTR_CLR_RX_UNDER)
#define QM_SS_I2C_CLEAR_RX_OVER_INTR(controller) \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_INTR_CLR, \
QM_SS_I2C_INTR_CLR_RX_OVER)
#define QM_SS_I2C_CLEAR_TX_OVER_INTR(controller) \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_INTR_CLR, \
QM_SS_I2C_INTR_CLR_TX_OVER)
#define QM_SS_I2C_CLEAR_TX_ABRT_INTR(controller) \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_INTR_CLR, \
QM_SS_I2C_INTR_CLR_TX_ABRT)
#define QM_SS_I2C_CLEAR_STOP_DET_INTR(controller) \
QM_SS_REG_AUX_OR(controller + QM_SS_I2C_INTR_CLR, \
QM_SS_I2C_INTR_CLR_STOP_DET)
#define QM_SS_I2C_CLEAR_ALL_INTR(controller) \
__builtin_arc_sr(QM_SS_I2C_INTR_CLR_ALL, \
controller + QM_SS_I2C_INTR_CLR)
/** Sensor Subsystem I2C */
typedef enum { QM_SS_I2C_0 = 0, QM_SS_I2C_1, QM_SS_I2C_NUM } qm_ss_i2c_t;
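These accessor macros wrap the ARC auxiliary-register builtins so driver code never issues __builtin_arc_lr/__builtin_arc_sr directly. As a hedged illustration only (the QM_SS_I2C_0_BASE symbol and the polling flow are assumptions, not taken from this header), a driver fragment might combine them like so:

/* Illustrative sketch: drain one byte from the RX FIFO of SS I2C 0
 * using the accessor macros above. QM_SS_I2C_0_BASE is assumed.
 */
static uint8_t ss_i2c_pop_rx_byte_sketch(void)
{
	const uint32_t controller = QM_SS_I2C_0_BASE;

	/* Wait until the receive FIFO reports at least one entry. */
	while (QM_SS_I2C_READ_RXFLR(controller) == 0) {
	}
	/* Strobe the POP bit, then read the popped byte from DATA_CMD. */
	QM_SS_I2C_READ_RX_FIFO(controller);
	return QM_SS_I2C_READ_DATA_CMD(controller) & 0xFF;
}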
@ -459,12 +688,15 @@ typedef enum {
/** @} */
/**
* I2C registers and definitions.
* SPI registers and definitions.
*
* @name SS SPI
* @{
*/
/* SS SPI FIFO depth */
#define QM_SS_SPI_FIFO_DEPTH (8)
/** Sensor Subsystem SPI register map. */
typedef enum {
QM_SS_SPI_CTRL = 0, /**< SPI control register. */
@ -568,6 +800,54 @@ typedef enum {
#define QM_SS_SPI_DR_W_MASK (0xc0000000)
#define QM_SS_SPI_DR_R_MASK (0x80000000)
#define QM_SS_SPI_ENABLE_REG_WRITES(spi) \
QM_SS_REG_AUX_OR(spi + QM_SS_SPI_CTRL, QM_SS_SPI_CTRL_CLK_ENA)
#define QM_SS_SPI_CTRL_READ(spi) __builtin_arc_lr(spi + QM_SS_SPI_CTRL)
#define QM_SS_SPI_CTRL_WRITE(value, spi) \
__builtin_arc_sr(value, spi + QM_SS_SPI_CTRL)
#define QM_SS_SPI_BAUD_RATE_WRITE(value, spi) \
__builtin_arc_sr(value, spi + QM_SS_SPI_TIMING)
#define QM_SS_SPI_SER_WRITE(value, spi) \
QM_SS_REG_AUX_MASK_OR(spi + QM_SS_SPI_SPIEN, QM_SS_SPI_SPIEN_SER_MASK, \
value << QM_SS_SPI_SPIEN_SER_OFFS)
#define QM_SS_SPI_INTERRUPT_MASK_WRITE(value, spi) \
__builtin_arc_sr(value, spi + QM_SS_SPI_INTR_MASK)
#define QM_SS_SPI_INTERRUPT_MASK_NAND(value, spi) \
QM_SS_REG_AUX_NAND(spi + QM_SS_SPI_INTR_MASK, value)
#define QM_SS_SPI_NDF_WRITE(value, spi) \
QM_SS_REG_AUX_MASK_OR(spi + QM_SS_SPI_CTRL, QM_SS_SPI_CTRL_NDF_MASK, \
value << QM_SS_SPI_CTRL_NDF_OFFS)
#define QM_SS_SPI_INTERRUPT_STATUS_READ(spi) \
__builtin_arc_lr(spi + QM_SS_SPI_INTR_STAT)
#define QM_SS_SPI_INTERRUPT_CLEAR_WRITE(value, spi) \
__builtin_arc_sr(value, spi + QM_SS_SPI_CLR_INTR)
#define QM_SS_SPI_RFTLR_WRITE(value, spi) \
__builtin_arc_sr((value << QM_SS_SPI_FTLR_RFT_OFFS) & \
QM_SS_SPI_FTLR_RFT_MASK, \
spi + QM_SS_SPI_FTLR)
#define QM_SS_SPI_TFTLR_WRITE(value, spi) \
__builtin_arc_sr((value << QM_SS_SPI_FTLR_TFT_OFFS) & \
QM_SS_SPI_FTLR_TFT_MASK, \
spi + QM_SS_SPI_FTLR)
#define QM_SS_SPI_RFTLR_READ(spi) __builtin_arc_lr(spi + QM_SS_SPI_FTLR)
#define QM_SS_SPI_TFTLR_READ(spi) __builtin_arc_lr(spi + QM_SS_SPI_FTLR)
#define QM_SS_SPI_DUMMY_WRITE(spi) \
__builtin_arc_sr(QM_SS_SPI_DR_R_MASK, spi + QM_SS_SPI_DR)
/** @} */
/** @} */
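The SPI accessors follow the same pattern. A minimal, hedged configuration sketch; the NDF minus-one convention, threshold semantics and mask polarity are DesignWare-style assumptions, not verified against this header:

/* Illustrative sketch: prepare an SS SPI block for a polled transfer of
 * `frames` data frames.
 */
static void ss_spi_setup_sketch(uint32_t spi, uint16_t frames)
{
	QM_SS_SPI_ENABLE_REG_WRITES(spi);     /* unlock register writes */
	QM_SS_SPI_NDF_WRITE(frames - 1, spi); /* frame count, assumed N-1 */
	QM_SS_SPI_RFTLR_WRITE(0, spi);        /* RX threshold: one frame */
	QM_SS_SPI_INTERRUPT_MASK_WRITE(0, spi); /* all sources off (polarity
						 * assumed) for polling */
	QM_SS_SPI_DUMMY_WRITE(spi);           /* clock out one dummy frame */
}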

View file

@ -357,8 +357,8 @@ typedef enum {
/** System and Power Management Single Interrupt. */
#define QM_IRQ_PMU_0_INT 23
#define QM_IRQ_PMU_0_INT_MASK_OFFSET 26
#define QM_IRQ_PMU_0_INT_VECTOR 58
#define QM_IRQ_PMU_0_INT_MASK_OFFSET 27
#define QM_IRQ_PMU_0_INT_VECTOR 59
/**
* 8 DMA Channel Error Interrupts Routed to Single Interrupt with 8bit Mask

View file

@ -33,6 +33,7 @@
#include "qm_common.h"
#include "qm_soc_interrupts.h"
#include "qm_interrupt_router_regs.h"
#include "flash_layout.h"
/**
* Quark SE SoC Registers.
@ -467,13 +468,30 @@ typedef struct {
aonpt_cfg; /**< Always-on periodic timer configuration register. */
} qm_aonc_reg_t;
/* Nothing to save for aonc on Quark SE. */
#define qm_aonc_context_t uint8_t
#define HAS_AONPT_BUSY_BIT (0)
#define QM_AONC_ENABLE (BIT(0))
#define QM_AONC_DISABLE (~QM_AONC_ENABLE)
#define QM_AONPT_INTERRUPT (BIT(0))
#define QM_AONPT_CLR (BIT(0))
#define QM_AONPT_RST (BIT(1))
#if (UNIT_TEST)
qm_aonc_reg_t test_aonc;
#define QM_AONC ((qm_aonc_reg_t *)(&test_aonc))
qm_aonc_reg_t test_aonc_instance[QM_AONC_NUM];
qm_aonc_reg_t *test_aonc[QM_AONC_NUM];
#define QM_AONC test_aonc
#else
#define QM_AONC_BASE (0xB0800700)
#define QM_AONC ((qm_aonc_reg_t *)QM_AONC_BASE)
extern qm_aonc_reg_t *qm_aonc[QM_AONC_NUM];
#define QM_AONC_0_BASE (0xB0800700)
#define QM_AONC qm_aonc
#endif
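This is the recurring QMSI 1.4 pattern, repeated below for PWM, WDT and RTC: a fixed base-address cast becomes a per-instance pointer table, which is what lets UNIT_TEST builds point the same name at RAM-backed fakes. The resulting access style, as a hedged sketch (the aonc_cnt field name comes from the QMSI register map, not from the excerpt above):

/* Read the always-on counter through the new pointer table. Index 0 is
 * used directly; QMSI also names the instance via a controller enum.
 */
static uint32_t aonc_read_sketch(void)
{
	return QM_AONC[0]->aonc_cnt; /* field name assumed */
}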
/** @} */
@ -634,77 +652,6 @@ qm_scss_info_reg_t test_scss_info;
#define QM_MBOX_HOST_MASK_OFFSET (0)
#define QM_MBOX_HOST_MASK_MASK (0x000000FF)
/**
* Mailbox Interrupt Mask enable/disable definitions
*
* \#defines use the channel number to determine the register and bit shift to
* use.
* The interrupt destination adds an offset to the bit shift.
*/
#define QM_MBOX_ENABLE_LMT_INT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask &= \
~(BIT(N + QM_MBOX_HOST_MASK_OFFSET))
#define QM_MBOX_DISABLE_LMT_INT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask |= \
(BIT(N + QM_MBOX_HOST_MASK_OFFSET))
#define QM_MBOX_ENABLE_SS_INT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask &= \
~(BIT(N + QM_MBOX_SS_MASK_OFFSET))
#define QM_MBOX_DISABLE_SS_INT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask |= \
(BIT(N + QM_MBOX_SS_MASK_OFFSET))
/**
* Mailbox Interrupt Halt Mask enable/disable definitions
*
* \#defines use the channel number to determine the register and bit shift to
* use.
* The interrupt destination adds an offset to the bit shift,
* see above for the bit position layout
*/
#define QM_MBOX_ENABLE_LMT_INT_HALT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask &= \
~(BIT(N + QM_MBOX_HOST_HALT_MASK_OFFSET))
#define QM_MBOX_DISABLE_LMT_INT_HALT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask |= \
(BIT(N + QM_MBOX_HOST_HALT_MASK_OFFSET))
#define QM_MBOX_ENABLE_SS_INT_HALT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask &= \
~(BIT(N + QM_MBOX_SS_HALT_MASK_OFFSET))
#define QM_MBOX_DISABLE_SS_INT_HALT_MASK(N) \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask |= \
(BIT(N + QM_MBOX_SS_HALT_MASK_OFFSET))
/**
* Mailbox interrupt mask definitions to return the current mask values
*/
#define QM_MBOX_SS_INT_HALT_MASK \
((QM_MBOX_SS_HALT_MASK_MASK & \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask) >> \
QM_MBOX_SS_HALT_MASK_OFFSET)
#define QM_MBOX_LMT_INT_HALT_MASK \
((QM_MBOX_HOST_HALT_MASK_MASK & \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask) >> \
QM_MBOX_SS_HALT_MASK_OFFSET)
#define QM_MBOX_SS_INT_MASK \
((QM_MBOX_SS_MASK_MASK & QM_INTERRUPT_ROUTER->mailbox_0_int_mask) >> \
QM_MBOX_SS_MASK_OFFSET)
#define QM_MBOX_LMT_INT_MASK \
(QM_MBOX_HOST_MASK_MASK & QM_INTERRUPT_ROUTER->mailbox_0_int_mask)
/**
* Mailbox interrupt macros to determine if the specified mailbox interrupt mask
* has been locked.
*/
#define QM_MBOX_SS_INT_LOCK_HALT_MASK(N) \
(QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(3))
#define QM_MBOX_LMT_INT_LOCK_HALT_MASK(N) \
(QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(2))
#define QM_MBOX_SS_INT_LOCK_MASK(N) \
(QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(1))
#define QM_MBOX_LMT_INT_LOCK_MASK(N) \
(QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(0))
/** Mailbox register structure. */
typedef struct {
QM_RW uint32_t ch_ctrl; /**< Channel Control Word */
@ -783,14 +730,16 @@ typedef struct {
} qm_pwm_context_t;
#if (UNIT_TEST)
qm_pwm_reg_t test_pwm_t;
#define QM_PWM ((qm_pwm_reg_t *)(&test_pwm_t))
qm_pwm_reg_t test_pwm_instance[QM_PWM_NUM];
qm_pwm_reg_t *test_pwm[QM_PWM_NUM];
#define QM_PWM test_pwm
#else
extern qm_pwm_reg_t *qm_pwm[QM_PWM_NUM];
/* PWM register base address. */
#define QM_PWM_BASE (0xB0000800)
/* PWM register block. */
#define QM_PWM ((qm_pwm_reg_t *)QM_PWM_BASE)
#define QM_PWM qm_pwm
#endif
#define PWM_START (1)
@ -800,6 +749,8 @@ qm_pwm_reg_t test_pwm_t;
#define QM_PWM_INTERRUPT_MASK_OFFSET (0x2)
#define NUM_PWM_CONTROLLER_INTERRUPTS (1)
/**
* Timer N Control (TimerNControlReg)
*
@ -867,15 +818,17 @@ typedef struct {
} qm_wdt_context_t;
#if (UNIT_TEST)
qm_wdt_reg_t test_wdt;
#define QM_WDT ((qm_wdt_reg_t *)(&test_wdt))
qm_wdt_reg_t test_wdt_instance[QM_WDT_NUM];
qm_wdt_reg_t *test_wdt[QM_WDT_NUM];
#define QM_WDT test_wdt
#else
extern qm_wdt_reg_t *qm_wdt[QM_WDT_NUM];
/* WDT register base address. */
#define QM_WDT_BASE (0xB0000000)
#define QM_WDT_0_BASE (0xB0000000)
/* WDT register block. */
#define QM_WDT ((qm_wdt_reg_t *)QM_WDT_BASE)
#define QM_WDT qm_wdt
#endif
/* Watchdog enable. */
@ -886,6 +839,18 @@ qm_wdt_reg_t test_wdt;
#define QM_WDT_CR_RMOD_OFFSET (1)
/* Watchdog Timeout Mask. */
#define QM_WDT_TORR_TOP_MASK (0xF)
/* Watchdog reload special value. */
#define QM_WDT_RELOAD_VALUE (0x76)
/* Number of WDT controllers. */
#define NUM_WDT_CONTROLLERS (1)
/* Watchdog does not have pause enable. */
#define HAS_WDT_PAUSE (0)
/* Software SoC watch required. */
#define HAS_SW_SOCWATCH (1)
/* Peripheral WDT clock enable mask. */
#define QM_WDT_CLOCK_EN_MASK (BIT(1))
/* Required to enable WDT clock on start. */
#define HAS_WDT_CLOCK_ENABLE (1)
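QM_WDT_RELOAD_VALUE is the usual DesignWare watchdog restart key: writing 0x76 to the counter restart register reloads the timeout. A hedged sketch follows; the wdt_crr field name is an assumption, since only the context structure appears above:

static inline void wdt_feed_sketch(void)
{
	/* Write the 0x76 safety value to restart the counter. */
	QM_WDT[0]->wdt_crr = QM_WDT_RELOAD_VALUE;
}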
/**
* WDT timeout table (in clock cycles):
@ -1079,8 +1044,13 @@ extern qm_uart_reg_t *qm_uart[QM_UART_NUM];
* @{
*/
/** Number of SPI controllers (only master driver available). */
typedef enum { QM_SPI_MST_0 = 0, QM_SPI_MST_1, QM_SPI_NUM } qm_spi_t;
/** Number of SPI controllers. */
typedef enum {
QM_SPI_MST_0 = 0,
QM_SPI_MST_1,
QM_SPI_SLV_0,
QM_SPI_NUM
} qm_spi_t;
/** SPI register map. */
typedef struct {
@ -1152,6 +1122,7 @@ extern qm_spi_reg_t *qm_spi_controllers[QM_SPI_NUM];
#define QM_SPI_CTRLR0_TMOD_OFFSET (8)
#define QM_SPI_CTRLR0_SCPOL_SCPH_OFFSET (6)
#define QM_SPI_CTRLR0_FRF_OFFSET (4)
#define QM_SPI_CTRLR0_SLV_OE BIT(10)
/* SPI SSI Enable register. */
#define QM_SPI_SSIENR_SSIENR BIT(0)
@ -1211,20 +1182,26 @@ typedef struct {
QM_RW uint32_t rtc_comp_version; /**< End of Interrupt Register */
} qm_rtc_reg_t;
/* Nothing to save for rtc on Quark SE. */
#define qm_rtc_context_t uint8_t
#define QM_RTC_CCR_INTERRUPT_ENABLE BIT(0)
#define QM_RTC_CCR_INTERRUPT_MASK BIT(1)
#define QM_RTC_CCR_ENABLE BIT(2)
#if (UNIT_TEST)
qm_rtc_reg_t test_rtc;
#define QM_RTC ((qm_rtc_reg_t *)(&test_rtc))
qm_rtc_reg_t test_rtc_instance[QM_RTC_NUM];
qm_rtc_reg_t *test_rtc[QM_RTC_NUM];
#define QM_RTC test_rtc
#else
extern qm_rtc_reg_t *qm_rtc[QM_RTC_NUM];
/* RTC register base address. */
#define QM_RTC_BASE (0xB0000400)
/* RTC register block. */
#define QM_RTC ((qm_rtc_reg_t *)QM_RTC_BASE)
#define QM_RTC qm_rtc
#endif
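The CCR bit masks above are enough to bring the RTC up; a hedged sketch of enabling it with its interrupt unmasked (the rtc_ccr field name is assumed from the QMSI register map):

static inline void rtc_start_sketch(void)
{
	QM_RTC[0]->rtc_ccr |= QM_RTC_CCR_ENABLE | QM_RTC_CCR_INTERRUPT_ENABLE;
	QM_RTC[0]->rtc_ccr &= ~QM_RTC_CCR_INTERRUPT_MASK; /* unmask the IRQ */
}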
/** @} */
@ -1279,7 +1256,8 @@ typedef struct {
QM_RW uint32_t ic_sda_hold; /**< SDA Hold */
QM_RW uint32_t ic_tx_abrt_source; /**< Transmit Abort Source */
QM_RW uint32_t reserved;
QM_RW uint32_t ic_dma_cr; /**< SDA Setup */
QM_RW uint32_t ic_dma_cr; /**< DMA Control Register for Tx and Rx
Handshaking Interface */
QM_RW uint32_t ic_dma_tdlr; /**< DMA Transmit Data Level Register */
QM_RW uint32_t ic_dma_rdlr; /**< DMA Receive Data Level Register */
QM_RW uint32_t ic_sda_setup; /**< SDA Setup */
@ -1311,6 +1289,8 @@ typedef struct {
uint32_t enable; /**< Enable. */
uint32_t fs_spklen; /**< SS and FS Spike Suppression Limit. */
uint32_t ic_intr_mask; /**< I2C Interrupt Mask. */
uint32_t rx_tl; /**< Receive FIFO threshold register. */
uint32_t tx_tl; /**< Transmit FIFO threshold register. */
} qm_i2c_context_t;
#if (UNIT_TEST)
@ -1389,6 +1369,7 @@ extern qm_i2c_reg_t *qm_i2c[QM_I2C_NUM];
#define QM_I2C_IC_LCNT_MIN (8)
#define QM_I2C_IC_HCNT_MAX (65525)
#define QM_I2C_IC_HCNT_MIN (6)
#define QM_I2C_IC_TAR_MASK (0x3FF)
#define QM_I2C_FIFO_SIZE (16)
@ -1476,6 +1457,9 @@ extern qm_gpio_reg_t *qm_gpio[QM_GPIO_NUM];
* @{
*/
#define NUM_FLASH_CONTROLLERS (2)
#define HAS_FLASH_WRITE_DISABLE (1)
/** Number of Flash controllers. */
typedef enum { QM_FLASH_0 = 0, QM_FLASH_1, QM_FLASH_NUM } qm_flash_t;
@ -1579,6 +1563,11 @@ extern qm_flash_reg_t *qm_flash[QM_FLASH_NUM];
/* Flash perform mass erase. */
#define MASS_ERASE BIT(7)
/* ROM read disable for upper 4k. */
#define ROM_RD_DIS_U BIT(3)
/* ROM read disable for lower 4k. */
#define ROM_RD_DIS_L BIT(2)
#define QM_FLASH_ADDRESS_MASK (0x7FF)
/* The address advances 4 bytes per write, but the address field sits at bit
 * offset 2, so the register value increments by 0x10. */
#define QM_FLASH_ADDR_INC (0x10)
@ -1589,6 +1578,8 @@ extern qm_flash_reg_t *qm_flash[QM_FLASH_NUM];
#define QM_FLASH_PAGE_SIZE_BYTES (0x800)
/* Flash page size in bits. */
#define QM_FLASH_PAGE_SIZE_BITS (11)
/* OTP ROM_PROG bit. */
#define QM_FLASH_STTS_ROM_PROG BIT(2)
/** @} */
@ -1620,6 +1611,9 @@ typedef struct {
uint32_t fpr_rd_cfg[QM_FPR_NUM];
} qm_fpr_context_t;
/* The addressing granularity of FPRs. */
#define QM_FPR_GRANULARITY (1024)
/** @} */
/**
@ -1654,6 +1648,9 @@ typedef struct {
uint32_t mpr_cfg[QM_MPR_NUM]; /**< MPR Configuration Register. */
} qm_mpr_context_t;
/* The addressing granularity of MPRs. */
#define QM_MPR_GRANULARITY (1024)
#if (UNIT_TEST)
qm_mpr_reg_t test_mpr;
@ -1842,6 +1839,9 @@ typedef struct {
#define QM_DMA_CFG_H_DEST_PER_OFFSET (11)
#define QM_DMA_CFG_H_DEST_PER_MASK (0xf << QM_DMA_CFG_H_DEST_PER_OFFSET)
#define QM_DMA_ENABLE_CLOCK(dma) \
(QM_SCSS_CCU->ccu_mlayer_ahb_ctl |= QM_CCU_DMA_CLK_EN)
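Note that the macro body ignores its dma argument: whichever controller is named, it ORs the same QM_CCU_DMA_CLK_EN bit into the CCU mlayer AHB control register. Usage is a one-liner (QM_DMA_0 is an assumed controller name):

	QM_DMA_ENABLE_CLOCK(QM_DMA_0); /* enables the shared DMA AHB clock */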
/** DMA interrupt register map. */
typedef struct {
QM_RW uint32_t raw_tfr_low; /**< RawTfr */
@ -2083,6 +2083,8 @@ uint32_t test_usb_pll;
/* Refer to "HARDWARE_ISSUES.rst" for fix description. */
#define FIX_1 (1)
#define FIX_2 (1)
#define FIX_3 (1)
/** @} */
@ -2095,7 +2097,10 @@ uint32_t test_usb_pll;
uint32_t test_rom_version;
#define ROM_VERSION_ADDRESS &test_rom_version;
#else
#define ROM_VERSION_ADDRESS (0xFFFFFFEC);
#define ROM_VERSION_ADDRESS \
(BL_DATA_FLASH_REGION_BASE + \
(BL_DATA_SECTION_BASE_PAGE * QM_FLASH_PAGE_SIZE_BYTES) + \
sizeof(qm_flash_data_trim_t))
#endif
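With the literal 0xFFFFFFEC gone, the ROM version word now lives at a flash-layout-derived address just past the trim data in the BL-data page, which is why flash_layout.h is now included above. Reading it remains a single dereference, as in this hedged sketch:

static uint32_t rom_version_read_sketch(void)
{
	/* Assumes the computed location holds a plain 32-bit version word. */
	return *(volatile uint32_t *)ROM_VERSION_ADDRESS;
}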
/** @} */

View file

@ -44,9 +44,9 @@
* Sensor Subsystem SS1 Timers mode type.
*/
typedef enum {
SS_POWER_CPU_SS1_TIMER_OFF = 0, /**< Disable SS Timers in SS1. */
SS_POWER_CPU_SS1_TIMER_ON /**< Keep SS Timers enabled in SS1. */
} ss_power_cpu_ss1_mode_t;
QM_SS_POWER_CPU_SS1_TIMER_OFF = 0, /**< Disable SS Timers in SS1. */
QM_SS_POWER_CPU_SS1_TIMER_ON /**< Keep SS Timers enabled in SS1. */
} qm_ss_power_cpu_ss1_mode_t;
/**
* Enable LPSS state entry.
@ -55,7 +55,7 @@ typedef enum {
* This function needs to be called on the Sensor Core to
* Clock Gate ADC, I2C0, I2C1, SPI0 and SPI1 sensor peripherals.<BR>
* Clock Gating sensor peripherals is a requirement to enter LPSS state.<BR>
* After LPSS, ss_power_soc_lpss_disable needs to be called to
* After LPSS, qm_ss_power_soc_lpss_disable needs to be called to
* restore clock gating.<BR>
*
* This needs to be called before any transition to C2/C2LP and SS2
@ -70,7 +70,7 @@ typedef enum {
* - AON Timer Interrupt
* - RTC Interrupt
*/
void ss_power_soc_lpss_enable(void);
void qm_ss_power_soc_lpss_enable(void);
#if (ENABLE_RESTORE_CONTEXT)
/**
@ -86,9 +86,9 @@ void ss_power_soc_lpss_enable(void);
*
* This function calls qm_ss_save_context and qm_ss_restore_context
* in order to restore execution where it stopped.
* All power management transitions are done by power_soc_sleep().
* All power management transitions are done by qm_power_soc_sleep().
*/
void ss_power_soc_sleep_restore(void);
void qm_ss_power_soc_sleep_restore(void);
/**
* Enter SoC sleep state and restore after wake up.
*
@ -102,25 +102,25 @@ void ss_power_soc_sleep_restore(void);
*
* This function calls qm_ss_save_context and qm_ss_restore_context
* in order to restore execution where it stopped.
* All power management transitions are done by power_soc_deep_sleep().
* All power management transitions are done by qm_power_soc_deep_sleep().
*/
void ss_power_soc_deep_sleep_restore(void);
void qm_ss_power_soc_deep_sleep_restore(void);
/**
* Save context, enter ARC SS1 power save state and restore after wake up.
*
* This routine is the same as ss_power_soc_sleep_restore(), except that instead of
* This routine is the same as qm_ss_power_soc_sleep_restore(), except that instead of
* going to sleep it will go to SS1 power save state.
* Note: this function contains a while(1) loop that spins until the core
* enters (and exits) sleep; the power state change itself is managed by
* the other core.
*/
void ss_power_sleep_wait(void);
void qm_ss_power_sleep_wait(void);
/**
* Enable the SENSOR startup restore flag.
*/
void power_soc_set_ss_restore_flag(void);
void qm_power_soc_set_ss_restore_flag(void);
#endif /* ENABLE_RESTORE_CONTEXT */
@ -132,7 +132,7 @@ void power_soc_set_ss_restore_flag(void);
* peripherals.<BR>
* This will prevent entry in LPSS when cores are in C2/C2LP and SS2 states.
*/
void ss_power_soc_lpss_disable(void);
void qm_ss_power_soc_lpss_disable(void);
/**
* Enter Sensor SS1 state.
@ -147,7 +147,7 @@ void ss_power_soc_lpss_disable(void);
*
* @param[in] mode Mode selection for SS1 state.
*/
void ss_power_cpu_ss1(const ss_power_cpu_ss1_mode_t mode);
void qm_ss_power_cpu_ss1(const qm_ss_power_cpu_ss1_mode_t mode);
/**
* Enter Sensor SS2 state or SoC LPSS state.
@ -171,7 +171,7 @@ void ss_power_cpu_ss1(const ss_power_cpu_ss1_mode_t mode);
* If Host wakes the SoC from LPSS,
* Sensor also transitions back to SS0.
*/
void ss_power_cpu_ss2(void);
void qm_ss_power_cpu_ss2(void);
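Putting the renamed API together, the sequence documented above for the sensor core is: gate the sensor peripherals, enter SS2 so the SoC can reach LPSS, then restore clock gating after wake-up. A hedged sketch:

static void sensor_lpss_cycle_sketch(void)
{
	qm_ss_power_soc_lpss_enable();  /* prerequisite for LPSS entry */
	qm_ss_power_cpu_ss2();          /* returns on a listed wake event */
	qm_ss_power_soc_lpss_disable(); /* restore peripheral clock gating */
}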
#if (ENABLE_RESTORE_CONTEXT) && (!UNIT_TEST)
/**