ext qmsi: Update to QMSI 1.3 release

Update the QMSI drop we maintain in Zephyr, and fix the build where
needed:

- QM_SCSS_INT is renamed to QM_INTERRUPT_ROUTER;
- every member of QM_INTERRUPT_ROUTER was renamed as well;
- QM_IRQ_* renamed too, mostly added _INT at the end;
- some isr functions were renamed to keep their names consistent;
- build for x86 needs to define QM_LAKEMONT, as QM_SENSOR was for ARC.

Change-Id: I459029ca0d373f6c831e2bb8ebd52402a55994d1
Signed-off-by: Iván Briano <ivan.briano@intel.com>
This commit is contained in:
Iván Briano 2016-10-18 19:23:19 -02:00 committed by Anas Nashif
commit 0094ab228d
77 changed files with 4097 additions and 1056 deletions

View file

@ -12,5 +12,6 @@ OUTPUT_ARCH = iamcu:intel
else else
soc-cflags += $(call cc-option,-mno-iamcu) soc-cflags += $(call cc-option,-mno-iamcu)
endif endif
soc-cflags += -DQM_LAKEMONT=1
SOC_SERIES = quark_d2000 SOC_SERIES = quark_d2000

View file

@ -12,5 +12,6 @@ OUTPUT_ARCH = iamcu:intel
else else
soc-cflags += $(call cc-option,-mno-iamcu) soc-cflags += $(call cc-option,-mno-iamcu)
endif endif
soc-cflags += -DQM_LAKEMONT=1
SOC_SERIES = quark_se SOC_SERIES = quark_se

View file

@ -268,10 +268,10 @@ DEVICE_AND_API_INIT(adc_qmsi, CONFIG_ADC_0_NAME, &adc_qmsi_init,
static void adc_config_irq(void) static void adc_config_irq(void)
{ {
IRQ_CONNECT(QM_IRQ_ADC_0, CONFIG_ADC_0_IRQ_PRI, qm_adc_0_isr, IRQ_CONNECT(QM_IRQ_ADC_0_CAL_INT, CONFIG_ADC_0_IRQ_PRI,
NULL, (IOAPIC_LEVEL | IOAPIC_HIGH)); qm_adc_0_cal_isr, NULL, (IOAPIC_LEVEL | IOAPIC_HIGH));
irq_enable(QM_IRQ_ADC_0); irq_enable(QM_IRQ_ADC_0_CAL_INT);
QM_SCSS_INT->int_adc_calib_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->adc_0_cal_int_mask &= ~BIT(0);
} }

View file

@ -234,7 +234,7 @@ void adc_qmsi_ss_rx_isr(void *arg)
void adc_qmsi_ss_err_isr(void *arg) void adc_qmsi_ss_err_isr(void *arg)
{ {
ARG_UNUSED(arg); ARG_UNUSED(arg);
qm_ss_adc_0_err_isr(NULL); qm_ss_adc_0_error_isr(NULL);
} }
static const struct adc_driver_api api_funcs = { static const struct adc_driver_api api_funcs = {
@ -288,9 +288,10 @@ static void adc_config_irq(void)
adc_qmsi_ss_err_isr, DEVICE_GET(adc_qmsi_ss), 0); adc_qmsi_ss_err_isr, DEVICE_GET(adc_qmsi_ss), 0);
irq_enable(IRQ_ADC_ERR); irq_enable(IRQ_ADC_ERR);
scss_intmask = (uint32_t *)&QM_SCSS_INT->int_ss_adc_err_mask; scss_intmask =
(uint32_t *)&QM_INTERRUPT_ROUTER->ss_adc_0_error_int_mask;
*scss_intmask &= ~BIT(8); *scss_intmask &= ~BIT(8);
scss_intmask = (uint32_t *)&QM_SCSS_INT->int_ss_adc_irq_mask; scss_intmask = (uint32_t *)&QM_INTERRUPT_ROUTER->ss_adc_0_int_mask;
*scss_intmask &= ~BIT(8); *scss_intmask &= ~BIT(8);
} }

View file

@ -52,7 +52,7 @@ static int aio_qmsi_cmp_disable(struct device *dev, uint8_t index)
} }
/* Disable interrupt to host */ /* Disable interrupt to host */
QM_SCSS_INT->int_comparators_host_mask |= (1 << index); QM_INTERRUPT_ROUTER->comparator_0_host_int_mask |= (1 << index);
/* Disable comparator according to index */ /* Disable comparator according to index */
config.int_en &= ~(1 << index); config.int_en &= ~(1 << index);
@ -104,7 +104,7 @@ static int aio_qmsi_cmp_configure(struct device *dev, uint8_t index,
} }
/* Enable Interrupts to host for an specific comparator */ /* Enable Interrupts to host for an specific comparator */
QM_SCSS_INT->int_comparators_host_mask &= ~(1 << index); QM_INTERRUPT_ROUTER->comparator_0_host_int_mask &= ~(1 << index);
return 0; return 0;
} }
@ -123,7 +123,7 @@ static int aio_qmsi_cmp_init(struct device *dev)
aio_cmp_config(dev); aio_cmp_config(dev);
/* Disable all comparator interrupts */ /* Disable all comparator interrupts */
QM_SCSS_INT->int_comparators_host_mask |= INT_COMPARATORS_MASK; QM_INTERRUPT_ROUTER->comparator_0_host_int_mask |= INT_COMPARATORS_MASK;
/* Clear status and dissble all comparators */ /* Clear status and dissble all comparators */
QM_SCSS_CMP->cmp_stat_clr |= INT_COMPARATORS_MASK; QM_SCSS_CMP->cmp_stat_clr |= INT_COMPARATORS_MASK;
@ -144,7 +144,7 @@ static int aio_qmsi_cmp_init(struct device *dev)
dev_data->cb[i].param = NULL; dev_data->cb[i].param = NULL;
} }
irq_enable(QM_IRQ_AC); irq_enable(QM_IRQ_COMPARATOR_0_INT);
return 0; return 0;
} }
@ -183,7 +183,7 @@ static int aio_cmp_config(struct device *dev)
{ {
ARG_UNUSED(dev); ARG_UNUSED(dev);
IRQ_CONNECT(QM_IRQ_AC, CONFIG_AIO_COMPARATOR_0_IRQ_PRI, IRQ_CONNECT(QM_IRQ_COMPARATOR_0_INT, CONFIG_AIO_COMPARATOR_0_IRQ_PRI,
aio_qmsi_cmp_isr, DEVICE_GET(aio_qmsi_cmp), 0); aio_qmsi_cmp_isr, DEVICE_GET(aio_qmsi_cmp), 0);
return 0; return 0;

View file

@ -188,7 +188,7 @@ static uint32_t aonpt_qmsi_get_power_state(struct device *dev)
static int aonpt_suspend_device(struct device *dev) static int aonpt_suspend_device(struct device *dev)
{ {
int_aonpt_mask_save = QM_SCSS_INT->int_aon_timer_mask; int_aonpt_mask_save = QM_INTERRUPT_ROUTER->aonpt_0_int_mask;
aonpt_qmsi_set_power_state(dev, DEVICE_PM_SUSPEND_STATE); aonpt_qmsi_set_power_state(dev, DEVICE_PM_SUSPEND_STATE);
return 0; return 0;
@ -196,7 +196,7 @@ static int aonpt_suspend_device(struct device *dev)
static int aonpt_resume_device_from_suspend(struct device *dev) static int aonpt_resume_device_from_suspend(struct device *dev)
{ {
QM_SCSS_INT->int_aon_timer_mask = int_aonpt_mask_save; QM_INTERRUPT_ROUTER->aonpt_0_int_mask = int_aonpt_mask_save;
aonpt_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE); aonpt_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);
return 0; return 0;
@ -232,12 +232,12 @@ static int aon_timer_init(struct device *dev)
user_cb = NULL; user_cb = NULL;
IRQ_CONNECT(QM_IRQ_AONPT_0, CONFIG_AON_TIMER_IRQ_PRI, IRQ_CONNECT(QM_IRQ_AONPT_0_INT, CONFIG_AON_TIMER_IRQ_PRI,
qm_aonpt_isr_0, NULL, IOAPIC_EDGE | IOAPIC_HIGH); qm_aonpt_0_isr, NULL, IOAPIC_EDGE | IOAPIC_HIGH);
irq_enable(QM_IRQ_AONPT_0); irq_enable(QM_IRQ_AONPT_0_INT);
QM_SCSS_INT->int_aon_timer_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->aonpt_0_int_mask &= ~BIT(0);
aon_reentrancy_init(dev); aon_reentrancy_init(dev);

View file

@ -157,53 +157,53 @@ static void dma_qmsi_config(struct device *dev)
{ {
ARG_UNUSED(dev); ARG_UNUSED(dev);
IRQ_CONNECT(QM_IRQ_DMA_0, CONFIG_DMA_0_IRQ_PRI, IRQ_CONNECT(QM_IRQ_DMA_0_INT_0, CONFIG_DMA_0_IRQ_PRI,
qm_dma_0_isr_0, DEVICE_GET(dma_qmsi), 0); qm_dma_0_isr_0, DEVICE_GET(dma_qmsi), 0);
irq_enable(QM_IRQ_DMA_0); irq_enable(QM_IRQ_DMA_0_INT_0);
QM_SCSS_INT->int_dma_channel_0_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->dma_0_int_0_mask &= ~BIT(0);
IRQ_CONNECT(QM_IRQ_DMA_1, CONFIG_DMA_0_IRQ_PRI, IRQ_CONNECT(QM_IRQ_DMA_0_INT_1, CONFIG_DMA_0_IRQ_PRI,
qm_dma_0_isr_1, DEVICE_GET(dma_qmsi), 0); qm_dma_0_isr_1, DEVICE_GET(dma_qmsi), 0);
irq_enable(QM_IRQ_DMA_1); irq_enable(QM_IRQ_DMA_0_INT_1);
QM_SCSS_INT->int_dma_channel_1_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->dma_0_int_1_mask &= ~BIT(0);
#if (CONFIG_SOC_QUARK_SE_C1000) #if (CONFIG_SOC_QUARK_SE_C1000)
IRQ_CONNECT(QM_IRQ_DMA_2, CONFIG_DMA_0_IRQ_PRI, IRQ_CONNECT(QM_IRQ_DMA_0_INT_2, CONFIG_DMA_0_IRQ_PRI,
qm_dma_0_isr_2, DEVICE_GET(dma_qmsi), 0); qm_dma_0_isr_2, DEVICE_GET(dma_qmsi), 0);
irq_enable(QM_IRQ_DMA_2); irq_enable(QM_IRQ_DMA_0_INT_2);
QM_SCSS_INT->int_dma_channel_2_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->dma_0_int_2_mask &= ~BIT(0);
IRQ_CONNECT(QM_IRQ_DMA_3, CONFIG_DMA_0_IRQ_PRI, IRQ_CONNECT(QM_IRQ_DMA_0_INT_3, CONFIG_DMA_0_IRQ_PRI,
qm_dma_0_isr_3, DEVICE_GET(dma_qmsi), 0); qm_dma_0_isr_3, DEVICE_GET(dma_qmsi), 0);
irq_enable(QM_IRQ_DMA_3); irq_enable(QM_IRQ_DMA_0_INT_3);
QM_SCSS_INT->int_dma_channel_3_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->dma_0_int_3_mask &= ~BIT(0);
IRQ_CONNECT(QM_IRQ_DMA_4, CONFIG_DMA_0_IRQ_PRI, IRQ_CONNECT(QM_IRQ_DMA_0_INT_4, CONFIG_DMA_0_IRQ_PRI,
qm_dma_0_isr_4, DEVICE_GET(dma_qmsi), 0); qm_dma_0_isr_4, DEVICE_GET(dma_qmsi), 0);
irq_enable(QM_IRQ_DMA_4); irq_enable(QM_IRQ_DMA_0_INT_4);
QM_SCSS_INT->int_dma_channel_4_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->dma_0_int_4_mask &= ~BIT(0);
IRQ_CONNECT(QM_IRQ_DMA_5, CONFIG_DMA_0_IRQ_PRI, IRQ_CONNECT(QM_IRQ_DMA_0_INT_5, CONFIG_DMA_0_IRQ_PRI,
qm_dma_0_isr_5, DEVICE_GET(dma_qmsi), 0); qm_dma_0_isr_5, DEVICE_GET(dma_qmsi), 0);
irq_enable(QM_IRQ_DMA_5); irq_enable(QM_IRQ_DMA_0_INT_5);
QM_SCSS_INT->int_dma_channel_5_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->dma_0_int_5_mask &= ~BIT(0);
IRQ_CONNECT(QM_IRQ_DMA_6, CONFIG_DMA_0_IRQ_PRI, IRQ_CONNECT(QM_IRQ_DMA_0_INT_6, CONFIG_DMA_0_IRQ_PRI,
qm_dma_0_isr_6, DEVICE_GET(dma_qmsi), 0); qm_dma_0_isr_6, DEVICE_GET(dma_qmsi), 0);
irq_enable(QM_IRQ_DMA_6); irq_enable(QM_IRQ_DMA_0_INT_6);
QM_SCSS_INT->int_dma_channel_6_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->dma_0_int_6_mask &= ~BIT(0);
IRQ_CONNECT(QM_IRQ_DMA_7, CONFIG_DMA_0_IRQ_PRI, IRQ_CONNECT(QM_IRQ_DMA_0_INT_7, CONFIG_DMA_0_IRQ_PRI,
qm_dma_0_isr_7, DEVICE_GET(dma_qmsi), 0); qm_dma_0_isr_7, DEVICE_GET(dma_qmsi), 0);
irq_enable(QM_IRQ_DMA_7); irq_enable(QM_IRQ_DMA_0_INT_7);
QM_SCSS_INT->int_dma_channel_7_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->dma_0_int_7_mask &= ~BIT(0);
#endif /* CONFIG_SOC_QUARK_SE_C1000 */ #endif /* CONFIG_SOC_QUARK_SE_C1000 */
IRQ_CONNECT(QM_IRQ_DMA_ERR, CONFIG_DMA_0_IRQ_PRI, IRQ_CONNECT(QM_IRQ_DMA_0_ERROR_INT, CONFIG_DMA_0_IRQ_PRI,
qm_dma_0_isr_err, DEVICE_GET(dma_qmsi), 0); qm_dma_0_error_isr, DEVICE_GET(dma_qmsi), 0);
irq_enable(QM_IRQ_DMA_ERR); irq_enable(QM_IRQ_DMA_0_ERROR_INT);
QM_SCSS_INT->int_dma_error_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->dma_0_error_int_mask &= ~BIT(0);
} }

View file

@ -116,7 +116,7 @@ static uint32_t int_gpio_mask_save;
static int gpio_suspend_device(struct device *dev) static int gpio_suspend_device(struct device *dev)
{ {
int_gpio_mask_save = REG_VAL(&QM_SCSS_INT->int_gpio_mask); int_gpio_mask_save = REG_VAL(&QM_INTERRUPT_ROUTER->gpio_0_int_mask);
save_reg[0] = REG_VAL(&QM_GPIO[QM_GPIO_0]->gpio_swporta_dr); save_reg[0] = REG_VAL(&QM_GPIO[QM_GPIO_0]->gpio_swporta_dr);
save_reg[1] = REG_VAL(&QM_GPIO[QM_GPIO_0]->gpio_swporta_ddr); save_reg[1] = REG_VAL(&QM_GPIO[QM_GPIO_0]->gpio_swporta_ddr);
save_reg[2] = REG_VAL(&QM_GPIO[QM_GPIO_0]->gpio_swporta_ctl); save_reg[2] = REG_VAL(&QM_GPIO[QM_GPIO_0]->gpio_swporta_ctl);
@ -145,7 +145,7 @@ static int gpio_resume_device_from_suspend(struct device *dev)
REG_VAL(&QM_GPIO[QM_GPIO_0]->gpio_debounce) = save_reg[7]; REG_VAL(&QM_GPIO[QM_GPIO_0]->gpio_debounce) = save_reg[7];
REG_VAL(&QM_GPIO[QM_GPIO_0]->gpio_ls_sync) = save_reg[8]; REG_VAL(&QM_GPIO[QM_GPIO_0]->gpio_ls_sync) = save_reg[8];
REG_VAL(&QM_GPIO[QM_GPIO_0]->gpio_int_bothedge) = save_reg[9]; REG_VAL(&QM_GPIO[QM_GPIO_0]->gpio_int_bothedge) = save_reg[9];
REG_VAL(&QM_SCSS_INT->int_gpio_mask) = int_gpio_mask_save; REG_VAL(&QM_INTERRUPT_ROUTER->gpio_0_int_mask) = int_gpio_mask_save;
gpio_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE); gpio_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);
@ -193,14 +193,16 @@ static uint32_t int_gpio_aon_mask_save;
static int gpio_aon_suspend_device(struct device *dev) static int gpio_aon_suspend_device(struct device *dev)
{ {
int_gpio_aon_mask_save = REG_VAL(&QM_SCSS_INT->int_aon_gpio_mask); int_gpio_aon_mask_save =
REG_VAL(&QM_INTERRUPT_ROUTER->aon_gpio_0_int_mask);
gpio_qmsi_set_power_state(dev, DEVICE_PM_SUSPEND_STATE); gpio_qmsi_set_power_state(dev, DEVICE_PM_SUSPEND_STATE);
return 0; return 0;
} }
static int gpio_aon_resume_device_from_suspend(struct device *dev) static int gpio_aon_resume_device_from_suspend(struct device *dev)
{ {
REG_VAL(&QM_SCSS_INT->int_aon_gpio_mask) = int_gpio_aon_mask_save; REG_VAL(&QM_INTERRUPT_ROUTER->aon_gpio_0_int_mask) =
int_gpio_aon_mask_save;
gpio_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE); gpio_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);
return 0; return 0;
} }
@ -460,18 +462,18 @@ static int gpio_qmsi_init(struct device *port)
CLK_PERIPH_GPIO_INTERRUPT | CLK_PERIPH_GPIO_INTERRUPT |
CLK_PERIPH_GPIO_DB | CLK_PERIPH_GPIO_DB |
CLK_PERIPH_CLK); CLK_PERIPH_CLK);
IRQ_CONNECT(QM_IRQ_GPIO_0, CONFIG_GPIO_QMSI_0_IRQ_PRI, IRQ_CONNECT(QM_IRQ_GPIO_0_INT, CONFIG_GPIO_QMSI_0_IRQ_PRI,
qm_gpio_isr_0, 0, IOAPIC_LEVEL | IOAPIC_HIGH); qm_gpio_0_isr, 0, IOAPIC_LEVEL | IOAPIC_HIGH);
irq_enable(QM_IRQ_GPIO_0); irq_enable(QM_IRQ_GPIO_0_INT);
QM_SCSS_INT->int_gpio_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->gpio_0_int_mask &= ~BIT(0);
break; break;
#ifdef CONFIG_GPIO_QMSI_1 #ifdef CONFIG_GPIO_QMSI_1
case QM_AON_GPIO_0: case QM_AON_GPIO_0:
IRQ_CONNECT(QM_IRQ_AONGPIO_0, IRQ_CONNECT(QM_IRQ_AON_GPIO_0_INT,
CONFIG_GPIO_QMSI_1_IRQ_PRI, qm_aon_gpio_isr_0, CONFIG_GPIO_QMSI_1_IRQ_PRI, qm_aon_gpio_0_isr,
0, IOAPIC_LEVEL | IOAPIC_HIGH); 0, IOAPIC_LEVEL | IOAPIC_HIGH);
irq_enable(QM_IRQ_AONGPIO_0); irq_enable(QM_IRQ_AON_GPIO_0_INT);
QM_SCSS_INT->int_aon_gpio_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->aon_gpio_0_int_mask &= ~BIT(0);
break; break;
#endif /* CONFIG_GPIO_QMSI_1 */ #endif /* CONFIG_GPIO_QMSI_1 */
default: default:

View file

@ -331,9 +331,9 @@ void ss_gpio_isr(void *arg)
port->config->config_info; port->config->config_info;
if (gpio_config->gpio == QM_SS_GPIO_0) { if (gpio_config->gpio == QM_SS_GPIO_0) {
qm_ss_gpio_isr_0(NULL); qm_ss_gpio_0_isr(NULL);
} else { } else {
qm_ss_gpio_isr_1(NULL); qm_ss_gpio_1_isr(NULL);
} }
} }
@ -356,7 +356,7 @@ static int ss_gpio_qmsi_init(struct device *port)
ss_clk_gpio_enable(QM_SS_GPIO_0); ss_clk_gpio_enable(QM_SS_GPIO_0);
scss_intmask = scss_intmask =
(uint32_t *)&QM_SCSS_INT->int_ss_gpio_0_intr_mask; (uint32_t *)&QM_INTERRUPT_ROUTER->ss_gpio_0_int_mask;
*scss_intmask &= ~BIT(8); *scss_intmask &= ~BIT(8);
break; break;
#endif /* CONFIG_GPIO_QMSI_0 */ #endif /* CONFIG_GPIO_QMSI_0 */
@ -370,7 +370,7 @@ static int ss_gpio_qmsi_init(struct device *port)
ss_clk_gpio_enable(QM_SS_GPIO_1); ss_clk_gpio_enable(QM_SS_GPIO_1);
scss_intmask = scss_intmask =
(uint32_t *)&QM_SCSS_INT->int_ss_gpio_1_intr_mask; (uint32_t *)&QM_INTERRUPT_ROUTER->ss_gpio_1_int_mask;
*scss_intmask &= ~BIT(8); *scss_intmask &= ~BIT(8);
break; break;
#endif /* CONFIG_GPIO_QMSI_1 */ #endif /* CONFIG_GPIO_QMSI_1 */

View file

@ -118,9 +118,11 @@ static int i2c_suspend_device(struct device *dev)
ctx_save->ic_dma_rdlr = regs->ic_dma_rdlr; ctx_save->ic_dma_rdlr = regs->ic_dma_rdlr;
if (instance == QM_I2C_0) { if (instance == QM_I2C_0) {
ctx_save->int_i2c_mst_mask = QM_SCSS_INT->int_i2c_mst_0_mask; ctx_save->int_i2c_mst_mask =
QM_INTERRUPT_ROUTER->i2c_master_0_int_mask;
} else { } else {
ctx_save->int_i2c_mst_mask = QM_SCSS_INT->int_i2c_mst_1_mask; ctx_save->int_i2c_mst_mask =
QM_INTERRUPT_ROUTER->i2c_master_1_int_mask;
} }
i2c_qmsi_set_power_state(dev, DEVICE_PM_SUSPEND_STATE); i2c_qmsi_set_power_state(dev, DEVICE_PM_SUSPEND_STATE);
@ -153,9 +155,11 @@ static int i2c_resume_device_from_suspend(struct device *dev)
regs->ic_dma_rdlr = ctx_save->ic_dma_rdlr; regs->ic_dma_rdlr = ctx_save->ic_dma_rdlr;
if (config->instance == QM_I2C_0) { if (config->instance == QM_I2C_0) {
QM_SCSS_INT->int_i2c_mst_0_mask = ctx_save->int_i2c_mst_mask; QM_INTERRUPT_ROUTER->i2c_master_0_int_mask =
ctx_save->int_i2c_mst_mask;
} else { } else {
QM_SCSS_INT->int_i2c_mst_1_mask = ctx_save->int_i2c_mst_mask; QM_INTERRUPT_ROUTER->i2c_master_1_int_mask =
ctx_save->int_i2c_mst_mask;
} }
i2c_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE); i2c_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);
@ -348,20 +352,20 @@ static int i2c_qmsi_init(struct device *dev)
/* Register interrupt handler, unmask IRQ and route it /* Register interrupt handler, unmask IRQ and route it
* to Lakemont core. * to Lakemont core.
*/ */
IRQ_CONNECT(QM_IRQ_I2C_0, IRQ_CONNECT(QM_IRQ_I2C_0_INT,
CONFIG_I2C_0_IRQ_PRI, qm_i2c_0_isr, NULL, CONFIG_I2C_0_IRQ_PRI, qm_i2c_0_isr, NULL,
(IOAPIC_LEVEL | IOAPIC_HIGH)); (IOAPIC_LEVEL | IOAPIC_HIGH));
irq_enable(QM_IRQ_I2C_0); irq_enable(QM_IRQ_I2C_0_INT);
QM_SCSS_INT->int_i2c_mst_0_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->i2c_master_0_int_mask &= ~BIT(0);
break; break;
#ifdef CONFIG_I2C_1 #ifdef CONFIG_I2C_1
case QM_I2C_1: case QM_I2C_1:
IRQ_CONNECT(QM_IRQ_I2C_1, IRQ_CONNECT(QM_IRQ_I2C_1_INT,
CONFIG_I2C_1_IRQ_PRI, qm_i2c_1_isr, NULL, CONFIG_I2C_1_IRQ_PRI, qm_i2c_1_isr, NULL,
(IOAPIC_LEVEL | IOAPIC_HIGH)); (IOAPIC_LEVEL | IOAPIC_HIGH));
irq_enable(QM_IRQ_I2C_1); irq_enable(QM_IRQ_I2C_1_INT);
QM_SCSS_INT->int_i2c_mst_1_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->i2c_master_1_int_mask &= ~BIT(0);
break; break;
#endif /* CONFIG_I2C_1 */ #endif /* CONFIG_I2C_1 */

View file

@ -50,9 +50,9 @@ static void i2c_qmsi_ss_isr(void *arg)
qm_ss_i2c_t instance = GET_CONTROLLER_INSTANCE(dev); qm_ss_i2c_t instance = GET_CONTROLLER_INSTANCE(dev);
if (instance == QM_SS_I2C_0) { if (instance == QM_SS_I2C_0) {
qm_ss_i2c_isr_0(NULL); qm_ss_i2c_0_isr(NULL);
} else { } else {
qm_ss_i2c_isr_1(NULL); qm_ss_i2c_1_isr(NULL);
} }
} }

View file

@ -404,7 +404,7 @@ static int pwm_qmsi_suspend(struct device *dev)
int i; int i;
pwm_ctx_save.int_pwm_timer_mask = pwm_ctx_save.int_pwm_timer_mask =
QM_SCSS_INT->int_pwm_timer_mask; QM_INTERRUPT_ROUTER->pwm_0_int_mask;
for (i = 0; i < CONFIG_PWM_QMSI_NUM_PORTS; i++) { for (i = 0; i < CONFIG_PWM_QMSI_NUM_PORTS; i++) {
qm_pwm_channel_t *channel; qm_pwm_channel_t *channel;
struct pwm_channel_ctx *channel_save; struct pwm_channel_ctx *channel_save;
@ -433,7 +433,7 @@ static int pwm_qmsi_resume_from_suspend(struct device *dev)
channel->controlreg = channel_save->controlreg; channel->controlreg = channel_save->controlreg;
QM_PWM->timer_loadcount2[i] = channel_save->loadcount2; QM_PWM->timer_loadcount2[i] = channel_save->loadcount2;
} }
QM_SCSS_INT->int_pwm_timer_mask = pwm_ctx_save.int_pwm_timer_mask; QM_INTERRUPT_ROUTER->pwm_0_int_mask = pwm_ctx_save.int_pwm_timer_mask;
pwm_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE); pwm_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);
return 0; return 0;
} }

View file

@ -166,15 +166,15 @@ static int rtc_qmsi_init(struct device *dev)
{ {
rtc_reentrancy_init(dev); rtc_reentrancy_init(dev);
IRQ_CONNECT(QM_IRQ_RTC_0, CONFIG_RTC_0_IRQ_PRI, IRQ_CONNECT(QM_IRQ_RTC_0_INT, CONFIG_RTC_0_IRQ_PRI,
qm_rtc_isr_0, NULL, qm_rtc_0_isr, NULL,
IOAPIC_EDGE | IOAPIC_HIGH); IOAPIC_EDGE | IOAPIC_HIGH);
/* Unmask RTC interrupt */ /* Unmask RTC interrupt */
irq_enable(QM_IRQ_RTC_0); irq_enable(QM_IRQ_RTC_0_INT);
/* Route RTC interrupt to Lakemont */ /* Route RTC interrupt to Lakemont */
QM_SCSS_INT->int_rtc_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->rtc_0_int_mask &= ~BIT(0);
rtc_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE); rtc_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);
@ -186,14 +186,14 @@ static uint32_t int_rtc_mask_save;
static int rtc_suspend_device(struct device *dev) static int rtc_suspend_device(struct device *dev)
{ {
int_rtc_mask_save = QM_SCSS_INT->int_rtc_mask; int_rtc_mask_save = QM_INTERRUPT_ROUTER->rtc_0_int_mask;
rtc_qmsi_set_power_state(dev, DEVICE_PM_SUSPEND_STATE); rtc_qmsi_set_power_state(dev, DEVICE_PM_SUSPEND_STATE);
return 0; return 0;
} }
static int rtc_resume_device(struct device *dev) static int rtc_resume_device(struct device *dev)
{ {
QM_SCSS_INT->int_rtc_mask = int_rtc_mask_save; QM_INTERRUPT_ROUTER->rtc_0_int_mask = int_rtc_mask_save;
rtc_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE); rtc_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);
return 0; return 0;
} }

View file

@ -35,12 +35,12 @@
* receive it. * receive it.
*/ */
#ifdef CONFIG_SOC_QUARK_SE_C1000_SS #ifdef CONFIG_SOC_QUARK_SE_C1000_SS
# define UART0_IRQ QM_IRQ_UART_0_VECTOR # define UART0_IRQ QM_IRQ_UART_0_INT_VECTOR
# define UART1_IRQ QM_IRQ_UART_1_VECTOR # define UART1_IRQ QM_IRQ_UART_1_INT_VECTOR
# define SCSS_IRQ_ROUTING_MASK BIT(8) # define SCSS_IRQ_ROUTING_MASK BIT(8)
#else #else
# define UART0_IRQ QM_IRQ_UART_0 # define UART0_IRQ QM_IRQ_UART_0_INT
# define UART1_IRQ QM_IRQ_UART_1 # define UART1_IRQ QM_IRQ_UART_1_INT
# define SCSS_IRQ_ROUTING_MASK BIT(0) # define SCSS_IRQ_ROUTING_MASK BIT(0)
#endif #endif
@ -123,9 +123,9 @@ static int uart_suspend_device(struct device *dev)
struct uart_context_t *const ctx_save = &drv_data->ctx_save; struct uart_context_t *const ctx_save = &drv_data->ctx_save;
if (config->instance == QM_UART_0) { if (config->instance == QM_UART_0) {
ctx_save->int_uart_mask = QM_SCSS_INT->int_uart_0_mask; ctx_save->int_uart_mask = QM_INTERRUPT_ROUTER->uart_0_int_mask;
} else { } else {
ctx_save->int_uart_mask = QM_SCSS_INT->int_uart_1_mask; ctx_save->int_uart_mask = QM_INTERRUPT_ROUTER->uart_1_int_mask;
} }
ctx_save->ier = regs->ier_dlh; ctx_save->ier = regs->ier_dlh;
@ -156,9 +156,9 @@ static int uart_resume_device_from_suspend(struct device *dev)
clk_periph_enable(config->clock_gate); clk_periph_enable(config->clock_gate);
if (config->instance == QM_UART_0) { if (config->instance == QM_UART_0) {
QM_SCSS_INT->int_uart_0_mask = ctx_save->int_uart_mask; QM_INTERRUPT_ROUTER->uart_0_int_mask = ctx_save->int_uart_mask;
} else { } else {
QM_SCSS_INT->int_uart_1_mask = ctx_save->int_uart_mask; QM_INTERRUPT_ROUTER->uart_1_int_mask = ctx_save->int_uart_mask;
} }
/* When DLAB is set, DLL and DLH registers can be accessed. */ /* When DLAB is set, DLL and DLH registers can be accessed. */
@ -459,7 +459,7 @@ static void irq_config_func_0(struct device *dev)
uart_qmsi_isr, DEVICE_GET(uart_0), uart_qmsi_isr, DEVICE_GET(uart_0),
UART_IRQ_FLAGS); UART_IRQ_FLAGS);
irq_enable(UART0_IRQ); irq_enable(UART0_IRQ);
QM_SCSS_INT->int_uart_0_mask &= ~SCSS_IRQ_ROUTING_MASK; QM_INTERRUPT_ROUTER->uart_0_int_mask &= ~SCSS_IRQ_ROUTING_MASK;
} }
#endif /* CONFIG_UART_QMSI_0 */ #endif /* CONFIG_UART_QMSI_0 */
@ -470,7 +470,7 @@ static void irq_config_func_1(struct device *dev)
uart_qmsi_isr, DEVICE_GET(uart_1), uart_qmsi_isr, DEVICE_GET(uart_1),
UART_IRQ_FLAGS); UART_IRQ_FLAGS);
irq_enable(UART1_IRQ); irq_enable(UART1_IRQ);
QM_SCSS_INT->int_uart_1_mask &= ~SCSS_IRQ_ROUTING_MASK; QM_INTERRUPT_ROUTER->uart_1_int_mask &= ~SCSS_IRQ_ROUTING_MASK;
} }
#endif /* CONFIG_UART_QMSI_1 */ #endif /* CONFIG_UART_QMSI_1 */
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #endif /* CONFIG_UART_INTERRUPT_DRIVEN */

View file

@ -268,22 +268,22 @@ static int spi_qmsi_init(struct device *dev)
switch (spi_config->spi) { switch (spi_config->spi) {
case QM_SPI_MST_0: case QM_SPI_MST_0:
IRQ_CONNECT(QM_IRQ_SPI_MASTER_0, IRQ_CONNECT(QM_IRQ_SPI_MASTER_0_INT,
CONFIG_SPI_0_IRQ_PRI, qm_spi_master_0_isr, CONFIG_SPI_0_IRQ_PRI, qm_spi_master_0_isr,
0, IOAPIC_LEVEL | IOAPIC_HIGH); 0, IOAPIC_LEVEL | IOAPIC_HIGH);
irq_enable(QM_IRQ_SPI_MASTER_0); irq_enable(QM_IRQ_SPI_MASTER_0_INT);
clk_periph_enable(CLK_PERIPH_CLK | CLK_PERIPH_SPI_M0_REGISTER); clk_periph_enable(CLK_PERIPH_CLK | CLK_PERIPH_SPI_M0_REGISTER);
QM_SCSS_INT->int_spi_mst_0_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->spi_master_0_int_mask &= ~BIT(0);
break; break;
#ifdef CONFIG_SPI_1 #ifdef CONFIG_SPI_1
case QM_SPI_MST_1: case QM_SPI_MST_1:
IRQ_CONNECT(QM_IRQ_SPI_MASTER_1, IRQ_CONNECT(QM_IRQ_SPI_MASTER_1_INT,
CONFIG_SPI_1_IRQ_PRI, qm_spi_master_1_isr, CONFIG_SPI_1_IRQ_PRI, qm_spi_master_1_isr,
0, IOAPIC_LEVEL | IOAPIC_HIGH); 0, IOAPIC_LEVEL | IOAPIC_HIGH);
irq_enable(QM_IRQ_SPI_MASTER_1); irq_enable(QM_IRQ_SPI_MASTER_1_INT);
clk_periph_enable(CLK_PERIPH_CLK | CLK_PERIPH_SPI_M1_REGISTER); clk_periph_enable(CLK_PERIPH_CLK | CLK_PERIPH_SPI_M1_REGISTER);
QM_SCSS_INT->int_spi_mst_1_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->spi_master_1_int_mask &= ~BIT(0);
break; break;
#endif /* CONFIG_SPI_1 */ #endif /* CONFIG_SPI_1 */
@ -316,9 +316,11 @@ static int spi_master_suspend_device(struct device *dev)
struct spi_context_t *const ctx_save = &drv_data->ctx_save; struct spi_context_t *const ctx_save = &drv_data->ctx_save;
if (config->spi == QM_SPI_MST_0) { if (config->spi == QM_SPI_MST_0) {
ctx_save->int_spi_mask = QM_SCSS_INT->int_spi_mst_0_mask; ctx_save->int_spi_mask =
QM_INTERRUPT_ROUTER->spi_master_0_int_mask;
} else { } else {
ctx_save->int_spi_mask = QM_SCSS_INT->int_spi_mst_1_mask; ctx_save->int_spi_mask =
QM_INTERRUPT_ROUTER->spi_master_1_int_mask;
} }
ctx_save->ctrlr0 = regs->ctrlr0; ctx_save->ctrlr0 = regs->ctrlr0;
@ -337,9 +339,11 @@ static int spi_master_resume_device_from_suspend(struct device *dev)
struct spi_context_t *const ctx_save = &drv_data->ctx_save; struct spi_context_t *const ctx_save = &drv_data->ctx_save;
if (config->spi == QM_SPI_MST_0) { if (config->spi == QM_SPI_MST_0) {
QM_SCSS_INT->int_spi_mst_0_mask = ctx_save->int_spi_mask; QM_INTERRUPT_ROUTER->spi_master_0_int_mask =
ctx_save->int_spi_mask;
} else { } else {
QM_SCSS_INT->int_spi_mst_1_mask = ctx_save->int_spi_mask; QM_INTERRUPT_ROUTER->spi_master_1_int_mask =
ctx_save->int_spi_mask;
} }
regs->ctrlr0 = ctx_save->ctrlr0; regs->ctrlr0 = ctx_save->ctrlr0;
regs->ser = ctx_save->ser; regs->ser = ctx_save->ser;

View file

@ -288,9 +288,9 @@ static void ss_spi_err_isr(void *arg)
const struct ss_spi_qmsi_config *spi_config = dev->config->config_info; const struct ss_spi_qmsi_config *spi_config = dev->config->config_info;
if (spi_config->spi == QM_SS_SPI_0) { if (spi_config->spi == QM_SS_SPI_0) {
qm_ss_spi_0_err_isr(NULL); qm_ss_spi_0_error_isr(NULL);
} else { } else {
qm_ss_spi_1_err_isr(NULL); qm_ss_spi_1_error_isr(NULL);
} }
} }
@ -300,9 +300,9 @@ static void ss_spi_rx_isr(void *arg)
const struct ss_spi_qmsi_config *spi_config = dev->config->config_info; const struct ss_spi_qmsi_config *spi_config = dev->config->config_info;
if (spi_config->spi == QM_SS_SPI_0) { if (spi_config->spi == QM_SS_SPI_0) {
qm_ss_spi_0_rx_isr(NULL); qm_ss_spi_0_rx_avail_isr(NULL);
} else { } else {
qm_ss_spi_1_rx_isr(NULL); qm_ss_spi_1_rx_avail_isr(NULL);
} }
} }
@ -312,9 +312,9 @@ static void ss_spi_tx_isr(void *arg)
const struct ss_spi_qmsi_config *spi_config = dev->config->config_info; const struct ss_spi_qmsi_config *spi_config = dev->config->config_info;
if (spi_config->spi == QM_SS_SPI_0) { if (spi_config->spi == QM_SS_SPI_0) {
qm_ss_spi_0_tx_isr(NULL); qm_ss_spi_0_tx_req_isr(NULL);
} else { } else {
qm_ss_spi_1_tx_isr(NULL); qm_ss_spi_1_tx_req_isr(NULL);
} }
} }
@ -342,7 +342,7 @@ static int ss_spi_qmsi_init(struct device *dev)
ss_clk_spi_enable(0); ss_clk_spi_enable(0);
/* Route SPI interrupts to Sensor Subsystem */ /* Route SPI interrupts to Sensor Subsystem */
scss_intmask = (uint32_t *)&QM_SCSS_INT->int_ss_spi_0; scss_intmask = (uint32_t *)&QM_INTERRUPT_ROUTER->ss_spi_0_int;
*scss_intmask &= ~BIT(8); *scss_intmask &= ~BIT(8);
scss_intmask++; scss_intmask++;
*scss_intmask &= ~BIT(8); *scss_intmask &= ~BIT(8);
@ -368,7 +368,7 @@ static int ss_spi_qmsi_init(struct device *dev)
ss_clk_spi_enable(1); ss_clk_spi_enable(1);
/* Route SPI interrupts to Sensor Subsystem */ /* Route SPI interrupts to Sensor Subsystem */
scss_intmask = (uint32_t *)&QM_SCSS_INT->int_ss_spi_1; scss_intmask = (uint32_t *)&QM_INTERRUPT_ROUTER->ss_spi_1_int;
*scss_intmask &= ~BIT(8); *scss_intmask &= ~BIT(8);
scss_intmask++; scss_intmask++;
*scss_intmask &= ~BIT(8); *scss_intmask &= ~BIT(8);

View file

@ -71,7 +71,7 @@ static struct usb_dw_ctrl_prv usb_dw_ctrl;
static inline void _usb_dw_int_unmask(void) static inline void _usb_dw_int_unmask(void)
{ {
#if defined(CONFIG_SOC_QUARK_SE_C1000) #if defined(CONFIG_SOC_QUARK_SE_C1000)
QM_SCSS_INT->int_usb_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->usb_0_int_mask &= ~BIT(0);
#endif #endif
} }

View file

@ -206,7 +206,7 @@ struct usb_dw_reg {
#if defined(CONFIG_SOC_QUARK_SE_C1000) #if defined(CONFIG_SOC_QUARK_SE_C1000)
#define USB_DW_BASE QM_USB_0_BASE #define USB_DW_BASE QM_USB_0_BASE
#define USB_DW_IRQ QM_IRQ_USB_0 #define USB_DW_IRQ QM_IRQ_USB_0_INT
#else #else
#error "Unsupported board" #error "Unsupported board"
#endif #endif

View file

@ -172,7 +172,7 @@ static int wdt_suspend_device(struct device *dev)
wdt_ctx_save.wdt_torr = QM_WDT[QM_WDT_0].wdt_torr; wdt_ctx_save.wdt_torr = QM_WDT[QM_WDT_0].wdt_torr;
wdt_ctx_save.wdt_cr = QM_WDT[QM_WDT_0].wdt_cr; wdt_ctx_save.wdt_cr = QM_WDT[QM_WDT_0].wdt_cr;
wdt_ctx_save.int_watchdog_mask = wdt_ctx_save.int_watchdog_mask =
QM_SCSS_INT->int_watchdog_mask; QM_INTERRUPT_ROUTER->wdt_0_int_mask;
wdt_qmsi_set_power_state(dev, DEVICE_PM_SUSPEND_STATE); wdt_qmsi_set_power_state(dev, DEVICE_PM_SUSPEND_STATE);
@ -186,7 +186,7 @@ static int wdt_resume_device_from_suspend(struct device *dev)
*/ */
QM_WDT[QM_WDT_0].wdt_torr = wdt_ctx_save.wdt_torr; QM_WDT[QM_WDT_0].wdt_torr = wdt_ctx_save.wdt_torr;
QM_WDT[QM_WDT_0].wdt_cr = wdt_ctx_save.wdt_cr; QM_WDT[QM_WDT_0].wdt_cr = wdt_ctx_save.wdt_cr;
QM_SCSS_INT->int_watchdog_mask = wdt_ctx_save.int_watchdog_mask; QM_INTERRUPT_ROUTER->wdt_0_int_mask = wdt_ctx_save.int_watchdog_mask;
wdt_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE); wdt_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);
@ -221,14 +221,14 @@ static int init(struct device *dev)
{ {
wdt_reentrancy_init(dev); wdt_reentrancy_init(dev);
IRQ_CONNECT(QM_IRQ_WDT_0, CONFIG_WDT_0_IRQ_PRI, IRQ_CONNECT(QM_IRQ_WDT_0_INT, CONFIG_WDT_0_IRQ_PRI,
qm_wdt_isr_0, 0, IOAPIC_EDGE | IOAPIC_HIGH); qm_wdt_0_isr, 0, IOAPIC_EDGE | IOAPIC_HIGH);
/* Unmask watchdog interrupt */ /* Unmask watchdog interrupt */
irq_enable(QM_IRQ_WDT_0); irq_enable(QM_IRQ_WDT_0_INT);
/* Route watchdog interrupt to Lakemont */ /* Route watchdog interrupt to Lakemont */
QM_SCSS_INT->int_watchdog_mask &= ~BIT(0); QM_INTERRUPT_ROUTER->wdt_0_int_mask &= ~BIT(0);
wdt_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE); wdt_qmsi_set_power_state(dev, DEVICE_PM_ACTIVE_STATE);

View file

@ -8,7 +8,7 @@ Microcontroller products. It currently supports the following SoCs:
- Intel® Quark™ D2000 Microcontroller - Intel® Quark™ D2000 Microcontroller
- Intel® Quark™ SE Microcontroller - Intel® Quark™ SE Microcontroller
The current version supported in Zephyr is QMSI 1.2.0. See: The current version supported in Zephyr is QMSI 1.3.0. See:
https://github.com/quark-mcu/qmsi/releases https://github.com/quark-mcu/qmsi/releases

View file

@ -190,19 +190,19 @@ static void qm_adc_pwr_0_isr_handler(const qm_adc_t adc)
} }
/* ISR for ADC 0 Command/Calibration Complete. */ /* ISR for ADC 0 Command/Calibration Complete. */
QM_ISR_DECLARE(qm_adc_0_isr) QM_ISR_DECLARE(qm_adc_0_cal_isr)
{ {
qm_adc_isr_handler(QM_ADC_0); qm_adc_isr_handler(QM_ADC_0);
QM_ISR_EOI(QM_IRQ_ADC_0_VECTOR); QM_ISR_EOI(QM_IRQ_ADC_0_CAL_INT_VECTOR);
} }
/* ISR for ADC 0 Mode Change. */ /* ISR for ADC 0 Mode Change. */
QM_ISR_DECLARE(qm_adc_pwr_0_isr) QM_ISR_DECLARE(qm_adc_0_pwr_isr)
{ {
qm_adc_pwr_0_isr_handler(QM_ADC_0); qm_adc_pwr_0_isr_handler(QM_ADC_0);
QM_ISR_EOI(QM_IRQ_ADC_PWR_0_VECTOR); QM_ISR_EOI(QM_IRQ_ADC_0_PWR_0_VECTOR);
} }
static void setup_seq_table(const qm_adc_t adc, qm_adc_xfer_t *xfer) static void setup_seq_table(const qm_adc_t adc, qm_adc_xfer_t *xfer)

View file

@ -225,7 +225,7 @@ QM_ISR_DECLARE(qm_ss_adc_0_isr)
} }
/* ISR for SS ADC 0 Error. */ /* ISR for SS ADC 0 Error. */
QM_ISR_DECLARE(qm_ss_adc_0_err_isr) QM_ISR_DECLARE(qm_ss_adc_0_error_isr)
{ {
qm_ss_adc_isr_err_handler(QM_SS_ADC_0); qm_ss_adc_isr_err_handler(QM_SS_ADC_0);
} }
@ -392,9 +392,9 @@ int qm_ss_adc_set_mode(const qm_ss_adc_t adc, const qm_ss_adc_mode_t mode)
QM_CHECK(mode <= QM_SS_ADC_MODE_NORM_NO_CAL, -EINVAL); QM_CHECK(mode <= QM_SS_ADC_MODE_NORM_NO_CAL, -EINVAL);
/* Save the state of the mode interrupt mask. */ /* Save the state of the mode interrupt mask. */
intstat = QM_SCSS_INT->int_adc_pwr_mask & QM_INT_ADC_PWR_MASK; intstat = QM_IR_GET_MASK(QM_INTERRUPT_ROUTER->adc_0_pwr_int_mask);
/* Mask the ADC mode change interrupt. */ /* Mask the ADC mode change interrupt. */
QM_SCSS_INT->int_adc_pwr_mask |= QM_INT_ADC_PWR_MASK; QM_IR_MASK_INTERRUPTS(QM_INTERRUPT_ROUTER->adc_0_pwr_int_mask);
/* Calculate the delay. */ /* Calculate the delay. */
delay = CALCULATE_DELAY(); delay = CALCULATE_DELAY();
@ -415,7 +415,8 @@ int qm_ss_adc_set_mode(const qm_ss_adc_t adc, const qm_ss_adc_mode_t mode)
/* Restore the state of the mode change interrupt mask if necessary. */ /* Restore the state of the mode change interrupt mask if necessary. */
if (!intstat) { if (!intstat) {
ignore_spurious_interrupt[adc] = true; ignore_spurious_interrupt[adc] = true;
QM_SCSS_INT->int_adc_pwr_mask &= ~(QM_INT_ADC_PWR_MASK); QM_IR_UNMASK_INTERRUPTS(
QM_INTERRUPT_ROUTER->adc_0_pwr_int_mask);
} }
/* Perform a dummy conversion if transitioning to Normal Mode. */ /* Perform a dummy conversion if transitioning to Normal Mode. */
@ -460,9 +461,9 @@ int qm_ss_adc_calibrate(const qm_ss_adc_t adc __attribute__((unused)))
QM_CHECK(adc < QM_SS_ADC_NUM, -EINVAL); QM_CHECK(adc < QM_SS_ADC_NUM, -EINVAL);
/* Save the state of the calibration interrupt mask. */ /* Save the state of the calibration interrupt mask. */
intstat = QM_SCSS_INT->int_adc_calib_mask & QM_INT_ADC_CALIB_MASK; intstat = QM_IR_GET_MASK(QM_INTERRUPT_ROUTER->adc_0_cal_int_mask);
/* Mask the ADC calibration interrupt. */ /* Mask the ADC calibration interrupt. */
QM_SCSS_INT->int_adc_calib_mask |= QM_INT_ADC_CALIB_MASK; QM_IR_MASK_INTERRUPTS(QM_INTERRUPT_ROUTER->adc_0_cal_int_mask);
/* Enable the ADC. */ /* Enable the ADC. */
enable_adc(); enable_adc();
@ -493,7 +494,8 @@ int qm_ss_adc_calibrate(const qm_ss_adc_t adc __attribute__((unused)))
/* Restore the state of the calibration interrupt mask if necessary. */ /* Restore the state of the calibration interrupt mask if necessary. */
if (!intstat) { if (!intstat) {
QM_SCSS_INT->int_adc_calib_mask &= ~(QM_INT_ADC_CALIB_MASK); QM_IR_UNMASK_INTERRUPTS(
QM_INTERRUPT_ROUTER->adc_0_cal_int_mask);
} }
return 0; return 0;
@ -535,9 +537,9 @@ int qm_ss_adc_set_calibration(const qm_ss_adc_t adc __attribute__((unused)),
QM_CHECK(cal_data <= QM_SS_IO_CREG_MST0_CTRL_ADC_CAL_VAL_MAX, -EINVAL); QM_CHECK(cal_data <= QM_SS_IO_CREG_MST0_CTRL_ADC_CAL_VAL_MAX, -EINVAL);
/* Save the state of the calibration interrupt mask. */ /* Save the state of the calibration interrupt mask. */
intstat = QM_SCSS_INT->int_adc_calib_mask & QM_INT_ADC_CALIB_MASK; intstat = QM_IR_GET_MASK(QM_INTERRUPT_ROUTER->adc_0_cal_int_mask);
/* Mask the ADC calibration interrupt. */ /* Mask the ADC calibration interrupt. */
QM_SCSS_INT->int_adc_calib_mask |= QM_INT_ADC_CALIB_MASK; QM_IR_MASK_INTERRUPTS(QM_INTERRUPT_ROUTER->adc_0_cal_int_mask);
/* Issue the load calibrate command. */ /* Issue the load calibrate command. */
creg = __builtin_arc_lr(QM_SS_CREG_BASE + QM_SS_IO_CREG_MST0_CTRL); creg = __builtin_arc_lr(QM_SS_CREG_BASE + QM_SS_IO_CREG_MST0_CTRL);
@ -564,7 +566,8 @@ int qm_ss_adc_set_calibration(const qm_ss_adc_t adc __attribute__((unused)),
/* Restore the state of the calibration interrupt mask if necessary. */ /* Restore the state of the calibration interrupt mask if necessary. */
if (!intstat) { if (!intstat) {
QM_SCSS_INT->int_adc_calib_mask &= ~(QM_INT_ADC_CALIB_MASK); QM_IR_UNMASK_INTERRUPTS(
QM_INTERRUPT_ROUTER->adc_0_cal_int_mask);
} }
return 0; return 0;
@ -702,3 +705,48 @@ int qm_ss_adc_irq_convert(const qm_ss_adc_t adc, qm_ss_adc_xfer_t *xfer)
return 0; return 0;
} }
#if (ENABLE_RESTORE_CONTEXT)
int qm_ss_adc_save_context(const qm_ss_adc_t adc,
			   qm_ss_adc_context_t *const ctx)
{
	/*
	 * Validate arguments before indexing adc_base[]; the original code
	 * read adc_base[adc] before range-checking adc, which is an
	 * out-of-bounds read for an invalid controller index.
	 */
	QM_CHECK(adc < QM_SS_ADC_NUM, -EINVAL);
	QM_CHECK(NULL != ctx, -EINVAL);

	const uint32_t controller = adc_base[adc];

	ctx->adc_set = __builtin_arc_lr(controller + QM_SS_ADC_SET);
	ctx->adc_divseqstat =
	    __builtin_arc_lr(controller + QM_SS_ADC_DIVSEQSTAT);
	ctx->adc_seq = __builtin_arc_lr(controller + QM_SS_ADC_SEQ);
	/*
	 * Save the control register with the ADC enable bit cleared so a
	 * later restore does not immediately re-enable the ADC.
	 * (Original comment said "Restore", which was misleading here.)
	 */
	ctx->adc_ctrl = __builtin_arc_lr(controller + QM_SS_ADC_CTRL) &
			~QM_SS_ADC_CTRL_ADC_ENA;

	return 0;
}
int qm_ss_adc_restore_context(const qm_ss_adc_t adc,
			      const qm_ss_adc_context_t *const ctx)
{
	/*
	 * Validate arguments before indexing adc_base[]; the original code
	 * read adc_base[adc] before range-checking adc, which is an
	 * out-of-bounds read for an invalid controller index.
	 */
	QM_CHECK(adc < QM_SS_ADC_NUM, -EINVAL);
	QM_CHECK(NULL != ctx, -EINVAL);

	const uint32_t controller = adc_base[adc];

	/*
	 * The IRQ associated with the mode change fires an interrupt as soon
	 * as it is enabled so it is necessary to ignore it the first time the
	 * ISR runs.
	 */
	ignore_spurious_interrupt[adc] = true;

	__builtin_arc_sr(ctx->adc_set, controller + QM_SS_ADC_SET);
	__builtin_arc_sr(ctx->adc_divseqstat,
			 controller + QM_SS_ADC_DIVSEQSTAT);
	__builtin_arc_sr(ctx->adc_seq, controller + QM_SS_ADC_SEQ);
	__builtin_arc_sr(ctx->adc_ctrl, controller + QM_SS_ADC_CTRL);

	return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -57,13 +57,13 @@ static void pt_reset(const qm_aonc_t aonc)
QM_AONC[aonc].aonpt_ctrl |= BIT(1); QM_AONC[aonc].aonpt_ctrl |= BIT(1);
} }
QM_ISR_DECLARE(qm_aonpt_isr_0) QM_ISR_DECLARE(qm_aonpt_0_isr)
{ {
if (callback) { if (callback) {
(*callback)(callback_data); (*callback)(callback_data);
} }
QM_AONC[0].aonpt_ctrl |= BIT(0); /* Clear pending interrupts */ QM_AONC[0].aonpt_ctrl |= BIT(0); /* Clear pending interrupts */
QM_ISR_EOI(QM_IRQ_AONPT_0_VECTOR); QM_ISR_EOI(QM_IRQ_AONPT_0_INT_VECTOR);
} }
int qm_aonc_enable(const qm_aonc_t aonc) int qm_aonc_enable(const qm_aonc_t aonc)

View file

@ -32,7 +32,7 @@
static void (*callback)(void *, uint32_t) = NULL; static void (*callback)(void *, uint32_t) = NULL;
static void *callback_data; static void *callback_data;
QM_ISR_DECLARE(qm_ac_isr) QM_ISR_DECLARE(qm_comparator_0_isr)
{ {
uint32_t int_status = QM_SCSS_CMP->cmp_stat_clr; uint32_t int_status = QM_SCSS_CMP->cmp_stat_clr;
@ -61,7 +61,7 @@ QM_ISR_DECLARE(qm_ac_isr)
/* Clear all pending interrupts */ /* Clear all pending interrupts */
QM_SCSS_CMP->cmp_stat_clr = int_status; QM_SCSS_CMP->cmp_stat_clr = int_status;
QM_ISR_EOI(QM_IRQ_AC_VECTOR); QM_ISR_EOI(QM_IRQ_COMPARATOR_0_INT_VECTOR);
} }
int qm_ac_set_config(const qm_ac_config_t *const config) int qm_ac_set_config(const qm_ac_config_t *const config)

View file

@ -38,10 +38,6 @@
#define STANDARD_TIMEOUT_MICROSECOND (1000) #define STANDARD_TIMEOUT_MICROSECOND (1000)
#define ONE_MICROSECOND (1) #define ONE_MICROSECOND (1)
/* Temporary LLP values while waiting for linked list initialization . */
#define LLP_LL_TO_BE_SET_MULTI_LL BIT(2)
#define LLP_LL_TO_BE_SET_MULTI_LL_CIRCULAR BIT(3)
/* Set specific register bits */ /* Set specific register bits */
#define UPDATE_REG_BITS(reg, value, offset, mask) \ #define UPDATE_REG_BITS(reg, value, offset, mask) \
{ \ { \
@ -91,6 +87,12 @@ typedef struct dma_cfg_prv_t {
* mode), decremented on each single block transfer callback. * mode), decremented on each single block transfer callback.
*/ */
uint16_t num_blocks_remaining; uint16_t num_blocks_remaining;
/*
* In multiblock linked list mode, indicates whether transfer is linear
* or circular. This information cannot be extracted from the DMA regs.
*/
bool transfer_type_ll_circular;
} dma_cfg_prv_t; } dma_cfg_prv_t;
/* /*
@ -126,7 +128,8 @@ get_transfer_length(const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
/* Read the length from the block_ts field. The units of this field /* Read the length from the block_ts field. The units of this field
* are dependent on the source transfer width. */ * are dependent on the source transfer width. */
transfer_length = ((ctrl_high & QM_DMA_CTL_H_BLOCK_TS_MASK) >> transfer_length = ((ctrl_high & QM_DMA_CTL_H_BLOCK_TS_MASK) >>
QM_DMA_CTL_H_BLOCK_TS_OFFSET); QM_DMA_CTL_H_BLOCK_TS_OFFSET) *
prv_cfg->num_blocks_per_buffer;
/* To convert this to bytes the transfer length can be shifted using /* To convert this to bytes the transfer length can be shifted using
* the source transfer width value. This value correspond to the * the source transfer width value. This value correspond to the
@ -279,34 +282,21 @@ dma_set_transfer_type(const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
chan_reg->ctrl_low &= ~QM_DMA_CTL_L_LLP_DST_EN_MASK; chan_reg->ctrl_low &= ~QM_DMA_CTL_L_LLP_DST_EN_MASK;
break; break;
case QM_DMA_TYPE_MULTI_LL_CIRCULAR:
chan_reg->llp_low |= LLP_LL_TO_BE_SET_MULTI_LL_CIRCULAR;
case QM_DMA_TYPE_MULTI_LL: case QM_DMA_TYPE_MULTI_LL:
chan_reg->llp_low |= LLP_LL_TO_BE_SET_MULTI_LL; case QM_DMA_TYPE_MULTI_LL_CIRCULAR:
/* Destination status update disable. */ /* Destination status update disable. */
chan_reg->cfg_high &= ~QM_DMA_CFG_H_DS_UPD_EN_MASK; chan_reg->cfg_high &= ~QM_DMA_CFG_H_DS_UPD_EN_MASK;
/* Source status update disable. */ /* Source status update disable. */
chan_reg->cfg_high &= ~QM_DMA_CFG_H_SS_UPD_EN_MASK; chan_reg->cfg_high &= ~QM_DMA_CFG_H_SS_UPD_EN_MASK;
/* Enable linked lists for source if necessary. */ /* Enable linked lists for source. */
if (QM_DMA_PERIPHERAL_TO_MEMORY == channel_direction) {
chan_reg->ctrl_low &= ~QM_DMA_CTL_L_LLP_SRC_EN_MASK;
chan_reg->cfg_low |= QM_DMA_CFG_L_RELOAD_SRC_MASK;
} else {
chan_reg->ctrl_low |= QM_DMA_CTL_L_LLP_SRC_EN_MASK; chan_reg->ctrl_low |= QM_DMA_CTL_L_LLP_SRC_EN_MASK;
chan_reg->cfg_low &= ~QM_DMA_CFG_L_RELOAD_SRC_MASK; chan_reg->cfg_low &= ~QM_DMA_CFG_L_RELOAD_SRC_MASK;
}
/* Enable linked lists for destination if necessary. */ /* Enable linked lists for destination. */
if (QM_DMA_MEMORY_TO_PERIPHERAL == channel_direction) {
chan_reg->ctrl_low &= ~QM_DMA_CTL_L_LLP_DST_EN_MASK;
chan_reg->cfg_low |= QM_DMA_CFG_L_RELOAD_DST_MASK;
} else {
chan_reg->ctrl_low |= QM_DMA_CTL_L_LLP_DST_EN_MASK; chan_reg->ctrl_low |= QM_DMA_CTL_L_LLP_DST_EN_MASK;
chan_reg->cfg_low &= ~QM_DMA_CFG_L_RELOAD_DST_MASK; chan_reg->cfg_low &= ~QM_DMA_CFG_L_RELOAD_DST_MASK;
}
break; break;
default: default:
@ -316,6 +306,35 @@ dma_set_transfer_type(const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
return 0; return 0;
} }
/*
 * Work out which transfer type is currently programmed on a channel.
 *
 * The hardware registers cannot distinguish a linear linked list from a
 * circular one, so that distinction comes from the driver's private
 * per-channel configuration (transfer_type_ll_circular).
 */
static __inline__ qm_dma_transfer_type_t
dma_get_transfer_type(const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
		      const dma_cfg_prv_t *prv_cfg)
{
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	if (chan_reg->ctrl_low &
	    (QM_DMA_CTL_L_LLP_SRC_EN_MASK | QM_DMA_CTL_L_LLP_DST_EN_MASK)) {
		/* LLP enabled: linked list multiblock transfer. */
		return prv_cfg->transfer_type_ll_circular
			   ? QM_DMA_TYPE_MULTI_LL_CIRCULAR
			   : QM_DMA_TYPE_MULTI_LL;
	}

	/* Block chaining disabled. */
	if (chan_reg->cfg_low &
	    (QM_DMA_CFG_L_RELOAD_SRC_MASK | QM_DMA_CFG_L_RELOAD_DST_MASK)) {
		/* Auto-reload set: contiguous multiblock transfer. */
		return QM_DMA_TYPE_MULTI_CONT;
	}

	/* Single block transfer. */
	return QM_DMA_TYPE_SINGLE;
}
static __inline__ void static __inline__ void
dma_set_source_transfer_width(const qm_dma_t dma, dma_set_source_transfer_width(const qm_dma_t dma,
const qm_dma_channel_id_t channel_id, const qm_dma_channel_id_t channel_id,

View file

@ -54,32 +54,38 @@ static void qm_dma_isr_handler(const qm_dma_t dma,
uint32_t transfer_length = uint32_t transfer_length =
get_transfer_length(dma, channel_id, prv_cfg); get_transfer_length(dma, channel_id, prv_cfg);
QM_ASSERT(int_reg->status_int_low & /* The status can't be asserted here as there is a possible race
(QM_DMA_INT_STATUS_TFR | QM_DMA_INT_STATUS_BLOCK)); * condition when terminating channels. It's possible that an interrupt
* can be generated before the terminate function masks the
if (0 != prv_cfg->num_blocks_per_buffer) { * interrupts. */
/* Multiblock transfer. */
transfer_length *= prv_cfg->num_blocks_per_buffer;
}
if (int_reg->status_int_low & QM_DMA_INT_STATUS_TFR) { if (int_reg->status_int_low & QM_DMA_INT_STATUS_TFR) {
QM_ASSERT(int_reg->status_tfr_low & BIT(channel_id)); QM_ASSERT(int_reg->status_tfr_low & BIT(channel_id));
/* Transfer completed, clear interrupt */ /* Transfer completed, clear interrupt */
int_reg->clear_tfr_low = BIT(channel_id); int_reg->clear_tfr_low = BIT(channel_id);
/* If multiblock, the final block is also completed. */
int_reg->clear_block_low = BIT(channel_id);
/* Mask interrupts for this channel */ /* Mask interrupts for this channel */
int_reg->mask_block_low = BIT(channel_id) << 8;
int_reg->mask_tfr_low = BIT(channel_id) << 8; int_reg->mask_tfr_low = BIT(channel_id) << 8;
int_reg->mask_err_low = BIT(channel_id) << 8; int_reg->mask_err_low = BIT(channel_id) << 8;
/* Clear llp register */
chan_reg->llp_low = 0;
/* /*
* Call the callback if registered and pass the transfer length. * Call the callback if registered and pass the transfer length.
*/ */
if (prv_cfg->client_callback && NULL == prv_cfg->lli_tail) { if (prv_cfg->client_callback) {
/* Single block or contiguous multiblock. */ /* Single block or contiguous multiblock. */
prv_cfg->client_callback(prv_cfg->callback_context, prv_cfg->client_callback(prv_cfg->callback_context,
transfer_length, 0); transfer_length, 0);
} }
} else { } else if (int_reg->status_int_low & QM_DMA_INT_STATUS_BLOCK) {
/* Block interrupts are only unmasked in multiblock mode. */ /* Block interrupts are only unmasked in multiblock mode. */
QM_ASSERT(int_reg->status_block_low & BIT(channel_id)); QM_ASSERT(int_reg->status_block_low & BIT(channel_id));
@ -150,6 +156,7 @@ static void qm_dma_isr_err_handler(const qm_dma_t dma)
int_reg->clear_err_low = BIT(channel_id); int_reg->clear_err_low = BIT(channel_id);
/* Mask interrupts for this channel */ /* Mask interrupts for this channel */
int_reg->mask_block_low = BIT(channel_id) << 8;
int_reg->mask_tfr_low = BIT(channel_id) << 8; int_reg->mask_tfr_low = BIT(channel_id) << 8;
int_reg->mask_err_low = BIT(channel_id) << 8; int_reg->mask_err_low = BIT(channel_id) << 8;
@ -166,59 +173,59 @@ static void qm_dma_isr_err_handler(const qm_dma_t dma)
} }
} }
QM_ISR_DECLARE(qm_dma_0_isr_err) QM_ISR_DECLARE(qm_dma_0_error_isr)
{ {
qm_dma_isr_err_handler(QM_DMA_0); qm_dma_isr_err_handler(QM_DMA_0);
QM_ISR_EOI(QM_IRQ_DMA_ERR_VECTOR); QM_ISR_EOI(QM_IRQ_DMA_0_ERROR_INT_VECTOR);
} }
QM_ISR_DECLARE(qm_dma_0_isr_0) QM_ISR_DECLARE(qm_dma_0_isr_0)
{ {
qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_0); qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_0);
QM_ISR_EOI(QM_IRQ_DMA_0_VECTOR); QM_ISR_EOI(QM_IRQ_DMA_0_INT_0_VECTOR);
} }
QM_ISR_DECLARE(qm_dma_0_isr_1) QM_ISR_DECLARE(qm_dma_0_isr_1)
{ {
qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_1); qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_1);
QM_ISR_EOI(QM_IRQ_DMA_1_VECTOR); QM_ISR_EOI(QM_IRQ_DMA_0_INT_1_VECTOR);
} }
#if (QUARK_SE) #if (QUARK_SE)
QM_ISR_DECLARE(qm_dma_0_isr_2) QM_ISR_DECLARE(qm_dma_0_isr_2)
{ {
qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_2); qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_2);
QM_ISR_EOI(QM_IRQ_DMA_2_VECTOR); QM_ISR_EOI(QM_IRQ_DMA_0_INT_2_VECTOR);
} }
QM_ISR_DECLARE(qm_dma_0_isr_3) QM_ISR_DECLARE(qm_dma_0_isr_3)
{ {
qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_3); qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_3);
QM_ISR_EOI(QM_IRQ_DMA_3_VECTOR); QM_ISR_EOI(QM_IRQ_DMA_0_INT_3_VECTOR);
} }
QM_ISR_DECLARE(qm_dma_0_isr_4) QM_ISR_DECLARE(qm_dma_0_isr_4)
{ {
qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_4); qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_4);
QM_ISR_EOI(QM_IRQ_DMA_4_VECTOR); QM_ISR_EOI(QM_IRQ_DMA_0_INT_4_VECTOR);
} }
QM_ISR_DECLARE(qm_dma_0_isr_5) QM_ISR_DECLARE(qm_dma_0_isr_5)
{ {
qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_5); qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_5);
QM_ISR_EOI(QM_IRQ_DMA_5_VECTOR); QM_ISR_EOI(QM_IRQ_DMA_0_INT_5_VECTOR);
} }
QM_ISR_DECLARE(qm_dma_0_isr_6) QM_ISR_DECLARE(qm_dma_0_isr_6)
{ {
qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_6); qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_6);
QM_ISR_EOI(QM_IRQ_DMA_6_VECTOR); QM_ISR_EOI(QM_IRQ_DMA_0_INT_6_VECTOR);
} }
QM_ISR_DECLARE(qm_dma_0_isr_7) QM_ISR_DECLARE(qm_dma_0_isr_7)
{ {
qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_7); qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_7);
QM_ISR_EOI(QM_IRQ_DMA_7_VECTOR); QM_ISR_EOI(QM_IRQ_DMA_0_INT_7_VECTOR);
} }
#endif /* QUARK_SE */ #endif /* QUARK_SE */
@ -279,7 +286,7 @@ int qm_dma_channel_set_config(const qm_dma_t dma,
dma_cfg_prv_t *chan_cfg = &dma_channel_config[dma][channel_id]; dma_cfg_prv_t *chan_cfg = &dma_channel_config[dma][channel_id];
int return_code; int return_code;
/* Set the transfer type. Only one currently supported */ /* Set the transfer type. */
return_code = dma_set_transfer_type(dma, channel_id, return_code = dma_set_transfer_type(dma, channel_id,
channel_config->transfer_type, channel_config->transfer_type,
channel_config->channel_direction); channel_config->channel_direction);
@ -347,8 +354,14 @@ int qm_dma_channel_set_config(const qm_dma_t dma,
/* Multiblock linked list not configured. */ /* Multiblock linked list not configured. */
chan_cfg->lli_tail = NULL; chan_cfg->lli_tail = NULL;
/* Multiblock number of blocks per buffer (LL and contiguous modes). */ /* Number of blocks per buffer (>1 when multiblock). */
chan_cfg->num_blocks_per_buffer = 0; chan_cfg->num_blocks_per_buffer = 1;
/* Multiblock circular linked list flag. */
chan_cfg->transfer_type_ll_circular =
(channel_config->transfer_type == QM_DMA_TYPE_MULTI_LL_CIRCULAR)
? true
: false;
return 0; return 0;
} }
@ -449,54 +462,68 @@ int qm_dma_multi_transfer_set_config(
QM_CHECK(multi_transfer_config->num_blocks > 0, -EINVAL); QM_CHECK(multi_transfer_config->num_blocks > 0, -EINVAL);
dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id]; dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id];
qm_dma_transfer_type_t transfer_type =
dma_get_transfer_type(dma, channel_id, prv_cfg);
volatile qm_dma_chan_reg_t *chan_reg =
&QM_DMA[dma]->chan_reg[channel_id];
/* /*
* Node to which last node points to, 0 on linear linked lists or first * Node to which last node points to, 0 on linear linked lists or first
* node on circular linked lists. * node on circular linked lists.
*/ */
uint32_t tail_pointing_lli; uint32_t tail_pointing_lli;
if (NULL == prv_cfg->lli_tail) { /*
/* First call to this function after DMA channel config. */ * Initialize block counting internal variables, needed in ISR to manage
volatile qm_dma_chan_reg_t *chan_reg = * client callback invocations.
&QM_DMA[dma]->chan_reg[channel_id]; */
if (0 == chan_reg->llp_low) { if (0 == chan_reg->llp_low) {
prv_cfg->num_blocks_per_buffer =
multi_transfer_config->num_blocks;
prv_cfg->num_blocks_remaining =
multi_transfer_config->num_blocks;
}
switch (transfer_type) {
case QM_DMA_TYPE_MULTI_CONT:
/* Contiguous multiblock transfer. */ /* Contiguous multiblock transfer. */
dma_set_source_address( dma_set_source_address(
dma, channel_id, dma, channel_id,
(uint32_t)multi_transfer_config->source_address); (uint32_t)multi_transfer_config->source_address);
dma_set_destination_address( dma_set_destination_address(
dma, channel_id, (uint32_t)multi_transfer_config dma, channel_id,
->destination_address); (uint32_t)multi_transfer_config->destination_address);
dma_set_block_size(dma, channel_id, dma_set_block_size(dma, channel_id,
multi_transfer_config->block_size); multi_transfer_config->block_size);
break;
} else if (multi_transfer_config->linked_list_first != NULL && case QM_DMA_TYPE_MULTI_LL_CIRCULAR:
case QM_DMA_TYPE_MULTI_LL:
if (multi_transfer_config->linked_list_first == NULL ||
((uint32_t)multi_transfer_config->linked_list_first & ((uint32_t)multi_transfer_config->linked_list_first &
0x3) == 0) { 0x3) != 0) {
/* /*
* Linked list multiblock tranfer (uninitialized). User * User-allocated linked list memory needs to be 4-byte
* allocated linked list memory needs to be 4-byte
* alligned. * alligned.
*/ */
QM_ASSERT((uint32_t)chan_reg->llp_low & return -EINVAL;
LLP_LL_TO_BE_SET_MULTI_LL); }
QM_ASSERT(0 == ((uint32_t)chan_reg->llp_low &
~(LLP_LL_TO_BE_SET_MULTI_LL | if (0 == chan_reg->llp_low) {
LLP_LL_TO_BE_SET_MULTI_LL_CIRCULAR)));
/* /*
* With circular operation, the last LLI node should * Either first call to this function after DMA channel
* point to the first one (this node). * config or transfer reconfiguration after a completed
* multiblock transfer.
*/ */
tail_pointing_lli = tail_pointing_lli =
(chan_reg->llp_low & (transfer_type == QM_DMA_TYPE_MULTI_LL_CIRCULAR)
LLP_LL_TO_BE_SET_MULTI_LL_CIRCULAR)
? (uint32_t) ? (uint32_t)
multi_transfer_config->linked_list_first multi_transfer_config->linked_list_first
: 0; : 0;
/* Initialize LLIs using CTL drom DMA register (plus /*
* INT_EN bit). */ * Initialize LLIs using CTL drom DMA register (plus
* INT_EN bit).
*/
prv_cfg->lli_tail = dma_linked_list_init( prv_cfg->lli_tail = dma_linked_list_init(
multi_transfer_config, multi_transfer_config,
chan_reg->ctrl_low | QM_DMA_CTL_L_INT_EN_MASK, chan_reg->ctrl_low | QM_DMA_CTL_L_INT_EN_MASK,
@ -506,24 +533,11 @@ int qm_dma_multi_transfer_set_config(
chan_reg->llp_low = chan_reg->llp_low =
(uint32_t)multi_transfer_config->linked_list_first; (uint32_t)multi_transfer_config->linked_list_first;
} else { } else {
return -EINVAL;
}
/* /*
* Initialize block counting internal variables, needed in ISR * Linked list multiblock transfer (additional appended
* to manage client callback invocations. * LLIs). The number of blocks needs to match the number
*/ * of blocks on previous calls to this function (we only
prv_cfg->num_blocks_per_buffer = * allow scatter/gather buffers of same size).
multi_transfer_config->num_blocks;
prv_cfg->num_blocks_remaining =
multi_transfer_config->num_blocks;
} else {
/*
* Linked list multiblock transfer (additional appended LLIs).
* The number of blocks needs to match the number of blocks on
* previous calls to this function (we only allow scatter/gather
* buffers of same size).
*/ */
if (prv_cfg->num_blocks_per_buffer != if (prv_cfg->num_blocks_per_buffer !=
multi_transfer_config->num_blocks) { multi_transfer_config->num_blocks) {
@ -531,18 +545,24 @@ int qm_dma_multi_transfer_set_config(
} }
/* /*
* Reference to NULL (linear LL) or the first LLI node (circular * Reference to NULL (linear LL) or the first LLI node
* LL), extracted from previously configured linked list. * (circular LL), extracted from previously configured
* linked list.
*/ */
tail_pointing_lli = prv_cfg->lli_tail->linked_list_address; tail_pointing_lli =
prv_cfg->lli_tail->linked_list_address;
/* Point last previously configured linked list to this node. */ /*
* Point last previously configured linked list to this
* node.
*/
prv_cfg->lli_tail->linked_list_address = prv_cfg->lli_tail->linked_list_address =
(uint32_t)multi_transfer_config->linked_list_first; (uint32_t)multi_transfer_config->linked_list_first;
/* /*
* Initialize LLI using CTL from last previously configured LLI, * Initialize LLI using CTL from last previously
* returning a pointer to the new tail node. * configured LLI, returning a pointer to the new tail
* node.
*/ */
prv_cfg->lli_tail = dma_linked_list_init( prv_cfg->lli_tail = dma_linked_list_init(
multi_transfer_config, prv_cfg->lli_tail->ctrl_low, multi_transfer_config, prv_cfg->lli_tail->ctrl_low,
@ -551,6 +571,13 @@ int qm_dma_multi_transfer_set_config(
QM_ASSERT(prv_cfg->lli_tail->linked_list_address == QM_ASSERT(prv_cfg->lli_tail->linked_list_address ==
tail_pointing_lli); tail_pointing_lli);
} }
break;
default:
/* Single block not allowed */
return -EINVAL;
break;
}
return 0; return 0;
} }
@ -564,11 +591,17 @@ int qm_dma_transfer_start(const qm_dma_t dma,
volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg; volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;
dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id]; dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id];
/* Clear all interrupts as they may be asserted from a previous
* transfer */
int_reg->clear_tfr_low = BIT(channel_id);
int_reg->clear_block_low = BIT(channel_id);
int_reg->clear_err_low = BIT(channel_id);
/* Unmask Interrupts */ /* Unmask Interrupts */
int_reg->mask_tfr_low = ((BIT(channel_id) << 8) | BIT(channel_id)); int_reg->mask_tfr_low = ((BIT(channel_id) << 8) | BIT(channel_id));
int_reg->mask_err_low = ((BIT(channel_id) << 8) | BIT(channel_id)); int_reg->mask_err_low = ((BIT(channel_id) << 8) | BIT(channel_id));
if (prv_cfg->num_blocks_per_buffer != 0) { if (prv_cfg->num_blocks_per_buffer > 1) {
/* Block interrupts are only unmasked in multiblock mode. */ /* Block interrupts are only unmasked in multiblock mode. */
int_reg->mask_block_low = int_reg->mask_block_low =
((BIT(channel_id) << 8) | BIT(channel_id)); ((BIT(channel_id) << 8) | BIT(channel_id));
@ -589,6 +622,8 @@ int qm_dma_transfer_terminate(const qm_dma_t dma,
int return_code; int return_code;
volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg; volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;
volatile qm_dma_chan_reg_t *chan_reg =
&QM_DMA[dma]->chan_reg[channel_id];
/* Disable interrupts for the channel */ /* Disable interrupts for the channel */
dma_interrupt_disable(dma, channel_id); dma_interrupt_disable(dma, channel_id);
@ -598,6 +633,9 @@ int qm_dma_transfer_terminate(const qm_dma_t dma,
int_reg->mask_block_low = (BIT(channel_id) << 8); int_reg->mask_block_low = (BIT(channel_id) << 8);
int_reg->mask_err_low = (BIT(channel_id) << 8); int_reg->mask_err_low = (BIT(channel_id) << 8);
/* Clear llp register */
chan_reg->llp_low = 0;
/* The channel is disabled and the transfer complete callback is /* The channel is disabled and the transfer complete callback is
* triggered. This callback provides the client with the data length * triggered. This callback provides the client with the data length
* transferred before the transfer was stopped. */ * transferred before the transfer was stopped. */
@ -605,15 +643,9 @@ int qm_dma_transfer_terminate(const qm_dma_t dma,
if (!return_code) { if (!return_code) {
dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id]; dma_cfg_prv_t *prv_cfg = &dma_channel_config[dma][channel_id];
if (prv_cfg->client_callback) { if (prv_cfg->client_callback) {
uint32_t transfer_length = prv_cfg->client_callback(
get_transfer_length(dma, channel_id, prv_cfg); prv_cfg->callback_context,
if (0 != prv_cfg->num_blocks_per_buffer) { get_transfer_length(dma, channel_id, prv_cfg), 0);
/* Multiblock transfer. */
transfer_length *=
prv_cfg->num_blocks_per_buffer;
}
prv_cfg->client_callback(prv_cfg->callback_context,
transfer_length, 0);
} }
} }
@ -643,3 +675,50 @@ int qm_dma_transfer_mem_to_mem(const qm_dma_t dma,
return return_code; return return_code;
} }
#if (ENABLE_RESTORE_CONTEXT)
int qm_dma_save_context(const qm_dma_t dma, qm_dma_context_t *const ctx)
{
	QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	QM_RW qm_dma_misc_reg_t *misc_reg = &QM_DMA[dma]->misc_reg;
	int channel;

	/* Save the controller-wide configuration register. */
	ctx->misc_cfg_low = misc_reg->cfg_low;

	for (channel = 0; channel < QM_DMA_CHANNEL_NUM; channel++) {
		QM_RW qm_dma_chan_reg_t *chan_reg =
		    &QM_DMA[dma]->chan_reg[channel];

		/*
		 * QM_DMA_CTL_L_INT_EN_MASK is cleared in the saved control
		 * word so that restoring it cannot trigger a new transfer.
		 */
		ctx->channel[channel].ctrl_low =
		    chan_reg->ctrl_low & ~QM_DMA_CTL_L_INT_EN_MASK;
		ctx->channel[channel].cfg_low = chan_reg->cfg_low;
		ctx->channel[channel].cfg_high = chan_reg->cfg_high;
		ctx->channel[channel].llp_low = chan_reg->llp_low;
	}

	return 0;
}
int qm_dma_restore_context(const qm_dma_t dma,
			   const qm_dma_context_t *const ctx)
{
	QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	QM_RW qm_dma_misc_reg_t *misc_reg = &QM_DMA[dma]->misc_reg;
	int channel;

	/* Restore the controller-wide configuration register. */
	misc_reg->cfg_low = ctx->misc_cfg_low;

	/* Restore every channel's control, configuration and LLP words. */
	for (channel = 0; channel < QM_DMA_CHANNEL_NUM; channel++) {
		QM_RW qm_dma_chan_reg_t *chan_reg =
		    &QM_DMA[dma]->chan_reg[channel];

		chan_reg->ctrl_low = ctx->channel[channel].ctrl_low;
		chan_reg->cfg_low = ctx->channel[channel].cfg_low;
		chan_reg->cfg_high = ctx->channel[channel].cfg_high;
		chan_reg->llp_low = ctx->channel[channel].llp_low;
	}

	return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -324,3 +324,32 @@ int qm_flash_mass_erase(const qm_flash_t flash, const uint8_t include_rom)
; ;
return 0; return 0;
} }
#if (ENABLE_RESTORE_CONTEXT)
int qm_flash_save_context(const qm_flash_t flash, qm_flash_context_t *const ctx)
{
	QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	qm_flash_reg_t *const regs = QM_FLASH[flash];

	/* Save the flash timing and control registers. */
	ctx->tmg_ctrl = regs->tmg_ctrl;
	ctx->ctrl = regs->ctrl;

	return 0;
}
int qm_flash_restore_context(const qm_flash_t flash,
			     const qm_flash_context_t *const ctx)
{
	QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	qm_flash_reg_t *const regs = QM_FLASH[flash];

	/* Write back the previously saved timing and control registers. */
	regs->tmg_ctrl = ctx->tmg_ctrl;
	regs->ctrl = ctx->ctrl;

	return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -33,21 +33,21 @@
static void (*callback[QM_FLASH_NUM])(void *); static void (*callback[QM_FLASH_NUM])(void *);
static void *callback_data[QM_FLASH_NUM]; static void *callback_data[QM_FLASH_NUM];
QM_ISR_DECLARE(qm_fpr_isr_0) QM_ISR_DECLARE(qm_flash_mpr_0_isr)
{ {
(*callback[QM_FLASH_0])(callback_data[QM_FLASH_0]); (*callback[QM_FLASH_0])(callback_data[QM_FLASH_0]);
QM_FLASH[QM_FLASH_0]->mpr_vsts = QM_FPR_MPR_VSTS_VALID; QM_FLASH[QM_FLASH_0]->mpr_vsts = QM_FPR_MPR_VSTS_VALID;
QM_ISR_EOI(QM_IRQ_FLASH_0_VECTOR); QM_ISR_EOI(QM_IRQ_FLASH_MPR_0_INT_VECTOR);
} }
#if (QUARK_SE) #if (QUARK_SE)
QM_ISR_DECLARE(qm_fpr_isr_1) QM_ISR_DECLARE(qm_flash_mpr_1_isr)
{ {
(*callback[QM_FLASH_1])(callback_data[QM_FLASH_1]); (*callback[QM_FLASH_1])(callback_data[QM_FLASH_1]);
QM_FLASH[QM_FLASH_1]->mpr_vsts = QM_FPR_MPR_VSTS_VALID; QM_FLASH[QM_FLASH_1]->mpr_vsts = QM_FPR_MPR_VSTS_VALID;
QM_ISR_EOI(QM_IRQ_FLASH_1_VECTOR); QM_ISR_EOI(QM_IRQ_FLASH_MPR_1_INT_VECTOR);
} }
#endif #endif
@ -94,7 +94,7 @@ int qm_fpr_set_violation_policy(const qm_fpr_viol_mode_t mode,
QM_CHECK(mode <= FPR_VIOL_MODE_PROBE, -EINVAL); QM_CHECK(mode <= FPR_VIOL_MODE_PROBE, -EINVAL);
QM_CHECK(flash < QM_FLASH_NUM, -EINVAL); QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
volatile uint32_t *int_flash_controller_mask = volatile uint32_t *int_flash_controller_mask =
&QM_SCSS_INT->int_flash_controller_0_mask; &QM_INTERRUPT_ROUTER->flash_mpr_0_int_mask;
/* interrupt mode */ /* interrupt mode */
if (FPR_VIOL_MODE_INTERRUPT == mode) { if (FPR_VIOL_MODE_INTERRUPT == mode) {
@ -102,20 +102,18 @@ int qm_fpr_set_violation_policy(const qm_fpr_viol_mode_t mode,
callback[flash] = callback_fn; callback[flash] = callback_fn;
callback_data[flash] = data; callback_data[flash] = data;
int_flash_controller_mask[flash] &= QM_IR_UNMASK_INTERRUPTS(int_flash_controller_mask[flash]);
~QM_INT_FLASH_CONTROLLER_SS_MASK;
int_flash_controller_mask[flash] |= QM_IR_MASK_HALTS(int_flash_controller_mask[flash]);
QM_INT_FLASH_CONTROLLER_SS_HALT_MASK;
QM_SCSS_SS->ss_cfg &= ~QM_SS_STS_HALT_INTERRUPT_REDIRECTION; QM_SCSS_SS->ss_cfg &= ~QM_SS_STS_HALT_INTERRUPT_REDIRECTION;
} }
/* probe or reset mode */ /* probe or reset mode */
else { else {
int_flash_controller_mask[flash] |= QM_IR_MASK_INTERRUPTS(int_flash_controller_mask[flash]);
QM_INT_FLASH_CONTROLLER_SS_MASK;
int_flash_controller_mask[flash] &= QM_IR_UNMASK_HALTS(int_flash_controller_mask[flash]);
~QM_INT_FLASH_CONTROLLER_SS_HALT_MASK;
if (FPR_VIOL_MODE_PROBE == mode) { if (FPR_VIOL_MODE_PROBE == mode) {
@ -145,7 +143,7 @@ int qm_fpr_set_violation_policy(const qm_fpr_viol_mode_t mode,
QM_CHECK(mode <= FPR_VIOL_MODE_PROBE, -EINVAL); QM_CHECK(mode <= FPR_VIOL_MODE_PROBE, -EINVAL);
QM_CHECK(flash < QM_FLASH_NUM, -EINVAL); QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
volatile uint32_t *int_flash_controller_mask = volatile uint32_t *int_flash_controller_mask =
&QM_SCSS_INT->int_flash_controller_0_mask; &QM_INTERRUPT_ROUTER->flash_mpr_0_int_mask;
/* interrupt mode */ /* interrupt mode */
if (FPR_VIOL_MODE_INTERRUPT == mode) { if (FPR_VIOL_MODE_INTERRUPT == mode) {
@ -155,15 +153,14 @@ int qm_fpr_set_violation_policy(const qm_fpr_viol_mode_t mode,
/* unmask interrupt */ /* unmask interrupt */
if (flash == QM_FLASH_0) { if (flash == QM_FLASH_0) {
qm_irq_unmask(QM_IRQ_FLASH_0); qm_irq_unmask(QM_IRQ_FLASH_MPR_0_INT);
#if (QUARK_SE) #if (QUARK_SE)
} else { } else {
qm_irq_unmask(QM_IRQ_FLASH_1); qm_irq_unmask(QM_IRQ_FLASH_MPR_1_INT);
#endif #endif
} }
int_flash_controller_mask[flash] |= QM_IR_MASK_HALTS(int_flash_controller_mask[flash]);
QM_INT_FLASH_CONTROLLER_HOST_HALT_MASK;
QM_SCSS_PMU->p_sts &= ~QM_P_STS_HALT_INTERRUPT_REDIRECTION; QM_SCSS_PMU->p_sts &= ~QM_P_STS_HALT_INTERRUPT_REDIRECTION;
} }
@ -172,15 +169,14 @@ int qm_fpr_set_violation_policy(const qm_fpr_viol_mode_t mode,
else { else {
/* mask interrupt */ /* mask interrupt */
if (flash == QM_FLASH_0) { if (flash == QM_FLASH_0) {
qm_irq_mask(QM_IRQ_FLASH_0); qm_irq_mask(QM_IRQ_FLASH_MPR_0_INT);
#if (QUARK_SE) #if (QUARK_SE)
} else { } else {
qm_irq_mask(QM_IRQ_FLASH_1); qm_irq_mask(QM_IRQ_FLASH_MPR_1_INT);
#endif #endif
} }
int_flash_controller_mask[flash] &= QM_IR_UNMASK_HALTS(int_flash_controller_mask[flash]);
~QM_INT_FLASH_CONTROLLER_HOST_HALT_MASK;
if (FPR_VIOL_MODE_PROBE == mode) { if (FPR_VIOL_MODE_PROBE == mode) {
@ -201,3 +197,36 @@ int qm_fpr_set_violation_policy(const qm_fpr_viol_mode_t mode,
return 0; return 0;
} }
#endif /* QM_SENSOR */ #endif /* QM_SENSOR */
#if (ENABLE_RESTORE_CONTEXT)
/*
 * Snapshot the FPR read-protection configuration registers of the given
 * flash controller into the caller-provided context structure.
 */
int qm_fpr_save_context(const qm_flash_t flash, qm_fpr_context_t *const ctx)
{
	QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	qm_flash_reg_t *const regs = QM_FLASH[flash];
	uint8_t idx;

	/* Copy every FPR read-config register into the context. */
	for (idx = 0; idx < QM_FPR_NUM; idx++) {
		ctx->fpr_rd_cfg[idx] = regs->fpr_rd_cfg[idx];
	}

	return 0;
}

/*
 * Write a previously saved FPR read-protection configuration back into
 * the flash controller registers.
 */
int qm_fpr_restore_context(const qm_flash_t flash,
			   const qm_fpr_context_t *const ctx)
{
	QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	qm_flash_reg_t *const regs = QM_FLASH[flash];
	uint8_t idx;

	/* Replay each saved FPR read-config register. */
	for (idx = 0; idx < QM_FPR_NUM; idx++) {
		regs->fpr_rd_cfg[idx] = ctx->fpr_rd_cfg[idx];
	}

	return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -75,17 +75,17 @@ static void gpio_isr(const qm_gpio_t gpio)
QM_GPIO[gpio]->gpio_porta_eoi; QM_GPIO[gpio]->gpio_porta_eoi;
} }
QM_ISR_DECLARE(qm_gpio_isr_0) QM_ISR_DECLARE(qm_gpio_0_isr)
{ {
gpio_isr(QM_GPIO_0); gpio_isr(QM_GPIO_0);
QM_ISR_EOI(QM_IRQ_GPIO_0_VECTOR); QM_ISR_EOI(QM_IRQ_GPIO_0_INT_VECTOR);
} }
#if (HAS_AON_GPIO) #if (HAS_AON_GPIO)
QM_ISR_DECLARE(qm_aon_gpio_isr_0) QM_ISR_DECLARE(qm_aon_gpio_0_isr)
{ {
gpio_isr(QM_AON_GPIO_0); gpio_isr(QM_AON_GPIO_0);
QM_ISR_EOI(QM_IRQ_AONGPIO_0_VECTOR); QM_ISR_EOI(QM_IRQ_AON_GPIO_0_INT_VECTOR);
} }
#endif #endif
@ -178,3 +178,53 @@ int qm_gpio_write_port(const qm_gpio_t gpio, const uint32_t val)
return 0; return 0;
} }
#if (ENABLE_RESTORE_CONTEXT)
/* Snapshot the GPIO port registers into ctx before entering sleep. */
int qm_gpio_save_context(const qm_gpio_t gpio, qm_gpio_context_t *const ctx)
{
	QM_CHECK(gpio < QM_GPIO_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	qm_gpio_reg_t *const regs = QM_GPIO[gpio];

	/* Only the QM_GPIO_0 port is saved; other ports are left untouched. */
	if (gpio == QM_GPIO_0) {
		ctx->gpio_swporta_dr = regs->gpio_swporta_dr;
		ctx->gpio_swporta_ddr = regs->gpio_swporta_ddr;
		ctx->gpio_swporta_ctl = regs->gpio_swporta_ctl;
		ctx->gpio_inten = regs->gpio_inten;
		ctx->gpio_intmask = regs->gpio_intmask;
		ctx->gpio_inttype_level = regs->gpio_inttype_level;
		ctx->gpio_int_polarity = regs->gpio_int_polarity;
		ctx->gpio_debounce = regs->gpio_debounce;
		ctx->gpio_ls_sync = regs->gpio_ls_sync;
		ctx->gpio_int_bothedge = regs->gpio_int_bothedge;
	}

	return 0;
}

/* Write a saved GPIO port snapshot back to the hardware registers. */
int qm_gpio_restore_context(const qm_gpio_t gpio,
			    const qm_gpio_context_t *const ctx)
{
	QM_CHECK(gpio < QM_GPIO_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	qm_gpio_reg_t *const regs = QM_GPIO[gpio];

	/* Only the QM_GPIO_0 port is restored. */
	if (gpio == QM_GPIO_0) {
		/* Mask all interrupts while the port is reconfigured. */
		regs->gpio_intmask = 0xffffffff;
		regs->gpio_swporta_dr = ctx->gpio_swporta_dr;
		regs->gpio_swporta_ddr = ctx->gpio_swporta_ddr;
		regs->gpio_swporta_ctl = ctx->gpio_swporta_ctl;
		regs->gpio_inten = ctx->gpio_inten;
		regs->gpio_inttype_level = ctx->gpio_inttype_level;
		regs->gpio_int_polarity = ctx->gpio_int_polarity;
		regs->gpio_debounce = ctx->gpio_debounce;
		regs->gpio_ls_sync = ctx->gpio_ls_sync;
		regs->gpio_int_bothedge = ctx->gpio_int_bothedge;
		/* Restore the saved interrupt mask last. */
		regs->gpio_intmask = ctx->gpio_intmask;
	}

	return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -43,18 +43,18 @@ static void ss_gpio_isr_handler(qm_ss_gpio_t gpio)
int_status = __builtin_arc_lr(controller + QM_SS_GPIO_INTSTATUS); int_status = __builtin_arc_lr(controller + QM_SS_GPIO_INTSTATUS);
if (callback[gpio]) { if (callback[gpio]) {
callback[gpio](callback_data, int_status); callback[gpio](callback_data[gpio], int_status);
} }
__builtin_arc_sr(int_status, controller + QM_SS_GPIO_PORTA_EOI); __builtin_arc_sr(int_status, controller + QM_SS_GPIO_PORTA_EOI);
} }
QM_ISR_DECLARE(qm_ss_gpio_isr_0) QM_ISR_DECLARE(qm_ss_gpio_0_isr)
{ {
ss_gpio_isr_handler(QM_SS_GPIO_0); ss_gpio_isr_handler(QM_SS_GPIO_0);
} }
QM_ISR_DECLARE(qm_ss_gpio_isr_1) QM_ISR_DECLARE(qm_ss_gpio_1_isr)
{ {
ss_gpio_isr_handler(QM_SS_GPIO_1); ss_gpio_isr_handler(QM_SS_GPIO_1);
} }
@ -156,3 +156,58 @@ int qm_ss_gpio_write_port(const qm_ss_gpio_t gpio, const uint32_t val)
return 0; return 0;
} }
#if (ENABLE_RESTORE_CONTEXT)
/* Capture the SS GPIO register file into ctx ahead of a sleep cycle. */
int qm_ss_gpio_save_context(const qm_ss_gpio_t gpio,
			    qm_ss_gpio_context_t *const ctx)
{
	uint32_t base;

	QM_CHECK(gpio < QM_SS_GPIO_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	base = gpio_base[gpio];

	ctx->gpio_swporta_dr = __builtin_arc_lr(base + QM_SS_GPIO_SWPORTA_DR);
	ctx->gpio_swporta_ddr =
	    __builtin_arc_lr(base + QM_SS_GPIO_SWPORTA_DDR);
	ctx->gpio_inten = __builtin_arc_lr(base + QM_SS_GPIO_INTEN);
	ctx->gpio_intmask = __builtin_arc_lr(base + QM_SS_GPIO_INTMASK);
	ctx->gpio_inttype_level =
	    __builtin_arc_lr(base + QM_SS_GPIO_INTTYPE_LEVEL);
	ctx->gpio_int_polarity =
	    __builtin_arc_lr(base + QM_SS_GPIO_INT_POLARITY);
	ctx->gpio_debounce = __builtin_arc_lr(base + QM_SS_GPIO_DEBOUNCE);
	ctx->gpio_ls_sync = __builtin_arc_lr(base + QM_SS_GPIO_LS_SYNC);

	return 0;
}

/* Replay a saved SS GPIO register file after waking from sleep. */
int qm_ss_gpio_restore_context(const qm_ss_gpio_t gpio,
			       const qm_ss_gpio_context_t *const ctx)
{
	uint32_t base;

	QM_CHECK(gpio < QM_SS_GPIO_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	base = gpio_base[gpio];

	/* Keep all interrupts masked until every register is restored. */
	__builtin_arc_sr(0xffffffff, base + QM_SS_GPIO_INTMASK);
	__builtin_arc_sr(ctx->gpio_swporta_dr, base + QM_SS_GPIO_SWPORTA_DR);
	__builtin_arc_sr(ctx->gpio_swporta_ddr,
			 base + QM_SS_GPIO_SWPORTA_DDR);
	__builtin_arc_sr(ctx->gpio_inten, base + QM_SS_GPIO_INTEN);
	__builtin_arc_sr(ctx->gpio_inttype_level,
			 base + QM_SS_GPIO_INTTYPE_LEVEL);
	__builtin_arc_sr(ctx->gpio_int_polarity,
			 base + QM_SS_GPIO_INT_POLARITY);
	__builtin_arc_sr(ctx->gpio_debounce, base + QM_SS_GPIO_DEBOUNCE);
	__builtin_arc_sr(ctx->gpio_ls_sync, base + QM_SS_GPIO_LS_SYNC);
	/* Unmask last, once the configuration is coherent again. */
	__builtin_arc_sr(ctx->gpio_intmask, base + QM_SS_GPIO_INTMASK);

	return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -53,6 +53,24 @@ static volatile const qm_i2c_transfer_t *i2c_transfer[QM_I2C_NUM];
static volatile uint32_t i2c_write_pos[QM_I2C_NUM], i2c_read_pos[QM_I2C_NUM], static volatile uint32_t i2c_write_pos[QM_I2C_NUM], i2c_read_pos[QM_I2C_NUM],
i2c_read_cmd_send[QM_I2C_NUM]; i2c_read_cmd_send[QM_I2C_NUM];
/* True if user buffers have been updated. */
static volatile bool transfer_ongoing = false;

/*
 * Keep track of activity if addressed.
 * There is no register which keeps track of the internal state machine status,
 * whether it is addressed, transmitting or receiving.
 * The only way to keep track of this is to save the information that the
 * driver received one of the following interrupts:
 * - General call interrupt
 * - Read request
 * - RX FIFO full.
 * Also, if no interrupt has been received during an RX transaction, the driver
 * can check the controller has been addressed if data has been effectively
 * received.
 *
 * NOTE(review): both flags here are single globals shared by every I2C
 * instance, while the position counters above are per-controller arrays —
 * this appears safe only if a single controller is used in slave mode at a
 * time; confirm.
 */
static volatile bool is_addressed = false;
/* /*
* I2C DMA controller configuration descriptor. * I2C DMA controller configuration descriptor.
*/ */
@ -96,6 +114,82 @@ void *i2c_dma_callbacks[] = {NULL, i2c_dma_transmit_callback,
static void controller_enable(const qm_i2c_t i2c); static void controller_enable(const qm_i2c_t i2c);
static int controller_disable(const qm_i2c_t i2c); static int controller_disable(const qm_i2c_t i2c);
/*
 * Empty RX FIFO.
 * Try to empty FIFO to user buffer. If RX buffer is full, trigger callback.
 * If user does not update buffer when requested, empty FIFO without storing
 * received data.
 */
static void empty_rx_fifo(const qm_i2c_t i2c,
			  const volatile qm_i2c_transfer_t *const transfer,
			  qm_i2c_reg_t *const controller)
{
	/* Loop while the "Receive FIFO Not Empty" status bit is set. */
	while (controller->ic_status & QM_I2C_IC_STATUS_RFNE) {
		if (!transfer_ongoing) {
			/*
			 * Dummy read: the volatile register access pops one
			 * byte from the FIFO; the value is discarded.
			 */
			controller->ic_data_cmd;
		} else {
			if (transfer->rx_len > i2c_read_pos[i2c]) {
				/* Keep only the low data byte. */
				transfer->rx[i2c_read_pos[i2c]++] =
				    controller->ic_data_cmd & 0xFF;
			}
			if (transfer->rx_len == i2c_read_pos[i2c]) {
				/*
				 * End user transfer if user does not update
				 * buffers.
				 */
				transfer_ongoing = false;
				if (transfer->callback) {
					transfer->callback(
					    transfer->callback_data, 0,
					    QM_I2C_RX_FULL, transfer->rx_len);
				}
			}
		}
	}
}
/*
 * Fill TX FIFO.
 * Try to fill the FIFO with user data. If TX buffer is empty, trigger callback.
 * If user does not update buffer when requested, fill the FIFO with dummy
 * data.
 */
static void fill_tx_fifo(const qm_i2c_t i2c,
			 const volatile qm_i2c_transfer_t *const transfer,
			 qm_i2c_reg_t *const controller)
{
	/*
	 * Loop while the FIFO is not full (TNF) and no TX abort is pending;
	 * an abort flushes the FIFO, so continuing to fill it is pointless.
	 */
	while ((controller->ic_status & QM_I2C_IC_STATUS_TNF) &&
	       (!(controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_TX_ABRT))) {
		if (!transfer_ongoing) {
			/* Dummy write. */
			controller->ic_data_cmd = 0;
		} else {
			if (transfer->tx_len > i2c_write_pos[i2c]) {
				controller->ic_data_cmd =
				    transfer->tx[i2c_write_pos[i2c]++];
			}
			if (transfer->tx_len == i2c_write_pos[i2c]) {
				/*
				 * End user transfer if user does not update
				 * buffers.
				 */
				transfer_ongoing = false;
				if (transfer->callback) {
					transfer->callback(
					    transfer->callback_data, 0,
					    QM_I2C_TX_EMPTY, transfer->tx_len);
				}
			}
		}
	}
}
static __inline__ void static __inline__ void
handle_tx_abrt(const qm_i2c_t i2c, handle_tx_abrt(const qm_i2c_t i2c,
const volatile qm_i2c_transfer_t *const transfer, const volatile qm_i2c_transfer_t *const transfer,
@ -137,6 +231,183 @@ handle_tx_abrt(const qm_i2c_t i2c,
} }
} }
/*
 * Slave-mode interrupt handler.
 * Dispatches on a cached snapshot of ic_intr_stat; the bare reads of the
 * ic_clr_* registers are volatile accesses that acknowledge the
 * corresponding interrupt. The snapshot is refreshed after RX_DONE and
 * STOP handling because STOP/START events can arrive mid-handler.
 */
static __inline__ void
i2c_isr_slave_handler(const qm_i2c_t i2c,
		      const volatile qm_i2c_transfer_t *const transfer,
		      qm_i2c_reg_t *const controller)
{
	/* Save register to speed up process in interrupt. */
	uint32_t ic_intr_stat = controller->ic_intr_stat;

	/*
	 * Order of interrupt handling:
	 * - RX Status interrupts
	 * - TX Status interrupts (RD_REQ, RX_DONE, TX_EMPTY)
	 * - General call (will only appear after few SCL clock cycles after
	 * start interrupt).
	 * - Stop (can appear very shortly after RX_DONE interrupt)
	 * - Start (can appear very shortly after a stop interrupt or RX_DONE
	 * interrupt)
	 */

	/*
	 * Check RX status.
	 * Master write (TX), slave read (RX).
	 *
	 * Interrupts handled for RX status:
	 * - RX FIFO Overflow
	 * - RX FIFO Full (interrupt remains active until FIFO emptied)
	 *
	 * RX FIFO overflow must always be checked though, in case of an
	 * overflow happens during RX_FULL interrupt handling.
	 */
	/* RX FIFO Overflow. */
	if (ic_intr_stat & QM_I2C_IC_INTR_STAT_RX_OVER) {
		/* Volatile read acknowledges the overflow interrupt. */
		controller->ic_clr_rx_over;
		if (transfer->callback) {
			transfer->callback(transfer->callback_data, 0,
					   QM_I2C_RX_OVER, 0);
		}
	}

	/* RX FIFO FULL. */
	if (ic_intr_stat & QM_I2C_IC_INTR_STAT_RX_FULL) {
		/* Empty RX FIFO. */
		empty_rx_fifo(i2c, transfer, controller);
		/* Track activity of controller when addressed. */
		is_addressed = true;
	}

	/*
	 * Check TX status.
	 * Master read (RX), slave write (TX).
	 *
	 * Interrupts handled for TX status:
	 * - Read request
	 * - RX done (actually a TX state: RX done by master)
	 * - TX FIFO empty.
	 *
	 * TX FIFO empty interrupt must be handled after RX DONE interrupt: when
	 * RX DONE is triggered, TX FIFO is flushed (thus emptied) creating a
	 * TX_ABORT interrupt and a TX_EMPTY condition. TX_ABORT shall be
	 * cleared and TX_EMPTY interrupt disabled.
	 */
	else if (ic_intr_stat & QM_I2C_IC_INTR_STAT_RD_REQ) {
		/* Clear read request interrupt. */
		controller->ic_clr_rd_req;
		/* Track activity of controller when addressed. */
		is_addressed = true;
		fill_tx_fifo(i2c, transfer, controller);
		/* Enable TX EMPTY interrupts. */
		controller->ic_intr_mask |= QM_I2C_IC_INTR_MASK_TX_EMPTY;
	} else if (ic_intr_stat & QM_I2C_IC_INTR_STAT_RX_DONE) {
		controller->ic_clr_rx_done;
		/* Clear TX ABORT as it is triggered when FIFO is flushed. */
		controller->ic_clr_tx_abrt;
		/* Disable TX EMPTY interrupt. */
		controller->ic_intr_mask &= ~QM_I2C_IC_INTR_MASK_TX_EMPTY;
		/*
		 * Read again the interrupt status in case of a stop or a start
		 * interrupt has been triggered in the meantime.
		 */
		ic_intr_stat = controller->ic_intr_stat;
	} else if (ic_intr_stat & QM_I2C_IC_INTR_STAT_TX_EMPTY) {
		fill_tx_fifo(i2c, transfer, controller);
	}
	/* General call detected. */
	else if (ic_intr_stat & QM_I2C_IC_INTR_STAT_GEN_CALL_DETECTED) {
		if (transfer->callback) {
			transfer->callback(transfer->callback_data, 0,
					   QM_I2C_GEN_CALL_DETECTED, 0);
		}
#if (FIX_1)
		/*
		 * Workaround.
		 * The interrupt may not actually be cleared when register is
		 * read too early.
		 */
		while (controller->ic_intr_stat &
		       QM_I2C_IC_INTR_STAT_GEN_CALL_DETECTED) {
			/* Clear General call interrupt. */
			controller->ic_clr_gen_call;
		}
#else
		controller->ic_clr_gen_call;
#endif
		/* Track activity of controller when addressed. */
		is_addressed = true;
	}

	/* Stop condition detected. */
	if (ic_intr_stat & QM_I2C_IC_INTR_STAT_STOP_DETECTED) {
		/* Empty RX FIFO. */
		empty_rx_fifo(i2c, transfer, controller);
		/*
		 * Stop transfer if single transfer asked and controller has
		 * been addressed.
		 * Driver only knows it has been addressed if:
		 * - It already triggered an interrupt on TX_EMPTY or RX_FULL
		 * - Data was read from RX FIFO.
		 */
		if ((transfer->stop == true) &&
		    (is_addressed || (i2c_read_pos[i2c] != 0))) {
			controller_disable(i2c);
		}
		if (transfer->callback) {
			transfer->callback(
			    transfer->callback_data, 0, QM_I2C_STOP_DETECTED,
			    (transfer_ongoing) ? i2c_read_pos[i2c] : 0);
		}
		/* Reset per-transaction bookkeeping for the next bus start. */
		i2c_write_pos[i2c] = 0;
		i2c_read_pos[i2c] = 0;
		controller->ic_intr_mask &= ~QM_I2C_IC_INTR_MASK_TX_EMPTY;
		is_addressed = false;
		/* Clear stop interrupt. */
		controller->ic_clr_stop_det;
		/*
		 * Read again the interrupt status in case of a start interrupt
		 * has been triggered in the meantime.
		 */
		ic_intr_stat = controller->ic_intr_stat;
	}

	/*
	 * START or RESTART condition detected.
	 * The RESTART_DETECTED interrupt is not used as it is redundant with
	 * the START_DETECTED interrupt.
	 */
	if (ic_intr_stat & QM_I2C_IC_INTR_STAT_START_DETECTED) {
		empty_rx_fifo(i2c, transfer, controller);
		if (transfer->callback) {
			transfer->callback(
			    transfer->callback_data, 0, QM_I2C_START_DETECTED,
			    (transfer_ongoing) ? i2c_read_pos[i2c] : 0);
		}
		transfer_ongoing = true;
		i2c_write_pos[i2c] = 0;
		i2c_read_pos[i2c] = 0;
		/* Clear Start detected interrupt. */
		controller->ic_clr_start_det;
	}
}
static __inline__ void static __inline__ void
i2c_isr_master_handler(const qm_i2c_t i2c, i2c_isr_master_handler(const qm_i2c_t i2c,
const volatile qm_i2c_transfer_t *const transfer, const volatile qm_i2c_transfer_t *const transfer,
@ -307,38 +578,41 @@ static void i2c_isr_handler(const qm_i2c_t i2c)
/* Check for errors. */ /* Check for errors. */
QM_ASSERT(!(controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_TX_OVER)); QM_ASSERT(!(controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_TX_OVER));
QM_ASSERT(!(controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_RX_UNDER)); QM_ASSERT(!(controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_RX_UNDER));
QM_ASSERT(!(controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_RX_OVER));
/* /*
* TX ABORT interrupt. * TX ABORT interrupt.
* Avoid spurious interrupts by checking RX DONE interrupt. * Avoid spurious interrupts by checking RX DONE interrupt: RX_DONE
* interrupt also trigger a TX_ABORT interrupt when flushing FIFO.
*/ */
if ((controller->ic_intr_stat &
if (controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_TX_ABRT) { (QM_I2C_IC_INTR_STAT_TX_ABRT | QM_I2C_IC_INTR_STAT_RX_DONE)) ==
QM_I2C_IC_INTR_STAT_TX_ABRT) {
handle_tx_abrt(i2c, transfer, controller); handle_tx_abrt(i2c, transfer, controller);
} }
/* Master mode. */ /* Master mode. */
if (controller->ic_con & QM_I2C_IC_CON_MASTER_MODE) { if (controller->ic_con & QM_I2C_IC_CON_MASTER_MODE) {
QM_ASSERT(
!(controller->ic_intr_stat & QM_I2C_IC_INTR_STAT_RX_OVER));
i2c_isr_master_handler(i2c, transfer, controller); i2c_isr_master_handler(i2c, transfer, controller);
} }
/* Slave mode. */ /* Slave mode. */
else { else {
/* Add I2C ISR slave handler here. */ i2c_isr_slave_handler(i2c, transfer, controller);
} }
} }
QM_ISR_DECLARE(qm_i2c_0_isr) QM_ISR_DECLARE(qm_i2c_0_isr)
{ {
i2c_isr_handler(QM_I2C_0); i2c_isr_handler(QM_I2C_0);
QM_ISR_EOI(QM_IRQ_I2C_0_VECTOR); QM_ISR_EOI(QM_IRQ_I2C_0_INT_VECTOR);
} }
#if (QUARK_SE) #if (QUARK_SE)
QM_ISR_DECLARE(qm_i2c_1_isr) QM_ISR_DECLARE(qm_i2c_1_isr)
{ {
i2c_isr_handler(QM_I2C_1); i2c_isr_handler(QM_I2C_1);
QM_ISR_EOI(QM_IRQ_I2C_1_VECTOR); QM_ISR_EOI(QM_IRQ_I2C_1_INT_VECTOR);
} }
#endif #endif
@ -471,6 +745,12 @@ int qm_i2c_set_config(const qm_i2c_t i2c, const qm_i2c_config_t *const cfg)
ic_con = cfg->address_mode ic_con = cfg->address_mode
<< QM_I2C_IC_CON_10BITADDR_SLAVE_OFFSET; << QM_I2C_IC_CON_10BITADDR_SLAVE_OFFSET;
if (cfg->stop_detect_behaviour ==
QM_I2C_SLAVE_INTERRUPT_WHEN_ADDRESSED) {
/* Set stop interrupt only when addressed. */
ic_con |= QM_I2C_IC_CON_STOP_DET_IFADDRESSED;
}
/* Set slave address. */ /* Set slave address. */
controller->ic_sar = cfg->slave_addr; controller->ic_sar = cfg->slave_addr;
break; break;
@ -727,6 +1007,57 @@ int qm_i2c_master_irq_transfer(const qm_i2c_t i2c,
return 0; return 0;
} }
/*
 * Start an interrupt-based slave transfer: record the user's transfer
 * descriptor, reset the per-transaction state, then enable the controller
 * with the slave-relevant interrupt sources active.
 */
int qm_i2c_slave_irq_transfer(const qm_i2c_t i2c,
			      volatile const qm_i2c_transfer_t *const xfer)
{
	QM_CHECK(i2c < QM_I2C_NUM, -EINVAL);
	QM_CHECK(xfer != NULL, -EINVAL);

	/* Assign common properties. */
	i2c_transfer[i2c] = xfer;
	i2c_write_pos[i2c] = 0;
	i2c_read_pos[i2c] = 0;
	transfer_ongoing = false;
	is_addressed = false;

	/*
	 * NOTE(review): presumably writing QM_I2C_IC_INTR_MASK_ALL here
	 * masks every interrupt source while the controller is enabled, so
	 * the ISR cannot run before the state above is consistent — confirm
	 * against the register definition.
	 */
	QM_I2C[i2c]->ic_intr_mask = QM_I2C_IC_INTR_MASK_ALL;
	controller_enable(i2c);

	/*
	 * Almost all interrupts must be active to handle everything from the
	 * driver, for the controller not to be stuck in a specific state.
	 * Only TX_EMPTY must be set when needed, otherwise it will be triggered
	 * everytime, even when it is not required to fill the TX FIFO.
	 */
	QM_I2C[i2c]->ic_intr_mask =
	    QM_I2C_IC_INTR_MASK_RX_UNDER | QM_I2C_IC_INTR_MASK_RX_OVER |
	    QM_I2C_IC_INTR_MASK_RX_FULL | QM_I2C_IC_INTR_MASK_TX_ABORT |
	    QM_I2C_IC_INTR_MASK_RX_DONE | QM_I2C_IC_INTR_MASK_STOP_DETECTED |
	    QM_I2C_IC_INTR_MASK_START_DETECTED | QM_I2C_IC_INTR_MASK_RD_REQ |
	    QM_I2C_IC_INTR_MASK_GEN_CALL_DETECTED;

	return 0;
}
/*
 * Hand fresh buffers to an ongoing slave transfer.
 * Intended to be called from the user callback on QM_I2C_RX_FULL or
 * QM_I2C_TX_EMPTY.
 */
int qm_i2c_slave_irq_transfer_update(
    const qm_i2c_t i2c, volatile const qm_i2c_transfer_t *const xfer)
{
	QM_CHECK(i2c < QM_I2C_NUM, -EINVAL);
	QM_CHECK(xfer != NULL, -EINVAL);

	/* Restart progress counters for the replacement buffers. */
	i2c_write_pos[i2c] = 0;
	i2c_read_pos[i2c] = 0;
	/* Point the ISR at the caller's refreshed transfer descriptor. */
	i2c_transfer[i2c] = xfer;
	/* Tell the ISR we still have data to transfer. */
	transfer_ongoing = true;

	return 0;
}
static void controller_enable(const qm_i2c_t i2c) static void controller_enable(const qm_i2c_t i2c)
{ {
qm_i2c_reg_t *const controller = QM_I2C[i2c]; qm_i2c_reg_t *const controller = QM_I2C[i2c];
@ -885,7 +1216,11 @@ static void i2c_dma_transmit_callback(void *callback_context, uint32_t len,
data_command |= data_command |=
QM_I2C_IC_DATA_CMD_STOP_BIT_CTRL; QM_I2C_IC_DATA_CMD_STOP_BIT_CTRL;
} }
/* Write last byte and increase len count. */
/* Wait if FIFO is full */
while (!(QM_I2C[i2c]->ic_status & QM_I2C_IC_STATUS_TNF))
;
/* Write last byte and increase len count */
QM_I2C[i2c]->ic_data_cmd = data_command; QM_I2C[i2c]->ic_data_cmd = data_command;
len++; len++;
@ -1054,10 +1389,10 @@ int qm_i2c_dma_channel_config(const qm_i2c_t i2c,
dma_channel_config.channel_direction = direction; dma_channel_config.channel_direction = direction;
dma_channel_config.source_transfer_width = QM_DMA_TRANS_WIDTH_8; dma_channel_config.source_transfer_width = QM_DMA_TRANS_WIDTH_8;
dma_channel_config.destination_transfer_width = QM_DMA_TRANS_WIDTH_8; dma_channel_config.destination_transfer_width = QM_DMA_TRANS_WIDTH_8;
/* NOTE: This can be optimized for performance. */ /* Burst length is set to half the FIFO for performance */
dma_channel_config.source_burst_length = QM_DMA_BURST_TRANS_LENGTH_1; dma_channel_config.source_burst_length = QM_DMA_BURST_TRANS_LENGTH_8;
dma_channel_config.destination_burst_length = dma_channel_config.destination_burst_length =
QM_DMA_BURST_TRANS_LENGTH_1; QM_DMA_BURST_TRANS_LENGTH_8;
dma_channel_config.client_callback = i2c_dma_callbacks[direction]; dma_channel_config.client_callback = i2c_dma_callbacks[direction];
dma_channel_config.transfer_type = QM_DMA_TYPE_SINGLE; dma_channel_config.transfer_type = QM_DMA_TYPE_SINGLE;
@ -1106,13 +1441,12 @@ int qm_i2c_master_dma_transfer(const qm_i2c_t i2c,
i2c_read_cmd_send[i2c] = xfer->rx_len; i2c_read_cmd_send[i2c] = xfer->rx_len;
i2c_transfer[i2c] = xfer; i2c_transfer[i2c] = xfer;
/* /* Set DMA TX and RX waterlevels to half the FIFO depth for performance
* Set DMA TX and RX waterlevels to 0, to make sure no data is lost. reasons */
* QM_I2C[i2c]->ic_dma_tdlr = (QM_I2C_FIFO_SIZE / 2);
* NOTE: This can be optimized for performance. /* RDLR value is desired watermark-1, according to I2C datasheet section
*/ 3.17.7 */
QM_I2C[i2c]->ic_dma_rdlr = 0; QM_I2C[i2c]->ic_dma_rdlr = (QM_I2C_FIFO_SIZE / 2) - 1;
QM_I2C[i2c]->ic_dma_tdlr = 0;
i2c_dma_context[i2c].i2c_error_code = 0; i2c_dma_context[i2c].i2c_error_code = 0;
@ -1208,3 +1542,46 @@ int qm_i2c_master_dma_transfer(const qm_i2c_t i2c,
return rc; return rc;
} }
#if (ENABLE_RESTORE_CONTEXT)
/* Save the I2C controller configuration registers before sleep. */
int qm_i2c_save_context(const qm_i2c_t i2c, qm_i2c_context_t *const ctx)
{
	QM_CHECK(i2c < QM_I2C_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	qm_i2c_reg_t *const controller = QM_I2C[i2c];

	ctx->con = controller->ic_con;
	ctx->sar = controller->ic_sar;
	ctx->ss_scl_hcnt = controller->ic_ss_scl_hcnt;
	ctx->ss_scl_lcnt = controller->ic_ss_scl_lcnt;
	ctx->fs_scl_hcnt = controller->ic_fs_scl_hcnt;
	ctx->fs_scl_lcnt = controller->ic_fs_scl_lcnt;
	ctx->fs_spklen = controller->ic_fs_spklen;
	ctx->ic_intr_mask = controller->ic_intr_mask;
	ctx->enable = controller->ic_enable;

	return 0;
}

/* Write a saved I2C configuration back to the controller registers. */
int qm_i2c_restore_context(const qm_i2c_t i2c,
			   const qm_i2c_context_t *const ctx)
{
	QM_CHECK(i2c < QM_I2C_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	qm_i2c_reg_t *const controller = QM_I2C[i2c];

	controller->ic_con = ctx->con;
	controller->ic_sar = ctx->sar;
	controller->ic_ss_scl_hcnt = ctx->ss_scl_hcnt;
	controller->ic_ss_scl_lcnt = ctx->ss_scl_lcnt;
	controller->ic_fs_scl_hcnt = ctx->fs_scl_hcnt;
	controller->ic_fs_scl_lcnt = ctx->fs_scl_lcnt;
	controller->ic_fs_spklen = ctx->fs_spklen;
	controller->ic_intr_mask = ctx->ic_intr_mask;
	controller->ic_enable = ctx->enable;

	return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -282,12 +282,12 @@ static void qm_ss_i2c_isr_handler(const qm_ss_i2c_t i2c)
} }
} }
QM_ISR_DECLARE(qm_ss_i2c_isr_0) QM_ISR_DECLARE(qm_ss_i2c_0_isr)
{ {
qm_ss_i2c_isr_handler(QM_SS_I2C_0); qm_ss_i2c_isr_handler(QM_SS_I2C_0);
} }
QM_ISR_DECLARE(qm_ss_i2c_isr_1) QM_ISR_DECLARE(qm_ss_i2c_1_isr)
{ {
qm_ss_i2c_isr_handler(QM_SS_I2C_1); qm_ss_i2c_isr_handler(QM_SS_I2C_1);
} }
@ -721,3 +721,39 @@ int qm_ss_i2c_irq_transfer_terminate(const qm_ss_i2c_t i2c)
return 0; return 0;
} }
#if (ENABLE_RESTORE_CONTEXT)
/* Save the SS I2C clock counts and control register before sleep. */
int qm_ss_i2c_save_context(const qm_ss_i2c_t i2c,
			   qm_ss_i2c_context_t *const ctx)
{
	uint32_t controller;

	QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	/*
	 * Index the base-address table only after validating i2c: the
	 * previous code read i2c_base[i2c] before the range check, which is
	 * an out-of-bounds access when i2c is invalid (the SS GPIO
	 * save/restore pair already uses this check-then-index order).
	 */
	controller = i2c_base[i2c];

	ctx->i2c_fs_scl_cnt =
	    __builtin_arc_lr(controller + QM_SS_I2C_FS_SCL_CNT);
	ctx->i2c_ss_scl_cnt =
	    __builtin_arc_lr(controller + QM_SS_I2C_SS_SCL_CNT);
	ctx->i2c_con = __builtin_arc_lr(controller + QM_SS_I2C_CON);

	return 0;
}

/* Restore the SS I2C clock counts and control register after sleep. */
int qm_ss_i2c_restore_context(const qm_ss_i2c_t i2c,
			      const qm_ss_i2c_context_t *const ctx)
{
	uint32_t controller;

	QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);

	/* Same fix as in save: validate i2c before indexing the table. */
	controller = i2c_base[i2c];

	__builtin_arc_sr(ctx->i2c_fs_scl_cnt,
			 controller + QM_SS_I2C_FS_SCL_CNT);
	__builtin_arc_sr(ctx->i2c_ss_scl_cnt,
			 controller + QM_SS_I2C_SS_SCL_CNT);
	__builtin_arc_sr(ctx->i2c_con, controller + QM_SS_I2C_CON);

	return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -341,6 +341,39 @@ int qm_dma_transfer_mem_to_mem(const qm_dma_t dma,
const qm_dma_channel_id_t channel_id, const qm_dma_channel_id_t channel_id,
qm_dma_transfer_t *const transfer_config); qm_dma_transfer_t *const transfer_config);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save DMA peripheral's context.
*
* Saves the configuration of the specified DMA peripheral
* before entering sleep.
*
* @param[in] dma DMA device.
* @param[out] ctx DMA context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_dma_save_context(const qm_dma_t dma, qm_dma_context_t *const ctx);
/**
* Restore DMA peripheral's context.
*
* Restore the configuration of the specified DMA peripheral
* after exiting sleep.
*
* @param[in] dma DMA device.
* @param[in] ctx DMA context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_dma_restore_context(const qm_dma_t dma,
const qm_dma_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */

View file

@ -196,6 +196,41 @@ int qm_flash_page_erase(const qm_flash_t flash, const qm_flash_region_t region,
*/ */
int qm_flash_mass_erase(const qm_flash_t flash, const uint8_t include_rom); int qm_flash_mass_erase(const qm_flash_t flash, const uint8_t include_rom);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save flash context.
*
* Save the configuration of the specified flash controller.
*
* @param[in] flash Flash controller index.
* @param[out] ctx Flash context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_flash_save_context(const qm_flash_t flash,
qm_flash_context_t *const ctx);
/**
* Restore flash context.
*
* Restore the configuration of the specified flash controller.
* If the system clock frequency is lowered, the flash timings need to be
* restored. Otherwise, reading from flash will not be optimal
* (there will be 2 wait states instead of 0 wait states.)
*
* @param[in] flash Flash controller index.
* @param[in] ctx Flash context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_flash_restore_context(const qm_flash_t flash,
const qm_flash_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */

View file

@ -41,17 +41,6 @@
*/ */
typedef void (*qm_fpr_callback_t)(void *data); typedef void (*qm_fpr_callback_t)(void *data);
/**
* FPR register map.
*/
typedef enum {
QM_FPR_0, /**< FPR 0. */
QM_FPR_1, /**< FPR 1. */
QM_FPR_2, /**< FPR 2. */
QM_FPR_3, /**< FPR 3. */
QM_FPR_NUM
} qm_fpr_id_t;
/** /**
* FPR enable type. * FPR enable type.
*/ */
@ -63,7 +52,7 @@ typedef enum {
} qm_fpr_en_t; } qm_fpr_en_t;
/** /**
* FPR vilation mode type. * FPR violation mode type.
*/ */
typedef enum { typedef enum {
FPR_VIOL_MODE_INTERRUPT = 0, /**< Generate interrupt on violation. */ FPR_VIOL_MODE_INTERRUPT = 0, /**< Generate interrupt on violation. */
@ -162,6 +151,53 @@ int qm_fpr_set_violation_policy(const qm_fpr_viol_mode_t mode,
const qm_flash_t flash, const qm_flash_t flash,
qm_fpr_callback_t fpr_cb, void *data); qm_fpr_callback_t fpr_cb, void *data);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save FPR context.
*
* Save the configuration of the specified FPR peripheral
* before entering sleep.
* The Flash peripheral linked to the FPR saved needs
* to be saved as well by calling qm_flash_save_context().
*
* FPR configuration is lost after sleep and can therefore
* be modified even if this configuration was locked before sleep.
* To support persistent configuration, the configuration must be
* restored when resuming as part of the bootloader.
*
* @param[in] flash Flash index.
* @param[out] ctx FPR context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_fpr_save_context(const qm_flash_t flash, qm_fpr_context_t *const ctx);
/**
* Restore FPR context.
*
* Restore the configuration of the specified FPR peripheral
* after exiting sleep.
* The Flash peripheral linked to the FPR restored needs
* to be restored as well by calling qm_flash_restore_context().
*
* FPR configuration is lost after sleep and can therefore
* be modified even if this configuration was locked before sleep.
* To support persistent configuration, the configuration must be
* restored when resuming as part of the bootloader.
*
* @param[in] flash Flash index.
* @param[in] ctx FPR context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_fpr_restore_context(const qm_flash_t flash,
const qm_fpr_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */

View file

@ -173,6 +173,39 @@ int qm_gpio_read_port(const qm_gpio_t gpio, uint32_t *const port);
*/ */
int qm_gpio_write_port(const qm_gpio_t gpio, const uint32_t val); int qm_gpio_write_port(const qm_gpio_t gpio, const uint32_t val);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save GPIO context.
*
* Save the configuration of the specified GPIO peripheral
* before entering sleep.
*
* @param[in] gpio GPIO port index.
* @param[out] ctx GPIO context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_gpio_save_context(const qm_gpio_t gpio, qm_gpio_context_t *const ctx);
/**
* Restore GPIO context.
*
* Restore the configuration of the specified GPIO peripheral
* after exiting sleep.
*
* @param[in] gpio GPIO port index.
* @param[in] ctx GPIO context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_gpio_restore_context(const qm_gpio_t gpio,
const qm_gpio_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */

View file

@ -31,8 +31,8 @@
#define __QM_I2C_H__ #define __QM_I2C_H__
#include "qm_common.h" #include "qm_common.h"
#include "qm_soc_regs.h"
#include "qm_dma.h" #include "qm_dma.h"
#include "qm_soc_regs.h"
/** /**
* I2C. * I2C.
@ -111,7 +111,12 @@ typedef enum {
QM_I2C_TX_ABORT = BIT(18), /**< Tx abort. */ QM_I2C_TX_ABORT = BIT(18), /**< Tx abort. */
QM_I2C_TX_OVER = BIT(19), /**< Tx overflow. */ QM_I2C_TX_OVER = BIT(19), /**< Tx overflow. */
QM_I2C_RX_OVER = BIT(20), /**< Rx overflow. */ QM_I2C_RX_OVER = BIT(20), /**< Rx overflow. */
QM_I2C_RX_UNDER = BIT(21) /**< Rx underflow. */ QM_I2C_RX_UNDER = BIT(21), /**< Rx underflow. */
QM_I2C_START_DETECTED = BIT(22), /**< Start or restart detected. */
QM_I2C_TX_EMPTY = BIT(23), /**< TX buffer empty. */
QM_I2C_RX_FULL = BIT(24), /**< RX buffer full. */
	QM_I2C_STOP_DETECTED = BIT(25), /**< Stop detected. */
QM_I2C_GEN_CALL_DETECTED = BIT(26) /**< General call detected. */
} qm_i2c_status_t; } qm_i2c_status_t;
/** /**
@ -121,7 +126,7 @@ typedef enum {
/** Interrupt regardless of whether this slave is addressed or not. */ /** Interrupt regardless of whether this slave is addressed or not. */
QM_I2C_SLAVE_INTERRUPT_ALWAYS = 0x0, QM_I2C_SLAVE_INTERRUPT_ALWAYS = 0x0,
/** Only interrupt if this slave is being addressed. */ /** Trigger interrupt only if this slave is being addressed. */
QM_I2C_SLAVE_INTERRUPT_WHEN_ADDRESSED = 0x1 QM_I2C_SLAVE_INTERRUPT_WHEN_ADDRESSED = 0x1
} qm_i2c_slave_stop_t; } qm_i2c_slave_stop_t;
@ -136,6 +141,7 @@ typedef struct {
/** Slave stop detect behaviour */ /** Slave stop detect behaviour */
qm_i2c_slave_stop_t stop_detect_behaviour; qm_i2c_slave_stop_t stop_detect_behaviour;
} qm_i2c_config_t; } qm_i2c_config_t;
/** /**
@ -145,15 +151,18 @@ typedef struct {
* - If rx len is 0: perform transmit-only transaction. * - If rx len is 0: perform transmit-only transaction.
* - Both tx and rx len not 0: perform a transmit-then-receive * - Both tx and rx len not 0: perform a transmit-then-receive
* combined transaction. * combined transaction.
* Slave mode:
* - If read or write exceed the buffer, then wrap around.
*/ */
typedef struct { typedef struct {
uint8_t *tx; /**< Write data. */ uint8_t *tx; /**< Write data. */
uint32_t tx_len; /**< Write data length. */ uint32_t tx_len; /**< Write data length. */
uint8_t *rx; /**< Read data. */ uint8_t *rx; /**< Read data. */
uint32_t rx_len; /**< Read buffer length. */ uint32_t rx_len; /**< Read buffer length. */
bool stop; /**< Generate master STOP. */
/**
* Master: Generate STOP.
* Slave: stop at the end of transaction.
*/
bool stop;
/** /**
* Transfer callback. * Transfer callback.
@ -161,6 +170,13 @@ typedef struct {
* Called after all data is transmitted/received or if the driver * Called after all data is transmitted/received or if the driver
* detects an error during the I2C transfer. * detects an error during the I2C transfer.
* *
* In slave mode, qm_i2c_slave_irq_transfer_update shall be called from
* this callback to update transfer buffers when receiving a
* QM_I2C_RX_FULL or QM_I2C_TX_EMPTY status. If the update function is
* not called with these statuses, the driver will drop every new data
* received or send dummy data (0x00) for each byte until next bus
* start.
*
* @param[in] data User defined data. * @param[in] data User defined data.
* @param[in] rc 0 on success. * @param[in] rc 0 on success.
* Negative @ref errno for possible error codes. * Negative @ref errno for possible error codes.
@ -295,7 +311,33 @@ int qm_i2c_master_irq_transfer(const qm_i2c_t i2c,
* @retval Negative @ref errno for possible error codes. * @retval Negative @ref errno for possible error codes.
*/ */
int qm_i2c_slave_irq_transfer(const qm_i2c_t i2c, int qm_i2c_slave_irq_transfer(const qm_i2c_t i2c,
const qm_i2c_transfer_t *const xfer); volatile const qm_i2c_transfer_t *const xfer);
/**
* I2C interrupt based slave transfer buffer update.
*
* Update transfer buffers location and size. The function will
* replenish/empty TX/RX FIFOs on I2C empty/full interrupts.
* This function must be called from callback function to update transfer
* buffers when requested by ISR.
*
* It is strongly recommended to use this function for slave-based applications
* only, as slave controllers usually do not know how many frames an external
* master will send or request before starting the communication.
* Master controllers should not use this function as it will most likely
* corrupt the transaction.
*
* @param[in] i2c Which I2C to transfer from.
* @param[in] xfer Transfer structure includes write / read buffers, length,
* user callback function and the callback context. This must
* not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_i2c_slave_irq_transfer_update(
const qm_i2c_t i2c, volatile const qm_i2c_transfer_t *const xfer);
/** /**
* Terminate I2C IRQ transfer. * Terminate I2C IRQ transfer.
@ -407,6 +449,41 @@ int qm_i2c_slave_dma_transfer(const qm_i2c_t i2c,
*/ */
int qm_i2c_dma_transfer_terminate(const qm_i2c_t i2c); int qm_i2c_dma_transfer_terminate(const qm_i2c_t i2c);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save I2C context.
*
* Saves the configuration of the specified I2C peripheral
* before entering sleep. The slave operations need to be disabled before
* being able to save the context as otherwise we could be interrupted by
* an I2C transfer while saving registers.
*
* @param[in] i2c I2C port index.
* @param[out] ctx I2C context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_i2c_save_context(const qm_i2c_t i2c, qm_i2c_context_t *const ctx);
/**
* Restore I2C context.
*
* Restore the configuration of the specified I2C peripheral
* after exiting sleep.
*
* @param[in] i2c I2C port index.
* @param[in] ctx I2C context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_i2c_restore_context(const qm_i2c_t i2c,
const qm_i2c_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */

View file

@ -33,6 +33,10 @@
#include "qm_common.h" #include "qm_common.h"
#include "qm_soc_regs.h" #include "qm_soc_regs.h"
#if (QM_SENSOR)
#include "qm_sensor_regs.h"
#endif
/* /*
* Linear mapping between IRQs and interrupt vectors * Linear mapping between IRQs and interrupt vectors
*/ */
@ -44,6 +48,45 @@
#endif #endif
#if (ENABLE_RESTORE_CONTEXT)
#if (HAS_APIC) || (QM_SENSOR)
/**
* Save IRQ context.
*
* On x86:
* - Save IOAPIC Redirection Table for all IRQs.
*
* On sensor:
* - Save interrupt enable, priority and trigger for all IRQs.
*
* @param[out] ctx IRQ context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_irq_save_context(qm_irq_context_t *const ctx);
/**
* Restore IRQ context.
*
* On x86:
* Restore IOAPIC Redirection Table for all IRQs.
* Restore LAPIC to default configuration.
*
* On sensor:
* - Restore interrupt enable, priority and trigger for all IRQs.
*
* @param[in] ctx IRQ context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_irq_restore_context(const qm_irq_context_t *const ctx);
#endif /* HAS_APIC || QM_SENSOR */
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* Interrupt driver. * Interrupt driver.
* *

View file

@ -45,53 +45,53 @@
* ISR for ADC 0 convert and calibration interrupt. * ISR for ADC 0 convert and calibration interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_ADC_0, qm_adc_0_isr); * @code qm_irq_request(QM_IRQ_ADC_0_CAL_INT, qm_adc_0_cal_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_adc_0_isr); QM_ISR_DECLARE(qm_adc_0_cal_isr);
/** /**
* ISR for ADC 0 change mode interrupt. * ISR for ADC 0 change mode interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_ADC_PWR_0, qm_adc_pwr_0_isr); * @code qm_irq_request(QM_IRQ_ADC_0_PWR_INT, qm_adc_0_pwr_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_adc_pwr_0_isr); QM_ISR_DECLARE(qm_adc_0_pwr_isr);
#endif /* QUARK_D2000 */ #endif /* QUARK_D2000 */
/** /**
* ISR for Always-on Periodic Timer 0 interrupt. * ISR for Always-on Periodic Timer 0 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_AONPT_0, qm_aonpt_isr_0); * @code qm_irq_request(QM_IRQ_AONPT_0_INT, qm_aonpt_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_aonpt_isr_0); QM_ISR_DECLARE(qm_aonpt_0_isr);
/** /**
* ISR for Analog Comparator 0 interrupt. * ISR for Analog Comparator 0 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_AC, qm_ac_isr); * @code qm_irq_request(QM_IRQ_COMPARATOR_0_INT, qm_comparator_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_ac_isr); QM_ISR_DECLARE(qm_comparator_0_isr);
/** /**
* ISR for DMA error interrupt. * ISR for DMA error interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_ERR, qm_dma_0_isr_err); * @code qm_irq_request(QM_IRQ_DMA_0_ERROR_INT, qm_dma_0_error_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_dma_0_isr_err); QM_ISR_DECLARE(qm_dma_0_error_isr);
/** /**
* ISR for DMA channel 0 interrupt. * ISR for DMA channel 0 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_0, qm_dma_0_isr_0); * @code qm_irq_request(QM_IRQ_DMA_0_INT_0, qm_dma_0_isr_0);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_dma_0_isr_0); QM_ISR_DECLARE(qm_dma_0_isr_0);
@ -100,7 +100,7 @@ QM_ISR_DECLARE(qm_dma_0_isr_0);
* ISR for DMA channel 1 interrupt. * ISR for DMA channel 1 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_1, qm_dma_0_isr_1); * @code qm_irq_request(QM_IRQ_DMA_0_INT_1, qm_dma_0_isr_1);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_dma_0_isr_1); QM_ISR_DECLARE(qm_dma_0_isr_1);
@ -110,7 +110,7 @@ QM_ISR_DECLARE(qm_dma_0_isr_1);
* ISR for DMA channel 2 interrupt. * ISR for DMA channel 2 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_2, qm_dma_0_isr_2); * @code qm_irq_request(QM_IRQ_DMA_0_INT_2, qm_dma_0_isr_2);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_dma_0_isr_2); QM_ISR_DECLARE(qm_dma_0_isr_2);
@ -119,7 +119,7 @@ QM_ISR_DECLARE(qm_dma_0_isr_2);
* ISR for DMA channel 3 interrupt. * ISR for DMA channel 3 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_3, qm_dma_0_isr_3); * @code qm_irq_request(QM_IRQ_DMA_0_INT_3, qm_dma_0_isr_3);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_dma_0_isr_3); QM_ISR_DECLARE(qm_dma_0_isr_3);
@ -128,7 +128,7 @@ QM_ISR_DECLARE(qm_dma_0_isr_3);
* ISR for DMA channel 4 interrupt. * ISR for DMA channel 4 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_4, qm_dma_0_isr_4); * @code qm_irq_request(QM_IRQ_DMA_0_INT_4, qm_dma_0_isr_4);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_dma_0_isr_4); QM_ISR_DECLARE(qm_dma_0_isr_4);
@ -137,7 +137,7 @@ QM_ISR_DECLARE(qm_dma_0_isr_4);
* ISR for DMA channel 5 interrupt. * ISR for DMA channel 5 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_5, qm_dma_0_isr_5); * @code qm_irq_request(QM_IRQ_DMA_0_INT_5, qm_dma_0_isr_5);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_dma_0_isr_5); QM_ISR_DECLARE(qm_dma_0_isr_5);
@ -146,7 +146,7 @@ QM_ISR_DECLARE(qm_dma_0_isr_5);
* ISR for DMA channel 6 interrupt. * ISR for DMA channel 6 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_6, qm_dma_0_isr_6); * @code qm_irq_request(QM_IRQ_DMA_0_INT_6, qm_dma_0_isr_6);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_dma_0_isr_6); QM_ISR_DECLARE(qm_dma_0_isr_6);
@ -155,7 +155,7 @@ QM_ISR_DECLARE(qm_dma_0_isr_6);
* ISR for DMA 0 channel 7 interrupt. * ISR for DMA 0 channel 7 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_7, qm_dma_0_isr_7); * @code qm_irq_request(QM_IRQ_DMA_0_INT_7, qm_dma_0_isr_7);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_dma_0_isr_7); QM_ISR_DECLARE(qm_dma_0_isr_7);
@ -165,45 +165,45 @@ QM_ISR_DECLARE(qm_dma_0_isr_7);
* ISR for FPR 0 interrupt. * ISR for FPR 0 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_FLASH_0, qm_fpr_isr_0); * @code qm_irq_request(QM_IRQ_FLASH_MPR_0_INT, qm_flash_mpr_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_fpr_isr_0); QM_ISR_DECLARE(qm_flash_mpr_0_isr);
/** /**
* ISR for FPR 1 interrupt. * ISR for FPR 1 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_FLASH_1, qm_fpr_isr_1); * @code qm_irq_request(QM_IRQ_FLASH_MPR_1_INT, qm_flash_mpr_1_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_fpr_isr_1); QM_ISR_DECLARE(qm_flash_mpr_1_isr);
/** /**
* ISR for GPIO 0 interrupt. * ISR for GPIO 0 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_GPIO_0, qm_gpio_isr_0); * @code qm_irq_request(QM_IRQ_GPIO_0_INT, qm_gpio_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_gpio_isr_0); QM_ISR_DECLARE(qm_gpio_0_isr);
#if (HAS_AON_GPIO) #if (HAS_AON_GPIO)
/** /**
* ISR for AON GPIO 0 interrupt. * ISR for AON GPIO 0 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_AONGPIO_0, qm_aon_gpio_isr_0); * @code qm_irq_request(QM_IRQ_AON_GPIO_0_INT, qm_aon_gpio_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_aon_gpio_isr_0); QM_ISR_DECLARE(qm_aon_gpio_0_isr);
#endif /* HAS_AON_GPIO */ #endif /* HAS_AON_GPIO */
/** /**
* ISR for I2C 0 interrupt. * ISR for I2C 0 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_I2C_0, qm_i2c_0_isr); * @code qm_irq_request(QM_IRQ_I2C_0_INT, qm_i2c_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_i2c_0_isr); QM_ISR_DECLARE(qm_i2c_0_isr);
@ -212,7 +212,7 @@ QM_ISR_DECLARE(qm_i2c_0_isr);
* ISR for I2C 1 interrupt. * ISR for I2C 1 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_I2C_1, qm_i2c_1_isr); * @code qm_irq_request(QM_IRQ_I2C_1_INT, qm_i2c_1_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_i2c_1_isr); QM_ISR_DECLARE(qm_i2c_1_isr);
@ -221,57 +221,58 @@ QM_ISR_DECLARE(qm_i2c_1_isr);
* ISR for Mailbox interrupt. * ISR for Mailbox interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_MBOX, qm_mbox_isr); * @code qm_irq_request(QM_IRQ_MAILBOX_0_INT, qm_mailbox_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_mbox_isr); QM_ISR_DECLARE(qm_mailbox_0_isr);
/** /**
* ISR for Memory Protection Region interrupt. * ISR for Memory Protection Region interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_SRAM, qm_mpr_isr); * @code qm_irq_request(QM_IRQ_SRAM_MPR_0_INT, qm_sram_mpr_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_mpr_isr); QM_ISR_DECLARE(qm_sram_mpr_0_isr);
/** /**
* ISR for PIC Timer interrupt. * ISR for PIC Timer interrupt.
* *
* On Quark Microcontroller D2000 Development Platform, * On Quark Microcontroller D2000 Development Platform,
* this function needs to be registered with: * this function needs to be registered with:
* @code qm_int_vector_request(QM_INT_VECTOR_PIC_TIMER, qm_pic_timer_isr); * @code qm_int_vector_request(QM_X86_PIC_TIMER_INT_VECTOR,
* qm_pic_timer_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
* *
* On Quark SE, this function needs to be registered with: * On Quark SE, this function needs to be registered with:
* @code qm_irq_request(QM_IRQ_PIC_TIMER, qm_pic_timer_isr); * @code qm_irq_request(QM_IRQ_PIC_TIMER, qm_pic_timer_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_pic_timer_isr); QM_ISR_DECLARE(qm_pic_timer_0_isr);
/** /**
* ISR for PWM 0 interrupt. * ISR for PWM 0 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_PWM_0, qm_pwm_isr_0); * @code qm_irq_request(QM_IRQ_PWM_0_INT, qm_pwm_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_pwm_isr_0); QM_ISR_DECLARE(qm_pwm_0_isr);
/** /**
* ISR for RTC 0 interrupt. * ISR for RTC 0 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_RTC_0, qm_rtc_isr_0); * @code qm_irq_request(QM_IRQ_RTC_0_INT, qm_rtc_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_rtc_isr_0); QM_ISR_DECLARE(qm_rtc_0_isr);
/** /**
* ISR for SPI Master 0 interrupt. * ISR for SPI Master 0 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_SPI_MASTER_0, qm_spi_master_0_isr); * @code qm_irq_request(QM_IRQ_SPI_MASTER_0_INT, qm_spi_master_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_spi_master_0_isr); QM_ISR_DECLARE(qm_spi_master_0_isr);
@ -281,7 +282,7 @@ QM_ISR_DECLARE(qm_spi_master_0_isr);
* ISR for SPI Master 1 interrupt. * ISR for SPI Master 1 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_SPI_MASTER_1, qm_spi_master_1_isr); * @code qm_irq_request(QM_IRQ_SPI_MASTER_1_INT, qm_spi_master_1_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_spi_master_1_isr); QM_ISR_DECLARE(qm_spi_master_1_isr);
@ -291,7 +292,7 @@ QM_ISR_DECLARE(qm_spi_master_1_isr);
* ISR for UART 0 interrupt. * ISR for UART 0 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_UART_0, qm_uart_0_isr); * @code qm_irq_request(QM_IRQ_UART_0_INT, qm_uart_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_uart_0_isr); QM_ISR_DECLARE(qm_uart_0_isr);
@ -300,7 +301,7 @@ QM_ISR_DECLARE(qm_uart_0_isr);
* ISR for UART 1 interrupt. * ISR for UART 1 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_UART_1, qm_uart_1_isr); * @code qm_irq_request(QM_IRQ_UART_1_INT, qm_uart_1_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_uart_1_isr); QM_ISR_DECLARE(qm_uart_1_isr);
@ -309,19 +310,19 @@ QM_ISR_DECLARE(qm_uart_1_isr);
* ISR for WDT 0 interrupt. * ISR for WDT 0 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_WDT_0, qm_wdt_isr_0); * @code qm_irq_request(QM_IRQ_WDT_0_INT, qm_wdt_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_wdt_isr_0); QM_ISR_DECLARE(qm_wdt_0_isr);
/** /**
* ISR for USB 0 interrupt. * ISR for USB 0 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_IRQ_USB_0, qm_usb_0_isr_0); * @code qm_irq_request(QM_IRQ_USB_0_INT, qm_usb_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_usb_0_isr_0); QM_ISR_DECLARE(qm_usb_0_isr);
/** /**
* @} * @}

View file

@ -55,15 +55,6 @@
typedef void (*qm_mpr_callback_t)(void *); typedef void (*qm_mpr_callback_t)(void *);
/* MPR identifier */
typedef enum {
QM_MPR_0 = 0,
QM_MPR_1,
QM_MPR_2,
QM_MPR_3,
QM_MPR_NUM
} qm_mpr_id_t;
/** SRAM Memory Protection Region configuration type. */ /** SRAM Memory Protection Region configuration type. */
typedef struct { typedef struct {
uint8_t en_lock_mask; /**< Enable/lock bitmask */ uint8_t en_lock_mask; /**< Enable/lock bitmask */
@ -99,6 +90,46 @@ int qm_mpr_set_config(const qm_mpr_id_t id, const qm_mpr_config_t *const cfg);
int qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode, int qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode,
qm_mpr_callback_t callback_fn, void *data); qm_mpr_callback_t callback_fn, void *data);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save MPR context.
*
* Save the configuration of the specified MPR peripheral
* before entering sleep.
*
* MPR configuration is lost after sleep and can therefore
* be modified even if this configuration was locked before sleep.
* To support persistent configuration, the configuration must be
* restored when resuming as part of the bootloader.
*
* @param[out] ctx MPR context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_mpr_save_context(qm_mpr_context_t *const ctx);
/**
* Restore MPR context.
*
* Restore the configuration of the specified MPR peripheral
* after exiting sleep.
*
* MPR configuration is lost after sleep and can therefore
* be modified even if this configuration was locked before sleep.
* To support persistent configuration, the configuration must be
* restored when resuming as part of the bootloader.
*
* @param[in] ctx MPR context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_mpr_restore_context(const qm_mpr_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */

View file

@ -105,6 +105,36 @@ int qm_pic_timer_set(const uint32_t count);
*/ */
int qm_pic_timer_get(uint32_t *const count); int qm_pic_timer_get(uint32_t *const count);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save PIC Timer peripheral's context.
*
* Saves the configuration of the specified PIC Timer peripheral
* before entering sleep.
*
* @param[out] ctx PIC Timer context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_pic_timer_save_context(qm_pic_timer_context_t *const ctx);
/**
* Restore PIC Timer peripheral's context.
*
* Restore the configuration of the specified PIC Timer peripheral
* after exiting sleep.
* The timer is restored to the count saved before sleep.
*
* @param[in] ctx PIC Timer context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_pic_timer_restore_context(const qm_pic_timer_context_t *const ctx);
#endif
/** /**
* @} * @}
*/ */

View file

@ -163,6 +163,39 @@ int qm_pwm_start(const qm_pwm_t pwm, const qm_pwm_id_t id);
*/ */
int qm_pwm_stop(const qm_pwm_t pwm, const qm_pwm_id_t id); int qm_pwm_stop(const qm_pwm_t pwm, const qm_pwm_id_t id);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save PWM peripheral's context.
*
* Saves the configuration of the specified PWM peripheral
* before entering sleep.
*
* @param[in] pwm PWM device.
* @param[out] ctx PWM context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_pwm_save_context(const qm_pwm_t pwm, qm_pwm_context_t *const ctx);
/**
* Restore PWM peripheral's context.
*
* Restore the configuration of the specified PWM peripheral
* after exiting sleep.
*
* @param[in] pwm PWM device.
* @param[in] ctx PWM context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_pwm_restore_context(const qm_pwm_t pwm,
const qm_pwm_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */

View file

@ -526,6 +526,39 @@ int qm_spi_enter_xip_mode(const qm_spi_t spi,
int qm_spi_exit_xip_mode(const qm_spi_t spi); int qm_spi_exit_xip_mode(const qm_spi_t spi);
#endif /* HAS_QSPI */ #endif /* HAS_QSPI */
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save SPI context.
*
* Saves the configuration of the specified SPI peripheral
* before entering sleep.
*
* @param[in] spi SPI controller identifier.
* @param[out] ctx SPI context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_spi_save_context(const qm_spi_t spi, qm_spi_context_t *const ctx);
/**
* Restore SPI context.
*
* Restore the configuration of the specified SPI peripheral
* after exiting sleep.
*
* @param[in] spi SPI controller identifier.
* @param[in] ctx SPI context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_spi_restore_context(const qm_spi_t spi,
const qm_spi_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */

View file

@ -300,6 +300,46 @@ int qm_ss_adc_convert(const qm_ss_adc_t adc, qm_ss_adc_xfer_t *const xfer,
*/ */
int qm_ss_adc_irq_convert(const qm_ss_adc_t adc, qm_ss_adc_xfer_t *const xfer); int qm_ss_adc_irq_convert(const qm_ss_adc_t adc, qm_ss_adc_xfer_t *const xfer);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save SS ADC context.
*
* Save the configuration of the specified ADC peripheral before entering sleep.
*
* Note: Calibration data is not saved with this function. The value of the
* ADC_ENA bit in the ADC Control register is also not saved with this function.
*
* @param[in] adc SS ADC port index.
* @param[out] ctx SS ADC context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_adc_save_context(const qm_ss_adc_t adc,
qm_ss_adc_context_t *const ctx);
/**
* Restore SS ADC context.
*
* Restore the configuration of the specified ADC peripheral after exiting
* sleep.
*
* Note: Previous calibration data is not restored with this function, the user
* may need to recalibrate the ADC. The user will need to set the ADC_ENA bit
* in the ADC Control register as it is initialized to 0.
*
* @param[in] adc SS ADC port index.
* @param[in] ctx SS ADC context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_adc_restore_context(const qm_ss_adc_t adc,
const qm_ss_adc_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */

View file

@ -178,8 +178,41 @@ int qm_ss_gpio_read_port(const qm_ss_gpio_t gpio, uint32_t *const port);
*/ */
int qm_ss_gpio_write_port(const qm_ss_gpio_t gpio, const uint32_t val); int qm_ss_gpio_write_port(const qm_ss_gpio_t gpio, const uint32_t val);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save SS GPIO context.
*
* Save the configuration of the specified GPIO peripheral
* before entering sleep.
*
* @param[in] gpio SS GPIO port index.
* @param[out] ctx SS GPIO context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_gpio_save_context(const qm_ss_gpio_t gpio,
qm_ss_gpio_context_t *const ctx);
/**
* Restore SS GPIO context.
*
* Restore the configuration of the specified GPIO peripheral
* after exiting sleep.
*
* @param[in] gpio SS GPIO port index.
* @param[in] ctx SS GPIO context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_gpio_restore_context(const qm_ss_gpio_t gpio,
const qm_ss_gpio_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */
#endif /* __QM_SS_GPIO_H__ */ #endif /* __QM_SS_GPIO_H__ */

View file

@ -266,6 +266,42 @@ int qm_ss_i2c_master_irq_transfer(const qm_ss_i2c_t i2c,
*/ */
int qm_ss_i2c_irq_transfer_terminate(const qm_ss_i2c_t i2c); int qm_ss_i2c_irq_transfer_terminate(const qm_ss_i2c_t i2c);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save SS I2C context.
*
* Saves the configuration of the specified SS I2C peripheral
* before entering sleep. The slave operations need to be disabled before
* being able to save the context as otherwise we could be interrupted by
* an I2C transfer while saving registers.
*
* @param[in] i2c I2C port index.
* @param[out] ctx I2C context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_i2c_save_context(const qm_ss_i2c_t i2c,
qm_ss_i2c_context_t *const ctx);
/**
* Restore SS I2C context.
*
* Restore the configuration of the specified SS I2C peripheral
* after exiting sleep.
*
* @param[in] i2c I2C port index.
* @param[in] ctx I2C context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_i2c_restore_context(const qm_ss_i2c_t i2c,
const qm_ss_i2c_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */

View file

@ -43,7 +43,7 @@
* ISR for ADC interrupt. * ISR for ADC interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_ADC_IRQ, qm_ss_adc_0_isr); * @code qm_ss_irq_request(QM_SS_IRQ_ADC_0_INT, qm_ss_adc_0_isr);
* @endcode if IRQ based conversions are used. * @endcode if IRQ based conversions are used.
*/ */
QM_ISR_DECLARE(qm_ss_adc_0_isr); QM_ISR_DECLARE(qm_ss_adc_0_isr);
@ -52,16 +52,17 @@ QM_ISR_DECLARE(qm_ss_adc_0_isr);
* ISR for ADC error interrupt. * ISR for ADC error interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_ADC_ERR, qm_ss_adc_0_err_isr); * @code qm_ss_irq_request(QM_SS_IRQ_ADC_0_ERROR_INT,
* qm_ss_adc_0_error_isr);
* @endcode if IRQ based conversions are used. * @endcode if IRQ based conversions are used.
*/ */
QM_ISR_DECLARE(qm_ss_adc_0_err_isr); QM_ISR_DECLARE(qm_ss_adc_0_error_isr);
/** /**
* ISR for SS ADC 0 calibration interrupt. * ISR for SS ADC 0 calibration interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_SS_IRQ_ADC_CAL, qm_ss_adc_0_cal_isr); * @code qm_irq_request(QM_SS_IRQ_ADC_0_CAL_INT, qm_ss_adc_0_cal_isr);
* @endcode if IRQ based calibration is used. * @endcode if IRQ based calibration is used.
*/ */
QM_ISR_DECLARE(qm_ss_adc_0_cal_isr); QM_ISR_DECLARE(qm_ss_adc_0_cal_isr);
@ -70,7 +71,7 @@ QM_ISR_DECLARE(qm_ss_adc_0_cal_isr);
* ISR for SS ADC 0 mode change interrupt. * ISR for SS ADC 0 mode change interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_irq_request(QM_SS_IRQ_ADC_PWR, qm_ss_adc_0_pwr_isr); * @code qm_irq_request(QM_SS_IRQ_ADC_0_PWR_INT, qm_ss_adc_0_pwr_isr);
* @endcode if IRQ based mode change is used. * @endcode if IRQ based mode change is used.
*/ */
QM_ISR_DECLARE(qm_ss_adc_0_pwr_isr); QM_ISR_DECLARE(qm_ss_adc_0_pwr_isr);
@ -79,106 +80,112 @@ QM_ISR_DECLARE(qm_ss_adc_0_pwr_isr);
* ISR for GPIO 0 error interrupt. * ISR for GPIO 0 error interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_GPIO_INTR_0, qm_ss_gpio_isr_0); * @code qm_ss_irq_request(QM_SS_IRQ_GPIO_0_INT, qm_ss_gpio_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_ss_gpio_isr_0); QM_ISR_DECLARE(qm_ss_gpio_0_isr);
/** /**
* ISR for GPIO 1 error interrupt. * ISR for GPIO 1 error interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_GPIO_INTR_1, qm_ss_gpio_isr_1); * @code qm_ss_irq_request(QM_SS_IRQ_GPIO_1_INT, qm_ss_gpio_1_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_ss_gpio_isr_1); QM_ISR_DECLARE(qm_ss_gpio_1_isr);
/** /**
* ISR for I2C 0 error interrupt. * ISR for I2C 0 error interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_ERR, qm_ss_i2c_isr_0); * @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_ERROR_INT, qm_ss_i2c_0_isr);
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_RX_AVAIL, qm_ss_i2c_isr_0); * @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_RX_AVAIL_INT, qm_ss_i2c_0_isr);
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_TX_REQ, qm_ss_i2c_isr_0); * @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_TX_REQ_INT, qm_ss_i2c_0_isr);
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_STOP_DET, qm_ss_i2c_isr_0); * @code qm_ss_irq_request(QM_SS_IRQ_I2C_0_STOP_DET_INT, qm_ss_i2c_0_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_ss_i2c_isr_0); QM_ISR_DECLARE(qm_ss_i2c_0_isr);
/** /**
* ISR for I2C 1 error interrupt. * ISR for I2C 1 error interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_ERR, qm_ss_i2c_isr_1); * @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_ERROR_INT, qm_ss_i2c_1_isr);
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_RX_AVAIL, qm_ss_i2c_isr_1); * @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_RX_AVAIL_INT, qm_ss_i2c_1_isr);
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_TX_REQ, qm_ss_i2c_isr_1); * @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_TX_REQ_INT, qm_ss_i2c_1_isr);
* @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_STOP_DET, qm_ss_i2c_isr_1); * @code qm_ss_irq_request(QM_SS_IRQ_I2C_1_STOP_DET_INT, qm_ss_i2c_1_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_ss_i2c_isr_1); QM_ISR_DECLARE(qm_ss_i2c_1_isr);
/** /**
* ISR for SPI 0 error interrupt. * ISR for SPI 0 error interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_SPI_0_ERR_INT, qm_ss_spi_0_err_isr); * @code qm_ss_irq_request(QM_SS_IRQ_SPI_0_ERROR_INT,
* qm_ss_spi_0_error_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_ss_spi_0_err_isr); QM_ISR_DECLARE(qm_ss_spi_0_error_isr);
/** /**
* ISR for SPI 1 error interrupt. * ISR for SPI 1 error interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_SPI_1_ERR_INT, qm_ss_spi_1_err_isr); * @code qm_ss_irq_request(QM_SS_IRQ_SPI_1_ERROR_INT,
* qm_ss_spi_1_error_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_ss_spi_1_err_isr); QM_ISR_DECLARE(qm_ss_spi_1_error_isr);
/** /**
* ISR for SPI 0 TX data requested interrupt. * ISR for SPI 0 TX data requested interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_SPI_0_TX_REQ, qm_ss_spi_0_tx_isr); * @code qm_ss_irq_request(QM_SS_IRQ_SPI_0_TX_REQ_INT,
* qm_ss_spi_0_tx_req_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_ss_spi_0_tx_isr); QM_ISR_DECLARE(qm_ss_spi_0_tx_req_isr);
/** /**
* ISR for SPI 1 TX data requested interrupt. * ISR for SPI 1 TX data requested interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_SPI_1_TX_REQ, qm_ss_spi_1_tx_isr); * @code qm_ss_irq_request(QM_SS_IRQ_SPI_1_TX_REQ_INT,
* qm_ss_spi_1_tx_req_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_ss_spi_1_tx_isr); QM_ISR_DECLARE(qm_ss_spi_1_tx_req_isr);
/** /**
* ISR for SPI 0 RX data available interrupt. * ISR for SPI 0 RX data available interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_SPI_0_RX_AVAIL, qm_ss_spi_0_rx_isr); * @code qm_ss_irq_request(QM_SS_IRQ_SPI_0_RX_AVAIL_INT,
* qm_ss_spi_0_rx_avail_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_ss_spi_0_rx_isr); QM_ISR_DECLARE(qm_ss_spi_0_rx_avail_isr);
/** /**
* ISR for SPI 1 data available interrupt. * ISR for SPI 1 data available interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_SPI_1_RX_AVAIL, qm_ss_spi_1_rx_isr); * @code qm_ss_irq_request(QM_SS_IRQ_SPI_1_RX_AVAIL_INT,
* qm_ss_spi_1_rx_avail_isr);
* @endcode if IRQ based transfers are used. * @endcode if IRQ based transfers are used.
*/ */
QM_ISR_DECLARE(qm_ss_spi_1_rx_isr); QM_ISR_DECLARE(qm_ss_spi_1_rx_avail_isr);
/** /**
* ISR for SS Timer 0 interrupt. * ISR for SS Timer 0 interrupt.
* *
* This function needs to be registered with * This function needs to be registered with
* @code qm_ss_int_vector_request(QM_SS_INT_TIMER_0, qm_ss_timer_isr_0); * @code qm_ss_int_vector_request(QM_ARC_TIMER_0_INT, qm_ss_timer_0_isr);
* @endcode * @endcode
*/ */
QM_ISR_DECLARE(qm_ss_timer_isr_0); QM_ISR_DECLARE(qm_ss_timer_0_isr);
/** /**
* @} * @}

View file

@ -398,6 +398,40 @@ int qm_ss_spi_transfer_terminate(const qm_ss_spi_t spi);
int qm_ss_spi_dma_transfer_terminate(const qm_ss_spi_t spi); int qm_ss_spi_dma_transfer_terminate(const qm_ss_spi_t spi);
#endif /* HAS_SS_QMSI_DMA */ #endif /* HAS_SS_QMSI_DMA */
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save SS SPI context.
*
* Saves the configuration of the specified SS SPI peripheral
* before entering sleep.
*
* @param[in] spi SPI controller identifier.
* @param[out] ctx SPI context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_spi_save_context(const qm_ss_spi_t spi,
qm_ss_spi_context_t *const ctx);
/**
* Restore SS SPI context.
*
* Restore the configuration of the specified SS SPI peripheral
* after exiting sleep.
*
* @param[in] spi SPI controller identifier.
* @param[in] ctx SPI context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_spi_restore_context(const qm_ss_spi_t spi,
const qm_ss_spi_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */

View file

@ -113,6 +113,40 @@ int qm_ss_timer_set(const qm_ss_timer_t timer, const uint32_t count);
*/ */
int qm_ss_timer_get(const qm_ss_timer_t timer, uint32_t *const count); int qm_ss_timer_get(const qm_ss_timer_t timer, uint32_t *const count);
#if (ENABLE_RESTORE_CONTEXT)
/*
* Save SS TIMER context.
*
* Save the configuration of the specified TIMER peripheral
* before entering sleep.
*
* @param[in] timer SS TIMER index.
* @param[out] ctx SS TIMER context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_timer_save_context(const qm_ss_timer_t timer,
qm_ss_timer_context_t *const ctx);
/*
* Restore SS TIMER context.
*
* Restore the configuration of the specified TIMER peripheral
* after exiting sleep.
*
* @param[in] timer SS TIMER index.
* @param[in] ctx SS TIMER context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_timer_restore_context(const qm_ss_timer_t timer,
const qm_ss_timer_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */

View file

@ -424,6 +424,43 @@ int qm_uart_dma_write_terminate(const qm_uart_t uart);
*/ */
int qm_uart_dma_read_terminate(const qm_uart_t uart); int qm_uart_dma_read_terminate(const qm_uart_t uart);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save UART context.
*
* Saves the configuration of the specified UART peripheral
* before entering sleep.
*
* @param[in] uart UART port index.
* @param[out] ctx UART context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_uart_save_context(const qm_uart_t uart, qm_uart_context_t *const ctx);
/**
* Restore UART context.
*
* Restore the configuration of the specified UART peripheral
* after exiting sleep.
*
* FIFO control register cannot be read back,
* the default configuration is applied for this register.
* Application will need to restore its own parameters.
*
* @param[in] uart UART port index.
* @param[in] ctx UART context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_uart_restore_context(const qm_uart_t uart,
const qm_uart_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */

View file

@ -116,8 +116,6 @@ int qm_wdt_start(const qm_wdt_t wdt);
* @param[in] wdt WDT index. * @param[in] wdt WDT index.
* @param[in] cfg New configuration for WDT. * @param[in] cfg New configuration for WDT.
* This must not be NULL. * This must not be NULL.
* If QM_WDT_MODE_INTERRUPT_RESET mode is set,
* the 'callback' cannot be null.
* *
* @return Standard errno return type for QMSI. * @return Standard errno return type for QMSI.
* @retval 0 on success. * @retval 0 on success.
@ -138,6 +136,37 @@ int qm_wdt_set_config(const qm_wdt_t wdt, const qm_wdt_config_t *const cfg);
*/ */
int qm_wdt_reload(const qm_wdt_t wdt); int qm_wdt_reload(const qm_wdt_t wdt);
#if (ENABLE_RESTORE_CONTEXT)
/**
* Save watchdog context.
*
* Save the configuration of the watchdog before entering sleep.
*
* @param[in] wdt WDT index.
* @param[out] ctx WDT context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_wdt_save_context(const qm_wdt_t wdt, qm_wdt_context_t *const ctx);
/**
* Restore watchdog context.
*
* Restore the configuration of the watchdog after exiting sleep.
*
* @param[in] wdt WDT index.
* @param[in] ctx WDT context structure. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_wdt_restore_context(const qm_wdt_t wdt,
const qm_wdt_context_t *const ctx);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */

View file

@ -109,9 +109,9 @@ static __inline__ void ioapic_register_irq(unsigned int irq,
/* Set trigger mode. */ /* Set trigger mode. */
switch (irq) { switch (irq) {
case QM_IRQ_RTC_0: case QM_IRQ_RTC_0_INT:
case QM_IRQ_AONPT_0: case QM_IRQ_AONPT_0_INT:
case QM_IRQ_WDT_0: case QM_IRQ_WDT_0_INT:
/* Edge sensitive. */ /* Edge sensitive. */
value &= ~BIT(15); value &= ~BIT(15);
break; break;

View file

@ -85,10 +85,10 @@ static __inline__ void mvic_register_irq(uint32_t irq)
/* Set IRQ triggering scheme and unmask the line. */ /* Set IRQ triggering scheme and unmask the line. */
switch (irq) { switch (irq) {
case QM_IRQ_RTC_0: case QM_IRQ_RTC_0_INT:
case QM_IRQ_AONPT_0: case QM_IRQ_AONPT_0_INT:
case QM_IRQ_PIC_TIMER: case QM_IRQ_PIC_TIMER:
case QM_IRQ_WDT_0: case QM_IRQ_WDT_0_INT:
/* positive edge */ /* positive edge */
_mvic_set_irq_val(irq, 0); _mvic_set_irq_val(irq, 0);
break; break;

View file

@ -42,19 +42,15 @@
#include "qm_sensor_regs.h" #include "qm_sensor_regs.h"
extern qm_ss_isr_t __ivt_vect_table[]; extern qm_ss_isr_t __ivt_vect_table[];
static void ss_register_irq(unsigned int vector);
#else #else
#error "Unsupported / unspecified processor detected." #error "Unsupported / unspecified processor detected."
#endif #endif
/* SCSS base addr for LMT interrupt routing, for linear IRQ mapping */ /* Event router base addr for LMT interrupt routing, for linear IRQ mapping */
#define SCSS_LMT_INT_MASK_BASE (&QM_SCSS_INT->int_i2c_mst_0_mask) #define INTERRUPT_ROUTER_LMT_INT_MASK_BASE \
(&QM_INTERRUPT_ROUTER->i2c_master_0_int_mask)
#if (QM_SENSOR)
#define SCSS_INT_MASK BIT(8) /* Sensor Subsystem interrupt masking */
static void ss_register_irq(unsigned int vector);
#else
#define SCSS_INT_MASK BIT(0) /* Lakemont interrupt masking */
#endif
/* x86 CPU FLAGS.IF register field (Interrupt enable Flag, bit 9), indicating /* x86 CPU FLAGS.IF register field (Interrupt enable Flag, bit 9), indicating
* whether or not CPU interrupts are enabled. * whether or not CPU interrupts are enabled.
@ -169,9 +165,103 @@ void qm_irq_unmask(uint32_t irq)
#endif #endif
} }
#if (ENABLE_RESTORE_CONTEXT)
#if (HAS_APIC)
int qm_irq_save_context(qm_irq_context_t *const ctx)
{
uint32_t rte_low;
uint8_t irq;
QM_CHECK(ctx != NULL, -EINVAL);
for (irq = 0; irq < QM_IOAPIC_NUM_RTES; irq++) {
rte_low = _ioapic_get_redtbl_entry_lo(irq);
ctx->redtbl_entries[irq] = rte_low;
}
return 0;
}
int qm_irq_restore_context(const qm_irq_context_t *const ctx)
{
uint32_t rte_low;
uint8_t irq;
QM_CHECK(ctx != NULL, -EINVAL);
apic_init();
for (irq = 0; irq < QM_IOAPIC_NUM_RTES; irq++) {
rte_low = ctx->redtbl_entries[irq];
_ioapic_set_redtbl_entry_lo(irq, rte_low);
}
return 0;
}
#elif(QM_SENSOR) /* HAS_APIC */
int qm_irq_save_context(qm_irq_context_t *const ctx)
{
uint8_t i;
uint32_t status32;
QM_CHECK(ctx != NULL, -EINVAL);
/* Start from i=1, skip reset vector. */
for (i = 1; i < QM_SS_INT_VECTOR_NUM; i++) {
__builtin_arc_sr(i, QM_SS_AUX_IRQ_SELECT);
ctx->irq_config[i - 1] =
__builtin_arc_lr(QM_SS_AUX_IRQ_PRIORITY) << 2;
ctx->irq_config[i - 1] |=
__builtin_arc_lr(QM_SS_AUX_IRQ_TRIGGER) << 1;
ctx->irq_config[i - 1] |=
__builtin_arc_lr(QM_SS_AUX_IRQ_ENABLE);
}
status32 = __builtin_arc_lr(QM_SS_AUX_STATUS32);
ctx->status32_irq_threshold = status32 & QM_SS_STATUS32_E_MASK;
ctx->status32_irq_enable = status32 & QM_SS_STATUS32_IE_MASK;
ctx->irq_ctrl = __builtin_arc_lr(QM_SS_AUX_IRQ_CTRL);
return 0;
}
int qm_irq_restore_context(const qm_irq_context_t *const ctx)
{
uint8_t i;
uint32_t reg;
QM_CHECK(ctx != NULL, -EINVAL);
/* Start from i=1, skip reset vector. */
for (i = 1; i < QM_SS_INT_VECTOR_NUM; i++) {
__builtin_arc_sr(i, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(ctx->irq_config[i - 1] >> 2,
QM_SS_AUX_IRQ_PRIORITY);
__builtin_arc_sr((ctx->irq_config[i - 1] >> 1) & BIT(0),
QM_SS_AUX_IRQ_TRIGGER);
__builtin_arc_sr(ctx->irq_config[i - 1] & BIT(0),
QM_SS_AUX_IRQ_ENABLE);
}
__builtin_arc_sr(ctx->irq_ctrl, QM_SS_AUX_IRQ_CTRL);
/* Setting an interrupt priority threshold. */
reg = __builtin_arc_lr(QM_SS_AUX_STATUS32);
reg |= (ctx->status32_irq_threshold & QM_SS_STATUS32_E_MASK);
reg |= (ctx->status32_irq_enable & QM_SS_STATUS32_IE_MASK);
/* This one has to be a kernel operation. */
__builtin_arc_kflag(reg);
return 0;
}
#endif /* QM_SENSOR */
#endif /* ENABLE_RESTORE_CONTEXT */
void _qm_irq_setup(uint32_t irq, uint16_t register_offset) void _qm_irq_setup(uint32_t irq, uint16_t register_offset)
{ {
uint32_t *scss_intmask; uint32_t *event_router_intmask;
#if (HAS_APIC) #if (HAS_APIC)
/* /*
@ -186,29 +276,30 @@ void _qm_irq_setup(uint32_t irq, uint16_t register_offset)
#endif #endif
/* Route peripheral interrupt to Lakemont/Sensor Subsystem */ /* Route peripheral interrupt to Lakemont/Sensor Subsystem */
scss_intmask = (uint32_t *)SCSS_LMT_INT_MASK_BASE + register_offset; event_router_intmask =
(uint32_t *)INTERRUPT_ROUTER_LMT_INT_MASK_BASE + register_offset;
/* On Quark D2000 and Quark SE the register for the analog comparator /* On Quark D2000 and Quark SE the register for the analog comparator
* host mask has a different bit field than the other host mask * host mask has a different bit field than the other host mask
* registers. */ * registers. */
if (QM_IRQ_AC_MASK_OFFSET == register_offset) { if (QM_IRQ_COMPARATOR_0_INT_MASK_OFFSET == register_offset) {
*scss_intmask &= ~0x0007ffff; *event_router_intmask &= ~0x0007ffff;
#if !defined(QUARK_D2000) #if !defined(QUARK_D2000)
} else if (QM_IRQ_MBOX_MASK_OFFSET == register_offset) { } else if (QM_IRQ_MAILBOX_0_INT_MASK_OFFSET == register_offset) {
/* Masking MAILBOX irq id done inside mbox driver */ /* Masking MAILBOX irq id done inside mbox driver */
#endif #endif
/* /*
* DMA error mask uses 1 bit per DMA channel rather than the * DMA error mask uses 1 bit per DMA channel rather than the
* generic host mask. * generic host mask.
*/ */
} else if (QM_IRQ_DMA_ERR_MASK_OFFSET == register_offset) { } else if (QM_IRQ_DMA_0_ERROR_INT_MASK_OFFSET == register_offset) {
#if (QM_SENSOR) #if (QM_SENSOR)
*scss_intmask &= ~QM_INT_DMA_ERR_SS_MASK; *event_router_intmask &= ~QM_IR_DMA_ERROR_SS_MASK;
#else #else
*scss_intmask &= ~QM_INT_DMA_ERR_HOST_MASK; *event_router_intmask &= ~QM_IR_DMA_ERROR_HOST_MASK;
#endif #endif
} else { } else {
*scss_intmask &= ~SCSS_INT_MASK; QM_IR_UNMASK_INTERRUPTS(*event_router_intmask);
} }
#if (HAS_APIC) #if (HAS_APIC)
@ -256,10 +347,10 @@ static void ss_register_irq(unsigned int vector)
* triggered. * triggered.
*/ */
switch (vector) { switch (vector) {
case QM_SS_IRQ_ADC_PWR_VECTOR: case QM_SS_IRQ_ADC_0_PWR_INT_VECTOR:
case QM_IRQ_RTC_0_VECTOR: case QM_IRQ_RTC_0_INT_VECTOR:
case QM_IRQ_AONPT_0_VECTOR: case QM_IRQ_AONPT_0_INT_VECTOR:
case QM_IRQ_WDT_0_VECTOR: case QM_IRQ_WDT_0_INT_VECTOR:
/* Edge sensitive. */ /* Edge sensitive. */
__builtin_arc_sr(vector, QM_SS_AUX_IRQ_SELECT); __builtin_arc_sr(vector, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_EDGE_SENSITIVE, __builtin_arc_sr(QM_SS_IRQ_EDGE_SENSITIVE,

View file

@ -33,8 +33,8 @@
/* SCSS base addr for Sensor Subsystem interrupt routing, for linear IRQ /* SCSS base addr for Sensor Subsystem interrupt routing, for linear IRQ
* mapping */ * mapping */
#define SCSS_SS_INT_MASK_BASE (&QM_SCSS_INT->int_ss_adc_err_mask) #define INTERRUPT_ROUTER_SS_INT_MASK_BASE \
#define SCSS_SS_INT_MASK BIT(8) /* Sensor Subsystem interrupt masking */ (&QM_INTERRUPT_ROUTER->ss_adc_0_error_int_mask)
#if (UNIT_TEST) #if (UNIT_TEST)
qm_ss_isr_t __ivt_vect_table[QM_SS_INT_VECTOR_NUM]; qm_ss_isr_t __ivt_vect_table[QM_SS_INT_VECTOR_NUM];
@ -90,8 +90,8 @@ void qm_ss_irq_request(uint32_t irq, qm_ss_isr_t isr)
qm_ss_int_vector_request(vector, isr); qm_ss_int_vector_request(vector, isr);
/* Route peripheral interrupt to Sensor Subsystem */ /* Route peripheral interrupt to Sensor Subsystem */
scss_intmask = (uint32_t *)SCSS_SS_INT_MASK_BASE + irq; scss_intmask = (uint32_t *)INTERRUPT_ROUTER_SS_INT_MASK_BASE + irq;
*scss_intmask &= ~SCSS_SS_INT_MASK; QM_IR_UNMASK_SS_INTERRUPTS(*scss_intmask);
qm_ss_irq_unmask(vector); qm_ss_irq_unmask(vector);
} }

View file

@ -79,7 +79,7 @@ typedef struct {
/* Mailbox channels private data structures */ /* Mailbox channels private data structures */
static qm_mailbox_info_t mailbox_devs[QM_MBOX_CH_NUM]; static qm_mailbox_info_t mailbox_devs[QM_MBOX_CH_NUM];
QM_ISR_DECLARE(qm_mbox_isr) QM_ISR_DECLARE(qm_mailbox_0_isr)
{ {
qm_mailbox_t *const mbox_reg = (qm_mailbox_t *)QM_MAILBOX; qm_mailbox_t *const mbox_reg = (qm_mailbox_t *)QM_MAILBOX;
uint8_t i = 0; uint8_t i = 0;
@ -106,7 +106,7 @@ QM_ISR_DECLARE(qm_mbox_isr)
} }
} }
QM_ISR_EOI(QM_IRQ_MBOX_VECTOR); QM_ISR_EOI(QM_IRQ_MAILBOX_0_INT_VECTOR);
} }
int qm_mbox_ch_set_config(const qm_mbox_ch_t mbox_ch, int qm_mbox_ch_set_config(const qm_mbox_ch_t mbox_ch,
@ -118,7 +118,7 @@ int qm_mbox_ch_set_config(const qm_mbox_ch_t mbox_ch,
qm_mailbox_info_t *device = &mailbox_devs[mbox_ch]; qm_mailbox_info_t *device = &mailbox_devs[mbox_ch];
/* Block interrupts while configuring MBOX */ /* Block interrupts while configuring MBOX */
qm_irq_mask(QM_IRQ_MBOX); qm_irq_mask(QM_IRQ_MAILBOX_0_INT);
/* Store the device destination */ /* Store the device destination */
device->dest = config->dest; device->dest = config->dest;
@ -171,7 +171,7 @@ int qm_mbox_ch_set_config(const qm_mbox_ch_t mbox_ch,
} }
/* UnBlock MBOX interrupts. */ /* UnBlock MBOX interrupts. */
qm_irq_unmask(QM_IRQ_MBOX); qm_irq_unmask(QM_IRQ_MAILBOX_0_INT);
return 0; return 0;
} }

View file

@ -35,14 +35,14 @@
static void (*callback)(void *data); static void (*callback)(void *data);
static void *callback_data; static void *callback_data;
QM_ISR_DECLARE(qm_mpr_isr) QM_ISR_DECLARE(qm_sram_mpr_0_isr)
{ {
if (callback) { if (callback) {
(*callback)(callback_data); (*callback)(callback_data);
} }
QM_MPR->mpr_vsts = QM_MPR_VSTS_VALID; QM_MPR->mpr_vsts = QM_MPR_VSTS_VALID;
QM_ISR_EOI(QM_IRQ_SRAM_VECTOR); QM_ISR_EOI(QM_IRQ_SRAM_MPR_0_INT_VECTOR);
} }
int qm_mpr_set_config(const qm_mpr_id_t id, const qm_mpr_config_t *const cfg) int qm_mpr_set_config(const qm_mpr_id_t id, const qm_mpr_config_t *const cfg)
@ -67,21 +67,19 @@ int qm_mpr_set_config(const qm_mpr_id_t id, const qm_mpr_config_t *const cfg)
} }
#if (QM_SENSOR) #if (QM_SENSOR)
int qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode, int qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode,
qm_mpr_callback_t callback_fn, qm_mpr_callback_t callback_fn, void *cb_data)
void *callback_data)
{ {
QM_CHECK(mode <= MPR_VIOL_MODE_PROBE, -EINVAL); QM_CHECK(mode <= MPR_VIOL_MODE_PROBE, -EINVAL);
/* interrupt mode */ /* interrupt mode */
if (MPR_VIOL_MODE_INTERRUPT == mode) { if (MPR_VIOL_MODE_INTERRUPT == mode) {
callback = callback_fn; callback = callback_fn;
callback_data = callback_data; callback_data = cb_data;
/* unmask interrupt */ /* unmask interrupt */
QM_SCSS_INT->int_sram_controller_mask &= QM_IR_UNMASK_INTERRUPTS(
~QM_INT_SRAM_CONTROLLER_SS_MASK; QM_INTERRUPT_ROUTER->sram_mpr_0_int_mask);
QM_SCSS_INT->int_sram_controller_mask |= QM_IR_MASK_HALTS(QM_INTERRUPT_ROUTER->sram_mpr_0_int_mask);
QM_INT_SRAM_CONTROLLER_SS_HALT_MASK;
QM_SCSS_SS->ss_cfg &= ~QM_SS_STS_HALT_INTERRUPT_REDIRECTION; QM_SCSS_SS->ss_cfg &= ~QM_SS_STS_HALT_INTERRUPT_REDIRECTION;
} }
@ -89,11 +87,9 @@ int qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode,
/* probe or reset mode */ /* probe or reset mode */
else { else {
/* mask interrupt */ /* mask interrupt */
QM_SCSS_INT->int_sram_controller_mask |= QM_IR_MASK_INTERRUPTS(QM_INTERRUPT_ROUTER->sram_mpr_0_int_mask);
QM_INT_SRAM_CONTROLLER_SS_MASK;
QM_SCSS_INT->int_sram_controller_mask &= QM_IR_UNMASK_HALTS(QM_INTERRUPT_ROUTER->sram_mpr_0_int_mask);
~QM_INT_SRAM_CONTROLLER_SS_HALT_MASK;
if (MPR_VIOL_MODE_PROBE == mode) { if (MPR_VIOL_MODE_PROBE == mode) {
@ -115,29 +111,26 @@ int qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode,
} }
#else #else
int qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode, int qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode,
qm_mpr_callback_t callback_fn, qm_mpr_callback_t callback_fn, void *cb_data)
void *callback_data)
{ {
QM_CHECK(mode <= MPR_VIOL_MODE_PROBE, -EINVAL); QM_CHECK(mode <= MPR_VIOL_MODE_PROBE, -EINVAL);
/* interrupt mode */ /* interrupt mode */
if (MPR_VIOL_MODE_INTERRUPT == mode) { if (MPR_VIOL_MODE_INTERRUPT == mode) {
callback = callback_fn; callback = callback_fn;
callback_data = callback_data; callback_data = cb_data;
/* unmask interrupt */ /* unmask interrupt */
qm_irq_unmask(QM_IRQ_SRAM); qm_irq_unmask(QM_IRQ_SRAM_MPR_0_INT);
QM_SCSS_INT->int_sram_controller_mask |= QM_IR_MASK_HALTS(QM_INTERRUPT_ROUTER->sram_mpr_0_int_mask);
QM_INT_SRAM_CONTROLLER_HOST_HALT_MASK;
} }
/* probe or reset mode */ /* probe or reset mode */
else { else {
/* mask interrupt */ /* mask interrupt */
qm_irq_mask(QM_IRQ_SRAM); qm_irq_mask(QM_IRQ_SRAM_MPR_0_INT);
QM_SCSS_INT->int_sram_controller_mask &= QM_IR_UNMASK_HALTS(QM_INTERRUPT_ROUTER->sram_mpr_0_int_mask);
~QM_INT_SRAM_CONTROLLER_HOST_HALT_MASK;
if (MPR_VIOL_MODE_PROBE == mode) { if (MPR_VIOL_MODE_PROBE == mode) {
@ -158,3 +151,33 @@ int qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode,
return 0; return 0;
} }
#endif /* QM_SENSOR */ #endif /* QM_SENSOR */
#if (ENABLE_RESTORE_CONTEXT)
int qm_mpr_save_context(qm_mpr_context_t *const ctx)
{
QM_CHECK(ctx != NULL, -EINVAL);
int i;
qm_mpr_reg_t *const controller = QM_MPR;
for (i = 0; i < QM_MPR_NUM; i++) {
ctx->mpr_cfg[i] = controller->mpr_cfg[i];
}
return 0;
}
int qm_mpr_restore_context(const qm_mpr_context_t *const ctx)
{
QM_CHECK(ctx != NULL, -EINVAL);
int i;
qm_mpr_reg_t *const controller = QM_MPR;
for (i = 0; i < QM_MPR_NUM; i++) {
controller->mpr_cfg[i] = ctx->mpr_cfg[i];
}
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -33,7 +33,7 @@ static void (*callback[QM_PWM_NUM])(void *data, uint32_t int_status);
static void *callback_data[QM_PWM_NUM]; static void *callback_data[QM_PWM_NUM];
QM_ISR_DECLARE(qm_pwm_isr_0) QM_ISR_DECLARE(qm_pwm_0_isr)
{ {
/* Which timers fired. */ /* Which timers fired. */
uint32_t int_status = QM_PWM[QM_PWM_0].timersintstatus; uint32_t int_status = QM_PWM[QM_PWM_0].timersintstatus;
@ -43,7 +43,7 @@ QM_ISR_DECLARE(qm_pwm_isr_0)
if (callback[QM_PWM_0]) { if (callback[QM_PWM_0]) {
(*callback[QM_PWM_0])(callback_data[QM_PWM_0], int_status); (*callback[QM_PWM_0])(callback_data[QM_PWM_0], int_status);
} }
QM_ISR_EOI(QM_IRQ_PWM_0_VECTOR); QM_ISR_EOI(QM_IRQ_PWM_0_INT_VECTOR);
} }
int qm_pwm_start(const qm_pwm_t pwm, const qm_pwm_id_t id) int qm_pwm_start(const qm_pwm_t pwm, const qm_pwm_id_t id)
@ -122,3 +122,40 @@ int qm_pwm_get(const qm_pwm_t pwm, const qm_pwm_id_t id,
return 0; return 0;
} }
#if (ENABLE_RESTORE_CONTEXT)
int qm_pwm_save_context(const qm_pwm_t pwm, qm_pwm_context_t *const ctx)
{
QM_CHECK(pwm < QM_PWM_NUM, -EINVAL);
QM_CHECK(ctx != NULL, -EINVAL);
qm_pwm_reg_t *const controller = &QM_PWM[pwm];
uint8_t i;
for (i = 0; i < QM_PWM_ID_NUM; i++) {
ctx->channel[i].loadcount = controller->timer[i].loadcount;
ctx->channel[i].controlreg = controller->timer[i].controlreg;
ctx->channel[i].loadcount2 = controller->timer_loadcount2[i];
}
return 0;
}
int qm_pwm_restore_context(const qm_pwm_t pwm,
const qm_pwm_context_t *const ctx)
{
QM_CHECK(pwm < QM_PWM_NUM, -EINVAL);
QM_CHECK(ctx != NULL, -EINVAL);
qm_pwm_reg_t *const controller = &QM_PWM[pwm];
uint8_t i;
for (i = 0; i < QM_PWM_ID_NUM; i++) {
controller->timer[i].loadcount = ctx->channel[i].loadcount;
controller->timer[i].controlreg = ctx->channel[i].controlreg;
controller->timer_loadcount2[i] = ctx->channel[i].loadcount2;
}
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -33,7 +33,7 @@
static void (*callback[QM_RTC_NUM])(void *data); static void (*callback[QM_RTC_NUM])(void *data);
static void *callback_data[QM_RTC_NUM]; static void *callback_data[QM_RTC_NUM];
QM_ISR_DECLARE(qm_rtc_isr_0) QM_ISR_DECLARE(qm_rtc_0_isr)
{ {
/* Disable RTC interrupt */ /* Disable RTC interrupt */
QM_RTC[QM_RTC_0].rtc_ccr &= ~QM_RTC_CCR_INTERRUPT_ENABLE; QM_RTC[QM_RTC_0].rtc_ccr &= ~QM_RTC_CCR_INTERRUPT_ENABLE;
@ -71,7 +71,7 @@ QM_ISR_DECLARE(qm_rtc_isr_0)
/* clear interrupt */ /* clear interrupt */
QM_RTC[QM_RTC_0].rtc_eoi; QM_RTC[QM_RTC_0].rtc_eoi;
QM_ISR_EOI(QM_IRQ_RTC_0_VECTOR); QM_ISR_EOI(QM_IRQ_RTC_0_INT_VECTOR);
} }
int qm_rtc_set_config(const qm_rtc_t rtc, const qm_rtc_config_t *const cfg) int qm_rtc_set_config(const qm_rtc_t rtc, const qm_rtc_config_t *const cfg)

View file

@ -475,14 +475,14 @@ int qm_spi_irq_transfer(const qm_spi_t spi,
QM_ISR_DECLARE(qm_spi_master_0_isr) QM_ISR_DECLARE(qm_spi_master_0_isr)
{ {
handle_spi_interrupt(QM_SPI_MST_0); handle_spi_interrupt(QM_SPI_MST_0);
QM_ISR_EOI(QM_IRQ_SPI_MASTER_0_VECTOR); QM_ISR_EOI(QM_IRQ_SPI_MASTER_0_INT_VECTOR);
} }
#if (QUARK_SE) #if (QUARK_SE)
QM_ISR_DECLARE(qm_spi_master_1_isr) QM_ISR_DECLARE(qm_spi_master_1_isr)
{ {
handle_spi_interrupt(QM_SPI_MST_1); handle_spi_interrupt(QM_SPI_MST_1);
QM_ISR_EOI(QM_IRQ_SPI_MASTER_1_VECTOR); QM_ISR_EOI(QM_IRQ_SPI_MASTER_1_INT_VECTOR);
} }
#endif #endif
@ -858,3 +858,34 @@ int qm_spi_dma_transfer_terminate(qm_spi_t spi)
return ret; return ret;
} }
#if (ENABLE_RESTORE_CONTEXT)
int qm_spi_save_context(const qm_spi_t spi, qm_spi_context_t *const ctx)
{
QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
QM_CHECK(ctx != NULL, -EINVAL);
qm_spi_reg_t *const regs = QM_SPI[spi];
ctx->ctrlr0 = regs->ctrlr0;
ctx->ser = regs->ser;
ctx->baudr = regs->baudr;
return 0;
}
int qm_spi_restore_context(const qm_spi_t spi,
const qm_spi_context_t *const ctx)
{
QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
QM_CHECK(ctx != NULL, -EINVAL);
qm_spi_reg_t *const regs = QM_SPI[spi];
regs->ctrlr0 = ctx->ctrlr0;
regs->ser = ctx->ser;
regs->baudr = ctx->baudr;
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -407,27 +407,59 @@ static void handle_spi_rx_interrupt(const qm_ss_spi_t spi)
} }
} }
QM_ISR_DECLARE(qm_ss_spi_0_err_isr) QM_ISR_DECLARE(qm_ss_spi_0_error_isr)
{ {
handle_spi_err_interrupt(QM_SS_SPI_0); handle_spi_err_interrupt(QM_SS_SPI_0);
} }
QM_ISR_DECLARE(qm_ss_spi_1_err_isr) QM_ISR_DECLARE(qm_ss_spi_1_error_isr)
{ {
handle_spi_err_interrupt(QM_SS_SPI_1); handle_spi_err_interrupt(QM_SS_SPI_1);
} }
QM_ISR_DECLARE(qm_ss_spi_0_rx_isr) QM_ISR_DECLARE(qm_ss_spi_0_rx_avail_isr)
{ {
handle_spi_rx_interrupt(QM_SS_SPI_0); handle_spi_rx_interrupt(QM_SS_SPI_0);
} }
QM_ISR_DECLARE(qm_ss_spi_1_rx_isr) QM_ISR_DECLARE(qm_ss_spi_1_rx_avail_isr)
{ {
handle_spi_rx_interrupt(QM_SS_SPI_1); handle_spi_rx_interrupt(QM_SS_SPI_1);
} }
QM_ISR_DECLARE(qm_ss_spi_0_tx_isr) QM_ISR_DECLARE(qm_ss_spi_0_tx_req_isr)
{ {
handle_spi_tx_interrupt(QM_SS_SPI_0); handle_spi_tx_interrupt(QM_SS_SPI_0);
} }
QM_ISR_DECLARE(qm_ss_spi_1_tx_isr) QM_ISR_DECLARE(qm_ss_spi_1_tx_req_isr)
{ {
handle_spi_tx_interrupt(QM_SS_SPI_1); handle_spi_tx_interrupt(QM_SS_SPI_1);
} }
#if (ENABLE_RESTORE_CONTEXT)
int qm_ss_spi_save_context(const qm_ss_spi_t spi,
qm_ss_spi_context_t *const ctx)
{
const uint32_t controller = base[spi];
QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
QM_CHECK(ctx != NULL, -EINVAL);
ctx->spi_timing = __builtin_arc_lr(controller + QM_SS_SPI_TIMING);
ctx->spi_spien = __builtin_arc_lr(controller + QM_SS_SPI_SPIEN);
ctx->spi_ctrl = __builtin_arc_lr(controller + QM_SS_SPI_CTRL);
return 0;
}
int qm_ss_spi_restore_context(const qm_ss_spi_t spi,
const qm_ss_spi_context_t *const ctx)
{
const uint32_t controller = base[spi];
QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
QM_CHECK(ctx != NULL, -EINVAL);
__builtin_arc_sr(ctx->spi_timing, controller + QM_SS_SPI_TIMING);
__builtin_arc_sr(ctx->spi_spien, controller + QM_SS_SPI_SPIEN);
__builtin_arc_sr(ctx->spi_ctrl, controller + QM_SS_SPI_CTRL);
return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -49,7 +49,7 @@ static void *callback_data;
#define PIC_TIMER (QM_PIC_TIMER) #define PIC_TIMER (QM_PIC_TIMER)
#endif #endif
QM_ISR_DECLARE(qm_pic_timer_isr) QM_ISR_DECLARE(qm_pic_timer_0_isr)
{ {
if (callback) { if (callback) {
callback(callback_data); callback(callback_data);
@ -72,7 +72,7 @@ int qm_pic_timer_set_config(const qm_pic_timer_config_t *const cfg)
PIC_TIMER->timer_icr.reg = 0; PIC_TIMER->timer_icr.reg = 0;
PIC_TIMER->lvttimer.reg = BIT(LVTTIMER_INT_MASK_OFFS) | PIC_TIMER->lvttimer.reg = BIT(LVTTIMER_INT_MASK_OFFS) |
#if (HAS_APIC) #if (HAS_APIC)
QM_INT_VECTOR_PIC_TIMER; QM_X86_PIC_TIMER_INT_VECTOR;
#else #else
QM_IRQ_PIC_TIMER; QM_IRQ_PIC_TIMER;
#endif #endif
@ -106,3 +106,28 @@ int qm_pic_timer_get(uint32_t *const count)
return 0; return 0;
} }
#if (ENABLE_RESTORE_CONTEXT)
/**
 * Save PIC timer peripheral context.
 *
 * @param ctx Context structure to fill. Must not be NULL.
 * @return 0 on success, -EINVAL on NULL ctx.
 */
int qm_pic_timer_save_context(qm_pic_timer_context_t *const ctx)
{
	QM_CHECK(ctx != NULL, -EINVAL);
	/*
	 * Deliberate register mismatch: the *current* count (timer_ccr) is
	 * captured into the initial-count slot of the context, so that
	 * qm_pic_timer_restore_context() writes it to timer_icr and the
	 * timer resumes from its pre-sleep value.
	 */
	ctx->timer_icr = PIC_TIMER->timer_ccr.reg;
	ctx->timer_dcr = PIC_TIMER->timer_dcr.reg;
	ctx->lvttimer = PIC_TIMER->lvttimer.reg;
	return 0;
}
/**
 * Restore PIC timer peripheral context.
 *
 * Restores the divide configuration, the LVT timer entry and the count
 * saved by qm_pic_timer_save_context().
 *
 * @param ctx Context structure to restore from. Must not be NULL.
 * @return 0 on success, -EINVAL on NULL ctx.
 */
int qm_pic_timer_restore_context(const qm_pic_timer_context_t *const ctx)
{
	QM_CHECK(ctx != NULL, -EINVAL);
	/* The PIC Timer is restored to the value before sleep. */
	PIC_TIMER->timer_icr.reg = ctx->timer_icr;
	PIC_TIMER->timer_dcr.reg = ctx->timer_dcr;
	PIC_TIMER->lvttimer.reg = ctx->lvttimer;
	return 0;
}
#endif

View file

@ -46,7 +46,7 @@ static __inline__ void qm_ss_timer_isr(qm_ss_timer_t timer)
__builtin_arc_sr(ctrl, qm_ss_timer_base[timer] + QM_SS_TIMER_CONTROL); __builtin_arc_sr(ctrl, qm_ss_timer_base[timer] + QM_SS_TIMER_CONTROL);
} }
QM_ISR_DECLARE(qm_ss_timer_isr_0) QM_ISR_DECLARE(qm_ss_timer_0_isr)
{ {
qm_ss_timer_isr(QM_SS_TIMER_0); qm_ss_timer_isr(QM_SS_TIMER_0);
} }
@ -90,3 +90,39 @@ int qm_ss_timer_get(const qm_ss_timer_t timer, uint32_t *const count)
return 0; return 0;
} }
#if (ENABLE_RESTORE_CONTEXT)
/**
 * Save SS timer peripheral context.
 *
 * Reads the timer control, limit and count words from the ARC auxiliary
 * register file (__builtin_arc_lr) into @a ctx for restoration after sleep.
 *
 * @param timer Timer identifier.
 * @param ctx Context structure to fill. Must not be NULL.
 * @return 0 on success, -EINVAL on invalid argument.
 */
int qm_ss_timer_save_context(const qm_ss_timer_t timer,
			     qm_ss_timer_context_t *const ctx)
{
	uint32_t controller;
	QM_CHECK(timer < QM_SS_TIMER_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);
	controller = qm_ss_timer_base[timer];
	ctx->timer_control = __builtin_arc_lr(controller + QM_SS_TIMER_CONTROL);
	ctx->timer_limit = __builtin_arc_lr(controller + QM_SS_TIMER_LIMIT);
	ctx->timer_count = __builtin_arc_lr(controller + QM_SS_TIMER_COUNT);
	return 0;
}
/**
 * Restore SS timer peripheral context.
 *
 * Writes the control, limit and count words saved by
 * qm_ss_timer_save_context() back into the ARC auxiliary register file
 * (__builtin_arc_sr).
 *
 * @param timer Timer identifier.
 * @param ctx Context structure to restore from. Must not be NULL.
 * @return 0 on success, -EINVAL on invalid argument.
 */
int qm_ss_timer_restore_context(const qm_ss_timer_t timer,
				const qm_ss_timer_context_t *const ctx)
{
	uint32_t controller;
	QM_CHECK(timer < QM_SS_TIMER_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);
	controller = qm_ss_timer_base[timer];
	__builtin_arc_sr(ctx->timer_control, controller + QM_SS_TIMER_CONTROL);
	__builtin_arc_sr(ctx->timer_limit, controller + QM_SS_TIMER_LIMIT);
	__builtin_arc_sr(ctx->timer_count, controller + QM_SS_TIMER_COUNT);
	return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -228,13 +228,13 @@ static void qm_uart_isr_handler(const qm_uart_t uart)
QM_ISR_DECLARE(qm_uart_0_isr) QM_ISR_DECLARE(qm_uart_0_isr)
{ {
qm_uart_isr_handler(QM_UART_0); qm_uart_isr_handler(QM_UART_0);
QM_ISR_EOI(QM_IRQ_UART_0_VECTOR); QM_ISR_EOI(QM_IRQ_UART_0_INT_VECTOR);
} }
QM_ISR_DECLARE(qm_uart_1_isr) QM_ISR_DECLARE(qm_uart_1_isr)
{ {
qm_uart_isr_handler(QM_UART_1); qm_uart_isr_handler(QM_UART_1);
QM_ISR_EOI(QM_IRQ_UART_1_VECTOR); QM_ISR_EOI(QM_IRQ_UART_1_INT_VECTOR);
} }
int qm_uart_set_config(const qm_uart_t uart, const qm_uart_config_t *cfg) int qm_uart_set_config(const qm_uart_t uart, const qm_uart_config_t *cfg)
@ -699,3 +699,60 @@ int qm_uart_dma_read_terminate(const qm_uart_t uart)
return ret; return ret;
} }
#if (ENABLE_RESTORE_CONTEXT)
/**
 * Save UART peripheral context.
 *
 * Captures line/modem control, scratch, halt-TX and fractional-divisor
 * registers, then the divisor latch (DLH/DLL), which is only accessible
 * while LCR.DLAB is set.
 *
 * @param uart UART identifier.
 * @param ctx Context structure to fill. Must not be NULL.
 * @return 0 on success, -EINVAL on invalid argument.
 */
int qm_uart_save_context(const qm_uart_t uart, qm_uart_context_t *const ctx)
{
	QM_CHECK(uart < QM_UART_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);
	qm_uart_reg_t *const regs = QM_UART[uart];
	ctx->ier = regs->ier_dlh; /* DLAB clear: this reads IER, not DLH. */
	ctx->lcr = regs->lcr;
	ctx->mcr = regs->mcr;
	ctx->scr = regs->scr;
	ctx->htx = regs->htx;
	ctx->dlf = regs->dlf;
	/* Set DLAB to expose the divisor latch through the shared offsets. */
	regs->lcr |= QM_UART_LCR_DLAB;
	ctx->dlh = regs->ier_dlh;
	ctx->dll = regs->rbr_thr_dll;
	/* Clear DLAB again so normal IER/RBR/THR access is restored. */
	regs->lcr &= ~QM_UART_LCR_DLAB;
	return 0;
}
/**
 * Restore UART peripheral context.
 *
 * Restores the divisor latch first (under LCR.DLAB), then the remaining
 * registers saved by qm_uart_save_context(). The FIFO control register is
 * write-only, so a default FIFO configuration is applied instead; callers
 * that use a non-default FCR setup must re-apply it themselves.
 *
 * @param uart UART identifier.
 * @param ctx Context structure to restore from. Must not be NULL.
 * @return 0 on success, -EINVAL on invalid argument.
 */
int qm_uart_restore_context(const qm_uart_t uart,
			    const qm_uart_context_t *const ctx)
{
	QM_CHECK(uart < QM_UART_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);
	qm_uart_reg_t *const regs = QM_UART[uart];
	/* When DLAB is set, DLL and DLH registers can be accessed. */
	regs->lcr |= QM_UART_LCR_DLAB;
	regs->ier_dlh = ctx->dlh;
	regs->rbr_thr_dll = ctx->dll;
	regs->lcr &= ~QM_UART_LCR_DLAB;
	regs->ier_dlh = ctx->ier; /* DLAB now clear: this writes IER. */
	regs->lcr = ctx->lcr;
	regs->mcr = ctx->mcr;
	regs->scr = ctx->scr;
	regs->htx = ctx->htx;
	regs->dlf = ctx->dlf;
	/*
	 * FIFO control register cannot be read back,
	 * default config is applied for this register.
	 * Application will need to restore its own parameters.
	 */
	regs->iir_fcr =
	    (QM_UART_FCR_FIFOE | QM_UART_FCR_RFIFOR | QM_UART_FCR_XFIFOR |
	     QM_UART_FCR_DEFAULT_TX_RX_THRESHOLD);
	return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -435,10 +435,10 @@ static void usb_dc_isr_handler(const qm_usb_t usb)
} }
} }
QM_ISR_DECLARE(qm_usb_0_isr_0) QM_ISR_DECLARE(qm_usb_0_isr)
{ {
usb_dc_isr_handler(QM_USB_0); usb_dc_isr_handler(QM_USB_0);
QM_ISR_EOI(QM_IRQ_USB_0_VECTOR); QM_ISR_EOI(QM_IRQ_USB_0_INT_VECTOR);
} }
int qm_usb_attach(const qm_usb_t usb) int qm_usb_attach(const qm_usb_t usb)

View file

@ -36,12 +36,12 @@
static void (*callback[QM_WDT_NUM])(void *data); static void (*callback[QM_WDT_NUM])(void *data);
static void *callback_data[QM_WDT_NUM]; static void *callback_data[QM_WDT_NUM];
QM_ISR_DECLARE(qm_wdt_isr_0) QM_ISR_DECLARE(qm_wdt_0_isr)
{ {
if (callback[QM_WDT_0]) { if (callback[QM_WDT_0]) {
callback[QM_WDT_0](callback_data[QM_WDT_0]); callback[QM_WDT_0](callback_data[QM_WDT_0]);
} }
QM_ISR_EOI(QM_IRQ_WDT_0_VECTOR); QM_ISR_EOI(QM_IRQ_WDT_0_INT_VECTOR);
} }
int qm_wdt_start(const qm_wdt_t wdt) int qm_wdt_start(const qm_wdt_t wdt)
@ -92,3 +92,36 @@ int qm_wdt_reload(const qm_wdt_t wdt)
return 0; return 0;
} }
#if (ENABLE_RESTORE_CONTEXT)
/**
 * Save watchdog peripheral context (wdt_torr and wdt_cr registers).
 *
 * @param wdt Watchdog identifier.
 * @param ctx Context structure to fill. Must not be NULL.
 * @return 0 on success, -EINVAL on invalid argument.
 */
int qm_wdt_save_context(const qm_wdt_t wdt, qm_wdt_context_t *const ctx)
{
	QM_CHECK(wdt < QM_WDT_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);
	ctx->wdt_torr = QM_WDT[wdt].wdt_torr;
	ctx->wdt_cr = QM_WDT[wdt].wdt_cr;
	return 0;
}
/**
 * Restore watchdog peripheral context.
 *
 * Write order matters: wdt_torr (containing TOP_INIT) must be programmed
 * before wdt_cr re-enables the watchdog, and the counter is reloaded
 * afterwards so no stale timeout fires on wake-up.
 *
 * @param wdt Watchdog identifier.
 * @param ctx Context structure to restore from. Must not be NULL.
 * @return 0 on success, -EINVAL on invalid argument.
 */
int qm_wdt_restore_context(const qm_wdt_t wdt,
			   const qm_wdt_context_t *const ctx)
{
	QM_CHECK(wdt < QM_WDT_NUM, -EINVAL);
	QM_CHECK(ctx != NULL, -EINVAL);
	/*
	 * TOP_INIT field has to be written before Watchdog Timer is enabled.
	 */
	QM_WDT[wdt].wdt_torr = ctx->wdt_torr;
	QM_WDT[wdt].wdt_cr = ctx->wdt_cr;
	/*
	 * Reload the wdt value to avoid interrupts to fire on wake up.
	 */
	QM_WDT[wdt].wdt_crr = QM_WDT_RELOAD_VALUE;
	return 0;
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -0,0 +1,145 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_INTERRUPT_ROUTER_REGS_H__
#define __QM_INTERRUPT_ROUTER_REGS_H__
/**
* Quark D2000 SoC Interrupt Router registers.
*
* @defgroup groupQUARKD2000INTERRUPTROUTER SoC Interrupt Router (D2000)
* @{
*/
/**
* Masks for single source interrupts in the Interrupt Router.
* To enable: reg &= ~(MASK)
* To disable: reg |= MASK;
*/
#define QM_IR_INT_LMT_MASK BIT(0)
/* Masks for single source halts in the Interrupt Router. */
#define QM_IR_INT_LMT_HALT_MASK BIT(16)
/* Interrupt Router Unmask interrupts for a peripheral. */
#define QM_IR_UNMASK_LMT_INTERRUPTS(_peripheral_) \
(_peripheral_ &= ~(QM_IR_INT_LMT_MASK))
/* Mask interrupts for a peripheral. */
#define QM_IR_MASK_LMT_INTERRUPTS(_peripheral_) \
(_peripheral_ |= QM_IR_INT_LMT_MASK)
/* Unmask halt for a peripheral. */
#define QM_IR_UNMASK_LMT_HALTS(_peripheral_) \
(_peripheral_ &= ~(QM_IR_INT_LMT_HALT_MASK))
/* Mask halt for a peripheral. */
#define QM_IR_MASK_LMT_HALTS(_peripheral_) \
(_peripheral_ |= QM_IR_INT_LMT_HALT_MASK)
#define QM_IR_GET_LMT_MASK(_peripheral_) (_peripheral_ & QM_IR_INT_LMT_MASK)
#define QM_IR_GET_LMT_HALT_MASK(_peripheral_) \
(_peripheral_ & QM_IR_INT_LMT_HALT_MASK)
/* Define macros for use by the active core. */
#if (QM_LAKEMONT)
#define QM_IR_UNMASK_INTERRUPTS(_peripheral_) \
QM_IR_UNMASK_LMT_INTERRUPTS(_peripheral_)
#define QM_IR_MASK_INTERRUPTS(_peripheral_) \
QM_IR_MASK_LMT_INTERRUPTS(_peripheral_)
#define QM_IR_UNMASK_HALTS(_peripheral_) QM_IR_UNMASK_LMT_HALTS(_peripheral_)
#define QM_IR_MASK_HALTS(_peripheral_) QM_IR_MASK_LMT_HALTS(_peripheral_)
#define QM_IR_INT_MASK QM_IR_INT_LMT_MASK
#define QM_IR_INT_HALT_MASK QM_IR_INT_LMT_HALT_MASK
#define QM_IR_GET_MASK(_peripheral_) QM_IR_GET_LMT_MASK(_peripheral_)
#define QM_IR_GET_HALT_MASK(_peripheral_) QM_IR_GET_LMT_HALT_MASK(_peripheral_)
#else
#error "No active core selected."
#endif
/**
 * Interrupt register map.
 *
 * Each 32-bit word is an interrupt routing mask register; the word index
 * within this struct is the "Mask n" number quoted per field, i.e. the
 * word offset from QM_INTERRUPT_ROUTER_BASE. Reserved fields pad the gaps
 * so the struct layout stays in sync with the hardware — do not reorder
 * or remove fields.
 */
typedef struct {
	QM_RW uint32_t i2c_master_0_int_mask; /**< I2C Master 0, Mask 0. */
	QM_R uint32_t reserved[2];
	QM_RW uint32_t spi_master_0_int_mask; /**< SPI Master 0, Mask 3. */
	QM_R uint32_t reserved1;
	QM_RW uint32_t spi_slave_0_int_mask; /**< SPI Slave 0, Mask 5. */
	QM_RW uint32_t uart_0_int_mask; /**< UART 0, Mask 6. */
	QM_RW uint32_t uart_1_int_mask; /**< UART 1, Mask 7. */
	QM_RW uint32_t reserved2;
	QM_RW uint32_t gpio_0_int_mask; /**< GPIO 0, Mask 9. */
	QM_RW uint32_t timer_0_int_mask; /**< Timer 0, Mask 10. */
	QM_R uint32_t reserved3;
	QM_RW uint32_t rtc_0_int_mask; /**< RTC 0, Mask 12. */
	QM_RW uint32_t wdt_0_int_mask; /**< WDT 0, Mask 13. */
	QM_RW uint32_t dma_0_int_0_mask; /**< DMA 0 int 0, Mask 14. */
	QM_RW uint32_t dma_0_int_1_mask; /**< DMA 0 int 1, Mask 15. */
	QM_RW uint32_t reserved4[8];
	/** Comparator 0 Host halt, Mask 24. */
	QM_RW uint32_t comparator_0_host_halt_int_mask;
	QM_R uint32_t reserved5;
	/** Comparator 0 Host, Mask 26. */
	QM_RW uint32_t comparator_0_host_int_mask;
	QM_RW uint32_t host_bus_error_int_mask; /**< Host bus error, Mask 27. */
	QM_RW uint32_t dma_0_error_int_mask; /**< DMA 0 Error, Mask 28. */
	QM_RW uint32_t sram_mpr_0_int_mask; /**< SRAM MPR 0, Mask 29. */
	QM_RW uint32_t flash_mpr_0_int_mask; /**< Flash MPR 0, Mask 30. */
	QM_R uint32_t reserved6;
	QM_RW uint32_t aonpt_0_int_mask; /**< AONPT 0, Mask 32. */
	QM_RW uint32_t adc_0_pwr_int_mask; /**< ADC 0 PWR, Mask 33. */
	QM_RW uint32_t adc_0_cal_int_mask; /**< ADC 0 CAL, Mask 34. */
	QM_R uint32_t reserved7;
	QM_RW uint32_t lock_int_mask_reg; /**< Interrupt Mask Lock Register. */
} qm_interrupt_router_reg_t;
/* Number of interrupt mask registers (excluding mask lock register). */
#define QM_INTERRUPT_ROUTER_MASK_NUMREG \
((sizeof(qm_interrupt_router_reg_t) / sizeof(uint32_t)) - 1)
/* Default POR interrupt mask (all interrupts masked). */
#define QM_INTERRUPT_ROUTER_MASK_DEFAULT (0xFFFFFFFF)
#if (UNIT_TEST)
qm_interrupt_router_reg_t test_interrupt_router;
#define QM_INTERRUPT_ROUTER \
((qm_interrupt_router_reg_t *)(&test_interrupt_router))
#else
#define QM_INTERRUPT_ROUTER_BASE (0xB0800448)
#define QM_INTERRUPT_ROUTER \
((qm_interrupt_router_reg_t *)QM_INTERRUPT_ROUTER_BASE)
#endif
#define QM_IR_DMA_ERROR_HOST_MASK (0x00000003)
/** @} */
#endif /* __QM_INTERRUPT_ROUTER_REGS_H__ */

View file

@ -0,0 +1,165 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_SOC_INTERRUPTS_H__
#define __QM_SOC_INTERRUPTS_H__
#include "qm_common.h"
/**
* Quark D2000 SoC Interrupts.
*
* @defgroup groupQUARKD2000SEINT SoC Interrupts (D2000)
* @{
*/
/* x86 internal interrupt vectors. */
#define QM_X86_DIVIDE_ERROR_INT (0)
#define QM_X86_DEBUG_EXCEPTION_INT (1)
#define QM_X86_NMI_INTERRUPT_INT (2)
#define QM_X86_BREAKPOINT_INT (3)
#define QM_X86_OVERFLOW_INT (4)
#define QM_X86_BOUND_RANGE_EXCEEDED_INT (5)
#define QM_X86_INVALID_OPCODE_INT (6)
#define QM_X86_DEVICE_NOT_AVAILABLE_INT (7)
#define QM_X86_DOUBLE_FAULT_INT (8)
#define QM_X86_INTEL_RESERVED_09_INT (9)
#define QM_X86_INVALID_TSS_INT (10)
#define QM_X86_SEGMENT_NOT_PRESENT_INT (11)
#define QM_X86_STACK_SEGMENT_FAULT_INT (12)
#define QM_X86_GENERAL_PROTECT_FAULT_INT (13)
#define QM_X86_PAGE_FAULT_INT (14)
#define QM_X86_INTEL_RESERVED_15_INT (15)
#define QM_X86_FLOATING_POINT_ERROR_INT (16)
#define QM_X86_ALIGNMENT_CHECK_INT (17)
#define QM_X86_INTEL_RESERVED_18_INT (18)
#define QM_X86_INTEL_RESERVED_19_INT (19)
#define QM_X86_INTEL_RESERVED_20_INT (20)
#define QM_X86_INTEL_RESERVED_21_INT (21)
#define QM_X86_INTEL_RESERVED_22_INT (22)
#define QM_X86_INTEL_RESERVED_23_INT (23)
#define QM_X86_INTEL_RESERVED_24_INT (24)
#define QM_X86_INTEL_RESERVED_25_INT (25)
#define QM_X86_INTEL_RESERVED_26_INT (26)
#define QM_X86_INTEL_RESERVED_27_INT (27)
#define QM_X86_INTEL_RESERVED_28_INT (28)
#define QM_X86_INTEL_RESERVED_29_INT (29)
#define QM_X86_INTEL_RESERVED_30_INT (30)
#define QM_X86_INTEL_RESERVED_31_INT (31)
/* IRQs and interrupt vectors.
*
* The vector numbers must be defined without arithmetic expressions nor
* parentheses because they are expanded as token concatenation.
*/
#define QM_IRQ_DMA_0_ERROR_INT 0
#define QM_IRQ_DMA_0_ERROR_INT_MASK_OFFSET 28
#define QM_IRQ_DMA_0_ERROR_INT_VECTOR 32
#define QM_IRQ_HOST_BUS_ERROR_INT 1
#define QM_IRQ_HOST_BUS_ERROR_INT_MASK_OFFSET 29
#define QM_IRQ_HOST_BUS_ERROR_INT_VECTOR 33
#define QM_IRQ_RTC_0_INT 2
#define QM_IRQ_RTC_0_INT_MASK_OFFSET 12
#define QM_IRQ_RTC_0_INT_VECTOR 34
#define QM_IRQ_AONPT_0_INT 3
#define QM_IRQ_AONPT_0_INT_MASK_OFFSET 32
#define QM_IRQ_AONPT_0_INT_VECTOR 35
#define QM_IRQ_I2C_0_INT 4
#define QM_IRQ_I2C_0_INT_MASK_OFFSET 0
#define QM_IRQ_I2C_0_INT_VECTOR 36
#define QM_IRQ_SPI_SLAVE_0_INT 5
#define QM_IRQ_SPI_SLAVE_0_INT_MASK_OFFSET 3
#define QM_IRQ_SPI_SLAVE_0_INT_VECTOR 37
#define QM_IRQ_UART_1_INT 6
#define QM_IRQ_UART_1_INT_MASK_OFFSET 7
#define QM_IRQ_UART_1_INT_VECTOR 38
#define QM_IRQ_SPI_MASTER_0_INT 7
#define QM_IRQ_SPI_MASTER_0_INT_MASK_OFFSET 3
#define QM_IRQ_SPI_MASTER_0_INT_VECTOR 39
#define QM_IRQ_UART_0_INT 8
#define QM_IRQ_UART_0_INT_MASK_OFFSET 6
#define QM_IRQ_UART_0_INT_VECTOR 40
#define QM_IRQ_ADC_0_CAL_INT 9
#define QM_IRQ_ADC_0_CAL_INT_MASK_OFFSET 34
#define QM_IRQ_ADC_0_CAL_INT_VECTOR 41
#define QM_IRQ_PIC_TIMER 10
/* No SCSS mask register for PIC timer: point to an unused register */
#define QM_IRQ_PIC_TIMER_MASK_OFFSET 1
#define QM_IRQ_PIC_TIMER_VECTOR 42
#define QM_IRQ_PWM_0_INT 11
#define QM_IRQ_PWM_0_INT_MASK_OFFSET 10
#define QM_IRQ_PWM_0_INT_VECTOR 43
#define QM_IRQ_DMA_0_INT_1 12
#define QM_IRQ_DMA_0_INT_1_MASK_OFFSET 15
#define QM_IRQ_DMA_0_INT_1_VECTOR 44
#define QM_IRQ_DMA_0_INT_0 13
#define QM_IRQ_DMA_0_INT_0_MASK_OFFSET 14
#define QM_IRQ_DMA_0_INT_0_VECTOR 45
#define QM_IRQ_COMPARATOR_0_INT 14
#define QM_IRQ_COMPARATOR_0_INT_MASK_OFFSET 26
#define QM_IRQ_COMPARATOR_0_INT_VECTOR 46
#define QM_IRQ_GPIO_0_INT 15
#define QM_IRQ_GPIO_0_INT_MASK_OFFSET 9
#define QM_IRQ_GPIO_0_INT_VECTOR 47
#define QM_IRQ_WDT_0_INT 16
#define QM_IRQ_WDT_0_INT_MASK_OFFSET 13
#define QM_IRQ_WDT_0_INT_VECTOR 48
#define QM_IRQ_SRAM_MPR_0_INT 17
#define QM_IRQ_SRAM_MPR_0_INT_MASK_OFFSET 29
#define QM_IRQ_SRAM_MPR_0_INT_VECTOR 49
#define QM_IRQ_FLASH_MPR_0_INT 18
#define QM_IRQ_FLASH_MPR_0_INT_MASK_OFFSET 30
#define QM_IRQ_FLASH_MPR_0_INT_VECTOR 50
#define QM_IRQ_ADC_0_PWR_INT 19
#define QM_IRQ_ADC_0_PWR_INT_MASK_OFFSET 33
#define QM_IRQ_ADC_0_PWR_INT_VECTOR 51
/** @} */
#endif /* __QM_SOC_INTERRUPTS_H__ */

View file

@ -31,6 +31,8 @@
#define __REGISTERS_H__ #define __REGISTERS_H__
#include "qm_common.h" #include "qm_common.h"
#include "qm_soc_interrupts.h"
#include "qm_interrupt_router_regs.h"
/** /**
* Quark D2000 SoC Registers. * Quark D2000 SoC Registers.
@ -218,75 +220,6 @@ qm_scss_cmp_reg_t test_scss_cmp;
/** @} */ /** @} */
/**
* @name Interrupt
* @{
*/
/** Interrupt register map. */
typedef struct {
QM_RW uint32_t int_i2c_mst_0_mask; /**< Interrupt Routing Mask 0. */
QM_RW uint32_t reserved[2]; /* There is a hole in the address space. */
QM_RW uint32_t int_spi_mst_0_mask; /**< Interrupt Routing Mask 2. */
QM_RW uint32_t reserved1;
QM_RW uint32_t int_spi_slv_0_mask; /**< Interrupt Routing Mask 4. */
QM_RW uint32_t int_uart_0_mask; /**< Interrupt Routing Mask 5. */
QM_RW uint32_t int_uart_1_mask; /**< Interrupt Routing Mask 6. */
QM_RW uint32_t reserved2;
QM_RW uint32_t int_gpio_mask; /**< Interrupt Routing Mask 8. */
QM_RW uint32_t int_timer_mask; /**< Interrupt Routing Mask 9. */
QM_RW uint32_t reserved3;
QM_RW uint32_t int_rtc_mask; /**< Interrupt Routing Mask 11. */
QM_RW uint32_t int_watchdog_mask; /**< Interrupt Routing Mask 12. */
QM_RW uint32_t
int_dma_channel_0_mask; /**< Interrupt Routing Mask 13. */
QM_RW uint32_t
int_dma_channel_1_mask; /**< Interrupt Routing Mask 14. */
QM_RW uint32_t reserved4[8];
QM_RW uint32_t
int_comparators_host_halt_mask; /**< Interrupt Routing Mask 23. */
QM_RW uint32_t reserved5;
QM_RW uint32_t
int_comparators_host_mask; /**< Interrupt Routing Mask 25. */
QM_RW uint32_t int_host_bus_err_mask; /**< Interrupt Routing Mask 26. */
QM_RW uint32_t int_dma_error_mask; /**< Interrupt Routing Mask 27. */
QM_RW uint32_t
int_sram_controller_mask; /**< Interrupt Routing Mask 28. */
QM_RW uint32_t
int_flash_controller_0_mask; /**< Interrupt Routing Mask 29. */
QM_RW uint32_t reserved6;
QM_RW uint32_t int_aon_timer_mask; /**< Interrupt Routing Mask 31. */
QM_RW uint32_t int_adc_pwr_mask; /**< Interrupt Routing Mask 32. */
QM_RW uint32_t int_adc_calib_mask; /**< Interrupt Routing Mask 33. */
QM_RW uint32_t reserved7;
QM_RW uint32_t lock_int_mask_reg; /**< Interrupt Mask Lock Register. */
} qm_scss_int_reg_t;
/* Number of SCSS interrupt mask registers (excluding mask lock register). */
#define QM_SCSS_INT_MASK_NUMREG \
((sizeof(qm_scss_int_reg_t) / sizeof(uint32_t)) - 1)
/* Default POR SCSS interrupt mask (all interrupts masked). */
#define QM_SCSS_INT_MASK_DEFAULT (0xFFFFFFFF)
#if (UNIT_TEST)
qm_scss_int_reg_t test_scss_int;
#define QM_SCSS_INT ((qm_scss_int_reg_t *)(&test_scss_int))
#else
#define QM_SCSS_INT_BASE (0xB0800448)
#define QM_SCSS_INT ((qm_scss_int_reg_t *)QM_SCSS_INT_BASE)
#endif
#define QM_INT_TIMER_HOST_HALT_MASK BIT(0)
#define QM_INT_DMA_ERR_HOST_MASK (0x00000003)
#define QM_INT_SRAM_CONTROLLER_HOST_HALT_MASK BIT(16)
#define QM_INT_SRAM_CONTROLLER_HOST_MASK BIT(0)
#define QM_INT_FLASH_CONTROLLER_HOST_HALT_MASK BIT(16)
#define QM_INT_FLASH_CONTROLLER_HOST_MASK BIT(0)
/** @} */
/** /**
* @name Power Management * @name Power Management
* @{ * @{
@ -445,94 +378,6 @@ qm_scss_info_reg_t test_scss_info;
/** @} */ /** @} */
/**
* @name IRQs and Interrupts
* @{
*/
/* IRQs and interrupt vectors.
*
* The vector numbers must be defined without arithmetic expressions nor
* parentheses because they are expanded as token concatenation.
*/
#define QM_INT_VECTOR_DOUBLE_FAULT 8
#define QM_IRQ_RTC_0 2
#define QM_IRQ_RTC_0_MASK_OFFSET 12
#define QM_IRQ_RTC_0_VECTOR 34
#define QM_IRQ_AONPT_0 3
#define QM_IRQ_AONPT_0_MASK_OFFSET 32
#define QM_IRQ_AONPT_0_VECTOR 35
#define QM_IRQ_PWM_0 11
#define QM_IRQ_PWM_0_MASK_OFFSET 10
#define QM_IRQ_PWM_0_VECTOR 43
#define QM_IRQ_SPI_MASTER_0 7
#define QM_IRQ_SPI_MASTER_0_MASK_OFFSET 3
#define QM_IRQ_SPI_MASTER_0_VECTOR 39
#define QM_IRQ_ADC_0 9
#define QM_IRQ_ADC_0_MASK_OFFSET 34
#define QM_IRQ_ADC_0_VECTOR 41
#define QM_IRQ_ADC_PWR_0 19
#define QM_IRQ_ADC_PWR_0_MASK_OFFSET 33
#define QM_IRQ_ADC_PWR_0_VECTOR 51
#define QM_IRQ_WDT_0 16
#define QM_IRQ_WDT_0_MASK_OFFSET 13
#define QM_IRQ_WDT_0_VECTOR 48
#define QM_IRQ_GPIO_0 15
#define QM_IRQ_GPIO_0_MASK_OFFSET 9
#define QM_IRQ_GPIO_0_VECTOR 47
#define QM_IRQ_I2C_0 4
#define QM_IRQ_I2C_0_MASK_OFFSET 0
#define QM_IRQ_I2C_0_VECTOR 36
#define QM_IRQ_PIC_TIMER 10
/* No SCSS mask register for PIC timer: point to an unused register */
#define QM_IRQ_PIC_TIMER_MASK_OFFSET 1
#define QM_IRQ_PIC_TIMER_VECTOR 42
#define QM_IRQ_AC 14
#define QM_IRQ_AC_MASK_OFFSET 26
#define QM_IRQ_AC_VECTOR 46
#define QM_IRQ_SRAM 17
#define QM_IRQ_SRAM_MASK_OFFSET 29
#define QM_IRQ_SRAM_VECTOR 49
#define QM_IRQ_FLASH_0 18
#define QM_IRQ_FLASH_0_MASK_OFFSET 30
#define QM_IRQ_FLASH_0_VECTOR 50
#define QM_IRQ_UART_0 8
#define QM_IRQ_UART_0_MASK_OFFSET 6
#define QM_IRQ_UART_0_VECTOR 40
#define QM_IRQ_UART_1 6
#define QM_IRQ_UART_1_MASK_OFFSET 7
#define QM_IRQ_UART_1_VECTOR 38
#define QM_IRQ_DMA_0 13
#define QM_IRQ_DMA_0_MASK_OFFSET 14
#define QM_IRQ_DMA_0_VECTOR 45
#define QM_IRQ_DMA_1 12
#define QM_IRQ_DMA_1_MASK_OFFSET 15
#define QM_IRQ_DMA_1_VECTOR 44
#define QM_IRQ_DMA_ERR 0
#define QM_IRQ_DMA_ERR_MASK_OFFSET 28
#define QM_IRQ_DMA_ERR_VECTOR 32
/** @} */
/** /**
* @name PWM / Timer * @name PWM / Timer
* @{ * @{
@ -1074,16 +919,20 @@ extern qm_i2c_reg_t *qm_i2c[QM_I2C_NUM];
#define QM_I2C_IC_CON_SPEED_FS_FSP BIT(2) #define QM_I2C_IC_CON_SPEED_FS_FSP BIT(2)
#define QM_I2C_IC_CON_SPEED_MASK (0x06) #define QM_I2C_IC_CON_SPEED_MASK (0x06)
#define QM_I2C_IC_CON_RESTART_EN BIT(5) #define QM_I2C_IC_CON_RESTART_EN BIT(5)
#define QM_I2C_IC_CON_STOP_DET_IFADDRESSED BIT(7)
#define QM_I2C_IC_DATA_CMD_READ BIT(8) #define QM_I2C_IC_DATA_CMD_READ BIT(8)
#define QM_I2C_IC_DATA_CMD_STOP_BIT_CTRL BIT(9) #define QM_I2C_IC_DATA_CMD_STOP_BIT_CTRL BIT(9)
#define QM_I2C_IC_DATA_CMD_LSB_MASK (0x000000FF) #define QM_I2C_IC_DATA_CMD_LSB_MASK (0x000000FF)
#define QM_I2C_IC_RAW_INTR_STAT_RX_FULL BIT(2) #define QM_I2C_IC_RAW_INTR_STAT_RX_FULL BIT(2)
#define QM_I2C_IC_RAW_INTR_STAT_TX_ABRT BIT(6) #define QM_I2C_IC_RAW_INTR_STAT_TX_ABRT BIT(6)
#define QM_I2C_IC_RAW_INTR_STAT_GEN_CALL BIT(11)
#define QM_I2C_IC_RAW_INTR_STAT_RESTART_DETECTED BIT(12)
#define QM_I2C_IC_TX_ABRT_SOURCE_NAK_MASK (0x1F) #define QM_I2C_IC_TX_ABRT_SOURCE_NAK_MASK (0x1F)
#define QM_I2C_IC_TX_ABRT_SOURCE_ARB_LOST BIT(12) #define QM_I2C_IC_TX_ABRT_SOURCE_ARB_LOST BIT(12)
#define QM_I2C_IC_TX_ABRT_SOURCE_ABRT_SBYTE_NORSTRT BIT(9) #define QM_I2C_IC_TX_ABRT_SOURCE_ABRT_SBYTE_NORSTRT BIT(9)
#define QM_I2C_IC_TX_ABRT_SOURCE_ALL_MASK (0x1FFFF) #define QM_I2C_IC_TX_ABRT_SOURCE_ALL_MASK (0x1FFFF)
#define QM_I2C_IC_STATUS_BUSY_MASK (0x00000060) #define QM_I2C_IC_STATUS_BUSY_MASK (0x00000060)
#define QM_I2C_IC_STATUS_RFF BIT(4)
#define QM_I2C_IC_STATUS_RFNE BIT(3) #define QM_I2C_IC_STATUS_RFNE BIT(3)
#define QM_I2C_IC_STATUS_TFE BIT(2) #define QM_I2C_IC_STATUS_TFE BIT(2)
#define QM_I2C_IC_STATUS_TNF BIT(1) #define QM_I2C_IC_STATUS_TNF BIT(1)
@ -1093,15 +942,25 @@ extern qm_i2c_reg_t *qm_i2c[QM_I2C_NUM];
#define QM_I2C_IC_INTR_MASK_RX_FULL BIT(2) #define QM_I2C_IC_INTR_MASK_RX_FULL BIT(2)
#define QM_I2C_IC_INTR_MASK_TX_OVER BIT(3) #define QM_I2C_IC_INTR_MASK_TX_OVER BIT(3)
#define QM_I2C_IC_INTR_MASK_TX_EMPTY BIT(4) #define QM_I2C_IC_INTR_MASK_TX_EMPTY BIT(4)
#define QM_I2C_IC_INTR_MASK_RD_REQ BIT(5)
#define QM_I2C_IC_INTR_MASK_TX_ABORT BIT(6) #define QM_I2C_IC_INTR_MASK_TX_ABORT BIT(6)
#define QM_I2C_IC_INTR_MASK_RX_DONE BIT(7)
#define QM_I2C_IC_INTR_MASK_ACTIVITY BIT(8)
#define QM_I2C_IC_INTR_MASK_STOP_DETECTED BIT(9) #define QM_I2C_IC_INTR_MASK_STOP_DETECTED BIT(9)
#define QM_I2C_IC_INTR_MASK_START_DETECTED BIT(10) #define QM_I2C_IC_INTR_MASK_START_DETECTED BIT(10)
#define QM_I2C_IC_INTR_MASK_GEN_CALL_DETECTED BIT(11)
#define QM_I2C_IC_INTR_MASK_RESTART_DETECTED BIT(12)
#define QM_I2C_IC_INTR_STAT_RX_UNDER BIT(0) #define QM_I2C_IC_INTR_STAT_RX_UNDER BIT(0)
#define QM_I2C_IC_INTR_STAT_RX_OVER BIT(1) #define QM_I2C_IC_INTR_STAT_RX_OVER BIT(1)
#define QM_I2C_IC_INTR_STAT_RX_FULL BIT(2) #define QM_I2C_IC_INTR_STAT_RX_FULL BIT(2)
#define QM_I2C_IC_INTR_STAT_TX_OVER BIT(3) #define QM_I2C_IC_INTR_STAT_TX_OVER BIT(3)
#define QM_I2C_IC_INTR_STAT_TX_EMPTY BIT(4) #define QM_I2C_IC_INTR_STAT_TX_EMPTY BIT(4)
#define QM_I2C_IC_INTR_STAT_RD_REQ BIT(5)
#define QM_I2C_IC_INTR_STAT_TX_ABRT BIT(6) #define QM_I2C_IC_INTR_STAT_TX_ABRT BIT(6)
#define QM_I2C_IC_INTR_STAT_RX_DONE BIT(7)
#define QM_I2C_IC_INTR_STAT_STOP_DETECTED BIT(9)
#define QM_I2C_IC_INTR_STAT_START_DETECTED BIT(10)
#define QM_I2C_IC_INTR_STAT_GEN_CALL_DETECTED BIT(11)
#define QM_I2C_IC_LCNT_MAX (65525) #define QM_I2C_IC_LCNT_MAX (65525)
#define QM_I2C_IC_LCNT_MIN (8) #define QM_I2C_IC_LCNT_MIN (8)
#define QM_I2C_IC_HCNT_MAX (65525) #define QM_I2C_IC_HCNT_MAX (65525)
@ -1346,11 +1205,38 @@ extern qm_flash_reg_t *qm_flash[QM_FLASH_NUM];
/** @} */ /** @} */
/**
* @name Flash Protection Region
* @{
*/
/**
* FPR register map.
*/
typedef enum {
QM_FPR_0, /**< FPR 0. */
QM_FPR_1, /**< FPR 1. */
QM_FPR_2, /**< FPR 2. */
QM_FPR_3, /**< FPR 3. */
QM_FPR_NUM
} qm_fpr_id_t;
/** @} */
/** /**
* @name Memory Protection Region * @name Memory Protection Region
* @{ * @{
*/ */
/* MPR identifier */
typedef enum {
QM_MPR_0 = 0, /**< Memory Protection Region 0. */
QM_MPR_1, /**< Memory Protection Region 1. */
QM_MPR_2, /**< Memory Protection Region 2. */
QM_MPR_3, /**< Memory Protection Region 3. */
QM_MPR_NUM /**< Number of Memory Protection Regions. */
} qm_mpr_id_t;
/** Memory Protection Region register map. */ /** Memory Protection Region register map. */
typedef struct { typedef struct {
QM_RW uint32_t mpr_cfg[4]; /**< MPR CFG */ QM_RW uint32_t mpr_cfg[4]; /**< MPR CFG */
@ -1714,6 +1600,16 @@ extern qm_dma_reg_t *qm_dma[QM_DMA_NUM];
/** @} */ /** @} */
/**
* @name Hardware Fixes
* @{
*/
/* Refer to "HARDWARE_ISSUES.rst" for fix description. */
#define FIX_1 (1)
/** @} */
/** /**
* @name Versioning * @name Versioning
* @{ * @{

View file

@ -37,19 +37,9 @@
void power_soc_sleep() void power_soc_sleep()
{ {
#if (QM_SENSOR)
/* The sensor cannot be woken up with an edge triggered
* interrupt from the RTC.
* Switch to Level triggered interrupts.
* When waking up, the ROM will configure the RTC back to
* its initial settings.
*/
__builtin_arc_sr(QM_IRQ_RTC_0_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_LEVEL_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
#endif
/* Go to sleep */ /* Go to sleep */
QM_SCSS_PMU->slp_cfg &= ~QM_SCSS_SLP_CFG_LPMODE_EN; QM_SCSS_PMU->slp_cfg &= ~QM_SCSS_SLP_CFG_LPMODE_EN;
SOC_WATCH_LOG_EVENT(SOCW_EVENT_REGISTER, SOCW_REG_SLP_CFG); SOC_WATCH_LOG_EVENT(SOCW_EVENT_REGISTER, SOCW_REG_SLP_CFG);
SOC_WATCH_LOG_EVENT(SOCW_EVENT_SLEEP, 0); SOC_WATCH_LOG_EVENT(SOCW_EVENT_SLEEP, 0);
QM_SCSS_PMU->pm1c |= QM_SCSS_PM1C_SLPEN; QM_SCSS_PMU->pm1c |= QM_SCSS_PM1C_SLPEN;
@ -57,17 +47,6 @@ void power_soc_sleep()
void power_soc_deep_sleep() void power_soc_deep_sleep()
{ {
#if (QM_SENSOR)
/* The sensor cannot be woken up with an edge triggered
* interrupt from the RTC.
* Switch to Level triggered interrupts.
* When waking up, the ROM will configure the RTC back to
* its initial settings.
*/
__builtin_arc_sr(QM_IRQ_RTC_0_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_LEVEL_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
#endif
/* Switch to linear regulators. /* Switch to linear regulators.
* For low power deep sleep mode, it is a requirement that the platform * For low power deep sleep mode, it is a requirement that the platform
* voltage regulators are not in switching mode. * voltage regulators are not in switching mode.
@ -82,6 +61,111 @@ void power_soc_deep_sleep()
QM_SCSS_PMU->pm1c |= QM_SCSS_PM1C_SLPEN; QM_SCSS_PMU->pm1c |= QM_SCSS_PM1C_SLPEN;
} }
#if (ENABLE_RESTORE_CONTEXT) && (!QM_SENSOR)
/*
* The restore trap address is stored in the variable __x86_restore_info.
* The variable __x86_restore_info is defined in the linker script as a new
* and independent memory segment.
*/
extern uint32_t __x86_restore_info[];
/*
* The stack pointer is saved in the global variable sp_restore_storage
* by qm_x86_save_context() before sleep and it is restored by
* qm_x86_restore_context() after wake up.
*/
uint32_t sp_restore_storage;
/**
 * Enter SoC sleep with x86 context restoration.
 *
 * Saves the x86 execution context and resume vector, flags the restore to
 * the bootloader via GPS0, then enters sleep. After wake-up the bootloader
 * jumps to the restore trap label emitted by qm_x86_restore_context(), so
 * execution transparently resumes at the end of this function.
 */
void power_soc_sleep_restore()
{
	/*
	 * Save x86 restore trap address.
	 * The first parameter in this macro represents the label defined in
	 * the qm_x86_restore_context() macro, which is actually the restore
	 * trap address.
	 */
	qm_x86_set_resume_vector(sleep_restore_trap, __x86_restore_info);
	/* Save x86 execution context. */
	qm_x86_save_context(sp_restore_storage);
	/* Set restore flags. */
	power_soc_set_x86_restore_flag();
	/* Enter sleep. */
	power_soc_sleep();
	/*
	 * Restore x86 execution context.
	 * The bootloader code will jump to this location after waking up from
	 * sleep. The restore trap address is the label defined in the macro.
	 * That label is exposed here through the first parameter.
	 */
	qm_x86_restore_context(sleep_restore_trap, sp_restore_storage);
}
/**
 * Enter SoC deep sleep with x86 context restoration.
 *
 * Identical flow to power_soc_sleep_restore() but enters deep sleep:
 * save x86 context and resume vector, set the restore flag for the
 * bootloader, sleep, and resume at the restore trap after wake-up.
 */
void power_soc_deep_sleep_restore()
{
	/*
	 * Save x86 restore trap address.
	 * The first parameter in this macro represents the label defined in
	 * the qm_x86_restore_context() macro, which is actually the restore
	 * trap address.
	 */
	qm_x86_set_resume_vector(deep_sleep_restore_trap, __x86_restore_info);
	/* Save x86 execution context. */
	qm_x86_save_context(sp_restore_storage);
	/* Set restore flags. */
	power_soc_set_x86_restore_flag();
	/* Enter sleep. */
	power_soc_deep_sleep();
	/*
	 * Restore x86 execution context.
	 * The bootloader code will jump to this location after waking up from
	 * sleep. The restore trap address is the label defined in the macro.
	 * That label is exposed here through the first parameter.
	 */
	qm_x86_restore_context(deep_sleep_restore_trap, sp_restore_storage);
}
/**
 * Save context, then halt in C2 until put to sleep by another agent.
 *
 * The `while (1)` loop never exits normally: the code after it is reached
 * only when, after sleep and wake-up, the bootloader jumps to the restore
 * trap label emitted by the qm_x86_restore_context() macro.
 */
void power_sleep_wait()
{
	/*
	 * Save x86 restore trap address.
	 * The first parameter in this macro represents the label defined in
	 * the qm_x86_restore_context() macro, which is actually the restore
	 * trap address.
	 */
	qm_x86_set_resume_vector(sleep_restore_trap, __x86_restore_info);
	/* Save x86 execution context. */
	qm_x86_save_context(sp_restore_storage);
	/* Set restore flags. */
	power_soc_set_x86_restore_flag();
	/* Enter C2 and stay in it until sleep and wake-up. */
	while (1) {
		power_cpu_c2();
	}
	/*
	 * Restore x86 execution context.
	 * The bootloader code will jump to this location after waking up from
	 * sleep. The restore trap address is the label defined in the macro.
	 * That label is exposed here through the first parameter.
	 */
	qm_x86_restore_context(sleep_restore_trap, sp_restore_storage);
}
/**
 * Set the x86 wake-up restore flag in the GPS0 sticky register.
 *
 * NOTE(review): presumably checked by the ROM/bootloader on wake-up to
 * decide whether to jump to the saved resume vector — confirm against the
 * bootloader sources.
 */
void power_soc_set_x86_restore_flag(void)
{
	QM_SCSS_GP->gps0 |= BIT(QM_GPS0_BIT_X86_WAKEUP);
}
#endif /* ENABLE_RESTORE_CONTEXT */
#if (!QM_SENSOR) #if (!QM_SENSOR)
void power_cpu_c1() void power_cpu_c1()
{ {

View file

@ -27,6 +27,7 @@
* POSSIBILITY OF SUCH DAMAGE. * POSSIBILITY OF SUCH DAMAGE.
*/ */
#include "power_states.h"
#include "ss_power_states.h" #include "ss_power_states.h"
#include "qm_isr.h" #include "qm_isr.h"
#include "qm_sensor_regs.h" #include "qm_sensor_regs.h"
@ -111,10 +112,10 @@ void ss_power_cpu_ss1(const ss_power_cpu_ss1_mode_t mode)
* Switch to Level triggered interrupts and restore * Switch to Level triggered interrupts and restore
* the setting when waking up. * the setting when waking up.
*/ */
__builtin_arc_sr(QM_IRQ_RTC_0_VECTOR, QM_SS_AUX_IRQ_SELECT); __builtin_arc_sr(QM_IRQ_RTC_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_LEVEL_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER); __builtin_arc_sr(QM_SS_IRQ_LEVEL_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
__builtin_arc_sr(QM_IRQ_AONPT_0_VECTOR, QM_SS_AUX_IRQ_SELECT); __builtin_arc_sr(QM_IRQ_AONPT_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_LEVEL_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER); __builtin_arc_sr(QM_SS_IRQ_LEVEL_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
/* Enter SS1 */ /* Enter SS1 */
@ -134,10 +135,10 @@ void ss_power_cpu_ss1(const ss_power_cpu_ss1_mode_t mode)
} }
/* Restore the RTC and AONC to edge interrupt after when waking up. */ /* Restore the RTC and AONC to edge interrupt after when waking up. */
__builtin_arc_sr(QM_IRQ_RTC_0_VECTOR, QM_SS_AUX_IRQ_SELECT); __builtin_arc_sr(QM_IRQ_RTC_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_EDGE_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER); __builtin_arc_sr(QM_SS_IRQ_EDGE_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
__builtin_arc_sr(QM_IRQ_AONPT_0_VECTOR, QM_SS_AUX_IRQ_SELECT); __builtin_arc_sr(QM_IRQ_AONPT_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_EDGE_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER); __builtin_arc_sr(QM_SS_IRQ_EDGE_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
} }
@ -152,10 +153,10 @@ void ss_power_cpu_ss2(void)
* Switch to Level triggered interrupts and restore * Switch to Level triggered interrupts and restore
* the setting when waking up. * the setting when waking up.
*/ */
__builtin_arc_sr(QM_IRQ_RTC_0_VECTOR, QM_SS_AUX_IRQ_SELECT); __builtin_arc_sr(QM_IRQ_RTC_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_LEVEL_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER); __builtin_arc_sr(QM_SS_IRQ_LEVEL_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
__builtin_arc_sr(QM_IRQ_AONPT_0_VECTOR, QM_SS_AUX_IRQ_SELECT); __builtin_arc_sr(QM_IRQ_AONPT_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_LEVEL_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER); __builtin_arc_sr(QM_SS_IRQ_LEVEL_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
/* Enter SS2 */ /* Enter SS2 */
@ -164,9 +165,104 @@ void ss_power_cpu_ss2(void)
: "i"(QM_SS_SLEEP_MODE_CORE_TIMERS_RTC_OFF)); : "i"(QM_SS_SLEEP_MODE_CORE_TIMERS_RTC_OFF));
/* Restore the RTC and AONC to edge interrupt after when waking up. */ /* Restore the RTC and AONC to edge interrupt after when waking up. */
__builtin_arc_sr(QM_IRQ_RTC_0_VECTOR, QM_SS_AUX_IRQ_SELECT); __builtin_arc_sr(QM_IRQ_RTC_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_EDGE_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER); __builtin_arc_sr(QM_SS_IRQ_EDGE_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
__builtin_arc_sr(QM_IRQ_AONPT_0_VECTOR, QM_SS_AUX_IRQ_SELECT); __builtin_arc_sr(QM_IRQ_AONPT_0_INT_VECTOR, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_EDGE_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER); __builtin_arc_sr(QM_SS_IRQ_EDGE_SENSITIVE, QM_SS_AUX_IRQ_TRIGGER);
} }
#if (ENABLE_RESTORE_CONTEXT)
/*
 * Resume vector storage consumed after wake-up.
 * NOTE(review): defined elsewhere, presumably in the sensor startup
 * code -- verify.
 */
extern uint32_t arc_restore_addr;

/* Buffer that holds the saved ARC register context across sleep. */
uint32_t cpu_context[32];

/*
 * Enter SoC sleep and resume execution here after wake-up.
 *
 * The resume address (the sleep_restore_trap label emitted by
 * qm_ss_restore_context() below) is stored in arc_restore_addr, the ARC
 * register context is saved in cpu_context, and the SoC then enters
 * sleep.  After wake-up the sensor startup code jumps to the trap label,
 * which reloads the saved context.
 */
void ss_power_soc_sleep_restore(void)
{
	/*
	 * Save sensor restore trap address.
	 * The first parameter in this macro represents the label defined in
	 * the qm_ss_restore_context() macro, which is actually the restore
	 * trap address.
	 */
	qm_ss_set_resume_vector(sleep_restore_trap, arc_restore_addr);

	/* Save ARC execution context. */
	qm_ss_save_context(cpu_context);

	/* Set restore flags. */
	power_soc_set_ss_restore_flag();

	/* Enter sleep. */
	power_soc_sleep();

	/*
	 * Restore sensor execution context.
	 * The sensor startup code will jump to this location after waking up
	 * from sleep. The restore trap address is the label defined in the
	 * macro and the label is exposed here through the first parameter.
	 */
	qm_ss_restore_context(sleep_restore_trap, cpu_context);
}
/*
 * Enter SoC deep sleep and resume execution here after wake-up.
 *
 * Same scheme as ss_power_soc_sleep_restore(): save the resume vector and
 * the ARC context, flag the restore, enter deep sleep, and resume at the
 * deep_sleep_restore_trap label after wake-up.
 */
void ss_power_soc_deep_sleep_restore(void)
{
	/*
	 * Save sensor restore trap address.
	 * The first parameter in this macro represents the label defined in
	 * the qm_ss_restore_context() macro, which is actually the restore
	 * trap address.
	 */
	qm_ss_set_resume_vector(deep_sleep_restore_trap, arc_restore_addr);

	/* Save ARC execution context. */
	qm_ss_save_context(cpu_context);

	/* Set restore flags. */
	power_soc_set_ss_restore_flag();

	/* Enter sleep. */
	power_soc_deep_sleep();

	/*
	 * Restore sensor execution context.
	 * The sensor startup code will jump to this location after waking up
	 * from sleep. The restore trap address is the label defined in the
	 * macro and the label is exposed here through the first parameter.
	 */
	qm_ss_restore_context(deep_sleep_restore_trap, cpu_context);
}
/*
 * Save context, park this core in SS1, and resume execution here after
 * wake-up.
 *
 * The sleep transition is driven elsewhere; this core spins in SS1 (with
 * its timer on) until sleep happens.  After wake-up the sensor startup
 * code jumps directly to the sleep_restore_trap asm label below, which is
 * how control escapes the infinite loop.
 *
 * NOTE(review): the sleep_restore_trap label is also emitted by
 * ss_power_soc_sleep_restore() above; if both expansions end up in the
 * same translation unit the assembler would see a duplicate label --
 * confirm.
 */
void ss_power_sleep_wait(void)
{
	/*
	 * Save sensor restore trap address.
	 * The first parameter in this macro represents the label defined in
	 * the qm_ss_restore_context() macro, which is actually the restore
	 * trap address.
	 */
	qm_ss_set_resume_vector(sleep_restore_trap, arc_restore_addr);

	/* Save ARC execution context. */
	qm_ss_save_context(cpu_context);

	/* Set restore flags. */
	power_soc_set_ss_restore_flag();

	/* Enter SS1 and stay in it until sleep and wake-up. */
	while (1) {
		ss_power_cpu_ss1(SS_POWER_CPU_SS1_TIMER_ON);
	}

	/*
	 * Restore sensor execution context.
	 * The sensor startup code will jump to this location after waking up
	 * from sleep. The restore trap address is the label defined in the
	 * macro and the label is exposed here through the first parameter.
	 * (Unreachable by normal C control flow; entered only via the asm
	 * label above.)
	 */
	qm_ss_restore_context(sleep_restore_trap, cpu_context);
}
/*
 * Set the sensor startup restore flag (GPS0 wake-up bit) so that, after a
 * wake event, startup code can detect that a saved ARC context should be
 * restored -- see the GPS0 bit definitions in qm_soc_regs.h.
 */
void power_soc_set_ss_restore_flag(void)
{
	QM_SCSS_GP->gps0 |= BIT(QM_GPS0_BIT_SENSOR_WAKEUP);
}
#endif /* ENABLE_RESTORE_CONTEXT */

View file

@ -85,6 +85,59 @@ void power_soc_sleep(void);
*/ */
void power_soc_deep_sleep(void); void power_soc_deep_sleep(void);
#if (ENABLE_RESTORE_CONTEXT) && (!QM_SENSOR)
/**
* Enter SoC sleep state and restore after wake up.
*
* Put the SoC into sleep state until next SoC wake event
* and continue execution after wake up where the application stopped.
*
* If the library is built with ENABLE_RESTORE_CONTEXT=1, then this function
* will use the common RAM __x86_restore_info[0] to save the necessary context
* to bring back the CPU to the point where this function was called.
* This means that applications should refrain from using them.
*
* This function calls qm_x86_save_context and qm_x86_restore_context
* in order to restore execution where it stopped.
* All power management transitions are done by power_soc_sleep().
*/
void power_soc_sleep_restore(void);
/**
* Enter SoC deep sleep state and restore after wake up.
*
* Put the SoC into deep sleep state until next SoC wake event
* and continue execution after wake up where the application stopped.
*
* If the library is built with ENABLE_RESTORE_CONTEXT=1, then this function
* will use the common RAM __x86_restore_info[0] to save the necessary context
* to bring back the CPU to the point where this function was called.
* This means that applications should refrain from using them.
*
* This function calls qm_x86_save_context and qm_x86_restore_context
* in order to restore execution where it stopped.
* All power management transitions are done by power_soc_deep_sleep().
*/
void power_soc_deep_sleep_restore(void);
/**
* Save context, enter x86 C2 power save state and restore after wake up.
*
* This routine is same as power_soc_sleep_restore(), just instead of
* going to sleep it will go to C2 power save state.
* Note: this function has a while(1) which will spin until we enter
* (and exit) sleep while the power state change will be managed by
* the other core.
*/
void power_sleep_wait(void);
/**
* Enable the x86 startup restore flag, see GPS0 #define in qm_soc_regs.h
*/
void power_soc_set_x86_restore_flag(void);
#endif /* ENABLE_RESTORE_CONTEXT */
/** /**
* @} * @}
*/ */
@ -157,6 +210,68 @@ void power_cpu_c2(void);
void power_cpu_c2lp(void); void power_cpu_c2lp(void);
#endif #endif
#if (ENABLE_RESTORE_CONTEXT) && (!QM_SENSOR) && (!UNIT_TEST)
/**
 * Save resume vector.
 *
 * Saves the resume vector in the common RAM __x86_restore_info[0] location.
 * The bootloader will jump to the resume vector once a wake up event
 * is triggered.
 */
/*
 * NOTE(review): the asm writes the label address *into* shared_mem, yet the
 * operand appears in the input list with an "m" constraint.  Memory operands
 * that are written should normally be declared as outputs ("=m") so the
 * compiler knows the location is modified -- confirm against GCC extended
 * asm constraint rules.
 */
#define qm_x86_set_resume_vector(_restore_label, shared_mem)                   \
	__asm__ __volatile__("movl $" #_restore_label ", %[trap]\n\t"          \
			     : /* Output operands. */                          \
			     : /* Input operands. */                           \
			     [trap] "m"(shared_mem)                            \
			     : /* Clobbered registers list. */                 \
			     )

/* Save execution context.
 *
 * This routine saves 'idtr', EFLAGS and general purpose registers onto the
 * stack.
 *
 * The bootloader will set the 'gdt' before calling into the 'restore_trap'
 * function, so we don't need to save it here.
 *
 * The final stack pointer is stored into 'stack_pointer' (via movl
 * %esp,(%eax)); 8 bytes are reserved for the 6-byte IDTR image saved by
 * 'sidt' -- presumably 8 rather than 6 to keep the stack aligned, confirm.
 * NOTE(review): 'stack_pointer' is written but listed as an input "m"
 * operand; see the note on qm_x86_set_resume_vector above.
 */
#define qm_x86_save_context(stack_pointer)                                     \
	__asm__ __volatile__("sub $8, %%esp\n\t"                               \
			     "sidt (%%esp)\n\t"                                \
			     "lea %[stackpointer], %%eax\n\t"                  \
			     "pushfl\n\t"                                       \
			     "pushal\n\t"                                       \
			     "movl %%esp, (%%eax)\n\t"                         \
			     : /* Output operands. */                          \
			     : /* Input operands. */                           \
			     [stackpointer] "m"(stack_pointer)                 \
			     : /* Clobbered registers list. */                 \
			     "eax")

/* Restore trap. This routine recovers the stack pointer into esp and retrieves
 * 'idtr', EFLAGS and general purpose registers from stack.
 *
 * This routine is called from the bootloader to restore the execution context
 * from before entering in sleep mode.
 *
 * Note: the macro *defines* the _restore_label asm label in-line; the
 * bootloader jumps to that label, so the surrounding C code after a
 * while(1) loop is still reachable through it.
 */
#define qm_x86_restore_context(_restore_label, stack_pointer)                  \
	__asm__ __volatile__(#_restore_label ":\n\t"                           \
			     "lea %[stackpointer], %%eax\n\t"                  \
			     "movl (%%eax), %%esp\n\t"                         \
			     "popal\n\t"                                       \
			     "popfl\n\t"                                       \
			     "lidt (%%esp)\n\t"                                \
			     "add $8, %%esp\n\t"                               \
			     : /* Output operands. */                          \
			     : /* Input operands. */                           \
			     [stackpointer] "m"(stack_pointer)                 \
			     : /* Clobbered registers list. */                 \
			     "eax")
#else
/* Context save/restore compiles away when the feature is disabled. */
#define qm_x86_set_resume_vector(_restore_label, shared_mem)
#define qm_x86_save_context(stack_pointer)
#define qm_x86_restore_context(_restore_label, stack_pointer)
#endif
/** /**
* @} * @}
*/ */

View file

@ -0,0 +1,207 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_INTERRUPT_ROUTER_REGS_H__
#define __QM_INTERRUPT_ROUTER_REGS_H__
/**
* Quark SE SoC Event Router registers.
*
* @defgroup groupQUARKSESEEVENTROUTER SoC Event Router (SE)
* @{
*/
/**
* Masks for single source interrupts in the Event Router.
* To enable: reg &= ~(MASK)
* To disable: reg |= MASK;
*/
#define QM_IR_INT_LMT_MASK BIT(0)
#define QM_IR_INT_SS_MASK BIT(8)
/* Masks for single source halts in the Event Router. */
#define QM_IR_INT_LMT_HALT_MASK BIT(16)
#define QM_IR_INT_SS_HALT_MASK BIT(24)
/* Event Router Unmask interrupts for a peripheral. */
#define QM_IR_UNMASK_LMT_INTERRUPTS(_peripheral_) \
(_peripheral_ &= ~(QM_IR_INT_LMT_MASK))
#define QM_IR_UNMASK_SS_INTERRUPTS(_peripheral_) \
(_peripheral_ &= ~(QM_IR_INT_SS_MASK))
/* Mask interrupts for a peripheral. */
#define QM_IR_MASK_LMT_INTERRUPTS(_peripheral_) \
(_peripheral_ |= QM_IR_INT_LMT_MASK)
#define QM_IR_MASK_SS_INTERRUPTS(_peripheral_) \
(_peripheral_ |= QM_IR_INT_SS_MASK)
/* Unmask halt for a peripheral. */
#define QM_IR_UNMASK_LMT_HALTS(_peripheral_) \
(_peripheral_ &= ~(QM_IR_INT_LMT_HALT_MASK))
#define QM_IR_UNMASK_SS_HALTS(_peripheral_) \
(_peripheral_ &= ~(QM_IR_INT_SS_HALT_MASK))
/* Mask halt for a peripheral. */
#define QM_IR_MASK_LMT_HALTS(_peripheral_) \
(_peripheral_ |= QM_IR_INT_LMT_HALT_MASK)
#define QM_IR_MASK_SS_HALTS(_peripheral_) \
(_peripheral_ |= QM_IR_INT_SS_HALT_MASK)
#define QM_IR_GET_LMT_MASK(_peripheral_) (_peripheral_ & QM_IR_INT_LMT_MASK)
#define QM_IR_GET_LMT_HALT_MASK(_peripheral_) \
(_peripheral_ & QM_IR_INT_LMT_HALT_MASK)
#define QM_IR_GET_SS_MASK(_peripheral_) (_peripheral_ & QM_IR_INT_SS_MASK)
#define QM_IR_GET_SS_HALT_MASK(_peripheral_) \
(_peripheral_ & QM_IR_INT_SS_HALT_MASK)
/* Define macros for use by the active core. */
#if (QM_LAKEMONT)
#define QM_IR_UNMASK_INTERRUPTS(_peripheral_) \
QM_IR_UNMASK_LMT_INTERRUPTS(_peripheral_)
#define QM_IR_MASK_INTERRUPTS(_peripheral_) \
QM_IR_MASK_LMT_INTERRUPTS(_peripheral_)
#define QM_IR_UNMASK_HALTS(_peripheral_) QM_IR_UNMASK_LMT_HALTS(_peripheral_)
#define QM_IR_MASK_HALTS(_peripheral_) QM_IR_MASK_LMT_HALTS(_peripheral_)
#define QM_IR_INT_MASK QM_IR_INT_LMT_MASK
#define QM_IR_INT_HALT_MASK QM_IR_INT_LMT_HALT_MASK
#define QM_IR_GET_MASK(_peripheral_) QM_IR_GET_LMT_MASK(_peripheral_)
#define QM_IR_GET_HALT_MASK(_peripheral_) QM_IR_GET_LMT_HALT_MASK(_peripheral_)
#elif(QM_SENSOR)
#define QM_IR_UNMASK_INTERRUPTS(_peripheral_) \
QM_IR_UNMASK_SS_INTERRUPTS(_peripheral_)
#define QM_IR_MASK_INTERRUPTS(_peripheral_) \
QM_IR_MASK_SS_INTERRUPTS(_peripheral_)
#define QM_IR_UNMASK_HALTS(_peripheral_) QM_IR_UNMASK_SS_HALTS(_peripheral_)
#define QM_IR_MASK_HALTS(_peripheral_) QM_IR_MASK_SS_HALTS(_peripheral_)
#define QM_IR_INT_MASK QM_IR_INT_SS_MASK
#define QM_IR_INT_HALT_MASK QM_IR_INT_SS_HALT_MASK
#define QM_IR_GET_MASK(_peripheral_) QM_IR_GET_SS_MASK(_peripheral_)
#define QM_IR_GET_HALT_MASK(_peripheral_) QM_IR_GET_SS_HALT_MASK(_peripheral_)
#else
#error "No active core selected."
#endif
/** SS I2C Interrupt register map (one mask register per I2C interrupt). */
typedef struct {
	QM_RW uint32_t err_mask;      /**< Error interrupt mask. */
	QM_RW uint32_t rx_avail_mask; /**< Data available interrupt mask. */
	QM_RW uint32_t tx_req_mask;   /**< Data required interrupt mask. */
	QM_RW uint32_t stop_det_mask; /**< Stop detect interrupt mask. */
} int_ss_i2c_reg_t;
/** SS SPI Interrupt register map. */
typedef struct {
QM_RW uint32_t err_int_mask;
QM_RW uint32_t rx_avail_mask;
QM_RW uint32_t tx_req_mask;
} int_ss_spi_reg_t;
/** Interrupt register map. */
/*
 * One 32-bit mask register per interrupt source; the field order mirrors
 * the hardware Interrupt Router register block (mapped at
 * QM_INTERRUPT_ROUTER_BASE below) and must not be changed.  Use the
 * QM_IR_* helper macros above to mask/unmask a source for the Lakemont or
 * Sensor core.
 */
typedef struct {
	QM_RW uint32_t ss_adc_0_error_int_mask; /**< Sensor ADC 0 Error. */
	QM_RW uint32_t ss_adc_0_int_mask;       /**< Sensor ADC 0. */
	QM_RW uint32_t ss_gpio_0_int_mask;      /**< Sensor GPIO 0. */
	QM_RW uint32_t ss_gpio_1_int_mask;      /**< Sensor GPIO 1. */
	int_ss_i2c_reg_t ss_i2c_0_int;          /**< Sensor I2C 0 Masks. */
	int_ss_i2c_reg_t ss_i2c_1_int;          /**< Sensor I2C 1 Masks. */
	int_ss_spi_reg_t ss_spi_0_int;          /**< Sensor SPI 0 Masks. */
	int_ss_spi_reg_t ss_spi_1_int;          /**< Sensor SPI 1 Masks. */
	QM_RW uint32_t i2c_master_0_int_mask;   /**< I2C Master 0. */
	QM_RW uint32_t i2c_master_1_int_mask;   /**< I2C Master 1. */
	QM_R uint32_t reserved;                 /**< Reserved (read-only). */
	QM_RW uint32_t spi_master_0_int_mask;   /**< SPI Master 0. */
	QM_RW uint32_t spi_master_1_int_mask;   /**< SPI Master 1. */
	QM_RW uint32_t spi_slave_0_int_mask;    /**< SPI Slave 0. */
	QM_RW uint32_t uart_0_int_mask;         /**< UART 0. */
	QM_RW uint32_t uart_1_int_mask;         /**< UART 1. */
	QM_RW uint32_t i2s_0_int_mask;          /**< I2S 0. */
	QM_RW uint32_t gpio_0_int_mask;         /**< GPIO 0. */
	QM_RW uint32_t pwm_0_int_mask;          /**< PWM 0. */
	QM_RW uint32_t usb_0_int_mask;          /**< USB 0. */
	QM_RW uint32_t rtc_0_int_mask;          /**< RTC 0. */
	QM_RW uint32_t wdt_0_int_mask;          /**< WDT 0. */
	QM_RW uint32_t dma_0_int_0_mask;        /**< DMA 0 Ch 0. */
	QM_RW uint32_t dma_0_int_1_mask;        /**< DMA 0 Ch 1. */
	QM_RW uint32_t dma_0_int_2_mask;        /**< DMA 0 Ch 2. */
	QM_RW uint32_t dma_0_int_3_mask;        /**< DMA 0 Ch 3. */
	QM_RW uint32_t dma_0_int_4_mask;        /**< DMA 0 Ch 4. */
	QM_RW uint32_t dma_0_int_5_mask;        /**< DMA 0 Ch 5. */
	QM_RW uint32_t dma_0_int_6_mask;        /**< DMA 0 Ch 6. */
	QM_RW uint32_t dma_0_int_7_mask;        /**< DMA 0 Ch 7. */
	/** Mailbox 0 Combined 8 Channel Host and Sensor Masks. */
	QM_RW uint32_t mailbox_0_int_mask;
	/** Comparator Sensor Halt Mask. */
	QM_RW uint32_t comparator_0_ss_halt_int_mask;
	/** Comparator Host Halt Mask. */
	QM_RW uint32_t comparator_0_host_halt_int_mask;
	/** Comparator Sensor Mask. */
	QM_RW uint32_t comparator_0_ss_int_mask;
	/** Comparator Host Mask. */
	QM_RW uint32_t comparator_0_host_int_mask;
	QM_RW uint32_t host_bus_error_int_mask; /**< Host bus error. */
	QM_RW uint32_t dma_0_error_int_mask;    /**< DMA 0 Error. */
	QM_RW uint32_t sram_mpr_0_int_mask;     /**< SRAM MPR 0. */
	QM_RW uint32_t flash_mpr_0_int_mask;    /**< Flash MPR 0. */
	QM_RW uint32_t flash_mpr_1_int_mask;    /**< Flash MPR 1. */
	QM_RW uint32_t aonpt_0_int_mask;        /**< AONPT 0. */
	QM_RW uint32_t adc_0_pwr_int_mask;      /**< ADC 0 PWR. */
	QM_RW uint32_t adc_0_cal_int_mask;      /**< ADC 0 CAL. */
	QM_RW uint32_t aon_gpio_0_int_mask;     /**< AON GPIO 0. */
	QM_RW uint32_t lock_int_mask_reg; /**< Interrupt Mask Lock Register. */
} qm_interrupt_router_reg_t;
/* Number of SCSS interrupt mask registers (excluding mask lock register). */
#define QM_INTERRUPT_ROUTER_MASK_NUMREG \
((sizeof(qm_interrupt_router_reg_t) / sizeof(uint32_t)) - 1)
/* Default POR SCSS interrupt mask (all interrupts masked). */
#define QM_INTERRUPT_ROUTER_MASK_DEFAULT (0xFFFFFFFF)
#if (UNIT_TEST)
qm_interrupt_router_reg_t test_interrupt_router;
#define QM_INTERRUPT_ROUTER \
((qm_interrupt_router_reg_t *)(&test_interrupt_router))
#else
/* System control subsystem interrupt masking register block. */
#define QM_INTERRUPT_ROUTER_BASE (0xB0800400)
#define QM_INTERRUPT_ROUTER \
((qm_interrupt_router_reg_t *)QM_INTERRUPT_ROUTER_BASE)
#endif
#define QM_IR_DMA_ERROR_HOST_MASK (0x000000FF)
#define QM_IR_DMA_ERROR_SS_MASK (0x0000FF00)
/** @} */
#endif /* __QM_INTERRUPT_ROUTER_REGS_H__ */

View file

@ -31,6 +31,7 @@
#define __SENSOR_REGISTERS_H__ #define __SENSOR_REGISTERS_H__
#include "qm_common.h" #include "qm_common.h"
#include "qm_soc_interrupts.h"
/** /**
* Quark SE SoC Sensor Subsystem Registers. * Quark SE SoC Sensor Subsystem Registers.
@ -89,6 +90,10 @@ uint32_t test_sensor_aux[QM_SS_AUX_REGS_SIZE];
/* Sensor Subsystem status32 register. */ /* Sensor Subsystem status32 register. */
#define QM_SS_AUX_STATUS32 (0xA) #define QM_SS_AUX_STATUS32 (0xA)
/** Interrupt priority threshold. */
#define QM_SS_STATUS32_E_MASK (0x1E)
/** Interrupt enable. */
#define QM_SS_STATUS32_IE_MASK BIT(31)
/* Sensor Subsystem control register. */ /* Sensor Subsystem control register. */
#define QM_SS_AUX_IC_CTRL (0x11) #define QM_SS_AUX_IC_CTRL (0x11)
/* Sensor Subsystem cache invalidate register. */ /* Sensor Subsystem cache invalidate register. */
@ -96,6 +101,34 @@ uint32_t test_sensor_aux[QM_SS_AUX_REGS_SIZE];
/* Sensor Subsystem vector base register. */ /* Sensor Subsystem vector base register. */
#define QM_SS_AUX_INT_VECTOR_BASE (0x25) #define QM_SS_AUX_INT_VECTOR_BASE (0x25)
/**
* @name SS Interrupt
* @{
*/
/**
* SS IRQ context type.
*
* Applications should not modify the content.
* This structure is only intended to be used by
* qm_irq_save_context and qm_irq_restore_context functions.
*/
typedef struct {
uint32_t status32_irq_threshold; /**< STATUS32 Interrupt Threshold. */
uint32_t status32_irq_enable; /**< STATUS32 Interrupt Enable. */
uint32_t irq_ctrl; /**< Interrupt Context Saving Control Register. */
/**
* IRQ configuration:
* - IRQ Priority:BIT(6):BIT(2)
* - IRQ Trigger:BIT(1)
* - IRQ Enable:BIT(0)
*/
uint8_t irq_config[QM_SS_INT_VECTOR_NUM - 1];
} qm_irq_context_t;
/** @} */
/** /**
* @name SS Timer * @name SS Timer
* @{ * @{
@ -112,6 +145,19 @@ typedef enum {
*/ */
typedef enum { QM_SS_TIMER_0 = 0, QM_SS_TIMER_NUM } qm_ss_timer_t; typedef enum { QM_SS_TIMER_0 = 0, QM_SS_TIMER_NUM } qm_ss_timer_t;
/*
* SS TIMER context type.
*
* Application should not modify the content.
* This structure is only intended to be used by the qm_ss_timer_save_context
* and qm_ss_timer_restore_context functions.
*/
typedef struct {
uint32_t timer_count; /**< Timer count. */
uint32_t timer_control; /**< Timer control. */
uint32_t timer_limit; /**< Timer limit. */
} qm_ss_timer_context_t;
#define QM_SS_TIMER_0_BASE (0x21) #define QM_SS_TIMER_0_BASE (0x21)
#define QM_SS_TIMER_1_BASE (0x100) #define QM_SS_TIMER_1_BASE (0x100)
#define QM_SS_TSC_BASE QM_SS_TIMER_1_BASE #define QM_SS_TSC_BASE QM_SS_TIMER_1_BASE
@ -144,6 +190,24 @@ typedef enum {
QM_SS_GPIO_LS_SYNC QM_SS_GPIO_LS_SYNC
} qm_ss_gpio_reg_t; } qm_ss_gpio_reg_t;
/**
* SS GPIO context type.
*
* Application should not modify the content.
* This structure is only intended to be used by the qm_ss_gpio_save_context and
* qm_ss_gpio_restore_context functions.
*/
typedef struct {
uint32_t gpio_swporta_dr; /**< Port A Data. */
uint32_t gpio_swporta_ddr; /**< Port A Data Direction. */
uint32_t gpio_inten; /**< Interrupt Enable. */
uint32_t gpio_intmask; /**< Interrupt Mask. */
uint32_t gpio_inttype_level; /**< Interrupt Type. */
uint32_t gpio_int_polarity; /**< Interrupt Polarity. */
uint32_t gpio_debounce; /**< Debounce Enable. */
uint32_t gpio_ls_sync; /**< Synchronization Level. */
} qm_ss_gpio_context_t;
#define QM_SS_GPIO_NUM_PINS (16) #define QM_SS_GPIO_NUM_PINS (16)
#define QM_SS_GPIO_LS_SYNC_CLK_EN BIT(31) #define QM_SS_GPIO_LS_SYNC_CLK_EN BIT(31)
#define QM_SS_GPIO_LS_SYNC_SYNC_LVL BIT(0) #define QM_SS_GPIO_LS_SYNC_SYNC_LVL BIT(0)
@ -181,6 +245,19 @@ typedef enum {
QM_SS_I2C_ENABLE_STATUS = 0x11 QM_SS_I2C_ENABLE_STATUS = 0x11
} qm_ss_i2c_reg_t; } qm_ss_i2c_reg_t;
/**
 * SS I2C context type.
 *
 * Application should not modify the content.
 * This structure is only intended to be used by the qm_ss_i2c_save_context
 * and qm_ss_i2c_restore_context functions.
 * (Comment previously referenced the GPIO save/restore functions; that was
 * a copy-paste from the GPIO context block above.)
 */
typedef struct {
	uint32_t i2c_con;        /**< Control register. */
	uint32_t i2c_ss_scl_cnt; /**< Standard-speed SCL count register. */
	uint32_t i2c_fs_scl_cnt; /**< Fast-speed SCL count register. */
} qm_ss_i2c_context_t;
#define QM_SS_I2C_CON_ENABLE BIT(0) #define QM_SS_I2C_CON_ENABLE BIT(0)
#define QM_SS_I2C_CON_ABORT BIT(1) #define QM_SS_I2C_CON_ABORT BIT(1)
#define QM_SS_I2C_CON_SPEED_SS BIT(3) #define QM_SS_I2C_CON_SPEED_SS BIT(3)
@ -272,6 +349,21 @@ typedef enum {
QM_SS_ADC_NUM QM_SS_ADC_NUM
} qm_ss_adc_t; } qm_ss_adc_t;
/**
 * SS ADC context type.
 *
 * The application should not modify the content of this structure.
 *
 * This structure is intended to be used by qm_ss_adc_save_context and
 * qm_ss_adc_restore_context functions only.
 */
typedef struct {
	uint32_t adc_set;        /**< ADC settings. */
	uint32_t adc_divseqstat; /**< ADC clock divider and sequencer status. */
	uint32_t adc_seq;        /**< ADC sequencer entry. */
	uint32_t adc_ctrl;       /**< ADC control. */
} qm_ss_adc_context_t;
/* SS ADC register base. */ /* SS ADC register base. */
#define QM_SS_ADC_BASE (0x80015000) #define QM_SS_ADC_BASE (0x80015000)
@ -366,109 +458,6 @@ typedef enum {
/** @} */ /** @} */
/**
* IRQs and interrupt vectors.
*
* @name SS Interrupt
* @{
*/
#define QM_SS_EXCEPTION_NUM (16) /* Exceptions and traps in ARC EM core. */
#define QM_SS_INT_TIMER_NUM (2) /* Internal interrupts in ARC EM core. */
#define QM_SS_IRQ_SENSOR_NUM (18) /* IRQ's from the Sensor Subsystem. */
#define QM_SS_IRQ_COMMON_NUM (32) /* IRQ's from the common SoC fabric. */
#define QM_SS_INT_VECTOR_NUM \
(QM_SS_EXCEPTION_NUM + QM_SS_INT_TIMER_NUM + QM_SS_IRQ_SENSOR_NUM + \
QM_SS_IRQ_COMMON_NUM)
#define QM_SS_IRQ_NUM (QM_SS_IRQ_SENSOR_NUM + QM_SS_IRQ_COMMON_NUM)
/*
* The following definitions are Sensor Subsystem interrupt irq and vector
* numbers:
* #define QM_SS_xxx - irq number
* #define QM_SS_xxx_VECTOR - vector number
*/
#define QM_SS_INT_TIMER_0 16
#define QM_SS_INT_TIMER_1 17
#define QM_SS_IRQ_ADC_ERR 0
#define QM_SS_IRQ_ADC_ERR_VECTOR 18
#define QM_SS_IRQ_ADC_IRQ 1
#define QM_SS_IRQ_ADC_IRQ_VECTOR 19
#define QM_SS_IRQ_GPIO_INTR_0 2
#define QM_SS_IRQ_GPIO_INTR_0_VECTOR 20
#define QM_SS_IRQ_GPIO_INTR_1 3
#define QM_SS_IRQ_GPIO_INTR_1_VECTOR 21
#define QM_SS_IRQ_I2C_0_ERR 4
#define QM_SS_IRQ_I2C_0_ERR_VECTOR 22
#define QM_SS_IRQ_I2C_0_RX_AVAIL 5
#define QM_SS_IRQ_I2C_0_RX_AVAIL_VECTOR 23
#define QM_SS_IRQ_I2C_0_TX_REQ 6
#define QM_SS_IRQ_I2C_0_TX_REQ_VECTOR 24
#define QM_SS_IRQ_I2C_0_STOP_DET 7
#define QM_SS_IRQ_I2C_0_STOP_DET_VECTOR 25
#define QM_SS_IRQ_I2C_1_ERR 8
#define QM_SS_IRQ_I2C_1_ERR_VECTOR 26
#define QM_SS_IRQ_I2C_1_RX_AVAIL 9
#define QM_SS_IRQ_I2C_1_RX_AVAIL_VECTOR 27
#define QM_SS_IRQ_I2C_1_TX_REQ 10
#define QM_SS_IRQ_I2C_1_TX_REQ_VECTOR 28
#define QM_SS_IRQ_I2C_1_STOP_DET 11
#define QM_SS_IRQ_I2C_1_STOP_DET_VECTOR 29
#define QM_SS_IRQ_SPI_0_ERR_INT 12
#define QM_SS_IRQ_SPI_0_ERR_INT_VECTOR 30
#define QM_SS_IRQ_SPI_0_RX_AVAIL 13
#define QM_SS_IRQ_SPI_0_RX_AVAIL_VECTOR 31
#define QM_SS_IRQ_SPI_0_TX_REQ 14
#define QM_SS_IRQ_SPI_0_TX_REQ_VECTOR 32
#define QM_SS_IRQ_SPI_1_ERR_INT 15
#define QM_SS_IRQ_SPI_1_ERR_INT_VECTOR 33
#define QM_SS_IRQ_SPI_1_RX_AVAIL 16
#define QM_SS_IRQ_SPI_1_RX_AVAIL_VECTOR 34
#define QM_SS_IRQ_SPI_1_TX_REQ 17
#define QM_SS_IRQ_SPI_1_TX_REQ_VECTOR 35
typedef enum {
QM_SS_INT_PRIORITY_0 = 0,
QM_SS_INT_PRIORITY_1 = 1,
QM_SS_INT_PRIORITY_15 = 15,
QM_SS_INT_PRIORITY_NUM
} qm_ss_irq_priority_t;
typedef enum { QM_SS_INT_DISABLE = 0, QM_SS_INT_ENABLE = 1 } qm_ss_irq_mask_t;
typedef enum {
QM_SS_IRQ_LEVEL_SENSITIVE = 0,
QM_SS_IRQ_EDGE_SENSITIVE = 1
} qm_ss_irq_trigger_t;
#define QM_SS_AUX_IRQ_CTRL (0xE)
#define QM_SS_AUX_IRQ_HINT (0x201)
#define QM_SS_AUX_IRQ_PRIORITY (0x206)
#define QM_SS_AUX_IRQ_STATUS (0x406)
#define QM_SS_AUX_IRQ_SELECT (0x40B)
#define QM_SS_AUX_IRQ_ENABLE (0x40C)
#define QM_SS_AUX_IRQ_TRIGGER (0x40D)
/** @} */
/** /**
* I2C registers and definitions. * I2C registers and definitions.
* *
@ -491,6 +480,19 @@ typedef enum {
QM_SS_SPI_DR, /**< RW buffer for FIFOs. */ QM_SS_SPI_DR, /**< RW buffer for FIFOs. */
} qm_ss_spi_reg_t; } qm_ss_spi_reg_t;
/**
* Sensor Subsystem SPI context type.
*
* Applications should not modify the content.
* This structure is only intended to be used by
* the qm_ss_spi_save_context and qm_ss_spi_restore_context functions.
*/
typedef struct {
uint32_t spi_ctrl; /**< Control Register. */
uint32_t spi_spien; /**< SPI Enable Register. */
uint32_t spi_timing; /**< Timing Register. */
} qm_ss_spi_context_t;
/** Sensor Subsystem SPI modules. */ /** Sensor Subsystem SPI modules. */
typedef enum { typedef enum {
QM_SS_SPI_0 = 0, /**< SPI module 0 */ QM_SS_SPI_0 = 0, /**< SPI module 0 */

View file

@ -0,0 +1,410 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_SOC_INTERRUPTS_H__
#define __QM_SOC_INTERRUPTS_H__
/**
* Quark SE SoC Interrupts.
*
* @defgroup groupQUARKSESEINT SoC Interrupts (SE)
* @{
*/
#if (QM_LAKEMONT)
/* x86 internal interrupt vectors. */
#define QM_X86_DIVIDE_ERROR_INT (0)
#define QM_X86_DEBUG_EXCEPTION_INT (1)
#define QM_X86_NMI_INTERRUPT_INT (2)
#define QM_X86_BREAKPOINT_INT (3)
#define QM_X86_OVERFLOW_INT (4)
#define QM_X86_BOUND_RANGE_EXCEEDED_INT (5)
#define QM_X86_INVALID_OPCODE_INT (6)
#define QM_X86_DEVICE_NOT_AVAILABLE_INT (7)
#define QM_X86_DOUBLE_FAULT_INT (8)
#define QM_X86_INTEL_RESERVED_09_INT (9)
#define QM_X86_INVALID_TSS_INT (10)
#define QM_X86_SEGMENT_NOT_PRESENT_INT (11)
#define QM_X86_STACK_SEGMENT_FAULT_INT (12)
#define QM_X86_GENERAL_PROTECT_FAULT_INT (13)
#define QM_X86_PAGE_FAULT_INT (14)
#define QM_X86_INTEL_RESERVED_15_INT (15)
#define QM_X86_FLOATING_POINT_ERROR_INT (16)
#define QM_X86_ALIGNMENT_CHECK_INT (17)
#define QM_X86_INTEL_RESERVED_18_INT (18)
#define QM_X86_INTEL_RESERVED_19_INT (19)
#define QM_X86_INTEL_RESERVED_20_INT (20)
#define QM_X86_INTEL_RESERVED_21_INT (21)
#define QM_X86_INTEL_RESERVED_22_INT (22)
#define QM_X86_INTEL_RESERVED_23_INT (23)
#define QM_X86_INTEL_RESERVED_24_INT (24)
#define QM_X86_INTEL_RESERVED_25_INT (25)
#define QM_X86_INTEL_RESERVED_26_INT (26)
#define QM_X86_INTEL_RESERVED_27_INT (27)
#define QM_X86_INTEL_RESERVED_28_INT (28)
#define QM_X86_INTEL_RESERVED_29_INT (29)
#define QM_X86_INTEL_RESERVED_30_INT (30)
#define QM_X86_INTEL_RESERVED_31_INT (31)
#define QM_X86_PIC_TIMER_INT_VECTOR (32)
#endif /* QM_LAKEMONT */
#if (QM_SENSOR)
/* ARC EM processor internal interrupt vector assignments. */
#define QM_ARC_RESET_INT (0)
#define QM_ARC_MEMORY_ERROR_INT (1)
#define QM_ARC_INSTRUCTION_ERROR_INT (2)
#define QM_ARC_MACHINE_CHECK_EXCEPTION_INT (3)
#define QM_ARC_INSTRUCTION_TLB_MISS_INT (4)
#define QM_ARC_DATA_TLB_MISS_INT (5)
#define QM_ARC_PROTECTION_VIOLATION_INT (6)
#define QM_ARC_PRIVILEGE_VIOLATION_INT (7)
#define QM_ARC_SOFTWARE_INTERRUPT_INT (8)
#define QM_ARC_TRAP_INT (9)
#define QM_ARC_EXTENSION_INSTRUCTION_EXCEPTION_INT (10)
#define QM_ARC_DIVIDE_BY_ZERO_INT (11)
#define QM_ARC_DATA_CACHE_CONSISTENCY_ERROR_INT (12)
#define QM_ARC_MISALIGNED_DATA_ACCESS_INT (13)
#define QM_ARC_RESERVED_14_INT (14)
#define QM_ARC_RESERVED_15_INT (15)
#define QM_ARC_TIMER_0_INT (16)
#define QM_ARC_TIMER_1_INT (17)
#endif /* QM_SENSOR */
#if (QM_SENSOR)
/**
* Sensor Sub-System Specific IRQs and interrupt vectors.
*
* @name SS Interrupt
* @{
*/
#define QM_SS_EXCEPTION_NUM (16) /* Exceptions and traps in ARC EM core. */
#define QM_SS_INT_TIMER_NUM (2) /* Internal interrupts in ARC EM core. */
#define QM_SS_IRQ_SENSOR_NUM (18) /* IRQ's from the Sensor Subsystem. */
#define QM_SS_IRQ_COMMON_NUM (32) /* IRQ's from the common SoC fabric. */
/* Total vector count: exceptions + core timers + SS IRQs + common IRQs. */
#define QM_SS_INT_VECTOR_NUM \
	(QM_SS_EXCEPTION_NUM + QM_SS_INT_TIMER_NUM + QM_SS_IRQ_SENSOR_NUM + \
	 QM_SS_IRQ_COMMON_NUM)
#define QM_SS_IRQ_NUM (QM_SS_IRQ_SENSOR_NUM + QM_SS_IRQ_COMMON_NUM)
/*
 * The following definitions are Sensor Subsystem interrupt irq and vector
 * numbers:
 * #define QM_SS_xxx - irq number
 * #define QM_SS_xxx_VECTOR - vector number
 *
 * For every entry below, vector number = IRQ number + 18, because the
 * 16 exception vectors and 2 core-timer vectors come first.
 */
/** Sensor Subsystem ADC Rx Fifo Error Interrupt. */
#define QM_SS_IRQ_ADC_0_ERROR_INT 0
#define QM_SS_IRQ_ADC_0_ERROR_INT_VECTOR 18
/** Sensor Subsystem ADC Data Available Interrupt. */
#define QM_SS_IRQ_ADC_0_INT 1
#define QM_SS_IRQ_ADC_0_INT_VECTOR 19
/** Sensor Subsystem GPIO Single Interrupt 0 */
#define QM_SS_IRQ_GPIO_0_INT 2
#define QM_SS_IRQ_GPIO_0_INT_VECTOR 20
/** Sensor Subsystem GPIO Single Interrupt 1. */
#define QM_SS_IRQ_GPIO_1_INT 3
#define QM_SS_IRQ_GPIO_1_INT_VECTOR 21
/** Sensor Subsystem I2C 0 Error Interrupt. */
#define QM_SS_IRQ_I2C_0_ERROR_INT 4
#define QM_SS_IRQ_I2C_0_ERROR_INT_VECTOR 22
/** Sensor Subsystem I2C 0 Data Available Interrupt. */
#define QM_SS_IRQ_I2C_0_RX_AVAIL_INT 5
#define QM_SS_IRQ_I2C_0_RX_AVAIL_INT_VECTOR 23
/** Sensor Subsystem I2C 0 Data Required Interrupt. */
#define QM_SS_IRQ_I2C_0_TX_REQ_INT 6
#define QM_SS_IRQ_I2C_0_TX_REQ_INT_VECTOR 24
/** Sensor Subsystem I2C 0 Stop Detect Interrupt. */
#define QM_SS_IRQ_I2C_0_STOP_DET_INT 7
#define QM_SS_IRQ_I2C_0_STOP_DET_INT_VECTOR 25
/** Sensor Subsystem I2C 1 Error Interrupt. */
#define QM_SS_IRQ_I2C_1_ERROR_INT 8
#define QM_SS_IRQ_I2C_1_ERROR_INT_VECTOR 26
/** Sensor Subsystem I2C 1 Data Available Interrupt. */
#define QM_SS_IRQ_I2C_1_RX_AVAIL_INT 9
#define QM_SS_IRQ_I2C_1_RX_AVAIL_INT_VECTOR 27
/** Sensor Subsystem I2C 1 Data Required Interrupt. */
#define QM_SS_IRQ_I2C_1_TX_REQ_INT 10
#define QM_SS_IRQ_I2C_1_TX_REQ_INT_VECTOR 28
/** Sensor Subsystem I2C 1 Stop Detect Interrupt. */
#define QM_SS_IRQ_I2C_1_STOP_DET_INT 11
#define QM_SS_IRQ_I2C_1_STOP_DET_INT_VECTOR 29
/** Sensor Subsystem SPI 0 Error Interrupt. */
#define QM_SS_IRQ_SPI_0_ERROR_INT 12
#define QM_SS_IRQ_SPI_0_ERROR_INT_VECTOR 30
/** Sensor Subsystem SPI 0 Data Available Interrupt. */
#define QM_SS_IRQ_SPI_0_RX_AVAIL_INT 13
#define QM_SS_IRQ_SPI_0_RX_AVAIL_INT_VECTOR 31
/** Sensor Subsystem SPI 0 Data Required Interrupt. */
#define QM_SS_IRQ_SPI_0_TX_REQ_INT 14
#define QM_SS_IRQ_SPI_0_TX_REQ_INT_VECTOR 32
/** Sensor Subsystem SPI 1 Error Interrupt. */
#define QM_SS_IRQ_SPI_1_ERROR_INT 15
#define QM_SS_IRQ_SPI_1_ERROR_INT_VECTOR 33
/** Sensor Subsystem SPI 1 Data Available Interrupt. */
#define QM_SS_IRQ_SPI_1_RX_AVAIL_INT 16
#define QM_SS_IRQ_SPI_1_RX_AVAIL_INT_VECTOR 34
/** Sensor Subsystem SPI 1 Data Required Interrupt. */
#define QM_SS_IRQ_SPI_1_TX_REQ_INT 17
#define QM_SS_IRQ_SPI_1_TX_REQ_INT_VECTOR 35
/**
 * Sensor Subsystem interrupt priority level.
 *
 * Only levels 0, 1 and 15 are given names; QM_SS_INT_PRIORITY_NUM is one
 * past the highest level, i.e. the total number of priority levels (16).
 */
typedef enum {
	QM_SS_INT_PRIORITY_0 = 0,
	QM_SS_INT_PRIORITY_1 = 1,
	QM_SS_INT_PRIORITY_15 = 15,
	QM_SS_INT_PRIORITY_NUM = 16 /* Made explicit; was implicit (15 + 1). */
} qm_ss_irq_priority_t;
/** Sensor Subsystem interrupt enable state. */
typedef enum {
	QM_SS_INT_DISABLE = 0, /**< Interrupt disabled. */
	QM_SS_INT_ENABLE = 1   /**< Interrupt enabled. */
} qm_ss_irq_mask_t;
/** Sensor Subsystem interrupt trigger type (level- or edge-sensitive). */
typedef enum {
	QM_SS_IRQ_LEVEL_SENSITIVE = 0, /**< Level-sensitive trigger. */
	QM_SS_IRQ_EDGE_SENSITIVE = 1   /**< Edge-sensitive trigger. */
} qm_ss_irq_trigger_t;
/*
 * ARC EM auxiliary-register numbers for the core interrupt unit.
 * NOTE(review): values taken as-is from the vendor drop; verify against the
 * ARC EM programmer's reference before changing.
 */
#define QM_SS_AUX_IRQ_CTRL (0xE)      /* Context save/restore control. */
#define QM_SS_AUX_IRQ_HINT (0x201)    /* Software interrupt trigger. */
#define QM_SS_AUX_IRQ_PRIORITY (0x206) /* Priority of the selected IRQ. */
#define QM_SS_AUX_IRQ_STATUS (0x406)  /* Status of the selected IRQ. */
#define QM_SS_AUX_IRQ_SELECT (0x40B)  /* Selects IRQ for the regs below. */
#define QM_SS_AUX_IRQ_ENABLE (0x40C)  /* Enable of the selected IRQ. */
#define QM_SS_AUX_IRQ_TRIGGER (0x40D) /* Trigger type of the selected IRQ. */
/** @} */
#endif /* QM_SENSOR */
/**
* @name Common SoC IRQs and Interrupts
* @{
*/
/* IRQs and interrupt vectors.
 *
 * Any IRQ > 1 actually has an event router mask register offset of +1.
 * The vector numbers must be defined without arithmetic expressions nor
 * parentheses because they are expanded as token concatenation.
 *
 * For every entry below, vector number = IRQ number + 36.  Mask offsets
 * equal the IRQ number for IRQ 0-1 and IRQ + 1 from IRQ 2 up to the
 * mailbox entry (IRQ 21 -> offset 22); the comparator entry then jumps
 * to offset 26.
 */
/** I2C Master 0 Single Interrupt. */
#define QM_IRQ_I2C_0_INT 0
#define QM_IRQ_I2C_0_INT_MASK_OFFSET 0
#define QM_IRQ_I2C_0_INT_VECTOR 36
/** I2C Master 1 Single Interrupt. */
#define QM_IRQ_I2C_1_INT 1
#define QM_IRQ_I2C_1_INT_MASK_OFFSET 1
#define QM_IRQ_I2C_1_INT_VECTOR 37
/** SPI Master 0 Single Interrupt. */
#define QM_IRQ_SPI_MASTER_0_INT 2
#define QM_IRQ_SPI_MASTER_0_INT_MASK_OFFSET 3
#define QM_IRQ_SPI_MASTER_0_INT_VECTOR 38
/** SPI Master 1 Single Interrupt. */
#define QM_IRQ_SPI_MASTER_1_INT 3
#define QM_IRQ_SPI_MASTER_1_INT_MASK_OFFSET 4
#define QM_IRQ_SPI_MASTER_1_INT_VECTOR 39
/** SPI Slave Single Interrupt. */
#define QM_IRQ_SPI_SLAVE_0_INT 4
#define QM_IRQ_SPI_SLAVE_0_INT_MASK_OFFSET 5
#define QM_IRQ_SPI_SLAVE_0_INT_VECTOR 40
/** UART 0 Single Interrupt. */
#define QM_IRQ_UART_0_INT 5
#define QM_IRQ_UART_0_INT_MASK_OFFSET 6
#define QM_IRQ_UART_0_INT_VECTOR 41
/** UART 1 Single Interrupt. */
#define QM_IRQ_UART_1_INT 6
#define QM_IRQ_UART_1_INT_MASK_OFFSET 7
#define QM_IRQ_UART_1_INT_VECTOR 42
/** I2S Single Interrupt. */
#define QM_IRQ_I2S_0_INT 7
#define QM_IRQ_I2S_0_INT_MASK_OFFSET 8
#define QM_IRQ_I2S_0_INT_VECTOR 43
/** GPIO Single Interrupt. */
#define QM_IRQ_GPIO_0_INT 8
#define QM_IRQ_GPIO_0_INT_MASK_OFFSET 9
#define QM_IRQ_GPIO_0_INT_VECTOR 44
/** PWM/Timer Single Interrupt. */
#define QM_IRQ_PWM_0_INT 9
#define QM_IRQ_PWM_0_INT_MASK_OFFSET 10
#define QM_IRQ_PWM_0_INT_VECTOR 45
/** USB Single Interrupt. */
#define QM_IRQ_USB_0_INT (10)
#define QM_IRQ_USB_0_INT_MASK_OFFSET (11)
#define QM_IRQ_USB_0_INT_VECTOR 46
/** RTC Single Interrupt. */
#define QM_IRQ_RTC_0_INT 11
#define QM_IRQ_RTC_0_INT_MASK_OFFSET 12
#define QM_IRQ_RTC_0_INT_VECTOR 47
/** WDT Single Interrupt. */
#define QM_IRQ_WDT_0_INT 12
#define QM_IRQ_WDT_0_INT_MASK_OFFSET 13
#define QM_IRQ_WDT_0_INT_VECTOR 48
/** DMA Channel 0 Single Interrupt. */
#define QM_IRQ_DMA_0_INT_0 13
#define QM_IRQ_DMA_0_INT_0_MASK_OFFSET 14
#define QM_IRQ_DMA_0_INT_0_VECTOR 49
/** DMA Channel 1 Single Interrupt. */
#define QM_IRQ_DMA_0_INT_1 14
#define QM_IRQ_DMA_0_INT_1_MASK_OFFSET 15
#define QM_IRQ_DMA_0_INT_1_VECTOR 50
/** DMA Channel 2 Single Interrupt. */
#define QM_IRQ_DMA_0_INT_2 15
#define QM_IRQ_DMA_0_INT_2_MASK_OFFSET 16
#define QM_IRQ_DMA_0_INT_2_VECTOR 51
/** DMA Channel 3 Single Interrupt. */
#define QM_IRQ_DMA_0_INT_3 16
#define QM_IRQ_DMA_0_INT_3_MASK_OFFSET 17
#define QM_IRQ_DMA_0_INT_3_VECTOR 52
/** DMA Channel 4 Single Interrupt. */
#define QM_IRQ_DMA_0_INT_4 17
#define QM_IRQ_DMA_0_INT_4_MASK_OFFSET 18
#define QM_IRQ_DMA_0_INT_4_VECTOR 53
/** DMA Channel 5 Single Interrupt. */
#define QM_IRQ_DMA_0_INT_5 18
#define QM_IRQ_DMA_0_INT_5_MASK_OFFSET 19
#define QM_IRQ_DMA_0_INT_5_VECTOR 54
/** DMA Channel 6 Single Interrupt. */
#define QM_IRQ_DMA_0_INT_6 19
#define QM_IRQ_DMA_0_INT_6_MASK_OFFSET 20
#define QM_IRQ_DMA_0_INT_6_VECTOR 55
/** DMA Channel 7 Single Interrupt. */
#define QM_IRQ_DMA_0_INT_7 20
#define QM_IRQ_DMA_0_INT_7_MASK_OFFSET 21
#define QM_IRQ_DMA_0_INT_7_VECTOR 56
/**
 * 8 Mailbox Channel Interrupts Routed to Single Interrupt
 * with 8-bit Mask per Destination.
 */
#define QM_IRQ_MAILBOX_0_INT 21
#define QM_IRQ_MAILBOX_0_INT_MASK_OFFSET 22
#define QM_IRQ_MAILBOX_0_INT_VECTOR 57
/**
 * 19 Comparators Routed to Single Interrupt with 19-bit Mask per Destination.
 */
#define QM_IRQ_COMPARATOR_0_INT 22
#define QM_IRQ_COMPARATOR_0_INT_MASK_OFFSET 26
#define QM_IRQ_COMPARATOR_0_INT_VECTOR 58
/** System and Power Management Single Interrupt. */
/*
 * NOTE(review): mask offset 26 and vector 58 are identical to
 * QM_IRQ_COMPARATOR_0_INT above, breaking the vector = IRQ + 36 pattern
 * that every other entry follows.  The event router register map has no
 * dedicated PMU mask register, so the PMU event appears to be routed
 * through the comparator line intentionally — confirm against the
 * Quark SE datasheet before relying on a distinct PMU vector.
 */
#define QM_IRQ_PMU_0_INT 23
#define QM_IRQ_PMU_0_INT_MASK_OFFSET 26
#define QM_IRQ_PMU_0_INT_VECTOR 58
/**
 * 8 DMA Channel Error Interrupts Routed to Single Interrupt with 8-bit Mask
 * per Destination.
 */
#define QM_IRQ_DMA_0_ERROR_INT 24
#define QM_IRQ_DMA_0_ERROR_INT_MASK_OFFSET 28
#define QM_IRQ_DMA_0_ERROR_INT_VECTOR 60
/** Internal SRAM Memory Protection Error Single Interrupt. */
#define QM_IRQ_SRAM_MPR_0_INT 25
#define QM_IRQ_SRAM_MPR_0_INT_MASK_OFFSET 29
#define QM_IRQ_SRAM_MPR_0_INT_VECTOR 61
/** Internal Flash Controller 0 Memory Protection Error Single Interrupt. */
#define QM_IRQ_FLASH_MPR_0_INT 26
#define QM_IRQ_FLASH_MPR_0_INT_MASK_OFFSET 30
#define QM_IRQ_FLASH_MPR_0_INT_VECTOR 62
/** Internal Flash Controller 1 Memory Protection Error Single Interrupt. */
#define QM_IRQ_FLASH_MPR_1_INT 27
#define QM_IRQ_FLASH_MPR_1_INT_MASK_OFFSET 31
#define QM_IRQ_FLASH_MPR_1_INT_VECTOR 63
/** Always-On Timer Interrupt. */
#define QM_IRQ_AONPT_0_INT 28
#define QM_IRQ_AONPT_0_INT_MASK_OFFSET 32
#define QM_IRQ_AONPT_0_INT_VECTOR 64
/** ADC power sequence done. */
/* The two ADC entries below route through the common event router but keep
 * the QM_SS_ prefix, presumably because the ADC belongs to the Sensor
 * Subsystem — verify naming intent against the vendor drop. */
#define QM_SS_IRQ_ADC_0_PWR_INT 29
#define QM_SS_IRQ_ADC_0_PWR_INT_MASK_OFFSET 33
#define QM_SS_IRQ_ADC_0_PWR_INT_VECTOR 65
/** ADC calibration done. */
#define QM_SS_IRQ_ADC_0_CAL_INT 30
#define QM_SS_IRQ_ADC_0_CAL_INT_MASK_OFFSET 34
#define QM_SS_IRQ_ADC_0_CAL_INT_VECTOR 66
/** Always-On GPIO Interrupt. */
#define QM_IRQ_AON_GPIO_0_INT 31
#define QM_IRQ_AON_GPIO_0_INT_MASK_OFFSET 35
#define QM_IRQ_AON_GPIO_0_INT_VECTOR 67
/** @} */
/** @} */
#endif /* __QM_SOC_INTERRUPTS_H__ */

View file

@ -31,6 +31,8 @@
#define __REGISTERS_H__ #define __REGISTERS_H__
#include "qm_common.h" #include "qm_common.h"
#include "qm_soc_interrupts.h"
#include "qm_interrupt_router_regs.h"
/** /**
* Quark SE SoC Registers. * Quark SE SoC Registers.
@ -175,6 +177,11 @@ qm_scss_gp_reg_t test_scss_gp;
#define QM_SCSS_GP ((qm_scss_gp_reg_t *)QM_SCSS_GP_BASE) #define QM_SCSS_GP ((qm_scss_gp_reg_t *)QM_SCSS_GP_BASE)
#endif #endif
/* The GPS0 register usage. */
#define QM_GPS0_BIT_FM (0) /**< Start Firmware Manager. */
#define QM_GPS0_BIT_X86_WAKEUP (1) /**< Lakemont core reset type. */
#define QM_GPS0_BIT_SENSOR_WAKEUP (2) /**< Sensor core reset type. */
/** @} */ /** @} */
/** /**
@ -270,6 +277,39 @@ typedef struct {
QM_RW apic_reg_pad_t timer_dcr; /**< Timer divide configuration */ QM_RW apic_reg_pad_t timer_dcr; /**< Timer divide configuration */
} qm_lapic_reg_t; } qm_lapic_reg_t;
#if (HAS_APIC)
/*
* The size of IOAPIC redirection table, as returned by _ioapic_get_redtbl_size
* function.
*/
#define QM_IOAPIC_NUM_RTES (32)
/**
* IRQ context type.
*
* Applications should not modify the content.
* This structure is only intended to be used by
* qm_irq_save_context and qm_irq_restore_context functions.
*/
typedef struct {
/** Redirection Table Entries. */
uint32_t redtbl_entries[QM_IOAPIC_NUM_RTES];
} qm_irq_context_t;
#endif
/**
* PIC TIMER context type.
*
* Applications should not modify the content.
* This structure is only intended to be used by the qm_pic_timer_save_context
* and qm_pic_timer_restore_context functions.
*/
typedef struct {
uint32_t timer_icr; /**< Initial Count Register. */
uint32_t timer_dcr; /**< Divide Configuration Register. */
uint32_t lvttimer; /**< Timer Entry in Local Vector Table. */
} qm_pic_timer_context_t;
#if (UNIT_TEST) #if (UNIT_TEST)
qm_lapic_reg_t test_lapic; qm_lapic_reg_t test_lapic;
#define QM_LAPIC ((qm_lapic_reg_t *)(&test_lapic)) #define QM_LAPIC ((qm_lapic_reg_t *)(&test_lapic))
@ -319,110 +359,6 @@ qm_ioapic_reg_t test_ioapic;
/** @} */ /** @} */
/**
* @name Interrupt
* @{
*/
/** SS I2C Interrupt register map. */
typedef struct {
QM_RW uint32_t err_mask;
QM_RW uint32_t rx_avail_mask;
QM_RW uint32_t tx_req_mask;
QM_RW uint32_t stop_det_mask;
} int_ss_i2c_reg_t;
/** SS SPI Interrupt register map. */
typedef struct {
QM_RW uint32_t err_int_mask;
QM_RW uint32_t rx_avail_mask;
QM_RW uint32_t tx_req_mask;
} int_ss_spi_reg_t;
/** Interrupt register map. */
typedef struct {
QM_RW uint32_t int_ss_adc_err_mask;
QM_RW uint32_t int_ss_adc_irq_mask;
QM_RW uint32_t int_ss_gpio_0_intr_mask;
QM_RW uint32_t int_ss_gpio_1_intr_mask;
int_ss_i2c_reg_t int_ss_i2c_0;
int_ss_i2c_reg_t int_ss_i2c_1;
int_ss_spi_reg_t int_ss_spi_0;
int_ss_spi_reg_t int_ss_spi_1;
QM_RW uint32_t int_i2c_mst_0_mask;
QM_RW uint32_t int_i2c_mst_1_mask;
QM_RW uint32_t reserved;
QM_RW uint32_t int_spi_mst_0_mask;
QM_RW uint32_t int_spi_mst_1_mask;
QM_RW uint32_t int_spi_slv_mask;
QM_RW uint32_t int_uart_0_mask;
QM_RW uint32_t int_uart_1_mask;
QM_RW uint32_t int_i2s_mask;
QM_RW uint32_t int_gpio_mask;
QM_RW uint32_t int_pwm_timer_mask;
QM_RW uint32_t int_usb_mask;
QM_RW uint32_t int_rtc_mask;
QM_RW uint32_t int_watchdog_mask;
QM_RW uint32_t int_dma_channel_0_mask;
QM_RW uint32_t int_dma_channel_1_mask;
QM_RW uint32_t int_dma_channel_2_mask;
QM_RW uint32_t int_dma_channel_3_mask;
QM_RW uint32_t int_dma_channel_4_mask;
QM_RW uint32_t int_dma_channel_5_mask;
QM_RW uint32_t int_dma_channel_6_mask;
QM_RW uint32_t int_dma_channel_7_mask;
QM_RW uint32_t int_mailbox_mask;
QM_RW uint32_t int_comparators_ss_halt_mask;
QM_RW uint32_t int_comparators_host_halt_mask;
QM_RW uint32_t int_comparators_ss_mask;
QM_RW uint32_t int_comparators_host_mask;
QM_RW uint32_t int_host_bus_err_mask;
QM_RW uint32_t int_dma_error_mask;
QM_RW uint32_t
int_sram_controller_mask; /**< Interrupt Routing Mask 28. */
QM_RW uint32_t
int_flash_controller_0_mask; /**< Interrupt Routing Mask 29. */
QM_RW uint32_t
int_flash_controller_1_mask; /**< Interrupt Routing Mask 30. */
QM_RW uint32_t int_aon_timer_mask; /**< Interrupt Routing Mask 31. */
QM_RW uint32_t int_adc_pwr_mask; /**< Interrupt Routing Mask 32. */
QM_RW uint32_t int_adc_calib_mask; /**< Interrupt Routing Mask 33. */
QM_RW uint32_t int_aon_gpio_mask;
QM_RW uint32_t lock_int_mask_reg; /**< Interrupt Mask Lock Register. */
} qm_scss_int_reg_t;
/* Number of SCSS interrupt mask registers (excluding mask lock register). */
#define QM_SCSS_INT_MASK_NUMREG \
((sizeof(qm_scss_int_reg_t) / sizeof(uint32_t)) - 1)
/* Default POR SCSS interrupt mask (all interrupts masked). */
#define QM_SCSS_INT_MASK_DEFAULT (0xFFFFFFFF)
#if (UNIT_TEST)
qm_scss_int_reg_t test_scss_int;
#define QM_SCSS_INT ((qm_scss_int_reg_t *)(&test_scss_int))
#else
/* System control subsystem interrupt masking register block. */
#define QM_SCSS_INT_BASE (0xB0800400)
#define QM_SCSS_INT ((qm_scss_int_reg_t *)QM_SCSS_INT_BASE)
#endif
#define QM_INT_DMA_ERR_HOST_MASK (0x000000FF)
#define QM_INT_DMA_ERR_SS_MASK (0x0000FF00)
#define QM_INT_SRAM_CONTROLLER_HOST_HALT_MASK BIT(16)
#define QM_INT_SRAM_CONTROLLER_HOST_MASK BIT(0)
#define QM_INT_SRAM_CONTROLLER_SS_HALT_MASK BIT(24)
#define QM_INT_SRAM_CONTROLLER_SS_MASK BIT(8)
#define QM_INT_FLASH_CONTROLLER_HOST_HALT_MASK BIT(16)
#define QM_INT_FLASH_CONTROLLER_HOST_MASK BIT(0)
#define QM_INT_FLASH_CONTROLLER_SS_HALT_MASK BIT(24)
#define QM_INT_FLASH_CONTROLLER_SS_MASK BIT(8)
#define QM_INT_ADC_PWR_MASK BIT(8)
#define QM_INT_ADC_CALIB_MASK BIT(8)
/** @} */
/** /**
* @name Power Management * @name Power Management
* @{ * @{
@ -706,13 +642,17 @@ qm_scss_info_reg_t test_scss_info;
* The interrupt destination adds an offset to the bit shift. * The interrupt destination adds an offset to the bit shift.
*/ */
#define QM_MBOX_ENABLE_LMT_INT_MASK(N) \ #define QM_MBOX_ENABLE_LMT_INT_MASK(N) \
QM_SCSS_INT->int_mailbox_mask &= ~(BIT(N + QM_MBOX_HOST_MASK_OFFSET)) QM_INTERRUPT_ROUTER->mailbox_0_int_mask &= \
~(BIT(N + QM_MBOX_HOST_MASK_OFFSET))
#define QM_MBOX_DISABLE_LMT_INT_MASK(N) \ #define QM_MBOX_DISABLE_LMT_INT_MASK(N) \
QM_SCSS_INT->int_mailbox_mask |= (BIT(N + QM_MBOX_HOST_MASK_OFFSET)) QM_INTERRUPT_ROUTER->mailbox_0_int_mask |= \
(BIT(N + QM_MBOX_HOST_MASK_OFFSET))
#define QM_MBOX_ENABLE_SS_INT_MASK(N) \ #define QM_MBOX_ENABLE_SS_INT_MASK(N) \
QM_SCSS_INT->int_mailbox_mask &= ~(BIT(N + QM_MBOX_SS_MASK_OFFSET)) QM_INTERRUPT_ROUTER->mailbox_0_int_mask &= \
~(BIT(N + QM_MBOX_SS_MASK_OFFSET))
#define QM_MBOX_DISABLE_SS_INT_MASK(N) \ #define QM_MBOX_DISABLE_SS_INT_MASK(N) \
QM_SCSS_INT->int_mailbox_mask |= (BIT(N + QM_MBOX_SS_MASK_OFFSET)) QM_INTERRUPT_ROUTER->mailbox_0_int_mask |= \
(BIT(N + QM_MBOX_SS_MASK_OFFSET))
/** /**
* Mailbox Interrupt Halt Mask enable/disable definitions * Mailbox Interrupt Halt Mask enable/disable definitions
@ -723,41 +663,47 @@ qm_scss_info_reg_t test_scss_info;
* see above for the bit position layout * see above for the bit position layout
*/ */
#define QM_MBOX_ENABLE_LMT_INT_HALT_MASK(N) \ #define QM_MBOX_ENABLE_LMT_INT_HALT_MASK(N) \
QM_SCSS_INT->int_mailbox_mask &= \ QM_INTERRUPT_ROUTER->mailbox_0_int_mask &= \
~(BIT(N + QM_MBOX_HOST_HALT_MASK_OFFSET)) ~(BIT(N + QM_MBOX_HOST_HALT_MASK_OFFSET))
#define QM_MBOX_DISABLE_LMT_INT_HALT_MASK(N) \ #define QM_MBOX_DISABLE_LMT_INT_HALT_MASK(N) \
QM_SCSS_INT->int_mailbox_mask |= \ QM_INTERRUPT_ROUTER->mailbox_0_int_mask |= \
(BIT(N + QM_MBOX_HOST_HALT_MASK_OFFSET)) (BIT(N + QM_MBOX_HOST_HALT_MASK_OFFSET))
#define QM_MBOX_ENABLE_SS_INT_HALT_MASK(N) \ #define QM_MBOX_ENABLE_SS_INT_HALT_MASK(N) \
QM_SCSS_INT->int_mailbox_mask &= ~(BIT(N + QM_MBOX_SS_HALT_MASK_OFFSET)) QM_INTERRUPT_ROUTER->mailbox_0_int_mask &= \
~(BIT(N + QM_MBOX_SS_HALT_MASK_OFFSET))
#define QM_MBOX_DISABLE_SS_INT_HALT_MASK(N) \ #define QM_MBOX_DISABLE_SS_INT_HALT_MASK(N) \
QM_SCSS_INT->int_mailbox_mask |= (BIT(N + QM_MBOX_SS_HALT_MASK_OFFSET)) QM_INTERRUPT_ROUTER->mailbox_0_int_mask |= \
(BIT(N + QM_MBOX_SS_HALT_MASK_OFFSET))
/** /**
* Mailbox interrupt mask definitions to return the current mask values * Mailbox interrupt mask definitions to return the current mask values
*/ */
#define QM_MBOX_SS_INT_HALT_MASK \ #define QM_MBOX_SS_INT_HALT_MASK \
((QM_MBOX_SS_HALT_MASK_MASK & QM_SCSS_INT->int_mailbox_mask) >> \ ((QM_MBOX_SS_HALT_MASK_MASK & \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask) >> \
QM_MBOX_SS_HALT_MASK_OFFSET) QM_MBOX_SS_HALT_MASK_OFFSET)
#define QM_MBOX_LMT_INT_HALT_MASK \ #define QM_MBOX_LMT_INT_HALT_MASK \
((QM_MBOX_HOST_HALT_MASK_MASK & QM_SCSS_INT->int_mailbox_mask) >> \ ((QM_MBOX_HOST_HALT_MASK_MASK & \
QM_INTERRUPT_ROUTER->mailbox_0_int_mask) >> \
QM_MBOX_SS_HALT_MASK_OFFSET) QM_MBOX_SS_HALT_MASK_OFFSET)
#define QM_MBOX_SS_INT_MASK \ #define QM_MBOX_SS_INT_MASK \
((QM_MBOX_SS_MASK_MASK & QM_SCSS_INT->int_mailbox_mask) >> \ ((QM_MBOX_SS_MASK_MASK & QM_INTERRUPT_ROUTER->mailbox_0_int_mask) >> \
QM_MBOX_SS_MASK_OFFSET) QM_MBOX_SS_MASK_OFFSET)
#define QM_MBOX_LMT_INT_MASK \ #define QM_MBOX_LMT_INT_MASK \
(QM_MBOX_HOST_MASK_MASK & QM_SCSS_INT->int_mailbox_mask) (QM_MBOX_HOST_MASK_MASK & QM_INTERRUPT_ROUTER->mailbox_0_int_mask)
/** /**
* Mailbox interrupt macros to determine if the specified mailbox interrupt mask * Mailbox interrupt macros to determine if the specified mailbox interrupt mask
* has been locked. * has been locked.
*/ */
#define QM_MBOX_SS_INT_LOCK_HALT_MASK(N) \ #define QM_MBOX_SS_INT_LOCK_HALT_MASK(N) \
(QM_SCSS_INT->lock_int_mask_reg & BIT(3)) (QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(3))
#define QM_MBOX_LMT_INT_LOCK_HALT_MASK(N) \ #define QM_MBOX_LMT_INT_LOCK_HALT_MASK(N) \
(QM_SCSS_INT->lock_int_mask_reg & BIT(2)) (QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(2))
#define QM_MBOX_SS_INT_LOCK_MASK(N) (QM_SCSS_INT->lock_int_mask_reg & BIT(1)) #define QM_MBOX_SS_INT_LOCK_MASK(N) \
#define QM_MBOX_LMT_INT_LOCK_MASK(N) (QM_SCSS_INT->lock_int_mask_reg & BIT(0)) (QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(1))
#define QM_MBOX_LMT_INT_LOCK_MASK(N) \
(QM_INTERRUPT_ROUTER->lock_int_mask_reg & BIT(0))
/** Mailbox register structure. */ /** Mailbox register structure. */
typedef struct { typedef struct {
@ -783,139 +729,6 @@ qm_mailbox_reg_t test_mailbox;
/** @} */ /** @} */
/**
* @name IRQs and Interrupts
* @{
*/
/* IRQs and interrupt vectors.
*
* Any IRQ > 1 actually has a SCSS mask register offset of +1.
* The vector numbers must be defined without arithmetic expressions nor
* parentheses because they are expanded as token concatenation.
*/
#define QM_INT_VECTOR_DOUBLE_FAULT 8
#define QM_INT_VECTOR_PIC_TIMER 32
#define QM_IRQ_RTC_0 11
#define QM_IRQ_RTC_0_MASK_OFFSET 12
#define QM_IRQ_RTC_0_VECTOR 47
#define QM_IRQ_PWM_0 9
#define QM_IRQ_PWM_0_MASK_OFFSET 10
#define QM_IRQ_PWM_0_VECTOR 45
#define QM_IRQ_USB_0 (10)
#define QM_IRQ_USB_0_MASK_OFFSET (11)
#define QM_IRQ_USB_0_VECTOR 46
#define QM_IRQ_SPI_MASTER_0 2
#define QM_IRQ_SPI_MASTER_0_MASK_OFFSET 3
#define QM_IRQ_SPI_MASTER_0_VECTOR 38
#define QM_IRQ_SPI_MASTER_1 3
#define QM_IRQ_SPI_MASTER_1_MASK_OFFSET 4
#define QM_IRQ_SPI_MASTER_1_VECTOR 39
#define QM_IRQ_WDT_0 12
#define QM_IRQ_WDT_0_MASK_OFFSET 13
#define QM_IRQ_WDT_0_VECTOR 48
#define QM_IRQ_GPIO_0 8
#define QM_IRQ_GPIO_0_MASK_OFFSET 9
#define QM_IRQ_GPIO_0_VECTOR 44
#define QM_IRQ_I2C_0 0
#define QM_IRQ_I2C_0_MASK_OFFSET 0
#define QM_IRQ_I2C_0_VECTOR 36
#define QM_IRQ_I2C_1 1
#define QM_IRQ_I2C_1_MASK_OFFSET 1
#define QM_IRQ_I2C_1_VECTOR 37
#define QM_IRQ_MBOX 21
#define QM_IRQ_MBOX_MASK_OFFSET 22
#define QM_IRQ_MBOX_VECTOR 57
#define QM_IRQ_AC 22
#define QM_IRQ_AC_MASK_OFFSET 26
#define QM_IRQ_AC_VECTOR 58
#define QM_IRQ_SRAM 25
#define QM_IRQ_SRAM_MASK_OFFSET 29
#define QM_IRQ_SRAM_VECTOR 61
#define QM_IRQ_FLASH_0 26
#define QM_IRQ_FLASH_0_MASK_OFFSET 30
#define QM_IRQ_FLASH_0_VECTOR 62
#define QM_IRQ_FLASH_1 27
#define QM_IRQ_FLASH_1_MASK_OFFSET 31
#define QM_IRQ_FLASH_1_VECTOR 63
#define QM_IRQ_AONPT_0 28
#define QM_IRQ_AONPT_0_MASK_OFFSET 32
#define QM_IRQ_AONPT_0_VECTOR 64
#define QM_IRQ_AONGPIO_0 31
#define QM_IRQ_AONGPIO_0_MASK_OFFSET 35
#define QM_IRQ_AONGPIO_0_VECTOR 67
#define QM_IRQ_UART_0 5
#define QM_IRQ_UART_0_MASK_OFFSET 6
#define QM_IRQ_UART_0_VECTOR 41
#define QM_IRQ_UART_1 6
#define QM_IRQ_UART_1_MASK_OFFSET 7
#define QM_IRQ_UART_1_VECTOR 42
#define QM_IRQ_DMA_0 13
#define QM_IRQ_DMA_0_MASK_OFFSET 14
#define QM_IRQ_DMA_0_VECTOR 49
#define QM_IRQ_DMA_1 14
#define QM_IRQ_DMA_1_MASK_OFFSET 15
#define QM_IRQ_DMA_1_VECTOR 50
#define QM_IRQ_DMA_2 15
#define QM_IRQ_DMA_2_MASK_OFFSET 16
#define QM_IRQ_DMA_2_VECTOR 51
#define QM_IRQ_DMA_3 16
#define QM_IRQ_DMA_3_MASK_OFFSET 17
#define QM_IRQ_DMA_3_VECTOR 52
#define QM_IRQ_DMA_4 17
#define QM_IRQ_DMA_4_MASK_OFFSET 18
#define QM_IRQ_DMA_4_VECTOR 53
#define QM_IRQ_DMA_5 18
#define QM_IRQ_DMA_5_MASK_OFFSET 19
#define QM_IRQ_DMA_5_VECTOR 54
#define QM_IRQ_DMA_6 19
#define QM_IRQ_DMA_6_MASK_OFFSET 20
#define QM_IRQ_DMA_6_VECTOR 55
#define QM_IRQ_DMA_7 20
#define QM_IRQ_DMA_7_MASK_OFFSET 21
#define QM_IRQ_DMA_7_VECTOR 56
#define QM_IRQ_DMA_ERR 24
#define QM_IRQ_DMA_ERR_MASK_OFFSET 28
#define QM_IRQ_DMA_ERR_VECTOR 60
#define QM_SS_IRQ_ADC_PWR 29
#define QM_SS_IRQ_ADC_PWR_MASK_OFFSET 33
#define QM_SS_IRQ_ADC_PWR_VECTOR 65
#define QM_SS_IRQ_ADC_CAL 30
#define QM_SS_IRQ_ADC_CAL_MASK_OFFSET 34
#define QM_SS_IRQ_ADC_CAL_VECTOR 66
/** @} */
/** /**
* @name PWM / Timer * @name PWM / Timer
* @{ * @{
@ -954,6 +767,21 @@ typedef struct {
timer_loadcount2[QM_PWM_ID_NUM]; /**< Timer Load Count 2 */ timer_loadcount2[QM_PWM_ID_NUM]; /**< Timer Load Count 2 */
} qm_pwm_reg_t; } qm_pwm_reg_t;
/**
* PWM context type.
*
* Applications should not modify the content.
* This structure is only intended to be used by
* the qm_pwm_save_context and qm_pwm_restore_context functions.
*/
typedef struct {
struct {
uint32_t loadcount; /**< Load Count 1. */
uint32_t loadcount2; /**< Load Count 2. */
uint32_t controlreg; /**< Control Register. */
} channel[QM_PWM_ID_NUM];
} qm_pwm_context_t;
#if (UNIT_TEST) #if (UNIT_TEST)
qm_pwm_reg_t test_pwm_t; qm_pwm_reg_t test_pwm_t;
#define QM_PWM ((qm_pwm_reg_t *)(&test_pwm_t)) #define QM_PWM ((qm_pwm_reg_t *)(&test_pwm_t))
@ -1026,6 +854,18 @@ typedef struct {
QM_RW uint32_t wdt_comp_type; /**< Component Type Register */ QM_RW uint32_t wdt_comp_type; /**< Component Type Register */
} qm_wdt_reg_t; } qm_wdt_reg_t;
/*
* WDT context type.
*
* Application should not modify the content.
* This structure is only intended to be used by the qm_wdt_save_context and
* qm_wdt_restore_context functions.
*/
typedef struct {
uint32_t wdt_cr; /**< Control Register. */
uint32_t wdt_torr; /**< Timeout Range Register. */
} qm_wdt_context_t;
#if (UNIT_TEST) #if (UNIT_TEST)
qm_wdt_reg_t test_wdt; qm_wdt_reg_t test_wdt;
#define QM_WDT ((qm_wdt_reg_t *)(&test_wdt)) #define QM_WDT ((qm_wdt_reg_t *)(&test_wdt))
@ -1200,6 +1040,24 @@ typedef struct {
QM_RW uint32_t padding[0xCF]; /* (0x400 - 0xC4) / 4 */ QM_RW uint32_t padding[0xCF]; /* (0x400 - 0xC4) / 4 */
} qm_uart_reg_t; } qm_uart_reg_t;
/**
* UART context to be saved between sleep/resume.
*
* Application should not modify the content.
* This structure is only intended to be used by the qm_uart_save_context and
* qm_uart_restore_context functions.
*/
typedef struct {
uint32_t ier; /**< Interrupt Enable Register. */
uint32_t dlh; /**< Divisor Latch High. */
uint32_t dll; /**< Divisor Latch Low. */
uint32_t lcr; /**< Line Control. */
uint32_t mcr; /**< Modem Control. */
uint32_t scr; /**< Scratchpad. */
uint32_t htx; /**< Halt Transmission. */
uint32_t dlf; /**< Divisor Latch Fraction. */
} qm_uart_context_t;
#if (UNIT_TEST) #if (UNIT_TEST)
qm_uart_reg_t test_uart_instance; qm_uart_reg_t test_uart_instance;
qm_uart_reg_t *test_uart[QM_UART_NUM]; qm_uart_reg_t *test_uart[QM_UART_NUM];
@ -1255,6 +1113,19 @@ typedef struct {
QM_RW uint32_t padding[0xC4]; /* (0x400 - 0xF0) / 4 */ QM_RW uint32_t padding[0xC4]; /* (0x400 - 0xF0) / 4 */
} qm_spi_reg_t; } qm_spi_reg_t;
/**
* SPI context type.
*
* Applications should not modify the content.
* This structure is only intended to be used by
* the qm_spi_save_context and qm_spi_restore_context functions.
*/
typedef struct {
uint32_t ctrlr0; /**< Control Register 0. */
uint32_t ser; /**< Slave Enable Register. */
uint32_t baudr; /**< Baud Rate Select. */
} qm_spi_context_t;
#if (UNIT_TEST) #if (UNIT_TEST)
qm_spi_reg_t test_spi; qm_spi_reg_t test_spi;
qm_spi_reg_t *test_spi_controllers[QM_SPI_NUM]; qm_spi_reg_t *test_spi_controllers[QM_SPI_NUM];
@ -1423,6 +1294,25 @@ typedef struct {
QM_RW uint32_t padding[0xC0]; /* Padding (0x400-0xFC)/4 */ QM_RW uint32_t padding[0xC0]; /* Padding (0x400-0xFC)/4 */
} qm_i2c_reg_t; } qm_i2c_reg_t;
/**
* I2C context to be saved between sleep/resume.
*
* Application should not modify the content.
* This structure is only intended to be used by the qm_i2c_save_context and
* qm_i2c_restore_context functions.
*/
typedef struct {
uint32_t con; /**< Control Register. */
uint32_t sar; /**< Slave Address. */
uint32_t ss_scl_hcnt; /**< Standard Speed Clock SCL High Count. */
uint32_t ss_scl_lcnt; /**< Standard Speed Clock SCL Low Count. */
uint32_t fs_scl_hcnt; /**< Fast Speed Clock SCL High Count. */
uint32_t fs_scl_lcnt; /**< Fast Speed I2C Clock SCL Low Count. */
uint32_t enable; /**< Enable. */
uint32_t fs_spklen; /**< SS and FS Spike Suppression Limit. */
uint32_t ic_intr_mask; /**< I2C Interrupt Mask. */
} qm_i2c_context_t;
#if (UNIT_TEST) #if (UNIT_TEST)
qm_i2c_reg_t test_i2c_instance[QM_I2C_NUM]; qm_i2c_reg_t test_i2c_instance[QM_I2C_NUM];
qm_i2c_reg_t *test_i2c[QM_I2C_NUM]; qm_i2c_reg_t *test_i2c[QM_I2C_NUM];
@ -1453,16 +1343,20 @@ extern qm_i2c_reg_t *qm_i2c[QM_I2C_NUM];
#define QM_I2C_IC_CON_SPEED_FS_FSP BIT(2) #define QM_I2C_IC_CON_SPEED_FS_FSP BIT(2)
#define QM_I2C_IC_CON_SPEED_MASK (0x06) #define QM_I2C_IC_CON_SPEED_MASK (0x06)
#define QM_I2C_IC_CON_RESTART_EN BIT(5) #define QM_I2C_IC_CON_RESTART_EN BIT(5)
#define QM_I2C_IC_CON_STOP_DET_IFADDRESSED BIT(7)
#define QM_I2C_IC_DATA_CMD_READ BIT(8) #define QM_I2C_IC_DATA_CMD_READ BIT(8)
#define QM_I2C_IC_DATA_CMD_STOP_BIT_CTRL BIT(9) #define QM_I2C_IC_DATA_CMD_STOP_BIT_CTRL BIT(9)
#define QM_I2C_IC_DATA_CMD_LSB_MASK (0x000000FF) #define QM_I2C_IC_DATA_CMD_LSB_MASK (0x000000FF)
#define QM_I2C_IC_RAW_INTR_STAT_RX_FULL BIT(2) #define QM_I2C_IC_RAW_INTR_STAT_RX_FULL BIT(2)
#define QM_I2C_IC_RAW_INTR_STAT_TX_ABRT BIT(6) #define QM_I2C_IC_RAW_INTR_STAT_TX_ABRT BIT(6)
#define QM_I2C_IC_RAW_INTR_STAT_GEN_CALL BIT(11)
#define QM_I2C_IC_RAW_INTR_STAT_RESTART_DETECTED BIT(12)
#define QM_I2C_IC_TX_ABRT_SOURCE_NAK_MASK (0x1F) #define QM_I2C_IC_TX_ABRT_SOURCE_NAK_MASK (0x1F)
#define QM_I2C_IC_TX_ABRT_SOURCE_ARB_LOST BIT(12) #define QM_I2C_IC_TX_ABRT_SOURCE_ARB_LOST BIT(12)
#define QM_I2C_IC_TX_ABRT_SOURCE_ABRT_SBYTE_NORSTRT BIT(9) #define QM_I2C_IC_TX_ABRT_SOURCE_ABRT_SBYTE_NORSTRT BIT(9)
#define QM_I2C_IC_TX_ABRT_SOURCE_ALL_MASK (0x1FFFF) #define QM_I2C_IC_TX_ABRT_SOURCE_ALL_MASK (0x1FFFF)
#define QM_I2C_IC_STATUS_BUSY_MASK (0x00000060) #define QM_I2C_IC_STATUS_BUSY_MASK (0x00000060)
#define QM_I2C_IC_STATUS_RFF BIT(4)
#define QM_I2C_IC_STATUS_RFNE BIT(3) #define QM_I2C_IC_STATUS_RFNE BIT(3)
#define QM_I2C_IC_STATUS_TFE BIT(2) #define QM_I2C_IC_STATUS_TFE BIT(2)
#define QM_I2C_IC_STATUS_TNF BIT(1) #define QM_I2C_IC_STATUS_TNF BIT(1)
@ -1472,15 +1366,25 @@ extern qm_i2c_reg_t *qm_i2c[QM_I2C_NUM];
#define QM_I2C_IC_INTR_MASK_RX_FULL BIT(2) #define QM_I2C_IC_INTR_MASK_RX_FULL BIT(2)
#define QM_I2C_IC_INTR_MASK_TX_OVER BIT(3) #define QM_I2C_IC_INTR_MASK_TX_OVER BIT(3)
#define QM_I2C_IC_INTR_MASK_TX_EMPTY BIT(4) #define QM_I2C_IC_INTR_MASK_TX_EMPTY BIT(4)
#define QM_I2C_IC_INTR_MASK_RD_REQ BIT(5)
#define QM_I2C_IC_INTR_MASK_TX_ABORT BIT(6) #define QM_I2C_IC_INTR_MASK_TX_ABORT BIT(6)
#define QM_I2C_IC_INTR_MASK_RX_DONE BIT(7)
#define QM_I2C_IC_INTR_MASK_ACTIVITY BIT(8)
#define QM_I2C_IC_INTR_MASK_STOP_DETECTED BIT(9) #define QM_I2C_IC_INTR_MASK_STOP_DETECTED BIT(9)
#define QM_I2C_IC_INTR_MASK_START_DETECTED BIT(10) #define QM_I2C_IC_INTR_MASK_START_DETECTED BIT(10)
#define QM_I2C_IC_INTR_MASK_GEN_CALL_DETECTED BIT(11)
#define QM_I2C_IC_INTR_MASK_RESTART_DETECTED BIT(12)
#define QM_I2C_IC_INTR_STAT_RX_UNDER BIT(0) #define QM_I2C_IC_INTR_STAT_RX_UNDER BIT(0)
#define QM_I2C_IC_INTR_STAT_RX_OVER BIT(1) #define QM_I2C_IC_INTR_STAT_RX_OVER BIT(1)
#define QM_I2C_IC_INTR_STAT_RX_FULL BIT(2) #define QM_I2C_IC_INTR_STAT_RX_FULL BIT(2)
#define QM_I2C_IC_INTR_STAT_TX_OVER BIT(3) #define QM_I2C_IC_INTR_STAT_TX_OVER BIT(3)
#define QM_I2C_IC_INTR_STAT_TX_EMPTY BIT(4) #define QM_I2C_IC_INTR_STAT_TX_EMPTY BIT(4)
#define QM_I2C_IC_INTR_STAT_RD_REQ BIT(5)
#define QM_I2C_IC_INTR_STAT_TX_ABRT BIT(6) #define QM_I2C_IC_INTR_STAT_TX_ABRT BIT(6)
#define QM_I2C_IC_INTR_STAT_RX_DONE BIT(7)
#define QM_I2C_IC_INTR_STAT_STOP_DETECTED BIT(9)
#define QM_I2C_IC_INTR_STAT_START_DETECTED BIT(10)
#define QM_I2C_IC_INTR_STAT_GEN_CALL_DETECTED BIT(11)
#define QM_I2C_IC_LCNT_MAX (65525) #define QM_I2C_IC_LCNT_MAX (65525)
#define QM_I2C_IC_LCNT_MIN (8) #define QM_I2C_IC_LCNT_MIN (8)
#define QM_I2C_IC_HCNT_MAX (65525) #define QM_I2C_IC_HCNT_MAX (65525)
@ -1526,6 +1430,26 @@ typedef struct {
QM_RW uint32_t gpio_config_reg1; /**< GPIO Configuration Register 1 */ QM_RW uint32_t gpio_config_reg1; /**< GPIO Configuration Register 1 */
} qm_gpio_reg_t; } qm_gpio_reg_t;
/**
* GPIO context type.
*
* Application should not modify the content.
* This structure is only intended to be used by the qm_gpio_save_context and
* qm_gpio_restore_context functions.
*/
typedef struct {
uint32_t gpio_swporta_dr; /**< Port A Data. */
uint32_t gpio_swporta_ddr; /**< Port A Data Direction. */
uint32_t gpio_swporta_ctl; /**< Port A Data Source. */
uint32_t gpio_inten; /**< Interrupt Enable. */
uint32_t gpio_intmask; /**< Interrupt Mask. */
uint32_t gpio_inttype_level; /**< Interrupt Type. */
uint32_t gpio_int_polarity; /**< Interrupt Polarity. */
uint32_t gpio_debounce; /**< Debounce Enable. */
uint32_t gpio_ls_sync; /**< Synchronization Level. */
uint32_t gpio_int_bothedge; /**< Interrupt both edge type. */
} qm_gpio_context_t;
#define QM_NUM_GPIO_PINS (32) #define QM_NUM_GPIO_PINS (32)
#define QM_NUM_AON_GPIO_PINS (6) #define QM_NUM_AON_GPIO_PINS (6)
@ -1570,6 +1494,20 @@ typedef struct {
QM_RW uint32_t mpr_vsts; /**< Protection Status Register. */ QM_RW uint32_t mpr_vsts; /**< Protection Status Register. */
} qm_flash_reg_t; } qm_flash_reg_t;
/**
* Flash context type.
*
* Applications should not modify the content.
* This structure is only intended to be used by the
* qm_flash_save_context and qm_flash_restore_context functions.
*/
typedef struct {
/** Flash Timing Control Register. */
uint32_t tmg_ctrl;
/** Control Register. */
uint32_t ctrl;
} qm_flash_context_t;
#if (UNIT_TEST) #if (UNIT_TEST)
qm_flash_reg_t test_flash_instance; qm_flash_reg_t test_flash_instance;
qm_flash_reg_t *test_flash[QM_FLASH_NUM]; qm_flash_reg_t *test_flash[QM_FLASH_NUM];
@ -1654,11 +1592,50 @@ extern qm_flash_reg_t *qm_flash[QM_FLASH_NUM];
/** @} */ /** @} */
/**
* @name Flash Protection Region
* @{
*/
/**
* FPR register map.
*/
typedef enum {
QM_FPR_0, /**< FPR 0. */
QM_FPR_1, /**< FPR 1. */
QM_FPR_2, /**< FPR 2. */
QM_FPR_3, /**< FPR 3. */
QM_FPR_NUM
} qm_fpr_id_t;
/**
* FPR context type.
*
* Applications should not modify the content.
* This structure is only intended to be used by the
* qm_fpr_save_context and qm_fpr_restore_context functions.
*/
typedef struct {
/** Flash Protection Region Read Control Register. */
uint32_t fpr_rd_cfg[QM_FPR_NUM];
} qm_fpr_context_t;
/** @} */
/** /**
* @name Memory Protection Region * @name Memory Protection Region
* @{ * @{
*/ */
/* MPR identifier */
typedef enum {
QM_MPR_0 = 0, /**< Memory Protection Region 0. */
QM_MPR_1, /**< Memory Protection Region 1. */
QM_MPR_2, /**< Memory Protection Region 2. */
QM_MPR_3, /**< Memory Protection Region 3. */
QM_MPR_NUM /**< Number of Memory Protection Regions. */
} qm_mpr_id_t;
/** Memory Protection Region register map. */ /** Memory Protection Region register map. */
typedef struct { typedef struct {
QM_RW uint32_t mpr_cfg[4]; /**< MPR CFG */ QM_RW uint32_t mpr_cfg[4]; /**< MPR CFG */
@ -1666,6 +1643,17 @@ typedef struct {
	QM_RW uint32_t mpr_vsts; /**< MPR_VSTS */
} qm_mpr_reg_t;
/**
 * MPR context type.
 *
 * Applications should not modify the content.
 * This structure is only intended to be used by the qm_mpr_save_context and
 * qm_mpr_restore_context functions.
 */
typedef struct {
	uint32_t mpr_cfg[QM_MPR_NUM]; /**< MPR Configuration Register. */
} qm_mpr_context_t;
#if (UNIT_TEST)
qm_mpr_reg_t test_mpr;
@ -1931,6 +1919,23 @@ typedef struct {
	QM_RW qm_dma_misc_reg_t misc_reg; /**< Miscellaneous Register */
} qm_dma_reg_t;
/**
 * DMA context type.
 *
 * Applications should not modify the content.
 * This structure is only intended to be used by
 * the qm_dma_save_context and qm_dma_restore_context functions.
 */
typedef struct {
	/* Per-channel register copies, one entry per DMA channel. */
	struct {
		uint32_t ctrl_low; /**< Channel Control Lower. */
		uint32_t cfg_low; /**< Channel Configuration Lower. */
		uint32_t cfg_high; /**< Channel Configuration Upper. */
		uint32_t llp_low; /**< Channel Linked List Pointer. */
	} channel[QM_DMA_CHANNEL_NUM];
	uint32_t misc_cfg_low; /**< DMA Configuration. */
} qm_dma_context_t;
#if (UNIT_TEST)
qm_dma_reg_t test_dma_instance[QM_DMA_NUM];
qm_dma_reg_t *test_dma[QM_DMA_NUM];
@ -2071,6 +2076,16 @@ uint32_t test_usb_pll;
/** @} */
/**
* @name Hardware Fixes
* @{
*/
/* Refer to "HARDWARE_ISSUES.rst" for fix description. */
#define FIX_1 (1)
/** @} */
/**
 * @name Versioning
 * @{

View file

@ -72,6 +72,58 @@ typedef enum {
 */
void ss_power_soc_lpss_enable(void);
#if (ENABLE_RESTORE_CONTEXT)
/**
 * Enter SoC sleep state and restore after wake up.
 *
 * Put the ARC core into sleep state until the next SoC wake event
 * and continue execution after wake up where the application stopped.
 *
 * If the library is built with ENABLE_RESTORE_CONTEXT=1, then this function
 * will use arc_restore_addr to save the restore trap address which brings back
 * the ARC CPU to the point where this function was called.
 * This means that applications should refrain from using arc_restore_addr.
 *
 * This function calls qm_ss_save_context and qm_ss_restore_context
 * in order to restore execution where it stopped.
 * All power management transitions are done by power_soc_sleep().
 */
void ss_power_soc_sleep_restore(void);

/**
 * Enter SoC deep sleep state and restore after wake up.
 *
 * Put the ARC core into deep sleep state until the next SoC wake event
 * and continue execution after wake up where the application stopped.
 *
 * If the library is built with ENABLE_RESTORE_CONTEXT=1, then this function
 * will use arc_restore_addr to save the restore trap address which brings back
 * the ARC CPU to the point where this function was called.
 * This means that applications should refrain from using arc_restore_addr.
 *
 * This function calls qm_ss_save_context and qm_ss_restore_context
 * in order to restore execution where it stopped.
 * All power management transitions are done by power_soc_deep_sleep().
 */
void ss_power_soc_deep_sleep_restore(void);

/**
 * Save context, enter ARC SS1 power save state and restore after wake up.
 *
 * This routine is the same as ss_power_soc_sleep_restore(), except that
 * instead of going to sleep it will go to SS1 power save state.
 * Note: this function has a while(1) which will spin until we enter
 * (and exit) sleep and the power state change will be managed by the other
 * core.
 */
void ss_power_sleep_wait(void);

/**
 * Enable the SENSOR startup restore flag.
 *
 * NOTE(review): presumably marks that the next SENSOR (ARC) startup must
 * take the context-restore path — confirm against the bootloader code.
 */
void power_soc_set_ss_restore_flag(void);
#endif /* ENABLE_RESTORE_CONTEXT */
/**
 * Disable LPSS state entry.
 *
@ -121,6 +173,125 @@ void ss_power_cpu_ss1(const ss_power_cpu_ss1_mode_t mode);
 */
void ss_power_cpu_ss2(void);
#if (ENABLE_RESTORE_CONTEXT) && (!UNIT_TEST)
/**
 * Save resume vector.
 *
 * Saves the resume vector in the global "arc_restore_addr" location.
 * The ARC will jump to the resume vector once a wake up event is
 * triggered and x86 resumes the ARC.
 *
 * _restore_label must be the asm label declared by the matching
 * qm_ss_restore_context() invocation; its address is what gets stored.
 * r0 is clobbered (used as the address register for the store).
 */
#define qm_ss_set_resume_vector(_restore_label, arc_restore_addr)             \
	__asm__ __volatile__("mov r0, @arc_restore_addr\n\t"                  \
			     "st " #_restore_label ", [r0]\n\t"               \
			     : /* Output operands. */                         \
			     : /* Input operands. */                          \
			     : /* Clobbered registers list. */                \
			     "r0")
/* Save execution context.
 *
 * This routine saves CPU registers r1-r31 into the cpu_context array,
 * one 32-bit slot per register (offset = 4 * register number; slot 0 is
 * unused). r0 is needed as the base pointer, so its value is preserved
 * on the stack with push_s and popped again by qm_ss_restore_context —
 * the two macros must always be used as a pair within one function.
 */
#define qm_ss_save_context(cpu_context)                                       \
	__asm__ __volatile__("push_s r0\n\t"                                  \
			     "mov r0, @cpu_context\n\t"                       \
			     "st r1, [r0, 4]\n\t"                             \
			     "st r2, [r0, 8]\n\t"                             \
			     "st r3, [r0, 12]\n\t"                            \
			     "st r4, [r0, 16]\n\t"                            \
			     "st r5, [r0, 20]\n\t"                            \
			     "st r6, [r0, 24]\n\t"                            \
			     "st r7, [r0, 28]\n\t"                            \
			     "st r8, [r0, 32]\n\t"                            \
			     "st r9, [r0, 36]\n\t"                            \
			     "st r10, [r0, 40]\n\t"                           \
			     "st r11, [r0, 44]\n\t"                           \
			     "st r12, [r0, 48]\n\t"                           \
			     "st r13, [r0, 52]\n\t"                           \
			     "st r14, [r0, 56]\n\t"                           \
			     "st r15, [r0, 60]\n\t"                           \
			     "st r16, [r0, 64]\n\t"                           \
			     "st r17, [r0, 68]\n\t"                           \
			     "st r18, [r0, 72]\n\t"                           \
			     "st r19, [r0, 76]\n\t"                           \
			     "st r20, [r0, 80]\n\t"                           \
			     "st r21, [r0, 84]\n\t"                           \
			     "st r22, [r0, 88]\n\t"                           \
			     "st r23, [r0, 92]\n\t"                           \
			     "st r24, [r0, 96]\n\t"                           \
			     "st r25, [r0, 100]\n\t"                          \
			     "st r26, [r0, 104]\n\t"                          \
			     "st r27, [r0, 108]\n\t"                          \
			     "st r28, [r0, 112]\n\t"                          \
			     "st r29, [r0, 116]\n\t"                          \
			     "st r30, [r0, 120]\n\t"                          \
			     "st r31, [r0, 124]\n\t"                          \
			     : /* Output operands. */                         \
			     : /* Input operands. */                          \
			     : /* Clobbered registers list. */                \
			     "r0")
/* Restore execution context.
 *
 * This routine restores CPU registers r1-r31 from the cpu_context array
 * (same layout as written by qm_ss_save_context) and pops r0, which was
 * pushed by qm_ss_save_context, back off the stack.
 * It then reinitialises the Sensor Subsystem timestamp counter through
 * auxiliary registers 0x100-0x102 (count/control/limit — NOTE(review):
 * confirm the aux register assignment against the ARC EM databook).
 *
 * _restore_label defines the asm label whose address was stored by
 * qm_ss_set_resume_vector(); execution resumes here after wake up.
 *
 * This routine is called from the bootloader to restore the execution context
 * from before entering in sleep mode.
 */
#define qm_ss_restore_context(_restore_label, cpu_context)                    \
	__asm__ __volatile__(                                                 \
	    #_restore_label                                                   \
	    ":\n\t"                                                           \
	    "mov r0, @cpu_context\n\t"                                        \
	    "ld r1, [r0, 4]\n\t"                                              \
	    "ld r2, [r0, 8]\n\t"                                              \
	    "ld r3, [r0, 12]\n\t"                                             \
	    "ld r4, [r0, 16]\n\t"                                             \
	    "ld r5, [r0, 20]\n\t"                                             \
	    "ld r6, [r0, 24]\n\t"                                             \
	    "ld r7, [r0, 28]\n\t"                                             \
	    "ld r8, [r0, 32]\n\t"                                             \
	    "ld r9, [r0, 36]\n\t"                                             \
	    "ld r10, [r0, 40]\n\t"                                            \
	    "ld r11, [r0, 44]\n\t"                                            \
	    "ld r12, [r0, 48]\n\t"                                            \
	    "ld r13, [r0, 52]\n\t"                                            \
	    "ld r14, [r0, 56]\n\t"                                            \
	    "ld r15, [r0, 60]\n\t"                                            \
	    "ld r16, [r0, 64]\n\t"                                            \
	    "ld r17, [r0, 68]\n\t"                                            \
	    "ld r18, [r0, 72]\n\t"                                            \
	    "ld r19, [r0, 76]\n\t"                                            \
	    "ld r20, [r0, 80]\n\t"                                            \
	    "ld r21, [r0, 84]\n\t"                                            \
	    "ld r22, [r0, 88]\n\t"                                            \
	    "ld r23, [r0, 92]\n\t"                                            \
	    "ld r24, [r0, 96]\n\t"                                            \
	    "ld r25, [r0, 100]\n\t"                                           \
	    "ld r26, [r0, 104]\n\t"                                           \
	    "ld r27, [r0, 108]\n\t"                                           \
	    "ld r28, [r0, 112]\n\t"                                           \
	    "ld r29, [r0, 116]\n\t"                                           \
	    "ld r30, [r0, 120]\n\t"                                           \
	    "ld r31, [r0, 124]\n\t"                                           \
	    "pop_s r0\n\t"                                                    \
	    "sr 0,[0x101]\n\t" /* Setup Sensor Subsystem TimeStamp Counter */ \
	    "sr 0,[0x100]\n\t"                                                \
	    "sr -1,[0x102]\n\t"                                               \
	    : /* Output operands. */                                          \
	    : /* Input operands. */                                           \
	    : /* Clobbered registers list. */                                 \
	    "r0")
#else
/* Stubs: context save/restore compiles away when support is disabled. */
#define qm_ss_set_resume_vector(_restore_label, arc_restore_addr)
#define qm_ss_save_context(cpu_context)
#define qm_ss_restore_context(_restore_label, cpu_context)
#endif
/**
 * @}
 */