diff --git a/arch/arc/soc/quark_se_c1000_ss/Kconfig.defconfig b/arch/arc/soc/quark_se_c1000_ss/Kconfig.defconfig
index 7dafea808c8..fa29a5d0144 100644
--- a/arch/arc/soc/quark_se_c1000_ss/Kconfig.defconfig
+++ b/arch/arc/soc/quark_se_c1000_ss/Kconfig.defconfig
@@ -203,5 +203,11 @@ config SPI_1_IRQ_PRI
 	default 1
 
 endif # SPI
 
+if DMA
+
+config DMA_QMSI
+	def_bool y
+
+endif # DMA
 endif #SOC_QUARK_SE_C1000_SS
diff --git a/drivers/dma/dma_qmsi.c b/drivers/dma/dma_qmsi.c
index 89ac17a7f49..81d66a05b38 100644
--- a/drivers/dma/dma_qmsi.c
+++ b/drivers/dma/dma_qmsi.c
@@ -157,53 +157,58 @@ static void dma_qmsi_config(struct device *dev)
 {
 	ARG_UNUSED(dev);
 
-	IRQ_CONNECT(QM_IRQ_DMA_0_INT_0, CONFIG_DMA_0_IRQ_PRI,
+	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_0), CONFIG_DMA_0_IRQ_PRI,
 			qm_dma_0_isr_0, DEVICE_GET(dma_qmsi), 0);
-	irq_enable(QM_IRQ_DMA_0_INT_0);
-	QM_INTERRUPT_ROUTER->dma_0_int_0_mask &= ~BIT(0);
+	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_0));
+	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_0_mask);
 
-	IRQ_CONNECT(QM_IRQ_DMA_0_INT_1, CONFIG_DMA_0_IRQ_PRI,
+	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_1), CONFIG_DMA_0_IRQ_PRI,
 			qm_dma_0_isr_1, DEVICE_GET(dma_qmsi), 0);
-	irq_enable(QM_IRQ_DMA_0_INT_1);
-	QM_INTERRUPT_ROUTER->dma_0_int_1_mask &= ~BIT(0);
+	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_1));
+	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_1_mask);
 
 #if (CONFIG_SOC_QUARK_SE_C1000)
-	IRQ_CONNECT(QM_IRQ_DMA_0_INT_2, CONFIG_DMA_0_IRQ_PRI,
+	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_2), CONFIG_DMA_0_IRQ_PRI,
 			qm_dma_0_isr_2, DEVICE_GET(dma_qmsi), 0);
-	irq_enable(QM_IRQ_DMA_0_INT_2);
-	QM_INTERRUPT_ROUTER->dma_0_int_2_mask &= ~BIT(0);
+	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_2));
+	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_2_mask);
 
-	IRQ_CONNECT(QM_IRQ_DMA_0_INT_3, CONFIG_DMA_0_IRQ_PRI,
+	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_3), CONFIG_DMA_0_IRQ_PRI,
 			qm_dma_0_isr_3, DEVICE_GET(dma_qmsi), 0);
-	irq_enable(QM_IRQ_DMA_0_INT_3);
-	QM_INTERRUPT_ROUTER->dma_0_int_3_mask &= ~BIT(0);
+	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_3));
+	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_3_mask);
 
-	IRQ_CONNECT(QM_IRQ_DMA_0_INT_4, CONFIG_DMA_0_IRQ_PRI,
+	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_4), CONFIG_DMA_0_IRQ_PRI,
 			qm_dma_0_isr_4, DEVICE_GET(dma_qmsi), 0);
-	irq_enable(QM_IRQ_DMA_0_INT_4);
-	QM_INTERRUPT_ROUTER->dma_0_int_4_mask &= ~BIT(0);
+	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_4));
+	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_4_mask);
 
-	IRQ_CONNECT(QM_IRQ_DMA_0_INT_5, CONFIG_DMA_0_IRQ_PRI,
+	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_5), CONFIG_DMA_0_IRQ_PRI,
 			qm_dma_0_isr_5, DEVICE_GET(dma_qmsi), 0);
-	irq_enable(QM_IRQ_DMA_0_INT_5);
-	QM_INTERRUPT_ROUTER->dma_0_int_5_mask &= ~BIT(0);
+	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_5));
+	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_5_mask);
 
-	IRQ_CONNECT(QM_IRQ_DMA_0_INT_6, CONFIG_DMA_0_IRQ_PRI,
+	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_6), CONFIG_DMA_0_IRQ_PRI,
 			qm_dma_0_isr_6, DEVICE_GET(dma_qmsi), 0);
-	irq_enable(QM_IRQ_DMA_0_INT_6);
-	QM_INTERRUPT_ROUTER->dma_0_int_6_mask &= ~BIT(0);
+	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_6));
+	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_6_mask);
 
-	IRQ_CONNECT(QM_IRQ_DMA_0_INT_7, CONFIG_DMA_0_IRQ_PRI,
+	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_7), CONFIG_DMA_0_IRQ_PRI,
 			qm_dma_0_isr_7, DEVICE_GET(dma_qmsi), 0);
-	irq_enable(QM_IRQ_DMA_0_INT_7);
-	QM_INTERRUPT_ROUTER->dma_0_int_7_mask &= ~BIT(0);
+	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_7));
+	QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->dma_0_int_7_mask);
 
 #endif /* CONFIG_SOC_QUARK_SE_C1000 */
 
-	IRQ_CONNECT(QM_IRQ_DMA_0_ERROR_INT, CONFIG_DMA_0_IRQ_PRI,
-			qm_dma_0_error_isr, DEVICE_GET(dma_qmsi), 0);
-	irq_enable(QM_IRQ_DMA_0_ERROR_INT);
-	QM_INTERRUPT_ROUTER->dma_0_error_int_mask &= ~BIT(0);
+	IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_ERROR_INT),
+		    CONFIG_DMA_0_IRQ_PRI, qm_dma_0_error_isr,
+		    DEVICE_GET(dma_qmsi), 0);
+	irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_ERROR_INT));
+#if (QM_LAKEMONT)
+	QM_INTERRUPT_ROUTER->dma_0_error_int_mask &= ~QM_IR_DMA_ERROR_HOST_MASK;
+#elif (QM_SENSOR)
+	QM_INTERRUPT_ROUTER->dma_0_error_int_mask &= ~QM_IR_DMA_ERROR_SS_MASK;
+#endif
 }
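The dma_qmsi.c hunk above repeats the same three-step setup for every DMA channel interrupt: connect the ISR, enable the IRQ line (now resolved through IRQ_GET_NUMBER()), and unmask it in the interrupt router via QM_IR_UNMASK_INTERRUPTS(). The sketch below shows one way that repetition could be factored into a local helper macro. It is illustrative only and not part of the patch: the macro name DMA_QMSI_SETUP_INT is hypothetical, while every identifier it expands to (IRQ_CONNECT, irq_enable, IRQ_GET_NUMBER, QM_IR_UNMASK_INTERRUPTS, DEVICE_GET, CONFIG_DMA_0_IRQ_PRI, QM_IRQ_DMA_0_INT_n, qm_dma_0_isr_n, dma_0_int_n_mask) already appears in the diff.

/* Sketch only, assuming the driver's existing headers and ISR definitions.
 * DMA_QMSI_SETUP_INT is a hypothetical helper, not part of the patch.
 */
#define DMA_QMSI_SETUP_INT(n)						\
	do {								\
		/* Register the channel ISR at the configured priority. */ \
		IRQ_CONNECT(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_##n),	\
			    CONFIG_DMA_0_IRQ_PRI, qm_dma_0_isr_##n,	\
			    DEVICE_GET(dma_qmsi), 0);			\
		/* Enable the IRQ line for this channel. */		\
		irq_enable(IRQ_GET_NUMBER(QM_IRQ_DMA_0_INT_##n));	\
		/* Route the interrupt to this core in the router. */	\
		QM_IR_UNMASK_INTERRUPTS(				\
			QM_INTERRUPT_ROUTER->dma_0_int_##n##_mask);	\
	} while (false)

Used inside dma_qmsi_config(), the two unconditional channels would then reduce to:

	DMA_QMSI_SETUP_INT(0);
	DMA_QMSI_SETUP_INT(1);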