drivers: serial: nrfx_uarte: Add low power mode

Lowest power consumption can be achieved when the uarte peripheral
is disabled when not used. In low power mode, the need for both
directions is tracked, and if both are not in use the peripheral is
disabled. TX disabling is instant, but RX requires flushing the RX
fifo because data in the hardware fifo is lost when the peripheral is
re-enabled.

Signed-off-by: Krzysztof Chruscinski <krzysztof.chruscinski@nordicsemi.no>
This commit is contained in:
Krzysztof Chruscinski 2020-12-13 06:43:44 +01:00 committed by Carles Cufí
commit f075fee107
2 changed files with 183 additions and 9 deletions

View file

@ -85,6 +85,15 @@ config UART_0_NRF_HW_ASYNC
It is recommended to use hardware byte counting in such scenarios. It is recommended to use hardware byte counting in such scenarios.
Hardware RX byte counting requires timer instance and one PPI channel Hardware RX byte counting requires timer instance and one PPI channel
# Disables the UARTE peripheral whenever neither TX nor RX is active,
# trading wakeup latency for the lowest possible idle current.
config UART_0_NRF_ASYNC_LOW_POWER
bool "Low power mode"
depends on UART_0_NRF_UARTE
depends on UART_ASYNC_API
help
When enabled, UARTE is enabled before each TX or RX usage and disabled
when not used. Disabling UARTE while in idle allows to achieve lowest
power consumption. It is only feasible if receiver is not always on.
config UART_0_NRF_HW_ASYNC_TIMER config UART_0_NRF_HW_ASYNC_TIMER
int "Timer instance" int "Timer instance"
depends on UART_0_NRF_HW_ASYNC depends on UART_0_NRF_HW_ASYNC
@ -156,6 +165,14 @@ config UART_1_NRF_HW_ASYNC
It is recommended to use hardware byte counting in such scenarios. It is recommended to use hardware byte counting in such scenarios.
Hardware RX byte counting requires timer instance and one PPI channel Hardware RX byte counting requires timer instance and one PPI channel
# Disables the UARTE peripheral whenever neither TX nor RX is active,
# trading wakeup latency for the lowest possible idle current.
config UART_1_NRF_ASYNC_LOW_POWER
bool "Low power mode"
# Consistency fix: the UART_0 entry depends on its UARTE instance symbol;
# without this the option is offered even when instance 1 is not a UARTE.
depends on UART_1_NRF_UARTE
depends on UART_ASYNC_API
help
When enabled, UARTE is enabled before each TX or RX usage and disabled
when not used. Disabling UARTE while in idle allows to achieve lowest
power consumption. It is only feasible if receiver is not always on.
config UART_1_NRF_HW_ASYNC_TIMER config UART_1_NRF_HW_ASYNC_TIMER
int "Timer instance" int "Timer instance"
depends on UART_1_NRF_HW_ASYNC depends on UART_1_NRF_HW_ASYNC
@ -226,6 +243,14 @@ config UART_2_NRF_HW_ASYNC
It is recommended to use hardware byte counting in such scenarios. It is recommended to use hardware byte counting in such scenarios.
Hardware RX byte counting requires timer instance and one PPI channel Hardware RX byte counting requires timer instance and one PPI channel
# Disables the UARTE peripheral whenever neither TX nor RX is active,
# trading wakeup latency for the lowest possible idle current.
config UART_2_NRF_ASYNC_LOW_POWER
bool "Low power mode"
# Consistency fix: the UART_0 entry depends on its UARTE instance symbol;
# without this the option is offered even when instance 2 is not a UARTE.
depends on UART_2_NRF_UARTE
depends on UART_ASYNC_API
help
When enabled, UARTE is enabled before each TX or RX usage and disabled
when not used. Disabling UARTE while in idle allows to achieve lowest
power consumption. It is only feasible if receiver is not always on.
config UART_2_NRF_HW_ASYNC_TIMER config UART_2_NRF_HW_ASYNC_TIMER
int "Timer instance" int "Timer instance"
depends on UART_2_NRF_HW_ASYNC depends on UART_2_NRF_HW_ASYNC
@ -296,6 +321,14 @@ config UART_3_NRF_HW_ASYNC
It is recommended to use hardware byte counting in such scenarios. It is recommended to use hardware byte counting in such scenarios.
Hardware RX byte counting requires timer instance and one PPI channel Hardware RX byte counting requires timer instance and one PPI channel
# Disables the UARTE peripheral whenever neither TX nor RX is active,
# trading wakeup latency for the lowest possible idle current.
config UART_3_NRF_ASYNC_LOW_POWER
bool "Low power mode"
# Consistency fix: the UART_0 entry depends on its UARTE instance symbol;
# without this the option is offered even when instance 3 is not a UARTE.
depends on UART_3_NRF_UARTE
depends on UART_ASYNC_API
help
When enabled, UARTE is enabled before each TX or RX usage and disabled
when not used. Disabling UARTE while in idle allows to achieve lowest
power consumption. It is only feasible if receiver is not always on.
config UART_3_NRF_HW_ASYNC_TIMER config UART_3_NRF_HW_ASYNC_TIMER
int "Timer instance" int "Timer instance"
depends on UART_3_NRF_HW_ASYNC depends on UART_3_NRF_HW_ASYNC

View file

@ -1,5 +1,5 @@
* /*
* Copyright (c) 2018 Nordic Semiconductor ASA * Copyright (c) 2018-2021 Nordic Semiconductor ASA
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -57,6 +57,9 @@ LOG_MODULE_REGISTER(uart_nrfx_uarte, LOG_LEVEL_ERR);
*/ */
#define RX_TIMEOUT_DIV 5 #define RX_TIMEOUT_DIV 5
/* Size of hardware fifo in RX path. */
#define UARTE_HW_RX_FIFO_SIZE 5
#ifdef CONFIG_UART_ASYNC_API #ifdef CONFIG_UART_ASYNC_API
struct uarte_async_cb { struct uarte_async_cb {
uart_callback_t user_callback; uart_callback_t user_callback;
@ -85,6 +88,9 @@ struct uarte_async_cb {
} rx_cnt; } rx_cnt;
volatile int tx_amount; volatile int tx_amount;
atomic_t low_power_mask;
uint8_t rx_flush_buffer[UARTE_HW_RX_FIFO_SIZE];
uint8_t rx_flush_cnt;
bool rx_enabled; bool rx_enabled;
bool hw_rx_counting; bool hw_rx_counting;
/* Flag to ensure that RX timeout won't be executed during ENDRX ISR */ /* Flag to ensure that RX timeout won't be executed during ENDRX ISR */
@ -125,6 +131,9 @@ struct uarte_nrfx_data {
gppi_channel_t ppi_ch_endtx; gppi_channel_t ppi_ch_endtx;
}; };
/* Direction bits recording which path (TX/RX) currently needs the UARTE
 * peripheral enabled. They are set/cleared in async->low_power_mask by
 * uarte_enable() and async_uart_release(); when the mask drops to zero
 * the peripheral is disabled to save power.
 */
#define UARTE_LOW_POWER_TX BIT(0)
#define UARTE_LOW_POWER_RX BIT(1)
/* Flag indicating that CTS pin is used. */ /* Flag indicating that CTS pin is used. */
#define UARTE_CFG_FLAG_CTS_PIN_SET BIT(0) #define UARTE_CFG_FLAG_CTS_PIN_SET BIT(0)
@ -137,6 +146,11 @@ struct uarte_nrfx_data {
/* If enabled then ENDTX is PPI'ed to TXSTOP */ /* If enabled then ENDTX is PPI'ed to TXSTOP */
#define UARTE_CFG_FLAG_PPI_ENDTX BIT(3) #define UARTE_CFG_FLAG_PPI_ENDTX BIT(3)
/* If enabled then UARTE peripheral is disabled when not used. This allows
* to achieve lowest power consumption in idle.
*/
#define UARTE_CFG_FLAG_LOW_POWER BIT(4)
#define IS_CTS_PIN_SET(flags) (flags & UARTE_CFG_FLAG_CTS_PIN_SET) #define IS_CTS_PIN_SET(flags) (flags & UARTE_CFG_FLAG_CTS_PIN_SET)
#define IS_RTS_PIN_SET(flags) (flags & UARTE_CFG_FLAG_RTS_PIN_SET) #define IS_RTS_PIN_SET(flags) (flags & UARTE_CFG_FLAG_RTS_PIN_SET)
#define IS_HWFC_PINS_USED(flags) \ #define IS_HWFC_PINS_USED(flags) \
@ -216,6 +230,26 @@ static void uarte_nrfx_isr_int(void *arg)
endtx_isr(dev); endtx_isr(dev);
} }
if (get_dev_config(dev)->flags & UARTE_CFG_FLAG_LOW_POWER) {
int key = irq_lock();
if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) {
nrf_uarte_disable(uarte);
}
#ifdef UARTE_INTERRUPT_DRIVEN
struct uarte_nrfx_data *data = get_dev_data(dev);
if (!data->int_driven)
#endif
{
nrf_uarte_int_disable(uarte,
NRF_UARTE_INT_TXSTOPPED_MASK);
}
irq_unlock(key);
}
#ifdef UARTE_INTERRUPT_DRIVEN #ifdef UARTE_INTERRUPT_DRIVEN
struct uarte_nrfx_data *data = get_dev_data(dev); struct uarte_nrfx_data *data = get_dev_data(dev);
@ -473,6 +507,26 @@ static inline bool hw_rx_counting_enabled(struct uarte_nrfx_data *data)
} }
#endif /* CONFIG_UART_ASYNC_API */ #endif /* CONFIG_UART_ASYNC_API */
/* Enable the UARTE peripheral on behalf of one direction.
 *
 * @param dev  UARTE device.
 * @param mask Direction claiming the peripheral (UARTE_LOW_POWER_TX or
 *             UARTE_LOW_POWER_RX); OR-ed into async->low_power_mask.
 *
 * NOTE(review): low_power_mask is updated without locking here — callers
 * appear responsible for serialization (rx_enable wraps this in irq_lock);
 * confirm TX path call sites hold a lock.
 */
static void uarte_enable(const struct device *dev, uint32_t mask)
{
#ifdef CONFIG_UART_ASYNC_API
struct uarte_nrfx_data *data = get_dev_data(dev);
/* Peripheral was fully idle iff no direction held it before this call. */
bool disabled = data->async->low_power_mask == 0;
data->async->low_power_mask |= mask;
if (hw_rx_counting_enabled(data) && disabled) {
const nrfx_timer_t *timer = &get_dev_config(dev)->timer;
nrfx_timer_enable(timer);
/* Replay the bytes previously flushed from the RX FIFO so the
 * hardware byte counter stays consistent with the flushed data
 * that will be handed back to the user.
 */
for (int i = 0; i < data->async->rx_flush_cnt; i++) {
nrfx_timer_increment(timer);
}
}
#endif
nrf_uarte_enable(get_uarte_instance(dev));
}
static void tx_start(const struct device *dev, const uint8_t *buf, size_t len) static void tx_start(const struct device *dev, const uint8_t *buf, size_t len)
{ {
NRF_UARTE_Type *uarte = get_uarte_instance(dev); NRF_UARTE_Type *uarte = get_uarte_instance(dev);
@ -480,6 +534,12 @@ static void tx_start(const struct device *dev, const uint8_t *buf, size_t len)
nrf_uarte_tx_buffer_set(uarte, buf, len); nrf_uarte_tx_buffer_set(uarte, buf, len);
nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX); nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_TXSTOPPED); nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_TXSTOPPED);
if (get_dev_config(dev)->flags & UARTE_CFG_FLAG_LOW_POWER) {
uarte_enable(dev, UARTE_LOW_POWER_TX);
nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
}
nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX); nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX);
} }
@ -566,6 +626,8 @@ static int uarte_nrfx_init(const struct device *dev)
if (ret != 0) { if (ret != 0) {
return ret; return ret;
} }
data->async->low_power_mask = UARTE_LOW_POWER_TX;
nrf_uarte_int_enable(uarte, nrf_uarte_int_enable(uarte,
NRF_UARTE_INT_ENDRX_MASK | NRF_UARTE_INT_ENDRX_MASK |
NRF_UARTE_INT_RXSTARTED_MASK | NRF_UARTE_INT_RXSTARTED_MASK |
@ -720,13 +782,44 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf,
data->async->rx_offset = 0; data->async->rx_offset = 0;
data->async->rx_next_buf = NULL; data->async->rx_next_buf = NULL;
data->async->rx_next_buf_len = 0; data->async->rx_next_buf_len = 0;
if (get_dev_config(dev)->flags & UARTE_CFG_FLAG_LOW_POWER) {
if (data->async->rx_flush_cnt) {
int cpy_len = MIN(len, data->async->rx_flush_cnt);
memcpy(buf, data->async->rx_flush_buffer, cpy_len);
buf += cpy_len;
len -= cpy_len;
/* If flush content filled whole new buffer complete the
* request and indicate rx being disabled.
*/
if (!len) {
data->async->rx_flush_cnt -= cpy_len;
notify_uart_rx_rdy(dev, cpy_len);
notify_rx_buf_release(dev, &data->async->rx_buf,
true);
notify_rx_disable(dev);
return 0;
}
}
}
nrf_uarte_rx_buffer_set(uarte, buf, len); nrf_uarte_rx_buffer_set(uarte, buf, len);
nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
data->async->rx_enabled = true; data->async->rx_enabled = true;
if (get_dev_config(dev)->flags & UARTE_CFG_FLAG_LOW_POWER) {
int key = irq_lock();
uarte_enable(dev, UARTE_LOW_POWER_RX);
irq_unlock(key);
}
nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);
return 0; return 0;
} }
@ -874,6 +967,7 @@ static void rx_timeout(struct k_timer *timer)
nrf_uarte_int_enable(get_uarte_instance(dev), nrf_uarte_int_enable(get_uarte_instance(dev),
NRF_UARTE_INT_ENDRX_MASK); NRF_UARTE_INT_ENDRX_MASK);
} }
#define UARTE_ERROR_FROM_MASK(mask) \ #define UARTE_ERROR_FROM_MASK(mask) \
@ -925,7 +1019,10 @@ static void endrx_isr(const struct device *dev)
/* this is the amount that the EasyDMA controller has copied into the /* this is the amount that the EasyDMA controller has copied into the
* buffer * buffer
*/ */
const int rx_amount = nrf_uarte_rx_amount_get(uarte); const int rx_amount = nrf_uarte_rx_amount_get(uarte) +
data->async->rx_flush_cnt;
data->async->rx_flush_cnt = 0;
/* The 'rx_offset' can be bigger than 'rx_amount', so it the length /* The 'rx_offset' can be bigger than 'rx_amount', so it the length
* of data we report back the the user may need to be clipped. * of data we report back the the user may need to be clipped.
@ -1024,7 +1121,7 @@ static uint8_t rx_flush(const struct device *dev, uint8_t *buf, uint32_t len)
static const uint8_t dirty; static const uint8_t dirty;
NRF_UARTE_Type *uarte = get_uarte_instance(dev); NRF_UARTE_Type *uarte = get_uarte_instance(dev);
uint32_t prev_rx_amount = nrf_uarte_rx_amount_get(uarte); uint32_t prev_rx_amount = nrf_uarte_rx_amount_get(uarte);
uint8_t tmp_buf[5]; uint8_t tmp_buf[UARTE_HW_RX_FIFO_SIZE];
uint8_t *flush_buf = buf ? buf : tmp_buf; uint8_t *flush_buf = buf ? buf : tmp_buf;
size_t flush_len = buf ? len : sizeof(tmp_buf); size_t flush_len = buf ? len : sizeof(tmp_buf);
@ -1081,10 +1178,27 @@ static void async_uart_disable(const struct device *dev)
nrf_uarte_disable(get_uarte_instance(dev)); nrf_uarte_disable(get_uarte_instance(dev));
} }
/* This handler is called when the reception is interrupted, in contrary to static void async_uart_release(const struct device *dev, uint32_t dir_mask)
* finishing the reception after filling all provided buffers, in which case {
* the events UART_RX_BUF_RELEASED and UART_RX_DISABLED are reported struct uarte_nrfx_data *data = get_dev_data(dev);
* from endrx_isr. int key = irq_lock();
data->async->low_power_mask &= ~dir_mask;
if (!data->async->low_power_mask) {
if (dir_mask == UARTE_LOW_POWER_RX) {
data->async->rx_flush_cnt =
rx_flush(dev, data->async->rx_flush_buffer,
sizeof(data->async->rx_flush_buffer));
}
async_uart_disable(dev);
}
irq_unlock(key);
}
/* This handler is called when the receiver is stopped. If rx was aborted
* data from fifo is flushed.
*/ */
static void rxto_isr(const struct device *dev) static void rxto_isr(const struct device *dev)
{ {
@ -1097,6 +1211,10 @@ static void rxto_isr(const struct device *dev)
(void)rx_flush(dev, NULL, 0); (void)rx_flush(dev, NULL, 0);
} }
if (get_dev_config(dev)->flags & UARTE_CFG_FLAG_LOW_POWER) {
async_uart_release(dev, UARTE_LOW_POWER_RX);
}
notify_rx_disable(dev); notify_rx_disable(dev);
} }
@ -1106,6 +1224,15 @@ static void txstopped_isr(const struct device *dev)
NRF_UARTE_Type *uarte = get_uarte_instance(dev); NRF_UARTE_Type *uarte = get_uarte_instance(dev);
int key; int key;
if (get_dev_config(dev)->flags & UARTE_CFG_FLAG_LOW_POWER) {
nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
async_uart_release(dev, UARTE_LOW_POWER_TX);
if (!data->async->tx_size) {
return;
}
}
if (!data->async->tx_buf) { if (!data->async->tx_buf) {
/* If there is a pending tx request, it means that uart_tx() /* If there is a pending tx request, it means that uart_tx()
* was called when there was ongoing uart_poll_out. Handling * was called when there was ongoing uart_poll_out. Handling
@ -1574,6 +1701,10 @@ static int uarte_instance_init(const struct device *dev,
nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDTX_MASK); nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDTX_MASK);
} }
if (get_dev_config(dev)->flags & UARTE_CFG_FLAG_LOW_POWER) {
nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
}
/* Set TXSTOPPED event by requesting fake (zero-length) transfer. /* Set TXSTOPPED event by requesting fake (zero-length) transfer.
* Pointer to RAM variable (data->tx_buffer) is set because otherwise * Pointer to RAM variable (data->tx_buffer) is set because otherwise
* such operation may result in HardFault or RAM corruption. * such operation may result in HardFault or RAM corruption.
@ -1776,6 +1907,15 @@ static int uarte_nrfx_pm_control(const struct device *dev,
!UARTE_PROP(idx, hw_flow_control) \ !UARTE_PROP(idx, hw_flow_control) \
) )
/* Low power mode is used when rx pin is not defined or in async mode if
 * kconfig option is enabled.
 *
 * Expands to UARTE_CFG_FLAG_LOW_POWER or 0 for the instance's config flags:
 * - no rx_pin in devicetree -> low power (TX-only, safe to gate peripheral);
 * - async API with UART_<idx>_NRF_ASYNC_LOW_POWER=y -> low power;
 * - otherwise -> 0 (peripheral stays enabled).
 */
#define USE_LOW_POWER(idx) \
((UARTE_HAS_PROP(idx, rx_pin) && \
COND_CODE_1(CONFIG_UART_##idx##_ASYNC, \
(!IS_ENABLED(CONFIG_UART_##idx##_NRF_ASYNC_LOW_POWER)), \
(1))) ? 0 : UARTE_CFG_FLAG_LOW_POWER)
#define UART_NRF_UARTE_DEVICE(idx) \ #define UART_NRF_UARTE_DEVICE(idx) \
HWFC_CONFIG_CHECK(idx); \ HWFC_CONFIG_CHECK(idx); \
UARTE_INT_DRIVEN(idx); \ UARTE_INT_DRIVEN(idx); \
@ -1797,7 +1937,8 @@ static int uarte_nrfx_pm_control(const struct device *dev,
(IS_ENABLED(CONFIG_UART_##idx##_GPIO_MANAGEMENT) ? \ (IS_ENABLED(CONFIG_UART_##idx##_GPIO_MANAGEMENT) ? \
UARTE_CFG_FLAG_GPIO_MGMT : 0) | \ UARTE_CFG_FLAG_GPIO_MGMT : 0) | \
(IS_ENABLED(CONFIG_UART_##idx##_ENHANCED_POLL_OUT) ? \ (IS_ENABLED(CONFIG_UART_##idx##_ENHANCED_POLL_OUT) ? \
UARTE_CFG_FLAG_PPI_ENDTX : 0), \ UARTE_CFG_FLAG_PPI_ENDTX : 0) | \
USE_LOW_POWER(idx), \
IF_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC, \ IF_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC, \
(.timer = NRFX_TIMER_INSTANCE( \ (.timer = NRFX_TIMER_INSTANCE( \
CONFIG_UART_##idx##_NRF_HW_ASYNC_TIMER),)) \ CONFIG_UART_##idx##_NRF_HW_ASYNC_TIMER),)) \