From fed6bde788522fbbacf7196280c700f0be3feaf1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tomasz=20Mo=C5=84?=
Date: Tue, 13 Dec 2022 11:24:05 +0100
Subject: [PATCH] usb: device: cdc_acm: send more than 1 byte in poll out
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Schedule CDC ACM tx work to happen 1 ms after first unprocessed data
write. This gives enough leeway for the task writing to the queue to
write more than 1 byte before USB workqueue preempts and schedules IN
transfer.

Sending more than 1 byte at a time increases data throughput because
transaction overhead remains the same regardless of packet size. Prior
to this change, virtually every IN transaction carried only one
character when CDC ACM was used as a console.

Signed-off-by: Tomasz Moń
---
 subsys/usb/device/class/cdc_acm.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/subsys/usb/device/class/cdc_acm.c b/subsys/usb/device/class/cdc_acm.c
index f28ae7d4387..0c269bd4b2c 100644
--- a/subsys/usb/device/class/cdc_acm.c
+++ b/subsys/usb/device/class/cdc_acm.c
@@ -96,7 +96,7 @@ struct cdc_acm_dev_data_t {
 #if defined(CONFIG_CDC_ACM_DTE_RATE_CALLBACK_SUPPORT)
 	cdc_dte_rate_callback_t rate_cb;
 #endif
-	struct k_work tx_work;
+	struct k_work_delayable tx_work;
 	/* Tx ready status. Signals when */
 	bool tx_ready;
 	bool rx_ready;			/* Rx ready status */
@@ -218,7 +218,7 @@ static void cdc_acm_write_cb(uint8_t ep, int size, void *priv)
 		return;
 	}
 
-	k_work_submit_to_queue(&USB_WORK_Q, &dev_data->tx_work);
+	k_work_schedule_for_queue(&USB_WORK_Q, &dev_data->tx_work, K_NO_WAIT);
 }
 
 static void tx_work_handler(struct k_work *work)
@@ -477,7 +477,7 @@ static int cdc_acm_init(const struct device *dev)
 		dev, dev_data, dev->config, &cdc_acm_data_devlist);
 
 	k_work_init(&dev_data->cb_work, cdc_acm_irq_callback_work_handler);
-	k_work_init(&dev_data->tx_work, tx_work_handler);
+	k_work_init_delayable(&dev_data->tx_work, tx_work_handler);
 
 	return ret;
 }
@@ -513,7 +513,7 @@ static int cdc_acm_fifo_fill(const struct device *dev,
 		LOG_WRN("Ring buffer full, drop %zd bytes", len - wrote);
 	}
 
-	k_work_submit_to_queue(&USB_WORK_Q, &dev_data->tx_work);
+	k_work_schedule_for_queue(&USB_WORK_Q, &dev_data->tx_work, K_NO_WAIT);
 
 	/* Return written to ringbuf data len */
 	return wrote;
@@ -1016,7 +1016,11 @@ static void cdc_acm_poll_out(const struct device *dev, unsigned char c)
 		}
 	}
 
-	k_work_submit_to_queue(&USB_WORK_Q, &dev_data->tx_work);
+	/* Schedule with minimal timeout to make it possible to send more than
+	 * one byte per USB transfer. The latency increase is negligible while
+	 * the increased throughput and reduced CPU usage is easily observable.
+	 */
+	k_work_schedule_for_queue(&USB_WORK_Q, &dev_data->tx_work, K_MSEC(1));
 }
 
 static const struct uart_driver_api cdc_acm_driver_api = {