diff --git a/subsys/usb/device/class/cdc_acm.c b/subsys/usb/device/class/cdc_acm.c
index f28ae7d4387..0c269bd4b2c 100644
--- a/subsys/usb/device/class/cdc_acm.c
+++ b/subsys/usb/device/class/cdc_acm.c
@@ -96,7 +96,7 @@ struct cdc_acm_dev_data_t {
 #if defined(CONFIG_CDC_ACM_DTE_RATE_CALLBACK_SUPPORT)
 	cdc_dte_rate_callback_t rate_cb;
 #endif
-	struct k_work tx_work;
+	struct k_work_delayable tx_work;
 	/* Tx ready status. Signals when */
 	bool tx_ready;
 	bool rx_ready;			/* Rx ready status */
@@ -218,7 +218,7 @@ static void cdc_acm_write_cb(uint8_t ep, int size, void *priv)
 		return;
 	}
 
-	k_work_submit_to_queue(&USB_WORK_Q, &dev_data->tx_work);
+	k_work_schedule_for_queue(&USB_WORK_Q, &dev_data->tx_work, K_NO_WAIT);
 }
 
 static void tx_work_handler(struct k_work *work)
@@ -477,7 +477,7 @@ static int cdc_acm_init(const struct device *dev)
 		dev, dev_data, dev->config, &cdc_acm_data_devlist);
 
 	k_work_init(&dev_data->cb_work, cdc_acm_irq_callback_work_handler);
-	k_work_init(&dev_data->tx_work, tx_work_handler);
+	k_work_init_delayable(&dev_data->tx_work, tx_work_handler);
 
 	return ret;
 }
@@ -513,7 +513,7 @@ static int cdc_acm_fifo_fill(const struct device *dev,
 		LOG_WRN("Ring buffer full, drop %zd bytes", len - wrote);
 	}
 
-	k_work_submit_to_queue(&USB_WORK_Q, &dev_data->tx_work);
+	k_work_schedule_for_queue(&USB_WORK_Q, &dev_data->tx_work, K_NO_WAIT);
 
 	/* Return written to ringbuf data len */
 	return wrote;
@@ -1016,7 +1016,11 @@ static void cdc_acm_poll_out(const struct device *dev, unsigned char c)
 		}
 	}
 
-	k_work_submit_to_queue(&USB_WORK_Q, &dev_data->tx_work);
+	/* Schedule with minimal timeout to make it possible to send more than
+	 * one byte per USB transfer. The latency increase is negligible while
+	 * the increased throughput and reduced CPU usage are easily observable.
+	 */
+	k_work_schedule_for_queue(&USB_WORK_Q, &dev_data->tx_work, K_MSEC(1));
 }
 
 static const struct uart_driver_api cdc_acm_driver_api = {
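
For reviewers less familiar with the k_work_delayable API this patch migrates to, below is a minimal, self-contained sketch of the same coalescing pattern. It is not the driver code: the demo_* names and the 64-byte buffer are hypothetical, it drains to printk() instead of a USB endpoint, and it uses k_work_schedule() (system work queue) instead of the driver's dedicated USB_WORK_Q. The kernel calls themselves (k_work_init_delayable(), k_work_schedule(), k_work_delayable_from_work()) are the real Zephyr API; the last one is how a handler for delayable work recovers its container.

/*
 * Demo sketch (hypothetical names, not the driver): producers push
 * single bytes into a ring buffer and schedule a delayable work item
 * with a 1 ms delay, so bytes produced within that window drain in a
 * single handler run.
 */
#include <zephyr/kernel.h>
#include <zephyr/sys/ring_buffer.h>

RING_BUF_DECLARE(demo_rb, 64);

struct demo_dev_data {
	struct k_work_delayable tx_work;
};

static struct demo_dev_data demo_data;

static void demo_tx_work_handler(struct k_work *work)
{
	/* k_work_delayable embeds a plain k_work, so the handler first
	 * maps back to the delayable wrapper, then to its container.
	 */
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct demo_dev_data *data =
		CONTAINER_OF(dwork, struct demo_dev_data, tx_work);
	uint8_t buf[64];
	uint32_t len = ring_buf_get(&demo_rb, buf, sizeof(buf));

	printk("draining %u byte(s) for %p\n", len, (void *)data);
}

static void demo_poll_out(uint8_t c)
{
	ring_buf_put(&demo_rb, &c, 1);
	/* k_work_schedule() is a no-op for an already-scheduled item,
	 * so back-to-back calls coalesce instead of pushing the one
	 * deadline further out.
	 */
	k_work_schedule(&demo_data.tx_work, K_MSEC(1));
}

int main(void)
{
	k_work_init_delayable(&demo_data.tx_work, demo_tx_work_handler);

	demo_poll_out('h');
	demo_poll_out('i');

	k_sleep(K_MSEC(10));	/* both bytes drain in one handler run */
	return 0;
}

The split of timeouts in the patch follows the same logic: cdc_acm_write_cb() and cdc_acm_fifo_fill() already operate on batched data, so they keep K_NO_WAIT, while cdc_acm_poll_out() delivers one byte per call and is the only path that benefits from the 1 ms coalescing window. Since scheduling (as opposed to rescheduling) does not reset the timeout of a pending item, poll-out latency stays bounded at 1 ms no matter how many bytes arrive back to back.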