drivers: nordic: usb: USBD event handling rework

Some of the events from the USBD peripheral (e.g. cable disconnection)
were handled in IRQ context and some of them (e.g. endpoint read/write
events) in the system workqueue (inherited from the initial driver
implementation). This could lead to race conditions in some specific
situations. Now all events are enqueued in the ISR and processed in the
workqueue. The driver is reinitialized on queue overflow, and the queue
size is configurable in Kconfig.

Fixes #12016

Signed-off-by: Paweł Zadrożniak <pawel.zadrozniak@nordicsemi.no>
Paweł Zadrożniak 2019-01-04 16:20:30 +01:00 committed by Carles Cufí
commit a0e2737aff
2 changed files with 330 additions and 196 deletions
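For reference, the flow the commit message describes can be summarized with a minimal, generic sketch. This is not the driver code itself; my_evt, my_evt_pool, my_evt_queue, my_work and my_isr are illustrative names, but the kernel primitives are the same ones the driver uses below: a memory pool for event storage, a k_fifo filled from the ISR, and a system-workqueue handler that drains it.

#include <kernel.h>

struct my_evt {
	sys_snode_t node;          /* first word reserved for k_fifo linkage */
	struct k_mem_block block;  /* kept so the handler can free the event */
	u32_t type;
};

#define MY_EVT_COUNT 32

K_MEM_POOL_DEFINE(my_evt_pool, sizeof(struct my_evt), sizeof(struct my_evt),
		  MY_EVT_COUNT, sizeof(unsigned int));
K_FIFO_DEFINE(my_evt_queue);

/* Runs in the system workqueue: drains and processes all queued events. */
static void my_work_handler(struct k_work *item)
{
	struct my_evt *ev;

	while ((ev = k_fifo_get(&my_evt_queue, K_NO_WAIT)) != NULL) {
		/* ... handle ev->type here ... */
		k_mem_pool_free(&ev->block);
	}
}

K_WORK_DEFINE(my_work, my_work_handler);

/* Runs in interrupt context: only allocates, fills and enqueues an event. */
static void my_isr(void)
{
	struct k_mem_block block;
	struct my_evt *ev;

	if (k_mem_pool_alloc(&my_evt_pool, &block,
			     sizeof(struct my_evt), K_NO_WAIT) < 0) {
		return; /* queue full; the real driver schedules a reinit here */
	}

	ev = block.data;
	ev->block = block;
	ev->type = 0U; /* fill in the actual event data */
	k_fifo_put(&my_evt_queue, ev);
	k_work_submit(&my_work);
}

Keeping the ISR down to an allocate-and-enqueue step is what removes the IRQ/workqueue race mentioned above; queue overflow is the only case needing special handling, and the driver handles it by flushing the queue and scheduling a USBD_EVT_REINIT event.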

@ -56,7 +56,7 @@ config USB_DC_SAM
help
SAM family USB HS device controller Driver.
config USB_NRF52840
menuconfig USB_NRF52840
bool "Nordic Semiconductor nRF52840 USB Device Controller Driver"
default y
depends on SOC_NRF52840
@ -66,6 +66,14 @@ config USB_NRF52840
help
nRF52840 USB Device Controller Driver
config USB_NRFX_EVT_QUEUE_SIZE
int "USBD event queue size"
default 32
depends on USB_NRF52840
help
Size of the driver's internal event queue.
The required size depends on the number of endpoints (class instances) in use.
config USB_KINETIS
bool "Kinetis USB Device Controller Driver"
select USB_DEVICE_DRIVER
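The USB_NRFX_EVT_QUEUE_SIZE option added above can be raised from an application's prj.conf when many endpoints (class instances) are in use; the value here is only an example:

CONFIG_USB_NRFX_EVT_QUEUE_SIZE=64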

@ -48,9 +48,6 @@ LOG_MODULE_REGISTER(usb_nrfx);
#define USBD_EPDATASTATUS_EPIN_MASK (0x7F << USBD_EPDATASTATUS_EPIN1_Pos)
#define USBD_EPDATASTATUS_EPOUT_MASK (0x7F << USBD_EPDATASTATUS_EPOUT1_Pos)
/** USB Work flags */
#define NRF_USB_STATE_CHANGE 0
#define NRF_USB_STATUS_CHANGE 1
/**
* @brief nRF USBD peripheral states
@ -68,13 +65,23 @@ enum usbd_periph_state {
/**
* @brief Endpoint event types.
*/
enum ep_event_type {
enum usbd_ep_event_type {
EP_EVT_SETUP_RECV,
EP_EVT_RECV_REQ,
EP_EVT_RECV_COMPLETE,
EP_EVT_WRITE_COMPLETE,
};
/**
* @brief USBD peripheral event types.
*/
enum usbd_event_type {
USBD_EVT_POWER,
USBD_EVT_EP,
USBD_EVT_RESET,
USBD_EVT_SOF,
USBD_EVT_REINIT
};
/**
* @brief Endpoint configuration.
@ -128,25 +135,43 @@ struct nrf_usbd_ep_ctx {
bool write_fragmented;
};
/**
* @brief Endpoint event structure
*
* @param ep Endpoint control block pointer
* @param evt_type Event type
*/
struct usbd_ep_event {
struct nrf_usbd_ep_ctx *ep;
enum usbd_ep_event_type evt_type;
};
/**
* @brief Power event structure
*
* @param state New USBD peripheral state.
*/
struct usbd_pwr_event {
enum usbd_periph_state state;
};
/**
* @brief Endpoint USB event
* Used by ISR to send events to work handler
*
* @param node Used by the kernel for FIFO management
* @param ep Endpoint context pointer that needs service
* @param evt Event that has occurred from the USBD peripheral
* @param block Mempool block pointer for freeing up after use
* @param misc_u Miscellaneous information passed as flags
* @param evt Event data field
* @param evt_type Type of event that has occurred from the USBD peripheral
*/
struct usbd_ep_event {
struct usbd_event {
sys_snode_t node;
struct nrf_usbd_ep_ctx *ep;
enum ep_event_type evt;
struct k_mem_block block;
union {
u32_t flags;
u32_t frame_counter;
} misc_u;
struct usbd_ep_event ep_evt;
struct usbd_pwr_event pwr_evt;
} evt;
enum usbd_event_type evt_type;
};
/**
@ -156,14 +181,18 @@ struct usbd_ep_event {
* be derived from the theoretical number of backlog events possible depending
* on the number of endpoints configured.
*/
#define FIFO_ELEM_MIN_SZ sizeof(struct usbd_ep_event)
#define FIFO_ELEM_MAX_SZ sizeof(struct usbd_ep_event)
#define FIFO_ELEM_COUNT 32
#define FIFO_ELEM_MIN_SZ sizeof(struct usbd_event)
#define FIFO_ELEM_MAX_SZ sizeof(struct usbd_event)
#define FIFO_ELEM_COUNT CONFIG_USB_NRFX_EVT_QUEUE_SIZE
#define FIFO_ELEM_ALIGN sizeof(unsigned int)
K_MEM_POOL_DEFINE(fifo_elem_pool, FIFO_ELEM_MIN_SZ, FIFO_ELEM_MAX_SZ,
FIFO_ELEM_COUNT, FIFO_ELEM_ALIGN);
#if CONFIG_USB_NRFX_EVT_QUEUE_SIZE < 4
#error Invalid USBD event queue size (CONFIG_USB_NRFX_EVT_QUEUE_SIZE). Minimum size: 4.
#endif
/**
* @brief Endpoint buffer pool
* Used for allocating buffers for the endpoints' data transfer
@ -219,29 +248,22 @@ K_MEM_POOL_DEFINE(ep_buf_pool, EP_BUF_MIN_SZ, EP_BUF_MAX_SZ,
EP_BUF_COUNT, EP_BUF_ALIGN);
/**
* @brief USBD private structure
* @brief USBD control structure
*
* @param status_cb Status callback for USB DC notifications
* @param status_cb Status callback for USB DC notifications
* @param attached USBD Attached flag
* @param ready USBD Ready flag set after pullup
* @param state USBD state
* @param status_code Device Status code
* @param flags Flags used in work context
* @param usb_work USBD work item
* @param work_queue FIFO used for queuing up events from ISR
* @param drv_lock Mutex for thread-safe nrfx driver use
* @param ep_ctx Endpoint contexts
* @param ctrl_read_len State of control read operation (EP0).
*/
struct nrf_usbd_ctx {
usb_dc_status_callback status_cb;
bool attached;
bool ready;
enum usbd_periph_state state;
enum usb_dc_status_code status_code;
u32_t flags;
struct k_work usb_work;
struct k_fifo work_queue;
@ -351,51 +373,6 @@ static inline void usbd_work_schedule(void)
k_work_submit(&get_usbd_ctx()->usb_work);
}
/**
* @brief Update USB DC status code.
*
* @param status New status code.
*/
static inline void usbd_status_code_update(enum usb_dc_status_code status)
{
struct nrf_usbd_ctx *ctx = get_usbd_ctx();
ctx->status_code = status;
ctx->flags |= BIT(NRF_USB_STATUS_CHANGE);
usbd_work_schedule();
}
/**
* @brief Allocate USBD event.
*
* This function should be called prior to usbd_evt_put().
*
* @returns Pointer to the allocated event or NULL if there was no space left.
*/
static inline struct usbd_ep_event *usbd_evt_alloc(void)
{
int ret;
struct usbd_ep_event *ev;
struct k_mem_block block;
ret = k_mem_pool_alloc(&fifo_elem_pool, &block,
sizeof(struct usbd_ep_event),
K_NO_WAIT);
if (ret < 0) {
LOG_DBG("USBD event alloc failed!");
__ASSERT_NO_MSG(0);
return NULL;
}
ev = (struct usbd_ep_event *)block.data;
ev->block = block;
ev->misc_u.flags = 0U;
return ev;
}
/**
* @brief Free previously allocated USBD event.
*
@ -403,7 +380,7 @@ static inline struct usbd_ep_event *usbd_evt_alloc(void)
*
* @param ev Pointer to the USBD event structure.
*/
static inline void usbd_evt_free(struct usbd_ep_event *ev)
static inline void usbd_evt_free(struct usbd_event *ev)
{
k_mem_pool_free(&ev->block);
}
@ -413,7 +390,7 @@ static inline void usbd_evt_free(struct usbd_ep_event *ev)
*
* @param ev Pointer to the previously allocated and filled event structure.
*/
static inline void usbd_evt_put(struct usbd_ep_event *ev)
static inline void usbd_evt_put(struct usbd_event *ev)
{
k_fifo_put(&get_usbd_ctx()->work_queue, ev);
}
@ -421,7 +398,7 @@ static inline void usbd_evt_put(struct usbd_ep_event *ev)
/**
* @brief Get next enqueued USBD event if present.
*/
static inline struct usbd_ep_event *usbd_evt_get(void)
static inline struct usbd_event *usbd_evt_get(void)
{
return k_fifo_get(&get_usbd_ctx()->work_queue, K_NO_WAIT);
}
@ -431,7 +408,7 @@ static inline struct usbd_ep_event *usbd_evt_get(void)
*/
static inline void usbd_evt_flush(void)
{
struct usbd_ep_event *ev;
struct usbd_event *ev;
do {
ev = usbd_evt_get();
@ -441,27 +418,88 @@ static inline void usbd_evt_flush(void)
} while (ev != NULL);
}
/**
* @brief Allocate USBD event.
*
* This function should be called prior to usbd_evt_put().
*
* @returns Pointer to the allocated event or NULL if there was no space left.
*/
static inline struct usbd_event *usbd_evt_alloc(void)
{
int ret;
struct usbd_event *ev;
struct k_mem_block block;
ret = k_mem_pool_alloc(&fifo_elem_pool, &block,
sizeof(struct usbd_event),
K_NO_WAIT);
if (ret < 0) {
LOG_ERR("USBD event allocation failed!");
/* This should NOT happen in a properly designed system.
* Allocation may fail if workqueue thread is starved
* or event queue size is too small (CONFIG_USB_NRFX_EVT_QUEUE_SIZE).
* Wipe all events, free the space and schedule reinitialization.
*/
usbd_evt_flush();
ret = k_mem_pool_alloc(&fifo_elem_pool, &block,
sizeof(struct usbd_event),
K_NO_WAIT);
if (ret < 0) {
/* This should never fail in a properly operating system. */
LOG_ERR("USBD event memory corrupted.");
__ASSERT_NO_MSG(0);
return NULL;
}
ev = (struct usbd_event *)block.data;
ev->block = block;
ev->evt_type = USBD_EVT_REINIT;
usbd_evt_put(ev);
usbd_work_schedule();
return NULL;
}
ev = (struct usbd_event *)block.data;
ev->block = block;
return ev;
}
void usb_dc_nrfx_power_event_callback(nrf_power_event_t event)
{
struct nrf_usbd_ctx *ctx = get_usbd_ctx();
enum usbd_periph_state new_state;
switch (event) {
case NRF_POWER_EVENT_USBDETECTED:
ctx->state = USBD_ATTACHED;
new_state = USBD_ATTACHED;
break;
case NRF_POWER_EVENT_USBPWRRDY:
ctx->state = USBD_POWERED;
new_state = USBD_POWERED;
break;
case NRF_POWER_EVENT_USBREMOVED:
ctx->state = USBD_DETACHED;
new_state = USBD_DETACHED;
break;
default:
LOG_DBG("Unknown USB power event");
LOG_ERR("Unknown USB power event");
return;
}
ctx->flags |= BIT(NRF_USB_STATE_CHANGE);
k_work_submit(&ctx->usb_work);
struct usbd_event *ev = usbd_evt_alloc();
if (!ev) {
return;
}
ev->evt_type = USBD_EVT_POWER;
ev->evt.pwr_evt.state = new_state;
usbd_evt_put(ev);
usbd_work_schedule();
}
/**
@ -676,9 +714,11 @@ static void eps_ctx_uninit(void)
}
}
static void usbd_handle_state_change(struct nrf_usbd_ctx *ctx)
static inline void usbd_work_process_pwr_events(struct usbd_pwr_event *pwr_evt)
{
switch (ctx->state) {
struct nrf_usbd_ctx *ctx = get_usbd_ctx();
switch (pwr_evt->state) {
case USBD_ATTACHED:
LOG_DBG("USB detected");
nrfx_usbd_enable();
@ -686,42 +726,28 @@ static void usbd_handle_state_change(struct nrf_usbd_ctx *ctx)
case USBD_POWERED:
LOG_DBG("USB Powered");
ctx->status_code = USB_DC_CONNECTED;
ctx->flags |= BIT(NRF_USB_STATUS_CHANGE);
usbd_enable_endpoints(ctx);
nrfx_usbd_start(true);
ctx->ready = true;
if (ctx->status_cb) {
ctx->status_cb(USB_DC_CONNECTED, NULL);
}
break;
case USBD_DETACHED:
LOG_DBG("USB Removed");
ctx->ready = false;
nrfx_usbd_disable();
ctx->status_code = USB_DC_DISCONNECTED;
ctx->flags |= BIT(NRF_USB_STATUS_CHANGE);
if (ctx->status_cb) {
ctx->status_cb(USB_DC_DISCONNECTED, NULL);
}
break;
default:
break;
}
if (ctx->flags) {
k_work_submit(&ctx->usb_work);
}
}
static void usbd_handle_status_change(struct nrf_usbd_ctx *ctx)
{
if (ctx->status_code == USB_DC_RESET) {
struct nrf_usbd_ctx *ctx = get_usbd_ctx();
k_mutex_lock(&ctx->drv_lock, K_FOREVER);
eps_ctx_init();
k_mutex_unlock(&ctx->drv_lock);
}
if (ctx->status_cb) {
ctx->status_cb(ctx->status_code, NULL);
}
}
static inline void usbd_work_process_setup(struct nrf_usbd_ep_ctx *ep_ctx)
@ -793,66 +819,45 @@ static inline void usbd_work_process_recvreq(struct nrf_usbd_ctx *ctx,
k_mutex_unlock(&ctx->drv_lock);
}
/* Work handler */
static void usbd_work_handler(struct k_work *item)
static inline void usbd_work_process_ep_events(struct usbd_ep_event *ep_evt)
{
struct nrf_usbd_ctx *ctx;
struct usbd_ep_event *ev;
struct nrf_usbd_ctx *ctx = get_usbd_ctx();
struct nrf_usbd_ep_ctx *ep_ctx = ep_evt->ep;
ctx = CONTAINER_OF(item, struct nrf_usbd_ctx, usb_work);
__ASSERT_NO_MSG(ep_ctx);
if (ctx->flags) {
if (ctx->flags & BIT(NRF_USB_STATE_CHANGE)) {
usbd_handle_state_change(ctx);
ctx->flags &= ~BIT(NRF_USB_STATE_CHANGE);
switch (ep_evt->evt_type) {
case EP_EVT_SETUP_RECV:
usbd_work_process_setup(ep_ctx);
break;
case EP_EVT_RECV_REQ:
usbd_work_process_recvreq(ctx, ep_ctx);
break;
case EP_EVT_RECV_COMPLETE:
ep_ctx->cfg.cb(ep_ctx->cfg.addr,
USB_DC_EP_DATA_OUT);
break;
case EP_EVT_WRITE_COMPLETE:
if ((ep_ctx->cfg.type == USB_DC_EP_CONTROL)
&& (!ep_ctx->write_fragmented)) {
/* Trigger the hardware to perform
* status stage, but only if there is
* no more data to send (IN transfer
* has not been fragmented).
*/
k_mutex_lock(&ctx->drv_lock, K_FOREVER);
nrfx_usbd_setup_clear();
k_mutex_unlock(&ctx->drv_lock);
}
if (ctx->flags & BIT(NRF_USB_STATUS_CHANGE)) {
usbd_handle_status_change(ctx);
ctx->flags &= ~BIT(NRF_USB_STATUS_CHANGE);
}
}
while ((ev = usbd_evt_get()) != NULL) {
if (!ctx->attached) {
LOG_ERR("USBD event dropped (not attached): %d.",
(uint32_t)ev->evt);
} else {
struct nrf_usbd_ep_ctx *ep_ctx = ev->ep;
switch (ev->evt) {
case EP_EVT_SETUP_RECV: {
usbd_work_process_setup(ep_ctx);
break;
}
case EP_EVT_RECV_REQ: {
usbd_work_process_recvreq(ctx, ep_ctx);
break;
}
case EP_EVT_RECV_COMPLETE:
ep_ctx->cfg.cb(ep_ctx->cfg.addr,
USB_DC_EP_DATA_OUT);
break;
case EP_EVT_WRITE_COMPLETE:
if ((ep_ctx->cfg.type == USB_DC_EP_CONTROL)
&& (!ep_ctx->write_fragmented)) {
/* Trigger the hardware to perform
* status stage, but only if there is
* no more data to send (IN transfer
* has not been fragmented). */
k_mutex_lock(&ctx->drv_lock, K_FOREVER);
nrfx_usbd_setup_clear();
k_mutex_unlock(&ctx->drv_lock);
}
ep_ctx->cfg.cb(ep_ctx->cfg.addr,
USB_DC_EP_DATA_IN);
break;
default:
break;
}
}
usbd_evt_free(ev);
ep_ctx->cfg.cb(ep_ctx->cfg.addr,
USB_DC_EP_DATA_IN);
break;
default:
break;
}
}
@ -874,12 +879,17 @@ static void usbd_event_transfer_ctrl(nrfx_usbd_evt_t const *const p_event)
if (NRF_USBD_EPIN_CHECK(p_event->data.eptransfer.ep)) {
switch (p_event->data.eptransfer.status) {
case NRFX_USBD_EP_OK: {
struct usbd_ep_event *ev = usbd_evt_alloc();
struct usbd_event *ev = usbd_evt_alloc();
ev->ep = ep_ctx;
ev->evt = EP_EVT_WRITE_COMPLETE;
if (!ev) {
return;
}
ep_ctx->write_in_progress = false;
ev->evt_type = USBD_EVT_EP;
ev->evt.ep_evt.evt_type = EP_EVT_WRITE_COMPLETE;
ev->evt.ep_evt.ep = ep_ctx;
LOG_DBG("ctrl write complete");
usbd_evt_put(ev);
usbd_work_schedule();
@ -897,13 +907,19 @@ static void usbd_event_transfer_ctrl(nrfx_usbd_evt_t const *const p_event)
} else {
switch (p_event->data.eptransfer.status) {
case NRFX_USBD_EP_WAITING: {
struct usbd_ep_event *ev = usbd_evt_alloc();
struct usbd_event *ev = usbd_evt_alloc();
if (!ev) {
return;
}
LOG_DBG("ctrl read request");
ep_ctx->read_pending = true;
ev->ep = ep_ctx;
ev->evt = EP_EVT_RECV_REQ;
ev->evt_type = USBD_EVT_EP;
ev->evt.ep_evt.evt_type = EP_EVT_RECV_REQ;
ev->evt.ep_evt.ep = ep_ctx;
usbd_evt_put(ev);
usbd_work_schedule();
}
@ -911,11 +927,16 @@ static void usbd_event_transfer_ctrl(nrfx_usbd_evt_t const *const p_event)
case NRFX_USBD_EP_OK: {
struct nrf_usbd_ctx *ctx = get_usbd_ctx();
struct usbd_ep_event *ev = usbd_evt_alloc();
struct usbd_event *ev = usbd_evt_alloc();
if (!ev) {
return;
}
nrfx_err_t err_code;
ev->ep = ep_ctx;
ev->evt = EP_EVT_RECV_COMPLETE;
ev->evt_type = USBD_EVT_EP;
ev->evt.ep_evt.evt_type = EP_EVT_RECV_COMPLETE;
ev->evt.ep_evt.ep = ep_ctx;
err_code = nrfx_usbd_ep_status_get(
p_event->data.eptransfer.ep, &ep_ctx->buf.len);
@ -958,14 +979,19 @@ static void usbd_event_transfer_data(nrfx_usbd_evt_t const *const p_event)
if (NRF_USBD_EPIN_CHECK(p_event->data.eptransfer.ep)) {
switch (p_event->data.eptransfer.status) {
case NRFX_USBD_EP_OK: {
struct usbd_ep_event *ev = usbd_evt_alloc();
struct usbd_event *ev = usbd_evt_alloc();
ev->ep = ep_ctx;
ev->evt = EP_EVT_WRITE_COMPLETE;
if (!ev) {
return;
}
LOG_DBG("write complete, ep %d",
(u32_t)p_event->data.eptransfer.ep);
ep_ctx->write_in_progress = false;
ev->evt_type = USBD_EVT_EP;
ev->evt.ep_evt.evt_type = EP_EVT_WRITE_COMPLETE;
ev->evt.ep_evt.ep = ep_ctx;
usbd_evt_put(ev);
usbd_work_schedule();
}
@ -982,24 +1008,31 @@ static void usbd_event_transfer_data(nrfx_usbd_evt_t const *const p_event)
} else {
switch (p_event->data.eptransfer.status) {
case NRFX_USBD_EP_WAITING: {
struct usbd_ep_event *ev = usbd_evt_alloc();
struct usbd_event *ev = usbd_evt_alloc();
if (!ev) {
return;
}
LOG_DBG("read request, ep %d",
(u32_t)p_event->data.eptransfer.ep);
ev->ep = ep_ctx;
ev->evt = EP_EVT_RECV_REQ;
usbd_evt_put(ev);
ep_ctx->read_pending = true;
ev->evt_type = USBD_EVT_EP;
ev->evt.ep_evt.evt_type = EP_EVT_RECV_REQ;
ev->evt.ep_evt.ep = ep_ctx;
usbd_evt_put(ev);
usbd_work_schedule();
}
break;
case NRFX_USBD_EP_OK: {
struct usbd_ep_event *ev = usbd_evt_alloc();
struct usbd_event *ev = usbd_evt_alloc();
ev->ep = ep_ctx;
ev->evt = EP_EVT_RECV_COMPLETE;
if (!ev) {
return;
}
ep_ctx->buf.len = nrf_usbd_ep_amount_get(
p_event->data.eptransfer.ep);
@ -1008,6 +1041,10 @@ static void usbd_event_transfer_data(nrfx_usbd_evt_t const *const p_event)
(u32_t)p_event->data.eptransfer.ep,
ep_ctx->buf.len);
ev->evt_type = USBD_EVT_EP;
ev->evt.ep_evt.evt_type = EP_EVT_RECV_COMPLETE;
ev->evt.ep_evt.ep = ep_ctx;
usbd_evt_put(ev);
usbd_work_schedule();
}
@ -1029,7 +1066,7 @@ static void usbd_event_transfer_data(nrfx_usbd_evt_t const *const p_event)
static void usbd_event_handler(nrfx_usbd_evt_t const *const p_event)
{
struct nrf_usbd_ep_ctx *ep_ctx;
struct usbd_ep_event *ev;
struct usbd_event *ev;
switch (p_event->type) {
case NRFX_USBD_EVT_SUSPEND:
@ -1042,12 +1079,23 @@ static void usbd_event_handler(nrfx_usbd_evt_t const *const p_event)
LOG_DBG("RemoteWU initiated.");
break;
case NRFX_USBD_EVT_RESET:
LOG_DBG("USBD Reset.");
usbd_status_code_update(USB_DC_RESET);
ev = usbd_evt_alloc();
if (!ev) {
return;
}
ev->evt_type = USBD_EVT_RESET;
usbd_evt_put(ev);
usbd_work_schedule();
break;
case NRFX_USBD_EVT_SOF:
#ifdef CONFIG_USB_DEVICE_SOF
usbd_status_code_update(USB_DC_SOF);
ev = usbd_evt_alloc();
if (!ev) {
return;
}
ev->evt_type = USBD_EVT_SOF;
usbd_evt_put(ev);
usbd_work_schedule();
#endif
break;
@ -1083,8 +1131,12 @@ static void usbd_event_handler(nrfx_usbd_evt_t const *const p_event)
struct nrf_usbd_ep_ctx *ep_ctx =
endpoint_ctx(NRF_USBD_EPOUT(0));
ev = usbd_evt_alloc();
ev->ep = ep_ctx;
ev->evt = EP_EVT_SETUP_RECV;
if (!ev) {
return;
}
ev->evt_type = USBD_EVT_EP;
ev->evt.ep_evt.ep = ep_ctx;
ev->evt.ep_evt.evt_type = EP_EVT_SETUP_RECV;
usbd_evt_put(ev);
usbd_work_schedule();
}
@ -1096,6 +1148,80 @@ static void usbd_event_handler(nrfx_usbd_evt_t const *const p_event)
}
}
static inline void usbd_reinit(void)
{
int ret;
nrfx_err_t err;
nrf5_power_usb_power_int_enable(false);
nrfx_usbd_disable();
nrfx_usbd_uninit();
usbd_evt_flush();
ret = eps_ctx_init();
__ASSERT_NO_MSG(ret == 0);
nrf5_power_usb_power_int_enable(true);
err = nrfx_usbd_init(usbd_event_handler);
if (err != NRFX_SUCCESS) {
LOG_DBG("nRF USBD driver reinit failed. Code: %d.",
(u32_t)err);
__ASSERT_NO_MSG(0);
}
}
/* Work handler */
static void usbd_work_handler(struct k_work *item)
{
struct nrf_usbd_ctx *ctx;
struct usbd_event *ev;
ctx = CONTAINER_OF(item, struct nrf_usbd_ctx, usb_work);
while ((ev = usbd_evt_get()) != NULL) {
switch (ev->evt_type) {
case USBD_EVT_EP:
if (!ctx->attached) {
LOG_ERR("EP %d event dropped (not attached).",
(u32_t)ev->evt.ep_evt.ep->cfg.addr);
}
usbd_work_process_ep_events(&ev->evt.ep_evt);
break;
case USBD_EVT_POWER:
usbd_work_process_pwr_events(&ev->evt.pwr_evt);
break;
case USBD_EVT_RESET:
LOG_DBG("USBD reset event.");
k_mutex_lock(&ctx->drv_lock, K_FOREVER);
eps_ctx_init();
k_mutex_unlock(&ctx->drv_lock);
if (ctx->status_cb) {
ctx->status_cb(USB_DC_RESET, NULL);
}
break;
case USBD_EVT_SOF:
if (ctx->status_cb) {
ctx->status_cb(USB_DC_SOF, NULL);
}
break;
case USBD_EVT_REINIT: {
/* Reinitialize the peripheral after queue overflow. */
LOG_ERR("USBD event queue full!");
usbd_reinit();
break;
}
default:
LOG_ERR("Unknown USBD event: %"PRIu32".", ev->evt_type);
break;
}
usbd_evt_free(ev);
}
}
int usb_dc_attach(void)
{
struct nrf_usbd_ctx *ctx = get_usbd_ctx();
@ -1146,10 +1272,6 @@ int usb_dc_detach(void)
k_mutex_lock(&ctx->drv_lock, K_FOREVER);
ctx->flags = 0U;
ctx->state = USBD_DETACHED;
ctx->status_code = USB_DC_UNKNOWN;
usbd_evt_flush();
eps_ctx_uninit();
@ -1207,7 +1329,6 @@ int usb_dc_set_address(const u8_t addr)
__ASSERT(addr == (u8_t)NRF_USBD->USBADDR, "USB Address incorrect!");
ctx = get_usbd_ctx();
ctx->state = USBD_ADDRESS_SET;
LOG_DBG("Address set to: %d.", addr);
@ -1582,10 +1703,15 @@ int usb_dc_ep_read_continue(u8_t ep)
ep_ctx->read_complete = true;
if (ep_ctx->read_pending) {
struct usbd_ep_event *ev = usbd_evt_alloc();
struct usbd_event *ev = usbd_evt_alloc();
ev->ep = ep_ctx;
ev->evt = EP_EVT_RECV_REQ;
if (!ev) {
return -ENOMEM;
}
ev->evt_type = USBD_EVT_EP;
ev->evt.ep_evt.ep = ep_ctx;
ev->evt.ep_evt.evt_type = EP_EVT_RECV_REQ;
usbd_evt_put(ev);
usbd_work_schedule();
}