diff --git a/drivers/modem/hl7800.c b/drivers/modem/hl7800.c
index 0b1e4189316..15b33155362 100644
--- a/drivers/modem/hl7800.c
+++ b/drivers/modem/hl7800.c
@@ -422,7 +422,7 @@ struct hl7800_socket {
 	/** socket callbacks */
 	struct k_work recv_cb_work;
 	struct k_work rx_data_work;
-	struct k_delayed_work notif_work;
+	struct k_work_delayable notif_work;
 	net_context_recv_cb_t recv_cb;
 	struct net_pkt *recv_pkt;
 	void *recv_user_data;
@@ -467,12 +467,12 @@ struct hl7800_iface_ctx {
 	struct k_sem mdm_awake;
 
 	/* work */
-	struct k_delayed_work rssi_query_work;
-	struct k_delayed_work iface_status_work;
-	struct k_delayed_work dns_work;
+	struct k_work_delayable rssi_query_work;
+	struct k_work_delayable iface_status_work;
+	struct k_work_delayable dns_work;
 	struct k_work mdm_vgpio_work;
-	struct k_delayed_work mdm_reset_work;
-	struct k_delayed_work allow_sleep_work;
+	struct k_work_delayable mdm_reset_work;
+	struct k_work_delayable allow_sleep_work;
 
 #ifdef CONFIG_MODEM_HL7800_FW_UPDATE
 	/* firmware update */
@@ -820,12 +820,12 @@ static void allow_sleep(bool allow)
 {
 #ifdef CONFIG_MODEM_HL7800_LOW_POWER_MODE
 	if (allow) {
-		k_delayed_work_submit_to_queue(&hl7800_workq,
-					       &ictx.allow_sleep_work,
-					       ALLOW_SLEEP_DELAY_SECS);
+		k_work_reschedule_for_queue(&hl7800_workq,
+					    &ictx.allow_sleep_work,
+					    ALLOW_SLEEP_DELAY_SECS);
 	} else {
 		LOG_DBG("Keep awake");
-		k_delayed_work_cancel(&ictx.allow_sleep_work);
+		k_work_cancel_delayable(&ictx.allow_sleep_work);
 		ictx.allow_sleep = false;
 		modem_assert_wake(true);
 		modem_assert_uart_dtr(true);
@@ -955,8 +955,8 @@ int32_t mdm_hl7800_update_apn(char *access_point_name)
 		/* After a reset the APN will be re-read from the modem
 		 * and an event will be generated.
 		 */
-		k_delayed_work_submit_to_queue(&hl7800_workq,
-					       &ictx.mdm_reset_work, K_NO_WAIT);
+		k_work_reschedule_for_queue(&hl7800_workq, &ictx.mdm_reset_work,
+					    K_NO_WAIT);
 	}
 	return ret;
 }
@@ -1005,8 +1005,8 @@ error:
 	 * state are valid
 	 */
 	if (ret >= 0) {
-		k_delayed_work_submit_to_queue(&hl7800_workq,
-					       &ictx.mdm_reset_work, K_NO_WAIT);
+		k_work_reschedule_for_queue(&hl7800_workq, &ictx.mdm_reset_work,
+					    K_NO_WAIT);
 	}
 
 	return ret;
@@ -1641,8 +1641,8 @@ static bool on_cmd_atcmdinfo_ipaddr(struct net_buf **buf, uint16_t len)
 			 * stack is still starting up */
 			delay = K_SECONDS(DNS_WORK_DELAY_SECS);
 		}
-		k_delayed_work_submit_to_queue(&hl7800_workq, &ictx.dns_work,
-					       delay);
+		k_work_reschedule_for_queue(&hl7800_workq, &ictx.dns_work,
+					    delay);
 	} else {
 		LOG_ERR("iface NULL");
 	}
@@ -1946,8 +1946,8 @@ static bool on_cmd_startup_report(struct net_buf **buf, uint16_t len)
 		ictx.fw_updated = false;
 		set_fota_state(HL7800_FOTA_REBOOT_AND_RECONFIGURE);
 		/* issue reset after a firmware update to reconfigure modem state */
-		k_delayed_work_submit_to_queue(&hl7800_workq,
-					       &ictx.mdm_reset_work, K_NO_WAIT);
+		k_work_reschedule_for_queue(&hl7800_workq, &ictx.mdm_reset_work,
+					    K_NO_WAIT);
 	} else
 #endif
 	{
@@ -2157,13 +2157,18 @@ static int hl7800_query_rssi(void)
 
 static void hl7800_start_rssi_work(void)
 {
-	k_delayed_work_submit_to_queue(&hl7800_workq, &ictx.rssi_query_work,
-				       K_NO_WAIT);
+	k_work_reschedule_for_queue(&hl7800_workq, &ictx.rssi_query_work,
+				    K_NO_WAIT);
 }
 
 static void hl7800_stop_rssi_work(void)
 {
-	k_delayed_work_cancel(&ictx.rssi_query_work);
+	int rc;
+
+	rc = k_work_cancel_delayable(&ictx.rssi_query_work);
+	if (rc != 0) {
+		LOG_ERR("Could not cancel RSSI work [%d]", rc);
+	}
 }
 
@@ -2175,8 +2180,8 @@ static void hl7800_rssi_query_work(struct k_work *work)
 	hl7800_unlock();
 
 	/* re-start RSSI query work */
-	k_delayed_work_submit_to_queue(&hl7800_workq, &ictx.rssi_query_work,
-				       K_SECONDS(RSSI_TIMEOUT_SECS));
+	k_work_reschedule_for_queue(&hl7800_workq, &ictx.rssi_query_work,
+				    K_SECONDS(RSSI_TIMEOUT_SECS));
 }
 
 static void notify_all_tcp_sockets_closed(void)
@@ -2207,18 +2212,18 @@ static void iface_status_work_cb(struct k_work *work)
 	if (!ictx.initialized && ictx.restarting) {
 		LOG_DBG("Wait for driver init, process network state later");
 		/* we are not ready to process this yet, try again later */
-		k_delayed_work_submit_to_queue(&hl7800_workq,
-					       &ictx.iface_status_work,
-					       IFACE_WORK_DELAY);
+		k_work_reschedule_for_queue(&hl7800_workq,
+					    &ictx.iface_status_work,
+					    IFACE_WORK_DELAY);
 		goto done;
 	} else if (ictx.wait_for_KSUP &&
 		   ictx.wait_for_KSUP_tries < WAIT_FOR_KSUP_RETRIES) {
 		LOG_DBG("Wait for +KSUP before updating network state");
 		ictx.wait_for_KSUP_tries++;
 		/* we have not received +KSUP yet, lets wait more time to receive +KSUP */
-		k_delayed_work_submit_to_queue(&hl7800_workq,
-					       &ictx.iface_status_work,
-					       IFACE_WORK_DELAY);
+		k_work_reschedule_for_queue(&hl7800_workq,
+					    &ictx.iface_status_work,
+					    IFACE_WORK_DELAY);
 		goto done;
 	} else if (ictx.wait_for_KSUP &&
 		   ictx.wait_for_KSUP_tries >= WAIT_FOR_KSUP_RETRIES) {
@@ -2324,10 +2329,9 @@ static bool on_cmd_network_report_query(struct net_buf **buf, uint16_t len)
 		set_network_state(strtol(val, NULL, 0));
 
 		/* start work to adjust iface */
-		k_delayed_work_cancel(&ictx.iface_status_work);
-		k_delayed_work_submit_to_queue(&hl7800_workq,
-					       &ictx.iface_status_work,
-					       IFACE_WORK_DELAY);
+		k_work_reschedule_for_queue(&hl7800_workq,
+					    &ictx.iface_status_work,
+					    IFACE_WORK_DELAY);
 	}
 
 	return true;
@@ -2466,9 +2470,8 @@ static bool on_cmd_network_report(struct net_buf **buf, uint16_t len)
 	/* keep HL7800 awake because we want to process the network state soon */
 	allow_sleep(false);
 	/* start work to adjust iface */
-	k_delayed_work_cancel(&ictx.iface_status_work);
-	k_delayed_work_submit_to_queue(&hl7800_workq, &ictx.iface_status_work,
-				       IFACE_WORK_DELAY);
+	k_work_reschedule_for_queue(&hl7800_workq, &ictx.iface_status_work,
+				    IFACE_WORK_DELAY);
 
 	return true;
 }
@@ -2618,13 +2621,10 @@ static bool on_cmd_sock_error_code(struct net_buf **buf, uint16_t len)
 static void sock_notif_cb_work(struct k_work *work)
 {
 	struct hl7800_socket *sock = NULL;
+	struct k_work_delayable *dwork;
 
-	sock = CONTAINER_OF(work, struct hl7800_socket, notif_work);
-
-	if (!sock) {
-		LOG_ERR("sock_notif_cb_work: Socket not found");
-		return;
-	}
+	dwork = k_work_delayable_from_work(work);
+	sock = CONTAINER_OF(dwork, struct hl7800_socket, notif_work);
 
 	hl7800_lock();
 	/* send null packet */
@@ -2632,8 +2632,8 @@ static void sock_notif_cb_work(struct k_work *work)
 		/* we are in the middle of RX,
 		 * requeue this and try again
 		 */
-		k_delayed_work_submit_to_queue(&hl7800_workq, &sock->notif_work,
-					       MDM_SOCK_NOTIF_DELAY);
+		k_work_reschedule_for_queue(&hl7800_workq, &sock->notif_work,
+					    MDM_SOCK_NOTIF_DELAY);
 	} else {
 		LOG_DBG("Sock %d trigger NULL packet", sock->socket_id);
 		sock->state = SOCK_SERVER_CLOSED;
@@ -2696,9 +2696,9 @@ static bool on_cmd_sock_notif(struct net_buf **buf, uint16_t len)
 		 */
 		sock->error = true;
 		sock->error_val = notif_val;
-		k_delayed_work_submit_to_queue(&hl7800_workq,
-					       &sock->notif_work,
-					       MDM_SOCK_NOTIF_DELAY);
+		k_work_reschedule_for_queue(&hl7800_workq,
+					    &sock->notif_work,
+					    MDM_SOCK_NOTIF_DELAY);
 		if (trigger_sem) {
 			k_sem_give(&sock->sock_send_sem);
 		}
@@ -2747,10 +2747,7 @@ static void sockreadrecv_cb_work(struct k_work *work)
 	struct net_pkt *pkt;
 
 	sock = CONTAINER_OF(work, struct hl7800_socket, recv_cb_work);
-	if (!sock) {
-		LOG_ERR("Sock not found");
-		return;
-	}
+
 	LOG_DBG("Sock %d RX CB", sock->socket_id);
 	/* return data */
 	pkt = sock->recv_pkt;
@@ -2781,9 +2778,8 @@ static void sock_read(struct net_buf **buf, uint16_t len)
 
 	if (sock->error) {
 		/* cancel notif work and restart */
-		k_delayed_work_cancel(&sock->notif_work);
-		k_delayed_work_submit_to_queue(&hl7800_workq, &sock->notif_work,
-					       MDM_SOCK_NOTIF_DELAY);
+		k_work_reschedule_for_queue(&hl7800_workq, &sock->notif_work,
+					    MDM_SOCK_NOTIF_DELAY);
 	}
 
 	LOG_DBG("Socket %d RX %u bytes", sock->socket_id, sock->rx_size);
@@ -3001,11 +2997,6 @@ static void sock_rx_data_cb_work(struct k_work *work)
 
 	sock = CONTAINER_OF(work, struct hl7800_socket, rx_data_work);
 
-	if (!sock) {
-		LOG_ERR("sock_rx_data_cb_work: Socket not found");
-		return;
-	}
-
 	hl7800_lock();
 
 	wakeup_hl7800();
@@ -3347,7 +3338,7 @@ static void hl7800_rx(void)
 	while (true) {
 		/* wait for incoming data */
-		(void) k_sem_take(&ictx.mdm_ctx.rx_sem, K_FOREVER);
+		(void)k_sem_take(&ictx.mdm_ctx.rx_sem, K_FOREVER);
 
 		hl7800_read_rx(&rx_buf);
 
 		/* If an external module hasn't locked the command processor,
@@ -4560,7 +4551,7 @@ static int offload_put(struct net_context *context)
 	}
 
 	/* cancel notif work if queued */
-	k_delayed_work_cancel(&sock->notif_work);
+	k_work_cancel_delayable(&sock->notif_work);
 
 	hl7800_lock();
 
@@ -4713,8 +4704,8 @@ static int hl7800_init(const struct device *dev)
 			    sockreadrecv_cb_work);
 		k_work_init(&ictx.sockets[i].rx_data_work,
 			    sock_rx_data_cb_work);
-		k_delayed_work_init(&ictx.sockets[i].notif_work,
-				    sock_notif_cb_work);
+		k_work_init_delayable(&ictx.sockets[i].notif_work,
+				      sock_notif_cb_work);
 		k_sem_init(&ictx.sockets[i].sock_send_sem, 0, 1);
 	}
 	ictx.last_socket_id = 0;
@@ -4722,17 +4713,18 @@ static int hl7800_init(const struct device *dev)
 	k_sem_init(&ictx.mdm_awake, 0, 1);
 
 	/* initialize the work queue */
-	k_work_q_start(&hl7800_workq, hl7800_workq_stack,
-		       K_THREAD_STACK_SIZEOF(hl7800_workq_stack),
-		       WORKQ_PRIORITY);
+	k_work_queue_start(&hl7800_workq, hl7800_workq_stack,
+			   K_THREAD_STACK_SIZEOF(hl7800_workq_stack),
+			   WORKQ_PRIORITY, NULL);
 
 	/* init work tasks */
-	k_delayed_work_init(&ictx.rssi_query_work, hl7800_rssi_query_work);
-	k_delayed_work_init(&ictx.iface_status_work, iface_status_work_cb);
-	k_delayed_work_init(&ictx.dns_work, dns_work_cb);
+	k_work_init_delayable(&ictx.rssi_query_work, hl7800_rssi_query_work);
+	k_work_init_delayable(&ictx.iface_status_work, iface_status_work_cb);
+	k_work_init_delayable(&ictx.dns_work, dns_work_cb);
 	k_work_init(&ictx.mdm_vgpio_work, mdm_vgpio_work_cb);
-	k_delayed_work_init(&ictx.mdm_reset_work, mdm_reset_work_callback);
-	k_delayed_work_init(&ictx.allow_sleep_work, allow_sleep_work_callback);
+	k_work_init_delayable(&ictx.mdm_reset_work, mdm_reset_work_callback);
+	k_work_init_delayable(&ictx.allow_sleep_work,
+			      allow_sleep_work_callback);
 
 #ifdef CONFIG_MODEM_HL7800_FW_UPDATE
 	k_work_init(&ictx.finish_fw_update_work,
@@ -4886,7 +4878,6 @@ static struct net_if_api api_funcs = {
 	.init = offload_iface_init,
 };
 
-NET_DEVICE_DT_INST_OFFLOAD_DEFINE(0, hl7800_init, device_pm_control_nop,
-				  &ictx, NULL,
-				  CONFIG_MODEM_HL7800_INIT_PRIORITY, &api_funcs,
-				  MDM_MTU);
+NET_DEVICE_DT_INST_OFFLOAD_DEFINE(0, hl7800_init, device_pm_control_nop, &ictx,
+				  NULL, CONFIG_MODEM_HL7800_INIT_PRIORITY,
+				  &api_funcs, MDM_MTU);