net: Conversion of k_work API

Replace all existing uses of the deprecated k_delayed_work and
k_work_q_start APIs with the recommended k_work_delayable and
k_work_queue_start alternatives.

Signed-off-by: Peter Bigot <peter.bigot@nordicsemi.no>
Signed-off-by: Jukka Rissanen <jukka.rissanen@linux.intel.com>
Authored by Peter Bigot on 2021-03-31 10:31:30 -05:00; committed by Anas Nashif
commit 188cb2cb7c
40 changed files with 338 additions and 341 deletions
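
The conversion applies a consistent mapping; the sketch below is an
editorial illustration (not part of the commit) pairing each deprecated
call with the replacement used throughout this diff.

/* Old -> new k_work API mapping applied in this commit:
 *   struct k_delayed_work            -> struct k_work_delayable
 *   k_delayed_work_init()            -> k_work_init_delayable()
 *   k_delayed_work_submit()          -> k_work_reschedule()
 *   k_delayed_work_submit_to_queue() -> k_work_reschedule_for_queue()
 *   k_delayed_work_cancel()          -> k_work_cancel_delayable()
 *   k_delayed_work_pending()         -> k_work_delayable_is_pending()
 *   k_delayed_work_remaining_get()   -> k_ticks_to_ms_ceil32(
 *                                          k_work_delayable_remaining_get())
 *   k_work_q_start()                 -> k_work_queue_start(..., NULL)
 */
#include <kernel.h>

static struct k_work_delayable sample_timer; /* illustrative name */

static void sample_handler(struct k_work *work)
{
	/* Periodic re-arm from inside the handler; k_work_reschedule()
	 * replaces k_delayed_work_submit() and restarts any pending
	 * countdown.
	 */
	k_work_reschedule(&sample_timer, K_SECONDS(1));
}

void sample_init(void)
{
	k_work_init_delayable(&sample_timer, sample_handler);
	k_work_reschedule(&sample_timer, K_NO_WAIT);
}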

View file

@ -716,9 +716,9 @@ static int ppp_driver_init(const struct device *dev)
ring_buf_init(&ppp->rx_ringbuf, sizeof(ppp->rx_buf), ppp->rx_buf);
k_work_init(&ppp->cb_work, ppp_isr_cb_work);
k_work_q_start(&ppp->cb_workq, ppp_workq,
K_KERNEL_STACK_SIZEOF(ppp_workq),
K_PRIO_COOP(PPP_WORKQ_PRIORITY));
k_work_queue_start(&ppp->cb_workq, ppp_workq,
K_KERNEL_STACK_SIZEOF(ppp_workq),
K_PRIO_COOP(PPP_WORKQ_PRIORITY), NULL);
k_thread_name_set(&ppp->cb_workq.thread, "ppp_workq");
#endif
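
For reference (editorial note, not commit text): k_work_queue_start()
takes the same arguments as the old k_work_q_start() plus a trailing
optional struct k_work_queue_config pointer; every call site in this
commit passes NULL to keep the default configuration. A minimal
standalone sketch, with illustrative names and sizes:

#include <kernel.h>

K_KERNEL_STACK_DEFINE(my_workq_stack, 1024);
static struct k_work_q my_workq;

void start_my_workq(void)
{
	k_work_queue_start(&my_workq, my_workq_stack,
			   K_KERNEL_STACK_SIZEOF(my_workq_stack),
			   K_PRIO_COOP(7), NULL);
	k_thread_name_set(&my_workq.thread, "my_workq");
}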

View file

@ -140,7 +140,7 @@ struct dsa_context {
struct dsa_api *dapi;
/** DSA related work (e.g. monitor if network interface is up) */
struct k_delayed_work dsa_work;
struct k_work_delayable dsa_work;
/** The switch_id, which equals the reg property value from the
* DTS, is used to distinguish between multiple connected switches.

View file

@ -155,7 +155,7 @@ struct http_response {
*/
struct http_client_internal_data {
/** Work for handling timeout */
struct k_delayed_work work;
struct k_work_delayable work;
/** HTTP parser context */
struct http_parser parser;

View file

@ -77,7 +77,7 @@ struct lwm2m_ctx {
/** Private CoAP and networking structures */
struct coap_pending pendings[CONFIG_LWM2M_ENGINE_MAX_PENDING];
struct coap_reply replies[CONFIG_LWM2M_ENGINE_MAX_REPLIES];
struct k_delayed_work retransmit_work;
struct k_work_delayable retransmit_work;
struct sys_mutex send_lock;
/** A pointer to currently processed request, for internal LwM2M engine

View file

@ -223,7 +223,7 @@ struct ppp_my_option_info;
*/
struct ppp_fsm {
/** Timeout timer */
struct k_delayed_work timer;
struct k_work_delayable timer;
struct {
/** Acknowledge Configuration Information */
@ -382,7 +382,7 @@ struct ppp_context {
atomic_t flags;
/** PPP startup worker. */
struct k_delayed_work startup;
struct k_work_delayable startup;
/** Carrier ON/OFF handler worker. This is used to create
* network interface UP/DOWN event when PPP L2 driver

View file

@ -61,7 +61,7 @@ struct net_trickle {
uint32_t Imax_abs; /**< Max interval size in ms (not doublings) */
struct k_delayed_work timer;
struct k_work_delayable timer;
net_trickle_cb_t cb; /**< Callback to be called when timer expires */
void *user_data;
};

View file

@ -39,9 +39,9 @@ static int nfds;
static bool mqtt_connected;
static struct k_delayed_work pub_message;
static struct k_work_delayable pub_message;
#if defined(CONFIG_NET_DHCPV4)
static struct k_delayed_work check_network_conn;
static struct k_work_delayable check_network_conn;
/* Network Management events */
#define L4_EVENT_MASK (NET_EVENT_L4_CONNECTED | NET_EVENT_L4_DISCONNECTED)
@ -343,7 +343,7 @@ static void publish_timeout(struct k_work *work)
LOG_DBG("mqtt_publish OK");
end:
k_delayed_work_submit(&pub_message, K_SECONDS(timeout_for_publish()));
k_work_reschedule(&pub_message, K_SECONDS(timeout_for_publish()));
}
static int try_to_connect(struct mqtt_client *client)
@ -374,8 +374,8 @@ static int try_to_connect(struct mqtt_client *client)
if (mqtt_connected) {
subscribe(client);
k_delayed_work_submit(&pub_message,
K_SECONDS(timeout_for_publish()));
k_work_reschedule(&pub_message,
K_SECONDS(timeout_for_publish()));
return 0;
}
@ -471,7 +471,7 @@ static void check_network_connection(struct k_work *work)
LOG_INF("waiting for DHCP to acquire addr");
end:
k_delayed_work_submit(&check_network_conn, K_SECONDS(3));
k_work_reschedule(&check_network_conn, K_SECONDS(3));
}
#endif
@ -481,7 +481,7 @@ static void abort_mqtt_connection(void)
if (mqtt_connected) {
mqtt_connected = false;
mqtt_abort(&client_ctx);
k_delayed_work_cancel(&pub_message);
k_work_cancel_delayable(&pub_message);
}
}
@ -494,14 +494,14 @@ static void l4_event_handler(struct net_mgmt_event_callback *cb,
if (mgmt_event == NET_EVENT_L4_CONNECTED) {
/* Wait for DHCP to be back in BOUND state */
k_delayed_work_submit(&check_network_conn, K_SECONDS(3));
k_work_reschedule(&check_network_conn, K_SECONDS(3));
return;
}
if (mgmt_event == NET_EVENT_L4_DISCONNECTED) {
abort_mqtt_connection();
k_delayed_work_cancel(&check_network_conn);
k_work_cancel_delayable(&check_network_conn);
return;
}
@ -519,10 +519,10 @@ void main(void)
return;
}
k_delayed_work_init(&pub_message, publish_timeout);
k_work_init_delayable(&pub_message, publish_timeout);
#if defined(CONFIG_NET_DHCPV4)
k_delayed_work_init(&check_network_conn, check_network_connection);
k_work_init_delayable(&check_network_conn, check_network_connection);
net_mgmt_init_event_callback(&l4_mgmt_cb, l4_event_handler,
L4_EVENT_MASK);

View file

@ -19,11 +19,11 @@ LOG_MODULE_REGISTER(net_dns_resolve_client_sample, LOG_LEVEL_DBG);
#if defined(CONFIG_MDNS_RESOLVER)
#if defined(CONFIG_NET_IPV4)
static struct k_delayed_work mdns_ipv4_timer;
static struct k_work_delayable mdns_ipv4_timer;
static void do_mdns_ipv4_lookup(struct k_work *work);
#endif
#if defined(CONFIG_NET_IPV6)
static struct k_delayed_work mdns_ipv6_timer;
static struct k_work_delayable mdns_ipv6_timer;
static void do_mdns_ipv6_lookup(struct k_work *work);
#endif
#endif
@ -130,7 +130,7 @@ void mdns_result_cb(enum dns_resolve_status status,
#if defined(CONFIG_NET_DHCPV4)
static struct net_mgmt_event_callback mgmt4_cb;
static struct k_delayed_work ipv4_timer;
static struct k_work_delayable ipv4_timer;
static void do_ipv4_lookup(struct k_work *work)
{
@ -192,12 +192,12 @@ static void ipv4_addr_add_handler(struct net_mgmt_event_callback *cb,
* management event thread stack is very small by default.
* So run it from the work queue instead.
*/
k_delayed_work_init(&ipv4_timer, do_ipv4_lookup);
k_delayed_work_submit(&ipv4_timer, K_NO_WAIT);
k_work_init_delayable(&ipv4_timer, do_ipv4_lookup);
k_work_reschedule(&ipv4_timer, K_NO_WAIT);
#if defined(CONFIG_MDNS_RESOLVER)
k_delayed_work_init(&mdns_ipv4_timer, do_mdns_ipv4_lookup);
k_delayed_work_submit(&mdns_ipv4_timer, K_NO_WAIT);
k_work_init_delayable(&mdns_ipv4_timer, do_mdns_ipv4_lookup);
k_work_reschedule(&mdns_ipv4_timer, K_NO_WAIT);
#endif
}
@ -274,8 +274,8 @@ static void setup_ipv4(struct net_if *iface)
do_ipv4_lookup();
#if defined(CONFIG_MDNS_RESOLVER) && defined(CONFIG_NET_IPV4)
k_delayed_work_init(&mdns_ipv4_timer, do_mdns_ipv4_lookup);
k_delayed_work_submit(&mdns_ipv4_timer, K_NO_WAIT);
k_work_init_delayable(&mdns_ipv4_timer, do_mdns_ipv4_lookup);
k_work_reschedule(&mdns_ipv4_timer, K_NO_WAIT);
#endif
}
@ -316,8 +316,8 @@ static void setup_ipv6(struct net_if *iface)
do_ipv6_lookup();
#if defined(CONFIG_MDNS_RESOLVER) && defined(CONFIG_NET_IPV6)
k_delayed_work_init(&mdns_ipv6_timer, do_mdns_ipv6_lookup);
k_delayed_work_submit(&mdns_ipv6_timer, K_NO_WAIT);
k_work_init_delayable(&mdns_ipv6_timer, do_mdns_ipv6_lookup);
k_work_reschedule(&mdns_ipv6_timer, K_NO_WAIT);
#endif
}

View file

@ -18,7 +18,7 @@ LOG_MODULE_DECLARE(net_gptp_sample);
#include "ethernet/gptp/gptp_data_set.h"
static int run_duration = CONFIG_NET_SAMPLE_RUN_DURATION;
static struct k_delayed_work stop_sample;
static struct k_work_delayable stop_sample;
static struct k_sem quit_lock;
static void stop_handler(struct k_work *work)
@ -82,8 +82,8 @@ void init_testing(void)
k_sem_init(&quit_lock, 0, K_SEM_MAX_LIMIT);
k_delayed_work_init(&stop_sample, stop_handler);
k_delayed_work_submit(&stop_sample, K_SECONDS(run_duration));
k_work_init_delayable(&stop_sample, stop_handler);
k_work_reschedule(&stop_sample, K_SECONDS(run_duration));
k_sem_take(&quit_lock, K_FOREVER);

View file

@ -49,13 +49,13 @@ static struct coap_observer observers[NUM_OBSERVERS];
static struct coap_pending pendings[NUM_PENDINGS];
static struct k_delayed_work observer_work;
static struct k_work_delayable observer_work;
static int obs_counter;
static struct coap_resource *resource_to_notify;
static struct k_delayed_work retransmit_work;
static struct k_work_delayable retransmit_work;
#if defined(CONFIG_NET_IPV6)
static bool join_coap_multicast_group(void)
@ -966,7 +966,7 @@ static void retransmit_request(struct k_work *work)
return;
}
k_delayed_work_submit(&retransmit_work, K_MSEC(pending->timeout));
k_work_reschedule(&retransmit_work, K_MSEC(pending->timeout));
}
static void update_counter(struct k_work *work)
@ -977,7 +977,7 @@ static void update_counter(struct k_work *work)
coap_resource_notify(resource_to_notify);
}
k_delayed_work_submit(&observer_work, K_SECONDS(5));
k_work_reschedule(&observer_work, K_SECONDS(5));
}
static int create_pending_request(struct coap_packet *response,
@ -1004,7 +1004,7 @@ static int create_pending_request(struct coap_packet *response,
return 0;
}
k_delayed_work_submit(&retransmit_work, K_MSEC(pending->timeout));
k_work_reschedule(&retransmit_work, K_MSEC(pending->timeout));
return 0;
}
@ -1081,7 +1081,7 @@ static int send_notification_packet(const struct sockaddr *addr,
}
}
k_delayed_work_submit(&observer_work, K_SECONDS(5));
k_work_reschedule(&observer_work, K_SECONDS(5));
r = send_coap_reply(&response, addr, addr_len);
@ -1416,8 +1416,8 @@ void main(void)
goto quit;
}
k_delayed_work_init(&retransmit_work, retransmit_request);
k_delayed_work_init(&observer_work, update_counter);
k_work_init_delayable(&retransmit_work, retransmit_request);
k_work_init_delayable(&observer_work, update_counter);
while (1) {
r = process_client_request();

View file

@ -33,8 +33,8 @@ struct data {
struct {
int sock;
/* Work controlling udp data sending */
struct k_delayed_work recv;
struct k_delayed_work transmit;
struct k_work_delayable recv;
struct k_work_delayable transmit;
uint32_t expecting;
uint32_t counter;
uint32_t mtu;

View file

@ -40,7 +40,7 @@ static int send_udp_data(struct data *data)
LOG_DBG("%s UDP: Sent %d bytes", data->proto, data->udp.expecting);
k_delayed_work_submit(&data->udp.recv, UDP_WAIT);
k_work_reschedule(&data->udp.recv, UDP_WAIT);
return ret < 0 ? -EIO : 0;
}
@ -83,8 +83,8 @@ static int start_udp_proto(struct data *data, struct sockaddr *addr,
{
int ret;
k_delayed_work_init(&data->udp.recv, wait_reply);
k_delayed_work_init(&data->udp.transmit, wait_transmit);
k_work_init_delayable(&data->udp.recv, wait_reply);
k_work_init_delayable(&data->udp.transmit, wait_transmit);
#if defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS)
data->udp.sock = socket(addr->sa_family, SOCK_DGRAM, IPPROTO_DTLS_1_2);
@ -168,11 +168,11 @@ static int process_udp_proto(struct data *data)
data->udp.counter);
}
k_delayed_work_cancel(&data->udp.recv);
k_work_cancel_delayable(&data->udp.recv);
/* Do not flood the link if we also have TCP configured */
if (IS_ENABLED(CONFIG_NET_TCP)) {
k_delayed_work_submit(&data->udp.transmit, UDP_SLEEP);
k_work_reschedule(&data->udp.transmit, UDP_SLEEP);
ret = 0;
} else {
ret = send_udp_data(data);
@ -251,8 +251,8 @@ int process_udp(void)
void stop_udp(void)
{
if (IS_ENABLED(CONFIG_NET_IPV6)) {
k_delayed_work_cancel(&conf.ipv6.udp.recv);
k_delayed_work_cancel(&conf.ipv6.udp.transmit);
k_work_cancel_delayable(&conf.ipv6.udp.recv);
k_work_cancel_delayable(&conf.ipv6.udp.transmit);
if (conf.ipv6.udp.sock >= 0) {
(void)close(conf.ipv6.udp.sock);
@ -260,8 +260,8 @@ void stop_udp(void)
}
if (IS_ENABLED(CONFIG_NET_IPV4)) {
k_delayed_work_cancel(&conf.ipv4.udp.recv);
k_delayed_work_cancel(&conf.ipv4.udp.transmit);
k_work_cancel_delayable(&conf.ipv4.udp.recv);
k_work_cancel_delayable(&conf.ipv4.udp.transmit);
if (conf.ipv4.udp.sock >= 0) {
(void)close(conf.ipv4.udp.sock);

View file

@ -42,13 +42,13 @@ struct data {
char recv_buffer[RECV_BUFFER_SIZE];
uint32_t counter;
atomic_t bytes_received;
struct k_delayed_work stats_print;
struct k_work_delayable stats_print;
} udp;
struct {
int sock;
atomic_t bytes_received;
struct k_delayed_work stats_print;
struct k_work_delayable stats_print;
struct {
int sock;

View file

@ -305,8 +305,7 @@ static void process_tcp4(void)
return;
}
k_delayed_work_submit(&conf.ipv4.tcp.stats_print,
K_SECONDS(STATS_TIMER));
k_work_reschedule(&conf.ipv4.tcp.stats_print, K_SECONDS(STATS_TIMER));
while (ret == 0) {
ret = process_tcp(&conf.ipv4);
@ -334,8 +333,7 @@ static void process_tcp6(void)
return;
}
k_delayed_work_submit(&conf.ipv6.tcp.stats_print,
K_SECONDS(STATS_TIMER));
k_work_reschedule(&conf.ipv6.tcp.stats_print, K_SECONDS(STATS_TIMER));
while (ret == 0) {
ret = process_tcp(&conf.ipv6);
@ -364,7 +362,7 @@ static void print_stats(struct k_work *work)
atomic_set(&data->tcp.bytes_received, 0);
}
k_delayed_work_submit(&data->tcp.stats_print, K_SECONDS(STATS_TIMER));
k_work_reschedule(&data->tcp.stats_print, K_SECONDS(STATS_TIMER));
}
void start_tcp(void)
@ -395,7 +393,7 @@ void start_tcp(void)
}
#endif
k_delayed_work_init(&conf.ipv6.tcp.stats_print, print_stats);
k_work_init_delayable(&conf.ipv6.tcp.stats_print, print_stats);
k_thread_start(tcp6_thread_id);
#endif
@ -411,7 +409,7 @@ void start_tcp(void)
}
#endif
k_delayed_work_init(&conf.ipv4.tcp.stats_print, print_stats);
k_work_init_delayable(&conf.ipv4.tcp.stats_print, print_stats);
k_thread_start(tcp4_thread_id);
#endif
}

View file

@ -156,8 +156,7 @@ static void process_udp4(void)
return;
}
k_delayed_work_submit(&conf.ipv4.udp.stats_print,
K_SECONDS(STATS_TIMER));
k_work_reschedule(&conf.ipv4.udp.stats_print, K_SECONDS(STATS_TIMER));
while (ret == 0) {
ret = process_udp(&conf.ipv4);
@ -183,8 +182,7 @@ static void process_udp6(void)
return;
}
k_delayed_work_submit(&conf.ipv6.udp.stats_print,
K_SECONDS(STATS_TIMER));
k_work_reschedule(&conf.ipv6.udp.stats_print, K_SECONDS(STATS_TIMER));
while (ret == 0) {
ret = process_udp(&conf.ipv6);
@ -211,7 +209,7 @@ static void print_stats(struct k_work *work)
atomic_set(&data->udp.bytes_received, 0);
}
k_delayed_work_submit(&data->udp.stats_print, K_SECONDS(STATS_TIMER));
k_work_reschedule(&data->udp.stats_print, K_SECONDS(STATS_TIMER));
}
void start_udp(void)
@ -221,7 +219,7 @@ void start_udp(void)
k_mem_domain_add_thread(&app_domain, udp6_thread_id);
#endif
k_delayed_work_init(&conf.ipv6.udp.stats_print, print_stats);
k_work_init_delayable(&conf.ipv6.udp.stats_print, print_stats);
k_thread_name_set(udp6_thread_id, "udp6");
k_thread_start(udp6_thread_id);
}
@ -231,7 +229,7 @@ void start_udp(void)
k_mem_domain_add_thread(&app_domain, udp4_thread_id);
#endif
k_delayed_work_init(&conf.ipv4.udp.stats_print, print_stats);
k_work_init_delayable(&conf.ipv4.udp.stats_print, print_stats);
k_thread_name_set(udp4_thread_id, "udp4");
k_thread_start(udp4_thread_id);
}

View file

@ -14,7 +14,7 @@ LOG_MODULE_REGISTER(net_stats_sample, LOG_LEVEL_DBG);
#include <net/net_if.h>
#include <net/net_stats.h>
static struct k_delayed_work stats_timer;
static struct k_work_delayable stats_timer;
#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
#define GET_STAT(iface, s) (iface ? iface->stats.s : data->s)
@ -177,13 +177,13 @@ static void stats(struct k_work *work)
net_if_foreach(eth_iface_cb, &data);
#endif
k_delayed_work_submit(&stats_timer, K_SECONDS(CONFIG_SAMPLE_PERIOD));
k_work_reschedule(&stats_timer, K_SECONDS(CONFIG_SAMPLE_PERIOD));
}
static void init_app(void)
{
k_delayed_work_init(&stats_timer, stats);
k_delayed_work_submit(&stats_timer, K_SECONDS(CONFIG_SAMPLE_PERIOD));
k_work_init_delayable(&stats_timer, stats);
k_work_reschedule(&stats_timer, K_SECONDS(CONFIG_SAMPLE_PERIOD));
}
void main(void)

View file

@ -23,7 +23,7 @@ LOG_MODULE_REGISTER(net_ipv4_autoconf, CONFIG_NET_IPV4_AUTO_LOG_LEVEL);
#include "ipv4_autoconf_internal.h"
/* Have only one timer in order to save memory */
static struct k_delayed_work ipv4auto_timer;
static struct k_work_delayable ipv4auto_timer;
/* Track currently active timers */
static sys_slist_t ipv4auto_ifaces;
@ -248,14 +248,12 @@ static uint32_t ipv4_autoconf_get_timeout(struct net_if_ipv4_autoconf *ipv4auto)
static void ipv4_autoconf_submit_work(uint32_t timeout)
{
if (!k_delayed_work_remaining_get(&ipv4auto_timer) ||
timeout < k_delayed_work_remaining_get(&ipv4auto_timer)) {
k_delayed_work_cancel(&ipv4auto_timer);
k_delayed_work_submit(&ipv4auto_timer, K_MSEC(timeout));
k_work_cancel_delayable(&ipv4auto_timer);
k_work_reschedule(&ipv4auto_timer, K_MSEC(timeout));
NET_DBG("Next wakeup in %d ms",
k_delayed_work_remaining_get(&ipv4auto_timer));
}
NET_DBG("Next wakeup in %d ms",
k_ticks_to_ms_ceil32(
k_work_delayable_remaining_get(&ipv4auto_timer)));
}
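
One recurring detail (editorial note): k_delayed_work_remaining_get()
reported milliseconds, while k_work_delayable_remaining_get() reports
kernel ticks, hence the k_ticks_to_ms_ceil32() wrappers added at the
logging and comparison sites above. A minimal sketch with an
illustrative helper name:

#include <kernel.h>

static struct k_work_delayable ipv4auto_timer;

static void submit_if_sooner(uint32_t timeout_ms)
{
	uint32_t remaining_ms = k_ticks_to_ms_ceil32(
		k_work_delayable_remaining_get(&ipv4auto_timer));

	/* Re-arm only when idle or when the new deadline is earlier.
	 * k_work_reschedule() itself drops any pending timeout, so the
	 * explicit cancel kept in the hunk above is redundant but
	 * harmless.
	 */
	if (remaining_ms == 0U || timeout_ms < remaining_ms) {
		k_work_reschedule(&ipv4auto_timer, K_MSEC(timeout_ms));
	}
}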
static bool ipv4_autoconf_check_timeout(int64_t start, uint32_t time, int64_t timeout)
@ -314,7 +312,7 @@ static void ipv4_autoconf_timeout(struct k_work *work)
if (timeout_update != UINT32_MAX && timeout_update > 0) {
NET_DBG("Waiting for %u ms", timeout_update);
k_delayed_work_submit(&ipv4auto_timer, K_MSEC(timeout_update));
k_work_reschedule(&ipv4auto_timer, K_MSEC(timeout_update));
}
}
@ -376,11 +374,11 @@ void net_ipv4_autoconf_reset(struct net_if *iface)
sys_slist_find_and_remove(&ipv4auto_ifaces, &cfg->ipv4auto.node);
if (sys_slist_is_empty(&ipv4auto_ifaces)) {
k_delayed_work_cancel(&ipv4auto_timer);
k_work_cancel_delayable(&ipv4auto_timer);
}
}
void net_ipv4_autoconf_init(void)
{
k_delayed_work_init(&ipv4auto_timer, ipv4_autoconf_timeout);
k_work_init_delayable(&ipv4auto_timer, ipv4_autoconf_timeout);
}

View file

@ -404,7 +404,7 @@ struct net_ipv6_reassembly {
* Timeout for cancelling the reassembly. The timer is also used
* to detect whether this reassembly slot is in use.
*/
struct k_delayed_work timer;
struct k_work_delayable timer;
/** Pointers to pending fragments */
struct net_pkt *pkt[NET_IPV6_FRAGMENTS_MAX_PKT];

View file

@ -128,15 +128,14 @@ static struct net_ipv6_reassembly *reassembly_get(uint32_t id,
int i, avail = -1;
for (i = 0; i < CONFIG_NET_IPV6_FRAGMENT_MAX_COUNT; i++) {
if (k_delayed_work_remaining_get(&reassembly[i].timer) &&
if (k_work_delayable_remaining_get(&reassembly[i].timer) &&
reassembly[i].id == id &&
net_ipv6_addr_cmp(src, &reassembly[i].src) &&
net_ipv6_addr_cmp(dst, &reassembly[i].dst)) {
return &reassembly[i];
}
if (k_delayed_work_remaining_get(&reassembly[i].timer)) {
if (k_work_delayable_remaining_get(&reassembly[i].timer)) {
continue;
}
@ -149,8 +148,7 @@ static struct net_ipv6_reassembly *reassembly_get(uint32_t id,
return NULL;
}
k_delayed_work_submit(&reassembly[avail].timer,
IPV6_REASSEMBLY_TIMEOUT);
k_work_reschedule(&reassembly[avail].timer, IPV6_REASSEMBLY_TIMEOUT);
net_ipaddr_copy(&reassembly[avail].src, src);
net_ipaddr_copy(&reassembly[avail].dst, dst);
@ -177,10 +175,9 @@ static bool reassembly_cancel(uint32_t id,
continue;
}
remaining = k_delayed_work_remaining_get(&reassembly[i].timer);
if (remaining) {
k_delayed_work_cancel(&reassembly[i].timer);
}
remaining = k_ticks_to_ms_ceil32(
k_work_delayable_remaining_get(&reassembly[i].timer));
k_work_cancel_delayable(&reassembly[i].timer);
NET_DBG("IPv6 reassembly id 0x%x remaining %d ms",
reassembly[i].id, remaining);
@ -211,7 +208,8 @@ static void reassembly_info(char *str, struct net_ipv6_reassembly *reass)
NET_DBG("%s id 0x%x src %s dst %s remain %d ms", str, reass->id,
log_strdup(net_sprint_ipv6_addr(&reass->src)),
log_strdup(net_sprint_ipv6_addr(&reass->dst)),
k_delayed_work_remaining_get(&reass->timer));
k_ticks_to_ms_ceil32(
k_work_delayable_remaining_get(&reass->timer)));
}
static void reassembly_timeout(struct k_work *work)
@ -238,7 +236,7 @@ static void reassemble_packet(struct net_ipv6_reassembly *reass)
uint8_t next_hdr;
int i, len;
k_delayed_work_cancel(&reass->timer);
k_work_cancel_delayable(&reass->timer);
NET_ASSERT(reass->pkt[0]);
@ -355,7 +353,7 @@ void net_ipv6_frag_foreach(net_ipv6_frag_cb_t cb, void *user_data)
for (i = 0; reassembly_init_done &&
i < CONFIG_NET_IPV6_FRAGMENT_MAX_COUNT; i++) {
if (!k_delayed_work_remaining_get(&reassembly[i].timer)) {
if (!k_work_delayable_remaining_get(&reassembly[i].timer)) {
continue;
}
@ -441,8 +439,8 @@ enum net_verdict net_ipv6_handle_fragment_hdr(struct net_pkt *pkt,
* so we must do it at runtime.
*/
for (i = 0; i < CONFIG_NET_IPV6_FRAGMENT_MAX_COUNT; i++) {
k_delayed_work_init(&reassembly[i].timer,
reassembly_timeout);
k_work_init_delayable(&reassembly[i].timer,
reassembly_timeout);
}
reassembly_init_done = true;

View file

@ -67,7 +67,7 @@ static struct k_sem nbr_lock;
#endif
#if defined(CONFIG_NET_IPV6_ND)
static struct k_delayed_work ipv6_nd_reachable_timer;
static struct k_work_delayable ipv6_nd_reachable_timer;
static void ipv6_nd_reachable_timeout(struct k_work *work);
static void ipv6_nd_restart_reachable_timer(struct net_nbr *nbr, int64_t time);
#endif
@ -84,7 +84,7 @@ extern void net_neighbor_data_remove(struct net_nbr *nbr);
extern void net_neighbor_table_clear(struct net_nbr_table *table);
/** Neighbor Solicitation reply timer */
static struct k_delayed_work ipv6_ns_reply_timer;
static struct k_work_delayable ipv6_ns_reply_timer;
NET_NBR_POOL_INIT(net_neighbor_pool,
CONFIG_NET_IPV6_MAX_NEIGHBORS,
@ -364,10 +364,10 @@ static void ipv6_ns_reply_timeout(struct k_work *work)
remaining = data->send_ns + NS_REPLY_TIMEOUT - current;
if (remaining > 0) {
if (!k_delayed_work_remaining_get(
&ipv6_ns_reply_timer)) {
k_delayed_work_submit(&ipv6_ns_reply_timer,
K_MSEC(remaining));
if (!k_work_delayable_remaining_get(
&ipv6_ns_reply_timer)) {
k_work_reschedule(&ipv6_ns_reply_timer,
K_MSEC(remaining));
}
continue;
@ -1402,9 +1402,10 @@ static void ipv6_nd_restart_reachable_timer(struct net_nbr *nbr, int64_t time)
net_ipv6_nbr_data(nbr)->reachable_timeout = time;
}
remaining = k_delayed_work_remaining_get(&ipv6_nd_reachable_timer);
remaining = k_ticks_to_ms_ceil32(
k_work_delayable_remaining_get(&ipv6_nd_reachable_timer));
if (!remaining || remaining > time) {
k_delayed_work_submit(&ipv6_nd_reachable_timer, K_MSEC(time));
k_work_reschedule(&ipv6_nd_reachable_timer, K_MSEC(time));
}
}
@ -1926,9 +1927,9 @@ int net_ipv6_send_ns(struct net_if *iface,
net_ipv6_nbr_data(nbr)->send_ns = k_uptime_get();
/* Let's start the timer if necessary */
if (!k_delayed_work_remaining_get(&ipv6_ns_reply_timer)) {
k_delayed_work_submit(&ipv6_ns_reply_timer,
K_MSEC(NS_REPLY_TIMEOUT));
if (!k_work_delayable_remaining_get(&ipv6_ns_reply_timer)) {
k_work_reschedule(&ipv6_ns_reply_timer,
K_MSEC(NS_REPLY_TIMEOUT));
}
}
@ -2487,12 +2488,12 @@ void net_ipv6_nbr_init(void)
#if defined(CONFIG_NET_IPV6_NBR_CACHE)
net_icmpv6_register_handler(&ns_input_handler);
net_icmpv6_register_handler(&na_input_handler);
k_delayed_work_init(&ipv6_ns_reply_timer, ipv6_ns_reply_timeout);
k_work_init_delayable(&ipv6_ns_reply_timer, ipv6_ns_reply_timeout);
#endif
#if defined(CONFIG_NET_IPV6_ND)
net_icmpv6_register_handler(&ra_input_handler);
k_delayed_work_init(&ipv6_nd_reachable_timer,
ipv6_nd_reachable_timeout);
k_work_init_delayable(&ipv6_nd_reachable_timer,
ipv6_nd_reachable_timeout);
k_sem_init(&nbr_lock, 1, K_SEM_MAX_LIMIT);
#endif
}

View file

@ -45,31 +45,31 @@ extern struct net_if _net_if_list_end[];
#if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
static struct net_if_router routers[CONFIG_NET_MAX_ROUTERS];
static struct k_delayed_work router_timer;
static struct k_work_delayable router_timer;
static sys_slist_t active_router_timers;
#endif
#if defined(CONFIG_NET_NATIVE_IPV6)
/* Timer that triggers network address renewal */
static struct k_delayed_work address_lifetime_timer;
static struct k_work_delayable address_lifetime_timer;
/* Track currently active address lifetime timers */
static sys_slist_t active_address_lifetime_timers;
/* Timer that triggers IPv6 prefix lifetime */
static struct k_delayed_work prefix_lifetime_timer;
static struct k_work_delayable prefix_lifetime_timer;
/* Track currently active IPv6 prefix lifetime timers */
static sys_slist_t active_prefix_lifetime_timers;
#if defined(CONFIG_NET_IPV6_DAD)
/** Duplicate address detection (DAD) timer */
static struct k_delayed_work dad_timer;
static struct k_work_delayable dad_timer;
static sys_slist_t active_dad_timers;
#endif
#if defined(CONFIG_NET_IPV6_ND)
static struct k_delayed_work rs_timer;
static struct k_work_delayable rs_timer;
static sys_slist_t active_rs_timers;
#endif
@ -750,9 +750,9 @@ static void iface_router_update_timer(uint32_t now)
}
if (new_delay == UINT32_MAX) {
k_delayed_work_cancel(&router_timer);
k_work_cancel_delayable(&router_timer);
} else {
k_delayed_work_submit(&router_timer, K_MSEC(new_delay));
k_work_reschedule(&router_timer, K_MSEC(new_delay));
}
k_mutex_unlock(&lock);
@ -937,7 +937,7 @@ out:
static void iface_router_init(void)
{
k_delayed_work_init(&router_timer, iface_router_expired);
k_work_init_delayable(&router_timer, iface_router_expired);
sys_slist_init(&active_router_timers);
}
#else
@ -1150,7 +1150,7 @@ static void dad_timeout(struct k_work *work)
}
if ((ifaddr != NULL) && (delay > 0)) {
k_delayed_work_submit(&dad_timer, K_MSEC((uint32_t)delay));
k_work_reschedule(&dad_timer, K_MSEC((uint32_t)delay));
}
k_mutex_unlock(&lock);
@ -1177,9 +1177,9 @@ static void net_if_ipv6_start_dad(struct net_if *iface,
sys_slist_append(&active_dad_timers, &ifaddr->dad_node);
/* FUTURE: use schedule, not reschedule. */
if (!k_delayed_work_remaining_get(&dad_timer)) {
k_delayed_work_submit(&dad_timer,
K_MSEC(DAD_TIMEOUT));
if (!k_work_delayable_remaining_get(&dad_timer)) {
k_work_reschedule(&dad_timer,
K_MSEC(DAD_TIMEOUT));
}
}
} else {
@ -1266,7 +1266,7 @@ out:
static inline void iface_ipv6_dad_init(void)
{
k_delayed_work_init(&dad_timer, dad_timeout);
k_work_init_delayable(&dad_timer, dad_timeout);
sys_slist_init(&active_dad_timers);
}
@ -1333,9 +1333,8 @@ static void rs_timeout(struct k_work *work)
}
if ((ipv6 != NULL) && (delay > 0)) {
k_delayed_work_submit(&rs_timer,
K_MSEC(ipv6->rs_start +
RS_TIMEOUT - current_time));
k_work_reschedule(&rs_timer, K_MSEC(ipv6->rs_start +
RS_TIMEOUT - current_time));
}
k_mutex_unlock(&lock);
@ -1359,8 +1358,8 @@ void net_if_start_rs(struct net_if *iface)
sys_slist_append(&active_rs_timers, &ipv6->rs_node);
/* FUTURE: use schedule, not reschedule. */
if (!k_delayed_work_remaining_get(&rs_timer)) {
k_delayed_work_submit(&rs_timer, K_MSEC(RS_TIMEOUT));
if (!k_work_delayable_remaining_get(&rs_timer)) {
k_work_reschedule(&rs_timer, K_MSEC(RS_TIMEOUT));
}
}
@ -1389,7 +1388,7 @@ out:
static inline void iface_ipv6_nd_init(void)
{
k_delayed_work_init(&rs_timer, rs_timeout);
k_work_init_delayable(&rs_timer, rs_timeout);
sys_slist_init(&active_rs_timers);
}
@ -1548,8 +1547,7 @@ static void address_lifetime_timeout(struct k_work *work)
if (next_update != UINT32_MAX) {
NET_DBG("Waiting for %d ms", (int32_t)next_update);
k_delayed_work_submit(&address_lifetime_timer,
K_MSEC(next_update));
k_work_reschedule(&address_lifetime_timer, K_MSEC(next_update));
}
k_mutex_unlock(&lock);
@ -1568,7 +1566,7 @@ static void address_start_timer(struct net_if_addr *ifaddr, uint32_t vlifetime)
&ifaddr->lifetime.node);
net_timeout_set(&ifaddr->lifetime, vlifetime, k_uptime_get_32());
k_delayed_work_submit(&address_lifetime_timer, K_NO_WAIT);
k_work_reschedule(&address_lifetime_timer, K_NO_WAIT);
}
void net_if_ipv6_addr_update_lifetime(struct net_if_addr *ifaddr,
@ -1736,7 +1734,8 @@ bool net_if_ipv6_addr_rm(struct net_if *iface, const struct in6_addr *addr)
if (sys_slist_is_empty(
&active_address_lifetime_timers)) {
k_delayed_work_cancel(&address_lifetime_timer);
k_work_cancel_delayable(
&address_lifetime_timer);
}
}
@ -2143,8 +2142,7 @@ static void prefix_lifetime_timeout(struct k_work *work)
}
if (next_update != UINT32_MAX) {
k_delayed_work_submit(&prefix_lifetime_timer,
K_MSEC(next_update));
k_work_reschedule(&prefix_lifetime_timer, K_MSEC(next_update));
}
k_mutex_unlock(&lock);
@ -2161,7 +2159,7 @@ static void prefix_start_timer(struct net_if_ipv6_prefix *ifprefix,
&ifprefix->lifetime.node);
net_timeout_set(&ifprefix->lifetime, lifetime, k_uptime_get_32());
k_delayed_work_submit(&prefix_lifetime_timer, K_NO_WAIT);
k_work_reschedule(&prefix_lifetime_timer, K_NO_WAIT);
k_mutex_unlock(&lock);
}
@ -2793,8 +2791,9 @@ static void iface_ipv6_init(int if_count)
iface_ipv6_dad_init();
iface_ipv6_nd_init();
k_delayed_work_init(&address_lifetime_timer, address_lifetime_timeout);
k_delayed_work_init(&prefix_lifetime_timer, prefix_lifetime_timeout);
k_work_init_delayable(&address_lifetime_timer,
address_lifetime_timeout);
k_work_init_delayable(&prefix_lifetime_timer, prefix_lifetime_timeout);
if (if_count > ARRAY_SIZE(ipv6_addresses)) {
NET_WARN("You have %lu IPv6 net_if addresses but %d "

View file

@ -1574,9 +1574,8 @@ static void ipv6_frag_cb(struct net_ipv6_reassembly *reass,
snprintk(src, ADDR_LEN, "%s", net_sprint_ipv6_addr(&reass->src));
PR("%p 0x%08x %5d %16s\t%16s\n",
reass, reass->id,
k_delayed_work_remaining_get(&reass->timer),
PR("%p 0x%08x %5d %16s\t%16s\n", reass, reass->id,
k_ticks_to_ms_ceil32(k_work_delayable_remaining_get(&reass->timer)),
src, net_sprint_ipv6_addr(&reass->dst));
for (i = 0; i < NET_IPV6_FRAGMENTS_MAX_PKT; i++) {

View file

@ -224,10 +224,9 @@ void net_tc_tx_init(void)
"coop" : "preempt",
priority);
k_work_q_start(&tx_classes[i].work_q,
tx_stack[i],
K_KERNEL_STACK_SIZEOF(tx_stack[i]),
priority);
k_work_queue_start(&tx_classes[i].work_q, tx_stack[i],
K_KERNEL_STACK_SIZEOF(tx_stack[i]), priority,
NULL);
if (IS_ENABLED(CONFIG_THREAD_NAME)) {
char name[MAX_NAME_LEN];
@ -267,10 +266,9 @@ void net_tc_rx_init(void)
"coop" : "preempt",
priority);
k_work_q_start(&rx_classes[i].work_q,
rx_stack[i],
K_KERNEL_STACK_SIZEOF(rx_stack[i]),
priority);
k_work_queue_start(&rx_classes[i].work_q, rx_stack[i],
K_KERNEL_STACK_SIZEOF(rx_stack[i]), priority,
NULL);
if (IS_ENABLED(CONFIG_THREAD_NAME)) {
char name[MAX_NAME_LEN];

View file

@ -344,7 +344,7 @@ static void tcp_send_queue_flush(struct tcp *conn)
{
struct net_pkt *pkt;
k_delayed_work_cancel(&conn->send_timer);
k_work_cancel_delayable(&conn->send_timer);
while ((pkt = tcp_slist(conn, &conn->send_queue, get,
struct net_pkt, next))) {
@ -413,15 +413,15 @@ static int tcp_conn_unref(struct tcp *conn)
tcp_send_queue_flush(conn);
k_delayed_work_cancel(&conn->send_data_timer);
k_work_cancel_delayable(&conn->send_data_timer);
tcp_pkt_unref(conn->send_data);
if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
tcp_pkt_unref(conn->queue_recv_data);
}
k_delayed_work_cancel(&conn->timewait_timer);
k_delayed_work_cancel(&conn->fin_timer);
k_work_cancel_delayable(&conn->timewait_timer);
k_work_cancel_delayable(&conn->fin_timer);
sys_slist_find_and_remove(&tcp_conns, &conn->next);
@ -489,16 +489,16 @@ static bool tcp_send_process_no_lock(struct tcp *conn)
tcp_send(pkt);
if (forget == false && !k_delayed_work_remaining_get(
&conn->send_timer)) {
if (forget == false &&
!k_work_delayable_remaining_get(&conn->send_timer)) {
conn->send_retries = tcp_retries;
conn->in_retransmission = true;
}
}
if (conn->in_retransmission) {
k_delayed_work_submit_to_queue(&tcp_work_q, &conn->send_timer,
K_MSEC(tcp_rto));
k_work_reschedule_for_queue(&tcp_work_q, &conn->send_timer,
K_MSEC(tcp_rto));
}
out:
@ -527,7 +527,7 @@ static void tcp_send_timer_cancel(struct tcp *conn)
return;
}
k_delayed_work_cancel(&conn->send_timer);
k_work_cancel_delayable(&conn->send_timer);
{
struct net_pkt *pkt = tcp_slist(conn, &conn->send_queue, get,
@ -542,8 +542,8 @@ static void tcp_send_timer_cancel(struct tcp *conn)
conn->in_retransmission = false;
} else {
conn->send_retries = tcp_retries;
k_delayed_work_submit_to_queue(&tcp_work_q, &conn->send_timer,
K_MSEC(tcp_rto));
k_work_reschedule_for_queue(&tcp_work_q, &conn->send_timer,
K_MSEC(tcp_rto));
}
}
@ -694,7 +694,7 @@ static size_t tcp_check_pending_data(struct tcp *conn, struct net_pkt *pkt,
conn->queue_recv_data->buffer);
conn->queue_recv_data->buffer = NULL;
k_delayed_work_cancel(&conn->recv_queue_timer);
k_work_cancel_delayable(&conn->recv_queue_timer);
}
}
@ -993,7 +993,7 @@ static int tcp_send_queued_data(struct tcp *conn)
subscribe = true;
}
if (k_delayed_work_remaining_get(&conn->send_data_timer)) {
if (k_work_delayable_remaining_get(&conn->send_data_timer)) {
subscribe = false;
}
@ -1002,14 +1002,13 @@ static int tcp_send_queued_data(struct tcp *conn)
*/
if (ret == -ENOBUFS) {
NET_DBG("No bufs, cancelling retransmit timer");
k_delayed_work_cancel(&conn->send_data_timer);
k_work_cancel_delayable(&conn->send_data_timer);
}
if (subscribe) {
conn->send_data_retries = 0;
k_delayed_work_submit_to_queue(&tcp_work_q,
&conn->send_data_timer,
K_MSEC(tcp_rto));
k_work_reschedule_for_queue(&tcp_work_q, &conn->send_data_timer,
K_MSEC(tcp_rto));
}
out:
return ret;
@ -1058,9 +1057,8 @@ static void tcp_resend_data(struct k_work *work)
NET_DBG("TCP connection in active close, "
"not disposing yet (waiting %dms)",
FIN_TIMEOUT_MS);
k_delayed_work_submit_to_queue(&tcp_work_q,
&conn->fin_timer,
FIN_TIMEOUT);
k_work_reschedule_for_queue(
&tcp_work_q, &conn->fin_timer, FIN_TIMEOUT);
conn_state(conn, TCP_FIN_WAIT_1);
@ -1074,8 +1072,8 @@ static void tcp_resend_data(struct k_work *work)
}
}
k_delayed_work_submit_to_queue(&tcp_work_q, &conn->send_data_timer,
K_MSEC(tcp_rto));
k_work_reschedule_for_queue(&tcp_work_q, &conn->send_data_timer,
K_MSEC(tcp_rto));
out:
k_mutex_unlock(&conn->lock);
@ -1169,11 +1167,11 @@ static struct tcp *tcp_conn_alloc(void)
sys_slist_init(&conn->send_queue);
k_delayed_work_init(&conn->send_timer, tcp_send_process);
k_delayed_work_init(&conn->timewait_timer, tcp_timewait_timeout);
k_delayed_work_init(&conn->fin_timer, tcp_fin_timeout);
k_delayed_work_init(&conn->send_data_timer, tcp_resend_data);
k_delayed_work_init(&conn->recv_queue_timer, tcp_cleanup_recv_queue);
k_work_init_delayable(&conn->send_timer, tcp_send_process);
k_work_init_delayable(&conn->timewait_timer, tcp_timewait_timeout);
k_work_init_delayable(&conn->fin_timer, tcp_fin_timeout);
k_work_init_delayable(&conn->send_data_timer, tcp_resend_data);
k_work_init_delayable(&conn->recv_queue_timer, tcp_cleanup_recv_queue);
tcp_conn_ref(conn);
@ -1581,10 +1579,10 @@ static void tcp_queue_recv_data(struct tcp *conn, struct net_pkt *pkt,
/* We need to keep the received data but free the pkt */
pkt->buffer = NULL;
if (!k_delayed_work_pending(&conn->recv_queue_timer)) {
k_delayed_work_submit_to_queue(&tcp_work_q,
&conn->recv_queue_timer,
K_MSEC(CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT));
if (!k_work_delayable_is_pending(&conn->recv_queue_timer)) {
k_work_reschedule_for_queue(
&tcp_work_q, &conn->recv_queue_timer,
K_MSEC(CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT));
}
}
}
@ -1710,9 +1708,9 @@ next_state:
/* Close the connection if we do not receive ACK on time.
*/
k_delayed_work_submit_to_queue(&tcp_work_q,
&conn->establish_timer,
ACK_TIMEOUT);
k_work_reschedule_for_queue(&tcp_work_q,
&conn->establish_timer,
ACK_TIMEOUT);
} else {
tcp_out(conn, SYN);
conn_seq(conn, + 1);
@ -1722,7 +1720,7 @@ next_state:
case TCP_SYN_RECEIVED:
if (FL(&fl, &, ACK, th_ack(th) == conn->seq &&
th_seq(th) == conn->ack)) {
k_delayed_work_cancel(&conn->establish_timer);
k_work_cancel_delayable(&conn->establish_timer);
tcp_send_timer_cancel(conn);
next = TCP_ESTABLISHED;
net_context_set_state(conn->context,
@ -1826,13 +1824,14 @@ next_state:
conn_send_data_dump(conn);
if (!k_delayed_work_remaining_get(&conn->send_data_timer)) {
if (!k_work_delayable_remaining_get(
&conn->send_data_timer)) {
NET_DBG("conn: %p, Missing a subscription "
"of the send_data queue timer", conn);
break;
}
conn->send_data_retries = 0;
k_delayed_work_cancel(&conn->send_data_timer);
k_work_cancel_delayable(&conn->send_data_timer);
if (conn->data_mode == TCP_DATA_MODE_RESEND) {
conn->unacked_len = 0;
}
@ -1904,7 +1903,7 @@ next_state:
if (th && (FL(&fl, ==, FIN, th_seq(th) == conn->ack) ||
FL(&fl, ==, FIN | ACK, th_seq(th) == conn->ack))) {
/* Received FIN on FIN_WAIT_2, so cancel the timer */
k_delayed_work_cancel(&conn->fin_timer);
k_work_cancel_delayable(&conn->fin_timer);
conn_ack(conn, + 1);
tcp_out(conn, ACK);
@ -1918,9 +1917,9 @@ next_state:
}
break;
case TCP_TIME_WAIT:
k_delayed_work_submit_to_queue(&tcp_work_q,
&conn->timewait_timer,
K_MSEC(CONFIG_NET_TCP_TIME_WAIT_DELAY));
k_work_reschedule_for_queue(
&tcp_work_q, &conn->timewait_timer,
K_MSEC(CONFIG_NET_TCP_TIME_WAIT_DELAY));
break;
default:
NET_ASSERT(false, "%s is unimplemented",
@ -1996,17 +1995,16 @@ int net_tcp_put(struct net_context *context)
/* How long to wait until all the data has been sent?
*/
k_delayed_work_submit_to_queue(&tcp_work_q,
&conn->send_data_timer,
K_MSEC(tcp_rto));
k_work_reschedule_for_queue(&tcp_work_q,
&conn->send_data_timer,
K_MSEC(tcp_rto));
} else {
int ret;
NET_DBG("TCP connection in active close, not "
"disposing yet (waiting %dms)", FIN_TIMEOUT_MS);
k_delayed_work_submit_to_queue(&tcp_work_q,
&conn->fin_timer,
FIN_TIMEOUT);
k_work_reschedule_for_queue(
&tcp_work_q, &conn->fin_timer, FIN_TIMEOUT);
ret = tcp_out_ext(conn, FIN | ACK, NULL,
conn->seq + conn->unacked_len);
@ -2084,8 +2082,8 @@ int net_tcp_queue_data(struct net_context *context, struct net_pkt *pkt)
* conn is embedded, and calling that function directly here
* and in the work handler.
*/
(void)k_work_schedule_for_queue(&tcp_work_q,
&conn->send_data_timer.work, K_NO_WAIT);
(void)k_work_schedule_for_queue(
&tcp_work_q, &conn->send_data_timer, K_NO_WAIT);
ret = -EAGAIN;
goto out;
@ -2732,9 +2730,9 @@ void net_tcp_init(void)
/* Use private workqueue in order not to block the system work queue.
*/
k_work_q_start(&tcp_work_q, work_q_stack,
K_KERNEL_STACK_SIZEOF(work_q_stack),
THREAD_PRIORITY);
k_work_queue_start(&tcp_work_q, work_q_stack,
K_KERNEL_STACK_SIZEOF(work_q_stack), THREAD_PRIORITY,
NULL);
k_thread_name_set(&tcp_work_q.thread, "tcp_work");
NET_DBG("Workq started. Thread ID: %p", &tcp_work_q.thread);

View file

@ -122,18 +122,20 @@
(_conn)->state = _s; \
})
#define conn_send_data_dump(_conn) \
({ \
NET_DBG("conn: %p total=%zd, unacked_len=%d, " \
"send_win=%hu, mss=%hu", \
(_conn), net_pkt_get_len((_conn)->send_data), \
conn->unacked_len, conn->send_win, \
(uint16_t)conn_mss((_conn))); \
NET_DBG("conn: %p send_data_timer=%hu, send_data_retries=%hu", \
(_conn), \
(bool)k_delayed_work_remaining_get(&(_conn)->send_data_timer),\
(_conn)->send_data_retries); \
})
#define conn_send_data_dump(_conn) \
({ \
NET_DBG("conn: %p total=%zd, unacked_len=%d, " \
"send_win=%hu, mss=%hu", \
(_conn), net_pkt_get_len((_conn)->send_data), \
conn->unacked_len, conn->send_win, \
(uint16_t)conn_mss((_conn))); \
NET_DBG("conn: %p send_data_timer=%hu, send_data_retries=%hu", \
(_conn), \
(bool)k_ticks_to_ms_ceil32( \
k_work_delayable_remaining_get( \
&(_conn)->send_data_timer)), \
(_conn)->send_data_retries); \
})
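
Related editorial note: the old code often tested
k_delayed_work_remaining_get() != 0 to mean "timer armed". The new API
has a direct predicate, k_work_delayable_is_pending(), adopted for
recv_queue_timer above, while other call sites keep the remaining-time
idiom. The two are close but not identical: is_pending also reports
true once the delay has elapsed but the handler is still queued or
running. Sketch:

#include <kernel.h>

static struct k_work_delayable tmr; /* assume initialized elsewhere */

bool timer_armed(void)
{
	/* Predicate form; avoids any tick-to-ms conversion. The
	 * remaining-ticks idiom, k_work_delayable_remaining_get(&tmr) != 0,
	 * answers the narrower question of whether the countdown is
	 * still running.
	 */
	return k_work_delayable_is_pending(&tmr);
}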
#define TCPOPT_END 0
#define TCPOPT_NOP 1
@ -223,17 +225,17 @@ struct tcp { /* TCP connection */
struct k_sem connect_sem; /* semaphore for blocking connect */
struct k_fifo recv_data; /* temp queue before passing data to app */
struct tcp_options recv_options;
struct k_delayed_work send_timer;
struct k_delayed_work recv_queue_timer;
struct k_delayed_work send_data_timer;
struct k_delayed_work timewait_timer;
struct k_work_delayable send_timer;
struct k_work_delayable recv_queue_timer;
struct k_work_delayable send_data_timer;
struct k_work_delayable timewait_timer;
union {
/* Because the FIN and establish timers never run at the
* same time, share one timer between them to save
* memory.
*/
struct k_delayed_work fin_timer;
struct k_delayed_work establish_timer;
struct k_work_delayable fin_timer;
struct k_work_delayable establish_timer;
};
union tcp_endpoint src;
union tcp_endpoint dst;

View file

@ -80,8 +80,8 @@ static void double_interval_timeout(struct k_work *work)
NET_DBG("doubling time %u", rand_time);
trickle->Istart = k_uptime_get_32() + rand_time;
k_delayed_work_init(&trickle->timer, trickle_timeout);
k_delayed_work_submit(&trickle->timer, K_MSEC(rand_time));
k_work_init_delayable(&trickle->timer, trickle_timeout);
k_work_reschedule(&trickle->timer, K_MSEC(rand_time));
NET_DBG("last end %u new end %u for %u I %u",
last_end, get_end(trickle), trickle->Istart, trickle->I);
@ -100,8 +100,8 @@ static inline void reschedule(struct net_trickle *trickle)
NET_DBG("Clock wrap");
}
k_delayed_work_init(&trickle->timer, double_interval_timeout);
k_delayed_work_submit(&trickle->timer, K_MSEC(diff));
k_work_init_delayable(&trickle->timer, double_interval_timeout);
k_work_reschedule(&trickle->timer, K_MSEC(diff));
}
static void trickle_timeout(struct k_work *work)
@ -135,7 +135,7 @@ static void setup_new_interval(struct net_trickle *trickle)
trickle->Istart = k_uptime_get_32();
k_delayed_work_submit(&trickle->timer, K_MSEC(t));
k_work_reschedule(&trickle->timer, K_MSEC(t));
NET_DBG("new interval at %d ends %d t %d I %d",
trickle->Istart,
@ -167,7 +167,7 @@ int net_trickle_create(struct net_trickle *trickle,
trickle->Imin, trickle->Imax, trickle->k,
trickle->Imax_abs);
k_delayed_work_init(&trickle->timer, trickle_timeout);
k_work_init_delayable(&trickle->timer, trickle_timeout);
return 0;
}
@ -198,7 +198,7 @@ int net_trickle_stop(struct net_trickle *trickle)
{
NET_ASSERT(trickle);
k_delayed_work_cancel(&trickle->timer);
k_work_cancel_delayable(&trickle->timer);
trickle->I = 0U;

View file

@ -1741,9 +1741,9 @@ void net_6locan_init(struct net_if *iface)
thread_priority = K_PRIO_PREEMPT(6);
}
k_work_q_start(&net_canbus_workq, net_canbus_stack,
K_KERNEL_STACK_SIZEOF(net_canbus_stack),
thread_priority);
k_work_queue_start(&net_canbus_workq, net_canbus_stack,
K_KERNEL_STACK_SIZEOF(net_canbus_stack),
thread_priority, NULL);
k_thread_name_set(&net_canbus_workq.thread, "isotp_work");
NET_DBG("Workq started. Thread ID: %p", &net_canbus_workq.thread);
}

View file

@ -30,7 +30,7 @@ static sys_slist_t arp_free_entries;
static sys_slist_t arp_pending_entries;
static sys_slist_t arp_table;
struct k_delayed_work arp_request_timer;
struct k_work_delayable arp_request_timer;
static void arp_entry_cleanup(struct arp_entry *entry, bool pending)
{
@ -122,7 +122,7 @@ static struct arp_entry *arp_entry_get_pending(struct net_if *iface,
}
if (sys_slist_is_empty(&arp_pending_entries)) {
k_delayed_work_cancel(&arp_request_timer);
k_work_cancel_delayable(&arp_request_timer);
}
return entry;
@ -171,9 +171,9 @@ static void arp_entry_register_pending(struct arp_entry *entry)
entry->req_start = k_uptime_get_32();
/* Let's start the timer if necessary */
if (!k_delayed_work_remaining_get(&arp_request_timer)) {
k_delayed_work_submit(&arp_request_timer,
K_MSEC(ARP_REQUEST_TIMEOUT));
if (!k_work_delayable_remaining_get(&arp_request_timer)) {
k_work_reschedule(&arp_request_timer,
K_MSEC(ARP_REQUEST_TIMEOUT));
}
}
@ -200,9 +200,9 @@ static void arp_request_timeout(struct k_work *work)
}
if (entry) {
k_delayed_work_submit(&arp_request_timer,
K_MSEC(entry->req_start +
ARP_REQUEST_TIMEOUT - current));
k_work_reschedule(&arp_request_timer,
K_MSEC(entry->req_start +
ARP_REQUEST_TIMEOUT - current));
}
}
@ -709,7 +709,7 @@ void net_arp_clear_cache(struct net_if *iface)
}
if (sys_slist_is_empty(&arp_pending_entries)) {
k_delayed_work_cancel(&arp_request_timer);
k_work_cancel_delayable(&arp_request_timer);
}
}
@ -743,7 +743,7 @@ void net_arp_init(void)
sys_slist_prepend(&arp_free_entries, &arp_entries[i].node);
}
k_delayed_work_init(&arp_request_timer, arp_request_timeout);
k_work_init_delayable(&arp_request_timer, arp_request_timeout);
arp_cache_initialized = true;
}

View file

@ -22,7 +22,7 @@ LOG_MODULE_REGISTER(net_lldp, CONFIG_NET_LLDP_LOG_LEVEL);
static struct net_mgmt_event_callback cb;
/* Have only one timer in order to save memory */
static struct k_delayed_work lldp_tx_timer;
static struct k_work_delayable lldp_tx_timer;
/* Track currently active timers */
static sys_slist_t lldp_ifaces;
@ -53,14 +53,12 @@ static int lldp_find(struct ethernet_context *ctx, struct net_if *iface)
static void lldp_submit_work(uint32_t timeout)
{
if (!k_delayed_work_remaining_get(&lldp_tx_timer) ||
timeout < k_delayed_work_remaining_get(&lldp_tx_timer)) {
k_delayed_work_cancel(&lldp_tx_timer);
k_delayed_work_submit(&lldp_tx_timer, K_MSEC(timeout));
k_work_cancel_delayable(&lldp_tx_timer);
k_work_reschedule(&lldp_tx_timer, K_MSEC(timeout));
NET_DBG("Next wakeup in %d ms",
k_delayed_work_remaining_get(&lldp_tx_timer));
}
NET_DBG("Next wakeup in %d ms",
k_ticks_to_ms_ceil32(
k_work_delayable_remaining_get(&lldp_tx_timer)));
}
static bool lldp_check_timeout(int64_t start, uint32_t time, int64_t timeout)
@ -193,7 +191,7 @@ static void lldp_tx_timeout(struct k_work *work)
if (timeout_update < (UINT32_MAX - 1)) {
NET_DBG("Waiting for %u ms", timeout_update);
k_delayed_work_submit(&lldp_tx_timer, K_MSEC(timeout_update));
k_work_reschedule(&lldp_tx_timer, K_MSEC(timeout_update));
}
}
@ -249,7 +247,7 @@ static int lldp_start(struct net_if *iface, uint32_t mgmt_event)
&ctx->lldp[slot].node);
if (sys_slist_is_empty(&lldp_ifaces)) {
k_delayed_work_cancel(&lldp_tx_timer);
k_work_cancel_delayable(&lldp_tx_timer);
}
} else if (mgmt_event == NET_EVENT_IF_UP) {
NET_DBG("Starting timer for iface %p", iface);
@ -388,7 +386,7 @@ void net_lldp_unset_lldpdu(struct net_if *iface)
void net_lldp_init(void)
{
k_delayed_work_init(&lldp_tx_timer, lldp_tx_timeout);
k_work_init_delayable(&lldp_tx_timer, lldp_tx_timeout);
net_if_foreach(iface_cb, NULL);

View file

@ -42,7 +42,7 @@ static uint16_t datagram_tag;
* IPv6 packets simultaneously.
*/
struct frag_cache {
struct k_delayed_work timer; /* Reassemble timer */
struct k_work_delayable timer; /* Reassemble timer */
struct net_pkt *pkt; /* Reassemble packet */
uint16_t size; /* Datagram size */
uint16_t tag; /* Datagram tag */
@ -277,7 +277,7 @@ static inline void clear_reass_cache(uint16_t size, uint16_t tag)
cache[i].size = 0U;
cache[i].tag = 0U;
cache[i].used = false;
k_delayed_work_cancel(&cache[i].timer);
k_work_cancel_delayable(&cache[i].timer);
}
}
@ -319,8 +319,8 @@ static inline struct frag_cache *set_reass_cache(struct net_pkt *pkt,
cache[i].tag = tag;
cache[i].used = true;
k_delayed_work_init(&cache[i].timer, reass_timeout);
k_delayed_work_submit(&cache[i].timer, FRAG_REASSEMBLY_TIMEOUT);
k_work_init_delayable(&cache[i].timer, reass_timeout);
k_work_reschedule(&cache[i].timer, FRAG_REASSEMBLY_TIMEOUT);
return &cache[i];
}

View file

@ -89,7 +89,7 @@ static void fsm_send_configure_req(struct ppp_fsm *fsm, bool retransmit)
fsm->retransmits--;
(void)k_delayed_work_submit(&fsm->timer, FSM_TIMEOUT);
(void)k_work_reschedule(&fsm->timer, FSM_TIMEOUT);
}
static void ppp_fsm_timeout(struct k_work *work)
@ -147,7 +147,7 @@ static void ppp_fsm_timeout(struct k_work *work)
fsm->retransmits--;
(void)k_delayed_work_submit(&fsm->timer, FSM_TIMEOUT);
(void)k_work_reschedule(&fsm->timer, FSM_TIMEOUT);
}
break;
@ -177,7 +177,7 @@ void ppp_fsm_init(struct ppp_fsm *fsm, uint16_t protocol)
fsm->state = PPP_INITIAL;
fsm->flags = 0U;
k_delayed_work_init(&fsm->timer, ppp_fsm_timeout);
k_work_init_delayable(&fsm->timer, ppp_fsm_timeout);
}
static void fsm_down(struct ppp_fsm *fsm)
@ -196,7 +196,7 @@ static void fsm_down(struct ppp_fsm *fsm)
static void terminate(struct ppp_fsm *fsm, enum ppp_state next_state)
{
if (fsm->state != PPP_OPENED) {
k_delayed_work_cancel(&fsm->timer);
k_work_cancel_delayable(&fsm->timer);
} else {
fsm_down(fsm);
}
@ -218,7 +218,7 @@ static void terminate(struct ppp_fsm *fsm, enum ppp_state next_state)
return;
}
(void)k_delayed_work_submit(&fsm->timer, FSM_TIMEOUT);
(void)k_work_reschedule(&fsm->timer, FSM_TIMEOUT);
fsm->retransmits--;
@ -276,7 +276,7 @@ void ppp_fsm_lower_down(struct ppp_fsm *fsm)
case PPP_REQUEST_SENT:
case PPP_STOPPING:
ppp_change_state(fsm, PPP_STARTING);
k_delayed_work_cancel(&fsm->timer);
k_work_cancel_delayable(&fsm->timer);
break;
case PPP_CLOSED:
@ -285,7 +285,7 @@ void ppp_fsm_lower_down(struct ppp_fsm *fsm)
case PPP_CLOSING:
ppp_change_state(fsm, PPP_INITIAL);
k_delayed_work_cancel(&fsm->timer);
k_work_cancel_delayable(&fsm->timer);
break;
case PPP_OPENED:
@ -651,7 +651,7 @@ static enum net_verdict fsm_recv_configure_req(struct ppp_fsm *fsm,
if (code == PPP_CONFIGURE_ACK) {
if (fsm->state == PPP_ACK_RECEIVED) {
k_delayed_work_cancel(&fsm->timer);
k_work_cancel_delayable(&fsm->timer);
ppp_change_state(fsm, PPP_OPENED);
@ -706,13 +706,13 @@ static enum net_verdict fsm_recv_configure_ack(struct ppp_fsm *fsm, uint8_t id,
switch (fsm->state) {
case PPP_ACK_RECEIVED:
k_delayed_work_cancel(&fsm->timer);
k_work_cancel_delayable(&fsm->timer);
fsm_send_configure_req(fsm, false);
ppp_change_state(fsm, PPP_REQUEST_SENT);
break;
case PPP_ACK_SENT:
k_delayed_work_cancel(&fsm->timer);
k_work_cancel_delayable(&fsm->timer);
ppp_change_state(fsm, PPP_OPENED);
fsm->retransmits = MAX_CONFIGURE_REQ;
if (fsm->cb.up) {
@ -816,14 +816,14 @@ static enum net_verdict fsm_recv_configure_nack_rej(struct ppp_fsm *fsm,
switch (fsm->state) {
case PPP_ACK_RECEIVED:
k_delayed_work_cancel(&fsm->timer);
k_work_cancel_delayable(&fsm->timer);
fsm_send_configure_req(fsm, false);
ppp_change_state(fsm, PPP_REQUEST_SENT);
break;
case PPP_ACK_SENT:
case PPP_REQUEST_SENT:
k_delayed_work_cancel(&fsm->timer);
k_work_cancel_delayable(&fsm->timer);
fsm_send_configure_req(fsm, false);
break;
@ -881,7 +881,7 @@ static enum net_verdict fsm_recv_terminate_req(struct ppp_fsm *fsm, uint8_t id,
fsm_down(fsm);
(void)k_delayed_work_submit(&fsm->timer, FSM_TIMEOUT);
(void)k_work_reschedule(&fsm->timer, FSM_TIMEOUT);
break;
default:
@ -934,7 +934,7 @@ static enum net_verdict fsm_recv_terminate_ack(struct ppp_fsm *fsm, uint8_t id,
return NET_OK;
stopped:
k_delayed_work_cancel(&fsm->timer);
k_work_cancel_delayable(&fsm->timer);
ppp_change_state(fsm, new_state);
if (fsm->cb.finished) {
@ -983,7 +983,7 @@ void ppp_fsm_proto_reject(struct ppp_fsm *fsm)
case PPP_ACK_SENT:
case PPP_STOPPING:
case PPP_REQUEST_SENT:
k_delayed_work_cancel(&fsm->timer);
k_work_cancel_delayable(&fsm->timer);
ppp_change_state(fsm, PPP_STOPPED);
if (fsm->cb.finished) {
fsm->cb.finished(fsm);
@ -1000,7 +1000,7 @@ void ppp_fsm_proto_reject(struct ppp_fsm *fsm)
break;
case PPP_CLOSING:
k_delayed_work_cancel(&fsm->timer);
k_work_cancel_delayable(&fsm->timer);
ppp_change_state(fsm, PPP_CLOSED);
if (fsm->cb.finished) {
fsm->cb.finished(fsm);

View file

@ -451,10 +451,10 @@ void net_ppp_init(struct net_if *iface)
* system. The issue is unlikely, as there would typically
* be only one PPP network interface in the system.
*/
k_delayed_work_init(&ctx->startup, ppp_startup);
k_work_init_delayable(&ctx->startup, ppp_startup);
ctx->is_startup_pending = true;
k_delayed_work_submit(&ctx->startup,
K_MSEC(CONFIG_NET_L2_PPP_DELAY_STARTUP_MS));
k_work_reschedule(&ctx->startup,
K_MSEC(CONFIG_NET_L2_PPP_DELAY_STARTUP_MS));
}

View file

@ -652,9 +652,9 @@ int http_client_req(int sock, struct http_request *req,
if (!K_TIMEOUT_EQ(req->internal.timeout, K_FOREVER) &&
!K_TIMEOUT_EQ(req->internal.timeout, K_NO_WAIT)) {
k_delayed_work_init(&req->internal.work, http_timeout);
(void)k_delayed_work_submit(&req->internal.work,
req->internal.timeout);
k_work_init_delayable(&req->internal.work, http_timeout);
(void)k_work_reschedule(&req->internal.work,
req->internal.timeout);
}
/* Request is sent, now wait data to be received */
@ -667,7 +667,7 @@ int http_client_req(int sock, struct http_request *req,
if (!K_TIMEOUT_EQ(req->internal.timeout, K_FOREVER) &&
!K_TIMEOUT_EQ(req->internal.timeout, K_NO_WAIT)) {
(void)k_delayed_work_cancel(&req->internal.work);
(void)k_work_cancel_delayable(&req->internal.work);
}
return total_sent;

View file

@ -49,7 +49,7 @@ struct ipso_buzzer_data {
uint64_t trigger_offset;
struct k_delayed_work buzzer_work;
struct k_work_delayable buzzer_work;
uint16_t obj_inst_id;
bool onoff; /* toggle from resource */
@ -129,7 +129,7 @@ static int start_buzzer(struct ipso_buzzer_data *buzzer)
lwm2m_engine_set_bool(path, true);
float2ms(&buzzer->delay_duration, &temp);
k_delayed_work_submit(&buzzer->buzzer_work, K_MSEC(temp));
k_work_reschedule(&buzzer->buzzer_work, K_MSEC(temp));
return 0;
}
@ -148,7 +148,7 @@ static int stop_buzzer(struct ipso_buzzer_data *buzzer, bool cancel)
lwm2m_engine_set_bool(path, false);
if (cancel) {
k_delayed_work_cancel(&buzzer->buzzer_work);
k_work_cancel_delayable(&buzzer->buzzer_work);
}
return 0;
@ -209,7 +209,7 @@ static struct lwm2m_engine_obj_inst *buzzer_create(uint16_t obj_inst_id)
/* Set default values */
(void)memset(&buzzer_data[avail], 0, sizeof(buzzer_data[avail]));
k_delayed_work_init(&buzzer_data[avail].buzzer_work, buzzer_work_cb);
k_work_init_delayable(&buzzer_data[avail].buzzer_work, buzzer_work_cb);
buzzer_data[avail].level.val1 = 50; /* 50% */
buzzer_data[avail].delay_duration.val1 = 1; /* 1 seconds */
buzzer_data[avail].obj_inst_id = obj_inst_id;

View file

@ -55,7 +55,7 @@ struct ipso_timer_data {
uint32_t trigger_counter;
uint32_t cumulative_time_ms;
struct k_delayed_work timer_work;
struct k_work_delayable timer_work;
uint16_t obj_inst_id;
uint8_t timer_mode;
@ -144,7 +144,7 @@ static int start_timer(struct ipso_timer_data *timer)
lwm2m_engine_set_bool(path, true);
float2ms(&timer->delay_duration, &temp);
k_delayed_work_submit(&timer->timer_work, K_MSEC(temp));
k_work_reschedule(&timer->timer_work, K_MSEC(temp));
return 0;
}
@ -164,7 +164,7 @@ static int stop_timer(struct ipso_timer_data *timer, bool cancel)
lwm2m_engine_set_bool(path, false);
if (cancel) {
k_delayed_work_cancel(&timer->timer_work);
k_work_cancel_delayable(&timer->timer_work);
}
return 0;
@ -317,7 +317,7 @@ static struct lwm2m_engine_obj_inst *timer_create(uint16_t obj_inst_id)
/* Set default values */
(void)memset(&timer_data[avail], 0, sizeof(timer_data[avail]));
k_delayed_work_init(&timer_data[avail].timer_work, timer_work_cb);
k_work_init_delayable(&timer_data[avail].timer_work, timer_work_cb);
timer_data[avail].delay_duration.val1 = 5; /* 5 seconds */
timer_data[avail].enabled = true;
timer_data[avail].timer_mode = TIMER_MODE_ONE_SHOT;

View file

@ -1052,15 +1052,16 @@ int lwm2m_send_message(struct lwm2m_message *msg)
}
if (msg->type == COAP_TYPE_CON) {
int32_t remaining = k_delayed_work_remaining_get(
&msg->ctx->retransmit_work);
int32_t remaining =
k_ticks_to_ms_ceil32(k_work_delayable_remaining_get(
&msg->ctx->retransmit_work));
/* If the item is already pending and its timeout is smaller
* than the new one, skip the submission.
*/
if (remaining == 0 || remaining > msg->pending->timeout) {
k_delayed_work_submit(&msg->ctx->retransmit_work,
K_MSEC(msg->pending->timeout));
k_work_reschedule(&msg->ctx->retransmit_work,
K_MSEC(msg->pending->timeout));
}
} else {
lwm2m_reset_message(msg, true);
@ -4250,7 +4251,7 @@ next:
remaining = 0;
}
k_delayed_work_submit(&client_ctx->retransmit_work, K_MSEC(remaining));
k_work_reschedule(&client_ctx->retransmit_work, K_MSEC(remaining));
}
static int notify_message_reply_cb(const struct coap_packet *response,
@ -4496,7 +4497,7 @@ int lwm2m_engine_context_close(struct lwm2m_ctx *client_ctx)
size_t i;
/* Cancel pending retransmit work */
k_delayed_work_cancel(&client_ctx->retransmit_work);
k_work_cancel_delayable(&client_ctx->retransmit_work);
/* Remove observes for this context */
SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&engine_observer_list,
@ -4532,7 +4533,7 @@ int lwm2m_engine_context_close(struct lwm2m_ctx *client_ctx)
void lwm2m_engine_context_init(struct lwm2m_ctx *client_ctx)
{
k_delayed_work_init(&client_ctx->retransmit_work, retransmit_request);
k_work_init_delayable(&client_ctx->retransmit_work, retransmit_request);
sys_mutex_init(&client_ctx->send_lock);
}

View file

@ -222,9 +222,9 @@ void platformRadioInit(void)
return;
}
k_work_q_start(&ot_work_q, ot_task_stack,
K_KERNEL_STACK_SIZEOF(ot_task_stack),
OT_WORKER_PRIORITY);
k_work_queue_start(&ot_work_q, ot_task_stack,
K_KERNEL_STACK_SIZEOF(ot_task_stack),
OT_WORKER_PRIORITY, NULL);
k_thread_name_set(&ot_work_q.thread, "ot_radio_workq");
if ((radio_api->get_capabilities(radio_dev) &

View file

@ -708,7 +708,7 @@ void test_v6_so_rcvtimeo(void)
}
struct test_msg_waitall_data {
struct k_delayed_work tx_work;
struct k_work_delayable tx_work;
int sock;
const uint8_t *data;
size_t offset;
@ -724,7 +724,7 @@ static void test_msg_waitall_tx_work_handler(struct k_work *work)
test_send(test_data->sock, test_data->data + test_data->offset, 1, 0);
test_data->offset++;
test_data->retries--;
k_delayed_work_submit(&test_data->tx_work, K_MSEC(10));
k_work_reschedule(&test_data->tx_work, K_MSEC(10));
}
}
@ -767,14 +767,15 @@ void test_v4_msg_waitall(void)
test_data.offset = 0;
test_data.retries = sizeof(rx_buf);
test_data.sock = c_sock;
k_delayed_work_init(&test_data.tx_work, test_msg_waitall_tx_work_handler);
k_delayed_work_submit(&test_data.tx_work, K_MSEC(10));
k_work_init_delayable(&test_data.tx_work,
test_msg_waitall_tx_work_handler);
k_work_reschedule(&test_data.tx_work, K_MSEC(10));
ret = recv(new_sock, rx_buf, sizeof(rx_buf), MSG_WAITALL);
zassert_equal(ret, sizeof(rx_buf), "Invalid length received");
zassert_mem_equal(rx_buf, TEST_STR_SMALL, sizeof(rx_buf),
"Invalid data received");
k_delayed_work_cancel(&test_data.tx_work);
k_work_cancel_delayable(&test_data.tx_work);
/* MSG_WAITALL + SO_RCVTIMEO - make sure recv returns the amount of data
* received so far
@ -787,14 +788,15 @@ void test_v4_msg_waitall(void)
test_data.offset = 0;
test_data.retries = sizeof(rx_buf) - 1;
test_data.sock = c_sock;
k_delayed_work_init(&test_data.tx_work, test_msg_waitall_tx_work_handler);
k_delayed_work_submit(&test_data.tx_work, K_MSEC(10));
k_work_init_delayable(&test_data.tx_work,
test_msg_waitall_tx_work_handler);
k_work_reschedule(&test_data.tx_work, K_MSEC(10));
ret = recv(new_sock, rx_buf, sizeof(rx_buf) - 1, MSG_WAITALL);
zassert_equal(ret, sizeof(rx_buf) - 1, "Invalid length received");
zassert_mem_equal(rx_buf, TEST_STR_SMALL, sizeof(rx_buf) - 1,
"Invalid data received");
k_delayed_work_cancel(&test_data.tx_work);
k_work_cancel_delayable(&test_data.tx_work);
test_close(new_sock);
test_close(s_sock);
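Throughout these tests the conversion deliberately uses k_work_reschedule() rather than k_work_schedule(). The two behave identically on an idle item and diverge on a pending one, and only reschedule reproduces the replace-the-timeout behaviour of the old k_delayed_work_submit(). A contrast sketch with a hypothetical item:

#include <zephyr.h>

static void noop_handler(struct k_work *work) { ARG_UNUSED(work); }
static struct k_work_delayable demo_work;	/* hypothetical */

static void contrast_calls(void)
{
	k_work_init_delayable(&demo_work, noop_handler);

	k_work_schedule(&demo_work, K_MSEC(10));   /* submits: item was idle */
	k_work_schedule(&demo_work, K_MSEC(50));   /* no-op: 10 ms deadline kept */
	k_work_reschedule(&demo_work, K_MSEC(50)); /* deadline replaced: 50 ms */
}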
@ -839,14 +841,15 @@ void test_v6_msg_waitall(void)
test_data.offset = 0;
test_data.retries = sizeof(rx_buf);
test_data.sock = c_sock;
k_delayed_work_init(&test_data.tx_work, test_msg_waitall_tx_work_handler);
k_delayed_work_submit(&test_data.tx_work, K_MSEC(10));
k_work_init_delayable(&test_data.tx_work,
test_msg_waitall_tx_work_handler);
k_work_reschedule(&test_data.tx_work, K_MSEC(10));
ret = recv(new_sock, rx_buf, sizeof(rx_buf), MSG_WAITALL);
zassert_equal(ret, sizeof(rx_buf), "Invalid length received");
zassert_mem_equal(rx_buf, TEST_STR_SMALL, sizeof(rx_buf),
"Invalid data received");
k_delayed_work_cancel(&test_data.tx_work);
k_work_cancel_delayable(&test_data.tx_work);
/* MSG_WAITALL + SO_RCVTIMEO - make sure recv returns the amount of data
* received so far
@ -859,14 +862,15 @@ void test_v6_msg_waitall(void)
test_data.offset = 0;
test_data.retries = sizeof(rx_buf) - 1;
test_data.sock = c_sock;
k_delayed_work_init(&test_data.tx_work, test_msg_waitall_tx_work_handler);
k_delayed_work_submit(&test_data.tx_work, K_MSEC(10));
k_work_init_delayable(&test_data.tx_work,
test_msg_waitall_tx_work_handler);
k_work_reschedule(&test_data.tx_work, K_MSEC(10));
ret = recv(new_sock, rx_buf, sizeof(rx_buf) - 1, MSG_WAITALL);
zassert_equal(ret, sizeof(rx_buf) - 1, "Invalid length received");
zassert_mem_equal(rx_buf, TEST_STR_SMALL, sizeof(rx_buf) - 1,
"Invalid data received");
k_delayed_work_cancel(&test_data.tx_work);
k_work_cancel_delayable(&test_data.tx_work);
test_close(new_sock);
test_close(s_sock);

View file

@ -191,7 +191,7 @@ void test_so_protocol(void)
}
struct test_msg_waitall_data {
struct k_delayed_work tx_work;
struct k_work_delayable tx_work;
int sock;
const uint8_t *data;
size_t offset;
@ -207,7 +207,7 @@ static void test_msg_waitall_tx_work_handler(struct k_work *work)
test_send(test_data->sock, test_data->data + test_data->offset, 1, 0);
test_data->offset++;
test_data->retries--;
k_delayed_work_submit(&test_data->tx_work, K_MSEC(10));
k_work_reschedule(&test_data->tx_work, K_MSEC(10));
}
}
@ -253,14 +253,15 @@ void test_v4_msg_waitall(void)
test_data.offset = 0;
test_data.retries = sizeof(rx_buf);
test_data.sock = c_sock;
k_delayed_work_init(&test_data.tx_work, test_msg_waitall_tx_work_handler);
k_delayed_work_submit(&test_data.tx_work, K_MSEC(10));
k_work_init_delayable(&test_data.tx_work,
test_msg_waitall_tx_work_handler);
k_work_reschedule(&test_data.tx_work, K_MSEC(10));
ret = recv(new_sock, rx_buf, sizeof(rx_buf), MSG_WAITALL);
zassert_equal(ret, sizeof(rx_buf), "Invalid length received");
zassert_mem_equal(rx_buf, TEST_STR_SMALL, sizeof(rx_buf),
"Invalid data received");
k_delayed_work_cancel(&test_data.tx_work);
k_work_cancel_delayable(&test_data.tx_work);
/* MSG_WAITALL + SO_RCVTIMEO - make sure recv returns the amount of data
* received so far
@ -273,14 +274,15 @@ void test_v4_msg_waitall(void)
test_data.offset = 0;
test_data.retries = sizeof(rx_buf) - 1;
test_data.sock = c_sock;
k_delayed_work_init(&test_data.tx_work, test_msg_waitall_tx_work_handler);
k_delayed_work_submit(&test_data.tx_work, K_MSEC(10));
k_work_init_delayable(&test_data.tx_work,
test_msg_waitall_tx_work_handler);
k_work_reschedule(&test_data.tx_work, K_MSEC(10));
ret = recv(new_sock, rx_buf, sizeof(rx_buf) - 1, MSG_WAITALL);
zassert_equal(ret, sizeof(rx_buf) - 1, "Invalid length received");
zassert_mem_equal(rx_buf, TEST_STR_SMALL, sizeof(rx_buf) - 1,
"Invalid data received");
k_delayed_work_cancel(&test_data.tx_work);
k_work_cancel_delayable(&test_data.tx_work);
test_close(new_sock);
test_close(s_sock);
@ -329,14 +331,15 @@ void test_v6_msg_waitall(void)
test_data.offset = 0;
test_data.retries = sizeof(rx_buf);
test_data.sock = c_sock;
k_delayed_work_init(&test_data.tx_work, test_msg_waitall_tx_work_handler);
k_delayed_work_submit(&test_data.tx_work, K_MSEC(10));
k_work_init_delayable(&test_data.tx_work,
test_msg_waitall_tx_work_handler);
k_work_reschedule(&test_data.tx_work, K_MSEC(10));
ret = recv(new_sock, rx_buf, sizeof(rx_buf), MSG_WAITALL);
zassert_equal(ret, sizeof(rx_buf), "Invalid length received");
zassert_mem_equal(rx_buf, TEST_STR_SMALL, sizeof(rx_buf),
"Invalid data received");
k_delayed_work_cancel(&test_data.tx_work);
k_work_cancel_delayable(&test_data.tx_work);
/* MSG_WAITALL + SO_RCVTIMEO - make sure recv returns the amount of data
* received so far
@ -349,14 +352,15 @@ void test_v6_msg_waitall(void)
test_data.offset = 0;
test_data.retries = sizeof(rx_buf) - 1;
test_data.sock = c_sock;
k_delayed_work_init(&test_data.tx_work, test_msg_waitall_tx_work_handler);
k_delayed_work_submit(&test_data.tx_work, K_MSEC(10));
k_work_init_delayable(&test_data.tx_work,
test_msg_waitall_tx_work_handler);
k_work_reschedule(&test_data.tx_work, K_MSEC(10));
ret = recv(new_sock, rx_buf, sizeof(rx_buf) - 1, MSG_WAITALL);
zassert_equal(ret, sizeof(rx_buf) - 1, "Invalid length received");
zassert_mem_equal(rx_buf, TEST_STR_SMALL, sizeof(rx_buf) - 1,
"Invalid data received");
k_delayed_work_cancel(&test_data.tx_work);
k_work_cancel_delayable(&test_data.tx_work);
test_close(new_sock);
test_close(s_sock);
@ -364,7 +368,7 @@ void test_v6_msg_waitall(void)
}
struct test_msg_trunc_data {
struct k_delayed_work tx_work;
struct k_work_delayable tx_work;
int sock;
const uint8_t *data;
size_t datalen;
@ -407,8 +411,9 @@ void test_msg_trunc(int sock_c, int sock_s, struct sockaddr *addr_c,
/* MSG_TRUNC */
test_data.sock = sock_c;
k_delayed_work_init(&test_data.tx_work, test_msg_trunc_tx_work_handler);
k_delayed_work_submit(&test_data.tx_work, K_MSEC(10));
k_work_init_delayable(&test_data.tx_work,
test_msg_trunc_tx_work_handler);
k_work_reschedule(&test_data.tx_work, K_MSEC(10));
memset(rx_buf, 0, sizeof(rx_buf));
rv = recv(sock_s, rx_buf, 2, ZSOCK_MSG_TRUNC);

View file

@ -108,7 +108,7 @@ enum test_state {
static enum test_state t_state;
static struct k_delayed_work test_server;
static struct k_work_delayable test_server;
static void test_server_timeout(struct k_work *work);
static int tester_send(const struct device *dev, struct net_pkt *pkt);
@ -447,7 +447,7 @@ static void test_presetup(void)
zassert_true(false, "Failed to add IPv6 address");
}
k_delayed_work_init(&test_server, test_server_timeout);
k_work_init_delayable(&test_server, test_server_timeout);
}
static void handle_client_test(sa_family_t af, struct tcphdr *th)
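In the server tests below, each trigger is an immediate submission: K_NO_WAIT bypasses the delay, so the item goes straight onto the system work queue and test_server_timeout() runs as soon as that thread is scheduled. A one-line illustration using the test_server item declared above:

/* No delay: queue immediately; handler runs when the workqueue thread does. */
k_work_reschedule(&test_server, K_NO_WAIT);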
@ -760,7 +760,7 @@ static void test_server_ipv4(void)
}
/* Trigger the peer to send SYN */
k_delayed_work_submit(&test_server, K_NO_WAIT);
k_work_reschedule(&test_server, K_NO_WAIT);
ret = net_context_accept(ctx, test_tcp_accept_cb, K_FOREVER, NULL);
if (ret < 0) {
@ -773,7 +773,7 @@ static void test_server_ipv4(void)
test_sem_take(K_MSEC(100), __LINE__);
/* Trigger the peer to send DATA */
k_delayed_work_submit(&test_server, K_NO_WAIT);
k_work_reschedule(&test_server, K_NO_WAIT);
ret = net_context_recv(ctx, test_tcp_recv_cb, K_MSEC(200), NULL);
if (ret < 0) {
@ -781,7 +781,7 @@ static void test_server_ipv4(void)
}
/* Trigger the peer to send FIN after timeout */
k_delayed_work_submit(&test_server, K_NO_WAIT);
k_work_reschedule(&test_server, K_NO_WAIT);
net_context_put(ctx);
}
@ -823,7 +823,7 @@ static void test_server_with_options_ipv4(void)
}
/* Trigger the peer to send SYN */
k_delayed_work_submit(&test_server, K_NO_WAIT);
k_work_reschedule(&test_server, K_NO_WAIT);
ret = net_context_accept(ctx, test_tcp_accept_cb, K_FOREVER, NULL);
if (ret < 0) {
@ -836,7 +836,7 @@ static void test_server_with_options_ipv4(void)
test_sem_take(K_MSEC(100), __LINE__);
/* Trigger the peer to send DATA */
k_delayed_work_submit(&test_server, K_NO_WAIT);
k_work_reschedule(&test_server, K_NO_WAIT);
ret = net_context_recv(ctx, test_tcp_recv_cb, K_MSEC(200), NULL);
if (ret < 0) {
@ -844,7 +844,7 @@ static void test_server_with_options_ipv4(void)
}
/* Trigger the peer to send FIN after timeout */
k_delayed_work_submit(&test_server, K_NO_WAIT);
k_work_reschedule(&test_server, K_NO_WAIT);
net_context_put(ctx);
}
@ -886,7 +886,7 @@ static void test_server_ipv6(void)
}
/* Trigger the peer to send SYN */
k_delayed_work_submit(&test_server, K_NO_WAIT);
k_work_reschedule(&test_server, K_NO_WAIT);
ret = net_context_accept(ctx, test_tcp_accept_cb, K_FOREVER, NULL);
if (ret < 0) {
@ -899,7 +899,7 @@ static void test_server_ipv6(void)
test_sem_take(K_MSEC(100), __LINE__);
/* Trigger the peer to send DATA */
k_delayed_work_submit(&test_server, K_NO_WAIT);
k_work_reschedule(&test_server, K_NO_WAIT);
ret = net_context_recv(ctx, test_tcp_recv_cb, K_MSEC(200), NULL);
if (ret < 0) {
@ -907,7 +907,7 @@ static void test_server_ipv6(void)
}
/* Trigger the peer to send FIN after timeout */
k_delayed_work_submit(&test_server, K_NO_WAIT);
k_work_reschedule(&test_server, K_NO_WAIT);
net_context_put(ctx);
}
@ -1230,7 +1230,7 @@ static struct net_context *create_server_socket(uint32_t my_seq,
}
/* Trigger the peer to send SYN */
k_delayed_work_submit(&test_server, K_NO_WAIT);
k_work_reschedule(&test_server, K_NO_WAIT);
ret = net_context_accept(ctx, test_tcp_accept_cb, K_FOREVER, NULL);
if (ret < 0) {