net: timeout: refactor to fix multiple problems
The net_timeout structure is documented to exist because of behavior that is no longer true, i.e. that `k_delayed_work_submit()` supports only delays up to INT32_MAX milliseconds. Nonetheless, the use of 32-bit timestamps within the work handlers means the restriction is still present.

This infrastructure is currently used for two timers with long durations:

* address for IPv6 addresses
* prefix for IPv6 prefixes

The handling of rollover was subtly different between these: address wraps reset the start time while prefix wraps did not.

The calculation of remaining time in ipv6_nbr was incorrect when the original requested time in seconds was a multiple of NET_TIMEOUT_MAX_VALUE: the remainder value would be zero while the wrap counter was positive, causing the calculation to indicate no time remained.

The maximum value was set to allow a 100 ms latency between elapse of the deadline and assessment of a given timer, but detection of rollover assumed that the captured time in the work handler was precisely the expected deadline, which is unlikely to be true. Use of the shared system work queue also risks observed latency exceeding 100 ms. These calculations could produce delays to the next event that exceeded the maximum delay, which introduced special cases.

Refactor so that all operations that use this structure are encapsulated into an API that is documented and has a full-coverage unit test. Switch to the standard mechanism of detecting completed deadlines by calculating the signed difference between the deadline and the current time, which eliminates some special cases. Uniformly rely on scanning the set of timers to determine the next deadline, rather than assuming that the most recent update is always next.

Signed-off-by: Peter Bigot <peter.bigot@nordicsemi.no>
parent 9f95d8d029
commit acd43cbaac
11 changed files with 716 additions and 221 deletions
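
The commit message above leans on two ideas: detecting an expired deadline via the signed difference between the deadline and the current 32-bit uptime, and splitting a long lifetime into full NET_TIMEOUT_MAX_VALUE-sized segments plus a remainder (the split whose zero-remainder corner case is called out). The following is a minimal standalone sketch of those two ideas only; SKETCH_MAX_SEGMENT_MS, deadline_passed() and the values are hypothetical illustrations, not the Zephyr net_timeout API.

    /* Standalone illustration; not Zephyr code. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Largest delay handled in one wakeup (illustrative value only). */
    #define SKETCH_MAX_SEGMENT_MS (INT32_MAX / 2)

    /* Signed-difference deadline test: correct across uint32_t rollover as
     * long as it is evaluated within ~INT32_MAX ms of the deadline.
     */
    static bool deadline_passed(uint32_t deadline, uint32_t now)
    {
        return (int32_t)(now - deadline) >= 0;
    }

    int main(void)
    {
        /* A lifetime that is an exact multiple of the segment size: the
         * "full segments + remainder" split leaves remainder == 0 with a
         * positive segment count, the corner case noted in the message.
         */
        uint64_t lifetime_ms = 2ULL * SKETCH_MAX_SEGMENT_MS;
        uint64_t full_segments = lifetime_ms / SKETCH_MAX_SEGMENT_MS;
        uint64_t remainder_ms = lifetime_ms % SKETCH_MAX_SEGMENT_MS;

        printf("segments=%llu remainder=%llu ms\n",
               (unsigned long long)full_segments,
               (unsigned long long)remainder_ms);

        /* Deadline detection just before and just after 32-bit rollover. */
        uint32_t deadline = UINT32_MAX - 10U; /* near the wrap point */
        uint32_t early = deadline - 5U;       /* 5 ms before the deadline */
        uint32_t late = deadline + 20U;       /* 20 ms after, wrapped past 0 */

        printf("early: %d, late (wrapped): %d\n",
               deadline_passed(deadline, early),  /* prints 0 */
               deadline_passed(deadline, late));  /* prints 1 */

        return 0;
    }
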
@@ -1320,79 +1320,40 @@ static inline int z_vrfy_net_if_ipv6_addr_lookup_by_index(
 #include <syscalls/net_if_ipv6_addr_lookup_by_index_mrsh.c>
 #endif
 
-static bool check_timeout(uint32_t start, int32_t timeout, uint32_t counter,
-			  uint32_t current_time)
-{
-	if (counter > 0) {
-		return false;
-	}
-
-	if ((int32_t)((start + (uint32_t)timeout) - current_time) > 0) {
-		return false;
-	}
-
-	return true;
-}
-
 static void address_expired(struct net_if_addr *ifaddr)
 {
 	NET_DBG("IPv6 address %s is deprecated",
 		log_strdup(net_sprint_ipv6_addr(&ifaddr->address.in6_addr)));
 
 	ifaddr->addr_state = NET_ADDR_DEPRECATED;
-	ifaddr->lifetime.timer_timeout = 0;
-	ifaddr->lifetime.wrap_counter = 0;
 
 	sys_slist_find_and_remove(&active_address_lifetime_timers,
 				  &ifaddr->lifetime.node);
-}
-
-static bool address_manage_timeout(struct net_if_addr *ifaddr,
-				   uint32_t current_time, uint32_t *next_wakeup)
-{
-	if (check_timeout(ifaddr->lifetime.timer_start,
-			  ifaddr->lifetime.timer_timeout,
-			  ifaddr->lifetime.wrap_counter,
-			  current_time)) {
-		address_expired(ifaddr);
-		return true;
-	}
-
-	if (current_time == NET_TIMEOUT_MAX_VALUE) {
-		ifaddr->lifetime.timer_start = k_uptime_get_32();
-		ifaddr->lifetime.wrap_counter--;
-	}
-
-	if (ifaddr->lifetime.wrap_counter > 0) {
-		*next_wakeup = NET_TIMEOUT_MAX_VALUE;
-	} else {
-		*next_wakeup = ifaddr->lifetime.timer_timeout;
-	}
-
-	return false;
+
+	net_timeout_set(&ifaddr->lifetime, 0, 0);
 }
 
 static void address_lifetime_timeout(struct k_work *work)
 {
-	uint64_t timeout_update = UINT64_MAX;
+	uint32_t next_update = UINT32_MAX;
 	uint32_t current_time = k_uptime_get_32();
-	bool found = false;
 	struct net_if_addr *current, *next;
 
 	ARG_UNUSED(work);
 
 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_address_lifetime_timers,
 					  current, next, lifetime.node) {
-		uint32_t next_timeout;
-		bool is_timeout;
-
-		is_timeout = address_manage_timeout(current, current_time,
-						    &next_timeout);
-		if (!is_timeout) {
-			if (next_timeout < timeout_update) {
-				timeout_update = next_timeout;
-				found = true;
-			}
+		struct net_timeout *timeout = &current->lifetime;
+		uint32_t this_update = net_timeout_evaluate(timeout,
+							    current_time);
+
+		if (this_update == 0U) {
+			address_expired(current);
+			continue;
+		}
+
+		if (this_update < next_update) {
+			next_update = this_update;
 		}
 
 		if (current == next) {
@@ -1400,18 +1361,11 @@ static void address_lifetime_timeout(struct k_work *work)
 		}
 	}
 
-	if (found) {
-		/* If we are near upper limit of int32_t timeout, then lower it
-		 * a bit so that kernel timeout variable will not overflow.
-		 */
-		if (timeout_update >= NET_TIMEOUT_MAX_VALUE) {
-			timeout_update = NET_TIMEOUT_MAX_VALUE;
-		}
-
-		NET_DBG("Waiting for %d ms", (int32_t)timeout_update);
+	if (next_update != UINT32_MAX) {
+		NET_DBG("Waiting for %d ms", (int32_t)next_update);
 
 		k_delayed_work_submit(&address_lifetime_timer,
-				      K_MSEC(timeout_update));
+				      K_MSEC(next_update));
 	}
 }
 
@@ -1422,43 +1376,13 @@ void net_address_lifetime_timeout(void)
 }
 #endif
 
-static void address_submit_work(struct net_if_addr *ifaddr)
-{
-	int32_t remaining;
-
-	remaining = k_delayed_work_remaining_get(&address_lifetime_timer);
-	if (!remaining || (ifaddr->lifetime.wrap_counter == 0 &&
-			   ifaddr->lifetime.timer_timeout < remaining)) {
-		k_delayed_work_cancel(&address_lifetime_timer);
-
-		if (ifaddr->lifetime.wrap_counter > 0 && remaining == 0) {
-			k_delayed_work_submit(&address_lifetime_timer,
-					      K_MSEC(NET_TIMEOUT_MAX_VALUE));
-		} else {
-			k_delayed_work_submit(&address_lifetime_timer,
-					      K_MSEC(ifaddr->lifetime.timer_timeout));
-		}
-
-		NET_DBG("Next wakeup in %d ms",
-			k_delayed_work_remaining_get(&address_lifetime_timer));
-	}
-}
-
 static void address_start_timer(struct net_if_addr *ifaddr, uint32_t vlifetime)
 {
-	uint64_t expire_timeout = (uint64_t)MSEC_PER_SEC * (uint64_t)vlifetime;
-
 	sys_slist_append(&active_address_lifetime_timers,
 			 &ifaddr->lifetime.node);
 
-	ifaddr->lifetime.timer_start = k_uptime_get_32();
-	ifaddr->lifetime.wrap_counter = expire_timeout /
-		(uint64_t)NET_TIMEOUT_MAX_VALUE;
-	ifaddr->lifetime.timer_timeout = expire_timeout -
-		(uint64_t)NET_TIMEOUT_MAX_VALUE *
-		(uint64_t)ifaddr->lifetime.wrap_counter;
-
-	address_submit_work(ifaddr);
+	net_timeout_set(&ifaddr->lifetime, vlifetime, k_uptime_get_32());
+	k_delayed_work_submit(&address_lifetime_timer, K_NO_WAIT);
 }
 
 void net_if_ipv6_addr_update_lifetime(struct net_if_addr *ifaddr,
@@ -1906,58 +1830,33 @@ static void prefix_timer_remove(struct net_if_ipv6_prefix *ifprefix)
 		log_strdup(net_sprint_ipv6_addr(&ifprefix->prefix)),
 		ifprefix->len);
 
-	ifprefix->lifetime.timer_timeout = 0;
-	ifprefix->lifetime.wrap_counter = 0;
-
 	sys_slist_find_and_remove(&active_prefix_lifetime_timers,
 				  &ifprefix->lifetime.node);
-}
-
-static bool prefix_manage_timeout(struct net_if_ipv6_prefix *ifprefix,
-				  uint32_t current_time, uint32_t *next_wakeup)
-{
-	if (check_timeout(ifprefix->lifetime.timer_start,
-			  ifprefix->lifetime.timer_timeout,
-			  ifprefix->lifetime.wrap_counter,
-			  current_time)) {
-		prefix_lifetime_expired(ifprefix);
-		return true;
-	}
-
-	if (current_time == NET_TIMEOUT_MAX_VALUE) {
-		ifprefix->lifetime.wrap_counter--;
-	}
-
-	if (ifprefix->lifetime.wrap_counter > 0) {
-		*next_wakeup = NET_TIMEOUT_MAX_VALUE;
-	} else {
-		*next_wakeup = ifprefix->lifetime.timer_timeout;
-	}
-
-	return false;
+	net_timeout_set(&ifprefix->lifetime, 0, 0);
 }
 
 static void prefix_lifetime_timeout(struct k_work *work)
 {
-	uint64_t timeout_update = UINT64_MAX;
+	uint32_t next_update = UINT32_MAX;
 	uint32_t current_time = k_uptime_get_32();
-	bool found = false;
 	struct net_if_ipv6_prefix *current, *next;
 
 	ARG_UNUSED(work);
 
 	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_prefix_lifetime_timers,
 					  current, next, lifetime.node) {
-		uint32_t next_timeout;
-		bool is_timeout;
-
-		is_timeout = prefix_manage_timeout(current, current_time,
-						   &next_timeout);
-		if (!is_timeout) {
-			if (next_timeout < timeout_update) {
-				timeout_update = next_timeout;
-				found = true;
-			}
+		struct net_timeout *timeout = &current->lifetime;
+		uint32_t this_update = net_timeout_evaluate(timeout,
+							    current_time);
+
+		if (this_update == 0U) {
+			prefix_lifetime_expired(current);
+			continue;
+		}
+
+		if (this_update < next_update) {
+			next_update = this_update;
 		}
 
 		if (current == next) {
@@ -1965,61 +1864,22 @@ static void prefix_lifetime_timeout(struct k_work *work)
 		}
 	}
 
-	if (found) {
-		/* If we are near upper limit of int32_t timeout, then lower it
-		 * a bit so that kernel timeout will not overflow.
-		 */
-		if (timeout_update >= NET_TIMEOUT_MAX_VALUE) {
-			timeout_update = NET_TIMEOUT_MAX_VALUE;
-		}
-
-		NET_DBG("Waiting for %d ms", (uint32_t)timeout_update);
-
+	if (next_update != UINT32_MAX) {
 		k_delayed_work_submit(&prefix_lifetime_timer,
-				      K_MSEC(timeout_update));
-	}
-}
-
-static void prefix_submit_work(struct net_if_ipv6_prefix *ifprefix)
-{
-	int32_t remaining;
-
-	remaining = k_delayed_work_remaining_get(&prefix_lifetime_timer);
-	if (!remaining || (ifprefix->lifetime.wrap_counter == 0 &&
-			   ifprefix->lifetime.timer_timeout < remaining)) {
-		k_delayed_work_cancel(&prefix_lifetime_timer);
-
-		if (ifprefix->lifetime.wrap_counter > 0 && remaining == 0) {
-			k_delayed_work_submit(&prefix_lifetime_timer,
-					      K_MSEC(NET_TIMEOUT_MAX_VALUE));
-		} else {
-			k_delayed_work_submit(&prefix_lifetime_timer,
-					      K_MSEC(ifprefix->lifetime.timer_timeout));
-		}
-
-		NET_DBG("Next wakeup in %d ms",
-			k_delayed_work_remaining_get(&prefix_lifetime_timer));
+				      K_MSEC(next_update));
 	}
 }
 
 static void prefix_start_timer(struct net_if_ipv6_prefix *ifprefix,
 			       uint32_t lifetime)
 {
-	uint64_t expire_timeout = (uint64_t)MSEC_PER_SEC * (uint64_t)lifetime;
-
 	(void)sys_slist_find_and_remove(&active_prefix_lifetime_timers,
 					&ifprefix->lifetime.node);
 	sys_slist_append(&active_prefix_lifetime_timers,
 			 &ifprefix->lifetime.node);
 
-	ifprefix->lifetime.timer_start = k_uptime_get_32();
-	ifprefix->lifetime.wrap_counter = expire_timeout /
-		(uint64_t)NET_TIMEOUT_MAX_VALUE;
-	ifprefix->lifetime.timer_timeout = expire_timeout -
-		(uint64_t)NET_TIMEOUT_MAX_VALUE *
-		(uint64_t)ifprefix->lifetime.wrap_counter;
-
-	prefix_submit_work(ifprefix);
+	net_timeout_set(&ifprefix->lifetime, lifetime, k_uptime_get_32());
+	k_delayed_work_submit(&prefix_lifetime_timer, K_NO_WAIT);
 }
 
 static struct net_if_ipv6_prefix *ipv6_prefix_find(struct net_if *iface,
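
For context on the new pattern, here is a hedged usage sketch assembled from the calls visible in the hunks above: net_timeout_set() takes the lifetime in seconds plus the current uptime in milliseconds, and net_timeout_evaluate() returns 0 once the deadline has passed, otherwise the delay in milliseconds before the timer must be re-evaluated. The header path, the demo_* names, and the init helper are assumptions for illustration, not taken from the diff.

    /* Sketch of a client of the refactored net_timeout API, modeled on the
     * new address/prefix handlers above. Assumptions beyond the diff: the
     * include paths and the exact return-value semantics described in the
     * lead-in.
     */
    #include <zephyr.h>
    #include <net/net_timeout.h>

    static struct net_timeout demo_timeout;
    static struct k_delayed_work demo_work;

    static void demo_work_handler(struct k_work *work)
    {
        uint32_t now = k_uptime_get_32();
        uint32_t next = net_timeout_evaluate(&demo_timeout, now);

        ARG_UNUSED(work);

        if (next == 0U) {
            /* Deadline reached: perform the expiry action here. */
            return;
        }

        /* Not yet expired: re-arm for the next segment or residual delay. */
        k_delayed_work_submit(&demo_work, K_MSEC(next));
    }

    static void demo_init(void)
    {
        k_delayed_work_init(&demo_work, demo_work_handler);
    }

    /* Start a lifetime given in seconds, mirroring address_start_timer(). */
    static void demo_start(uint32_t lifetime_sec)
    {
        net_timeout_set(&demo_timeout, lifetime_sec, k_uptime_get_32());
        k_delayed_work_submit(&demo_work, K_NO_WAIT);
    }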