2016-05-02 09:02:04 +02:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2016 Intel Corporation.
|
|
|
|
*
|
2017-01-18 17:01:01 -08:00
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
2016-05-02 09:02:04 +02:00
|
|
|
*/
|
|
|
|
|
2018-11-30 12:54:56 +02:00
|
|
|
#include <logging/log.h>
|
|
|
|
LOG_MODULE_REGISTER(net_if, CONFIG_NET_IF_LOG_LEVEL);
|
2016-05-09 15:43:06 +03:00
|
|
|
|
2016-05-02 09:02:04 +02:00
|
|
|
#include <init.h>
|
2016-11-09 17:44:21 +01:00
|
|
|
#include <kernel.h>
|
2017-06-17 11:30:47 -04:00
|
|
|
#include <linker/sections.h>
|
2019-05-29 11:43:13 +08:00
|
|
|
#include <syscall_handler.h>
|
2018-08-17 10:20:05 +03:00
|
|
|
#include <stdlib.h>
|
2016-05-09 15:43:06 +03:00
|
|
|
#include <string.h>
|
|
|
|
#include <net/net_core.h>
|
2017-04-03 17:14:35 +02:00
|
|
|
#include <net/net_pkt.h>
|
2016-05-19 12:15:06 +03:00
|
|
|
#include <net/net_if.h>
|
2016-09-29 18:33:03 +02:00
|
|
|
#include <net/net_mgmt.h>
|
2018-03-14 10:55:19 +02:00
|
|
|
#include <net/ethernet.h>
|
2016-05-19 12:15:06 +03:00
|
|
|
|
|
|
|
#include "net_private.h"
|
2016-06-07 16:34:49 +03:00
|
|
|
#include "ipv6.h"
|
2018-07-30 18:28:35 +03:00
|
|
|
#include "ipv4_autoconf_internal.h"
|
2016-05-02 09:02:04 +02:00
|
|
|
|
2016-12-13 14:50:31 +01:00
|
|
|
#include "net_stats.h"
|
|
|
|
|
2018-05-24 12:41:58 +03:00
|
|
|
#define REACHABLE_TIME K_SECONDS(30) /* in ms */
|
2017-12-02 21:18:50 -08:00
|
|
|
/*
|
|
|
|
* split the min/max random reachable factors into numerator/denominator
|
|
|
|
* so that integer-based math works better
|
|
|
|
*/
|
|
|
|
#define MIN_RANDOM_NUMER (1)
|
|
|
|
#define MIN_RANDOM_DENOM (2)
|
|
|
|
#define MAX_RANDOM_NUMER (3)
|
|
|
|
#define MAX_RANDOM_DENOM (2)
|
2016-06-07 10:16:58 +03:00
|
|
|
|
2016-05-02 09:02:04 +02:00
|
|
|
/* net_if dedicated section limiters */
|
|
|
|
extern struct net_if __net_if_start[];
|
|
|
|
extern struct net_if __net_if_end[];
|
|
|
|
|
2018-02-07 15:00:08 +02:00
|
|
|
extern struct net_if_dev __net_if_dev_start[];
|
|
|
|
extern struct net_if_dev __net_if_dev_end[];
|
2017-03-08 09:30:03 +01:00
|
|
|
|
2019-08-09 14:49:35 +03:00
|
|
|
#if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
|
2016-06-07 10:16:58 +03:00
|
|
|
static struct net_if_router routers[CONFIG_NET_MAX_ROUTERS];
|
2019-05-22 12:04:04 +02:00
|
|
|
static struct k_delayed_work router_timer;
|
|
|
|
static sys_slist_t active_router_timers;
|
2019-02-07 15:00:44 +02:00
|
|
|
#endif
|
2016-06-07 10:16:58 +03:00
|
|
|
|
2019-08-09 14:49:35 +03:00
|
|
|
#if defined(CONFIG_NET_NATIVE_IPV6)
|
2018-08-17 10:20:05 +03:00
|
|
|
/* Timer that triggers network address renewal */
|
|
|
|
static struct k_delayed_work address_lifetime_timer;
|
|
|
|
|
|
|
|
/* Track currently active address lifetime timers */
|
|
|
|
static sys_slist_t active_address_lifetime_timers;
|
|
|
|
|
2018-08-27 14:02:09 +03:00
|
|
|
/* Timer that triggers IPv6 prefix lifetime */
|
|
|
|
static struct k_delayed_work prefix_lifetime_timer;
|
|
|
|
|
|
|
|
/* Track currently active IPv6 prefix lifetime timers */
|
|
|
|
static sys_slist_t active_prefix_lifetime_timers;
|
|
|
|
|
2019-05-21 11:12:45 +02:00
|
|
|
#if defined(CONFIG_NET_IPV6_DAD)
|
|
|
|
/** Duplicate address detection (DAD) timer */
|
|
|
|
static struct k_delayed_work dad_timer;
|
|
|
|
static sys_slist_t active_dad_timers;
|
|
|
|
#endif
|
|
|
|
|
2019-05-21 12:07:08 +02:00
|
|
|
#if defined(CONFIG_NET_IPV6_ND)
|
|
|
|
static struct k_delayed_work rs_timer;
|
|
|
|
static sys_slist_t active_rs_timers;
|
|
|
|
#endif
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
static struct {
|
|
|
|
struct net_if_ipv6 ipv6;
|
|
|
|
struct net_if *iface;
|
|
|
|
} ipv6_addresses[CONFIG_NET_IF_MAX_IPV6_COUNT];
|
|
|
|
#endif /* CONFIG_NET_IPV6 */
|
|
|
|
|
2019-08-09 14:49:35 +03:00
|
|
|
#if defined(CONFIG_NET_NATIVE_IPV4)
|
2018-01-19 19:01:23 +02:00
|
|
|
static struct {
|
|
|
|
struct net_if_ipv4 ipv4;
|
|
|
|
struct net_if *iface;
|
|
|
|
} ipv4_addresses[CONFIG_NET_IF_MAX_IPV4_COUNT];
|
|
|
|
#endif /* CONFIG_NET_IPV4 */
|
|
|
|
|
2016-09-28 14:18:55 +03:00
|
|
|
/* We keep track of the link callbacks in this list.
|
|
|
|
*/
|
|
|
|
static sys_slist_t link_callbacks;
|
|
|
|
|
2019-08-09 14:49:35 +03:00
|
|
|
#if defined(CONFIG_NET_NATIVE_IPV6)
|
2017-08-29 09:57:27 +03:00
|
|
|
/* Multicast join/leave tracking.
|
|
|
|
*/
|
|
|
|
static sys_slist_t mcast_monitor_callbacks;
|
|
|
|
#endif
|
|
|
|
|
2019-05-30 18:32:07 +08:00
|
|
|
#if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
|
2018-01-24 15:20:21 +02:00
|
|
|
#if !defined(CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE)
|
|
|
|
#define CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE 1024
|
|
|
|
#endif
|
|
|
|
|
2020-03-12 14:40:29 -07:00
|
|
|
K_THREAD_STACK_DEFINE(tx_ts_stack, CONFIG_NET_PKT_TIMESTAMP_STACK_SIZE);
|
2018-01-24 15:20:21 +02:00
|
|
|
K_FIFO_DEFINE(tx_ts_queue);
|
|
|
|
|
|
|
|
static struct k_thread tx_thread_ts;
|
|
|
|
|
|
|
|
/* We keep track of the timestamp callbacks in this list.
|
|
|
|
*/
|
|
|
|
static sys_slist_t timestamp_callbacks;
|
2019-05-30 18:32:07 +08:00
|
|
|
#endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
|
2018-01-24 15:20:21 +02:00
|
|
|
|
2018-11-30 12:54:56 +02:00
|
|
|
#if CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG
|
2018-02-07 15:09:36 +02:00
|
|
|
#define debug_check_packet(pkt) \
|
|
|
|
do { \
|
2020-02-14 21:19:07 +02:00
|
|
|
NET_DBG("Processing (pkt %p, prio %d) network packet " \
|
|
|
|
"iface %p/%d", \
|
|
|
|
pkt, net_pkt_priority(pkt), \
|
|
|
|
net_pkt_iface(pkt), \
|
|
|
|
net_if_get_by_iface(net_pkt_iface(pkt))); \
|
2018-02-07 15:09:36 +02:00
|
|
|
\
|
|
|
|
NET_ASSERT(pkt->frags); \
|
|
|
|
} while (0)
|
2016-06-08 14:35:07 +03:00
|
|
|
#else
|
|
|
|
#define debug_check_packet(...)
|
2018-11-30 12:54:56 +02:00
|
|
|
#endif /* CONFIG_NET_IF_LOG_LEVEL >= LOG_LEVEL_DBG */
|
2016-06-08 14:35:07 +03:00
|
|
|
|
2016-12-13 14:50:31 +01:00
|
|
|
static inline void net_context_send_cb(struct net_context *context,
|
2019-02-21 10:04:36 +01:00
|
|
|
int status)
|
2016-12-13 14:50:31 +01:00
|
|
|
{
|
2017-08-25 23:28:02 +03:00
|
|
|
if (!context) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-12-13 14:50:31 +01:00
|
|
|
if (context->send_cb) {
|
2019-02-21 10:15:21 +01:00
|
|
|
context->send_cb(context, status, context->user_data);
|
2016-12-13 14:50:31 +01:00
|
|
|
}
|
|
|
|
|
2019-05-20 15:46:14 +02:00
|
|
|
if (IS_ENABLED(CONFIG_NET_UDP) &&
|
|
|
|
net_context_get_ip_proto(context) == IPPROTO_UDP) {
|
2018-03-27 11:31:31 +03:00
|
|
|
net_stats_update_udp_sent(net_context_get_iface(context));
|
2019-05-20 15:46:14 +02:00
|
|
|
} else if (IS_ENABLED(CONFIG_NET_TCP) &&
|
|
|
|
net_context_get_ip_proto(context) == IPPROTO_TCP) {
|
2018-03-27 11:31:31 +03:00
|
|
|
net_stats_update_tcp_seg_sent(net_context_get_iface(context));
|
2017-05-16 15:27:27 +03:00
|
|
|
}
|
2016-12-13 14:50:31 +01:00
|
|
|
}
|
|
|
|
|
2018-02-07 15:00:08 +02:00
|
|
|
/* Actually push one packet out through the interface's L2 send hook.
 * Returns false only when pkt is NULL; a send failure still returns true
 * (the packet is unreffed and the context callback gets the error status).
 */
static bool net_if_tx(struct net_if *iface, struct net_pkt *pkt)
{
	struct net_linkaddr ll_dst = {
		.addr = NULL
	};
	struct net_linkaddr_storage ll_dst_storage;
	struct net_context *context;
	int status;

	/* Timestamp of the current network packet sent if enabled */
	struct net_ptp_time start_timestamp;
	u32_t curr_time = 0;

	/* We collect send statistics for each socket priority if enabled */
	u8_t pkt_priority;

	if (!pkt) {
		return false;
	}

	debug_check_packet(pkt);

	/* If there're any link callbacks, with such a callback receiving
	 * a destination address, copy that address out of packet, just in
	 * case packet is freed before callback is called.
	 */
	if (!sys_slist_is_empty(&link_callbacks)) {
		if (net_linkaddr_set(&ll_dst_storage,
				     net_pkt_lladdr_dst(pkt)->addr,
				     net_pkt_lladdr_dst(pkt)->len) == 0) {
			ll_dst.addr = ll_dst_storage.addr;
			ll_dst.len = ll_dst_storage.len;
			ll_dst.type = net_pkt_lladdr_dst(pkt)->type;
		}
	}

	context = net_pkt_context(pkt);

	if (net_if_flag_is_set(iface, NET_IF_UP)) {
		if (IS_ENABLED(CONFIG_NET_TCP) &&
		    net_pkt_family(pkt) != AF_UNSPEC) {
			net_pkt_set_queued(pkt, false);
		}

		/* Record when the packet entered the stack so we can compute
		 * its time-in-transit after the L2 send completes.
		 */
		if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMP) && context) {
			if (net_context_get_timestamp(context, pkt,
						      &start_timestamp) < 0) {
				/* nanosecond == 0 marks "no timestamp" for
				 * the post-send accounting below.
				 */
				start_timestamp.nanosecond = 0;
			} else {
				pkt_priority = net_pkt_priority(pkt);
			}
		}

		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
			memcpy(&start_timestamp, net_pkt_timestamp(pkt),
			       sizeof(start_timestamp));
			pkt_priority = net_pkt_priority(pkt);
		}

		status = net_if_l2(iface)->send(iface, pkt);

		/* Capture the completion time before pkt may be freed; the
		 * stats update itself happens after the context callback.
		 */
		if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMP) && status >= 0 &&
		    context) {
			if (start_timestamp.nanosecond > 0) {
				curr_time = k_cycle_get_32();
			}
		}

		if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS) && status >= 0) {
			net_stats_update_tc_tx_time(iface,
						    pkt_priority,
						    start_timestamp.nanosecond,
						    k_cycle_get_32());
		}

	} else {
		/* Drop packet if interface is not up */
		NET_WARN("iface %p is down", iface);
		status = -ENETDOWN;
	}

	/* On success, status holds the number of bytes sent (L2 contract
	 * implied by the bytes_sent update below).
	 */
	if (status < 0) {
		net_pkt_unref(pkt);
	} else {
		net_stats_update_bytes_sent(iface, status);
	}

	if (context) {
		NET_DBG("Calling context send cb %p status %d",
			context, status);

		net_context_send_cb(context, status);

		if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMP) && status >= 0 &&
		    start_timestamp.nanosecond && curr_time > 0) {
			/* So we know now how long the network packet was in
			 * transit from when it was allocated to when we
			 * got information that it was sent successfully.
			 */
			net_stats_update_tc_tx_time(iface,
						    pkt_priority,
						    start_timestamp.nanosecond,
						    curr_time);
		}
	}

	/* Use the saved copy of the destination address here: pkt may
	 * already have been freed above.
	 */
	if (ll_dst.addr) {
		net_if_call_link_cb(iface, &ll_dst, status);
	}

	return true;
}
|
|
|
|
|
2018-02-07 15:00:08 +02:00
|
|
|
static void process_tx_packet(struct k_work *work)
|
2017-02-10 10:38:15 +02:00
|
|
|
{
|
2020-03-19 18:48:50 +02:00
|
|
|
struct net_if *iface;
|
2018-02-07 15:00:08 +02:00
|
|
|
struct net_pkt *pkt;
|
2016-05-19 12:15:06 +03:00
|
|
|
|
2018-02-07 15:00:08 +02:00
|
|
|
pkt = CONTAINER_OF(work, struct net_pkt, work);
|
2016-06-01 10:35:00 +03:00
|
|
|
|
2020-03-19 18:48:50 +02:00
|
|
|
iface = net_pkt_iface(pkt);
|
|
|
|
|
|
|
|
net_if_tx(iface, pkt);
|
|
|
|
|
|
|
|
#if defined(CONFIG_NET_POWER_MANAGEMENT)
|
|
|
|
iface->tx_pending--;
|
|
|
|
#endif
|
2017-03-08 09:30:03 +01:00
|
|
|
}
|
|
|
|
|
2018-02-07 15:00:08 +02:00
|
|
|
void net_if_queue_tx(struct net_if *iface, struct net_pkt *pkt)
|
2017-03-08 09:30:03 +01:00
|
|
|
{
|
2018-02-07 15:00:08 +02:00
|
|
|
u8_t prio = net_pkt_priority(pkt);
|
|
|
|
u8_t tc = net_tx_priority2tc(prio);
|
2017-02-10 10:38:15 +02:00
|
|
|
|
2018-02-07 15:00:08 +02:00
|
|
|
k_work_init(net_pkt_work(pkt), process_tx_packet);
|
2016-05-03 09:34:45 +03:00
|
|
|
|
2018-03-27 11:31:31 +03:00
|
|
|
net_stats_update_tc_sent_pkt(iface, tc);
|
2019-02-21 10:30:13 +01:00
|
|
|
net_stats_update_tc_sent_bytes(iface, tc, net_pkt_get_len(pkt));
|
2018-03-27 11:31:31 +03:00
|
|
|
net_stats_update_tc_sent_priority(iface, tc, prio);
|
2018-02-07 15:09:36 +02:00
|
|
|
|
2018-02-07 15:00:08 +02:00
|
|
|
#if NET_TC_TX_COUNT > 1
|
|
|
|
NET_DBG("TC %d with prio %d pkt %p", tc, prio, pkt);
|
|
|
|
#endif
|
2016-06-08 14:16:17 +03:00
|
|
|
|
2020-03-19 18:48:50 +02:00
|
|
|
#if defined(CONFIG_NET_POWER_MANAGEMENT)
|
|
|
|
iface->tx_pending++;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (!net_tc_submit_to_tx_queue(tc, pkt)) {
|
|
|
|
#if defined(CONFIG_NET_POWER_MANAGEMENT)
|
|
|
|
iface->tx_pending--
|
|
|
|
#endif
|
|
|
|
;
|
|
|
|
}
|
2016-05-03 09:34:45 +03:00
|
|
|
}
|
|
|
|
|
2019-10-10 15:36:17 +03:00
|
|
|
/* Zero the statistics of one interface, but only if the given pointer
 * really is one of the interfaces in the dedicated net_if section.
 */
void net_if_stats_reset(struct net_if *iface)
{
#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
	struct net_if *cur;

	for (cur = __net_if_start; cur != __net_if_end; cur++) {
		if (cur != iface) {
			continue;
		}

		memset(&iface->stats, 0, sizeof(iface->stats));
		return;
	}
#endif
}
|
|
|
|
|
|
|
|
/* Zero the statistics of every registered interface. */
void net_if_stats_reset_all(void)
{
#if defined(CONFIG_NET_STATISTICS_PER_INTERFACE)
	struct net_if *cur;

	for (cur = __net_if_start; cur != __net_if_end; cur++) {
		memset(&cur->stats, 0, sizeof(cur->stats));
	}
#endif
}
|
|
|
|
|
2017-03-08 09:30:03 +01:00
|
|
|
static inline void init_iface(struct net_if *iface)
|
2016-05-03 09:34:45 +03:00
|
|
|
{
|
2018-01-11 16:06:53 +02:00
|
|
|
const struct net_if_api *api = net_if_get_device(iface)->driver_api;
|
2017-03-08 09:30:03 +01:00
|
|
|
|
2019-04-15 23:58:00 +03:00
|
|
|
if (!api || !api->init) {
|
|
|
|
NET_ERR("Iface %p driver API init NULL", iface);
|
|
|
|
return;
|
|
|
|
}
|
2017-03-08 09:30:03 +01:00
|
|
|
|
2016-06-21 12:10:13 +02:00
|
|
|
NET_DBG("On iface %p", iface);
|
|
|
|
|
2017-03-08 09:30:03 +01:00
|
|
|
api->init(iface);
|
2016-05-03 09:34:45 +03:00
|
|
|
}
|
|
|
|
|
2017-04-05 08:37:44 +02:00
|
|
|
/* Prepare a packet for transmission and either queue it (NET_OK), drop it
 * (NET_DROP, with callbacks notified), or leave it pending (NET_CONTINUE,
 * e.g. while IPv6 ND resolves the link-layer address).
 */
enum net_verdict net_if_send_data(struct net_if *iface, struct net_pkt *pkt)
{
	struct net_context *context = net_pkt_context(pkt);
	struct net_linkaddr *dst = net_pkt_lladdr_dst(pkt);
	enum net_verdict verdict = NET_OK;
	int status = -EIO;

	if (!net_if_flag_is_set(iface, NET_IF_UP) ||
	    net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
		/* Drop packet if interface is not up */
		NET_WARN("iface %p is down", iface);
		verdict = NET_DROP;
		status = -ENETDOWN;
		goto done;
	}

	/* If the ll address is not set at all, then we must set
	 * it here.
	 * Workaround Linux bug, see:
	 * https://github.com/zephyrproject-rtos/zephyr/issues/3111
	 */
	if (!net_if_flag_is_set(iface, NET_IF_POINTOPOINT) &&
	    !net_pkt_lladdr_src(pkt)->addr) {
		net_pkt_lladdr_src(pkt)->addr = net_pkt_lladdr_if(pkt)->addr;
		net_pkt_lladdr_src(pkt)->len = net_pkt_lladdr_if(pkt)->len;
	}

#if defined(CONFIG_NET_LOOPBACK)
	/* If the packet is destined back to us, then there is no need to do
	 * additional checks, so let the packet through.
	 */
	if (net_if_l2(iface) == &NET_L2_GET_NAME(DUMMY)) {
		goto done;
	}
#endif

	/* If the ll dst address is not set check if it is present in the nbr
	 * cache.
	 */
	if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
		verdict = net_ipv6_prepare_for_send(pkt);
	}

done:
	/* NET_OK in which case packet has checked successfully. In this case
	 * the net_context callback is called after successful delivery in
	 * net_if_tx_thread().
	 *
	 * NET_DROP in which case we call net_context callback that will
	 * give the status to user application.
	 *
	 * NET_CONTINUE in which case the sending of the packet is delayed.
	 * This can happen for example if we need to do IPv6 ND to figure
	 * out link layer address.
	 */
	if (verdict == NET_DROP) {
		if (context) {
			NET_DBG("Calling ctx send cb %p verdict %d",
				context, verdict);
			net_context_send_cb(context, status);
		}

		if (dst->addr) {
			net_if_call_link_cb(iface, dst, status);
		}
	} else if (verdict == NET_OK) {
		/* Packet is ready to be sent by L2, let's queue */
		net_if_queue_tx(iface, pkt);
	}

	return verdict;
}
|
|
|
|
|
2016-05-09 15:00:30 +03:00
|
|
|
struct net_if *net_if_get_by_link_addr(struct net_linkaddr *ll_addr)
|
|
|
|
{
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
for (iface = __net_if_start; iface != __net_if_end; iface++) {
|
2018-01-11 16:06:53 +02:00
|
|
|
if (!memcmp(net_if_get_link_addr(iface)->addr, ll_addr->addr,
|
2016-05-09 15:00:30 +03:00
|
|
|
ll_addr->len)) {
|
|
|
|
return iface;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2016-06-15 13:15:40 +02:00
|
|
|
struct net_if *net_if_lookup_by_dev(struct device *dev)
|
|
|
|
{
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
for (iface = __net_if_start; iface != __net_if_end; iface++) {
|
2018-01-11 16:06:53 +02:00
|
|
|
if (net_if_get_device(iface) == dev) {
|
2016-06-15 13:15:40 +02:00
|
|
|
return iface;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2016-06-24 17:34:30 +02:00
|
|
|
/* Return the default network interface.
 *
 * NOTE(review): when several CONFIG_NET_DEFAULT_IF_* options are enabled at
 * once, the assignments below run in order, so the LAST enabled option in
 * this list wins. Falls back to the first registered interface if no
 * option matched (or none is enabled).
 */
struct net_if *net_if_get_default(void)
{
	struct net_if *iface = NULL;

	/* No interfaces registered at all. */
	if (__net_if_start == __net_if_end) {
		return NULL;
	}

#if defined(CONFIG_NET_DEFAULT_IF_ETHERNET)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(ETHERNET));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_IEEE802154)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(IEEE802154));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_BLUETOOTH)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(BLUETOOTH));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_DUMMY)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(DUMMY));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_OFFLOAD)
	/* NULL type means "first offloaded interface" here. */
	iface = net_if_get_first_by_type(NULL);
#endif
#if defined(CONFIG_NET_DEFAULT_IF_CANBUS_RAW)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(CANBUS_RAW));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_CANBUS)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(CANBUS));
#endif
#if defined(CONFIG_NET_DEFAULT_IF_PPP)
	iface = net_if_get_first_by_type(&NET_L2_GET_NAME(PPP));
#endif

	return iface ? iface : __net_if_start;
}
|
|
|
|
|
2017-08-08 08:38:47 +03:00
|
|
|
struct net_if *net_if_get_first_by_type(const struct net_l2 *l2)
|
|
|
|
{
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
for (iface = __net_if_start; iface != __net_if_end; iface++) {
|
2019-05-20 15:46:14 +02:00
|
|
|
if (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
|
|
|
|
!l2 && net_if_offload(iface)) {
|
2017-05-03 10:32:24 +02:00
|
|
|
return iface;
|
|
|
|
}
|
|
|
|
|
2018-01-11 16:06:53 +02:00
|
|
|
if (net_if_l2(iface) == l2) {
|
2017-08-08 08:38:47 +03:00
|
|
|
return iface;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-04-01 14:13:11 +03:00
|
|
|
static enum net_l2_flags l2_flags_get(struct net_if *iface)
|
|
|
|
{
|
|
|
|
enum net_l2_flags flags = 0;
|
|
|
|
|
2019-09-05 15:56:29 -07:00
|
|
|
if (net_if_l2(iface) && net_if_l2(iface)->get_flags) {
|
2019-04-01 14:13:11 +03:00
|
|
|
flags = net_if_l2(iface)->get_flags(iface);
|
|
|
|
}
|
|
|
|
|
|
|
|
return flags;
|
|
|
|
}
|
|
|
|
|
2019-08-09 14:49:35 +03:00
|
|
|
#if defined(CONFIG_NET_NATIVE_IPV4) || defined(CONFIG_NET_NATIVE_IPV6)
|
2018-04-27 13:01:33 +03:00
|
|
|
/* Return how many bits are shared between two IP addresses */
|
|
|
|
static u8_t get_ipaddr_diff(const u8_t *src, const u8_t *dst, int addr_len)
|
|
|
|
{
|
|
|
|
u8_t j, k, xor;
|
2018-11-29 11:23:03 -08:00
|
|
|
u8_t len = 0U;
|
2018-04-27 13:01:33 +03:00
|
|
|
|
2018-11-29 11:23:03 -08:00
|
|
|
for (j = 0U; j < addr_len; j++) {
|
2018-04-27 13:01:33 +03:00
|
|
|
if (src[j] == dst[j]) {
|
2019-03-26 19:57:45 -06:00
|
|
|
len += 8U;
|
2018-04-27 13:01:33 +03:00
|
|
|
} else {
|
|
|
|
xor = src[j] ^ dst[j];
|
2018-11-29 11:23:03 -08:00
|
|
|
for (k = 0U; k < 8; k++) {
|
2018-04-27 13:01:33 +03:00
|
|
|
if (!(xor & 0x80)) {
|
|
|
|
len++;
|
|
|
|
xor <<= 1;
|
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
2019-05-22 12:04:04 +02:00
|
|
|
|
|
|
|
static struct net_if_router *iface_router_lookup(struct net_if *iface,
|
|
|
|
u8_t family, void *addr)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
|
|
|
|
if (!routers[i].is_used ||
|
|
|
|
routers[i].address.family != family ||
|
|
|
|
routers[i].iface != iface) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6 &&
|
|
|
|
net_ipv6_addr_cmp(net_if_router_ipv6(&routers[i]),
|
|
|
|
(struct in6_addr *)addr)) ||
|
|
|
|
(IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET &&
|
|
|
|
net_ipv4_addr_cmp(net_if_router_ipv4(&routers[i]),
|
|
|
|
(struct in_addr *)addr))) {
|
|
|
|
return &routers[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void iface_router_notify_deletion(struct net_if_router *router,
|
|
|
|
const char *delete_reason)
|
|
|
|
{
|
|
|
|
if (IS_ENABLED(CONFIG_NET_IPV6) &&
|
|
|
|
router->address.family == AF_INET6) {
|
|
|
|
NET_DBG("IPv6 router %s %s",
|
|
|
|
log_strdup(net_sprint_ipv6_addr(
|
|
|
|
net_if_router_ipv6(router))),
|
|
|
|
delete_reason);
|
|
|
|
|
|
|
|
net_mgmt_event_notify_with_info(NET_EVENT_IPV6_ROUTER_DEL,
|
|
|
|
router->iface,
|
|
|
|
&router->address.in6_addr,
|
|
|
|
sizeof(struct in6_addr));
|
|
|
|
} else if (IS_ENABLED(CONFIG_NET_IPV4) &&
|
|
|
|
router->address.family == AF_INET) {
|
|
|
|
NET_DBG("IPv4 router %s %s",
|
|
|
|
log_strdup(net_sprint_ipv4_addr(
|
|
|
|
net_if_router_ipv4(router))),
|
|
|
|
delete_reason);
|
|
|
|
|
|
|
|
net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ROUTER_DEL,
|
|
|
|
router->iface,
|
|
|
|
&router->address.in_addr,
|
|
|
|
sizeof(struct in6_addr));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* (Re)arm the shared router lifetime timer so that it fires when the
 * soonest-expiring active router's lifetime ends.
 *
 * @param current_time Current uptime in ms (k_uptime_get_32() domain).
 */
static void iface_router_run_timer(u32_t current_time)
{
	struct net_if_router *router, *next;
	u32_t new_timer = UINT_MAX;

	/* Cancel a pending run; we recompute the delay from scratch. */
	if (k_delayed_work_remaining_get(&router_timer)) {
		k_delayed_work_cancel(&router_timer);
	}

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
					  router, next, node) {
		/* Remaining lifetime in ms; unsigned arithmetic so this
		 * relies on the expired handler having already removed
		 * entries whose deadline passed.
		 */
		u32_t current_timer = router->life_start +
			K_SECONDS(router->lifetime) - current_time;

		new_timer = MIN(current_timer, new_timer);
	}

	/* UINT_MAX means no time-limited router is active: leave the
	 * timer unarmed.
	 */
	if (new_timer != UINT_MAX) {
		k_delayed_work_submit(&router_timer, new_timer);
	}
}
|
|
|
|
|
|
|
|
/* Delayed-work handler: retire every router whose lifetime has elapsed,
 * then re-arm the timer for the remaining ones.
 */
static void iface_router_expired(struct k_work *work)
{
	u32_t current_time = k_uptime_get_32();
	struct net_if_router *router, *next;
	/* Predecessor of the current node, needed by sys_slist_remove();
	 * NULL while we are still at the head of the list.
	 */
	sys_snode_t *prev_node = NULL;

	ARG_UNUSED(work);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_router_timers,
					  router, next, node) {

		/* Signed comparison so deadlines up to ~24 days in the
		 * future survive u32 wraparound of the uptime counter.
		 */
		if ((s32_t)(router->life_start +
			    K_SECONDS(router->lifetime) - current_time) > 0) {
			/* We have to loop on all active routers as their
			 * lifetime differ from each other.
			 */
			prev_node = &router->node;
			continue;
		}

		iface_router_notify_deletion(router, "has expired");
		/* prev_node deliberately NOT advanced here: the removed
		 * node's predecessor stays the predecessor of the next one.
		 */
		sys_slist_remove(&active_router_timers,
				 prev_node, &router->node);
		router->is_used = false;
	}

	iface_router_run_timer(current_time);
}
|
|
|
|
|
|
|
|
/* Claim a free slot in the router table and populate it.
 *
 * @param iface      Interface the router was learned on.
 * @param family     AF_INET or AF_INET6; addr must point to the matching
 *                   in_addr / in6_addr.
 * @param addr       Router address, copied into the slot.
 * @param is_default Default-router flag; note it is only honored for IPv4.
 *                   A non-zero lifetime forces is_default true (IPv6 RA
 *                   semantics), a zero lifetime means an infinite,
 *                   non-default router.
 * @param lifetime   Lifetime in seconds; 0 means infinite.
 * @return Pointer to the slot used, or NULL when the table is full.
 */
static struct net_if_router *iface_router_add(struct net_if *iface,
					      u8_t family, void *addr,
					      bool is_default,
					      u16_t lifetime)
{
	int i;

	for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
		if (routers[i].is_used) {
			continue;
		}

		routers[i].is_used = true;
		routers[i].iface = iface;
		routers[i].address.family = family;

		if (lifetime) {
			routers[i].is_default = true;
			routers[i].is_infinite = false;
			routers[i].lifetime = lifetime;
			routers[i].life_start = k_uptime_get_32();

			sys_slist_append(&active_router_timers,
					 &routers[i].node);

			/* Re-arm the shared expiry timer to include this
			 * new lifetime.
			 */
			iface_router_run_timer(routers[i].life_start);
		} else {
			routers[i].is_default = false;
			routers[i].is_infinite = true;
			routers[i].lifetime = 0;
		}

		if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
			memcpy(net_if_router_ipv6(&routers[i]), addr,
			       sizeof(struct in6_addr));
			net_mgmt_event_notify_with_info(
					NET_EVENT_IPV6_ROUTER_ADD, iface,
					&routers[i].address.in6_addr,
					sizeof(struct in6_addr));

			NET_DBG("interface %p router %s lifetime %u default %d "
				"added", iface,
				log_strdup(net_sprint_ipv6_addr(
						   (struct in6_addr *)addr)),
				lifetime, routers[i].is_default);
		} else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
			memcpy(net_if_router_ipv4(&routers[i]), addr,
			       sizeof(struct in_addr));
			/* IPv4 honors the caller's default flag, overriding
			 * the lifetime-based setting above.
			 */
			routers[i].is_default = is_default;

			net_mgmt_event_notify_with_info(
					NET_EVENT_IPV4_ROUTER_ADD, iface,
					&routers[i].address.in_addr,
					sizeof(struct in_addr));

			NET_DBG("interface %p router %s lifetime %u default %d "
				"added", iface,
				log_strdup(net_sprint_ipv4_addr(
						   (struct in_addr *)addr)),
				lifetime, is_default);
		}

		return &routers[i];
	}

	return NULL;
}
|
|
|
|
|
|
|
|
static bool iface_router_rm(struct net_if_router *router)
|
|
|
|
{
|
|
|
|
if (!router->is_used) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
iface_router_notify_deletion(router, "has been removed");
|
|
|
|
|
|
|
|
/* We recompute the timer if only the router was time limited */
|
|
|
|
if (sys_slist_find_and_remove(&active_router_timers, &router->node)) {
|
|
|
|
iface_router_run_timer(k_uptime_get_32());
|
|
|
|
}
|
|
|
|
|
|
|
|
router->is_used = false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct net_if_router *iface_router_find_default(struct net_if *iface,
|
|
|
|
u8_t family, void *addr)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Todo: addr will need to be handled */
|
|
|
|
ARG_UNUSED(addr);
|
|
|
|
|
|
|
|
for (i = 0; i < CONFIG_NET_MAX_ROUTERS; i++) {
|
|
|
|
if (!routers[i].is_used ||
|
|
|
|
!routers[i].is_default ||
|
|
|
|
routers[i].address.family != family) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (iface && iface != routers[i].iface) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
return &routers[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void iface_router_init(void)
|
|
|
|
{
|
|
|
|
k_delayed_work_init(&router_timer, iface_router_expired);
|
|
|
|
sys_slist_init(&active_router_timers);
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
#define iface_router_init(...)
|
2019-02-07 15:00:44 +02:00
|
|
|
#endif
|
2018-04-27 13:01:33 +03:00
|
|
|
|
2019-08-09 14:49:35 +03:00
|
|
|
#if defined(CONFIG_NET_NATIVE_IPV6)
|
2018-01-19 19:01:23 +02:00
|
|
|
int net_if_config_ipv6_get(struct net_if *iface, struct net_if_ipv6 **ipv6)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (iface->config.ip.ipv6) {
|
|
|
|
if (ipv6) {
|
|
|
|
*ipv6 = iface->config.ip.ipv6;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
|
|
|
|
if (ipv6_addresses[i].iface) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
iface->config.ip.ipv6 = &ipv6_addresses[i].ipv6;
|
|
|
|
ipv6_addresses[i].iface = iface;
|
|
|
|
|
|
|
|
if (ipv6) {
|
|
|
|
*ipv6 = &ipv6_addresses[i].ipv6;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -ESRCH;
|
|
|
|
}
|
|
|
|
|
|
|
|
int net_if_config_ipv6_put(struct net_if *iface)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!iface->config.ip.ipv6) {
|
|
|
|
return -EALREADY;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
|
|
|
|
if (ipv6_addresses[i].iface != iface) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
iface->config.ip.ipv6 = NULL;
|
|
|
|
ipv6_addresses[i].iface = NULL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -ESRCH;
|
|
|
|
}
|
2016-06-24 17:34:30 +02:00
|
|
|
|
2017-02-09 09:57:06 +02:00
|
|
|
#if defined(CONFIG_NET_IPV6_MLD)
|
|
|
|
static void join_mcast_allnodes(struct net_if *iface)
|
|
|
|
{
|
|
|
|
struct in6_addr addr;
|
2017-03-06 11:03:26 +02:00
|
|
|
int ret;
|
2017-02-09 09:57:06 +02:00
|
|
|
|
|
|
|
net_ipv6_addr_create_ll_allnodes_mcast(&addr);
|
2017-03-06 11:03:26 +02:00
|
|
|
|
|
|
|
ret = net_ipv6_mld_join(iface, &addr);
|
2017-03-09 15:45:34 +02:00
|
|
|
if (ret < 0 && ret != -EALREADY) {
|
2017-03-06 11:03:26 +02:00
|
|
|
NET_ERR("Cannot join all nodes address %s (%d)",
|
2018-10-02 14:57:55 +03:00
|
|
|
log_strdup(net_sprint_ipv6_addr(&addr)), ret);
|
2017-03-06 11:03:26 +02:00
|
|
|
}
|
2017-02-09 09:57:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void join_mcast_solicit_node(struct net_if *iface,
|
|
|
|
struct in6_addr *my_addr)
|
|
|
|
{
|
|
|
|
struct in6_addr addr;
|
2017-03-06 11:03:26 +02:00
|
|
|
int ret;
|
2017-02-09 09:57:06 +02:00
|
|
|
|
|
|
|
/* Join to needed multicast groups, RFC 4291 ch 2.8 */
|
|
|
|
net_ipv6_addr_create_solicited_node(my_addr, &addr);
|
2017-03-06 11:03:26 +02:00
|
|
|
|
|
|
|
ret = net_ipv6_mld_join(iface, &addr);
|
2017-03-09 15:45:34 +02:00
|
|
|
if (ret < 0 && ret != -EALREADY) {
|
2017-03-06 11:03:26 +02:00
|
|
|
NET_ERR("Cannot join solicit node address %s (%d)",
|
2018-10-02 14:57:55 +03:00
|
|
|
log_strdup(net_sprint_ipv6_addr(&addr)), ret);
|
2017-03-06 11:03:26 +02:00
|
|
|
}
|
2017-02-09 09:57:06 +02:00
|
|
|
}
|
|
|
|
|
2017-02-10 09:36:09 +02:00
|
|
|
static void leave_mcast_all(struct net_if *iface)
|
2017-02-09 09:57:06 +02:00
|
|
|
{
|
2018-01-19 19:01:23 +02:00
|
|
|
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
|
2017-02-09 09:57:06 +02:00
|
|
|
int i;
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv6) {
|
|
|
|
return;
|
|
|
|
}
|
2018-01-11 16:06:53 +02:00
|
|
|
|
2017-02-09 09:57:06 +02:00
|
|
|
for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv6->mcast[i].is_used ||
|
|
|
|
!ipv6->mcast[i].is_joined) {
|
2017-02-09 09:57:06 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
net_ipv6_mld_leave(iface, &ipv6->mcast[i].address.in6_addr);
|
2017-02-09 09:57:06 +02:00
|
|
|
}
|
|
|
|
}
|
2019-05-20 15:46:14 +02:00
|
|
|
|
|
|
|
static void join_mcast_nodes(struct net_if *iface, struct in6_addr *addr)
|
|
|
|
{
|
|
|
|
enum net_l2_flags flags = 0;
|
|
|
|
|
2019-09-05 15:56:29 -07:00
|
|
|
flags = l2_flags_get(iface);
|
2019-05-20 15:46:14 +02:00
|
|
|
if (flags & NET_L2_MULTICAST) {
|
|
|
|
join_mcast_allnodes(iface);
|
|
|
|
|
|
|
|
if (!(flags & NET_L2_MULTICAST_SKIP_JOIN_SOLICIT_NODE)) {
|
|
|
|
join_mcast_solicit_node(iface, addr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-02-09 09:57:06 +02:00
|
|
|
#else
/* MLD disabled: multicast join/leave helpers compile away to no-ops. */
#define join_mcast_allnodes(...)
#define join_mcast_solicit_node(...)
#define leave_mcast_all(...)
#define join_mcast_nodes(...)
#endif /* CONFIG_NET_IPV6_MLD */
|
|
|
|
|
2016-06-16 11:01:40 +03:00
|
|
|
#if defined(CONFIG_NET_IPV6_DAD)
|
2018-05-24 12:41:58 +03:00
|
|
|
#define DAD_TIMEOUT K_MSEC(100)
|
2016-06-07 16:34:49 +03:00
|
|
|
|
2016-11-09 17:44:21 +01:00
|
|
|
/* Handler for the single shared DAD delayed work item.
 *
 * Walks active_dad_timers (ordered by dad_start, since entries are
 * appended as DAD begins) and finalizes every address whose DAD window
 * has elapsed without a conflict: the address becomes PREFERRED, a
 * NET_EVENT_IPV6_DAD_SUCCEED event is emitted and the address is purged
 * from the neighbor cache. If a not-yet-expired entry remains, the work
 * item is re-armed for that entry's residual time.
 */
static void dad_timeout(struct k_work *work)
{
	u32_t current_time = k_uptime_get_32();
	struct net_if_addr *ifaddr, *next;

	ARG_UNUSED(work);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_dad_timers,
					  ifaddr, next, dad_node) {
		struct net_if_addr *tmp;
		struct net_if *iface;

		/* Signed wrap-safe compare: list is start-time ordered,
		 * so the first pending entry ends the sweep.
		 */
		if ((s32_t)(ifaddr->dad_start +
			    DAD_TIMEOUT - current_time) > 0) {
			break;
		}

		/* Removing the ifaddr from active_dad_timers list */
		sys_slist_remove(&active_dad_timers, NULL, &ifaddr->dad_node);

		NET_DBG("DAD succeeded for %s",
			log_strdup(net_sprint_ipv6_addr(
					   &ifaddr->address.in6_addr)));

		ifaddr->addr_state = NET_ADDR_PREFERRED;

		/* Because we do not know the interface at this point,
		 * we need to lookup for it.
		 */
		iface = NULL;
		tmp = net_if_ipv6_addr_lookup(&ifaddr->address.in6_addr,
					      &iface);
		if (tmp == ifaddr) {
			net_mgmt_event_notify_with_info(
				NET_EVENT_IPV6_DAD_SUCCEED,
				iface, &ifaddr->address.in6_addr,
				sizeof(struct in6_addr));

			/* The address gets added to neighbor cache which is not
			 * needed in this case as the address is our own one.
			 */
			net_ipv6_nbr_rm(iface, &ifaddr->address.in6_addr);
		}

		/* NULL marks "all expired entries handled"; a non-NULL
		 * ifaddr after the loop is the entry that broke out early.
		 */
		ifaddr = NULL;
	}

	if (ifaddr) {
		/* Re-arm for the first still-pending DAD deadline. */
		k_delayed_work_submit(&dad_timer,
				      ifaddr->dad_start +
				      DAD_TIMEOUT - current_time);
	}
}
|
|
|
|
|
2016-11-11 10:34:51 +02:00
|
|
|
/* Begin Duplicate Address Detection for @ifaddr on @iface.
 *
 * The address is marked TENTATIVE. If the interface is up, a neighbor
 * solicitation is sent via net_ipv6_start_dad() and the address is
 * queued on active_dad_timers; the shared dad_timer is armed if it is
 * not already running. If the interface is down, DAD is deferred until
 * net_if_start_dad() is called when the interface comes up.
 */
static void net_if_ipv6_start_dad(struct net_if *iface,
				  struct net_if_addr *ifaddr)
{
	ifaddr->addr_state = NET_ADDR_TENTATIVE;

	if (net_if_is_up(iface)) {
		NET_DBG("Interface %p ll addr %s tentative IPv6 addr %s",
			iface,
			log_strdup(net_sprint_ll_addr(
					   net_if_get_link_addr(iface)->addr,
					   net_if_get_link_addr(iface)->len)),
			log_strdup(net_sprint_ipv6_addr(
					   &ifaddr->address.in6_addr)));

		ifaddr->dad_count = 1U;

		if (!net_ipv6_start_dad(iface, ifaddr)) {
			ifaddr->dad_start = k_uptime_get_32();
			sys_slist_append(&active_dad_timers, &ifaddr->dad_node);

			/* Arm the shared timer only when idle; otherwise
			 * dad_timeout() will re-arm it as entries expire.
			 */
			if (!k_delayed_work_remaining_get(&dad_timer)) {
				k_delayed_work_submit(&dad_timer, DAD_TIMEOUT);
			}
		}
	} else {
		NET_DBG("Interface %p is down, starting DAD for %s later.",
			iface,
			log_strdup(net_sprint_ipv6_addr(
					   &ifaddr->address.in6_addr)));
	}
}
|
2016-11-11 10:34:51 +02:00
|
|
|
|
|
|
|
/* Kick off DAD for an interface that just came up.
 *
 * Adds the EUI-64 derived link-local address (which itself triggers DAD
 * inside net_if_ipv6_addr_add()), then starts DAD for every unicast
 * address that was configured earlier while the interface was down.
 */
void net_if_start_dad(struct net_if *iface)
{
	struct net_if_addr *ifaddr;
	struct net_if_ipv6 *ipv6;
	struct in6_addr addr = { };
	int i;

	NET_DBG("Starting DAD for iface %p", iface);

	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
		NET_WARN("Cannot do DAD IPv6 config is not valid.");
		return;
	}

	if (!ipv6) {
		return;
	}

	/* Derive the link-local IID address from the L2 link address. */
	net_ipv6_addr_create_iid(&addr, net_if_get_link_addr(iface));

	ifaddr = net_if_ipv6_addr_add(iface, &addr, NET_ADDR_AUTOCONF, 0);
	if (!ifaddr) {
		/* Not fatal: still run DAD for previously added addresses. */
		NET_ERR("Cannot add %s address to interface %p, DAD fails",
			log_strdup(net_sprint_ipv6_addr(&addr)), iface);
	}

	/* Start DAD for all the addresses that were added earlier when
	 * the interface was down.
	 */
	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		if (!ipv6->unicast[i].is_used ||
		    ipv6->unicast[i].address.family != AF_INET6 ||
		    &ipv6->unicast[i] == ifaddr) {
			continue;
		}

		net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
	}
}
|
2017-04-12 10:25:54 +03:00
|
|
|
|
|
|
|
void net_if_ipv6_dad_failed(struct net_if *iface, const struct in6_addr *addr)
|
|
|
|
{
|
|
|
|
struct net_if_addr *ifaddr;
|
|
|
|
|
|
|
|
ifaddr = net_if_ipv6_addr_lookup(addr, &iface);
|
|
|
|
if (!ifaddr) {
|
|
|
|
NET_ERR("Cannot find %s address in interface %p",
|
2018-10-02 14:57:55 +03:00
|
|
|
log_strdup(net_sprint_ipv6_addr(addr)), iface);
|
2017-04-12 10:25:54 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-05-21 11:12:45 +02:00
|
|
|
sys_slist_find_and_remove(&active_dad_timers, &ifaddr->dad_node);
|
2017-04-12 10:25:54 +03:00
|
|
|
|
2019-06-12 10:01:42 +03:00
|
|
|
net_mgmt_event_notify_with_info(NET_EVENT_IPV6_DAD_FAILED, iface,
|
|
|
|
&ifaddr->address.in6_addr,
|
|
|
|
sizeof(struct in6_addr));
|
2017-04-12 10:25:54 +03:00
|
|
|
|
|
|
|
net_if_ipv6_addr_rm(iface, addr);
|
|
|
|
}
|
2019-05-20 18:26:01 +02:00
|
|
|
|
2019-05-21 11:12:45 +02:00
|
|
|
/* One-time setup of the shared DAD machinery: the delayed work item
 * and the list of addresses with a DAD deadline pending.
 */
static inline void iface_ipv6_dad_init(void)
{
	k_delayed_work_init(&dad_timer, dad_timeout);
	sys_slist_init(&active_dad_timers);
}
|
|
|
|
|
2016-11-11 10:34:51 +02:00
|
|
|
#else
/* DAD disabled: a newly added address is immediately usable. */
static inline void net_if_ipv6_start_dad(struct net_if *iface,
					 struct net_if_addr *ifaddr)
{
	ifaddr->addr_state = NET_ADDR_PREFERRED;
}

#define iface_ipv6_dad_init(...)
#endif /* CONFIG_NET_IPV6_DAD */
|
2016-06-07 16:34:49 +03:00
|
|
|
|
2016-06-16 11:01:40 +03:00
|
|
|
#if defined(CONFIG_NET_IPV6_ND)
|
2018-05-24 12:41:58 +03:00
|
|
|
#define RS_TIMEOUT K_SECONDS(1)
|
2016-06-10 10:31:21 +03:00
|
|
|
#define RS_COUNT 3
|
|
|
|
|
2016-11-09 17:44:21 +01:00
|
|
|
/* Handler for the shared Router Solicitation retry work item.
 *
 * Walks active_rs_timers (ordered by rs_start) and, for each config
 * whose RS window elapsed without a Router Advertisement, retries the
 * solicitation up to RS_COUNT times. The owning interface must be
 * found by scanning the net_if section, since only the per-interface
 * IPv6 config is linked on the timer list. Re-arms the work item for
 * the first still-pending entry, if any.
 */
static void rs_timeout(struct k_work *work)
{
	u32_t current_time = k_uptime_get_32();
	struct net_if_ipv6 *ipv6, *next;

	ARG_UNUSED(work);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_rs_timers,
					  ipv6, next, rs_node) {
		struct net_if *iface;

		/* List is start-time ordered: first pending entry ends
		 * the sweep (signed wrap-safe compare).
		 */
		if ((s32_t)(ipv6->rs_start + RS_TIMEOUT - current_time) > 0) {
			break;
		}

		/* Removing the ipv6 from active_rs_timers list */
		sys_slist_remove(&active_rs_timers, NULL, &ipv6->rs_node);

		/* Did not receive RA yet. */
		ipv6->rs_count++;

		/* Map the IPv6 config back to its owning interface. */
		for (iface = __net_if_start; iface != __net_if_end; iface++) {
			if (iface->config.ip.ipv6 == ipv6) {
				break;
			}
		}

		if (iface != __net_if_end) {
			NET_DBG("RS no respond iface %p count %d",
				iface, ipv6->rs_count);
			if (ipv6->rs_count < RS_COUNT) {
				net_if_start_rs(iface);
			}
		} else {
			NET_DBG("Interface IPv6 config %p not found", ipv6);
		}

		/* NULL means all expired entries were handled. */
		ipv6 = NULL;
	}

	if (ipv6) {
		/* Re-arm for the first still-pending RS deadline. */
		k_delayed_work_submit(&rs_timer,
				      ipv6->rs_start +
				      RS_TIMEOUT - current_time);
	}
}
|
|
|
|
|
|
|
|
/* Send a Router Solicitation on @iface and schedule the retry timer.
 *
 * On successful transmit the interface's IPv6 config is queued on
 * active_rs_timers; the shared rs_timer is armed only when idle,
 * otherwise rs_timeout() re-arms it as entries expire.
 */
void net_if_start_rs(struct net_if *iface)
{
	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;

	if (!ipv6) {
		return;
	}

	NET_DBG("Starting ND/RS for iface %p", iface);

	if (!net_ipv6_start_rs(iface)) {
		ipv6->rs_start = k_uptime_get_32();
		sys_slist_append(&active_rs_timers, &ipv6->rs_node);

		if (!k_delayed_work_remaining_get(&rs_timer)) {
			k_delayed_work_submit(&rs_timer, RS_TIMEOUT);
		}
	}
}
|
2019-05-20 18:26:01 +02:00
|
|
|
|
2019-05-21 12:07:08 +02:00
|
|
|
void net_if_stop_rs(struct net_if *iface)
|
2019-05-20 18:26:01 +02:00
|
|
|
{
|
2019-05-21 12:07:08 +02:00
|
|
|
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
|
|
|
|
|
|
|
|
if (!ipv6) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
NET_DBG("Stopping ND/RS for iface %p", iface);
|
|
|
|
|
|
|
|
sys_slist_find_and_remove(&active_rs_timers, &ipv6->rs_node);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* One-time setup of the shared ND/RS machinery: the delayed work item
 * and the list of IPv6 configs awaiting a Router Advertisement.
 */
static inline void iface_ipv6_nd_init(void)
{
	k_delayed_work_init(&rs_timer, rs_timeout);
	sys_slist_init(&active_rs_timers);
}
|
|
|
|
|
2019-05-20 15:46:14 +02:00
|
|
|
#else
/* IPv6 ND disabled: router solicitation helpers compile away. */
#define net_if_start_rs(...)
#define net_if_stop_rs(...)
#define iface_ipv6_nd_init(...)
#endif /* CONFIG_NET_IPV6_ND */
|
2016-06-10 10:31:21 +03:00
|
|
|
|
2016-06-22 15:34:47 +03:00
|
|
|
struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
|
|
|
|
struct net_if **ret)
|
2016-05-17 12:33:45 +03:00
|
|
|
{
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
for (iface = __net_if_start; iface != __net_if_end; iface++) {
|
2018-01-19 19:01:23 +02:00
|
|
|
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
|
2016-05-17 12:33:45 +03:00
|
|
|
int i;
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv6) {
|
|
|
|
continue;
|
|
|
|
}
|
2018-01-11 16:06:53 +02:00
|
|
|
|
2016-05-17 12:33:45 +03:00
|
|
|
for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv6->unicast[i].is_used ||
|
|
|
|
ipv6->unicast[i].address.family != AF_INET6) {
|
2016-05-17 12:33:45 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-11-02 16:05:58 +02:00
|
|
|
if (net_ipv6_is_prefix(
|
2018-01-19 19:01:23 +02:00
|
|
|
addr->s6_addr,
|
|
|
|
ipv6->unicast[i].address.in6_addr.s6_addr,
|
|
|
|
128)) {
|
2016-06-22 15:34:47 +03:00
|
|
|
|
|
|
|
if (ret) {
|
|
|
|
*ret = iface;
|
|
|
|
}
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
return &ipv6->unicast[i];
|
2016-05-17 12:33:45 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-08-07 12:57:21 +03:00
|
|
|
struct net_if_addr *net_if_ipv6_addr_lookup_by_iface(struct net_if *iface,
|
|
|
|
struct in6_addr *addr)
|
|
|
|
{
|
|
|
|
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!ipv6) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
|
|
|
|
if (!ipv6->unicast[i].is_used ||
|
|
|
|
ipv6->unicast[i].address.family != AF_INET6) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-11-02 16:05:58 +02:00
|
|
|
if (net_ipv6_is_prefix(
|
2018-08-07 12:57:21 +03:00
|
|
|
addr->s6_addr,
|
|
|
|
ipv6->unicast[i].address.in6_addr.s6_addr,
|
|
|
|
128)) {
|
|
|
|
return &ipv6->unicast[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-05-29 11:43:13 +08:00
|
|
|
int z_impl_net_if_ipv6_addr_lookup_by_index(const struct in6_addr *addr)
|
|
|
|
{
|
|
|
|
struct net_if *iface = NULL;
|
|
|
|
struct net_if_addr *if_addr;
|
|
|
|
|
|
|
|
if_addr = net_if_ipv6_addr_lookup(addr, &iface);
|
|
|
|
if (!if_addr) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return net_if_get_by_iface(iface);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_USERSPACE
|
2019-08-13 12:58:38 -07:00
|
|
|
/* Userspace verification wrapper: copy the address out of user memory
 * (oops on a bad pointer) before calling the kernel implementation.
 */
static inline int z_vrfy_net_if_ipv6_addr_lookup_by_index(
					  const struct in6_addr *addr)
{
	struct in6_addr addr_v6;

	Z_OOPS(z_user_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));

	return z_impl_net_if_ipv6_addr_lookup_by_index(&addr_v6);
}
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
#include <syscalls/net_if_ipv6_addr_lookup_by_index_mrsh.c>
|
2019-05-29 11:43:13 +08:00
|
|
|
#endif
|
|
|
|
|
2018-08-17 17:04:10 +03:00
|
|
|
/* Decide whether a segmented lifetime has fully elapsed.
 * @counter is the number of whole NET_TIMEOUT_MAX_VALUE periods still
 * outstanding; while it is non-zero the timeout cannot have expired.
 * The final period is checked with a signed wrap-safe comparison.
 */
static bool check_timeout(u32_t start, s32_t timeout, u32_t counter,
			  u32_t current_time)
{
	if (counter > 0) {
		return false;
	}

	/* Positive difference means the deadline is still ahead. */
	return (s32_t)((start + (u32_t)timeout) - current_time) <= 0;
}
|
|
|
|
|
2018-08-27 14:02:09 +03:00
|
|
|
/* Handle the end of an address's valid lifetime: mark it DEPRECATED,
 * clear its residual timeout state and unlink it from the lifetime
 * timer list.
 */
static void address_expired(struct net_if_addr *ifaddr)
{
	NET_DBG("IPv6 address %s is deprecated",
		log_strdup(net_sprint_ipv6_addr(&ifaddr->address.in6_addr)));

	ifaddr->addr_state = NET_ADDR_DEPRECATED;
	ifaddr->lifetime.timer_timeout = 0;
	ifaddr->lifetime.wrap_counter = 0;

	sys_slist_find_and_remove(&active_address_lifetime_timers,
				  &ifaddr->lifetime.node);
}
|
|
|
|
|
2018-08-17 17:04:10 +03:00
|
|
|
/* Advance one address's segmented lifetime timer.
 *
 * Lifetimes longer than NET_TIMEOUT_MAX_VALUE ms are split into
 * wrap_counter whole periods plus a final timer_timeout remainder
 * (see address_start_timer()). Returns true when the address expired
 * (and was handled by address_expired()); otherwise stores in
 * @next_wakeup how long this entry still needs before the next check.
 *
 * NOTE(review): the `current_time == NET_TIMEOUT_MAX_VALUE` test that
 * consumes one wrap period compares the uptime value itself, not the
 * elapsed segment time — looks suspicious; confirm against the
 * intended wrap-handling design before changing.
 */
static bool address_manage_timeout(struct net_if_addr *ifaddr,
				   u32_t current_time, u32_t *next_wakeup)
{
	if (check_timeout(ifaddr->lifetime.timer_start,
			  ifaddr->lifetime.timer_timeout,
			  ifaddr->lifetime.wrap_counter,
			  current_time)) {
		address_expired(ifaddr);
		return true;
	}

	if (current_time == NET_TIMEOUT_MAX_VALUE) {
		/* One full period consumed: restart the segment clock. */
		ifaddr->lifetime.timer_start = k_uptime_get_32();
		ifaddr->lifetime.wrap_counter--;
	}

	if (ifaddr->lifetime.wrap_counter > 0) {
		*next_wakeup = NET_TIMEOUT_MAX_VALUE;
	} else {
		*next_wakeup = ifaddr->lifetime.timer_timeout;
	}

	return false;
}
|
|
|
|
|
2018-08-17 10:20:05 +03:00
|
|
|
/* Handler for the shared address lifetime work item.
 *
 * Walks every address with a running lifetime timer, expires the ones
 * whose time is up, and re-arms the work item for the smallest
 * remaining wait among the survivors. If nothing remains pending the
 * work item is left idle.
 */
static void address_lifetime_timeout(struct k_work *work)
{
	u64_t timeout_update = UINT64_MAX;
	u32_t current_time = k_uptime_get_32();
	bool found = false;
	struct net_if_addr *current, *next;

	ARG_UNUSED(work);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_address_lifetime_timers,
					  current, next, lifetime.node) {
		u32_t next_timeout;
		bool is_timeout;

		is_timeout = address_manage_timeout(current, current_time,
						    &next_timeout);
		if (!is_timeout) {
			/* Track the soonest remaining deadline. */
			if (next_timeout < timeout_update) {
				timeout_update = next_timeout;
				found = true;
			}
		}

		/* Defensive break against a self-referencing tail node. */
		if (current == next) {
			break;
		}
	}

	if (found) {
		/* If we are near upper limit of s32_t timeout, then lower it
		 * a bit so that kernel timeout variable will not overflow.
		 */
		if (timeout_update >= NET_TIMEOUT_MAX_VALUE) {
			timeout_update = NET_TIMEOUT_MAX_VALUE;
		}

		NET_DBG("Waiting for %d ms", (s32_t)timeout_update);

		k_delayed_work_submit(&address_lifetime_timer, timeout_update);
	}
}
|
|
|
|
|
2018-08-27 10:05:17 +03:00
|
|
|
#if defined(CONFIG_NET_TEST)
|
|
|
|
void net_address_lifetime_timeout(void)
|
|
|
|
{
|
|
|
|
address_lifetime_timeout(NULL);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2018-08-17 17:04:10 +03:00
|
|
|
/* (Re)arm the shared lifetime work item for @ifaddr if its deadline is
 * sooner than the currently scheduled wakeup (or if nothing is
 * scheduled). An address with whole wrap periods remaining sleeps for
 * one full NET_TIMEOUT_MAX_VALUE period; otherwise for its remainder.
 */
static void address_submit_work(struct net_if_addr *ifaddr)
{
	s32_t remaining;

	remaining = k_delayed_work_remaining_get(&address_lifetime_timer);
	if (!remaining || (ifaddr->lifetime.wrap_counter == 0 &&
			   ifaddr->lifetime.timer_timeout < remaining)) {
		k_delayed_work_cancel(&address_lifetime_timer);

		if (ifaddr->lifetime.wrap_counter > 0 && remaining == 0) {
			k_delayed_work_submit(&address_lifetime_timer,
					      NET_TIMEOUT_MAX_VALUE);
		} else {
			k_delayed_work_submit(&address_lifetime_timer,
					      ifaddr->lifetime.timer_timeout);
		}

		NET_DBG("Next wakeup in %d ms",
			k_delayed_work_remaining_get(&address_lifetime_timer));
	}
}
|
|
|
|
|
|
|
|
/* Start the valid-lifetime timer for @ifaddr with @vlifetime seconds.
 *
 * The total timeout is split into wrap_counter whole periods of
 * NET_TIMEOUT_MAX_VALUE ms plus a timer_timeout remainder so that the
 * math stays within 32-bit kernel timeout limits; the entry is queued
 * and the shared work item is (re)armed via address_submit_work().
 */
static void address_start_timer(struct net_if_addr *ifaddr, u32_t vlifetime)
{
	u64_t expire_timeout = K_SECONDS((u64_t)vlifetime);

	sys_slist_append(&active_address_lifetime_timers,
			 &ifaddr->lifetime.node);

	ifaddr->lifetime.timer_start = k_uptime_get_32();
	ifaddr->lifetime.wrap_counter = expire_timeout /
		(u64_t)NET_TIMEOUT_MAX_VALUE;
	ifaddr->lifetime.timer_timeout = expire_timeout -
		(u64_t)NET_TIMEOUT_MAX_VALUE *
		(u64_t)ifaddr->lifetime.wrap_counter;

	address_submit_work(ifaddr);
}
|
|
|
|
|
2016-11-11 10:26:17 +02:00
|
|
|
/* Refresh the valid lifetime of @ifaddr to @vlifetime seconds and mark
 * it PREFERRED again.
 *
 * NOTE(review): address_start_timer() appends the lifetime node without
 * first removing a possibly already-queued one — repeated updates may
 * double-link the entry; confirm callers always update an expired or
 * fresh address.
 */
void net_if_ipv6_addr_update_lifetime(struct net_if_addr *ifaddr,
				      u32_t vlifetime)
{
	NET_DBG("Updating expire time of %s by %u secs",
		log_strdup(net_sprint_ipv6_addr(&ifaddr->address.in6_addr)),
		vlifetime);

	ifaddr->addr_state = NET_ADDR_PREFERRED;

	address_start_timer(ifaddr, vlifetime);
}
|
|
|
|
|
2016-11-25 20:10:58 +02:00
|
|
|
static struct net_if_addr *ipv6_addr_find(struct net_if *iface,
|
|
|
|
struct in6_addr *addr)
|
|
|
|
{
|
2018-01-19 19:01:23 +02:00
|
|
|
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
|
2016-11-25 20:10:58 +02:00
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv6->unicast[i].is_used) {
|
2016-11-25 20:10:58 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
if (net_ipv6_addr_cmp(
|
|
|
|
addr, &ipv6->unicast[i].address.in6_addr)) {
|
2018-01-11 16:06:53 +02:00
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
return &ipv6->unicast[i];
|
2016-11-25 20:10:58 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-03-15 11:25:27 +02:00
|
|
|
static inline void net_if_addr_init(struct net_if_addr *ifaddr,
|
|
|
|
struct in6_addr *addr,
|
|
|
|
enum net_addr_type addr_type,
|
2017-04-21 09:27:50 -05:00
|
|
|
u32_t vlifetime)
|
2017-03-15 11:25:27 +02:00
|
|
|
{
|
|
|
|
ifaddr->is_used = true;
|
|
|
|
ifaddr->address.family = AF_INET6;
|
|
|
|
ifaddr->addr_type = addr_type;
|
|
|
|
net_ipaddr_copy(&ifaddr->address.in6_addr, addr);
|
|
|
|
|
|
|
|
/* FIXME - set the mcast addr for this node */
|
|
|
|
|
|
|
|
if (vlifetime) {
|
|
|
|
ifaddr->is_infinite = false;
|
|
|
|
|
2018-10-02 14:57:55 +03:00
|
|
|
NET_DBG("Expiring %s in %u secs",
|
|
|
|
log_strdup(net_sprint_ipv6_addr(addr)),
|
2017-03-15 11:25:27 +02:00
|
|
|
vlifetime);
|
|
|
|
|
|
|
|
net_if_ipv6_addr_update_lifetime(ifaddr, vlifetime);
|
|
|
|
} else {
|
|
|
|
ifaddr->is_infinite = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-17 12:33:45 +03:00
|
|
|
/* Add a unicast IPv6 address to @iface.
 *
 * Idempotent: an existing matching entry is returned as-is. A new
 * entry takes a free unicast slot, joins the needed multicast groups
 * and starts DAD (skipped on point-to-point L2s), then emits a
 * NET_EVENT_IPV6_ADDR_ADD event.
 *
 * @vlifetime  valid lifetime in seconds, 0 for infinite.
 * Returns the address entry, or NULL when the config or slot pool is
 * exhausted.
 */
struct net_if_addr *net_if_ipv6_addr_add(struct net_if *iface,
					 struct in6_addr *addr,
					 enum net_addr_type addr_type,
					 u32_t vlifetime)
{
	struct net_if_addr *ifaddr;
	struct net_if_ipv6 *ipv6;
	int i;

	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
		return NULL;
	}

	ifaddr = ipv6_addr_find(iface, addr);
	if (ifaddr) {
		return ifaddr;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		if (ipv6->unicast[i].is_used) {
			continue;
		}

		net_if_addr_init(&ipv6->unicast[i], addr, addr_type,
				 vlifetime);

		NET_DBG("[%d] interface %p address %s type %s added", i,
			iface, log_strdup(net_sprint_ipv6_addr(addr)),
			net_addr_type2str(addr_type));

		if (!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
			/* RFC 4862 5.4.2
			 * Before sending a Neighbor Solicitation, an interface
			 * MUST join the all-nodes multicast address and the
			 * solicited-node multicast address of the tentative
			 * address.
			 */
			/* The allnodes multicast group is only joined once as
			 * net_ipv6_mcast_join() checks if we have already
			 * joined.
			 */
			join_mcast_nodes(iface,
					 &ipv6->unicast[i].address.in6_addr);

			net_if_ipv6_start_dad(iface, &ipv6->unicast[i]);
		}

		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV6_ADDR_ADD, iface,
			&ipv6->unicast[i].address.in6_addr,
			sizeof(struct in6_addr));

		return &ipv6->unicast[i];
	}

	return NULL;
}
|
|
|
|
|
2017-04-12 10:34:59 +03:00
|
|
|
/* Remove a unicast IPv6 address from @iface.
 *
 * Cancels any running lifetime timer (stopping the shared work item
 * when the list empties), frees the slot, leaves the address's
 * solicited-node multicast group and emits NET_EVENT_IPV6_ADDR_DEL.
 * Returns true when an entry was removed, false when not found.
 */
bool net_if_ipv6_addr_rm(struct net_if *iface, const struct in6_addr *addr)
{
	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
	int i;

	NET_ASSERT(addr);

	if (!ipv6) {
		return false;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		struct in6_addr maddr;

		if (!ipv6->unicast[i].is_used) {
			continue;
		}

		if (!net_ipv6_addr_cmp(&ipv6->unicast[i].address.in6_addr,
				       addr)) {
			continue;
		}

		if (!ipv6->unicast[i].is_infinite) {
			sys_slist_find_and_remove(
				&active_address_lifetime_timers,
				&ipv6->unicast[i].lifetime.node);

			/* Nothing pending anymore: stop the shared timer. */
			if (sys_slist_is_empty(
				    &active_address_lifetime_timers)) {
				k_delayed_work_cancel(&address_lifetime_timer);
			}
		}

		ipv6->unicast[i].is_used = false;

		net_ipv6_addr_create_solicited_node(addr, &maddr);

		net_if_ipv6_maddr_rm(iface, &maddr);

		NET_DBG("[%d] interface %p address %s type %s removed",
			i, iface, log_strdup(net_sprint_ipv6_addr(addr)),
			net_addr_type2str(ipv6->unicast[i].addr_type));

		/* Using the IPv6 address pointer here can give false
		 * info if someone adds a new IP address into this position
		 * in the address array. This is quite unlikely thou.
		 */
		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV6_ADDR_DEL,
			iface,
			&ipv6->unicast[i].address.in6_addr,
			sizeof(struct in6_addr));

		return true;
	}

	return false;
}
|
|
|
|
|
2019-05-29 11:43:13 +08:00
|
|
|
bool z_impl_net_if_ipv6_addr_add_by_index(int index,
|
|
|
|
struct in6_addr *addr,
|
|
|
|
enum net_addr_type addr_type,
|
|
|
|
u32_t vlifetime)
|
|
|
|
{
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
iface = net_if_get_by_index(index);
|
|
|
|
if (!iface) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return net_if_ipv6_addr_add(iface, addr, addr_type, vlifetime) ?
|
|
|
|
true : false;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_USERSPACE
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
/* Syscall verification handler for net_if_ipv6_addr_add_by_index().
 * Copies the IPv6 address out of user memory into a kernel-side buffer
 * before calling the implementation, so the impl never dereferences a
 * raw user pointer. Returns false if userspace access to interface
 * addresses is not enabled in the build.
 */
bool z_vrfy_net_if_ipv6_addr_add_by_index(int index,
					  struct in6_addr *addr,
					  enum net_addr_type addr_type,
					  u32_t vlifetime)
{
#if defined(CONFIG_NET_IF_USERSPACE_ACCESS)
	struct in6_addr addr_v6;

	/* Z_OOPS kills the calling thread if the user buffer is not
	 * readable by the caller.
	 */
	Z_OOPS(z_user_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));

	return z_impl_net_if_ipv6_addr_add_by_index(index,
						    &addr_v6,
						    addr_type,
						    vlifetime);
#else
	/* Feature disabled: user threads may not modify addresses. */
	return false;
#endif /* CONFIG_NET_IF_USERSPACE_ACCESS */
}
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
#include <syscalls/net_if_ipv6_addr_add_by_index_mrsh.c>
|
2019-05-29 11:43:13 +08:00
|
|
|
#endif /* CONFIG_USERSPACE */
|
|
|
|
|
|
|
|
bool z_impl_net_if_ipv6_addr_rm_by_index(int index,
|
|
|
|
const struct in6_addr *addr)
|
|
|
|
{
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
iface = net_if_get_by_index(index);
|
|
|
|
if (!iface) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return net_if_ipv6_addr_rm(iface, addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_USERSPACE
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
/* Syscall verification handler for net_if_ipv6_addr_rm_by_index().
 * Copies the address from user memory before handing it to the
 * implementation; returns false when userspace address manipulation
 * is compiled out.
 */
bool z_vrfy_net_if_ipv6_addr_rm_by_index(int index,
					 const struct in6_addr *addr)
{
#if defined(CONFIG_NET_IF_USERSPACE_ACCESS)
	struct in6_addr addr_v6;

	/* Faults the caller if the user buffer is not readable. */
	Z_OOPS(z_user_from_copy(&addr_v6, (void *)addr, sizeof(addr_v6)));

	return z_impl_net_if_ipv6_addr_rm_by_index(index, &addr_v6);
#else
	/* Feature disabled: user threads may not modify addresses. */
	return false;
#endif /* CONFIG_NET_IF_USERSPACE_ACCESS */
}
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
#include <syscalls/net_if_ipv6_addr_rm_by_index_mrsh.c>
|
2019-05-29 11:43:13 +08:00
|
|
|
#endif /* CONFIG_USERSPACE */
|
|
|
|
|
2016-05-17 12:33:45 +03:00
|
|
|
struct net_if_mcast_addr *net_if_ipv6_maddr_add(struct net_if *iface,
|
2017-02-09 09:48:31 +02:00
|
|
|
const struct in6_addr *addr)
|
2016-05-17 12:33:45 +03:00
|
|
|
{
|
2018-01-19 19:01:23 +02:00
|
|
|
struct net_if_ipv6 *ipv6;
|
2016-05-17 12:33:45 +03:00
|
|
|
int i;
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-11-02 16:05:58 +02:00
|
|
|
if (!net_ipv6_is_addr_mcast(addr)) {
|
2016-05-17 12:33:45 +03:00
|
|
|
NET_DBG("Address %s is not a multicast address.",
|
2018-10-02 14:57:55 +03:00
|
|
|
log_strdup(net_sprint_ipv6_addr(addr)));
|
2016-05-17 12:33:45 +03:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
|
2018-01-19 19:01:23 +02:00
|
|
|
if (ipv6->mcast[i].is_used) {
|
2016-05-17 12:33:45 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
ipv6->mcast[i].is_used = true;
|
|
|
|
ipv6->mcast[i].address.family = AF_INET6;
|
|
|
|
memcpy(&ipv6->mcast[i].address.in6_addr, addr, 16);
|
2016-05-17 12:33:45 +03:00
|
|
|
|
|
|
|
NET_DBG("[%d] interface %p address %s added", i, iface,
|
2018-10-02 14:57:55 +03:00
|
|
|
log_strdup(net_sprint_ipv6_addr(addr)));
|
2016-05-17 12:33:45 +03:00
|
|
|
|
2019-06-12 10:01:42 +03:00
|
|
|
net_mgmt_event_notify_with_info(
|
|
|
|
NET_EVENT_IPV6_MADDR_ADD, iface,
|
|
|
|
&ipv6->mcast[i].address.in6_addr,
|
|
|
|
sizeof(struct in6_addr));
|
2016-09-29 18:33:03 +02:00
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
return &ipv6->mcast[i];
|
2016-05-17 12:33:45 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-02-09 09:48:31 +02:00
|
|
|
bool net_if_ipv6_maddr_rm(struct net_if *iface, const struct in6_addr *addr)
|
2016-06-01 10:54:10 +03:00
|
|
|
{
|
2018-01-19 19:01:23 +02:00
|
|
|
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
|
2016-06-01 10:54:10 +03:00
|
|
|
int i;
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv6) {
|
|
|
|
return false;
|
|
|
|
}
|
2018-01-11 16:06:53 +02:00
|
|
|
|
2016-06-01 10:54:10 +03:00
|
|
|
for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv6->mcast[i].is_used) {
|
2016-06-01 10:54:10 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!net_ipv6_addr_cmp(&ipv6->mcast[i].address.in6_addr,
|
|
|
|
addr)) {
|
2016-06-01 10:54:10 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
ipv6->mcast[i].is_used = false;
|
2016-06-01 10:54:10 +03:00
|
|
|
|
|
|
|
NET_DBG("[%d] interface %p address %s removed",
|
2018-10-02 14:57:55 +03:00
|
|
|
i, iface, log_strdup(net_sprint_ipv6_addr(addr)));
|
2016-06-01 10:54:10 +03:00
|
|
|
|
2019-06-12 10:01:42 +03:00
|
|
|
net_mgmt_event_notify_with_info(
|
|
|
|
NET_EVENT_IPV6_MADDR_DEL, iface,
|
|
|
|
&ipv6->mcast[i].address.in6_addr,
|
|
|
|
sizeof(struct in6_addr));
|
2016-09-29 18:33:03 +02:00
|
|
|
|
2016-06-01 10:54:10 +03:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-06-22 15:34:47 +03:00
|
|
|
/* Find a multicast address across all interfaces.
 *
 * @param maddr Address to look for (compared as a full /128 match).
 * @param ret   Optional in/out: if it points to a non-NULL interface,
 *              the search is restricted to that interface; on success
 *              it receives the interface where the address was found.
 *
 * @return The matching entry, or NULL if not found.
 */
struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *maddr,
						   struct net_if **ret)
{
	struct net_if *iface;

	for (iface = __net_if_start; iface != __net_if_end; iface++) {
		struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
		int i;

		/* Caller asked for a specific interface only. */
		if (ret && *ret && iface != *ret) {
			continue;
		}

		if (!ipv6) {
			continue;
		}

		for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
			if (!ipv6->mcast[i].is_used ||
			    ipv6->mcast[i].address.family != AF_INET6) {
				continue;
			}

			/* Prefix length 128 => exact address match. */
			if (net_ipv6_is_prefix(
				    maddr->s6_addr,
				    ipv6->mcast[i].address.in6_addr.s6_addr,
				    128)) {
				if (ret) {
					*ret = iface;
				}

				return &ipv6->mcast[i];
			}
		}
	}

	return NULL;
}
|
|
|
|
|
2017-08-29 09:57:27 +03:00
|
|
|
void net_if_mcast_mon_register(struct net_if_mcast_monitor *mon,
|
|
|
|
struct net_if *iface,
|
|
|
|
net_if_mcast_callback_t cb)
|
|
|
|
{
|
|
|
|
sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);
|
|
|
|
sys_slist_prepend(&mcast_monitor_callbacks, &mon->node);
|
|
|
|
|
|
|
|
mon->iface = iface;
|
|
|
|
mon->cb = cb;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Unregister a multicast monitor; no-op if it was never registered. */
void net_if_mcast_mon_unregister(struct net_if_mcast_monitor *mon)
{
	sys_slist_find_and_remove(&mcast_monitor_callbacks, &mon->node);
}
|
|
|
|
|
2018-01-11 16:06:53 +02:00
|
|
|
/* Notify every monitor registered for @a iface that the multicast
 * address @a addr was joined (is_joined == true) or left.
 * Uses the _SAFE iterator so a callback may unregister its own monitor.
 */
void net_if_mcast_monitor(struct net_if *iface,
			  const struct in6_addr *addr,
			  bool is_joined)
{
	struct net_if_mcast_monitor *mon, *tmp;

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&mcast_monitor_callbacks,
					  mon, tmp, node) {
		if (iface == mon->iface) {
			mon->cb(iface, addr, is_joined);
		}
	}
}
|
|
|
|
|
2018-08-17 13:33:20 +03:00
|
|
|
/* Remove all autoconfigured (NET_ADDR_AUTOCONF) unicast addresses on
 * @a iface that fall inside the prefix addr/len. Called when a prefix
 * expires or is removed, since its SLAAC-derived addresses die with it.
 */
static void remove_prefix_addresses(struct net_if *iface,
				    struct net_if_ipv6 *ipv6,
				    struct in6_addr *addr,
				    u8_t len)
{
	int i;

	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		/* Only autoconf IPv6 addresses are tied to a prefix. */
		if (!ipv6->unicast[i].is_used ||
		    ipv6->unicast[i].address.family != AF_INET6 ||
		    ipv6->unicast[i].addr_type != NET_ADDR_AUTOCONF) {
			continue;
		}

		if (net_ipv6_is_prefix(
			    addr->s6_addr,
			    ipv6->unicast[i].address.in6_addr.s6_addr,
			    len)) {
			net_if_ipv6_addr_rm(iface,
					    &ipv6->unicast[i].address.in6_addr);
		}
	}
}
|
|
|
|
|
2018-08-27 14:02:09 +03:00
|
|
|
/* Handle expiry of a prefix lifetime: mark the prefix unused, remove
 * the autoconfigured addresses derived from it, and emit the
 * NET_EVENT_IPV6_PREFIX_DEL management event.
 */
static void prefix_lifetime_expired(struct net_if_ipv6_prefix *ifprefix)
{
	struct net_if_ipv6 *ipv6;

	NET_DBG("Prefix %s/%d expired",
		log_strdup(net_sprint_ipv6_addr(&ifprefix->prefix)),
		ifprefix->len);

	ifprefix->is_used = false;

	if (net_if_config_ipv6_get(ifprefix->iface, &ipv6) < 0) {
		return;
	}

	/* Remove also all auto addresses if they have the same prefix. */
	remove_prefix_addresses(ifprefix->iface, ipv6, &ifprefix->prefix,
				ifprefix->len);

	net_mgmt_event_notify_with_info(
		NET_EVENT_IPV6_PREFIX_DEL, ifprefix->iface,
		&ifprefix->prefix, sizeof(struct in6_addr));
}
|
2018-08-17 13:33:20 +03:00
|
|
|
|
2018-08-27 14:02:09 +03:00
|
|
|
/* Cancel the lifetime tracking of a prefix: zero its timeout state and
 * detach it from the active timer list. Does not touch is_used.
 */
static void prefix_timer_remove(struct net_if_ipv6_prefix *ifprefix)
{
	NET_DBG("IPv6 prefix %s/%d removed",
		log_strdup(net_sprint_ipv6_addr(&ifprefix->prefix)),
		ifprefix->len);

	ifprefix->lifetime.timer_timeout = 0;
	ifprefix->lifetime.wrap_counter = 0;

	sys_slist_find_and_remove(&active_prefix_lifetime_timers,
				  &ifprefix->lifetime.node);
}
|
|
|
|
|
|
|
|
/* Check one prefix timer against the current time.
 *
 * Long lifetimes are stored as wrap_counter full NET_TIMEOUT_MAX_VALUE
 * periods plus a remainder in timer_timeout; each elapsed max-period
 * decrements wrap_counter.
 *
 * @param current_time k_uptime_get_32() value of this timer pass.
 * @param next_wakeup  Out: delay until this prefix next needs service
 *                     (only meaningful when returning false).
 *
 * @return true if the prefix expired (and was handled), else false.
 */
static bool prefix_manage_timeout(struct net_if_ipv6_prefix *ifprefix,
				  u32_t current_time, u32_t *next_wakeup)
{
	if (check_timeout(ifprefix->lifetime.timer_start,
			  ifprefix->lifetime.timer_timeout,
			  ifprefix->lifetime.wrap_counter,
			  current_time)) {
		prefix_lifetime_expired(ifprefix);
		return true;
	}

	/* NOTE(review): this compares the absolute uptime against
	 * NET_TIMEOUT_MAX_VALUE to detect a completed max-period —
	 * confirm against check_timeout()'s contract.
	 */
	if (current_time == NET_TIMEOUT_MAX_VALUE) {
		ifprefix->lifetime.wrap_counter--;
	}

	if (ifprefix->lifetime.wrap_counter > 0) {
		*next_wakeup = NET_TIMEOUT_MAX_VALUE;
	} else {
		*next_wakeup = ifprefix->lifetime.timer_timeout;
	}

	return false;
}
|
|
|
|
|
|
|
|
/* Delayed-work handler driving all prefix lifetime timers.
 * Walks the active list, expires due prefixes, and re-arms the work
 * item with the shortest remaining timeout among the survivors.
 */
static void prefix_lifetime_timeout(struct k_work *work)
{
	u64_t timeout_update = UINT64_MAX;	/* shortest pending timeout */
	u32_t current_time = k_uptime_get_32();
	bool found = false;
	struct net_if_ipv6_prefix *current, *next;

	ARG_UNUSED(work);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&active_prefix_lifetime_timers,
					  current, next, lifetime.node) {
		u32_t next_timeout;
		bool is_timeout;

		is_timeout = prefix_manage_timeout(current, current_time,
						   &next_timeout);
		if (!is_timeout) {
			/* Still running: track the earliest wakeup. */
			if (next_timeout < timeout_update) {
				timeout_update = next_timeout;
				found = true;
			}
		}

		/* End of list reached (safe-iterator sentinel). */
		if (current == next) {
			break;
		}
	}

	if (found) {
		/* If we are near upper limit of s32_t timeout, then lower it
		 * a bit so that kernel timeout will not overflow.
		 */
		if (timeout_update >= NET_TIMEOUT_MAX_VALUE) {
			timeout_update = NET_TIMEOUT_MAX_VALUE;
		}

		NET_DBG("Waiting for %d ms", (u32_t)timeout_update);

		k_delayed_work_submit(&prefix_lifetime_timer, timeout_update);
	}
}
|
|
|
|
|
|
|
|
/* (Re)arm the shared prefix lifetime work item if @a ifprefix needs to
 * fire before the currently scheduled wakeup (or nothing is scheduled).
 */
static void prefix_submit_work(struct net_if_ipv6_prefix *ifprefix)
{
	s32_t remaining;

	remaining = k_delayed_work_remaining_get(&prefix_lifetime_timer);
	/* Reschedule when the timer is idle, or when this prefix's final
	 * (non-wrapping) timeout comes due sooner than the pending one.
	 */
	if (!remaining || (ifprefix->lifetime.wrap_counter == 0 &&
			   ifprefix->lifetime.timer_timeout < remaining)) {
		k_delayed_work_cancel(&prefix_lifetime_timer);

		if (ifprefix->lifetime.wrap_counter > 0 && remaining == 0) {
			/* Long lifetime: sleep one max-period at a time. */
			k_delayed_work_submit(&prefix_lifetime_timer,
					      NET_TIMEOUT_MAX_VALUE);
		} else {
			k_delayed_work_submit(&prefix_lifetime_timer,
					      ifprefix->lifetime.timer_timeout);
		}

		NET_DBG("Next wakeup in %d ms",
			k_delayed_work_remaining_get(&prefix_lifetime_timer));
	}
}
|
|
|
|
|
|
|
|
/* Start lifetime tracking for a prefix.
 *
 * @param lifetime Lifetime in seconds; converted to ms and split into
 *                 wrap_counter full NET_TIMEOUT_MAX_VALUE periods plus
 *                 a timer_timeout remainder (lets lifetimes exceed the
 *                 kernel's 32-bit ms timeout range).
 */
static void prefix_start_timer(struct net_if_ipv6_prefix *ifprefix,
			       u32_t lifetime)
{
	u64_t expire_timeout = K_SECONDS((u64_t)lifetime);

	sys_slist_append(&active_prefix_lifetime_timers,
			 &ifprefix->lifetime.node);

	ifprefix->lifetime.timer_start = k_uptime_get_32();
	ifprefix->lifetime.wrap_counter = expire_timeout /
		(u64_t)NET_TIMEOUT_MAX_VALUE;
	ifprefix->lifetime.timer_timeout = expire_timeout -
		(u64_t)NET_TIMEOUT_MAX_VALUE *
		(u64_t)ifprefix->lifetime.wrap_counter;

	prefix_submit_work(ifprefix);
}
|
|
|
|
|
|
|
|
static struct net_if_ipv6_prefix *ipv6_prefix_find(struct net_if *iface,
|
|
|
|
struct in6_addr *prefix,
|
|
|
|
u8_t prefix_len)
|
|
|
|
{
|
|
|
|
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!ipv6) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
|
|
|
|
if (!ipv6->unicast[i].is_used) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (net_ipv6_addr_cmp(prefix, &ipv6->prefix[i].prefix) &&
|
|
|
|
prefix_len == ipv6->prefix[i].len) {
|
|
|
|
return &ipv6->prefix[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
2017-03-15 11:25:27 +02:00
|
|
|
}
|
|
|
|
|
2018-08-17 13:33:20 +03:00
|
|
|
static void net_if_ipv6_prefix_init(struct net_if *iface,
|
2018-08-27 14:02:09 +03:00
|
|
|
struct net_if_ipv6_prefix *ifprefix,
|
2017-04-21 09:27:50 -05:00
|
|
|
struct in6_addr *addr, u8_t len,
|
|
|
|
u32_t lifetime)
|
2017-03-15 11:25:27 +02:00
|
|
|
{
|
2018-08-27 14:02:09 +03:00
|
|
|
ifprefix->is_used = true;
|
|
|
|
ifprefix->len = len;
|
|
|
|
ifprefix->iface = iface;
|
|
|
|
net_ipaddr_copy(&ifprefix->prefix, addr);
|
2017-03-15 11:25:27 +02:00
|
|
|
|
|
|
|
if (lifetime == NET_IPV6_ND_INFINITE_LIFETIME) {
|
2018-08-27 14:02:09 +03:00
|
|
|
ifprefix->is_infinite = true;
|
2017-03-15 11:25:27 +02:00
|
|
|
} else {
|
2018-08-27 14:02:09 +03:00
|
|
|
ifprefix->is_infinite = false;
|
2017-03-15 11:25:27 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-07 10:16:58 +03:00
|
|
|
/* Add an IPv6 prefix to the interface, or return the existing entry if
 * the exact prefix/len is already present.
 *
 * @return The (new or existing) entry, or NULL if IPv6 config could
 *         not be obtained or the prefix table is full. Emits
 *         NET_EVENT_IPV6_PREFIX_ADD for newly added prefixes.
 */
struct net_if_ipv6_prefix *net_if_ipv6_prefix_add(struct net_if *iface,
						  struct in6_addr *prefix,
						  u8_t len,
						  u32_t lifetime)
{
	struct net_if_ipv6_prefix *ifprefix;
	struct net_if_ipv6 *ipv6;
	int i;

	if (net_if_config_ipv6_get(iface, &ipv6) < 0) {
		return NULL;
	}

	/* Already known: nothing to allocate. */
	ifprefix = ipv6_prefix_find(iface, prefix, len);
	if (ifprefix) {
		return ifprefix;
	}

	/* NOTE(review): likely redundant — net_if_config_ipv6_get()
	 * succeeded above, which presumably guarantees ipv6 != NULL;
	 * confirm before removing.
	 */
	if (!ipv6) {
		return NULL;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
		if (ipv6->prefix[i].is_used) {
			continue;
		}

		net_if_ipv6_prefix_init(iface, &ipv6->prefix[i], prefix,
					len, lifetime);

		NET_DBG("[%d] interface %p prefix %s/%d added", i, iface,
			log_strdup(net_sprint_ipv6_addr(prefix)), len);

		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV6_PREFIX_ADD, iface,
			&ipv6->prefix[i].prefix, sizeof(struct in6_addr));

		return &ipv6->prefix[i];
	}

	return NULL;
}
|
|
|
|
|
|
|
|
/* Remove an exact prefix/len entry from the interface, cancelling its
 * lifetime timer and deleting the autoconfigured addresses under it.
 *
 * @return true if the entry existed and was removed (emits
 *         NET_EVENT_IPV6_PREFIX_DEL), false otherwise.
 */
bool net_if_ipv6_prefix_rm(struct net_if *iface, struct in6_addr *addr,
			   u8_t len)
{
	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
	int i;

	if (!ipv6) {
		return false;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
		if (!ipv6->prefix[i].is_used) {
			continue;
		}

		if (!net_ipv6_addr_cmp(&ipv6->prefix[i].prefix, addr) ||
		    ipv6->prefix[i].len != len) {
			continue;
		}

		net_if_ipv6_prefix_unset_timer(&ipv6->prefix[i]);

		ipv6->prefix[i].is_used = false;

		/* Remove also all auto addresses if they have the same
		 * prefix.
		 */
		remove_prefix_addresses(iface, ipv6, addr, len);

		net_mgmt_event_notify_with_info(
			NET_EVENT_IPV6_PREFIX_DEL, iface,
			&ipv6->prefix[i].prefix, sizeof(struct in6_addr));

		return true;
	}

	return false;
}
|
|
|
|
|
2018-08-22 12:41:30 +03:00
|
|
|
/* Return a prefix entry on @a iface (or the default interface when
 * iface is NULL) that covers @a addr.
 *
 * NOTE(review): among multiple covering prefixes this keeps the one
 * with the SMALLER len (prefix->len > candidate len triggers
 * replacement) — i.e. shortest-prefix match, not the usual
 * longest-prefix match. Confirm whether this is intentional.
 */
struct net_if_ipv6_prefix *net_if_ipv6_prefix_get(struct net_if *iface,
						  struct in6_addr *addr)
{
	struct net_if_ipv6_prefix *prefix = NULL;
	struct net_if_ipv6 *ipv6;
	int i;

	if (!iface) {
		iface = net_if_get_default();
	}

	ipv6 = iface->config.ip.ipv6;
	if (!ipv6) {
		return NULL;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
		if (!ipv6->prefix[i].is_used) {
			continue;
		}

		if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
				       addr->s6_addr,
				       ipv6->prefix[i].len)) {
			if (!prefix || prefix->len > ipv6->prefix[i].len) {
				prefix = &ipv6->prefix[i];
			}
		}
	}

	return prefix;
}
|
|
|
|
|
2016-06-07 10:16:58 +03:00
|
|
|
struct net_if_ipv6_prefix *net_if_ipv6_prefix_lookup(struct net_if *iface,
|
|
|
|
struct in6_addr *addr,
|
2017-04-21 09:27:50 -05:00
|
|
|
u8_t len)
|
2016-06-07 10:16:58 +03:00
|
|
|
{
|
2018-01-19 19:01:23 +02:00
|
|
|
struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
|
2016-06-07 10:16:58 +03:00
|
|
|
int i;
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv6) {
|
|
|
|
return NULL;
|
|
|
|
}
|
2018-01-11 16:06:53 +02:00
|
|
|
|
2016-06-07 10:16:58 +03:00
|
|
|
for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv6->prefix[i].is_used) {
|
2016-06-07 10:16:58 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-11-02 16:05:58 +02:00
|
|
|
if (net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
|
2018-01-19 19:01:23 +02:00
|
|
|
addr->s6_addr, len)) {
|
|
|
|
return &ipv6->prefix[i];
|
2016-06-07 10:16:58 +03:00
|
|
|
}
|
|
|
|
}
|
2016-06-24 17:34:30 +02:00
|
|
|
|
2016-06-07 10:16:58 +03:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2016-10-17 13:00:45 +03:00
|
|
|
/* Check whether @a addr is covered by any on-link prefix.
 *
 * @param iface Optional in/out: if it points to a non-NULL interface,
 *              only that interface is checked; on success it receives
 *              the interface whose prefix matched.
 *
 * @return true if a matching in-use prefix was found.
 */
bool net_if_ipv6_addr_onlink(struct net_if **iface, struct in6_addr *addr)
{
	struct net_if *tmp;

	for (tmp = __net_if_start; tmp != __net_if_end; tmp++) {
		struct net_if_ipv6 *ipv6 = tmp->config.ip.ipv6;
		int i;

		/* Caller restricted the search to a single interface. */
		if (iface && *iface && *iface != tmp) {
			continue;
		}

		if (!ipv6) {
			continue;
		}

		for (i = 0; i < NET_IF_MAX_IPV6_PREFIX; i++) {
			if (ipv6->prefix[i].is_used &&
			    net_ipv6_is_prefix(ipv6->prefix[i].prefix.s6_addr,
					       addr->s6_addr,
					       ipv6->prefix[i].len)) {
				if (iface) {
					*iface = tmp;
				}

				return true;
			}
		}
	}

	return false;
}
|
|
|
|
|
2016-07-07 10:54:34 +03:00
|
|
|
void net_if_ipv6_prefix_set_timer(struct net_if_ipv6_prefix *prefix,
|
2017-04-21 09:27:50 -05:00
|
|
|
u32_t lifetime)
|
2016-07-07 10:54:34 +03:00
|
|
|
{
|
2018-08-27 14:02:09 +03:00
|
|
|
/* No need to set a timer for infinite timeout */
|
|
|
|
if (lifetime == 0xffffffff) {
|
|
|
|
return;
|
2016-07-07 10:54:34 +03:00
|
|
|
}
|
|
|
|
|
2018-08-27 14:02:09 +03:00
|
|
|
NET_DBG("Prefix lifetime %u sec", lifetime);
|
2016-11-29 15:18:02 +02:00
|
|
|
|
2018-08-27 14:02:09 +03:00
|
|
|
prefix_start_timer(prefix, lifetime);
|
2016-07-07 10:54:34 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void net_if_ipv6_prefix_unset_timer(struct net_if_ipv6_prefix *prefix)
|
|
|
|
{
|
|
|
|
if (!prefix->is_used) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-08-27 14:02:09 +03:00
|
|
|
prefix_timer_remove(prefix);
|
2016-07-07 10:54:34 +03:00
|
|
|
}
|
|
|
|
|
2016-06-07 10:16:58 +03:00
|
|
|
/* Look up an IPv6 router by address; thin wrapper over the
 * family-generic helper.
 */
struct net_if_router *net_if_ipv6_router_lookup(struct net_if *iface,
						struct in6_addr *addr)
{
	return iface_router_lookup(iface, AF_INET6, addr);
}
|
|
|
|
|
2016-10-17 13:23:36 +03:00
|
|
|
/* Find a default IPv6 router; thin wrapper over the family-generic
 * helper.
 */
struct net_if_router *net_if_ipv6_router_find_default(struct net_if *iface,
						      struct in6_addr *addr)
{
	return iface_router_find_default(iface, AF_INET6, addr);
}
|
|
|
|
|
2016-11-11 11:20:28 +02:00
|
|
|
/* Restart a router's lifetime: record the new start time and lifetime
 * (in seconds) and kick the shared router timer.
 */
void net_if_ipv6_router_update_lifetime(struct net_if_router *router,
					u16_t lifetime)
{
	NET_DBG("Updating expire time of %s by %u secs",
		log_strdup(net_sprint_ipv6_addr(&router->address.in6_addr)),
		lifetime);

	router->life_start = k_uptime_get_32();
	router->lifetime = lifetime;

	iface_router_run_timer(router->life_start);
}
|
|
|
|
|
2016-06-07 10:16:58 +03:00
|
|
|
/* Add a (non-default) IPv6 router entry; thin wrapper over the
 * family-generic helper.
 */
struct net_if_router *net_if_ipv6_router_add(struct net_if *iface,
					     struct in6_addr *addr,
					     u16_t lifetime)
{
	return iface_router_add(iface, AF_INET6, addr, false, lifetime);
}
|
|
|
|
|
2016-11-11 11:21:34 +02:00
|
|
|
/* Remove a router entry; thin wrapper over the generic helper. */
bool net_if_ipv6_router_rm(struct net_if_router *router)
{
	return iface_router_rm(router);
}
|
|
|
|
|
2016-05-18 11:15:41 +03:00
|
|
|
/* Return the interface's link-local IPv6 address in the given state
 * (NET_ADDR_ANY_STATE matches any state), or NULL if none.
 */
struct in6_addr *net_if_ipv6_get_ll(struct net_if *iface,
				    enum net_addr_state addr_state)
{
	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
	int i;

	if (!ipv6) {
		return NULL;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		if (!ipv6->unicast[i].is_used ||
		    (addr_state != NET_ADDR_ANY_STATE &&
		     ipv6->unicast[i].addr_state != addr_state) ||
		    ipv6->unicast[i].address.family != AF_INET6) {
			continue;
		}

		if (net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
			return &ipv6->unicast[i].address.in6_addr;
		}
	}

	return NULL;
}
|
|
|
|
|
|
|
|
struct in6_addr *net_if_ipv6_get_ll_addr(enum net_addr_state state,
|
|
|
|
struct net_if **iface)
|
|
|
|
{
|
|
|
|
struct net_if *tmp;
|
|
|
|
|
|
|
|
for (tmp = __net_if_start; tmp != __net_if_end; tmp++) {
|
|
|
|
struct in6_addr *addr;
|
|
|
|
|
|
|
|
addr = net_if_ipv6_get_ll(tmp, state);
|
|
|
|
if (addr) {
|
|
|
|
if (iface) {
|
|
|
|
*iface = tmp;
|
|
|
|
}
|
|
|
|
|
|
|
|
return addr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-05-14 14:41:56 +02:00
|
|
|
/* Return the first non-link-local ("global") IPv6 unicast address on
 * @a iface that is in exactly the given state, or NULL.
 */
static inline struct in6_addr *check_global_addr(struct net_if *iface,
						 enum net_addr_state state)
{
	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
	int i;

	if (!ipv6) {
		return NULL;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		if (!ipv6->unicast[i].is_used ||
		    (ipv6->unicast[i].addr_state != state) ||
		    ipv6->unicast[i].address.family != AF_INET6) {
			continue;
		}

		/* Not link-local => treated as global here. */
		if (!net_ipv6_is_ll_addr(&ipv6->unicast[i].address.in6_addr)) {
			return &ipv6->unicast[i].address.in6_addr;
		}
	}

	return NULL;
}
|
|
|
|
|
|
|
|
/* Find a global IPv6 address in @a state.
 *
 * @param iface Optional in/out: non-NULL *iface restricts the search
 *              to that interface; on success it receives the owner.
 */
struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
					     struct net_if **iface)
{
	struct net_if *tmp;

	for (tmp = __net_if_start; tmp != __net_if_end; tmp++) {
		struct in6_addr *addr;

		if (iface && *iface && tmp != *iface) {
			continue;
		}

		addr = check_global_addr(tmp, state);
		if (addr) {
			if (iface) {
				*iface = tmp;
			}

			return addr;
		}
	}

	return NULL;
}
|
|
|
|
|
2018-04-27 13:01:33 +03:00
|
|
|
static u8_t get_diff_ipv6(const struct in6_addr *src,
|
|
|
|
const struct in6_addr *dst)
|
2016-05-18 11:15:41 +03:00
|
|
|
{
|
2018-04-27 13:01:33 +03:00
|
|
|
return get_ipaddr_diff((const u8_t *)src, (const u8_t *)dst, 16);
|
2016-05-18 11:15:41 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool is_proper_ipv6_address(struct net_if_addr *addr)
|
|
|
|
{
|
|
|
|
if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
|
|
|
|
addr->address.family == AF_INET6 &&
|
2018-11-02 16:05:58 +02:00
|
|
|
!net_ipv6_is_ll_addr(&addr->address.in6_addr)) {
|
2016-05-18 11:15:41 +03:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-12-07 15:15:31 +02:00
|
|
|
/* Pick, from one interface, the unicast IPv6 address sharing the longest
 * prefix with dst. *best_so_far carries the best score seen across
 * interfaces; it is updated (and an address returned) only for matches
 * at least as good as that score, so callers can chain calls over all
 * interfaces and keep the overall winner.
 */
static struct in6_addr *net_if_ipv6_get_best_match(struct net_if *iface,
						   const struct in6_addr *dst,
						   u8_t *best_so_far)
{
	struct net_if_ipv6 *ipv6 = iface->config.ip.ipv6;
	struct in6_addr *src = NULL;
	u8_t len;
	int i;

	if (!ipv6) {
		return NULL;
	}

	for (i = 0; i < NET_IF_MAX_IPV6_ADDR; i++) {
		if (!is_proper_ipv6_address(&ipv6->unicast[i])) {
			continue;
		}

		len = get_diff_ipv6(dst, &ipv6->unicast[i].address.in6_addr);
		if (len >= *best_so_far) {
			/* Mesh local address can only be selected for the same
			 * subnet.
			 */
			if (ipv6->unicast[i].is_mesh_local && len < 64) {
				continue;
			}

			*best_so_far = len;
			src = &ipv6->unicast[i].address.in6_addr;
		}
	}

	return src;
}
|
|
|
|
|
2016-06-02 16:57:03 +03:00
|
|
|
/* Select the best IPv6 source address for sending to dst.
 *
 * For global destinations the longest-prefix match across interfaces
 * (or just dst_iface when supplied) wins; for link-local/multicast
 * destinations a preferred link-local address is used instead. Falls
 * back to the unspecified address (::) when nothing suitable exists.
 */
const struct in6_addr *net_if_ipv6_select_src_addr(struct net_if *dst_iface,
						   const struct in6_addr *dst)
{
	struct in6_addr *src = NULL;
	u8_t best_match = 0U;
	struct net_if *iface;

	if (!net_ipv6_is_ll_addr(dst) && !net_ipv6_is_addr_mcast(dst)) {

		/* Scan all interfaces only when the caller did not pin one;
		 * best_match accumulates the best prefix length seen.
		 */
		for (iface = __net_if_start;
		     !dst_iface && iface != __net_if_end;
		     iface++) {
			struct in6_addr *addr;

			addr = net_if_ipv6_get_best_match(iface, dst,
							  &best_match);
			if (addr) {
				src = addr;
			}
		}

		/* If caller has supplied interface, then use that */
		if (dst_iface) {
			src = net_if_ipv6_get_best_match(dst_iface, dst,
							 &best_match);
		}

	} else {
		/* Link-local or multicast destination: any preferred
		 * link-local source will do.
		 */
		for (iface = __net_if_start;
		     !dst_iface && iface != __net_if_end;
		     iface++) {
			struct in6_addr *addr;

			addr = net_if_ipv6_get_ll(iface, NET_ADDR_PREFERRED);
			if (addr) {
				src = addr;
				break;
			}
		}

		if (dst_iface) {
			src = net_if_ipv6_get_ll(dst_iface, NET_ADDR_PREFERRED);
		}
	}

	if (!src) {
		return net_ipv6_unspecified_address();
	}

	return src;
}
|
|
|
|
|
2018-12-07 15:15:31 +02:00
|
|
|
struct net_if *net_if_ipv6_select_src_iface(const struct in6_addr *dst)
|
2018-08-07 11:28:49 +03:00
|
|
|
{
|
|
|
|
const struct in6_addr *src;
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
src = net_if_ipv6_select_src_addr(NULL, dst);
|
|
|
|
if (src == net_ipv6_unspecified_address()) {
|
|
|
|
return net_if_get_default();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!net_if_ipv6_addr_lookup(src, &iface)) {
|
|
|
|
return net_if_get_default();
|
|
|
|
}
|
|
|
|
|
|
|
|
return iface;
|
|
|
|
}
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
u32_t net_if_ipv6_calc_reachable_time(struct net_if_ipv6 *ipv6)
|
2016-06-24 17:34:30 +02:00
|
|
|
{
|
2017-12-02 21:18:50 -08:00
|
|
|
u32_t min_reachable, max_reachable;
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
min_reachable = (MIN_RANDOM_NUMER * ipv6->base_reachable_time)
|
2017-12-02 21:18:50 -08:00
|
|
|
/ MIN_RANDOM_DENOM;
|
2018-01-19 19:01:23 +02:00
|
|
|
max_reachable = (MAX_RANDOM_NUMER * ipv6->base_reachable_time)
|
2017-12-02 21:18:50 -08:00
|
|
|
/ MAX_RANDOM_DENOM;
|
|
|
|
|
|
|
|
NET_DBG("min_reachable:%u max_reachable:%u", min_reachable,
|
|
|
|
max_reachable);
|
|
|
|
|
|
|
|
return min_reachable +
|
|
|
|
sys_rand32_get() % (max_reachable - min_reachable);
|
2016-06-24 17:34:30 +02:00
|
|
|
}
|
2019-05-20 15:46:14 +02:00
|
|
|
|
|
|
|
static void iface_ipv6_start(struct net_if *iface)
|
|
|
|
{
|
|
|
|
if (IS_ENABLED(CONFIG_NET_IPV6_DAD)) {
|
|
|
|
net_if_start_dad(iface);
|
|
|
|
} else {
|
|
|
|
struct net_if_ipv6 *ipv6 __unused = iface->config.ip.ipv6;
|
|
|
|
|
|
|
|
join_mcast_nodes(iface,
|
|
|
|
&ipv6->mcast[0].address.in6_addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
net_if_start_rs(iface);
|
|
|
|
}
|
|
|
|
|
2019-05-20 18:26:01 +02:00
|
|
|
static void iface_ipv6_init(int if_count)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2019-05-21 11:12:45 +02:00
|
|
|
iface_ipv6_dad_init();
|
2019-05-21 12:07:08 +02:00
|
|
|
iface_ipv6_nd_init();
|
2019-05-21 11:12:45 +02:00
|
|
|
|
2019-05-20 18:26:01 +02:00
|
|
|
k_delayed_work_init(&address_lifetime_timer, address_lifetime_timeout);
|
|
|
|
k_delayed_work_init(&prefix_lifetime_timer, prefix_lifetime_timeout);
|
|
|
|
|
|
|
|
if (if_count > ARRAY_SIZE(ipv6_addresses)) {
|
|
|
|
NET_WARN("You have %lu IPv6 net_if addresses but %d "
|
|
|
|
"network interfaces", ARRAY_SIZE(ipv6_addresses),
|
|
|
|
if_count);
|
|
|
|
NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV6_COUNT "
|
|
|
|
"value.");
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(ipv6_addresses); i++) {
|
|
|
|
ipv6_addresses[i].ipv6.hop_limit = CONFIG_NET_INITIAL_HOP_LIMIT;
|
|
|
|
ipv6_addresses[i].ipv6.base_reachable_time = REACHABLE_TIME;
|
|
|
|
|
|
|
|
net_if_ipv6_set_reachable_time(&ipv6_addresses[i].ipv6);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-20 15:16:09 +02:00
|
|
|
#else
|
|
|
|
#define join_mcast_allnodes(...)
|
|
|
|
#define join_mcast_solicit_node(...)
|
|
|
|
#define leave_mcast_all(...)
|
|
|
|
#define join_mcast_nodes(...)
|
2019-05-20 15:46:14 +02:00
|
|
|
#define iface_ipv6_start(...)
|
2019-05-20 18:26:01 +02:00
|
|
|
#define iface_ipv6_init(...)
|
2019-08-09 14:49:35 +03:00
|
|
|
|
|
|
|
/* IPv6 disabled: stub that never finds a multicast address. */
struct net_if_mcast_addr *net_if_ipv6_maddr_lookup(const struct in6_addr *addr,
						   struct net_if **iface)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(iface);

	return NULL;
}
|
|
|
|
|
|
|
|
/* IPv6 disabled: stub that never finds a unicast address. */
struct net_if_addr *net_if_ipv6_addr_lookup(const struct in6_addr *addr,
					    struct net_if **ret)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(ret);

	return NULL;
}
|
|
|
|
|
|
|
|
/* IPv6 disabled: stub that never returns a global address. */
struct in6_addr *net_if_ipv6_get_global_addr(enum net_addr_state state,
					     struct net_if **iface)
{
	ARG_UNUSED(state);
	ARG_UNUSED(iface);

	return NULL;
}
|
2019-05-20 15:16:09 +02:00
|
|
|
#endif /* CONFIG_NET_IPV6 */
|
2017-02-09 09:57:06 +02:00
|
|
|
|
2019-08-09 14:49:35 +03:00
|
|
|
#if defined(CONFIG_NET_NATIVE_IPV4)
|
2018-01-19 19:01:23 +02:00
|
|
|
int net_if_config_ipv4_get(struct net_if *iface, struct net_if_ipv4 **ipv4)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (iface->config.ip.ipv4) {
|
|
|
|
if (ipv4) {
|
|
|
|
*ipv4 = iface->config.ip.ipv4;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
|
|
|
|
if (ipv4_addresses[i].iface) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
iface->config.ip.ipv4 = &ipv4_addresses[i].ipv4;
|
|
|
|
ipv4_addresses[i].iface = iface;
|
|
|
|
|
|
|
|
if (ipv4) {
|
|
|
|
*ipv4 = &ipv4_addresses[i].ipv4;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -ESRCH;
|
|
|
|
}
|
|
|
|
|
|
|
|
int net_if_config_ipv4_put(struct net_if *iface)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!iface->config.ip.ipv4) {
|
|
|
|
return -EALREADY;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
|
|
|
|
if (ipv4_addresses[i].iface != iface) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
iface->config.ip.ipv4 = NULL;
|
|
|
|
ipv4_addresses[i].iface = NULL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-06-24 17:34:30 +02:00
|
|
|
/* Find the IPv4 router entry for addr on iface (delegates to the
 * family-generic router table helper).
 */
struct net_if_router *net_if_ipv4_router_lookup(struct net_if *iface,
						struct in_addr *addr)
{
	return iface_router_lookup(iface, AF_INET, addr);
}
|
|
|
|
|
2019-05-22 19:41:04 +02:00
|
|
|
/* Find a default IPv4 router, optionally scoped to iface/addr
 * (delegates to the family-generic helper).
 */
struct net_if_router *net_if_ipv4_router_find_default(struct net_if *iface,
						      struct in_addr *addr)
{
	return iface_router_find_default(iface, AF_INET, addr);
}
|
|
|
|
|
2016-06-24 17:34:30 +02:00
|
|
|
/* Register an IPv4 router on iface with the given default flag and
 * lifetime (delegates to the family-generic helper).
 */
struct net_if_router *net_if_ipv4_router_add(struct net_if *iface,
					     struct in_addr *addr,
					     bool is_default,
					     u16_t lifetime)
{
	return iface_router_add(iface, AF_INET, addr, is_default, lifetime);
}
|
|
|
|
|
2019-05-22 19:41:04 +02:00
|
|
|
/* Remove a previously added IPv4 router entry. */
bool net_if_ipv4_router_rm(struct net_if_router *router)
{
	return iface_router_rm(router);
}
|
|
|
|
|
2016-06-24 17:34:30 +02:00
|
|
|
bool net_if_ipv4_addr_mask_cmp(struct net_if *iface,
|
2018-10-23 17:43:22 +03:00
|
|
|
const struct in_addr *addr)
|
2016-06-24 17:34:30 +02:00
|
|
|
{
|
2018-01-19 19:01:23 +02:00
|
|
|
struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
|
2018-01-11 16:06:53 +02:00
|
|
|
u32_t subnet;
|
2016-06-24 17:34:30 +02:00
|
|
|
int i;
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv4) {
|
|
|
|
return false;
|
|
|
|
}
|
2018-01-11 16:06:53 +02:00
|
|
|
|
2018-10-23 17:43:22 +03:00
|
|
|
subnet = UNALIGNED_GET(&addr->s_addr) & ipv4->netmask.s_addr;
|
2018-01-11 16:06:53 +02:00
|
|
|
|
2016-06-24 17:34:30 +02:00
|
|
|
for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv4->unicast[i].is_used ||
|
|
|
|
ipv4->unicast[i].address.family != AF_INET) {
|
2018-01-11 16:06:53 +02:00
|
|
|
continue;
|
2016-06-24 17:34:30 +02:00
|
|
|
}
|
2018-01-11 16:06:53 +02:00
|
|
|
|
2018-10-23 17:43:22 +03:00
|
|
|
if ((ipv4->unicast[i].address.in_addr.s_addr &
|
|
|
|
ipv4->netmask.s_addr) == subnet) {
|
2016-06-24 17:34:30 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
2016-05-18 11:15:41 +03:00
|
|
|
}
|
|
|
|
|
2018-10-23 17:49:32 +03:00
|
|
|
static bool ipv4_is_broadcast_address(struct net_if *iface,
|
|
|
|
const struct in_addr *addr)
|
|
|
|
{
|
|
|
|
struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
|
|
|
|
|
|
|
|
if (!ipv4) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!net_if_ipv4_addr_mask_cmp(iface, addr)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((UNALIGNED_GET(&addr->s_addr) & ~ipv4->netmask.s_addr) ==
|
|
|
|
~ipv4->netmask.s_addr) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool net_if_ipv4_is_addr_bcast(struct net_if *iface,
|
|
|
|
const struct in_addr *addr)
|
|
|
|
{
|
|
|
|
if (iface) {
|
|
|
|
return ipv4_is_broadcast_address(iface, addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (iface = __net_if_start; iface != __net_if_end; iface++) {
|
|
|
|
bool ret;
|
|
|
|
|
|
|
|
ret = ipv4_is_broadcast_address(iface, addr);
|
|
|
|
if (ret) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-12-07 15:15:31 +02:00
|
|
|
struct net_if *net_if_ipv4_select_src_iface(const struct in_addr *dst)
|
2018-01-31 16:09:38 +02:00
|
|
|
{
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
for (iface = __net_if_start; iface != __net_if_end; iface++) {
|
|
|
|
bool ret;
|
|
|
|
|
|
|
|
ret = net_if_ipv4_addr_mask_cmp(iface, dst);
|
|
|
|
if (ret) {
|
|
|
|
return iface;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return net_if_get_default();
|
|
|
|
}
|
|
|
|
|
2018-04-27 13:01:33 +03:00
|
|
|
/* Score how closely two IPv4 addresses (4 bytes each) match by delegating
 * to the common get_ipaddr_diff() helper; a larger value means a closer
 * match (used for source address selection).
 */
static u8_t get_diff_ipv4(const struct in_addr *src,
			  const struct in_addr *dst)
{
	return get_ipaddr_diff((const u8_t *)src, (const u8_t *)dst, 4);
}
|
|
|
|
|
|
|
|
static inline bool is_proper_ipv4_address(struct net_if_addr *addr)
|
|
|
|
{
|
|
|
|
if (addr->is_used && addr->addr_state == NET_ADDR_PREFERRED &&
|
|
|
|
addr->address.family == AF_INET &&
|
2018-11-02 16:05:58 +02:00
|
|
|
!net_ipv4_is_ll_addr(&addr->address.in_addr)) {
|
2018-04-27 13:01:33 +03:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Pick, from one interface, the IPv4 unicast address that best matches
 * dst. *best_so_far carries the best score seen across interfaces and is
 * updated only for matches at least as good, so callers can chain calls
 * over all interfaces and keep the overall winner.
 */
static struct in_addr *net_if_ipv4_get_best_match(struct net_if *iface,
						  const struct in_addr *dst,
						  u8_t *best_so_far)
{
	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
	struct in_addr *src = NULL;
	u8_t len;
	int i;

	if (!ipv4) {
		return NULL;
	}

	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
		if (!is_proper_ipv4_address(&ipv4->unicast[i])) {
			continue;
		}

		len = get_diff_ipv4(dst, &ipv4->unicast[i].address.in_addr);
		if (len >= *best_so_far) {
			*best_so_far = len;
			src = &ipv4->unicast[i].address.in_addr;
		}
	}

	return src;
}
|
|
|
|
|
2019-05-14 14:49:25 +02:00
|
|
|
/* Return the first in-use IPv4 unicast address on iface whose state
 * matches addr_state (NET_ADDR_ANY_STATE matches anything); ll selects
 * the scope: true for link-local addresses only, false for non-link-local
 * addresses only. NULL when iface/config is missing or nothing matches.
 */
static struct in_addr *if_ipv4_get_addr(struct net_if *iface,
					enum net_addr_state addr_state, bool ll)
{
	struct net_if_ipv4 *ipv4;
	int i;

	if (iface == NULL) {
		return NULL;
	}

	ipv4 = iface->config.ip.ipv4;
	if (ipv4 == NULL) {
		return NULL;
	}

	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
		struct net_if_addr *unicast = &ipv4->unicast[i];

		if (!unicast->is_used ||
		    (addr_state != NET_ADDR_ANY_STATE &&
		     unicast->addr_state != addr_state) ||
		    unicast->address.family != AF_INET) {
			continue;
		}

		/* Keep only the requested scope: the address's link-local
		 * status must equal the ll flag.
		 */
		if (net_ipv4_is_ll_addr(&unicast->address.in_addr) != ll) {
			continue;
		}

		return &unicast->address.in_addr;
	}

	return NULL;
}
|
2019-05-14 14:49:25 +02:00
|
|
|
|
|
|
|
/* First link-local IPv4 address on iface in the given state, or NULL. */
struct in_addr *net_if_ipv4_get_ll(struct net_if *iface,
				   enum net_addr_state addr_state)
{
	return if_ipv4_get_addr(iface, addr_state, true);
}
|
|
|
|
|
|
|
|
/* First non-link-local IPv4 address on iface in the given state, or NULL. */
struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
					    enum net_addr_state addr_state)
{
	return if_ipv4_get_addr(iface, addr_state, false);
}
|
2018-04-27 13:01:33 +03:00
|
|
|
|
|
|
|
/* Select the best IPv4 source address for sending to dst.
 *
 * For ordinary destinations the best prefix match across interfaces (or
 * just dst_iface when supplied) wins; for link-local/multicast
 * destinations a preferred link-local address is used. Falls back to a
 * preferred global address and finally to the unspecified address
 * (0.0.0.0) when nothing suitable exists.
 */
const struct in_addr *net_if_ipv4_select_src_addr(struct net_if *dst_iface,
						  const struct in_addr *dst)
{
	struct in_addr *src = NULL;
	u8_t best_match = 0U;
	struct net_if *iface;

	if (!net_ipv4_is_ll_addr(dst) && !net_ipv4_is_addr_mcast(dst)) {

		/* Scan all interfaces only when the caller did not pin one;
		 * best_match accumulates the best score seen.
		 */
		for (iface = __net_if_start;
		     !dst_iface && iface != __net_if_end;
		     iface++) {
			struct in_addr *addr;

			addr = net_if_ipv4_get_best_match(iface, dst,
							  &best_match);
			if (addr) {
				src = addr;
			}
		}

		/* If caller has supplied interface, then use that */
		if (dst_iface) {
			src = net_if_ipv4_get_best_match(dst_iface, dst,
							 &best_match);
		}

	} else {
		/* Link-local or multicast destination: any preferred
		 * link-local source will do.
		 */
		for (iface = __net_if_start;
		     !dst_iface && iface != __net_if_end;
		     iface++) {
			struct in_addr *addr;

			addr = net_if_ipv4_get_ll(iface, NET_ADDR_PREFERRED);
			if (addr) {
				src = addr;
				break;
			}
		}

		if (dst_iface) {
			src = net_if_ipv4_get_ll(dst_iface, NET_ADDR_PREFERRED);
		}
	}

	if (!src) {
		/* Last resort: a preferred global address on dst_iface
		 * (if_ipv4_get_addr tolerates dst_iface == NULL).
		 */
		src = net_if_ipv4_get_global_addr(dst_iface,
						  NET_ADDR_PREFERRED);
		if (src) {
			return src;
		}

		return net_ipv4_unspecified_address();
	}

	return src;
}
|
|
|
|
|
2016-06-22 15:34:47 +03:00
|
|
|
struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
|
|
|
|
struct net_if **ret)
|
2016-05-17 14:13:17 +03:00
|
|
|
{
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
for (iface = __net_if_start; iface != __net_if_end; iface++) {
|
2018-01-19 19:01:23 +02:00
|
|
|
struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
|
2016-05-17 14:13:17 +03:00
|
|
|
int i;
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv4) {
|
|
|
|
continue;
|
|
|
|
}
|
2018-01-11 16:06:53 +02:00
|
|
|
|
2016-05-17 14:13:17 +03:00
|
|
|
for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv4->unicast[i].is_used ||
|
|
|
|
ipv4->unicast[i].address.family != AF_INET) {
|
2016-05-17 14:13:17 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2017-04-10 17:13:35 +03:00
|
|
|
if (UNALIGNED_GET(&addr->s4_addr32[0]) ==
|
2018-01-19 19:01:23 +02:00
|
|
|
ipv4->unicast[i].address.in_addr.s_addr) {
|
2016-06-22 15:34:47 +03:00
|
|
|
|
|
|
|
if (ret) {
|
|
|
|
*ret = iface;
|
|
|
|
}
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
return &ipv4->unicast[i];
|
2016-05-17 14:13:17 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-05-29 11:43:13 +08:00
|
|
|
int z_impl_net_if_ipv4_addr_lookup_by_index(const struct in_addr *addr)
|
|
|
|
{
|
|
|
|
struct net_if_addr *if_addr;
|
|
|
|
struct net_if *iface = NULL;
|
|
|
|
|
|
|
|
if_addr = net_if_ipv4_addr_lookup(addr, &iface);
|
|
|
|
if (!if_addr) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return net_if_get_by_iface(iface);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_USERSPACE
|
2019-08-13 12:58:38 -07:00
|
|
|
/* Syscall verification wrapper: copy the address out of user memory
 * (oopsing the caller on a bad pointer) before calling the kernel-side
 * implementation.
 */
static inline int z_vrfy_net_if_ipv4_addr_lookup_by_index(
					  const struct in_addr *addr)
{
	struct in_addr addr_v4;

	Z_OOPS(z_user_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));

	return z_impl_net_if_ipv4_addr_lookup_by_index(&addr_v4);
}
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
#include <syscalls/net_if_ipv4_addr_lookup_by_index_mrsh.c>
|
2019-05-29 11:43:13 +08:00
|
|
|
#endif
|
|
|
|
|
2019-06-19 11:23:10 +03:00
|
|
|
void net_if_ipv4_set_netmask(struct net_if *iface,
|
|
|
|
const struct in_addr *netmask)
|
|
|
|
{
|
|
|
|
if (net_if_config_ipv4_get(iface, NULL) < 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!iface->config.ip.ipv4) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
net_ipaddr_copy(&iface->config.ip.ipv4->netmask, netmask);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool z_impl_net_if_ipv4_set_netmask_by_index(int index,
|
|
|
|
const struct in_addr *netmask)
|
|
|
|
{
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
iface = net_if_get_by_index(index);
|
|
|
|
if (!iface) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
net_if_ipv4_set_netmask(iface, netmask);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_USERSPACE
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
/* Syscall verification wrapper: copy the netmask out of user memory
 * before calling the implementation. Rejected outright (returns false)
 * unless CONFIG_NET_IF_USERSPACE_ACCESS is enabled.
 */
bool z_vrfy_net_if_ipv4_set_netmask_by_index(int index,
					     const struct in_addr *netmask)
{
#if defined(CONFIG_NET_IF_USERSPACE_ACCESS)
	struct in_addr netmask_addr;

	Z_OOPS(z_user_from_copy(&netmask_addr, (void *)netmask,
				sizeof(netmask_addr)));

	return z_impl_net_if_ipv4_set_netmask_by_index(index, &netmask_addr);
#else
	return false;
#endif
}
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
#include <syscalls/net_if_ipv4_set_netmask_by_index_mrsh.c>
|
2019-06-19 11:23:10 +03:00
|
|
|
#endif /* CONFIG_USERSPACE */
|
|
|
|
|
2019-06-19 11:36:16 +03:00
|
|
|
void net_if_ipv4_set_gw(struct net_if *iface, const struct in_addr *gw)
|
2019-06-19 11:35:05 +03:00
|
|
|
{
|
|
|
|
if (net_if_config_ipv4_get(iface, NULL) < 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!iface->config.ip.ipv4) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
net_ipaddr_copy(&iface->config.ip.ipv4->gw, gw);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool z_impl_net_if_ipv4_set_gw_by_index(int index,
|
|
|
|
const struct in_addr *gw)
|
|
|
|
{
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
iface = net_if_get_by_index(index);
|
|
|
|
if (!iface) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
net_if_ipv4_set_gw(iface, gw);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_USERSPACE
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
/* Syscall verification wrapper: copy the gateway address out of user
 * memory before calling the implementation. Rejected outright (returns
 * false) unless CONFIG_NET_IF_USERSPACE_ACCESS is enabled.
 */
bool z_vrfy_net_if_ipv4_set_gw_by_index(int index,
					const struct in_addr *gw)
{
#if defined(CONFIG_NET_IF_USERSPACE_ACCESS)
	struct in_addr gw_addr;

	Z_OOPS(z_user_from_copy(&gw_addr, (void *)gw, sizeof(gw_addr)));

	return z_impl_net_if_ipv4_set_gw_by_index(index, &gw_addr);
#else
	return false;
#endif
}
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
#include <syscalls/net_if_ipv4_set_gw_by_index_mrsh.c>
|
2019-06-19 11:35:05 +03:00
|
|
|
#endif /* CONFIG_USERSPACE */
|
|
|
|
|
2016-11-25 20:25:34 +02:00
|
|
|
static struct net_if_addr *ipv4_addr_find(struct net_if *iface,
|
|
|
|
struct in_addr *addr)
|
|
|
|
{
|
2018-01-19 19:01:23 +02:00
|
|
|
struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
|
2016-11-25 20:25:34 +02:00
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv4->unicast[i].is_used) {
|
2016-11-25 20:25:34 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
if (net_ipv4_addr_cmp(addr,
|
|
|
|
&ipv4->unicast[i].address.in_addr)) {
|
|
|
|
return &ipv4->unicast[i];
|
2016-11-25 20:25:34 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2016-05-17 14:13:17 +03:00
|
|
|
/* Add an IPv4 unicast address to an interface.
 *
 * Attaches an IPv4 config if needed, returns the existing entry when the
 * address is already present, otherwise claims a slot (a DHCP address may
 * take over an OVERRIDABLE slot), marks it PREFERRED and emits a
 * NET_EVENT_IPV4_ADDR_ADD management event. Returns NULL when no config
 * or free slot is available.
 *
 * vlifetime only selects finite vs infinite lifetime here; the caller is
 * responsible for the actual expiry timers.
 */
struct net_if_addr *net_if_ipv4_addr_add(struct net_if *iface,
					 struct in_addr *addr,
					 enum net_addr_type addr_type,
					 u32_t vlifetime)
{
	struct net_if_addr *ifaddr;
	struct net_if_ipv4 *ipv4;
	int i;

	if (net_if_config_ipv4_get(iface, &ipv4) < 0) {
		return NULL;
	}

	ifaddr = ipv4_addr_find(iface, addr);
	if (ifaddr) {
		/* TODO: should set addr_type/vlifetime */
		return ifaddr;
	}

	/* Pick a slot: a DHCP address may override an OVERRIDABLE one,
	 * otherwise the first unused slot wins.
	 */
	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
		struct net_if_addr *cur = &ipv4->unicast[i];

		if (addr_type == NET_ADDR_DHCP
		    && cur->addr_type == NET_ADDR_OVERRIDABLE) {
			ifaddr = cur;
			break;
		}

		if (!ipv4->unicast[i].is_used) {
			ifaddr = cur;
			break;
		}
	}

	if (ifaddr) {
		ifaddr->is_used = true;
		ifaddr->address.family = AF_INET;
		ifaddr->address.in_addr.s4_addr32[0] =
						addr->s4_addr32[0];
		ifaddr->addr_type = addr_type;

		/* Caller has to take care of timers and their expiry */
		if (vlifetime) {
			ifaddr->is_infinite = false;
		} else {
			ifaddr->is_infinite = true;
		}

		/**
		 *  TODO: Handle properly PREFERRED/DEPRECATED state when
		 *  address in use, expired and renewal state.
		 */
		ifaddr->addr_state = NET_ADDR_PREFERRED;

		NET_DBG("[%d] interface %p address %s type %s added", i, iface,
			log_strdup(net_sprint_ipv4_addr(addr)),
			net_addr_type2str(addr_type));

		/* Let listeners (e.g. connection managers) know about the
		 * new address.
		 */
		net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ADDR_ADD, iface,
						&ifaddr->address.in_addr,
						sizeof(struct in_addr));

		return ifaddr;
	}

	return NULL;
}
|
|
|
|
|
2019-05-29 13:03:34 +08:00
|
|
|
bool net_if_ipv4_addr_rm(struct net_if *iface, const struct in_addr *addr)
|
2016-06-06 11:51:30 +03:00
|
|
|
{
|
2018-01-19 19:01:23 +02:00
|
|
|
struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
|
2016-06-06 11:51:30 +03:00
|
|
|
int i;
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv4) {
|
|
|
|
return false;
|
|
|
|
}
|
2018-01-11 16:06:53 +02:00
|
|
|
|
2016-06-06 11:51:30 +03:00
|
|
|
for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv4->unicast[i].is_used) {
|
2016-06-06 11:51:30 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!net_ipv4_addr_cmp(&ipv4->unicast[i].address.in_addr,
|
|
|
|
addr)) {
|
2016-06-06 11:51:30 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
ipv4->unicast[i].is_used = false;
|
2016-06-06 11:51:30 +03:00
|
|
|
|
|
|
|
NET_DBG("[%d] interface %p address %s removed",
|
2018-10-02 14:57:55 +03:00
|
|
|
i, iface, log_strdup(net_sprint_ipv4_addr(addr)));
|
2016-06-06 11:51:30 +03:00
|
|
|
|
2019-06-12 10:01:42 +03:00
|
|
|
net_mgmt_event_notify_with_info(
|
|
|
|
NET_EVENT_IPV4_ADDR_DEL, iface,
|
|
|
|
&ipv4->unicast[i].address.in_addr,
|
|
|
|
sizeof(struct in_addr));
|
2016-10-26 10:52:36 +03:00
|
|
|
|
2016-06-06 11:51:30 +03:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
2017-09-21 23:06:59 +03:00
|
|
|
|
2019-05-29 11:43:13 +08:00
|
|
|
bool z_impl_net_if_ipv4_addr_add_by_index(int index,
|
|
|
|
struct in_addr *addr,
|
|
|
|
enum net_addr_type addr_type,
|
|
|
|
u32_t vlifetime)
|
|
|
|
{
|
|
|
|
struct net_if *iface;
|
|
|
|
struct net_if_addr *if_addr;
|
|
|
|
|
|
|
|
iface = net_if_get_by_index(index);
|
|
|
|
if (!iface) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if_addr = net_if_ipv4_addr_add(iface, addr, addr_type, vlifetime);
|
|
|
|
return if_addr ? true : false;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_USERSPACE
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
bool z_vrfy_net_if_ipv4_addr_add_by_index(int index,
|
|
|
|
struct in_addr *addr,
|
|
|
|
enum net_addr_type addr_type,
|
|
|
|
u32_t vlifetime)
|
2019-05-29 11:43:13 +08:00
|
|
|
{
|
|
|
|
#if defined(CONFIG_NET_IF_USERSPACE_ACCESS)
|
|
|
|
struct in_addr addr_v4;
|
|
|
|
|
|
|
|
Z_OOPS(z_user_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
|
|
|
|
|
|
|
|
return z_impl_net_if_ipv4_addr_add_by_index(index,
|
|
|
|
&addr_v4,
|
|
|
|
addr_type,
|
|
|
|
vlifetime);
|
|
|
|
#else
|
|
|
|
return false;
|
|
|
|
#endif /* CONFIG_NET_IF_USERSPACE_ACCESS */
|
|
|
|
}
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
#include <syscalls/net_if_ipv4_addr_add_by_index_mrsh.c>
|
2019-05-29 11:43:13 +08:00
|
|
|
#endif /* CONFIG_USERSPACE */
|
|
|
|
|
|
|
|
bool z_impl_net_if_ipv4_addr_rm_by_index(int index,
|
|
|
|
const struct in_addr *addr)
|
|
|
|
{
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
iface = net_if_get_by_index(index);
|
|
|
|
if (!iface) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return net_if_ipv4_addr_rm(iface, addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_USERSPACE
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
bool z_vrfy_net_if_ipv4_addr_rm_by_index(int index,
|
|
|
|
const struct in_addr *addr)
|
2019-05-29 11:43:13 +08:00
|
|
|
{
|
|
|
|
#if defined(CONFIG_NET_IF_USERSPACE_ACCESS)
|
|
|
|
struct in_addr addr_v4;
|
|
|
|
|
|
|
|
Z_OOPS(z_user_from_copy(&addr_v4, (void *)addr, sizeof(addr_v4)));
|
|
|
|
|
|
|
|
return (uint32_t)z_impl_net_if_ipv4_addr_rm_by_index(index, &addr_v4);
|
|
|
|
#else
|
|
|
|
return false;
|
|
|
|
#endif /* CONFIG_NET_IF_USERSPACE_ACCESS */
|
|
|
|
}
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 13:34:31 -07:00
|
|
|
#include <syscalls/net_if_ipv4_addr_rm_by_index_mrsh.c>
|
2019-05-29 11:43:13 +08:00
|
|
|
#endif /* CONFIG_USERSPACE */
|
|
|
|
|
2017-09-21 23:06:59 +03:00
|
|
|
static struct net_if_mcast_addr *ipv4_maddr_find(struct net_if *iface,
|
|
|
|
bool is_used,
|
|
|
|
const struct in_addr *addr)
|
|
|
|
{
|
2018-01-19 19:01:23 +02:00
|
|
|
struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
|
2017-09-21 23:06:59 +03:00
|
|
|
int i;
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!ipv4) {
|
|
|
|
return NULL;
|
|
|
|
}
|
2018-01-11 16:06:53 +02:00
|
|
|
|
2017-09-21 23:06:59 +03:00
|
|
|
for (i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
|
2018-01-19 19:01:23 +02:00
|
|
|
if ((is_used && !ipv4->mcast[i].is_used) ||
|
|
|
|
(!is_used && ipv4->mcast[i].is_used)) {
|
2017-09-21 23:06:59 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (addr) {
|
2018-01-19 19:01:23 +02:00
|
|
|
if (!net_ipv4_addr_cmp(&ipv4->mcast[i].address.in_addr,
|
|
|
|
addr)) {
|
2017-09-21 23:06:59 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
return &ipv4->mcast[i];
|
2017-09-21 23:06:59 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
struct net_if_mcast_addr *net_if_ipv4_maddr_add(struct net_if *iface,
|
|
|
|
const struct in_addr *addr)
|
|
|
|
{
|
|
|
|
struct net_if_mcast_addr *maddr;
|
|
|
|
|
2018-01-19 19:01:23 +02:00
|
|
|
if (net_if_config_ipv4_get(iface, NULL) < 0) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-11-02 16:05:58 +02:00
|
|
|
if (!net_ipv4_is_addr_mcast(addr)) {
|
2017-09-21 23:06:59 +03:00
|
|
|
NET_DBG("Address %s is not a multicast address.",
|
2018-10-02 14:57:55 +03:00
|
|
|
log_strdup(net_sprint_ipv4_addr(addr)));
|
2017-09-21 23:06:59 +03:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
maddr = ipv4_maddr_find(iface, false, NULL);
|
|
|
|
if (maddr) {
|
|
|
|
maddr->is_used = true;
|
|
|
|
maddr->address.family = AF_INET;
|
|
|
|
maddr->address.in_addr.s4_addr32[0] = addr->s4_addr32[0];
|
|
|
|
|
|
|
|
NET_DBG("interface %p address %s added", iface,
|
2018-10-02 14:57:55 +03:00
|
|
|
log_strdup(net_sprint_ipv4_addr(addr)));
|
2017-09-21 23:06:59 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return maddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool net_if_ipv4_maddr_rm(struct net_if *iface, const struct in_addr *addr)
|
|
|
|
{
|
|
|
|
struct net_if_mcast_addr *maddr;
|
|
|
|
|
|
|
|
maddr = ipv4_maddr_find(iface, true, addr);
|
|
|
|
if (maddr) {
|
|
|
|
maddr->is_used = false;
|
|
|
|
|
|
|
|
NET_DBG("interface %p address %s removed",
|
2018-10-02 14:57:55 +03:00
|
|
|
iface, log_strdup(net_sprint_ipv4_addr(addr)));
|
2017-09-21 23:06:59 +03:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *maddr,
|
|
|
|
struct net_if **ret)
|
|
|
|
{
|
|
|
|
struct net_if_mcast_addr *addr;
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
for (iface = __net_if_start; iface != __net_if_end; iface++) {
|
|
|
|
if (ret && *ret && iface != *ret) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
addr = ipv4_maddr_find(iface, true, maddr);
|
|
|
|
if (addr) {
|
|
|
|
if (ret) {
|
|
|
|
*ret = iface;
|
|
|
|
}
|
|
|
|
|
|
|
|
return addr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
2019-05-20 18:26:01 +02:00
|
|
|
|
|
|
|
static void iface_ipv4_init(int if_count)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (if_count > ARRAY_SIZE(ipv4_addresses)) {
|
|
|
|
NET_WARN("You have %lu IPv4 net_if addresses but %d "
|
|
|
|
"network interfaces", ARRAY_SIZE(ipv4_addresses),
|
|
|
|
if_count);
|
|
|
|
NET_WARN("Consider increasing CONFIG_NET_IF_MAX_IPV4_COUNT "
|
|
|
|
"value.");
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(ipv4_addresses); i++) {
|
|
|
|
ipv4_addresses[i].ipv4.ttl = CONFIG_NET_INITIAL_TTL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#else
|
|
|
|
#define iface_ipv4_init(...)
|
2019-08-09 14:49:35 +03:00
|
|
|
|
|
|
|
struct net_if_mcast_addr *net_if_ipv4_maddr_lookup(const struct in_addr *addr,
|
|
|
|
struct net_if **iface)
|
|
|
|
{
|
|
|
|
ARG_UNUSED(addr);
|
|
|
|
ARG_UNUSED(iface);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct net_if_addr *net_if_ipv4_addr_lookup(const struct in_addr *addr,
|
|
|
|
struct net_if **ret)
|
|
|
|
{
|
|
|
|
ARG_UNUSED(addr);
|
|
|
|
ARG_UNUSED(ret);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct in_addr *net_if_ipv4_get_global_addr(struct net_if *iface,
|
|
|
|
enum net_addr_state addr_state)
|
|
|
|
{
|
|
|
|
ARG_UNUSED(addr_state);
|
|
|
|
ARG_UNUSED(iface);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
2019-05-20 15:23:13 +02:00
|
|
|
#endif /* CONFIG_NET_IPV4 */
|
2016-06-07 10:16:58 +03:00
|
|
|
|
2018-08-07 13:05:55 +03:00
|
|
|
struct net_if *net_if_select_src_iface(const struct sockaddr *dst)
|
|
|
|
{
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
if (!dst) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IS_ENABLED(CONFIG_NET_IPV6) && dst->sa_family == AF_INET6) {
|
|
|
|
iface = net_if_ipv6_select_src_iface(&net_sin6(dst)->sin6_addr);
|
|
|
|
if (!iface) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
return iface;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IS_ENABLED(CONFIG_NET_IPV4) && dst->sa_family == AF_INET) {
|
|
|
|
iface = net_if_ipv4_select_src_iface(&net_sin(dst)->sin_addr);
|
|
|
|
if (!iface) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
return iface;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
return net_if_get_default();
|
|
|
|
}
|
|
|
|
|
2018-07-23 14:03:11 +03:00
|
|
|
enum net_verdict net_if_recv_data(struct net_if *iface, struct net_pkt *pkt)
|
|
|
|
{
|
|
|
|
if (IS_ENABLED(CONFIG_NET_PROMISCUOUS_MODE) &&
|
|
|
|
net_if_is_promisc(iface)) {
|
|
|
|
/* If the packet is not for us and the promiscuous
|
|
|
|
* mode is enabled, then increase the ref count so
|
|
|
|
* that net_core.c:processing_data() will not free it.
|
|
|
|
* The promiscuous mode handler must free the packet
|
|
|
|
* after it has finished working with it.
|
|
|
|
*
|
|
|
|
* If packet is for us, then NET_CONTINUE is returned.
|
|
|
|
* In this case we must clone the packet, as the packet
|
|
|
|
* could be manipulated by other part of the stack.
|
|
|
|
*/
|
|
|
|
enum net_verdict verdict;
|
|
|
|
struct net_pkt *new_pkt;
|
|
|
|
|
|
|
|
/* This protects pkt so that it will not be freed by L2 recv()
|
|
|
|
*/
|
|
|
|
net_pkt_ref(pkt);
|
|
|
|
|
|
|
|
verdict = net_if_l2(iface)->recv(iface, pkt);
|
|
|
|
if (verdict == NET_CONTINUE) {
|
2019-02-08 09:26:29 +01:00
|
|
|
new_pkt = net_pkt_clone(pkt, K_NO_WAIT);
|
2018-07-23 14:03:11 +03:00
|
|
|
} else {
|
|
|
|
new_pkt = net_pkt_ref(pkt);
|
|
|
|
}
|
|
|
|
|
2019-03-18 14:34:36 +02:00
|
|
|
/* L2 has modified the buffer starting point, it is easier
|
|
|
|
* to re-initialize the cursor rather than updating it.
|
|
|
|
*/
|
|
|
|
net_pkt_cursor_init(new_pkt);
|
|
|
|
|
2018-07-23 14:03:11 +03:00
|
|
|
if (net_promisc_mode_input(new_pkt) == NET_DROP) {
|
|
|
|
net_pkt_unref(new_pkt);
|
|
|
|
}
|
|
|
|
|
|
|
|
net_pkt_unref(pkt);
|
|
|
|
|
|
|
|
return verdict;
|
|
|
|
}
|
|
|
|
|
|
|
|
return net_if_l2(iface)->recv(iface, pkt);
|
|
|
|
}
|
|
|
|
|
2016-09-28 14:18:55 +03:00
|
|
|
void net_if_register_link_cb(struct net_if_link_cb *link,
|
|
|
|
net_if_link_callback_t cb)
|
|
|
|
{
|
|
|
|
sys_slist_find_and_remove(&link_callbacks, &link->node);
|
|
|
|
sys_slist_prepend(&link_callbacks, &link->node);
|
|
|
|
|
|
|
|
link->cb = cb;
|
|
|
|
}
|
|
|
|
|
|
|
|
void net_if_unregister_link_cb(struct net_if_link_cb *link)
|
|
|
|
{
|
|
|
|
sys_slist_find_and_remove(&link_callbacks, &link->node);
|
|
|
|
}
|
|
|
|
|
|
|
|
void net_if_call_link_cb(struct net_if *iface, struct net_linkaddr *lladdr,
|
|
|
|
int status)
|
|
|
|
{
|
2017-02-08 16:08:23 +02:00
|
|
|
struct net_if_link_cb *link, *tmp;
|
2016-09-28 14:18:55 +03:00
|
|
|
|
2017-02-08 16:08:23 +02:00
|
|
|
SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&link_callbacks, link, tmp, node) {
|
2016-09-28 14:18:55 +03:00
|
|
|
link->cb(iface, lladdr, status);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-06 10:57:13 +02:00
|
|
|
static bool need_calc_checksum(struct net_if *iface, enum ethernet_hw_caps caps)
|
2018-03-14 10:55:19 +02:00
|
|
|
{
|
|
|
|
#if defined(CONFIG_NET_L2_ETHERNET)
|
2018-01-11 16:06:53 +02:00
|
|
|
if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) {
|
2018-03-14 10:55:19 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return !(net_eth_get_hw_capabilities(iface) & caps);
|
|
|
|
#else
|
|
|
|
return true;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
bool net_if_need_calc_tx_checksum(struct net_if *iface)
|
|
|
|
{
|
2018-04-06 10:57:13 +02:00
|
|
|
return need_calc_checksum(iface, ETHERNET_HW_TX_CHKSUM_OFFLOAD);
|
2018-03-14 10:55:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool net_if_need_calc_rx_checksum(struct net_if *iface)
|
|
|
|
{
|
2018-04-06 10:57:13 +02:00
|
|
|
return need_calc_checksum(iface, ETHERNET_HW_RX_CHKSUM_OFFLOAD);
|
2018-03-14 10:55:19 +02:00
|
|
|
}
|
|
|
|
|
2019-02-14 11:37:23 +02:00
|
|
|
struct net_if *net_if_get_by_index(int index)
|
2016-06-22 15:42:55 +03:00
|
|
|
{
|
2019-02-14 11:37:23 +02:00
|
|
|
if (index <= 0) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (&__net_if_start[index - 1] >= __net_if_end) {
|
2016-06-22 15:42:55 +03:00
|
|
|
NET_DBG("Index %d is too large", index);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-02-14 11:37:23 +02:00
|
|
|
return &__net_if_start[index - 1];
|
2016-06-22 15:42:55 +03:00
|
|
|
}
|
|
|
|
|
2019-02-14 11:37:23 +02:00
|
|
|
int net_if_get_by_iface(struct net_if *iface)
|
2016-06-22 15:42:55 +03:00
|
|
|
{
|
2019-02-14 11:37:23 +02:00
|
|
|
if (!(iface >= __net_if_start && iface < __net_if_end)) {
|
|
|
|
return -1;
|
|
|
|
}
|
2016-06-22 15:42:55 +03:00
|
|
|
|
2019-02-14 11:37:23 +02:00
|
|
|
return (iface - __net_if_start) + 1;
|
2016-06-22 15:42:55 +03:00
|
|
|
}
|
|
|
|
|
2016-10-11 18:28:09 +03:00
|
|
|
void net_if_foreach(net_if_cb_t cb, void *user_data)
|
|
|
|
{
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
for (iface = __net_if_start; iface != __net_if_end; iface++) {
|
|
|
|
cb(iface, user_data);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-19 14:18:10 +02:00
|
|
|
/* Bring @iface up.
 *
 * Idempotent: returns 0 immediately if the interface is already up.
 * Offloaded interfaces (IP- or socket-offload) only get the UP flag and
 * the NET_EVENT_IF_UP notification; L2 enable and address bring-up are
 * skipped. Otherwise the L2's enable() hook (when present) is invoked,
 * and on its failure that error is returned WITHOUT notifying listeners.
 *
 * @return 0 on success (or already up), negative errno from L2 enable.
 */
int net_if_up(struct net_if *iface)
{
	int status;

	NET_DBG("iface %p", iface);

	if (net_if_flag_is_set(iface, NET_IF_UP)) {
		return 0;
	}

	/* Offloaded interfaces: no local L2/IP machinery to start */
	if ((IS_ENABLED(CONFIG_NET_OFFLOAD) &&
	     net_if_is_ip_offloaded(iface)) ||
	    (IS_ENABLED(CONFIG_NET_SOCKETS_OFFLOAD) &&
	     net_if_is_socket_offloaded(iface))) {
		net_if_flag_set(iface, NET_IF_UP);
		goto exit;
	}

	/* If the L2 does not support enable just set the flag */
	if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
		goto done;
	}

	/* Notify L2 to enable the interface */
	status = net_if_l2(iface)->enable(iface, true);
	if (status < 0) {
		return status;
	}

done:
	/* In many places it's assumed that link address was set with
	 * net_if_set_link_addr(). Better check that now.
	 */
	NET_ASSERT(net_if_get_link_addr(iface)->addr != NULL);

	net_if_flag_set(iface, NET_IF_UP);

	/* If the interface is only having point-to-point traffic then we do
	 * not need to run DAD etc for it.
	 */
	if (!(l2_flags_get(iface) & NET_L2_POINT_TO_POINT)) {
		iface_ipv6_start(iface);

		net_ipv4_autoconf_start(iface);
	}

exit:
	net_mgmt_event_notify(NET_EVENT_IF_UP, iface);

	return 0;
}
|
|
|
|
|
2018-02-19 10:36:06 +02:00
|
|
|
/* Handle loss of physical carrier on @iface: clear the UP flag, reset
 * IPv4 autoconf state (so address probing restarts on the next link-up),
 * and notify listeners with NET_EVENT_IF_DOWN. Unlike net_if_down(),
 * this does not call the L2 enable(false) hook.
 */
void net_if_carrier_down(struct net_if *iface)
{
	NET_DBG("iface %p", iface);

	net_if_flag_clear(iface, NET_IF_UP);

	net_ipv4_autoconf_reset(iface);

	net_mgmt_event_notify(NET_EVENT_IF_DOWN, iface);
}
|
|
|
|
|
2016-12-19 14:18:10 +02:00
|
|
|
/* Take @iface down.
 *
 * Leaves all multicast groups first, then asks the L2 to disable the
 * interface (skipped for IP-offloaded interfaces and L2s without an
 * enable() hook). On L2 failure the error is returned and the UP flag
 * is left set, with no NET_EVENT_IF_DOWN notification.
 *
 * @return 0 on success, negative errno from the L2 enable() hook.
 */
int net_if_down(struct net_if *iface)
{
	int status;

	NET_DBG("iface %p", iface);

	leave_mcast_all(iface);

	if (net_if_is_ip_offloaded(iface)) {
		goto done;
	}

	/* If the L2 does not support enable just clear the flag */
	if (!net_if_l2(iface) || !net_if_l2(iface)->enable) {
		goto done;
	}

	/* Notify L2 to disable the interface */
	status = net_if_l2(iface)->enable(iface, false);
	if (status < 0) {
		return status;
	}

done:
	net_if_flag_clear(iface, NET_IF_UP);

	net_mgmt_event_notify(NET_EVENT_IF_DOWN, iface);

	return 0;
}
|
|
|
|
|
2018-11-12 16:30:51 +02:00
|
|
|
/* Enable or disable promiscuous mode on @iface.
 *
 * The L2 must advertise NET_L2_PROMISC_MODE; otherwise -ENOTSUP.
 * Only Ethernet has a driver-level toggle (net_eth_promisc_mode());
 * note that when CONFIG_NET_L2_ETHERNET is disabled this function
 * always returns -ENOTSUP, even if some L2 sets the flag.
 *
 * @return 0 on success, -ENOTSUP or the driver's negative errno.
 */
static int promisc_mode_set(struct net_if *iface, bool enable)
{
	enum net_l2_flags l2_flags = 0;

	NET_ASSERT(iface);

	l2_flags = l2_flags_get(iface);
	if (!(l2_flags & NET_L2_PROMISC_MODE)) {
		return -ENOTSUP;
	}

#if defined(CONFIG_NET_L2_ETHERNET)
	if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
		int ret = net_eth_promisc_mode(iface, enable);

		if (ret < 0) {
			return ret;
		}
	}
#else
	return -ENOTSUP;
#endif

	return 0;
}
|
|
|
|
|
|
|
|
int net_if_set_promisc(struct net_if *iface)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = promisc_mode_set(iface, true);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-04-16 10:55:34 +03:00
|
|
|
ret = net_if_flag_test_and_set(iface, NET_IF_PROMISC);
|
2018-07-20 16:42:54 +03:00
|
|
|
if (ret) {
|
|
|
|
return -EALREADY;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void net_if_unset_promisc(struct net_if *iface)
|
|
|
|
{
|
2018-11-12 16:30:51 +02:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = promisc_mode_set(iface, false);
|
|
|
|
if (ret < 0) {
|
|
|
|
return;
|
|
|
|
}
|
2018-07-20 16:42:54 +03:00
|
|
|
|
2019-04-16 10:55:34 +03:00
|
|
|
net_if_flag_clear(iface, NET_IF_PROMISC);
|
2018-07-20 16:42:54 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
bool net_if_is_promisc(struct net_if *iface)
|
|
|
|
{
|
|
|
|
NET_ASSERT(iface);
|
|
|
|
|
2019-04-16 10:55:34 +03:00
|
|
|
return net_if_flag_is_set(iface, NET_IF_PROMISC);
|
2018-07-20 16:42:54 +03:00
|
|
|
}
|
|
|
|
|
2020-02-25 09:42:35 +01:00
|
|
|
#ifdef CONFIG_NET_POWER_MANAGEMENT
|
|
|
|
|
|
|
|
int net_if_suspend(struct net_if *iface)
|
|
|
|
{
|
|
|
|
if (net_if_are_pending_tx_packets(iface)) {
|
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (net_if_flag_test_and_set(iface, NET_IF_SUSPENDED)) {
|
|
|
|
return -EALREADY;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int net_if_resume(struct net_if *iface)
|
|
|
|
{
|
|
|
|
if (!net_if_flag_is_set(iface, NET_IF_SUSPENDED)) {
|
|
|
|
return -EALREADY;
|
|
|
|
}
|
|
|
|
|
|
|
|
net_if_flag_clear(iface, NET_IF_SUSPENDED);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_NET_POWER_MANAGEMENT */
|
|
|
|
|
2019-05-30 18:32:07 +08:00
|
|
|
#if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
|
2018-01-24 15:20:21 +02:00
|
|
|
static void net_tx_ts_thread(void)
|
|
|
|
{
|
|
|
|
struct net_pkt *pkt;
|
|
|
|
|
|
|
|
NET_DBG("Starting TX timestamp callback thread");
|
|
|
|
|
|
|
|
while (1) {
|
|
|
|
pkt = k_fifo_get(&tx_ts_queue, K_FOREVER);
|
|
|
|
if (pkt) {
|
|
|
|
net_if_call_timestamp_cb(pkt);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void net_if_register_timestamp_cb(struct net_if_timestamp_cb *handle,
|
2018-06-04 14:23:12 +02:00
|
|
|
struct net_pkt *pkt,
|
2018-01-24 15:20:21 +02:00
|
|
|
struct net_if *iface,
|
|
|
|
net_if_timestamp_callback_t cb)
|
|
|
|
{
|
|
|
|
sys_slist_find_and_remove(×tamp_callbacks, &handle->node);
|
|
|
|
sys_slist_prepend(×tamp_callbacks, &handle->node);
|
|
|
|
|
|
|
|
handle->iface = iface;
|
|
|
|
handle->cb = cb;
|
2018-06-04 14:23:12 +02:00
|
|
|
handle->pkt = pkt;
|
2018-01-24 15:20:21 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void net_if_unregister_timestamp_cb(struct net_if_timestamp_cb *handle)
|
|
|
|
{
|
|
|
|
sys_slist_find_and_remove(×tamp_callbacks, &handle->node);
|
|
|
|
}
|
|
|
|
|
|
|
|
void net_if_call_timestamp_cb(struct net_pkt *pkt)
|
|
|
|
{
|
|
|
|
sys_snode_t *sn, *sns;
|
|
|
|
|
|
|
|
SYS_SLIST_FOR_EACH_NODE_SAFE(×tamp_callbacks, sn, sns) {
|
|
|
|
struct net_if_timestamp_cb *handle =
|
|
|
|
CONTAINER_OF(sn, struct net_if_timestamp_cb, node);
|
|
|
|
|
2018-06-04 14:23:12 +02:00
|
|
|
if (((handle->iface == NULL) ||
|
|
|
|
(handle->iface == net_pkt_iface(pkt))) &&
|
|
|
|
(handle->pkt == NULL || handle->pkt == pkt)) {
|
2018-01-24 15:20:21 +02:00
|
|
|
handle->cb(pkt);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void net_if_add_tx_timestamp(struct net_pkt *pkt)
|
|
|
|
{
|
|
|
|
k_fifo_put(&tx_ts_queue, pkt);
|
|
|
|
}
|
2019-05-30 18:32:07 +08:00
|
|
|
#endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */
|
2018-01-24 15:20:21 +02:00
|
|
|
|
2018-02-07 15:00:08 +02:00
|
|
|
/* One-time initialization of every network interface: start the TX
 * traffic-class machinery, run init_iface() on each interface, then
 * initialize per-family address pools and the router table. Optionally
 * spawns the TX timestamp thread and sanity-checks the VLAN count.
 */
void net_if_init(void)
{
	struct net_if *iface;
	int if_count;

	NET_DBG("");

	net_tc_tx_init();

	for (iface = __net_if_start, if_count = 0; iface != __net_if_end;
	     iface++, if_count++) {
		init_iface(iface);
	}

	/* After the loop iface == __net_if_end, so this only triggers
	 * when the interface section is empty (start == end).
	 */
	if (iface == __net_if_start) {
		NET_ERR("There is no network interface to work with!");
		return;
	}

	iface_ipv6_init(if_count);
	iface_ipv4_init(if_count);
	iface_router_init();

#if defined(CONFIG_NET_PKT_TIMESTAMP_THREAD)
	/* Thread that calls TX timestamp callbacks outside driver context */
	k_thread_create(&tx_thread_ts, tx_ts_stack,
			K_THREAD_STACK_SIZEOF(tx_ts_stack),
			(k_thread_entry_t)net_tx_ts_thread,
			NULL, NULL, NULL, K_PRIO_COOP(1), 0, K_NO_WAIT);
	k_thread_name_set(&tx_thread_ts, "tx_tstamp");
#endif /* CONFIG_NET_PKT_TIMESTAMP_THREAD */

#if defined(CONFIG_NET_VLAN)
	/* Make sure that we do not have too many network interfaces
	 * compared to the number of VLAN interfaces.
	 */
	for (iface = __net_if_start, if_count = 0;
	     iface != __net_if_end; iface++) {
		if (net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET)) {
			if_count++;
		}
	}

	if (if_count > CONFIG_NET_VLAN_COUNT) {
		NET_WARN("You have configured only %d VLAN interfaces"
			 " but you have %d network interfaces.",
			 CONFIG_NET_VLAN_COUNT, if_count);
	}
#endif
}
|
|
|
|
|
|
|
|
void net_if_post_init(void)
|
|
|
|
{
|
|
|
|
struct net_if *iface;
|
|
|
|
|
|
|
|
NET_DBG("");
|
|
|
|
|
|
|
|
/* After TX is running, attempt to bring the interface up */
|
|
|
|
for (iface = __net_if_start; iface != __net_if_end; iface++) {
|
2019-04-16 10:55:57 +03:00
|
|
|
if (!net_if_flag_is_set(iface, NET_IF_NO_AUTO_START)) {
|
|
|
|
net_if_up(iface);
|
|
|
|
}
|
2017-03-13 15:50:57 +02:00
|
|
|
}
|
2016-05-02 09:02:04 +02:00
|
|
|
}
|