diff --git a/include/linker/common-rom.ld b/include/linker/common-rom.ld index 4c914f1bb20..6b28bc4f365 100644 --- a/include/linker/common-rom.ld +++ b/include/linker/common-rom.ld @@ -82,6 +82,16 @@ } GROUP_LINK_IN(ROMABLE_REGION) #endif +#if defined(CONFIG_NET_L2_PPP) + SECTION_PROLOGUE(net_ppp_proto,,) + { + __net_ppp_proto_start = .; + *(".net_ppp_proto.*") + KEEP(*(SORT_BY_NAME(".net_ppp_proto.*"))) + __net_ppp_proto_end = .; + } GROUP_LINK_IN(ROMABLE_REGION) +#endif + SECTION_DATA_PROLOGUE(_bt_channels_area,,SUBALIGN(4)) { _bt_l2cap_fixed_chan_list_start = .; diff --git a/include/net/net_l2.h b/include/net/net_l2.h index f3c063a854e..6b141168edb 100644 --- a/include/net/net_l2.h +++ b/include/net/net_l2.h @@ -37,6 +37,11 @@ enum net_l2_flags { /** Is promiscuous mode supported */ NET_L2_PROMISC_MODE = BIT(2), + + /** Is this L2 point-to-point with tunneling so no need to have + * IP address etc to network interface. + */ + NET_L2_POINT_TO_POINT = BIT(3), } __packed; /** @@ -88,6 +93,11 @@ NET_L2_DECLARE_PUBLIC(DUMMY_L2); NET_L2_DECLARE_PUBLIC(ETHERNET_L2); #endif /* CONFIG_NET_L2_ETHERNET */ +#ifdef CONFIG_NET_L2_PPP +#define PPP_L2 PPP +NET_L2_DECLARE_PUBLIC(PPP_L2); +#endif /* CONFIG_NET_L2_PPP */ + #ifdef CONFIG_NET_L2_IEEE802154 #define IEEE802154_L2 IEEE802154 NET_L2_DECLARE_PUBLIC(IEEE802154_L2); diff --git a/include/net/net_pkt.h b/include/net/net_pkt.h index ecee22167d0..4793222c7dd 100644 --- a/include/net/net_pkt.h +++ b/include/net/net_pkt.h @@ -158,6 +158,7 @@ struct net_pkt { * Note: family needs to be * AF_UNSPEC. */ + u8_t ppp_msg : 1; /* This is a PPP message */ }; union { @@ -848,6 +849,33 @@ static inline void net_pkt_set_lldp(struct net_pkt *pkt, bool is_lldp) } #endif /* CONFIG_NET_LLDP */ +#if defined(CONFIG_NET_PPP) +static inline bool net_pkt_is_ppp(struct net_pkt *pkt) +{ + return pkt->ppp_msg; +} + +static inline void net_pkt_set_ppp(struct net_pkt *pkt, + bool is_ppp_msg) +{ + pkt->ppp_msg = is_ppp_msg; +} +#else /* CONFIG_NET_PPP */ +static inline bool net_pkt_is_ppp(struct net_pkt *pkt) +{ + ARG_UNUSED(pkt); + + return false; +} + +static inline void net_pkt_set_ppp(struct net_pkt *pkt, + bool is_ppp_msg) +{ + ARG_UNUSED(pkt); + ARG_UNUSED(is_ppp_msg); +} +#endif /* CONFIG_NET_PPP */ + #define NET_IPV6_HDR(pkt) ((struct net_ipv6_hdr *)net_pkt_ip_data(pkt)) #define NET_IPV4_HDR(pkt) ((struct net_ipv4_hdr *)net_pkt_ip_data(pkt)) diff --git a/include/net/net_stats.h b/include/net/net_stats.h index 76ad74776c1..f2326c5840a 100644 --- a/include/net/net_stats.h +++ b/include/net/net_stats.h @@ -376,6 +376,20 @@ struct net_stats_eth { #endif }; +/** + * @brief All PPP specific statistics + */ +struct net_stats_ppp { + struct net_stats_bytes bytes; + struct net_stats_pkts pkts; + + /** Number of received and dropped PPP frames. */ + net_stats_t drop; + + /** Number of received PPP frames with a bad checksum. 
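+	 * Like the other counters in this struct, the value is collected
+	 * by the PPP driver and exposed through the optional get_stats()
+	 * callback in struct ppp_api (see include/net/ppp.h).
+	 *
+	 * A hedged sketch of fetching the whole statistics block via the
+	 * management request added later in this file (the iface pointer
+	 * is an assumption, and CONFIG_NET_STATISTICS_USER_API must be
+	 * enabled for the request handler to exist):
+	 *
+	 * @code
+	 * struct net_stats_ppp ppp_stats;
+	 *
+	 * net_mgmt(NET_REQUEST_STATS_GET_PPP, iface,
+	 *          &ppp_stats, sizeof(ppp_stats));
+	 * @endcode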
*/ + net_stats_t chkerr; +}; + #if defined(CONFIG_NET_STATISTICS_USER_API) /* Management part definitions */ @@ -398,6 +412,7 @@ enum net_request_stats_cmd { NET_REQUEST_STATS_CMD_GET_UDP, NET_REQUEST_STATS_CMD_GET_TCP, NET_REQUEST_STATS_CMD_GET_ETHERNET, + NET_REQUEST_STATS_CMD_GET_PPP, }; #define NET_REQUEST_STATS_GET_ALL \ @@ -469,6 +484,13 @@ NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_TCP); NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_ETHERNET); #endif /* CONFIG_NET_STATISTICS_ETHERNET */ +#if defined(CONFIG_NET_STATISTICS_PPP) +#define NET_REQUEST_STATS_GET_PPP \ + (_NET_STATS_BASE | NET_REQUEST_STATS_CMD_GET_PPP) + +NET_MGMT_DEFINE_REQUEST_HANDLER(NET_REQUEST_STATS_GET_PPP); +#endif /* CONFIG_NET_STATISTICS_PPP */ + #endif /* CONFIG_NET_STATISTICS_USER_API */ /** diff --git a/include/net/ppp.h b/include/net/ppp.h new file mode 100644 index 00000000000..9bd7e833eb1 --- /dev/null +++ b/include/net/ppp.h @@ -0,0 +1,541 @@ +/* + * Copyright (c) 2019 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +#ifndef ZEPHYR_INCLUDE_NET_PPP_H_ +#define ZEPHYR_INCLUDE_NET_PPP_H_ + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Point-to-point (PPP) L2/driver support functions + * @defgroup ppp PPP L2/driver Support Functions + * @ingroup networking + * @{ + */ + +/** PPP maximum receive unit (MRU) */ +#define PPP_MRU 1500 + +/** PPP maximum transfer unit (MTU) */ +#define PPP_MTU PPP_MRU + +/** Max length of terminate description string */ +#define PPP_MAX_TERMINATE_REASON_LEN 32 + +/** PPP L2 API */ +struct ppp_api { + /** + * The net_if_api must be placed in first position in this + * struct so that we are compatible with network interface API. + */ + struct net_if_api iface_api; + + /** Start the device */ + int (*start)(struct device *dev); + + /** Stop the device */ + int (*stop)(struct device *dev); + + /** Send a network packet */ + int (*send)(struct device *dev, struct net_pkt *pkt); + +#if defined(CONFIG_NET_STATISTICS_PPP) + /** Collect optional PPP specific statistics. This pointer + * should be set by driver if statistics needs to be collected + * for that driver. + */ + struct net_stats_ppp *(*get_stats)(struct device *dev); +#endif +}; + +/** + * PPP protocol types. + * See https://www.iana.org/assignments/ppp-numbers/ppp-numbers.xhtml + * for details. + */ +enum ppp_protocol_type { + PPP_IP = 0x0021, /**< RFC 1332 */ + PPP_IPV6 = 0x0057, /**< RFC 5072 */ + PPP_IPCP = 0x8021, /**< RFC 1332 */ + PPP_ECP = 0x8053, /**< RFC 1968 */ + PPP_CCP = 0x80FD, /**< RFC 1962 */ + PPP_LCP = 0xc021, /**< RFC 1661 */ +}; + +/** + * PPP phases + */ +enum ppp_phase { + /** Physical-layer not ready */ + PPP_DEAD, + /** Link is being established */ + PPP_ESTABLISH, + /** Link authentication with peer */ + PPP_AUTH, + /** Network connection establishment */ + PPP_NETWORK, + /** Network running */ + PPP_RUNNING, + /** Link termination */ + PPP_TERMINATE, +}; + +/** + * PPP states, RFC 1661 ch. 
4.2 + */ +enum ppp_state { + PPP_INITIAL, + PPP_STARTING, + PPP_CLOSED, + PPP_STOPPED, + PPP_CLOSING, + PPP_STOPPING, + PPP_REQUEST_SENT, + PPP_ACK_RECEIVED, + PPP_ACK_SENT, + PPP_OPENED +}; + +/** + * PPP protocol operations from RFC 1661 + */ +enum ppp_packet_type { + PPP_CONFIGURE_REQ = 1, + PPP_CONFIGURE_ACK = 2, + PPP_CONFIGURE_NACK = 3, + PPP_CONFIGURE_REJ = 4, + PPP_TERMINATE_REQ = 5, + PPP_TERMINATE_ACK = 6, + PPP_CODE_REJ = 7, + PPP_PROTOCOL_REJ = 8, + PPP_ECHO_REQ = 9, + PPP_ECHO_REPLY = 10, + PPP_DISCARD_REQ = 11 +}; + +/** + * LCP option types from RFC 1661 ch. 6 + */ +enum lcp_option_type { + LCP_OPTION_RESERVED = 0, + + /** Maximum-Receive-Unit */ + LCP_OPTION_MRU = 1, + + /** Async-Control-Character-Map */ + LCP_OPTION_ASYNC_CTRL_CHAR_MAP = 2, + + /** Authentication-Protocol */ + LCP_OPTION_AUTH_PROTO = 3, + + /** Quality-Protocol */ + LCP_OPTION_QUALITY_PROTO = 4, + + /** Magic-Number */ + LCP_OPTION_MAGIC_NUMBER = 5, + + /** Protocol-Field-Compression */ + LCP_OPTION_PROTO_COMPRESS = 7, + + /** Address-and-Control-Field-Compression */ + LCP_OPTION_ADDR_CTRL_COMPRESS = 8 +} __packed; + +/** + * IPCP option types from RFC 1332 + */ +enum ipcp_option_type { + IPCP_OPTION_RESERVED = 0, + + /** IP Addresses */ + IPCP_OPTION_IP_ADDRESSES = 1, + + /** IP Compression Protocol */ + IPCP_OPTION_IP_COMP_PROTO = 2, + + /** IP Address */ + IPCP_OPTION_IP_ADDRESS = 3, +} __packed; + +/** + * Generic PPP Finite State Machine + */ +struct ppp_fsm { + /** Timeout timer */ + struct k_delayed_work timer; + + /* We need to send a packet from separate thread so that we do not + * receive reply before we are ready to receive it. The issue was seen + * with QEMU where the link to peer is so fast that we received the + * reply before the net_send_data() returned. + */ + struct { + /** Packet sending timer. */ + struct k_delayed_work work; + + /** Packet to send */ + struct net_pkt *pkt; + } sender; + + struct { + /** Acknowledge Configuration Information */ + int (*config_info_ack)(struct ppp_fsm *fsm, + struct net_pkt *pkt, + u16_t length); + + /** Add Configuration Information */ + struct net_buf *(*config_info_add)(struct ppp_fsm *fsm); + + /** Length of Configuration Information */ + int (*config_info_len)(struct ppp_fsm *fsm); + + /** Negative Acknowledge Configuration Information */ + int (*config_info_nack)(struct ppp_fsm *fsm, + struct net_pkt *pkt, + u16_t length, + bool rejected); + + /** Request peer's Configuration Information */ + int (*config_info_req)(struct ppp_fsm *fsm, + struct net_pkt *pkt, + u16_t length, + struct net_buf **buf); + + /** Reject Configuration Information */ + int (*config_info_rej)(struct ppp_fsm *fsm, + struct net_pkt *pkt, + u16_t length); + + /** Reset Configuration Information */ + void (*config_info_reset)(struct ppp_fsm *fsm); + + /** FSM goes to OPENED state */ + void (*up)(struct ppp_fsm *fsm); + + /** FSM leaves OPENED state */ + void (*down)(struct ppp_fsm *fsm); + + /** Starting this protocol */ + void (*starting)(struct ppp_fsm *fsm); + + /** Quitting this protocol */ + void (*finished)(struct ppp_fsm *fsm); + + /** We received Protocol-Reject */ + void (*proto_reject)(struct ppp_fsm *fsm); + + /** Retransmit */ + void (*retransmit)(struct ppp_fsm *fsm); + + /** Any code that is not understood by PPP is passed to + * this FSM for further processing. 
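+	 * The handler should return NET_OK when it has consumed the
+	 * packet; if it returns NET_DROP (or if no handler is set at
+	 * all), the FSM replies to the peer with a Code-Reject.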
+ */ + enum net_verdict (*proto_extension)(struct ppp_fsm *fsm, + enum ppp_packet_type code, + u8_t id, + struct net_pkt *pkt); + } cb; + + /** Option bits */ + u32_t flags; + + /** Number of re-transmissions left */; + u32_t retransmits; + + /** Number of NACK loops since last ACK */ + u32_t nack_loops; + + /** Number of NACKs received */ + u32_t recv_nack_loops; + + /** Reason for closing protocol */ + char terminate_reason[PPP_MAX_TERMINATE_REASON_LEN]; + + /** PPP protocol number for this FSM */ + u16_t protocol; + + /** Current state of PPP link */ + enum ppp_state state; + + /** Protocol/layer name of this FSM (for debugging) */ + const char *name; + + /** Current id */ + u8_t id; + + /** Current request id */ + u8_t req_id; + + /** Have received valid Ack, Nack or Reject to a Request */ + u8_t ack_received : 1; +}; + +/** PPP configuration options */ +struct ppp_option_pkt { + /** Option value */ + struct net_pkt_cursor value; + + /** Option type */ + union { + enum lcp_option_type lcp; + enum ipcp_option_type ipcp; + } type; + + /** Option length */ + u8_t len; +}; + +struct lcp_options { + /** Magic number */ + u32_t magic; + + /** Async char map */ + u32_t async_map; + + /** Maximum Receive Unit value */ + u16_t mru; + + /* Flags what to negotiate */ + + /** Negotiate MRU */ + u16_t negotiate_mru : 1; + + /** Negotiate */ + u16_t negotiate_async_map :1; + + /** Negotiate HDLC protocol field compression*/ + u16_t negotiate_proto_compression :1; + + /** Negotiate HDLC address/control field compression */ + u16_t negotiate_addr_compression :1; + + /** Negotiate magic number */ + u16_t negotiate_magic :1; +}; + +struct ipcp_options { + /** IPv4 address */ + struct in_addr address; +}; + +/** PPP L2 context specific to certain network interface */ +struct ppp_context { + struct { + /** Carrier ON/OFF handler worker. This is used to create + * network interface UP/DOWN event when PPP L2 driver + * notices carrier ON/OFF situation. We must not create another + * network management event from inside management handler thus + * we use worker thread to trigger the UP/DOWN event. + */ + struct k_work work; + + /** Is the carrier enabled already */ + bool enabled; + } carrier_mgmt; + + struct { + /** Finite state machine for LCP */ + struct ppp_fsm fsm; + + /** Options that we want to request */ + struct lcp_options my_options; + + /** Options that peer want to request */ + struct lcp_options peer_options; + + /** Options that we accepted */ + struct lcp_options my_accepted; + + /** Options that peer accepted */ + struct lcp_options peer_accepted; + + /** Magic-Number value */ + u32_t magic; + } lcp; + +#if defined(CONFIG_NET_IPV4) + struct { + /** Finite state machine for IPCP */ + struct ppp_fsm fsm; + + /** Options that we want to request */ + struct ipcp_options my_options; + + /** Options that peer want to request */ + struct ipcp_options peer_options; + + /** Options that we accepted */ + struct ipcp_options my_accepted; + + /** Options that peer accepted */ + struct ipcp_options peer_accepted; + } ipcp; +#endif + +#if defined(CONFIG_NET_SHELL) + struct { + /** Used when waiting Echo-Reply */ + struct k_sem wait_echo_reply; + + /** Echo-Req data value */ + u32_t echo_req_data; + + /** Echo-Reply data value */ + u32_t echo_reply_data; + } shell; +#endif + + /** Network interface related to this PPP connection */ + struct net_if *iface; + + /** Current phase of PPP link */ + enum ppp_phase phase; + + /** This tells what features the PPP supports. 
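+	 * The value is a bitwise combination of the enum net_l2_flags
+	 * bits, for example NET_L2_POINT_TO_POINT added to
+	 * net/net_l2.h by this change.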
*/ + enum net_l2_flags ppp_l2_flags; + + /** This tells how many network protocols are open */ + int network_protos_open; + + /** This tells how many network protocols are up */ + int network_protos_up; + + /** Is this context already initialized */ + u16_t is_init : 1; + + /** Is PPP ready to receive packets */ + u16_t is_ready_to_serve : 1; + + /** Is PPP L2 enabled or not */ + u16_t is_enabled : 1; + + /** Network status (up / down) */ + u16_t is_network_up : 1; + + /** IPCP status (up / down) */ + u16_t is_ipcp_up : 1; + + /** IPCP open status (open / closed) */ + u16_t is_ipcp_open : 1; +}; + +/** + * @brief Inform PPP L2 driver that carrier is detected. + * This happens when cable is connected etc. + * + * @param iface Network interface + */ +void net_ppp_carrier_on(struct net_if *iface); + +/** + * @brief Inform PPP L2 driver that carrier was lost. + * This happens when cable is disconnected etc. + * + * @param iface Network interface + */ +void net_ppp_carrier_off(struct net_if *iface); + +/** + * @brief Initialize PPP L2 stack for a given interface + * + * @param iface A valid pointer to a network interface + */ +void net_ppp_init(struct net_if *iface); + +/* Management API for PPP */ + +/** @cond INTERNAL_HIDDEN */ + +#define PPP_L2_CTX_TYPE struct ppp_context + +#define _NET_PPP_LAYER NET_MGMT_LAYER_L2 +#define _NET_PPP_CODE 0x209 +#define _NET_PPP_BASE (NET_MGMT_IFACE_BIT | \ + NET_MGMT_LAYER(_NET_PPP_LAYER) | \ + NET_MGMT_LAYER_CODE(_NET_PPP_CODE)) +#define _NET_PPP_EVENT (_NET_PPP_BASE | NET_MGMT_EVENT_BIT) + +enum net_event_ppp_cmd { + NET_EVENT_PPP_CMD_CARRIER_ON = 1, + NET_EVENT_PPP_CMD_CARRIER_OFF, +}; + +#define NET_EVENT_PPP_CARRIER_ON \ + (_NET_PPP_EVENT | NET_EVENT_PPP_CMD_CARRIER_ON) + +#define NET_EVENT_PPP_CARRIER_OFF \ + (_NET_PPP_EVENT | NET_EVENT_PPP_CMD_CARRIER_OFF) + +struct net_if; + +/** @endcond */ + +/** + * @brief Raise CARRIER_ON event when PPP is connected. + * + * @param iface PPP network interface. + */ +#if defined(CONFIG_NET_L2_PPP_MGMT) +void ppp_mgmt_raise_carrier_on_event(struct net_if *iface); +#else +static inline void ppp_mgmt_raise_carrier_on_event(struct net_if *iface) +{ + ARG_UNUSED(iface); +} +#endif + +/** + * @brief Raise CARRIER_OFF event when PPP is disconnected. + * + * @param iface PPP network interface. + */ +#if defined(CONFIG_NET_L2_PPP_MGMT) +void ppp_mgmt_raise_carrier_off_event(struct net_if *iface); +#else +static inline void ppp_mgmt_raise_carrier_off_event(struct net_if *iface) +{ + ARG_UNUSED(iface); +} +#endif + +/** + * @brief Send PPP Echo-Request to peer. We expect to receive Echo-Reply back. + * + * @param idx PPP network interface index + * @param timeout Amount of time to wait Echo-Reply. + * + * @return 0 if Echo-Reply was received, < 0 if there is a timeout or network + * index is not a valid PPP network index. 
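+ *
+ * A minimal usage sketch; the interface index 0 and the 1000 ms
+ * timeout below are illustrative assumptions only:
+ *
+ * @code
+ * int ret = net_ppp_ping(0, 1000);
+ *
+ * if (ret < 0) {
+ *         printk("No Echo-Reply from peer (%d)\n", ret);
+ * }
+ * @endcode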
+ */ +#if defined(CONFIG_NET_L2_PPP) +int net_ppp_ping(int idx, s32_t timeout); +#else +static inline int net_ppp_ping(int idx, s32_t timeout) +{ + ARG_UNUSED(idx); + ARG_UNUSED(timeout); + + return -ENOTSUP; +} +#endif + +#ifdef __cplusplus +} +#endif + +/** + * @} + */ + +#endif /* ZEPHYR_INCLUDE_NET_PPP_H_ */ diff --git a/scripts/sanitycheck b/scripts/sanitycheck index d68cf5a9cdd..3fac9451486 100755 --- a/scripts/sanitycheck +++ b/scripts/sanitycheck @@ -922,7 +922,7 @@ class SizeCalculator: "rodata", "devconfig", "net_l2", "vector", "sw_isr_table", "_settings_handlers_area", "_bt_channels_area", "_bt_br_channels_area", "_bt_services_area", - "vectors", "net_socket_register"] + "vectors", "net_socket_register", "net_ppp_proto"] def __init__(self, filename, extra_sections): """Constructor diff --git a/subsys/net/ip/Kconfig b/subsys/net/ip/Kconfig index 85fe84a7eec..53983b3a130 100644 --- a/subsys/net/ip/Kconfig +++ b/subsys/net/ip/Kconfig @@ -22,6 +22,7 @@ if !NET_RAW_MODE choice prompt "Qemu networking" + default NET_QEMU_PPP if NET_PPP default NET_QEMU_SLIP depends on QEMU_TARGET help @@ -34,6 +35,11 @@ config NET_QEMU_SLIP help Connect to host or to another Qemu via SLIP. +config NET_QEMU_PPP + bool "PPP" + help + Connect to host via PPP. + config NET_QEMU_ETHERNET bool "Ethernet" help diff --git a/subsys/net/ip/Kconfig.stats b/subsys/net/ip/Kconfig.stats index fc3508dcaed..f402bd54976 100644 --- a/subsys/net/ip/Kconfig.stats +++ b/subsys/net/ip/Kconfig.stats @@ -89,6 +89,13 @@ config NET_STATISTICS_MLD help Keep track of MLD related statistics +config NET_STATISTICS_PPP + bool "Point-to-point (PPP) statistics" + depends on NET_PPP + default y + help + Keep track of PPP related statistics + config NET_STATISTICS_ETHERNET bool "Ethernet statistics" depends on NET_L2_ETHERNET diff --git a/subsys/net/l2/CMakeLists.txt b/subsys/net/l2/CMakeLists.txt index 843c8b57245..2170441cf40 100644 --- a/subsys/net/l2/CMakeLists.txt +++ b/subsys/net/l2/CMakeLists.txt @@ -12,6 +12,10 @@ if(CONFIG_NET_L2_ETHERNET) add_subdirectory(ethernet) endif() +if(CONFIG_NET_L2_PPP) + add_subdirectory(ppp) +endif() + if(CONFIG_NET_L2_IEEE802154) add_subdirectory(ieee802154) endif() diff --git a/subsys/net/l2/Kconfig b/subsys/net/l2/Kconfig index fdecd43b679..1cc1a566206 100644 --- a/subsys/net/l2/Kconfig +++ b/subsys/net/l2/Kconfig @@ -71,6 +71,8 @@ config NET_L2_BT_SHELL source "subsys/net/l2/ethernet/Kconfig" +source "subsys/net/l2/ppp/Kconfig" + source "subsys/net/l2/ieee802154/Kconfig" source "subsys/net/l2/openthread/Kconfig" diff --git a/subsys/net/l2/ppp/CMakeLists.txt b/subsys/net/l2/ppp/CMakeLists.txt new file mode 100644 index 00000000000..604e7d63f0f --- /dev/null +++ b/subsys/net/l2/ppp/CMakeLists.txt @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: Apache-2.0 + +zephyr_library() +zephyr_library_include_directories(. 
${ZEPHYR_BASE}/subsys/net/ip) +zephyr_library_compile_definitions_ifdef( + CONFIG_NEWLIB_LIBC __LINUX_ERRNO_EXTENSIONS__ + ) + +zephyr_library_include_directories(${ZEPHYR_BASE}/subsys/net/ip) + +zephyr_library_sources_ifdef(CONFIG_NET_L2_PPP + ppp_l2.c + fsm.c + lcp.c + auth.c + options.c + link.c + network.c + misc.c) + +zephyr_library_sources_ifdef(CONFIG_NET_STATISTICS_PPP ppp_stats.c) + +if(CONFIG_NET_IPV4) + zephyr_library_sources_ifdef(CONFIG_NET_L2_PPP ipcp.c) +endif() diff --git a/subsys/net/l2/ppp/Kconfig b/subsys/net/l2/ppp/Kconfig new file mode 100644 index 00000000000..d1094e03474 --- /dev/null +++ b/subsys/net/l2/ppp/Kconfig @@ -0,0 +1,71 @@ +# +# Copyright (c) 2019 Intel Corporation. +# +# SPDX-License-Identifier: Apache-2.0 +# + +menuconfig NET_L2_PPP + bool "Enable point-to-point (PPP) support [EXPERIMENTAL]" + help + Add support for PPP. + +if NET_L2_PPP + +config NET_L2_PPP_TIMEOUT + int "Maximum timeout in ms for Configure-Req" + default 3000 + range 1 4294967295 + help + How long to wait Configure-Req. + +config NET_L2_PPP_MAX_CONFIGURE_REQ_RETRANSMITS + int "Maximum number of Configure-Req retransmits" + default 10 + range 0 4294967295 + help + How many times to resend Configure-Req messages before deciding the + link is not working properly. + +config NET_L2_PPP_MAX_TERMINATE_REQ_RETRANSMITS + int "Maximum number of Terminate-Req retransmits" + default 2 + range 0 4294967295 + help + How many times to resend Terminate-Req messages before terminating + the link. + +config NET_L2_PPP_MAX_NACK_LOOPS + int "Maximum number of NACK loops accepted" + default 5 + range 0 4294967295 + help + How many times to accept NACK loops. + +config NET_L2_PPP_MAX_OPTIONS + int "Maximum number of options supported" + default 8 + range 0 16 + help + How many options we support. This is used to allocate space for + each option. The default (8) is a reasonably small value. + +config NET_L2_PPP_OPTION_MRU_NEG + bool "Negotiate MRU option if needed" + help + Try to negotiate with peer for MRU (MTU) for the link. + +module = NET_L2_PPP +module-dep = NET_LOG +module-str = Log level for ppp L2 layer +module-help = Enables ppp L2 to output debug messages. +source "subsys/net/Kconfig.template.log_config.net" + +config NET_L2_PPP_MGMT + bool "Enable ppp network management interface" + select NET_MGMT + select NET_MGMT_EVENT + help + Enable support net_mgmt ppp interface which can be used to + configure at run-time ppp drivers and L2 settings. + +endif # NET_L2_PPP diff --git a/subsys/net/l2/ppp/auth.c b/subsys/net/l2/ppp/auth.c new file mode 100644 index 00000000000..d3d40034b5f --- /dev/null +++ b/subsys/net/l2/ppp/auth.c @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2019 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +LOG_MODULE_DECLARE(net_l2_ppp, CONFIG_NET_L2_PPP_LOG_LEVEL); + +#include +#include + +#include + +#include "net_private.h" + +#include "ppp_internal.h" + + diff --git a/subsys/net/l2/ppp/fsm.c b/subsys/net/l2/ppp/fsm.c new file mode 100644 index 00000000000..158b74c5efc --- /dev/null +++ b/subsys/net/l2/ppp/fsm.c @@ -0,0 +1,1096 @@ +/* + * Copyright (c) 2019 Intel Corporation. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +LOG_MODULE_DECLARE(net_l2_ppp, CONFIG_NET_L2_PPP_LOG_LEVEL); + +#include +#include + +#include + +#include "net_private.h" + +#include "ppp_internal.h" + +#define BUF_ALLOC_TIMEOUT K_MSEC(100) + +/* This timeout is in milliseconds */ +#define FSM_TIMEOUT CONFIG_NET_L2_PPP_TIMEOUT + +#define MAX_NACK_LOOPS CONFIG_NET_L2_PPP_MAX_NACK_LOOPS + +static void fsm_send_configure_req(struct ppp_fsm *fsm, bool retransmit) +{ + struct net_buf *options = NULL; + + if (fsm->state != PPP_ACK_RECEIVED && + fsm->state != PPP_ACK_SENT && + fsm->state != PPP_REQUEST_SENT) { + /* If we are not negotiating options, then reset them */ + if (fsm->cb.config_info_reset) { + fsm->cb.config_info_reset(fsm); + } + + fsm->recv_nack_loops = 0; + fsm->nack_loops = 0; + } + + if (!retransmit) { + fsm->retransmits = MAX_CONFIGURE_REQ; + fsm->req_id = ++fsm->id; + } + + fsm->ack_received = false; + + if (fsm->cb.config_info_add) { + options = fsm->cb.config_info_add(fsm); + } + + NET_DBG("[%s/%p] Sending %s (%d) id %d to peer while in %s (%d)", + fsm->name, fsm, ppp_pkt_type2str(PPP_CONFIGURE_REQ), + PPP_CONFIGURE_REQ, fsm->req_id, ppp_state_str(fsm->state), + fsm->state); + + (void)ppp_send_pkt(fsm, NULL, PPP_CONFIGURE_REQ, fsm->req_id, + options, options ? net_buf_frags_len(options) : 0); + + fsm->retransmits--; + + (void)k_delayed_work_submit(&fsm->timer, FSM_TIMEOUT); +} + +static void ppp_fsm_timeout(struct k_work *work) +{ + struct ppp_fsm *fsm = CONTAINER_OF(work, struct ppp_fsm, timer); + + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); + + switch (fsm->state) { + case PPP_ACK_RECEIVED: + case PPP_ACK_SENT: + case PPP_REQUEST_SENT: + if (fsm->retransmits <= 0) { + NET_DBG("[%s/%p] %s retransmit limit %d reached", + fsm->name, fsm, + ppp_pkt_type2str(PPP_CONFIGURE_REQ), + fsm->retransmits); + + ppp_change_state(fsm, PPP_STOPPED); + + if (fsm->cb.finished) { + fsm->cb.finished(fsm); + } + } else { + if (fsm->cb.retransmit) { + fsm->cb.retransmit(fsm); + } + + fsm_send_configure_req(fsm, true); + + if (fsm->state == PPP_ACK_RECEIVED) { + ppp_change_state(fsm, PPP_REQUEST_SENT); + } + } + + break; + + case PPP_CLOSING: + case PPP_STOPPING: + if (fsm->retransmits <= 0) { + ppp_change_state(fsm, + fsm->state == PPP_CLOSING ? 
+ PPP_CLOSED : PPP_STOPPED); + + if (fsm->cb.finished) { + fsm->cb.finished(fsm); + } + } else { + fsm->req_id = ++fsm->id; + + ppp_send_pkt(fsm, NULL, PPP_TERMINATE_REQ, fsm->req_id, + fsm->terminate_reason, + strlen(fsm->terminate_reason)); + + fsm->retransmits--; + + (void)k_delayed_work_submit(&fsm->timer, + FSM_TIMEOUT); + } + + break; + + default: + NET_DBG("[%s/%p] %s state %s (%d)", fsm->name, fsm, "Invalid", + ppp_state_str(fsm->state), fsm->state); + break; + } +} + +static void ppp_pkt_send(struct k_work *work) +{ + struct ppp_fsm *fsm = CONTAINER_OF(work, struct ppp_fsm, + sender.work); + int ret; + + ret = net_send_data(fsm->sender.pkt); + if (ret < 0) { + net_pkt_unref(fsm->sender.pkt); + } +} + + +void ppp_fsm_init(struct ppp_fsm *fsm, u16_t protocol) +{ + fsm->protocol = protocol; + fsm->state = PPP_INITIAL; + fsm->flags = 0U; + + k_delayed_work_init(&fsm->timer, ppp_fsm_timeout); + k_delayed_work_init(&fsm->sender.work, ppp_pkt_send); +} + +static void terminate(struct ppp_fsm *fsm, enum ppp_state next_state) +{ + if (fsm->state != PPP_OPENED) { + k_delayed_work_cancel(&fsm->timer); + } else if (fsm->cb.down) { + fsm->cb.down(fsm); + } + + fsm->retransmits = MAX_CONFIGURE_REQ; + fsm->req_id = ++fsm->id; + + (void)ppp_send_pkt(fsm, NULL, PPP_TERMINATE_REQ, fsm->req_id, + fsm->terminate_reason, + strlen(fsm->terminate_reason)); + + if (fsm->retransmits == 0) { + ppp_change_state(fsm, next_state); + + if (fsm->cb.finished) { + fsm->cb.finished(fsm); + } + + return; + } + + (void)k_delayed_work_submit(&fsm->timer, FSM_TIMEOUT); + + fsm->retransmits--; + + ppp_change_state(fsm, next_state); +} + +void ppp_fsm_close(struct ppp_fsm *fsm, const u8_t *reason) +{ + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); + + switch (fsm->state) { + case PPP_ACK_RECEIVED: + case PPP_ACK_SENT: + case PPP_OPENED: + case PPP_REQUEST_SENT: + if (reason) { + int len = strlen(reason); + + len = MIN(sizeof(fsm->terminate_reason) - 1, len); + strncpy(fsm->terminate_reason, reason, len); + } + + terminate(fsm, PPP_CLOSING); + break; + + case PPP_INITIAL: + case PPP_STARTING: + ppp_change_state(fsm, PPP_INITIAL); + break; + + case PPP_STOPPED: + ppp_change_state(fsm, PPP_CLOSED); + break; + + case PPP_STOPPING: + ppp_change_state(fsm, PPP_CLOSING); + break; + + default: + NET_DBG("[%s/%p] %s state %s (%d)", fsm->name, fsm, "Invalid", + ppp_state_str(fsm->state), fsm->state); + break; + } +} + +void ppp_fsm_lower_down(struct ppp_fsm *fsm) +{ + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); + + switch (fsm->state) { + case PPP_ACK_RECEIVED: + case PPP_ACK_SENT: + case PPP_REQUEST_SENT: + case PPP_STOPPING: + ppp_change_state(fsm, PPP_STARTING); + k_delayed_work_cancel(&fsm->timer); + break; + + case PPP_CLOSED: + ppp_change_state(fsm, PPP_INITIAL); + break; + + case PPP_CLOSING: + ppp_change_state(fsm, PPP_INITIAL); + k_delayed_work_cancel(&fsm->timer); + break; + + case PPP_OPENED: + ppp_change_state(fsm, PPP_STARTING); + if (fsm->cb.down) { + fsm->cb.down(fsm); + } + + break; + + case PPP_STOPPED: + ppp_change_state(fsm, PPP_STARTING); + if (fsm->cb.starting) { + fsm->cb.starting(fsm); + } + + break; + + default: + NET_DBG("[%s/%p] %s state %s (%d)", fsm->name, fsm, "Invalid", + ppp_state_str(fsm->state), fsm->state); + break; + } +} + +void ppp_fsm_lower_up(struct ppp_fsm *fsm) +{ + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); + + switch 
(fsm->state) { + case PPP_CLOSED: + break; + + case PPP_INITIAL: + ppp_change_state(fsm, PPP_CLOSED); + break; + + case PPP_STARTING: + fsm_send_configure_req(fsm, false); + ppp_change_state(fsm, PPP_REQUEST_SENT); + + break; + + default: + NET_DBG("[%s/%p] %s state %s (%d)", fsm->name, fsm, "Invalid", + ppp_state_str(fsm->state), fsm->state); + break; + } +} + +void ppp_fsm_open(struct ppp_fsm *fsm) +{ + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); + + switch (fsm->state) { + case PPP_CLOSED: + fsm_send_configure_req(fsm, false); + ppp_change_state(fsm, PPP_REQUEST_SENT); + break; + + case PPP_CLOSING: + ppp_change_state(fsm, PPP_STOPPING); + if (fsm->flags & FSM_RESTART) { + ppp_fsm_lower_down(fsm); + ppp_fsm_lower_up(fsm); + } + + break; + + case PPP_INITIAL: + ppp_change_state(fsm, PPP_STARTING); + if (fsm->cb.starting) { + fsm->cb.starting(fsm); + } + + break; + + case PPP_OPENED: + case PPP_STOPPED: + if (fsm->flags & FSM_RESTART) { + ppp_fsm_lower_down(fsm); + ppp_fsm_lower_up(fsm); + } + + break; + + default: + NET_DBG("[%s/%p] %s state %s (%d)", fsm->name, fsm, "Invalid", + ppp_state_str(fsm->state), fsm->state); + break; + } +} + +int ppp_send_pkt(struct ppp_fsm *fsm, struct net_if *iface, + enum ppp_packet_type type, u8_t id, + void *data, u32_t data_len) +{ + /* Note that the data parameter is the received PPP packet if + * we want to send PROTOCOL or CODE reject packet. + */ + struct net_pkt *req_pkt = data; + struct ppp_packet ppp; + struct net_pkt *pkt; + size_t len = 0; + int ret; + + if (!iface) { + struct ppp_context *ctx; + + if (fsm->protocol == PPP_LCP) { + ctx = CONTAINER_OF(fsm, struct ppp_context, lcp.fsm); +#if defined(CONFIG_NET_IPV4) + } else if (fsm->protocol == PPP_IPCP) { + ctx = CONTAINER_OF(fsm, struct ppp_context, ipcp.fsm); +#endif + } else { + return -ENOENT; + } + + NET_ASSERT(ctx->iface); + + iface = ctx->iface; + } + + switch (type) { + case PPP_PROTOCOL_REJ: + case PPP_CODE_REJ: + len = net_pkt_get_len(req_pkt); + len = MIN(len, PPP_MRU); + break; + + case PPP_CONFIGURE_ACK: + case PPP_CONFIGURE_NACK: + case PPP_CONFIGURE_REJ: + case PPP_CONFIGURE_REQ: + /* 2 + 1 + 1 (configure-[req|ack|nack|rej]) + + * data_len (options) + */ + len = sizeof(u16_t) + sizeof(u8_t) + sizeof(u8_t) + data_len; + break; + + case PPP_DISCARD_REQ: + break; + + case PPP_ECHO_REQ: + len = sizeof(u16_t) + sizeof(u8_t) + sizeof(u8_t) + + sizeof(u32_t) + data_len; + break; + + case PPP_ECHO_REPLY: + break; + + case PPP_TERMINATE_REQ: + case PPP_TERMINATE_ACK: + break; + + default: + break; + } + + if (len == 0) { + return -EINVAL; + } + + ppp.code = type; + ppp.id = id; + ppp.length = htons(len); + + pkt = net_pkt_alloc_with_buffer(iface, + sizeof(u16_t) + len, + AF_UNSPEC, 0, BUF_ALLOC_TIMEOUT); + if (!pkt) { + goto out_of_mem; + } + + ret = net_pkt_write_be16(pkt, fsm->protocol); + if (ret < 0) { + goto out_of_mem; + } + + ret = net_pkt_write(pkt, &ppp, sizeof(ppp)); + if (ret < 0) { + goto out_of_mem; + } + + if (type == PPP_CODE_REJ) { + if (!req_pkt) { + goto out_of_mem; + } + + net_pkt_cursor_init(req_pkt); + net_pkt_copy(pkt, req_pkt, len); + + } else if (type == PPP_ECHO_REQ) { + struct ppp_context *ctx = CONTAINER_OF(fsm, struct ppp_context, + lcp.fsm); + if (ctx->lcp.magic) { + ctx->lcp.magic = sys_rand32_get(); + } + + ret = net_pkt_write_be32(pkt, ctx->lcp.magic); + if (ret < 0) { + goto out_of_mem; + } + + data_len = MIN(data_len, PPP_MRU); + if (data_len > 0) { + if (data_len == sizeof(u32_t)) { + ret = 
net_pkt_write_be32(pkt, + POINTER_TO_UINT(data)); + } else { + ret = net_pkt_write(pkt, data, data_len); + } + + if (ret < 0) { + goto out_of_mem; + } + } + + } else if (type == PPP_CONFIGURE_ACK || type == PPP_CONFIGURE_REQ || + type == PPP_CONFIGURE_REJ || type == PPP_CONFIGURE_NACK) { + /* add options */ + if (data) { + net_buf_frag_add(pkt->buffer, data); + } + } + + NET_DBG("[%s/%p] Sending %zd bytes pkt %p (options len %d)", fsm->name, + fsm, net_pkt_get_len(pkt), pkt, data_len); + + net_pkt_set_ppp(pkt, true); + + /* Do not call net_send_data() directly in order to make this thread + * run before the sending happens. If we call the net_send_data() from + * this thread, then in fast link (like when running inside QEMU) the + * reply might arrive before we have returned from this function. That + * is bad because the fsm would be in wrong state and the received pkt + * is dropped. + */ + fsm->sender.pkt = pkt; + + /* FIXME: qemu_x86 crashes if timeout is 0 when running ppp + * driver unit test. As a workaround set the timeout to 1 msec + * in that case. + */ + (void)k_delayed_work_submit(&fsm->sender.work, + IS_ENABLED(CONFIG_NET_TEST) ? K_MSEC(1) : 0); + + return 0; + +out_of_mem: + if (pkt) { + net_pkt_unref(pkt); + } + + return -ENOMEM; +} + +static enum net_verdict fsm_recv_configure_req(struct ppp_fsm *fsm, + u8_t id, + struct net_pkt *pkt, + u16_t remaining_len) +{ + struct net_buf *buf = NULL; + int len = 0; + enum ppp_packet_type code; + + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); + + switch (fsm->state) { + case PPP_ACK_SENT: + case PPP_ACK_RECEIVED: + break; + + case PPP_CLOSED: + (void)ppp_send_pkt(fsm, net_pkt_iface(pkt), PPP_TERMINATE_ACK, + id, NULL, 0); + return NET_OK; + + case PPP_CLOSING: + case PPP_STOPPING: + return NET_OK; + + case PPP_OPENED: + if (fsm->cb.down) { + fsm->cb.down(fsm); + } + + fsm_send_configure_req(fsm, false); + ppp_change_state(fsm, PPP_REQUEST_SENT); + break; + + case PPP_REQUEST_SENT: + /* Received request while waiting ACK */ + break; + + case PPP_STOPPED: + fsm_send_configure_req(fsm, false); + ppp_change_state(fsm, PPP_REQUEST_SENT); + break; + + default: + NET_DBG("[%s/%p] %s state %s (%d)", fsm->name, fsm, "Invalid", + ppp_state_str(fsm->state), fsm->state); + return NET_DROP; + } + + if (fsm->cb.config_info_req) { + int ret; + + ret = fsm->cb.config_info_req(fsm, pkt, remaining_len, &buf); + if (ret < 0) { + return NET_DROP; + } + + if (fsm->nack_loops >= MAX_NACK_LOOPS && + ret == PPP_CONFIGURE_NACK) { + ret = PPP_CONFIGURE_REJ; + } + + code = ret; + len = net_buf_frags_len(buf); + + } else if (remaining_len) { + /* If there are any options at this point, then reject. 
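+		 * Sending Configure-Reject tells the peer (per RFC 1661)
+		 * not to include these options in later Configure-Requests.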
+ * TODO: construct the NACKed options buf + */ + code = PPP_CONFIGURE_REJ; + } else { + code = PPP_CONFIGURE_ACK; + } + + NET_DBG("[%s/%p] Sending %s (%d) id %d to peer while in %s (%d)", + fsm->name, fsm, ppp_pkt_type2str(code), code, id, + ppp_state_str(fsm->state), fsm->state); + + (void)ppp_send_pkt(fsm, NULL, code, id, buf, len); + + if (code == PPP_CONFIGURE_ACK) { + if (fsm->state == PPP_ACK_RECEIVED) { + k_delayed_work_cancel(&fsm->timer); + + ppp_change_state(fsm, PPP_OPENED); + + if (fsm->cb.up) { + fsm->cb.up(fsm); + } + } else { + ppp_change_state(fsm, PPP_ACK_SENT); + } + + fsm->nack_loops = 0; + } else { + if (fsm->state != PPP_ACK_RECEIVED) { + ppp_change_state(fsm, PPP_REQUEST_SENT); + } + + if (code == PPP_CONFIGURE_NACK) { + fsm->nack_loops++; + } + } + + return NET_OK; +} + +static enum net_verdict fsm_recv_configure_ack(struct ppp_fsm *fsm, u8_t id, + struct net_pkt *pkt, + u16_t remaining_len) +{ + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); + + if (id != fsm->req_id || fsm->ack_received) { + return NET_DROP; + } + + if (fsm->cb.config_info_ack) { + if (fsm->cb.config_info_ack(fsm, pkt, remaining_len) < 0) { + NET_DBG("[%s/%p] %s %s received", fsm->name, fsm, + "Invalid", + ppp_pkt_type2str(PPP_CONFIGURE_ACK)); + return NET_DROP; + } + } + + fsm->ack_received = true; + fsm->recv_nack_loops = 0; + + switch (fsm->state) { + case PPP_ACK_RECEIVED: + k_delayed_work_cancel(&fsm->timer); + fsm_send_configure_req(fsm, false); + ppp_change_state(fsm, PPP_REQUEST_SENT); + break; + + case PPP_ACK_SENT: + k_delayed_work_cancel(&fsm->timer); + ppp_change_state(fsm, PPP_OPENED); + fsm->retransmits = MAX_CONFIGURE_REQ; + if (fsm->cb.up) { + fsm->cb.up(fsm); + } + + break; + + case PPP_CLOSED: + case PPP_STOPPED: + (void)ppp_send_pkt(fsm, net_pkt_iface(pkt), PPP_TERMINATE_ACK, + id, NULL, 0); + break; + + case PPP_OPENED: + if (fsm->cb.down) { + fsm->cb.down(fsm); + } + + fsm_send_configure_req(fsm, false); + ppp_change_state(fsm, PPP_REQUEST_SENT); + break; + + case PPP_REQUEST_SENT: + ppp_change_state(fsm, PPP_ACK_RECEIVED); + fsm->retransmits = MAX_CONFIGURE_REQ; + break; + + default: + NET_DBG("[%s/%p] %s state %s (%d)", fsm->name, fsm, "Invalid", + ppp_state_str(fsm->state), fsm->state); + return NET_DROP; + } + + return NET_OK; +} + +static enum net_verdict fsm_recv_configure_nack_rej(struct ppp_fsm *fsm, + enum ppp_packet_type code, + u8_t id, + struct net_pkt *pkt, + u16_t length) +{ + bool ret = false; + + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); + + if (id != fsm->req_id || fsm->ack_received) { + return NET_DROP; + } + + if (code == PPP_CONFIGURE_NACK) { + bool rejected = false; + + fsm->recv_nack_loops++; + + if (fsm->recv_nack_loops >= MAX_NACK_LOOPS) { + rejected = true; + } + + if (fsm->cb.config_info_nack) { + int err; + + err = fsm->cb.config_info_nack(fsm, pkt, length, + rejected); + if (err < 0) { + NET_DBG("[%s/%p] %s failed (%d)", + fsm->name, fsm, "Nack", err); + } else { + ret = true; + } + } + + if (!ret) { + NET_DBG("[%s/%p] %s %s (id %d)", fsm->name, fsm, + "Invalid", ppp_pkt_type2str(code), id); + return NET_DROP; + } + } else { + fsm->recv_nack_loops = 0; + + if (fsm->cb.config_info_rej) { + int err; + + err = fsm->cb.config_info_rej(fsm, pkt, length); + if (err < 0) { + NET_DBG("[%s/%p] %s failed (%d)", + fsm->name, fsm, "Reject", err); + } else { + ret = true; + } + } + + if (!ret) { + NET_DBG("[%s/%p] %s %s (id %d)", fsm->name, fsm, + 
"Invalid", ppp_pkt_type2str(code), id); + return NET_DROP; + } + } + + fsm->ack_received = true; + + switch (fsm->state) { + case PPP_ACK_RECEIVED: + k_delayed_work_cancel(&fsm->timer); + fsm_send_configure_req(fsm, false); + ppp_change_state(fsm, PPP_REQUEST_SENT); + break; + + case PPP_ACK_SENT: + case PPP_REQUEST_SENT: + k_delayed_work_cancel(&fsm->timer); + fsm_send_configure_req(fsm, false); + break; + + case PPP_CLOSED: + case PPP_STOPPED: + (void)ppp_send_pkt(fsm, net_pkt_iface(pkt), PPP_TERMINATE_ACK, + id, NULL, 0); + break; + + case PPP_OPENED: + if (fsm->cb.down) { + fsm->cb.down(fsm); + } + + fsm_send_configure_req(fsm, false); + ppp_change_state(fsm, PPP_REQUEST_SENT); + break; + + default: + NET_DBG("[%s/%p] %s state %s (%d)", fsm->name, fsm, "Invalid", + ppp_state_str(fsm->state), fsm->state); + return NET_DROP; + } + + return NET_OK; +} + +static enum net_verdict fsm_recv_terminate_req(struct ppp_fsm *fsm, u8_t id, + struct net_pkt *pkt, + u16_t length) +{ + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); + + switch (fsm->state) { + case PPP_ACK_RECEIVED: + case PPP_ACK_SENT: + ppp_change_state(fsm, PPP_REQUEST_SENT); + break; + + case PPP_OPENED: + if (length > 0) { + net_pkt_read(pkt, fsm->terminate_reason, + MIN(length, + sizeof(fsm->terminate_reason) - 1)); + + NET_DBG("[%s/%p] %s (%s)", + fsm->name, fsm, "Terminated by peer", + log_strdup(fsm->terminate_reason)); + } else { + NET_DBG("[%s/%p] Terminated by peer", + fsm->name, fsm); + } + + fsm->retransmits = 0; + ppp_change_state(fsm, PPP_STOPPING); + + if (fsm->cb.down) { + fsm->cb.down(fsm); + } + + (void)k_delayed_work_submit(&fsm->timer, FSM_TIMEOUT); + break; + + default: + NET_DBG("[%s/%p] %s state %s (%d)", fsm->name, fsm, "Invalid", + ppp_state_str(fsm->state), fsm->state); + return NET_DROP; + } + + (void)ppp_send_pkt(fsm, net_pkt_iface(pkt), PPP_TERMINATE_ACK, id, + NULL, 0); + + return NET_OK; +} + +static enum net_verdict fsm_recv_terminate_ack(struct ppp_fsm *fsm, u8_t id, + struct net_pkt *pkt, + u16_t length) +{ + enum ppp_state new_state; + + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); + + switch (fsm->state) { + case PPP_CLOSING: + new_state = PPP_CLOSED; + goto stopped; + + case PPP_OPENED: + if (fsm->cb.down) { + fsm->cb.down(fsm); + } + + fsm_send_configure_req(fsm, false); + ppp_change_state(fsm, PPP_REQUEST_SENT); + break; + + case PPP_STOPPING: + new_state = PPP_STOPPED; + goto stopped; + + case PPP_ACK_RECEIVED: + ppp_change_state(fsm, PPP_REQUEST_SENT); + break; + + default: + NET_DBG("[%s/%p] %s state %s (%d)", fsm->name, fsm, "Invalid", + ppp_state_str(fsm->state), fsm->state); + return NET_DROP; + } + + return NET_OK; + +stopped: + k_delayed_work_cancel(&fsm->timer); + ppp_change_state(fsm, new_state); + + if (fsm->cb.finished) { + fsm->cb.finished(fsm); + } + + return NET_OK; +} + +static enum net_verdict fsm_recv_code_rej(struct ppp_fsm *fsm, + struct net_pkt *pkt) +{ + u8_t code, id; + int ret; + + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); + + ret = net_pkt_read_u8(pkt, &code); + if (ret < 0) { + return NET_DROP; + } + + ret = net_pkt_read_u8(pkt, &id); + if (ret < 0) { + return NET_DROP; + } + + NET_DBG("[%s/%p] Received Code-Rej code %d id %d", fsm->name, fsm, + code, id); + + if (fsm->state == PPP_ACK_RECEIVED) { + ppp_change_state(fsm, PPP_REQUEST_SENT); + } + + return NET_OK; +} + +void ppp_fsm_proto_reject(struct ppp_fsm 
*fsm) +{ + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); + + switch (fsm->state) { + case PPP_ACK_RECEIVED: + case PPP_ACK_SENT: + case PPP_STOPPING: + case PPP_REQUEST_SENT: + k_delayed_work_cancel(&fsm->timer); + ppp_change_state(fsm, PPP_STOPPED); + if (fsm->cb.finished) { + fsm->cb.finished(fsm); + } + + break; + + case PPP_CLOSED: + ppp_change_state(fsm, PPP_CLOSED); + if (fsm->cb.finished) { + fsm->cb.finished(fsm); + } + + break; + + case PPP_CLOSING: + k_delayed_work_cancel(&fsm->timer); + ppp_change_state(fsm, PPP_CLOSED); + if (fsm->cb.finished) { + fsm->cb.finished(fsm); + } + + break; + + case PPP_OPENED: + terminate(fsm, PPP_STOPPING); + break; + + case PPP_STOPPED: + ppp_change_state(fsm, PPP_STOPPED); + if (fsm->cb.finished) { + fsm->cb.finished(fsm); + } + + break; + + default: + NET_DBG("[%s/%p] %s state %s (%d)", fsm->name, fsm, "Invalid", + ppp_state_str(fsm->state), fsm->state); + break; + } +} + +enum net_verdict ppp_fsm_input(struct ppp_fsm *fsm, u16_t proto, + struct net_pkt *pkt) +{ + u8_t code, id; + u16_t length; + int ret; + + ret = net_pkt_read_u8(pkt, &code); + if (ret < 0) { + NET_DBG("[%s/%p] Cannot read %s (pkt len %zd)", + fsm->name, fsm, "code", net_pkt_get_len(pkt)); + return NET_DROP; + } + + ret = net_pkt_read_u8(pkt, &id); + if (ret < 0) { + NET_DBG("[%s/%p] Cannot read %s (pkt len %zd)", + fsm->name, fsm, "id", net_pkt_get_len(pkt)); + return NET_DROP; + } + + ret = net_pkt_read_be16(pkt, &length); + if (ret < 0) { + NET_DBG("[%s/%p] Cannot read %s (pkt len %zd)", + fsm->name, fsm, "length", net_pkt_get_len(pkt)); + return NET_DROP; + } + + if (length > PPP_MRU) { + NET_DBG("[%s/%p] Too long msg %d", fsm->name, fsm, length); + return NET_DROP; + } + + if (fsm->state == PPP_INITIAL || fsm->state == PPP_STARTING) { + NET_DBG("[%s/%p] Received %s packet in wrong state %s (%d)", + fsm->name, fsm, ppp_proto2str(proto), + ppp_state_str(fsm->state), fsm->state); + return NET_DROP; + } + + /* Length will only contain payload/data length */ + length -= sizeof(code) + sizeof(id) + sizeof(length); + + NET_DBG("[%s/%p] %s %s (%d) id %d payload len %d", fsm->name, fsm, + ppp_proto2str(proto), ppp_pkt_type2str(code), code, id, + length); + + switch (code) { + case PPP_CODE_REJ: + return fsm_recv_code_rej(fsm, pkt); + + case PPP_CONFIGURE_ACK: + return fsm_recv_configure_ack(fsm, id, pkt, length); + + case PPP_CONFIGURE_NACK: + return fsm_recv_configure_nack_rej(fsm, code, id, pkt, length); + + case PPP_CONFIGURE_REQ: + return fsm_recv_configure_req(fsm, id, pkt, length); + + case PPP_CONFIGURE_REJ: + return fsm_recv_configure_nack_rej(fsm, code, id, pkt, length); + + case PPP_TERMINATE_ACK: + return fsm_recv_terminate_ack(fsm, id, pkt, length); + + case PPP_TERMINATE_REQ: + return fsm_recv_terminate_req(fsm, id, pkt, length); + + default: + if (fsm->cb.proto_extension) { + enum net_verdict verdict; + + verdict = fsm->cb.proto_extension(fsm, code, id, pkt); + if (verdict != NET_DROP) { + return verdict; + } + } + + (void)ppp_send_pkt(fsm, net_pkt_iface(pkt), PPP_CODE_REJ, + id, pkt, 0); + } + + return NET_DROP; +} + +enum net_verdict ppp_fsm_recv_protocol_rej(struct ppp_fsm *fsm, + u8_t id, + struct net_pkt *pkt) +{ + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); + + return NET_DROP; +} + +enum net_verdict ppp_fsm_recv_echo_req(struct ppp_fsm *fsm, + u8_t id, + struct net_pkt *pkt) +{ + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + 
ppp_state_str(fsm->state), fsm->state); + + return NET_DROP; +} + +enum net_verdict ppp_fsm_recv_echo_reply(struct ppp_fsm *fsm, + u8_t id, + struct net_pkt *pkt) +{ + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); + + return NET_DROP; +} + +enum net_verdict ppp_fsm_recv_discard_req(struct ppp_fsm *fsm, + u8_t id, + struct net_pkt *pkt) +{ + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); + + return NET_DROP; +} diff --git a/subsys/net/l2/ppp/ipcp.c b/subsys/net/l2/ppp/ipcp.c new file mode 100644 index 00000000000..dcbaed0d10d --- /dev/null +++ b/subsys/net/l2/ppp/ipcp.c @@ -0,0 +1,453 @@ +/* + * Copyright (c) 2019 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +LOG_MODULE_DECLARE(net_l2_ppp, CONFIG_NET_L2_PPP_LOG_LEVEL); + +#include +#include + +#include + +#include "net_private.h" + +#include "ppp_internal.h" + +static enum net_verdict ipcp_handle(struct ppp_context *ctx, + struct net_if *iface, + struct net_pkt *pkt) +{ + return ppp_fsm_input(&ctx->ipcp.fsm, PPP_IPCP, pkt); +} + +static bool append_to_buf(struct net_buf *buf, u8_t *data, u8_t data_len) +{ + if (data_len > net_buf_tailroom(buf)) { + return false; + } + + /* FIXME: use net_pkt api so that we can handle a case where data might + * split to two net_buf's + */ + net_buf_add_mem(buf, data, data_len); + + return true; +} + +/* Length is (6): code + id + IPv4 address length */ +#define IP_ADDRESS_OPTION_LEN (1 + 1 + 4) + +static struct net_buf *ipcp_config_info_add(struct ppp_fsm *fsm) +{ + struct ppp_context *ctx = CONTAINER_OF(fsm, struct ppp_context, + ipcp.fsm); + + /* Currently we support only one option (IP address) */ + u8_t option[IP_ADDRESS_OPTION_LEN]; + const struct in_addr *my_addr; + struct net_buf *buf; + bool added; + + my_addr = net_if_ipv4_select_src_addr(ctx->iface, + &ctx->ipcp.peer_options.address); + if (!my_addr) { + my_addr = net_ipv4_unspecified_address(); + } + + option[0] = IPCP_OPTION_IP_ADDRESS; + option[1] = IP_ADDRESS_OPTION_LEN; + memcpy(&option[2], &my_addr->s_addr, sizeof(my_addr->s_addr)); + + buf = ppp_get_net_buf(NULL, 0); + if (!buf) { + goto out_of_mem; + } + + added = append_to_buf(buf, option, sizeof(option)); + if (!added) { + goto out_of_mem; + } + + return buf; + +out_of_mem: + if (buf) { + net_buf_unref(buf); + } + + return NULL; +} + +static int ipcp_config_info_req(struct ppp_fsm *fsm, + struct net_pkt *pkt, + u16_t length, + struct net_buf **ret_buf) +{ + int nack_idx = 0, count_rej = 0, address_option_idx = -1; + struct net_buf *buf = NULL; + struct ppp_option_pkt options[MAX_IPCP_OPTIONS]; + struct ppp_option_pkt nack_options[MAX_IPCP_OPTIONS]; + enum ppp_packet_type code; + enum net_verdict verdict; + int i; + + memset(options, 0, sizeof(options)); + memset(nack_options, 0, sizeof(nack_options)); + + verdict = ppp_parse_options(fsm, pkt, length, options, + ARRAY_SIZE(options)); + if (verdict != NET_OK) { + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(options); i++) { + if (options[i].type.ipcp != IPCP_OPTION_RESERVED) { + NET_DBG("[%s/%p] %s option %s (%d) len %d", + fsm->name, fsm, "Check", + ppp_option2str(PPP_IPCP, options[i].type.ipcp), + options[i].type.ipcp, options[i].len); + } + + switch (options[i].type.ipcp) { + case IPCP_OPTION_RESERVED: + continue; + + case IPCP_OPTION_IP_ADDRESSES: + count_rej++; + goto ignore_option; + + case IPCP_OPTION_IP_COMP_PROTO: + count_rej++; + goto ignore_option; + + case IPCP_OPTION_IP_ADDRESS: + /* 
Currently we only accept one option (IP address) */ + address_option_idx = i; + break; + + default: + ignore_option: + nack_options[nack_idx].type.ipcp = + options[i].type.ipcp; + nack_options[nack_idx].len = options[i].len; + + if (options[i].len > 2) { + memcpy(&nack_options[nack_idx].value, + &options[i].value, + sizeof(nack_options[nack_idx].value)); + } + + nack_idx++; + break; + } + } + + if (nack_idx > 0) { + struct net_buf *nack_buf; + + if (count_rej > 0) { + code = PPP_CONFIGURE_REJ; + } else { + code = PPP_CONFIGURE_NACK; + } + + /* Create net_buf containing options that are not accepted */ + for (i = 0; i < MIN(nack_idx, ARRAY_SIZE(nack_options)); i++) { + bool added; + + nack_buf = ppp_get_net_buf(buf, nack_options[i].len); + if (!nack_buf) { + goto out_of_mem; + } + + if (!buf) { + buf = nack_buf; + } + + added = append_to_buf(nack_buf, + &nack_options[i].type.ipcp, 1); + if (!added) { + goto out_of_mem; + } + + added = append_to_buf(nack_buf, &nack_options[i].len, + 1); + if (!added) { + goto out_of_mem; + } + + /* If there is some data, copy it to result buf */ + if (nack_options[i].value.pos) { + added = append_to_buf(nack_buf, + nack_options[i].value.pos, + nack_options[i].len - 1 - 1); + if (!added) { + goto out_of_mem; + } + } + + continue; + + out_of_mem: + if (nack_buf) { + net_buf_unref(nack_buf); + } + + goto bail_out; + } + } else { + struct ppp_context *ctx; + struct in_addr addr; + int ret; + + ctx = CONTAINER_OF(fsm, struct ppp_context, ipcp.fsm); + + if (address_option_idx < 0) { + /* Address option was not present */ + return -EINVAL; + } + + code = PPP_CONFIGURE_ACK; + + net_pkt_cursor_restore(pkt, + &options[address_option_idx].value); + + ret = net_pkt_read(pkt, (u32_t *)&addr, sizeof(addr)); + if (ret < 0) { + /* Should not happen, is the pkt corrupt? */ + return -EMSGSIZE; + } + + memcpy(&ctx->ipcp.peer_options.address, &addr, sizeof(addr)); + + if (CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG) { + char dst[INET_ADDRSTRLEN]; + char *addr_str; + + addr_str = net_addr_ntop(AF_INET, &addr, dst, + sizeof(dst)); + + NET_DBG("[%s/%p] Received %saddress %s", + fsm->name, fsm, "peer ", log_strdup(addr_str)); + } + + if (addr.s_addr) { + bool added; + u8_t val; + + /* The address is the remote address, we then need + * to figure out what our address should be. 
+ * + * TODO: + * - check that the IP address can be accepted + */ + + buf = ppp_get_net_buf(NULL, IP_ADDRESS_OPTION_LEN); + if (!buf) { + goto bail_out; + } + + val = IPCP_OPTION_IP_ADDRESS; + added = append_to_buf(buf, &val, sizeof(val)); + if (!added) { + goto bail_out; + } + + val = IP_ADDRESS_OPTION_LEN; + added = append_to_buf(buf, &val, sizeof(val)); + if (!added) { + goto bail_out; + } + + added = append_to_buf(buf, (u8_t *)&addr.s_addr, + sizeof(addr.s_addr)); + if (!added) { + goto bail_out; + } + } + } + + if (buf) { + *ret_buf = buf; + } + + return code; + +bail_out: + if (buf) { + net_buf_unref(buf); + } + + return -ENOMEM; +} + +static int ipcp_config_info_rej(struct ppp_fsm *fsm, + struct net_pkt *pkt, + u16_t length) +{ + struct ppp_option_pkt nack_options[MAX_IPCP_OPTIONS]; + enum net_verdict verdict; + int i, ret, address_option_idx = -1; + struct in_addr addr; + + memset(nack_options, 0, sizeof(nack_options)); + + verdict = ppp_parse_options(fsm, pkt, length, nack_options, + ARRAY_SIZE(nack_options)); + if (verdict != NET_OK) { + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(nack_options); i++) { + if (nack_options[i].type.ipcp != IPCP_OPTION_RESERVED) { + NET_DBG("[%s/%p] %s option %s (%d) len %d", + fsm->name, fsm, "Check", + ppp_option2str(PPP_IPCP, + nack_options[i].type.ipcp), + nack_options[i].type.ipcp, + nack_options[i].len); + } + + switch (nack_options[i].type.ipcp) { + case IPCP_OPTION_RESERVED: + continue; + + case IPCP_OPTION_IP_ADDRESSES: + continue; + + case IPCP_OPTION_IP_COMP_PROTO: + continue; + + case IPCP_OPTION_IP_ADDRESS: + address_option_idx = i; + break; + + default: + continue; + } + } + + if (address_option_idx < 0) { + return -EINVAL; + } + + net_pkt_cursor_restore(pkt, &nack_options[address_option_idx].value); + + ret = net_pkt_read(pkt, (u32_t *)&addr, sizeof(addr)); + if (ret < 0) { + /* Should not happen, is the pkt corrupt? 
*/ + return -EMSGSIZE; + } + + if (CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG) { + char dst[INET_ADDRSTRLEN]; + char *addr_str; + + addr_str = net_addr_ntop(AF_INET, &addr, dst, + sizeof(dst)); + + NET_DBG("[%s/%p] Received %saddress %s", + fsm->name, fsm, "", log_strdup(addr_str)); + } + + return 0; +} + +static void ipcp_lower_down(struct ppp_context *ctx) +{ + ppp_fsm_lower_down(&ctx->ipcp.fsm); +} + +static void ipcp_lower_up(struct ppp_context *ctx) +{ + ppp_fsm_lower_up(&ctx->ipcp.fsm); +} + +static void ipcp_open(struct ppp_context *ctx) +{ + ppp_fsm_open(&ctx->ipcp.fsm); +} + +static void ipcp_close(struct ppp_context *ctx, const u8_t *reason) +{ + ppp_fsm_close(&ctx->ipcp.fsm, reason); +} + +static void ipcp_up(struct ppp_fsm *fsm) +{ + struct ppp_context *ctx = CONTAINER_OF(fsm, struct ppp_context, + ipcp.fsm); + + if (ctx->is_ipcp_up) { + return; + } + + ppp_network_up(ctx, PPP_IP); + + ctx->is_network_up = true; + ctx->is_ipcp_up = true; + + NET_DBG("[%s/%p] Current state %s (%d)", fsm->name, fsm, + ppp_state_str(fsm->state), fsm->state); +} + +static void ipcp_down(struct ppp_fsm *fsm) +{ + struct ppp_context *ctx = CONTAINER_OF(fsm, struct ppp_context, + ipcp.fsm); + + if (!ctx->is_network_up) { + return; + } + + ctx->is_network_up = false; + + ppp_network_down(ctx, PPP_IP); +} + +static void ipcp_finished(struct ppp_fsm *fsm) +{ + struct ppp_context *ctx = CONTAINER_OF(fsm, struct ppp_context, + ipcp.fsm); + + if (!ctx->is_ipcp_open) { + return; + } + + ctx->is_ipcp_open = false; + + ppp_network_done(ctx, PPP_IP); +} + +static void ipcp_proto_reject(struct ppp_fsm *fsm) +{ + ppp_fsm_lower_down(fsm); +} + +static void ipcp_init(struct ppp_context *ctx) +{ + NET_DBG("proto %s (0x%04x) fsm %p", ppp_proto2str(PPP_IPCP), PPP_IPCP, + &ctx->ipcp.fsm); + + memset(&ctx->ipcp.fsm, 0, sizeof(ctx->ipcp.fsm)); + + ppp_fsm_init(&ctx->ipcp.fsm, PPP_IPCP); + + ppp_fsm_name_set(&ctx->ipcp.fsm, ppp_proto2str(PPP_IPCP)); + + ctx->ipcp.fsm.cb.up = ipcp_up; + ctx->ipcp.fsm.cb.down = ipcp_down; + ctx->ipcp.fsm.cb.finished = ipcp_finished; + ctx->ipcp.fsm.cb.proto_reject = ipcp_proto_reject; + ctx->ipcp.fsm.cb.config_info_add = ipcp_config_info_add; + ctx->ipcp.fsm.cb.config_info_req = ipcp_config_info_req; + ctx->ipcp.fsm.cb.config_info_rej = ipcp_config_info_rej; +} + +PPP_PROTOCOL_REGISTER(IPCP, PPP_IPCP, + ipcp_init, ipcp_handle, + ipcp_lower_up, ipcp_lower_down, + ipcp_open, ipcp_close); diff --git a/subsys/net/l2/ppp/lcp.c b/subsys/net/l2/ppp/lcp.c new file mode 100644 index 00000000000..c41a46f3d96 --- /dev/null +++ b/subsys/net/l2/ppp/lcp.c @@ -0,0 +1,312 @@ +/* + * Copyright (c) 2019 Intel Corporation. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +LOG_MODULE_DECLARE(net_l2_ppp, CONFIG_NET_L2_PPP_LOG_LEVEL); + +#include +#include +#include +#include +#include + +#include + +#include "net_private.h" + +#include "ppp_stats.h" +#include "ppp_internal.h" + +static enum net_verdict lcp_handle_ext(struct ppp_fsm *fsm, + enum ppp_packet_type code, u8_t id, + struct net_pkt *pkt) +{ + enum net_verdict verdict = NET_DROP; + + switch (code) { + case PPP_PROTOCOL_REJ: + NET_DBG("PPP Protocol-Rej"); + return ppp_fsm_recv_protocol_rej(fsm, id, pkt); + + case PPP_ECHO_REQ: + NET_DBG("PPP Echo-Req"); + return ppp_fsm_recv_echo_req(fsm, id, pkt); + + case PPP_ECHO_REPLY: + NET_DBG("PPP Echo-Reply"); + return ppp_fsm_recv_echo_reply(fsm, id, pkt); + + case PPP_DISCARD_REQ: + NET_DBG("PPP Discard-Req"); + return ppp_fsm_recv_discard_req(fsm, id, pkt); + + default: + break; + } + + return verdict; +} + +static enum net_verdict lcp_handle(struct ppp_context *ctx, + struct net_if *iface, + struct net_pkt *pkt) +{ + return ppp_fsm_input(&ctx->lcp.fsm, PPP_LCP, pkt); +} + +static bool append_to_buf(struct net_buf *buf, u8_t *data, u8_t data_len) +{ + if (data_len > net_buf_tailroom(buf)) { + return false; + } + + /* FIXME: use net_pkt api so that we can handle a case where data + * might split to two net_buf's + */ + net_buf_add_mem(buf, data, data_len); + + return true; +} + +static int lcp_config_info_req(struct ppp_fsm *fsm, + struct net_pkt *pkt, + u16_t length, + struct net_buf **buf) +{ + struct ppp_option_pkt options[MAX_LCP_OPTIONS]; + struct ppp_option_pkt nack_options[MAX_LCP_OPTIONS]; + struct net_buf *nack = NULL; + enum ppp_packet_type code; + enum net_verdict verdict; + int i, nack_idx = 0; + int count_rej = 0, count_nack = 0; + + memset(options, 0, sizeof(options)); + memset(nack_options, 0, sizeof(nack_options)); + + verdict = ppp_parse_options(fsm, pkt, length, options, + ARRAY_SIZE(options)); + if (verdict != NET_OK) { + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(options); i++) { + if (options[i].type.lcp != LCP_OPTION_RESERVED) { + NET_DBG("[%s/%p] %s option %s (%d) len %d", + fsm->name, fsm, "Check", + ppp_option2str(PPP_LCP, options[i].type.lcp), + options[i].type.lcp, options[i].len); + } + + switch (options[i].type.lcp) { + case LCP_OPTION_RESERVED: + continue; + + case LCP_OPTION_MRU: + break; + + /* TODO: Check from ctx->lcp.my_options what options to accept + */ + case LCP_OPTION_ASYNC_CTRL_CHAR_MAP: + count_nack++; + goto ignore_option; + + case LCP_OPTION_AUTH_PROTO: + count_nack++; + goto ignore_option; + + case LCP_OPTION_QUALITY_PROTO: + count_rej++; + goto ignore_option; + + case LCP_OPTION_MAGIC_NUMBER: + count_nack++; + goto ignore_option; + + case LCP_OPTION_PROTO_COMPRESS: + count_rej++; + goto ignore_option; + + case LCP_OPTION_ADDR_CTRL_COMPRESS: + count_rej++; + goto ignore_option; + + default: + ignore_option: + nack_options[nack_idx].type.lcp = options[i].type.lcp; + nack_options[nack_idx].len = options[i].len; + + if (options[i].len > 2) { + memcpy(&nack_options[nack_idx].value, + &options[i].value, + sizeof(nack_options[nack_idx].value)); + } + + nack_idx++; + break; + } + } + + if (nack_idx > 0) { + struct net_buf *nack_buf; + + if (count_rej > 0) { + code = PPP_CONFIGURE_REJ; + } else { + code = PPP_CONFIGURE_NACK; + } + + /* Create net_buf containing options that are not accepted */ + for (i = 0; i < MIN(nack_idx, ARRAY_SIZE(nack_options)); i++) { + bool added; + + nack_buf = ppp_get_net_buf(nack, nack_options[i].len); + if (!nack_buf) { + 
goto out_of_mem; + } + + if (!nack) { + nack = nack_buf; + } + + added = append_to_buf(nack_buf, + &nack_options[i].type.lcp, 1); + if (!added) { + goto out_of_mem; + } + + added = append_to_buf(nack_buf, &nack_options[i].len, + 1); + if (!added) { + goto out_of_mem; + } + + /* If there is some data, copy it to result buf */ + if (nack_options[i].value.pos) { + added = append_to_buf(nack_buf, + nack_options[i].value.pos, + nack_options[i].len - 1 - 1); + if (!added) { + goto out_of_mem; + } + } + + continue; + + out_of_mem: + if (nack) { + net_buf_unref(nack); + } else { + if (nack_buf) { + net_buf_unref(nack_buf); + } + } + + return -ENOMEM; + } + } else { + code = PPP_CONFIGURE_ACK; + } + + if (nack) { + *buf = nack; + } + + return code; +} + +static void lcp_lower_down(struct ppp_context *ctx) +{ + ppp_fsm_lower_down(&ctx->lcp.fsm); +} + +static void lcp_lower_up(struct ppp_context *ctx) +{ + ppp_fsm_lower_up(&ctx->lcp.fsm); +} + +static void lcp_open(struct ppp_context *ctx) +{ + ppp_fsm_open(&ctx->lcp.fsm); +} + +static void lcp_close(struct ppp_context *ctx, const u8_t *reason) +{ + if (ctx->phase != PPP_DEAD) { + ppp_change_phase(ctx, PPP_TERMINATE); + } + + ppp_change_state(&ctx->lcp.fsm, PPP_STOPPED); + + ppp_fsm_close(&ctx->lcp.fsm, reason); +} + +static void lcp_down(struct ppp_fsm *fsm) +{ + struct ppp_context *ctx = CONTAINER_OF(fsm, struct ppp_context, + lcp.fsm); + + ppp_link_down(ctx); + + ppp_change_phase(ctx, PPP_ESTABLISH); +} + +static void lcp_up(struct ppp_fsm *fsm) +{ + struct ppp_context *ctx = CONTAINER_OF(fsm, struct ppp_context, + lcp.fsm); + + /* TODO: Set MRU/MTU of the network interface here */ + + ppp_link_established(ctx, fsm); +} + +static void lcp_starting(struct ppp_fsm *fsm) +{ + struct ppp_context *ctx = CONTAINER_OF(fsm, struct ppp_context, + lcp.fsm); + + ppp_link_needed(ctx); +} + +static void lcp_finished(struct ppp_fsm *fsm) +{ + struct ppp_context *ctx = CONTAINER_OF(fsm, struct ppp_context, + lcp.fsm); + + ppp_link_terminated(ctx); +} + +static void lcp_init(struct ppp_context *ctx) +{ + NET_DBG("proto %s (0x%04x) fsm %p", ppp_proto2str(PPP_LCP), PPP_LCP, + &ctx->lcp.fsm); + + memset(&ctx->lcp.fsm, 0, sizeof(ctx->lcp.fsm)); + + ppp_fsm_init(&ctx->lcp.fsm, PPP_LCP); + + ppp_fsm_name_set(&ctx->lcp.fsm, ppp_proto2str(PPP_LCP)); + + ctx->lcp.fsm.cb.up = lcp_up; + ctx->lcp.fsm.cb.down = lcp_down; + ctx->lcp.fsm.cb.starting = lcp_starting; + ctx->lcp.fsm.cb.finished = lcp_finished; + ctx->lcp.fsm.cb.proto_extension = lcp_handle_ext; + ctx->lcp.fsm.cb.config_info_req = lcp_config_info_req; + + ctx->lcp.my_options.negotiate_proto_compression = false; + ctx->lcp.my_options.negotiate_addr_compression = false; + ctx->lcp.my_options.negotiate_async_map = false; + ctx->lcp.my_options.negotiate_magic = false; + ctx->lcp.my_options.negotiate_mru = + IS_ENABLED(CONFIG_NET_L2_PPP_OPTION_MRU_NEG) ? true : false; +} + +PPP_PROTOCOL_REGISTER(LCP, PPP_LCP, + lcp_init, lcp_handle, + lcp_lower_up, lcp_lower_down, + lcp_open, lcp_close); diff --git a/subsys/net/l2/ppp/link.c b/subsys/net/l2/ppp/link.c new file mode 100644 index 00000000000..f4d45fdd269 --- /dev/null +++ b/subsys/net/l2/ppp/link.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2019 Intel Corporation. 
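Editor's note: the reply-code selection in lcp_config_info_req() above reduces to a simple rule: any option counted for rejection forces Configure-Rej, otherwise any nacked option yields Configure-Nack, and an empty list means Configure-Ack. A standalone restatement of that rule (illustrative only; the helper name is an assumption):

```c
/* Rejected options take precedence over nacked ones; an empty nack
 * list means every requested option was acceptable as-is.
 */
static enum ppp_packet_type lcp_pick_reply_code(int count_rej, int nack_idx)
{
	if (nack_idx == 0) {
		return PPP_CONFIGURE_ACK;
	}

	return (count_rej > 0) ? PPP_CONFIGURE_REJ : PPP_CONFIGURE_NACK;
}
```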
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +LOG_MODULE_DECLARE(net_l2_ppp, CONFIG_NET_L2_PPP_LOG_LEVEL); + +#include +#include + +#include + +#include "net_private.h" + +#include "ppp_internal.h" + +static void lcp_up(struct ppp_context *ctx) +{ + struct ppp_protocol_handler *proto; + + for (proto = __net_ppp_proto_start; + proto != __net_ppp_proto_end; + proto++) { + if (proto->protocol == PPP_LCP) { + continue; + } + + if (proto->lower_up) { + proto->lower_up(ctx); + } + } +} + +static void do_network(struct ppp_context *ctx) +{ + const struct ppp_protocol_handler *proto; + + ppp_change_phase(ctx, PPP_NETWORK); + + for (proto = __net_ppp_proto_start; + proto != __net_ppp_proto_end; + proto++) { + if (proto->protocol == PPP_CCP || proto->protocol == PPP_ECP) { + if (proto->open) { + proto->open(ctx); + } + } + } + + /* Do the other network protocols if encryption is not needed for + * them. + */ + /* TODO possible encryption stuff here*/ + + for (proto = __net_ppp_proto_start; + proto != __net_ppp_proto_end; + proto++) { + if (proto->protocol == PPP_CCP || proto->protocol == PPP_ECP || + proto->protocol >= 0xC000) { + continue; + } + + if (proto->open) { + ctx->network_protos_open++; + proto->open(ctx); + } + } + + if (ctx->network_protos_open == 0) { + proto = ppp_lcp_get(); + if (proto) { + proto->close(ctx, "No network protocols open"); + } + } +} + +void ppp_link_established(struct ppp_context *ctx, struct ppp_fsm *fsm) +{ + NET_DBG("[%p] Link established", ctx); + + ppp_change_phase(ctx, PPP_ESTABLISH); + + ppp_change_phase(ctx, PPP_AUTH); + + /* If no authentication is need, then we are done */ + /* TODO: check here if auth is needed */ + + do_network(ctx); + + lcp_up(ctx); +} + +void ppp_link_terminated(struct ppp_context *ctx) +{ + if (ctx->phase == PPP_DEAD) { + return; + } + + /* TODO: cleanup things etc here if needed */ + + ppp_change_phase(ctx, PPP_DEAD); + + NET_DBG("[%p] Link terminated", ctx); +} + +void ppp_link_down(struct ppp_context *ctx) +{ + if (ctx->phase == PPP_DEAD) { + return; + } + + ppp_change_phase(ctx, PPP_NETWORK); + + ppp_network_all_down(ctx); + + ppp_change_phase(ctx, PPP_DEAD); +} + +void ppp_link_needed(struct ppp_context *ctx) +{ + /* TODO: Try to create link if needed. */ +} diff --git a/subsys/net/l2/ppp/misc.c b/subsys/net/l2/ppp/misc.c new file mode 100644 index 00000000000..347489f8eee --- /dev/null +++ b/subsys/net/l2/ppp/misc.c @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2019 Intel Corporation. 
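Editor's note: link.c drives the RFC 1661 phase sequence DEAD, ESTABLISH, AUTH (currently a pass-through, since no authentication protocol is implemented yet), NETWORK, and finally RUNNING once a network protocol such as IPCP comes up. A minimal sketch of what "ready to carry traffic" means for callers (illustrative only; the helper is not part of the patch):

```c
/* Illustrative helper, not part of the patch: ppp_send() later in
 * ppp_l2.c only forwards user data once link.c has driven the context
 * into the RUNNING phase, i.e. LCP is opened and at least one network
 * protocol is up.
 */
static bool ppp_ready_for_data(const struct ppp_context *ctx)
{
	return ctx->phase == PPP_RUNNING;
}
```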
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +LOG_MODULE_DECLARE(net_l2_ppp, CONFIG_NET_L2_PPP_LOG_LEVEL); + +#include +#include + +#include + +#include "net_private.h" + +#include "ppp_internal.h" + +const char *ppp_phase_str(enum ppp_phase phase) +{ +#if (CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG) || defined(CONFIG_NET_SHELL) + switch (phase) { + case PPP_DEAD: + return "DEAD"; + case PPP_ESTABLISH: + return "ESTABLISH"; + case PPP_AUTH: + return "AUTH"; + case PPP_NETWORK: + return "NETWORK"; + case PPP_RUNNING: + return "RUNNING"; + case PPP_TERMINATE: + return "TERMINATE"; + } +#else + ARG_UNUSED(phase); +#endif + + return ""; +} + +#if CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG +static void validate_phase_transition(enum ppp_phase current, + enum ppp_phase new) +{ + static const u8_t valid_transitions[] = { + [PPP_DEAD] = 1 << PPP_ESTABLISH, + [PPP_ESTABLISH] = 1 << PPP_DEAD | + 1 << PPP_AUTH | + 1 << PPP_TERMINATE, + [PPP_AUTH] = 1 << PPP_TERMINATE | + 1 << PPP_NETWORK, + [PPP_NETWORK] = 1 << PPP_TERMINATE | + 1 << PPP_RUNNING, + [PPP_RUNNING] = 1 << PPP_TERMINATE | + 1 << PPP_NETWORK, + [PPP_TERMINATE] = 1 << PPP_DEAD, + }; + + if (!(valid_transitions[current] & 1 << new)) { + NET_DBG("Invalid phase transition: %s (%d) => %s (%d)", + ppp_phase_str(current), current, + ppp_phase_str(new), new); + } +} +#else +static inline void validate_phase_transition(enum ppp_phase current, + enum ppp_phase new) +{ + ARG_UNUSED(current); + ARG_UNUSED(new); +} +#endif + +#if CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG +void ppp_change_phase_debug(struct ppp_context *ctx, enum ppp_phase new_phase, + const char *caller, int line) +#else +void ppp_change_phase(struct ppp_context *ctx, enum ppp_phase new_phase) +#endif +{ + NET_ASSERT(ctx); + + if (ctx->phase == new_phase) { + return; + } + + NET_ASSERT(new_phase >= PPP_DEAD && + new_phase <= PPP_TERMINATE); + +#if CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG + NET_DBG("[%p] phase %s (%d) => %s (%d) (%s():%d)", + ctx, ppp_phase_str(ctx->phase), ctx->phase, + ppp_phase_str(new_phase), new_phase, caller, line); +#endif + + validate_phase_transition(ctx->phase, new_phase); + + ctx->phase = new_phase; +} + +const char *ppp_state_str(enum ppp_state state) +{ +#if (CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG) || defined(CONFIG_NET_SHELL) + switch (state) { + case PPP_INITIAL: + return "INITIAL"; + case PPP_STARTING: + return "STARTING"; + case PPP_CLOSED: + return "CLOSED"; + case PPP_STOPPED: + return "STOPPED"; + case PPP_CLOSING: + return "CLOSING"; + case PPP_STOPPING: + return "STOPPING"; + case PPP_REQUEST_SENT: + return "REQUEST_SENT"; + case PPP_ACK_RECEIVED: + return "ACK_RECEIVED"; + case PPP_ACK_SENT: + return "ACK_SENT"; + case PPP_OPENED: + return "OPENED"; + } +#else + ARG_UNUSED(state); +#endif + + return ""; +} + +const char *ppp_pkt_type2str(enum ppp_packet_type type) +{ +#if (CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG) || defined(CONFIG_NET_SHELL) + switch (type) { + case PPP_CONFIGURE_REQ: + return "Configure-Req"; + case PPP_CONFIGURE_ACK: + return "Configure-Ack"; + case PPP_CONFIGURE_NACK: + return "Configure-Nack"; + case PPP_CONFIGURE_REJ: + return "Configure-Rej"; + case PPP_TERMINATE_REQ: + return "Terminate-Req"; + case PPP_TERMINATE_ACK: + return "Terminate-Ack"; + case PPP_CODE_REJ: + return "Code-Rej"; + case PPP_PROTOCOL_REJ: + return "Protocol-Rej"; + case PPP_ECHO_REQ: + return "Echo-Req"; + case PPP_ECHO_REPLY: + return "Echo-Reply"; + case PPP_DISCARD_REQ: + return "Discard-Req"; + } +#else + 
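+	/* Strings are only needed when debug logging or the network shell
+	 * is enabled; otherwise just silence the unused-parameter warning.
+	 */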
ARG_UNUSED(type); +#endif + + return ""; +} + +const char *ppp_proto2str(u16_t proto) +{ +#if (CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG) + switch (proto) { + case PPP_IP: + return "IPv4"; + case PPP_IPV6: + return "IPv6"; + case PPP_ECP: + return "ECP"; + case PPP_CCP: + return "CCP"; + case PPP_LCP: + return "LCP"; + case PPP_IPCP: + return "IPCP"; + } +#else + ARG_UNUSED(proto); +#endif + + return ""; +} + +#if CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG +static void validate_state_transition(enum ppp_state current, + enum ppp_state new) +{ + /* See RFC 1661 ch. 4.1 */ + static const u16_t valid_transitions[] = { + [PPP_INITIAL] = 1 << PPP_CLOSED | + 1 << PPP_STARTING, + [PPP_STARTING] = 1 << PPP_INITIAL | + 1 << PPP_REQUEST_SENT, + [PPP_CLOSED] = 1 << PPP_INITIAL | + 1 << PPP_REQUEST_SENT, + [PPP_STOPPED] = 1 << PPP_STARTING | + 1 << PPP_CLOSED | + 1 << PPP_ACK_RECEIVED | + 1 << PPP_REQUEST_SENT, + [PPP_CLOSING] = 1 << PPP_INITIAL | + 1 << PPP_STOPPING | + 1 << PPP_CLOSED, + [PPP_STOPPING] = 1 << PPP_STARTING | + 1 << PPP_CLOSING | + 1 << PPP_STOPPED, + [PPP_REQUEST_SENT] = 1 << PPP_STARTING | + 1 << PPP_CLOSING | + 1 << PPP_STOPPED | + 1 << PPP_ACK_SENT | + 1 << PPP_ACK_RECEIVED, + [PPP_ACK_RECEIVED] = 1 << PPP_STARTING | + 1 << PPP_CLOSING | + 1 << PPP_OPENED | + 1 << PPP_REQUEST_SENT | + 1 << PPP_STOPPED, + [PPP_ACK_SENT] = 1 << PPP_STARTING | + 1 << PPP_CLOSING | + 1 << PPP_STOPPED | + 1 << PPP_REQUEST_SENT | + 1 << PPP_OPENED, + [PPP_OPENED] = 1 << PPP_STARTING | + 1 << PPP_CLOSING | + 1 << PPP_ACK_SENT | + 1 << PPP_REQUEST_SENT | + 1 << PPP_CLOSING | + 1 << PPP_STOPPING, + }; + + if (!(valid_transitions[current] & 1 << new)) { + NET_DBG("Invalid state transition: %s (%d) => %s (%d)", + ppp_state_str(current), current, + ppp_state_str(new), new); + } +} +#else +static inline void validate_state_transition(enum ppp_state current, + enum ppp_state new) +{ + ARG_UNUSED(current); + ARG_UNUSED(new); +} +#endif + +#if CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG +void ppp_change_state_debug(struct ppp_fsm *fsm, enum ppp_state new_state, + const char *caller, int line) +#else +void ppp_change_state(struct ppp_fsm *fsm, enum ppp_state new_state) +#endif +{ + NET_ASSERT(fsm); + + if (fsm->state == new_state) { + return; + } + + NET_ASSERT(new_state >= PPP_INITIAL && + new_state <= PPP_OPENED); + +#if CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG + NET_DBG("[%s/%p] state %s (%d) => %s (%d) (%s():%d)", + fsm->name, fsm, ppp_state_str(fsm->state), fsm->state, + ppp_state_str(new_state), new_state, caller, line); +#endif + + validate_state_transition(fsm->state, new_state); + + fsm->state = new_state; +} + +const char *ppp_option2str(enum ppp_protocol_type protocol, + int type) +{ +#if (CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG) || defined(CONFIG_NET_SHELL) + switch (protocol) { + case PPP_LCP: + switch (type) { + case LCP_OPTION_RESERVED: + return "RESERVED"; + case LCP_OPTION_MRU: + return "MRU"; + case LCP_OPTION_ASYNC_CTRL_CHAR_MAP: + return "ASYNC_CTRL_CHAR_MAP"; + case LCP_OPTION_AUTH_PROTO: + return "AUTH_PROTO"; + case LCP_OPTION_QUALITY_PROTO: + return "QUALITY_PROTO"; + case LCP_OPTION_MAGIC_NUMBER: + return "MAGIC_NUMBER"; + case LCP_OPTION_PROTO_COMPRESS: + return "PROTO_COMPRESS"; + case LCP_OPTION_ADDR_CTRL_COMPRESS: + return "ADDR_CTRL_COMPRESS"; + } + + break; + +#if defined(CONFIG_NET_IPV4) + case PPP_IPCP: + switch (type) { + case IPCP_OPTION_RESERVED: + return "RESERVED"; + case IPCP_OPTION_IP_ADDRESSES: + return "IP_ADDRESSES"; + case IPCP_OPTION_IP_COMP_PROTO: + 
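+		/* IPCP option numbering follows RFC 1332: IP-Addresses (1,
+		 * deprecated), IP-Compression-Protocol (2), IP-Address (3).
+		 */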
return "IP_COMPRESSION_PROTOCOL"; + case IPCP_OPTION_IP_ADDRESS: + return "IP_ADDRESS"; + } + + break; +#endif + + default: + break; + } +#else + ARG_UNUSED(type); +#endif + + return ""; +} + +void ppp_fsm_name_set(struct ppp_fsm *fsm, const char *name) +{ +#if CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG + fsm->name = name; +#else + ARG_UNUSED(fsm); + ARG_UNUSED(name); +#endif +} diff --git a/subsys/net/l2/ppp/network.c b/subsys/net/l2/ppp/network.c new file mode 100644 index 00000000000..a948018264b --- /dev/null +++ b/subsys/net/l2/ppp/network.c @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2019 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +LOG_MODULE_DECLARE(net_l2_ppp, CONFIG_NET_L2_PPP_LOG_LEVEL); + +#include +#include + +#include + +#include "net_private.h" + +#include "ppp_internal.h" + +void ppp_network_up(struct ppp_context *ctx, int proto) +{ + if (ctx->network_protos_up == 0) { + ppp_change_phase(ctx, PPP_RUNNING); + } + + ctx->network_protos_up++; + + NET_DBG("[%p] Proto %s (0x%04x) %s (%d)", ctx, ppp_proto2str(proto), + proto, "up", ctx->network_protos_up); +} + +void ppp_network_down(struct ppp_context *ctx, int proto) +{ + ctx->network_protos_up--; + + if (ctx->network_protos_up <= 0) { + ctx->network_protos_up = 0; + ppp_change_phase(ctx, PPP_TERMINATE); + } + + NET_DBG("[%p] Proto %s (0x%04x) %s (%d)", ctx, ppp_proto2str(proto), + proto, "down", ctx->network_protos_up); +} + +void ppp_network_done(struct ppp_context *ctx, int proto) +{ + ctx->network_protos_up--; + if (ctx->network_protos_up <= 0) { + const struct ppp_protocol_handler *proto = ppp_lcp_get(); + + if (proto) { + proto->close(ctx, "All networks down"); + } + } +} + +void ppp_network_all_down(struct ppp_context *ctx) +{ + struct ppp_protocol_handler *proto; + + for (proto = __net_ppp_proto_start; + proto != __net_ppp_proto_end; + proto++) { + if (proto->protocol != PPP_LCP && proto->lower_down) { + proto->lower_down(ctx); + } + + if (proto->protocol < 0xC000 && proto->close) { + ctx->network_protos_open--; + proto->close(ctx, "LCP down"); + } + } + + if (ctx->network_protos_open > 0) { + NET_WARN("Not all network protocols were closed (%d)", + ctx->network_protos_open); + } + + ctx->network_protos_open = 0; +} diff --git a/subsys/net/l2/ppp/options.c b/subsys/net/l2/ppp/options.c new file mode 100644 index 00000000000..a2baa67c3d3 --- /dev/null +++ b/subsys/net/l2/ppp/options.c @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2019 Intel Corporation. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +LOG_MODULE_DECLARE(net_l2_ppp, CONFIG_NET_L2_PPP_LOG_LEVEL); + +#include +#include + +#include + +#include "net_private.h" + +#include "ppp_internal.h" + +#define ALLOC_TIMEOUT K_MSEC(100) + +enum net_verdict ppp_parse_options(struct ppp_fsm *fsm, + struct net_pkt *pkt, + u16_t length, + struct ppp_option_pkt options[], + int options_len) +{ + int remaining = length, pkt_remaining; + enum net_verdict verdict; + u8_t opt_type, opt_len; + int ret, idx = 0; + + pkt_remaining = net_pkt_remaining_data(pkt); + if (remaining != pkt_remaining) { + NET_DBG("Expecting %d but pkt data length is %d bytes", + remaining, pkt_remaining); + verdict = NET_DROP; + goto out; + } + + while (remaining > 0) { + ret = net_pkt_read_u8(pkt, &opt_type); + if (ret < 0) { + NET_DBG("Cannot read %s (%d) (remaining len %d)", + "opt_type", ret, pkt_remaining); + verdict = NET_DROP; + goto out; + } + + ret = net_pkt_read_u8(pkt, &opt_len); + if (ret < 0) { + NET_DBG("Cannot read %s (%d) (remaining len %d)", + "opt_len", ret, remaining); + verdict = NET_DROP; + goto out; + } + + if (idx >= options_len) { + NET_DBG("Cannot insert options (max %d)", options_len); + verdict = NET_DROP; + goto out; + } + + options[idx].type.lcp = opt_type; + options[idx].len = opt_len; + + NET_DBG("[%s/%p] %s option %s (%d) len %d", fsm->name, fsm, + "Recv", ppp_option2str(fsm->protocol, opt_type), + opt_type, opt_len); + + if (opt_len > 2) { + /* There is an option value here */ + net_pkt_cursor_backup(pkt, &options[idx].value); + } + + net_pkt_skip(pkt, + opt_len - sizeof(opt_type) - sizeof(opt_len)); + remaining -= opt_len; + + idx++; + }; + + if (remaining < 0) { + verdict = NET_DROP; + goto out; + } + + verdict = NET_OK; + +out: + return verdict; +} + +struct net_buf *ppp_get_net_buf(struct net_buf *root_buf, u8_t len) +{ + struct net_buf *tmp; + + if (root_buf) { + tmp = net_buf_frag_last(root_buf); + + if (len > net_buf_tailroom(tmp)) { + tmp = net_pkt_get_reserve_tx_data(ALLOC_TIMEOUT); + if (tmp) { + net_buf_frag_add(root_buf, tmp); + } + } + + return tmp; + + } + + tmp = net_pkt_get_reserve_tx_data(ALLOC_TIMEOUT); + if (tmp) { + return tmp; + } + + return NULL; +} diff --git a/subsys/net/l2/ppp/ppp_internal.h b/subsys/net/l2/ppp/ppp_internal.h new file mode 100644 index 00000000000..f15af1996a5 --- /dev/null +++ b/subsys/net/l2/ppp/ppp_internal.h @@ -0,0 +1,163 @@ +/** @file + @brief PPP private header + + This is not to be included by the application. + */ + +/* + * Copyright (c) 2019 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include + +/** + * FSM flags that control how it operates. + */ +#define FSM_RESTART BIT(0) /**< Treat 2nd OPEN as DOWN followed by UP */ + +/** + * PPP packet format. + */ +struct ppp_packet { + u8_t code; + u8_t id; + u16_t length; +} __packed; + +/** Timeout in milliseconds */ +#define PPP_TIMEOUT K_SECONDS(3) + +/** Max Terminate-Request transmissions */ +#define MAX_TERMINATE_REQ CONFIG_NET_L2_PPP_MAX_TERMINATE_REQ_RETRANSMITS + +/** Max Configure-Request transmissions */ +#define MAX_CONFIGURE_REQ CONFIG_NET_L2_PPP_MAX_CONFIGURE_REQ_RETRANSMITS + +/** Max number of LCP options */ +#define MAX_LCP_OPTIONS CONFIG_NET_L2_PPP_MAX_OPTIONS + +/** Max number of IPCP options */ +#define MAX_IPCP_OPTIONS 3 + +/* + * Special alignment is needed for ppp_protocol_handler. This is the + * same issue as in net_if. See net_if.h __net_if_align for explanation. 
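Editor's note: ppp_parse_options() above walks a standard PPP Type/Length/Value option list, where the Length octet counts the Type and Length octets themselves. A hand-written example payload (not taken from the patch) carrying an MRU of 1500 and a Magic-Number:

```c
/* Example of the option layout the parser consumes: each option is
 * Type (1 octet), Length (1 octet, including these two octets) and
 * Length - 2 octets of value.
 */
static const u8_t example_lcp_conf_req_options[] = {
	0x01, 0x04, 0x05, 0xdc,             /* MRU (1), len 4, value 1500 */
	0x05, 0x06, 0x12, 0x34, 0x56, 0x78, /* Magic-Number (5), len 6 */
};
```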
+ */ +#define __ppp_proto_align __aligned(32) + +/** Protocol handler information. */ +struct ppp_protocol_handler { + /** Protocol init function */ + void (*init)(struct ppp_context *ctx); + + /** Process a received packet */ + enum net_verdict (*handler)(struct ppp_context *ctx, + struct net_if *iface, + struct net_pkt *pkt); + + /** Lower layer up */ + void (*lower_up)(struct ppp_context *ctx); + + /** Lower layer down */ + void (*lower_down)(struct ppp_context *ctx); + + /** Enable this protocol */ + void (*open)(struct ppp_context *ctx); + + /** Disable this protocol */ + void (*close)(struct ppp_context *ctx, const u8_t *reason); + + /** PPP protocol number */ + u16_t protocol; +} __ppp_proto_align; + +#define PPP_PROTO_GET_NAME(proto_name) \ + (ppp_protocol_handler_##proto_name) + +#define PPP_PROTOCOL_REGISTER(name, proto, init_func, proto_handler, \ + proto_lower_up, proto_lower_down, \ + proto_open, proto_close) \ + static const struct ppp_protocol_handler \ + (PPP_PROTO_GET_NAME(name)) __used \ + __attribute__((__section__(".net_ppp_proto.data"))) = { \ + .protocol = proto, \ + .init = init_func, \ + .handler = proto_handler, \ + .lower_up = proto_lower_up, \ + .lower_down = proto_lower_down, \ + .open = proto_open, \ + .close = proto_close, \ + } + +extern struct ppp_protocol_handler __net_ppp_proto_start[]; +extern struct ppp_protocol_handler __net_ppp_proto_end[]; + +const char *ppp_phase_str(enum ppp_phase phase); +const char *ppp_state_str(enum ppp_state state); +const char *ppp_proto2str(u16_t proto); +const char *ppp_pkt_type2str(enum ppp_packet_type type); +const char *ppp_option2str(enum ppp_protocol_type protocol, int type); +void ppp_fsm_name_set(struct ppp_fsm *fsm, const char *name); + +#if CONFIG_NET_L2_PPP_LOG_LEVEL < LOG_LEVEL_DBG +void ppp_change_phase(struct ppp_context *ctx, enum ppp_phase new_phase); +void ppp_change_state(struct ppp_fsm *fsm, enum ppp_state new_state); +#else +void ppp_change_phase_debug(struct ppp_context *ctx, + enum ppp_phase new_phase, + const char *caller, int line); + +#define ppp_change_phase(ctx, state) \ + ppp_change_phase_debug(ctx, state, __func__, __LINE__) + +#define ppp_change_state(fsm, state) \ + ppp_change_state_debug(fsm, state, __func__, __LINE__) + +void ppp_change_state_debug(struct ppp_fsm *fsm, enum ppp_state new_state, + const char *caller, int line); +#endif + +struct net_buf *ppp_get_net_buf(struct net_buf *root_buf, u8_t len); +int ppp_send_pkt(struct ppp_fsm *fsm, struct net_if *iface, + enum ppp_packet_type type, u8_t id, + void *data, u32_t data_len); + +void ppp_fsm_init(struct ppp_fsm *fsm, u16_t protocol); +void ppp_fsm_lower_up(struct ppp_fsm *fsm); +void ppp_fsm_lower_down(struct ppp_fsm *fsm); +void ppp_fsm_open(struct ppp_fsm *fsm); +void ppp_fsm_close(struct ppp_fsm *fsm, const u8_t *reason); +void ppp_fsm_proto_reject(struct ppp_fsm *fsm); +enum net_verdict ppp_fsm_input(struct ppp_fsm *fsm, u16_t proto, + struct net_pkt *pkt); +enum net_verdict ppp_fsm_recv_protocol_rej(struct ppp_fsm *fsm, + u8_t id, + struct net_pkt *pkt); +enum net_verdict ppp_fsm_recv_echo_req(struct ppp_fsm *fsm, + u8_t id, + struct net_pkt *pkt); +enum net_verdict ppp_fsm_recv_echo_reply(struct ppp_fsm *fsm, + u8_t id, + struct net_pkt *pkt); +enum net_verdict ppp_fsm_recv_discard_req(struct ppp_fsm *fsm, + u8_t id, + struct net_pkt *pkt); + +const struct ppp_protocol_handler *ppp_lcp_get(void); +enum net_verdict ppp_parse_options(struct ppp_fsm *fsm, + struct net_pkt *pkt, + u16_t length, + struct ppp_option_pkt options[], + int 
options_len); + +void ppp_link_established(struct ppp_context *ctx, struct ppp_fsm *fsm); +void ppp_link_terminated(struct ppp_context *ctx); +void ppp_link_down(struct ppp_context *ctx); +void ppp_link_needed(struct ppp_context *ctx); + +void ppp_network_up(struct ppp_context *ctx, int proto); +void ppp_network_down(struct ppp_context *ctx, int proto); +void ppp_network_done(struct ppp_context *ctx, int proto); +void ppp_network_all_down(struct ppp_context *ctx); diff --git a/subsys/net/l2/ppp/ppp_l2.c b/subsys/net/l2/ppp/ppp_l2.c new file mode 100644 index 00000000000..301cd79d595 --- /dev/null +++ b/subsys/net/l2/ppp/ppp_l2.c @@ -0,0 +1,381 @@ +/* + * Copyright (c) 2019 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +LOG_MODULE_REGISTER(net_l2_ppp, CONFIG_NET_L2_PPP_LOG_LEVEL); + +#include +#include +#include +#include +#include + +#include + +#include "net_private.h" + +#include "ppp_stats.h" +#include "ppp_internal.h" + +#define BUF_ALLOC_TIMEOUT K_MSEC(100) + +static const struct ppp_protocol_handler *ppp_lcp; + +static void ppp_update_rx_stats(struct net_if *iface, + struct net_pkt *pkt, size_t length) +{ +#if defined(CONFIG_NET_STATISTICS_PPP) + ppp_stats_update_bytes_rx(iface, length); + ppp_stats_update_pkts_rx(iface); +#endif /* CONFIG_NET_STATISTICS_PPP */ +} + +static void ppp_update_tx_stats(struct net_if *iface, + struct net_pkt *pkt, size_t length) +{ +#if defined(CONFIG_NET_STATISTICS_PPP) + ppp_stats_update_bytes_tx(iface, length); + ppp_stats_update_pkts_tx(iface); +#endif /* CONFIG_NET_STATISTICS_PPP */ +} + +#if defined(CONFIG_NET_TEST) +typedef enum net_verdict (*ppp_l2_callback_t)(struct net_if *iface, + struct net_pkt *pkt); + +static ppp_l2_callback_t testing_cb; + +void ppp_l2_register_pkt_cb(ppp_l2_callback_t cb) +{ + testing_cb = cb; +} +#endif + +static enum net_verdict process_ppp_msg(struct net_if *iface, + struct net_pkt *pkt) +{ + struct ppp_context *ctx = net_if_l2_data(iface); + enum net_verdict verdict = NET_DROP; + struct ppp_protocol_handler *proto; + u16_t protocol; + int ret; + + if (!ctx->is_init || !ctx->is_ready_to_serve) { + goto quit; + } + + ret = net_pkt_read_be16(pkt, &protocol); + if (ret < 0) { + goto quit; + } + + if ((IS_ENABLED(CONFIG_NET_IPV4) && protocol == PPP_IP) || + (IS_ENABLED(CONFIG_NET_IPV6) && protocol == PPP_IPV6)) { + /* Remove the protocol field so that IP packet processing + * continues properly in net_core.c:process_data() + */ + (void)net_buf_pull_be16(pkt->buffer); + net_pkt_cursor_init(pkt); + return NET_CONTINUE; + } + + for (proto = __net_ppp_proto_start; + proto != __net_ppp_proto_end; + proto++) { + if (proto->protocol != protocol) { + continue; + } + + return proto->handler(ctx, iface, pkt); + } + + NET_DBG("%s protocol %s%s(0x%02x)", + ppp_proto2str(protocol) ? "Unhandled" : "Unknown", + ppp_proto2str(protocol), + ppp_proto2str(protocol) ? " " : "", + protocol); + +quit: + return verdict; +} + +static enum net_verdict ppp_recv(struct net_if *iface, + struct net_pkt *pkt) +{ + enum net_verdict verdict; + +#if defined(CONFIG_NET_TEST) + /* If we are running a PPP unit test, then feed the packet + * back to test app for verification. 
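Editor's note: the CONFIG_NET_TEST hook referenced in this comment lets a unit test intercept every received frame before the L2 processes it. A hypothetical test-side usage (sketch only; the callback and setup names are assumptions):

```c
/* Hedged sketch of the CONFIG_NET_TEST hook usage.  Returning NET_DROP
 * lets the caller discard the packet after the test has inspected it.
 */
static enum net_verdict test_pkt_cb(struct net_if *iface, struct net_pkt *pkt)
{
	ARG_UNUSED(iface);

	/* verify pkt contents here */

	return NET_DROP;
}

static void install_test_hook(void)
{
	ppp_l2_register_pkt_cb(test_pkt_cb);
}
```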
+ */ + if (testing_cb) { + return testing_cb(iface, pkt); + } +#endif + + ppp_update_rx_stats(iface, pkt, net_pkt_get_len(pkt)); + + if (CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG) { + net_pkt_hexdump(pkt, "recv L2"); + } + + verdict = process_ppp_msg(iface, pkt); + + switch (verdict) { + case NET_OK: + net_pkt_unref(pkt); + break; + case NET_DROP: + ppp_stats_update_drop_rx(iface); + break; + case NET_CONTINUE: + break; + } + + return verdict; +} + +static int ppp_send(struct net_if *iface, struct net_pkt *pkt) +{ + const struct ppp_api *api = net_if_get_device(iface)->driver_api; + struct ppp_context *ctx = net_if_l2_data(iface); + int ret; + + if (CONFIG_NET_L2_PPP_LOG_LEVEL >= LOG_LEVEL_DBG) { + net_pkt_hexdump(pkt, "send L2"); + } + + if (!ctx->is_init) { + return -EIO; + } + + /* If PPP is not yet ready, then just give error to caller as there + * is no way to send before the PPP handshake is finished. + */ + if (ctx->phase != PPP_RUNNING && !net_pkt_is_ppp(pkt)) { + return -ENETDOWN; + } + + ret = api->send(net_if_get_device(iface), pkt); + if (!ret) { + ret = net_pkt_get_len(pkt); + ppp_update_tx_stats(iface, pkt, ret); + net_pkt_unref(pkt); + } + + return ret; +} + +static void ppp_lower_down(struct ppp_context *ctx) +{ + if (ppp_lcp) { + ppp_lcp->lower_down(ctx); + } +} + +static void ppp_lower_up(struct ppp_context *ctx) +{ + if (ppp_lcp) { + ppp_lcp->lower_up(ctx); + } +} + +static void start_ppp(struct ppp_context *ctx) +{ + ppp_change_phase(ctx, PPP_ESTABLISH); + + ppp_lower_up(ctx); + + if (ppp_lcp) { + NET_DBG("Starting LCP"); + ppp_lcp->open(ctx); + } +} + +static int ppp_enable(struct net_if *iface, bool state) +{ + const struct ppp_api *ppp = + net_if_get_device(iface)->driver_api; + struct ppp_context *ctx = net_if_l2_data(iface); + + if (!ctx->is_init) { + return -EIO; + } + + if (ctx->is_enabled == state) { + return 0; + } + + ctx->is_enabled = state; + + if (!state) { + ppp_lower_down(ctx); + + if (ppp->stop) { + ppp->stop(net_if_get_device(iface)); + } + } else { + if (ppp->start) { + ppp->start(net_if_get_device(iface)); + } + + start_ppp(ctx); + } + + return 0; +} + +static enum net_l2_flags ppp_flags(struct net_if *iface) +{ + struct ppp_context *ctx = net_if_l2_data(iface); + + return ctx->ppp_l2_flags; +} + +NET_L2_INIT(PPP_L2, ppp_recv, ppp_send, ppp_enable, ppp_flags); + +static void carrier_on(struct k_work *work) +{ + struct ppp_context *ctx = CONTAINER_OF(work, struct ppp_context, + carrier_mgmt.work); + + if (ctx->iface == NULL || ctx->carrier_mgmt.enabled) { + return; + } + + NET_DBG("Carrier ON for interface %p", ctx->iface); + + ppp_mgmt_raise_carrier_on_event(ctx->iface); + + ctx->carrier_mgmt.enabled = true; + + net_if_up(ctx->iface); +} + +static void carrier_off(struct k_work *work) +{ + struct ppp_context *ctx = CONTAINER_OF(work, struct ppp_context, + carrier_mgmt.work); + + if (ctx->iface == NULL) { + return; + } + + NET_DBG("Carrier OFF for interface %p", ctx->iface); + + ppp_lower_down(ctx); + + ppp_change_phase(ctx, PPP_DEAD); + + ppp_mgmt_raise_carrier_off_event(ctx->iface); + + net_if_carrier_down(ctx->iface); + + ctx->carrier_mgmt.enabled = false; +} + +static void handle_carrier(struct ppp_context *ctx, + k_work_handler_t handler) +{ + k_work_init(&ctx->carrier_mgmt.work, handler); + + k_work_submit(&ctx->carrier_mgmt.work); +} + +void net_ppp_carrier_on(struct net_if *iface) +{ + struct ppp_context *ctx = net_if_l2_data(iface); + + handle_carrier(ctx, carrier_on); +} + +void net_ppp_carrier_off(struct net_if *iface) +{ + struct 
ppp_context *ctx = net_if_l2_data(iface); + + handle_carrier(ctx, carrier_off); +} + +#if defined(CONFIG_NET_SHELL) +int net_ppp_ping(int idx, s32_t timeout) +{ + struct net_if *iface = net_if_get_by_index(idx); + struct ppp_context *ctx; + int ret; + + if (!iface) { + return -ENOENT; + } + + if (net_if_l2(iface) != &NET_L2_GET_NAME(PPP)) { + return -ENODEV; + } + + ctx = net_if_l2_data(iface); + + ctx->shell.echo_req_data = sys_rand32_get(); + + ret = ppp_send_pkt(&ctx->lcp.fsm, iface, PPP_ECHO_REQ, 0, + UINT_TO_POINTER(ctx->shell.echo_req_data), + sizeof(ctx->shell.echo_req_data)); + if (ret < 0) { + return ret; + } + + return k_sem_take(&ctx->shell.wait_echo_reply, timeout); +} +#endif + +const struct ppp_protocol_handler *ppp_lcp_get(void) +{ + return ppp_lcp; +} + +void net_ppp_init(struct net_if *iface) +{ + struct ppp_context *ctx = net_if_l2_data(iface); + const struct ppp_protocol_handler *proto; + + NET_DBG("Initializing PPP L2 %p for iface %p", ctx, iface); + + if (!ctx->is_init) { + memset(ctx, 0, sizeof(*ctx)); + } + + ctx->ppp_l2_flags = NET_L2_MULTICAST | NET_L2_POINT_TO_POINT; + ctx->iface = iface; + + if (!ctx->is_init) { + int count; + +#if defined(CONFIG_NET_SHELL) + k_sem_init(&ctx->shell.wait_echo_reply, 0, UINT_MAX); +#endif + + for (proto = __net_ppp_proto_start, count = 0; + proto != __net_ppp_proto_end; + proto++, count++) { + if (proto->protocol == PPP_LCP) { + ppp_lcp = proto; + } + + proto->init(ctx); + } + + if (count == 0) { + NET_ERR("There are no PPP protocols configured!"); + return; + } + + if (ppp_lcp == NULL) { + NET_ERR("No LCP found!"); + return; + } + } + + ctx->is_init = true; + ctx->is_ready_to_serve = true; +} diff --git a/subsys/net/l2/ppp/ppp_stats.c b/subsys/net/l2/ppp/ppp_stats.c new file mode 100644 index 00000000000..b76e623c153 --- /dev/null +++ b/subsys/net/l2/ppp/ppp_stats.c @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2019 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +LOG_MODULE_REGISTER(net_ppp_stats, CONFIG_NET_L2_PPP_LOG_LEVEL); + +#include +#include +#include +#include +#include + +#include "net_stats.h" + +#if defined(CONFIG_NET_STATISTICS_USER_API) + +static int ppp_stats_get(u32_t mgmt_request, struct net_if *iface, + void *data, size_t len) +{ + size_t len_chk = 0; + void *src = NULL; + const struct ppp_api *ppp; + + if (NET_MGMT_GET_COMMAND(mgmt_request) == + NET_REQUEST_STATS_CMD_GET_PPP) { + if (net_if_l2(iface) != &NET_L2_GET_NAME(PPP)) { + return -ENOENT; + } + + ppp = net_if_get_device(iface)->driver_api; + if (ppp->get_stats == NULL) { + return -ENOENT; + } + + len_chk = sizeof(struct net_stats_ppp); + src = ppp->get_stats(net_if_get_device(iface)); + } + + if (len != len_chk || !src) { + return -EINVAL; + } + + memcpy(data, src, len); + + return 0; +} + +NET_MGMT_REGISTER_REQUEST_HANDLER(NET_REQUEST_STATS_GET_PPP, + ppp_stats_get); + +#endif /* CONFIG_NET_STATISTICS_USER_API */ diff --git a/subsys/net/l2/ppp/ppp_stats.h b/subsys/net/l2/ppp/ppp_stats.h new file mode 100644 index 00000000000..fd7f333e69f --- /dev/null +++ b/subsys/net/l2/ppp/ppp_stats.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2019 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef __PPP_STATS_H__ +#define __PPP_STATS_H__ + +#if defined(CONFIG_NET_STATISTICS_PPP) + +#include +#include +#include + +static inline void ppp_stats_update_bytes_rx(struct net_if *iface, + u32_t bytes) +{ + const struct ppp_api *api = (const struct ppp_api *) + net_if_get_device(iface)->driver_api; + struct net_stats_ppp *stats; 
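+	/* Statistics collection is optional: drivers opt in by implementing
+	 * the get_stats hook in struct ppp_api.
+	 */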
+ + if (!api->get_stats) { + return; + } + + stats = api->get_stats(net_if_get_device(iface)); + if (!stats) { + return; + } + + stats->bytes.received += bytes; +} + +static inline void ppp_stats_update_bytes_tx(struct net_if *iface, + u32_t bytes) +{ + const struct ppp_api *api = (const struct ppp_api *) + net_if_get_device(iface)->driver_api; + struct net_stats_ppp *stats; + + if (!api->get_stats) { + return; + } + + stats = api->get_stats(net_if_get_device(iface)); + if (!stats) { + return; + } + + stats->bytes.sent += bytes; +} + +static inline void ppp_stats_update_pkts_rx(struct net_if *iface) +{ + const struct ppp_api *api = (const struct ppp_api *) + net_if_get_device(iface)->driver_api; + struct net_stats_ppp *stats; + + if (!api->get_stats) { + return; + } + + stats = api->get_stats(net_if_get_device(iface)); + if (!stats) { + return; + } + + stats->pkts.rx++; +} + +static inline void ppp_stats_update_pkts_tx(struct net_if *iface) +{ + const struct ppp_api *api = (const struct ppp_api *) + net_if_get_device(iface)->driver_api; + struct net_stats_ppp *stats; + + if (!api->get_stats) { + return; + } + + stats = api->get_stats(net_if_get_device(iface)); + if (!stats) { + return; + } + + stats->pkts.tx++; +} + +static inline void ppp_stats_update_drop_rx(struct net_if *iface) +{ + const struct ppp_api *api = ((const struct ppp_api *) + net_if_get_device(iface)->driver_api); + struct net_stats_ppp *stats; + + if (!api->get_stats) { + return; + } + + stats = api->get_stats(net_if_get_device(iface)); + if (!stats) { + return; + } + + stats->drop++; +} + +static inline void ppp_stats_update_fcs_error_rx(struct net_if *iface) +{ + const struct ppp_api *api = ((const struct ppp_api *) + net_if_get_device(iface)->driver_api); + struct net_stats_ppp *stats; + + if (!api->get_stats) { + return; + } + + stats = api->get_stats(net_if_get_device(iface)); + if (!stats) { + return; + } + + stats->chkerr++; +} + +#else /* CONFIG_NET_STATISTICS_PPP */ + +#define ppp_stats_update_bytes_rx(iface, bytes) +#define ppp_stats_update_bytes_tx(iface, bytes) +#define ppp_stats_update_pkts_rx(iface) +#define ppp_stats_update_pkts_tx(iface) +#define ppp_stats_update_drop_rx(iface) +#define ppp_stats_update_fcs_error_rx(iface) + +#endif /* CONFIG_NET_STATISTICS_PPP */ + +#endif /* __PPP_STATS_H__ */