diff --git a/include/zephyr/usb_c/usbc.h b/include/zephyr/usb_c/usbc.h new file mode 100644 index 00000000000..17a1e2d36e5 --- /dev/null +++ b/include/zephyr/usb_c/usbc.h @@ -0,0 +1,290 @@ +/* + * Copyright 2022 The Chromium OS Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * @brief USB-C Device APIs + * + * This file contains the USB-C Device APIs. + */ + +#ifndef ZEPHYR_INCLUDE_USBC_H_ +#define ZEPHYR_INCLUDE_USBC_H_ + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief USB-C Device APIs + * @defgroup _usbc_device_api USB-C Device API + * @{ + */ + +/** + * @brief This Request Data Object (RDO) value can be returned from the + * policy_cb_get_rdo if 5V@100mA with the following + * options are sufficient for the Sink to operate. + * + * The RDO is configured as follows: + * Maximum operating current 100mA + * Operating current 100mA + * Unchunked Extended Messages Not Supported + * No USB Suspend + * Not USB Communications Capable + * No capability mismatch + * Don't giveback + * Object position 1 (5V PDO) + */ +#define FIXED_5V_100MA_RDO 0x1100280a + +/** + * @brief Device Policy Manager requests + */ +enum usbc_policy_request_t { + /** No request */ + REQUEST_NOP, + /** Request Type-C layer to transition to Disabled State */ + REQUEST_TC_DISABLED, + /** Request Type-C layer to transition to Error Recovery State */ + REQUEST_TC_ERROR_RECOVERY, + /** End of Type-C requests */ + REQUEST_TC_END, + + /** Request Policy Engine layer to perform a Data Role Swap */ + REQUEST_PE_DR_SWAP, + /** Request Policy Engine layer to send a hard reset */ + REQUEST_PE_HARD_RESET_SEND, + /** Request Policy Engine layer to send a soft reset */ + REQUEST_PE_SOFT_RESET_SEND, + /** + * Request Policy Engine layer to get Source Capabilities from + * port partner + */ + REQUEST_PE_GET_SRC_CAPS, +}; + +/** + * @brief Device Policy Manager notifications + */ +enum usbc_policy_notify_t { + /** Power Delivery Accept 
message was received */
+	MSG_ACCEPT_RECEIVED,
+	/** Power Delivery Reject message was received */
+	MSG_REJECTED_RECEIVED,
+	/** Power Delivery discarded the message being transmitted */
+	MSG_DISCARDED,
+	/** Power Delivery Not Supported message was received */
+	MSG_NOT_SUPPORTED_RECEIVED,
+	/** Data Role has been set to Upstream Facing Port (UFP) */
+	DATA_ROLE_IS_UFP,
+	/** Data Role has been set to Downstream Facing Port (DFP) */
+	DATA_ROLE_IS_DFP,
+	/** A PD Explicit Contract is in place */
+	PD_CONNECTED,
+	/** No PD Explicit Contract is in place */
+	NOT_PD_CONNECTED,
+	/** Transition the Power Supply */
+	TRANSITION_PS,
+	/** Port partner is not responsive */
+	PORT_PARTNER_NOT_RESPONSIVE,
+	/** Protocol Error occurred */
+	PROTOCOL_ERROR,
+	/** Transition the Sink to default */
+	SNK_TRANSITION_TO_DEFAULT,
+	/** Hard Reset Received */
+	HARD_RESET_RECEIVED,
+	/** Sink SubPower state at 0V */
+	POWER_CHANGE_0A0,
+	/** Sink SubPower state at 5V / 500mA */
+	POWER_CHANGE_DEF,
+	/** Sink SubPower state at 5V / 1.5A */
+	POWER_CHANGE_1A5,
+	/** Sink SubPower state at 5V / 3A */
+	POWER_CHANGE_3A0,
+};
+
+/**
+ * @brief Device Policy Manager checks
+ */
+enum usbc_policy_check_t {
+	/** Check if Power Role Swap is allowed */
+	CHECK_POWER_ROLE_SWAP,
+	/** Check if Data Role Swap to DFP is allowed */
+	CHECK_DATA_ROLE_SWAP_TO_DFP,
+	/** Check if Data Role Swap to UFP is allowed */
+	CHECK_DATA_ROLE_SWAP_TO_UFP,
+	/** Check if Sink is at default level */
+	CHECK_SNK_AT_DEFAULT_LEVEL,
+};
+
+/**
+ * @brief Device Policy Manager Wait message notifications
+ */
+enum usbc_policy_wait_t {
+	/** The port partner is unable to meet the sink request at this time */
+	WAIT_SINK_REQUEST,
+	/** The port partner is unable to do a Power Role Swap at this time */
+	WAIT_POWER_ROLE_SWAP,
+	/** The port partner is unable to do a Data Role Swap at this time */
+	WAIT_DATA_ROLE_SWAP,
+	/** The port partner is unable to do a VCONN Swap at this time */
+	WAIT_VCONN_SWAP,
+};
+
+/**
@cond INTERNAL_HIDDEN */ +typedef int (*policy_cb_get_snk_cap_t)(const struct device *dev, + uint32_t **pdos, + int *num_pdos); +typedef void (*policy_cb_set_src_cap_t)(const struct device *dev, + const uint32_t *pdos, + const int num_pdos); +typedef bool (*policy_cb_check_t)(const struct device *dev, + const enum usbc_policy_check_t policy_check); +typedef bool (*policy_cb_wait_notify_t)(const struct device *dev, + const enum usbc_policy_wait_t wait_notify); +typedef void (*policy_cb_notify_t)(const struct device *dev, + const enum usbc_policy_notify_t policy_notify); +typedef uint32_t (*policy_cb_get_rdo_t)(const struct device *dev); +typedef bool (*policy_cb_is_snk_at_default_t)(const struct device *dev); +/** @endcond */ + +/** + * @brief Start the USB-C Subsystem + * + * @param dev Runtime device structure + * + * @retval 0 on success + */ +int usbc_start(const struct device *dev); + +/** + * @brief Suspend the USB-C Subsystem + * + * @param dev Runtime device structure + * + * @retval 0 on success + */ +int usbc_suspend(const struct device *dev); + +/** + * @brief Make a request of the USB-C Subsystem + * + * @param dev Runtime device structure + * @param req request + * + * @retval 0 on success + */ +int usbc_request(const struct device *dev, + const enum usbc_policy_request_t req); + +/** + * @brief Set pointer to Device Policy Manager (DPM) data + * + * @param dev Runtime device structure + * @param dpm_data pointer to dpm data + */ +void usbc_set_dpm_data(const struct device *dev, + void *dpm_data); + +/** + * @brief Get pointer to Device Policy Manager (DPM) data + * + * @param dev Runtime device structure + * + * @retval pointer to dpm data that was set with usbc_set_dpm_data + * @retval NULL if dpm data was not set + */ +void *usbc_get_dpm_data(const struct device *dev); + +/** + * @brief Set the callback used to set VCONN control + * + * @param dev Runtime device structure + * @param cb VCONN control callback + */ +void usbc_set_vconn_control_cb(const 
struct device *dev, + const tcpc_vconn_control_cb_t cb); + +/** + * @brief Set the callback used to check a policy + * + * @param dev Runtime device structure + * @param cb callback + */ +void usbc_set_policy_cb_check(const struct device *dev, + const policy_cb_check_t cb); + +/** + * @brief Set the callback used to notify Device Policy Manager of a + * policy change + * + * @param dev Runtime device structure + * @param cb callback + */ +void usbc_set_policy_cb_notify(const struct device *dev, + const policy_cb_notify_t cb); + +/** + * @brief Set the callback used to notify Device Policy Manager of WAIT + * message reception + * + * @param dev Runtime device structure + * @param cb callback + */ +void usbc_set_policy_cb_wait_notify(const struct device *dev, + const policy_cb_wait_notify_t cb); + +/** + * @brief Set the callback used to get the Sink Capabilities + * + * @param dev Runtime device structure + * @param cb callback + */ +void usbc_set_policy_cb_get_snk_cap(const struct device *dev, + const policy_cb_get_snk_cap_t cb); + +/** + * @brief Set the callback used to store the received Port Partner's + * Source Capabilities + * + * @param dev Runtime device structure + * @param cb callback + */ +void usbc_set_policy_cb_set_src_cap(const struct device *dev, + const policy_cb_set_src_cap_t cb); + +/** + * @brief Set the callback used to get the Request Data Object (RDO) + * + * @param dev Runtime device structure + * @param cb callback + */ +void usbc_set_policy_cb_get_rdo(const struct device *dev, + const policy_cb_get_rdo_t cb); + +/** + * @brief Set the callback used to check if the sink power supply is at + * the default level + * + * @param dev Runtime device structure + * @param cb callback + */ +void usbc_set_policy_cb_is_snk_at_default(const struct device *dev, + const policy_cb_is_snk_at_default_t cb); +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* ZEPHYR_INCLUDE_USBC_H_ */ diff --git a/subsys/Kconfig b/subsys/Kconfig index 
3f6830fa434..423a5cf42a3 100644 --- a/subsys/Kconfig +++ b/subsys/Kconfig @@ -48,6 +48,8 @@ source "subsys/stats/Kconfig" source "subsys/usb/device/Kconfig" +source "subsys/usb/usb_c/Kconfig" + source "subsys/sd/Kconfig" source "subsys/dfu/Kconfig" diff --git a/subsys/usb/CMakeLists.txt b/subsys/usb/CMakeLists.txt index a8cf1a69c12..f03282ccce6 100644 --- a/subsys/usb/CMakeLists.txt +++ b/subsys/usb/CMakeLists.txt @@ -2,3 +2,4 @@ # SPDX-License-Identifier: Apache-2.0 add_subdirectory_ifdef(CONFIG_USB_DEVICE_STACK device) +add_subdirectory_ifdef(CONFIG_USBC_STACK usb_c) diff --git a/subsys/usb/usb_c/CMakeLists.txt b/subsys/usb/usb_c/CMakeLists.txt new file mode 100644 index 00000000000..94df8349389 --- /dev/null +++ b/subsys/usb/usb_c/CMakeLists.txt @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: Apache-2.0 + +zephyr_library() + +zephyr_library_sources_ifdef(CONFIG_USBC_STACK usbc_timer.c usbc_stack.c usbc_tc.c usbc_prl.c usbc_pe.c) diff --git a/subsys/usb/usb_c/Kconfig b/subsys/usb/usb_c/Kconfig new file mode 100644 index 00000000000..778b0f2be34 --- /dev/null +++ b/subsys/usb/usb_c/Kconfig @@ -0,0 +1,40 @@ +# USB-C stack configuration options + +# Copyright (c) 2022 The Chromium OS Authors +# SPDX-License-Identifier: Apache-2.0 + +menuconfig USBC_STACK + bool "USB-C Stack Support" + select SMF + select SMF_ANCESTOR_SUPPORT + select USBC_TCPC_DRIVER + select USBC_VBUS_DRIVER + help + Enable the USB-C Stack. Note that each USB-C port gets its own thread. + +if USBC_STACK + +config USBC_THREAD_PRIORITY + int "USB-C thread priority" + default 0 + help + Set thread priority of the USB-C + +config USBC_STACK_SIZE + int "USB-C thread stack size" + default 1024 + help + Stack size of thread created for each instance. + +config USBC_STATE_MACHINE_CYCLE_TIME + int "USB-C state machine cycle time in milliseconds" + default 5 + help + The USB-C state machine is run in a loop and the cycle time is the + delay before running the loop again. 
+ +module = USBC_STACK +module-str = usbc stack +source "subsys/logging/Kconfig.template.log_config" + +endif # USBC_STACK diff --git a/subsys/usb/usb_c/usbc_pe.c b/subsys/usb/usb_c/usbc_pe.c new file mode 100644 index 00000000000..dd641a9d674 --- /dev/null +++ b/subsys/usb/usb_c/usbc_pe.c @@ -0,0 +1,1783 @@ +/* + * Copyright (c) 2022 The Chromium OS Authors + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include +#include +LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL); + +#include "usbc_stack.h" + +/** + * @brief The HardResetCounter is used to retry the Hard Reset whenever there + * is no response from the remote device (see Section 6.6.6) + */ +#define N_HARD_RESET_COUNT 2 + +/** + * @brief Policy Engine Layer Flags + */ +enum pe_flags { + /** Accept message received from port partner */ + PE_FLAGS_ACCEPT = 0, + /** + * Protocol Error was determined based on error recovery + * current state + */ + PE_FLAGS_PROTOCOL_ERROR = 1, + /** A message we requested to be sent has been transmitted */ + PE_FLAGS_TX_COMPLETE = 2, + /** A message sent by a port partner has been received */ + PE_FLAGS_MSG_RECEIVED = 3, + /** + * A hard reset has been requested by the DPM but has not been sent, + * not currently used + */ + PE_FLAGS_HARD_RESET_PENDING = 4, + /** An explicit contract is in place with our port partner */ + PE_FLAGS_EXPLICIT_CONTRACT = 5, + /** + * Waiting for Sink Capabailities timed out. Used for retry error + * handling + */ + PE_FLAGS_SNK_WAIT_CAP_TIMEOUT = 6, + /** + * Flag to note current Atomic Message Sequence (AMS) is interruptible. + * If this flag is not set the AMS is non-interruptible. This flag must + * be set in the interruptible's message state entry. 
+ */ + PE_FLAGS_INTERRUPTIBLE_AMS = 7, + /** Flag to trigger sending a Data Role Swap */ + PE_FLAGS_DR_SWAP_TO_DFP = 8, + /** Flag is set when an AMS is initiated by the Device Policy Manager */ + PE_FLAGS_DPM_INITIATED_AMS = 9, + /** Flag to note message was discarded due to incoming message */ + PE_FLAGS_MSG_DISCARDED = 10, + /** Flag to trigger sending a soft reset */ + PE_FLAGS_SEND_SOFT_RESET = 11, + /** + * This flag is set when a Wait message is received in response to a + * Sink REQUEST + */ + PE_FLAGS_WAIT_SINK_REQUEST = 12, + /** + * This flag is set when a Wait message is received in response to a + * Data Role Swap + */ + PE_FLAGS_WAIT_DATA_ROLE_SWAP = 13 +}; + +/** + * @brief Policy Engine Layer States + */ +enum usbc_pe_state { + /** PE_SNK_Startup */ + PE_SNK_STARTUP, + /** PE_SNK_Discovery */ + PE_SNK_DISCOVERY, + /** PE_SNK_Wait_for_Capabilities */ + PE_SNK_WAIT_FOR_CAPABILITIES, + /** PE_SNK_Evaluate_Capability */ + PE_SNK_EVALUATE_CAPABILITY, + /** PE_SNK_Select_Capability */ + PE_SNK_SELECT_CAPABILITY, + /** PE_SNK_Transition_Sink */ + PE_SNK_TRANSITION_SINK, + /** PE_SNK_Ready */ + PE_SNK_READY, + /** PE_SNK_Hard_Reset */ + PE_SNK_HARD_RESET, + /** PE_SNK_Transition_to_default */ + PE_SNK_TRANSITION_TO_DEFAULT, + /** PE_SNK_Give_Sink_Cap */ + PE_SNK_GIVE_SINK_CAP, + /** PE_SNK_Get_Source_Cap */ + PE_SNK_GET_SOURCE_CAP, + /**PE_Send_Soft_Reset */ + PE_SEND_SOFT_RESET, + /** PE_Soft_Reset */ + PE_SOFT_RESET, + /** PE_Send_Not_Supported */ + PE_SEND_NOT_SUPPORTED, + /** PE_DRS_Evaluate_Swap */ + PE_DRS_EVALUATE_SWAP, + /** PE_DRS_Send_Swap */ + PE_DRS_SEND_SWAP, + /** PE_SNK_Chunk_Received */ + PE_SNK_CHUNK_RECEIVED, + + /** PE_Suspend. Not part of the PD specification. 
*/ + PE_SUSPEND, +}; + +static const struct smf_state pe_states[]; +static void pe_set_state(const struct device *dev, + const enum usbc_pe_state state); +static enum usbc_pe_state pe_get_state(const struct device *dev); +static enum usbc_pe_state pe_get_last_state(const struct device *dev); +static void pe_send_soft_reset(const struct device *dev, + const enum pd_packet_type type); +static void policy_notify(const struct device *dev, + const enum usbc_policy_notify_t notify); + +/** + * @brief Initializes the PE state machine and enters the PE_SUSPEND state. + */ +void pe_subsys_init(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + struct policy_engine *pe = data->pe; + + /* Save the port device object so states can access it */ + pe->dev = dev; + + /* Initialize the state machine */ + smf_set_initial(SMF_CTX(pe), &pe_states[PE_SUSPEND]); +} + +/** + * @brief Starts the Policy Engine layer + */ +void pe_start(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + + data->pe_enabled = true; +} + +/** + * @brief Suspend the Policy Engine layer + */ +void pe_suspend(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + + data->pe_enabled = false; + + /* + * While we are paused, exit all states + * and wait until initialized again. 
+ */ + pe_set_state(dev, PE_SUSPEND); +} + +/** + * @brief Initialize the Policy Engine layer + */ +void pe_init(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + struct policy_engine *pe = data->pe; + + pe->flags = ATOMIC_INIT(0); + + usbc_timer_init(&pe->pd_t_typec_sink_wait_cap, PD_T_TYPEC_SINK_WAIT_CAP_MAX_MS); + usbc_timer_init(&pe->pd_t_sender_response, PD_T_SENDER_RESPONSE_NOM_MS); + usbc_timer_init(&pe->pd_t_ps_transition, PD_T_SPR_PS_TRANSITION_NOM_MS); + usbc_timer_init(&pe->pd_t_chunking_not_supported, PD_T_CHUNKING_NOT_SUPPORTED_NOM_MS); + usbc_timer_init(&pe->pd_t_wait_to_resend, PD_T_SINK_REQUEST_MIN_MS); + + pe->data_role = TC_ROLE_UFP; + pe->hard_reset_counter = 0; + + pe_set_state(dev, PE_SNK_STARTUP); +} + +/** + * @brief Tests if the Policy Engine layer is running + */ +bool pe_is_running(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + + return data->pe_sm_state == SM_RUN; +} + +/** + * @brief Run the Policy Engine layer + */ +void pe_run(const struct device *dev, + const int32_t dpm_request) +{ + struct usbc_port_data *data = dev->data; + struct policy_engine *pe = data->pe; + + switch (data->pe_sm_state) { + case SM_PAUSED: + if (data->pe_enabled == false) { + break; + } + /* fall through */ + case SM_INIT: + pe_init(dev); + data->pe_sm_state = SM_RUN; + /* fall through */ + case SM_RUN: + if (data->pe_enabled == false) { + data->pe_sm_state = SM_PAUSED; + break; + } + + if (prl_is_running(dev) == false) { + break; + } + + /* + * 8.3.3.3.8 PE_SNK_Hard_Reset State + * The Policy Engine Shall transition to the PE_SNK_Hard_Reset + * state from any state when: + * - Hard Reset request from Device Policy Manager + */ + if (dpm_request == REQUEST_PE_HARD_RESET_SEND) { + pe_set_state(dev, PE_SNK_HARD_RESET); + } else { + /* Pass the DPM request along to the state machine */ + pe->dpm_request = dpm_request; + } + + /* Run state machine */ + smf_run_state(SMF_CTX(pe)); + break; + } +} + +/** + * @brief 
Gets the current data role + */ +enum tc_data_role pe_get_data_role(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + + return data->pe->data_role; +} + +/** + * @brief Gets the current power role + */ +enum tc_power_role pe_get_power_role(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + + return data->pe->power_role; +} + +/** + * @brief Gets the current cable plug role + */ +enum tc_cable_plug pe_get_cable_plug(const struct device *dev) +{ + return PD_PLUG_FROM_DFP_UFP; +} + +/** + * @brief Informs the Policy Engine that a soft reset was received. + */ +void pe_got_soft_reset(const struct device *dev) +{ + /* + * The PE_SRC_Soft_Reset state Shall be entered from any state when a + * Soft_Reset Message is received from the Protocol Layer. + */ + pe_set_state(dev, PE_SOFT_RESET); +} + +/** + * @brief Informs the Policy Engine that a message was successfully sent + */ +void pe_message_sent(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + struct policy_engine *pe = data->pe; + + atomic_set_bit(&pe->flags, PE_FLAGS_TX_COMPLETE); +} + +/** + * @brief Informs the Policy Engine of an error. + */ +void pe_report_error(const struct device *dev, + const enum pe_error e, + const enum pd_packet_type type) +{ + struct usbc_port_data *data = dev->data; + struct policy_engine *pe = data->pe; + + /* + * Generate Hard Reset if Protocol Error occurred + * while in PE_Send_Soft_Reset state. 
 */
+	if (pe_get_state(dev) == PE_SEND_SOFT_RESET) {
+		pe_set_state(dev, PE_SNK_HARD_RESET);
+		return;
+	}
+
+	/*
+	 * See section 8.3.3.4.1.1 PE_SRC_Send_Soft_Reset State:
+	 *
+	 * The PE_Send_Soft_Reset state shall be entered from
+	 * any state when
+	 * * A Protocol Error is detected by Protocol Layer during a
+	 *   Non-Interruptible AMS or
+	 * * A message has not been sent after retries or
+	 * * When not in an explicit contract and
+	 *   * Protocol Errors occurred on SOP during an Interruptible AMS or
+	 *   * Protocol Errors occurred on SOP during any AMS where the first
+	 *     Message in the sequence has not yet been sent i.e. an unexpected
+	 *     Message is received instead of the expected GoodCRC Message
+	 *     response.
+	 */
+	/* All error types besides transmit errors are Protocol Errors. */
+	if ((e != ERR_XMIT &&
+	     atomic_test_bit(&pe->flags, PE_FLAGS_INTERRUPTIBLE_AMS) == false) ||
+	    e == ERR_XMIT ||
+	    (atomic_test_bit(&pe->flags, PE_FLAGS_EXPLICIT_CONTRACT) == false &&
+	     type == PD_PACKET_SOP)) {
+		policy_notify(dev, PROTOCOL_ERROR);
+		pe_send_soft_reset(dev, type);
+	}
+	/*
+	 * Transition to PE_Snk_Ready by a Protocol
+	 * Error during an Interruptible AMS.
+	 */
+	else {
+		pe_set_state(dev, PE_SNK_READY);
+	}
+}
+
+/**
+ * @brief Informs the Policy Engine of a discard.
+ */
+void pe_report_discard(const struct device *dev)
+{
+	struct usbc_port_data *data = dev->data;
+	struct policy_engine *pe = data->pe;
+
+	/*
+	 * Clear local AMS indicator as our AMS message was discarded, and flag
+	 * the discard for the PE
+	 */
+	atomic_clear_bit(&pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
+	atomic_set_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED);
+}
+
+/**
+ * @brief Called by the Protocol Layer to inform the Policy Engine
+ *	  that a message has been received.
+ */
+void pe_message_received(const struct device *dev)
+{
+	struct usbc_port_data *data = dev->data;
+	struct policy_engine *pe = data->pe;
+
+	atomic_set_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED);
+}
+
+/**
+ * @brief Informs the Policy Engine that a hard reset was received.
+ */
+void pe_got_hard_reset(const struct device *dev)
+{
+	pe_set_state(dev, PE_SNK_TRANSITION_TO_DEFAULT);
+}
+
+/**
+ * @brief Informs the Policy Engine that a hard reset was sent.
+ */
+void pe_hard_reset_sent(const struct device *dev)
+{
+	struct usbc_port_data *data = dev->data;
+	struct policy_engine *pe = data->pe;
+
+	atomic_clear_bit(&pe->flags, PE_FLAGS_HARD_RESET_PENDING);
+}
+
+/**
+ * @brief Indicates if an explicit contract is in place
+ */
+bool pe_is_explicit_contract(const struct device *dev)
+{
+	struct usbc_port_data *data = dev->data;
+	struct policy_engine *pe = data->pe;
+
+	return atomic_test_bit(&pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
+}
+
+/**
+ * @brief Return true if the PE is within an atomic messaging sequence
+ *	  that it initiated with a SOP* port partner.
+ */ +bool pe_dpm_initiated_ams(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + struct policy_engine *pe = data->pe; + + return atomic_test_bit(&pe->flags, PE_FLAGS_DPM_INITIATED_AMS); +} + +/** Private Policy Engine Layer API below */ + +/** + * @brief Sets a Policy Engine state + */ +static void pe_set_state(const struct device *dev, + const enum usbc_pe_state state) +{ + struct usbc_port_data *data = dev->data; + + smf_set_state(SMF_CTX(data->pe), &pe_states[state]); +} + +/** + * @brief Get the Policy Engine's current state + */ +static enum usbc_pe_state pe_get_state(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + + return data->pe->ctx.current - &pe_states[0]; +} + +/** + * @brief Get the Policy Engine's previous state + */ +static enum usbc_pe_state pe_get_last_state(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + + return data->pe->ctx.previous - &pe_states[0]; +} + +/** + * @brief Send a soft reset message + */ +static void pe_send_soft_reset(const struct device *dev, + const enum pd_packet_type type) +{ + struct usbc_port_data *data = dev->data; + + data->pe->soft_reset_sop = type; + pe_set_state(dev, PE_SEND_SOFT_RESET); +} + +/** + * @brief Send a Power Delivery Data Message + */ +static inline void send_data_msg(const struct device *dev, + const enum pd_packet_type type, + const enum pd_data_msg_type msg) +{ + struct usbc_port_data *data = dev->data; + struct policy_engine *pe = data->pe; + + /* Clear any previous TX status before sending a new message */ + atomic_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE); + prl_send_data_msg(dev, type, msg); +} + +/** + * @brief Send a Power Delivery Control Message + */ +static inline void send_ctrl_msg(const struct device *dev, + const enum pd_packet_type type, + const enum pd_ctrl_msg_type msg) +{ + struct usbc_port_data *data = dev->data; + struct policy_engine *pe = data->pe; + + /* Clear any previous TX status before sending a new 
message */ + atomic_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE); + prl_send_ctrl_msg(dev, type, msg); +} + +/** + * @brief Request desired voltage from source. + */ +static void pe_send_request_msg(const struct device *dev, + const uint32_t rdo) +{ + struct usbc_port_data *data = dev->data; + struct protocol_layer_tx_t *prl_tx = data->prl_tx; + struct pd_msg *msg = &prl_tx->emsg; + uint8_t rdo_bytes[4]; + + msg->len = sizeof(rdo); + sys_put_le32(rdo, rdo_bytes); + memcpy(msg->data, rdo_bytes, msg->len); + send_data_msg(dev, PD_PACKET_SOP, PD_DATA_REQUEST); +} + +/** + * @brief Transitions state after receiving an extended message. + */ +static void extended_message_not_supported(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + struct protocol_layer_rx_t *prl_rx = data->prl_rx; + uint32_t *payload = (uint32_t *)prl_rx->emsg.data; + union pd_ext_header ext_header; + + ext_header.raw_value = *payload; + + if (ext_header.chunked && + ext_header.data_size > PD_MAX_EXTENDED_MSG_CHUNK_LEN) { + pe_set_state(dev, PE_SNK_CHUNK_RECEIVED); + } else { + pe_set_state(dev, PE_SEND_NOT_SUPPORTED); + } +} + +/** + * @brief Handle common DPM requests + * + * @retval True if the request was handled, else False + */ +static bool common_dpm_requests(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + struct policy_engine *pe = data->pe; + + if (pe->dpm_request > REQUEST_TC_END) { + atomic_set_bit(&pe->flags, PE_FLAGS_DPM_INITIATED_AMS); + + if (pe->dpm_request == REQUEST_PE_DR_SWAP) { + pe_set_state(dev, PE_DRS_SEND_SWAP); + } else if (pe->dpm_request == REQUEST_PE_SOFT_RESET_SEND) { + pe_set_state(dev, PE_SEND_SOFT_RESET); + } + return true; + } + + return false; +} + +/** + * @brief Handle sink-specific DPM requests + */ +static bool sink_dpm_requests(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + struct policy_engine *pe = data->pe; + + if (pe->dpm_request > REQUEST_TC_END) { + atomic_set_bit(&pe->flags, 
PE_FLAGS_DPM_INITIATED_AMS); + + if (pe->dpm_request == REQUEST_PE_GET_SRC_CAPS) { + pe_set_state(dev, PE_SNK_GET_SOURCE_CAP); + } + return true; + } + + return false; +} + +/** + * @brief Check if a specific control message was received + */ +static bool received_control_message(const struct device *dev, + const union pd_header header, + const enum pd_ctrl_msg_type mt) +{ + struct usbc_port_data *data = dev->data; + struct protocol_layer_rx_t *prl_rx = data->prl_rx; + + if (prl_rx->emsg.len == 0 && + header.message_type == mt && + header.extended == 0) { + return true; + } + + return false; +} + +/** + * @brief Check if a specific data message was received + */ +static bool received_data_message(const struct device *dev, + const union pd_header header, + const enum pd_data_msg_type mt) +{ + struct usbc_port_data *data = dev->data; + struct protocol_layer_rx_t *prl_rx = data->prl_rx; + + if (prl_rx->emsg.len > 0 && + header.message_type == mt && + header.extended == 0) { + return true; + } + + return false; +} + +/** + * @brief Check a DPM policy + */ +static bool policy_check(const struct device *dev, + const enum usbc_policy_check_t pc) +{ + struct usbc_port_data *data = dev->data; + + if (data->policy_cb_check) { + return data->policy_cb_check(dev, pc); + } else { + return false; + } +} + +/** + * @brief Notify the DPM of a policy change + */ +static void policy_notify(const struct device *dev, + const enum usbc_policy_notify_t notify) +{ + struct usbc_port_data *data = dev->data; + + if (data->policy_cb_notify) { + data->policy_cb_notify(dev, notify); + } +} + +/** + * @brief Notify the DPM of a WAIT message reception + */ +static bool policy_wait_notify(const struct device *dev, + const enum usbc_policy_wait_t notify) +{ + struct usbc_port_data *data = dev->data; + + if (data->policy_cb_wait_notify) { + return data->policy_cb_wait_notify(dev, notify); + } + + return false; +} + +/** + * @brief Send the received source caps to the DPM + */ +static void 
policy_set_src_cap(const struct device *dev, + const uint32_t *pdos, + const int num_pdos) +{ + struct usbc_port_data *data = dev->data; + + if (data->policy_cb_set_src_cap) { + data->policy_cb_set_src_cap(dev, pdos, num_pdos); + } +} + +/** + * @brief Get a Request Data Object from the DPM + */ +static uint32_t policy_get_request_data_object(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + + /* This callback must be implemented */ + __ASSERT(data->policy_cb_get_request_data_object != NULL, + "Callback pointer should not be NULL"); + + return data->policy_cb_get_rdo(dev); +} + +/** + * @brief Check if the sink is a default level + */ +static bool policy_is_snk_at_default(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + + if (data->policy_cb_is_snk_at_default) { + return data->policy_cb_is_snk_at_default(dev); + } + + return true; +} + +/** + * @brief Get sink caps from the DPM + */ +static void policy_get_snk_cap(const struct device *dev, + uint32_t **pdos, + int *num_pdos) +{ + struct usbc_port_data *data = dev->data; + + /* This callback must be implemented */ + __ASSERT(data->policy_cb_get_snk_cap != NULL, + "Callback pointer should not be NULL"); + + data->policy_cb_get_snk_cap(dev, pdos, num_pdos); +} + +/** + * @brief PE_SNK_Startup Entry State + */ +static void pe_snk_startup_entry(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + + LOG_INF("PE_SNK_Startup"); + + /* Reset the protocol layer */ + prl_reset(dev); + + /* Set power role to Sink */ + pe->power_role = TC_ROLE_SINK; + + /* Invalidate explicit contract */ + atomic_clear_bit(&pe->flags, PE_FLAGS_EXPLICIT_CONTRACT); + + policy_notify(dev, NOT_PD_CONNECTED); +} + +/** + * @brief PE_SNK_Startup Run State + */ +static void pe_snk_startup_run(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + + /* + * Once the reset process completes, 
the Policy Engine Shall + * transition to the PE_SNK_Discovery state + */ + if (prl_is_running(dev)) { + pe_set_state(dev, PE_SNK_DISCOVERY); + } +} + +/** + * @brief PE_SNK_Discovery Entry State + */ +static void pe_snk_discovery_entry(void *obj) +{ + LOG_INF("PE_SNK_Discovery"); +} + +/** + * @brief PE_SNK_Discovery Run State + */ +static void pe_snk_discovery_run(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + struct usbc_port_data *data = dev->data; + const struct device *vbus = data->vbus; + + /* + * Transition to the PE_SNK_Wait_for_Capabilities state when + * VBUS has been detected + */ + if (usbc_vbus_check_level(vbus, TC_VBUS_PRESENT)) { + pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES); + } +} + +/** + * @brief PE_SNK_Wait_For_Capabilities Entry State + */ +static void pe_snk_wait_for_capabilities_entry(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + + LOG_INF("PE_SNK_Wait_For_Capabilities"); + + /* Initialize and start the SinkWaitCapTimer */ + usbc_timer_start(&pe->pd_t_typec_sink_wait_cap); +} + +/** + * @brief PE_SNK_Wait_For_Capabilities Run State + */ +static void pe_snk_wait_for_capabilities_run(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + struct usbc_port_data *data = dev->data; + struct protocol_layer_rx_t *prl_rx = data->prl_rx; + union pd_header header; + + /* + * Transition to the PE_SNK_Evaluate_Capability state when: + * 1) A Source_Capabilities Message is received. + */ + if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) { + header = prl_rx->emsg.header; + if ((header.extended == false) && + received_data_message(dev, header, PD_DATA_SOURCE_CAP)) { + pe_set_state(dev, PE_SNK_EVALUATE_CAPABILITY); + return; + } + } + + /* When the SinkWaitCapTimer times out, perform a Hard Reset. 
*/ + if (usbc_timer_expired(&pe->pd_t_typec_sink_wait_cap)) { + atomic_set_bit(&pe->flags, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT); + pe_set_state(dev, PE_SNK_HARD_RESET); + } +} + +/** + * @brief PE_SNK_Wait_For_Capabilities Exit State + */ +static void pe_snk_wait_for_capabilities_exit(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + + /* Stop SinkWaitCapTimer */ + usbc_timer_stop(&pe->pd_t_typec_sink_wait_cap); +} + +/** + * @brief PE_SNK_Evaluate_Capability Entry State + */ +static void pe_snk_evaluate_capability_entry(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + struct usbc_port_data *data = dev->data; + struct protocol_layer_rx_t *prl_rx = data->prl_rx; + union pd_header header; + uint32_t *pdos = (uint32_t *)prl_rx->emsg.data; + uint32_t num_pdo_objs = PD_CONVERT_BYTES_TO_PD_HEADER_COUNT(prl_rx->emsg.len); + + LOG_INF("PE_SNK_Evaluate_Capability"); + + header = prl_rx->emsg.header; + + /* Reset Hard Reset counter to zero */ + pe->hard_reset_counter = 0; + + /* Set to highest revision supported by both ports */ + prl_set_rev(dev, PD_PACKET_SOP, MIN(PD_REV30, header.specification_revision)); + + /* Send source caps to Device Policy Manager for saving */ + policy_set_src_cap(dev, pdos, num_pdo_objs); + + /* Transition to PE_Snk_Select_Capability */ + pe_set_state(dev, PE_SNK_SELECT_CAPABILITY); +} + +/** + * @brief PE_SNK_Select_Capability Entry State + */ +static void pe_snk_select_capability_entry(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + uint32_t rdo; + + LOG_INF("PE_SNK_Select_Capability"); + + /* Get selected source cap from Device Policy Manager */ + rdo = policy_get_request_data_object(dev); + + /* Send Request */ + pe_send_request_msg(dev, rdo); + /* Inform Device Policy Manager that we are PD Connected */ + policy_notify(dev, PD_CONNECTED); +} + + +/** + * @brief PE_SNK_Select_Capability Run State + */ 
+static void pe_snk_select_capability_run(void *obj)
+{
+	struct policy_engine *pe = (struct policy_engine *)obj;
+	const struct device *dev = pe->dev;
+	struct usbc_port_data *data = dev->data;
+	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
+	union pd_header header;
+
+	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
+		/*
+		 * The sent REQUEST message was discarded. This can be at
+		 * the start of an AMS or in the middle. Handle what to
+		 * do based on where we came from.
+		 * 1) PE_SNK_EVALUATE_CAPABILITY: sends SoftReset
+		 * 2) PE_SNK_READY: goes back to SNK Ready
+		 */
+		if (pe_get_last_state(dev) == PE_SNK_EVALUATE_CAPABILITY) {
+			pe_send_soft_reset(dev, PD_PACKET_SOP);
+		} else {
+			pe_set_state(dev, PE_SNK_READY);
+		}
+	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) {
+		/* Start the SenderResponseTimer */
+		usbc_timer_start(&pe->pd_t_sender_response);
+	}
+
+	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) {
+		header = prl_rx->emsg.header;
+
+		/*
+		 * Transition to the PE_SNK_Transition_Sink state when:
+		 * 1) An Accept Message is received from the Source.
+		 *
+		 * Transition to the PE_SNK_Wait_for_Capabilities state when:
+		 * 1) There is no Explicit Contract in place and
+		 * 2) A Reject Message is received from the Source or
+		 * 3) A Wait Message is received from the Source.
+		 *
+		 * Transition to the PE_SNK_Ready state when:
+		 * 1) There is an Explicit Contract in place and
+		 * 2) A Reject Message is received from the Source or
+		 * 3) A Wait Message is received from the Source.
+		 *
+		 * Transition to the PE_SNK_Hard_Reset state when:
+		 * 1) A SenderResponseTimer timeout occurs.
+ */ + /* Only look at control messages */ + if (received_control_message(dev, header, PD_CTRL_ACCEPT)) { + /* explicit contract is now in place */ + atomic_set_bit(&pe->flags, PE_FLAGS_EXPLICIT_CONTRACT); + pe_set_state(dev, PE_SNK_TRANSITION_SINK); + } else if (received_control_message(dev, header, PD_CTRL_REJECT) || + received_control_message(dev, header, PD_CTRL_WAIT)) { + /* + * We had a previous explicit contract, so transition to + * PE_SNK_Ready + */ + if (atomic_test_bit(&pe->flags, PE_FLAGS_EXPLICIT_CONTRACT)) { + if (received_control_message(dev, header, PD_CTRL_WAIT)) { + /* + * Inform Device Policy Manager that Sink + * Request needs to Wait + */ + if (policy_wait_notify(dev, WAIT_SINK_REQUEST)) { + atomic_set_bit(&pe->flags, + PE_FLAGS_WAIT_SINK_REQUEST); + usbc_timer_start(&pe->pd_t_wait_to_resend); + } + } + + pe_set_state(dev, PE_SNK_READY); + } + /* + * No previous explicit contract, so transition + * to PE_SNK_Wait_For_Capabilities + */ + else { + pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES); + } + } else { + pe_send_soft_reset(dev, prl_rx->emsg.type); + } + return; + } + + /* When the SenderResponseTimer times out, perform a Hard Reset. 
*/ + if (usbc_timer_expired(&pe->pd_t_sender_response)) { + policy_notify(dev, PORT_PARTNER_NOT_RESPONSIVE); + pe_set_state(dev, PE_SNK_HARD_RESET); + } +} + +/** + * @brief PE_SNK_Select_Capability Exit State + */ +static void pe_snk_select_capability_exit(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + + /* Stop SenderResponse Timer */ + usbc_timer_stop(&pe->pd_t_sender_response); +} + +/** + * @brief PE_SNK_Transition_Sink Entry State + */ +static void pe_snk_transition_sink_entry(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + + LOG_INF("PE_SNK_Transition_Sink"); + + /* Initialize and run PSTransitionTimer */ + usbc_timer_start(&pe->pd_t_ps_transition); +} + +/** + * @brief PE_SNK_Transition_Sink Run State + */ +static void pe_snk_transition_sink_run(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + struct usbc_port_data *data = dev->data; + struct protocol_layer_rx_t *prl_rx = data->prl_rx; + union pd_header header; + + /* + * Transition to the PE_SNK_Ready state when: + * 1) A PS_RDY Message is received from the Source. + * + * Transition to the PE_SNK_Hard_Reset state when: + * 1) A Protocol Error occurs. 
+	 */
+	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) {
+		header = prl_rx->emsg.header;
+
+		/*
+		 * PS_RDY message received
+		 */
+		if (received_control_message(dev, header, PD_CTRL_PS_RDY)) {
+			/*
+			 * Inform the Device Policy Manager to Transition
+			 * the Power Supply
+			 */
+			policy_notify(dev, TRANSITION_PS);
+			pe_set_state(dev, PE_SNK_READY);
+		} else {
+			/* Protocol Error */
+			pe_set_state(dev, PE_SNK_HARD_RESET);
+		}
+		return;
+	}
+
+	/*
+	 * Timeout will lead to a Hard Reset
+	 */
+	if (usbc_timer_expired(&pe->pd_t_ps_transition)) {
+		pe_set_state(dev, PE_SNK_HARD_RESET);
+	}
+}
+
+/**
+ * @brief PE_SNK_Transition_Sink Exit State
+ */
+static void pe_snk_transition_sink_exit(void *obj)
+{
+	struct policy_engine *pe = (struct policy_engine *)obj;
+
+	/* Stop PSTransitionTimer */
+	usbc_timer_stop(&pe->pd_t_ps_transition);
+}
+
+/**
+ * @brief PE_SNK_Ready Entry State
+ */
+static void pe_snk_ready_entry(void *obj)
+{
+	struct policy_engine *pe = (struct policy_engine *)obj;
+
+	LOG_INF("PE_SNK_Ready");
+
+	/* Clear AMS Flags */
+	atomic_clear_bit(&pe->flags, PE_FLAGS_INTERRUPTIBLE_AMS);
+	atomic_clear_bit(&pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
+}
+
+/**
+ * @brief PE_SNK_Ready Run State
+ */
+static void pe_snk_ready_run(void *obj)
+{
+	struct policy_engine *pe = (struct policy_engine *)obj;
+	const struct device *dev = pe->dev;
+	struct usbc_port_data *data = dev->data;
+	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
+
+	/*
+	 * Handle incoming messages before discovery and DPMs other than hard
+	 * reset
+	 */
+	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) {
+		union pd_header header = prl_rx->emsg.header;
+
+		/* Extended Message Request */
+		if (header.extended) {
+			extended_message_not_supported(dev);
+			return;
+		}
+		/* Data Messages */
+		else if (header.number_of_data_objects > 0) {
+			switch (header.message_type) {
+			case PD_DATA_SOURCE_CAP:
+				pe_set_state(dev, PE_SNK_EVALUATE_CAPABILITY);
+				break;
+
+			
default: + pe_set_state(dev, PE_SEND_NOT_SUPPORTED); + } + return; + } + /* Control Messages */ + else { + switch (header.message_type) { + case PD_CTRL_GOOD_CRC: + /* Do nothing */ + break; + case PD_CTRL_PING: + /* Do nothing */ + break; + case PD_CTRL_GET_SINK_CAP: + pe_set_state(dev, PE_SNK_GIVE_SINK_CAP); + return; + case PD_CTRL_DR_SWAP: + pe_set_state(dev, PE_DRS_EVALUATE_SWAP); + return; + case PD_CTRL_NOT_SUPPORTED: + /* Do nothing */ + break; + /* + * USB PD 3.0 6.8.1: + * Receiving an unexpected message shall be responded + * to with a soft reset message. + */ + case PD_CTRL_ACCEPT: + case PD_CTRL_REJECT: + case PD_CTRL_WAIT: + case PD_CTRL_PS_RDY: + pe_send_soft_reset(dev, prl_rx->emsg.type); + return; + /* + * Receiving an unknown or unsupported message + * shall be responded to with a not supported message. + */ + default: + pe_set_state(dev, PE_SEND_NOT_SUPPORTED); + return; + } + } + } + + /* + * Check if we are waiting to resend any messages + */ + if (usbc_timer_expired(&pe->pd_t_wait_to_resend)) { + if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_WAIT_SINK_REQUEST)) { + pe_set_state(dev, PE_SNK_SELECT_CAPABILITY); + return; + } else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_WAIT_DATA_ROLE_SWAP)) { + pe_set_state(dev, PE_DRS_SEND_SWAP); + return; + } + } + + /* + * Handle Device Policy Manager Requests + */ + common_dpm_requests(dev); + sink_dpm_requests(dev); +} + +/** + * @brief PE_SNK_Hard_Reset Entry State + */ +static void pe_snk_hard_reset_entry(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + struct usbc_port_data *data = dev->data; + + LOG_INF("PE_SNK_Hard_Reset"); + + /* + * Note: If the SinkWaitCapTimer times out and the HardResetCounter is + * greater than nHardResetCount the Sink Shall assume that the + * Source is non-responsive. 
+ */ + if (atomic_test_bit(&pe->flags, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT) && + pe->hard_reset_counter > N_HARD_RESET_COUNT) { + /* Inform the DPM that the port partner is not responsive */ + policy_notify(dev, PORT_PARTNER_NOT_RESPONSIVE); + + /* Pause the Policy Engine */ + data->pe_enabled = false; + return; + } + + atomic_clear_bit(&pe->flags, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT); + atomic_clear_bit(&pe->flags, PE_FLAGS_PROTOCOL_ERROR); + + /* Request the generation of Hard Reset Signaling by the PHY Layer */ + prl_execute_hard_reset(dev); + /* Increment the HardResetCounter */ + pe->hard_reset_counter++; +} + +/** + * @brief PE_SNK_Hard_Reset Run State + */ +static void pe_snk_hard_reset_run(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + + /* + * Transition to the PE_SNK_Transition_to_default state when: + * 1) The Hard Reset is complete. + */ + if (atomic_test_bit(&pe->flags, PE_FLAGS_HARD_RESET_PENDING)) { + return; + } + + pe_set_state(dev, PE_SNK_TRANSITION_TO_DEFAULT); +} + +/** + * @brief PE_SNK_Transition_to_default Entry State + */ +static void pe_snk_transition_to_default_entry(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + + LOG_INF("PE_SNK_Transition_to_default"); + + /* Reset flags */ + pe->flags = ATOMIC_INIT(0); + pe->data_role = TC_ROLE_UFP; + + /* + * Indicate to the Device Policy Manager that the Sink Shall + * transition to default + */ + policy_notify(dev, SNK_TRANSITION_TO_DEFAULT); + /* + * Request the Device Policy Manger that the Port Data Role is + * set to UFP + */ + policy_notify(dev, DATA_ROLE_IS_UFP); +} + +/** + * @brief PE_SNK_Transition_to_default Run State + */ +static void pe_snk_transition_to_default_run(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + + /* + * Wait until Device Policy Manager has transitioned the sink to + * default level + */ + 
if (policy_is_snk_at_default(dev)) { + /* Inform the Protocol Layer that the Hard Reset is complete */ + prl_hard_reset_complete(dev); + pe_set_state(dev, PE_SNK_STARTUP); + } +} + +/** + * @brief PE_SNK_Get_Source_Cap Entry State + */ +static void pe_snk_get_source_cap_entry(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + + LOG_INF("PE_SNK_Get_Source_Cap"); + + /* Send a Get_Source_Cap Message */ + send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_GET_SOURCE_CAP); +} + +/** + * @brief PE_SNK_Get_Source_Cap Run State + */ +static void pe_snk_get_source_cap_run(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + struct usbc_port_data *data = dev->data; + struct protocol_layer_rx_t *prl_rx = data->prl_rx; + + /* Wait until message is sent or dropped */ + if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) { + pe_set_state(dev, PE_SNK_READY); + } else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) { + pe_send_soft_reset(dev, prl_rx->emsg.type); + } +} + +/** + * @brief PE_SNK_Get_Source_Cap Exit State + */ +static void pe_snk_get_source_cap_exit(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + + usbc_timer_stop(&pe->pd_t_sender_response); +} + +/** + * @brief PE_Send_Soft_Reset Entry State + */ +static void pe_send_soft_reset_entry(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + + LOG_INF("PE_SNK_Send_Soft_Reset"); + + /* Reset Protocol Layer */ + prl_reset(dev); + atomic_set_bit(&pe->flags, PE_FLAGS_SEND_SOFT_RESET); +} + +/** + * @brief PE_Send_Soft_Reset Run State + */ +static void pe_send_soft_reset_run(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + struct usbc_port_data *data = dev->data; + struct protocol_layer_rx_t *prl_rx = data->prl_rx; + union pd_header header; 
+ + if (prl_is_running(dev) == false) { + return; + } + + if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_SEND_SOFT_RESET)) { + /* Send Soft Reset message */ + send_ctrl_msg(dev, pe->soft_reset_sop, PD_CTRL_SOFT_RESET); + return; + } + + if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) { + /* Inform Device Policy Manager that the message was discarded */ + policy_notify(dev, MSG_DISCARDED); + pe_set_state(dev, PE_SNK_READY); + } else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) { + /* Start SenderResponse timer */ + usbc_timer_start(&pe->pd_t_sender_response); + } else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) { + header = prl_rx->emsg.header; + + if (received_control_message(dev, header, PD_CTRL_ACCEPT)) { + pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES); + } + } else if (atomic_test_bit(&pe->flags, PE_FLAGS_PROTOCOL_ERROR) || + usbc_timer_expired(&pe->pd_t_sender_response)) { + if (atomic_test_bit(&pe->flags, PE_FLAGS_PROTOCOL_ERROR)) { + atomic_clear_bit(&pe->flags, PE_FLAGS_PROTOCOL_ERROR); + } else { + policy_notify(dev, PORT_PARTNER_NOT_RESPONSIVE); + } + pe_set_state(dev, PE_SNK_HARD_RESET); + } +} + +/** + * @brief PE_Send_Soft_Reset Exit State + */ +static void pe_send_soft_reset_exit(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + + /* Stop Sender Response Timer */ + usbc_timer_stop(&pe->pd_t_sender_response); +} + +/** + * @brief PE_SNK_Soft_Reset Entry State + */ +static void pe_soft_reset_entry(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + + LOG_INF("PE_SNK_Soft_Reset"); + + /* Reset the Protocol Layer */ + prl_reset(dev); + atomic_set_bit(&pe->flags, PE_FLAGS_SEND_SOFT_RESET); +} + +/** + * @brief PE_SNK_Soft_Reset Run State + */ +static void pe_soft_reset_run(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + + if (prl_is_running(dev) 
== false) { + return; + } + + if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_SEND_SOFT_RESET)) { + /* Send Accept message */ + send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT); + return; + } + + if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) { + pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES); + } else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_PROTOCOL_ERROR)) { + pe_set_state(dev, PE_SNK_HARD_RESET); + } +} + +/** + * @brief PE_Not_Supported Entry State + */ +static void pe_send_not_supported_entry(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + + LOG_INF("PE_Not_Supported"); + + /* Request the Protocol Layer to send a Not_Supported Message. */ + if (prl_get_rev(dev, PD_PACKET_SOP) > PD_REV20) { + send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_NOT_SUPPORTED); + } + /* Ignore unsupported messages from PD REV2.0 devices */ + else { + pe_set_state(dev, PE_SNK_READY); + } +} + +/** + * @brief PE_Not_Supported Run State + */ +static void pe_send_not_supported_run(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + + if (atomic_test_bit(&pe->flags, PE_FLAGS_TX_COMPLETE) || + atomic_test_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) { + atomic_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE); + atomic_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED); + pe_set_state(dev, PE_SNK_READY); + } +} + +/** + * @brief PE_Chunk_Received Entry State + */ +static void pe_chunk_received_entry(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + + LOG_INF("PE_SNK_Chunk_Received"); + + usbc_timer_start(&pe->pd_t_chunking_not_supported); +} + +/** + * @brief PE_Chunk_Received Run State + */ +static void pe_chunk_received_run(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + + if (usbc_timer_expired(&pe->pd_t_chunking_not_supported)) { + pe_set_state(dev, 
PE_SEND_NOT_SUPPORTED); + } +} + +/** + * @brief PE_SNK_Give_Sink_Cap Entry state + */ +static void pe_snk_give_sink_cap_entry(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + struct usbc_port_data *data = dev->data; + struct protocol_layer_tx_t *prl_tx = data->prl_tx; + struct pd_msg *msg = &prl_tx->emsg; + uint32_t *pdos; + uint32_t num_pdos; + + /* Get present sink capabilities from Device Policy Manager */ + policy_get_snk_cap(dev, &pdos, &num_pdos); + + msg->len = PD_CONVERT_PD_HEADER_COUNT_TO_BYTES(num_pdos); + memcpy(msg->data, (uint8_t *)pdos, msg->len); + send_data_msg(dev, PD_PACKET_SOP, PD_DATA_SINK_CAP); +} + +/** + * @brief PE_SNK_Give_Sink_Cap Run state + */ +static void pe_snk_give_sink_cap_run(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + struct usbc_port_data *data = dev->data; + struct protocol_layer_rx_t *prl_rx = data->prl_rx; + + /* Wait until message is sent or dropped */ + if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) { + pe_set_state(dev, PE_SNK_READY); + } else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) { + pe_send_soft_reset(dev, prl_rx->emsg.type); + } +} + +/** + * @brief PE_DRS_Evaluate_Swap Entry state + */ +static void pe_drs_evaluate_swap_entry(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + + /* Get evaluation of Data Role Swap request from Device Policy Manager */ + if (policy_check(dev, (pe->data_role == TC_ROLE_UFP) ? 
+ CHECK_DATA_ROLE_SWAP_TO_DFP : CHECK_DATA_ROLE_SWAP_TO_UFP)) { + /* + * PE_DRS_DFP_UFP_Accept_Swap and PE_DRS_UFP_DFP_Accept_Swap + * State embedded here + */ + /* Send Accept message */ + send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT); + } else { + /* + * PE_DRS_DFP_UFP_Reject_Swap and PE_DRS_UFP_DFP_Reject_Swap + * State embedded here + */ + /* Send Reject message */ + send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_REJECT); + } +} + +/** + * @brief PE_DRS_Evaluate_Swap Run state + */ +static void pe_drs_evaluate_swap_run(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + struct usbc_port_data *data = dev->data; + struct protocol_layer_tx_t *prl_tx = data->prl_tx; + struct protocol_layer_rx_t *prl_rx = data->prl_rx; + + if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) { + /* Only update data roles if last message sent was Accept */ + if (prl_tx->msg_type == PD_CTRL_ACCEPT) { + /* Update Data Role */ + pe->data_role = (pe->data_role == TC_ROLE_UFP) ? TC_ROLE_DFP : TC_ROLE_UFP; + /* Notify TCPC of role update */ + tcpc_set_roles(data->tcpc, pe->power_role, pe->data_role); + /* Inform Device Policy Manager of Data Role Change */ + policy_notify(dev, (pe->data_role == TC_ROLE_UFP) ? 
+ DATA_ROLE_IS_UFP : DATA_ROLE_IS_DFP); + } + pe_set_state(dev, PE_SNK_READY); + } else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) { + /* + * Inform Device Policy Manager that the message was + * discarded + */ + policy_notify(dev, MSG_DISCARDED); + pe_send_soft_reset(dev, prl_rx->emsg.type); + } +} + +/** + * @brief PE_DRS_Send_Swap Entry state + */ +static void pe_drs_send_swap_entry(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + + /* Send Swap DR message */ + send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_DR_SWAP); +} + +/** + * @brief PE_DRS_Send_Swap Run state + */ +static void pe_drs_send_swap_run(void *obj) +{ + struct policy_engine *pe = (struct policy_engine *)obj; + const struct device *dev = pe->dev; + struct usbc_port_data *data = dev->data; + struct protocol_layer_rx_t *prl_rx = data->prl_rx; + union pd_header header; + + if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) { + /* Start Sender Response Timer */ + usbc_timer_start(&pe->pd_t_sender_response); + } + + if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) { + header = prl_rx->emsg.header; + if (received_control_message(dev, header, PD_CTRL_REJECT)) { + /* + * Inform Device Policy Manager that Data Role Swap + * was Rejected + */ + policy_notify(dev, MSG_REJECTED_RECEIVED); + } else if (received_control_message(dev, header, PD_CTRL_WAIT)) { + /* + * Inform Device Policy Manager that Data Role Swap + * needs to Wait + */ + if (policy_wait_notify(dev, WAIT_DATA_ROLE_SWAP)) { + atomic_set_bit(&pe->flags, PE_FLAGS_WAIT_DATA_ROLE_SWAP); + usbc_timer_start(&pe->pd_t_wait_to_resend); + } + } else if (received_control_message(dev, header, PD_CTRL_ACCEPT)) { + /* Update Data Role */ + pe->data_role = (pe->data_role == TC_ROLE_UFP) ? 
TC_ROLE_DFP : TC_ROLE_UFP;
+			/* Notify TCPC of role update */
+			tcpc_set_roles(data->tcpc, pe->power_role, pe->data_role);
+			/* Inform Device Policy Manager of Data Role Change */
+			policy_notify(dev, (pe->data_role == TC_ROLE_UFP) ?
+				DATA_ROLE_IS_UFP : DATA_ROLE_IS_DFP);
+		} else {
+			/* Protocol Error */
+			policy_notify(dev, PROTOCOL_ERROR);
+			pe_send_soft_reset(dev, PD_PACKET_SOP);
+			return;
+		}
+		pe_set_state(dev, PE_SNK_READY);
+		return;
+	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
+		/*
+		 * Inform Device Policy Manager that the message
+		 * was discarded
+		 */
+		policy_notify(dev, MSG_DISCARDED);
+		pe_set_state(dev, PE_SNK_READY);
+		return;
+	}
+
+	if (usbc_timer_expired(&pe->pd_t_sender_response)) {
+		/* Protocol Error */
+		policy_notify(dev, PROTOCOL_ERROR);
+		pe_send_soft_reset(dev, PD_PACKET_SOP);
+	}
+}
+
+/**
+ * @brief PE_DRS_Send_Swap Exit state
+ */
+static void pe_drs_send_swap_exit(void *obj)
+{
+	struct policy_engine *pe = (struct policy_engine *)obj;
+
+	/* Stop Sender Response Timer */
+	usbc_timer_stop(&pe->pd_t_sender_response);
+}
+
+static void pe_suspend_entry(void *obj)
+{
+	LOG_INF("PE_SUSPEND");
+}
+
+static void pe_suspend_run(void *obj)
+{
+	/* DO NOTHING */
+}
+
+/**
+ * @brief Policy engine State table
+ */
+static const struct smf_state pe_states[] = {
+	[PE_SNK_STARTUP] = SMF_CREATE_STATE(
+		pe_snk_startup_entry,
+		pe_snk_startup_run,
+		NULL,
+		NULL),
+	[PE_SNK_DISCOVERY] = SMF_CREATE_STATE(
+		pe_snk_discovery_entry,
+		pe_snk_discovery_run,
+		NULL,
+		NULL),
+	[PE_SNK_WAIT_FOR_CAPABILITIES] = SMF_CREATE_STATE(
+		pe_snk_wait_for_capabilities_entry,
+		pe_snk_wait_for_capabilities_run,
+		pe_snk_wait_for_capabilities_exit,
+		NULL),
+	[PE_SNK_EVALUATE_CAPABILITY] = SMF_CREATE_STATE(
+		pe_snk_evaluate_capability_entry,
+		NULL,
+		NULL,
+		NULL),
+	[PE_SNK_SELECT_CAPABILITY] = SMF_CREATE_STATE(
+		pe_snk_select_capability_entry,
+		pe_snk_select_capability_run,
+		pe_snk_select_capability_exit,
+		NULL),
+	
[PE_SNK_READY] = SMF_CREATE_STATE( + pe_snk_ready_entry, + pe_snk_ready_run, + NULL, + NULL), + [PE_SNK_HARD_RESET] = SMF_CREATE_STATE( + pe_snk_hard_reset_entry, + pe_snk_hard_reset_run, + NULL, + NULL), + [PE_SNK_TRANSITION_TO_DEFAULT] = SMF_CREATE_STATE( + pe_snk_transition_to_default_entry, + pe_snk_transition_to_default_run, + NULL, + NULL), + [PE_SNK_GIVE_SINK_CAP] = SMF_CREATE_STATE( + pe_snk_give_sink_cap_entry, + pe_snk_give_sink_cap_run, + NULL, + NULL), + [PE_SNK_GET_SOURCE_CAP] = SMF_CREATE_STATE( + pe_snk_get_source_cap_entry, + pe_snk_get_source_cap_run, + pe_snk_get_source_cap_exit, + NULL), + [PE_SNK_TRANSITION_SINK] = SMF_CREATE_STATE( + pe_snk_transition_sink_entry, + pe_snk_transition_sink_run, + pe_snk_transition_sink_exit, + NULL), + [PE_SEND_SOFT_RESET] = SMF_CREATE_STATE( + pe_send_soft_reset_entry, + pe_send_soft_reset_run, + pe_send_soft_reset_exit, + NULL), + [PE_SOFT_RESET] = SMF_CREATE_STATE( + pe_soft_reset_entry, + pe_soft_reset_run, + NULL, + NULL), + [PE_SEND_NOT_SUPPORTED] = SMF_CREATE_STATE( + pe_send_not_supported_entry, + pe_send_not_supported_run, + NULL, + NULL), + [PE_DRS_EVALUATE_SWAP] = SMF_CREATE_STATE( + pe_drs_evaluate_swap_entry, + pe_drs_evaluate_swap_run, + NULL, + NULL), + [PE_DRS_SEND_SWAP] = SMF_CREATE_STATE( + pe_drs_send_swap_entry, + pe_drs_send_swap_run, + pe_drs_send_swap_exit, + NULL), + [PE_SNK_GET_SOURCE_CAP] = SMF_CREATE_STATE( + pe_snk_get_source_cap_entry, + pe_snk_get_source_cap_run, + pe_snk_get_source_cap_exit, + NULL), + [PE_SNK_CHUNK_RECEIVED] = SMF_CREATE_STATE( + pe_chunk_received_entry, + pe_chunk_received_run, + NULL, + NULL), + [PE_SUSPEND] = SMF_CREATE_STATE( + pe_suspend_entry, + pe_suspend_run, + NULL, + NULL), +}; diff --git a/subsys/usb/usb_c/usbc_pe.h b/subsys/usb/usb_c/usbc_pe.h new file mode 100644 index 00000000000..f86df6d1e04 --- /dev/null +++ b/subsys/usb/usb_c/usbc_pe.h @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2022 The Chromium OS Authors + * + * SPDX-License-Identifier: Apache-2.0 + 
*/ + +#ifndef ZEPHYR_SUBSYS_USBC_PE_H_ +#define ZEPHYR_SUBSYS_USBC_PE_H_ + +#include +#include +#include +#include +#include +#include "usbc_timer.h" + +/** + * @brief Policy Engine Errors + */ +enum pe_error { + /** Transmit error */ + ERR_XMIT, +}; + +/** + * @brief Policy Engine State Machine Object + */ +struct policy_engine { + /** state machine context */ + struct smf_ctx ctx; + /** Port device */ + const struct device *dev; + /** state machine flags */ + atomic_t flags; + /** current port power role (SOURCE or SINK) */ + enum tc_power_role power_role; + /** current port data role (DFP or UFP) */ + enum tc_data_role data_role; + /** port address where soft resets are sent */ + enum pd_packet_type soft_reset_sop; + /** DPM request */ + enum usbc_policy_request_t dpm_request; + + /* Counters */ + + /** + * This counter is used to retry the Hard Reset whenever there is no + * response from the remote device. + */ + uint32_t hard_reset_counter; + + /* Timers */ + + /** tTypeCSinkWaitCap timer */ + struct usbc_timer_t pd_t_typec_sink_wait_cap; + /** tSenderResponse timer */ + struct usbc_timer_t pd_t_sender_response; + /** tPSTransition timer */ + struct usbc_timer_t pd_t_ps_transition; + /** tSinkRequest timer */ + struct usbc_timer_t pd_t_sink_request; + /** tChunkingNotSupported timer */ + struct usbc_timer_t pd_t_chunking_not_supported; + /** Time to wait before resending message after WAIT reception */ + struct usbc_timer_t pd_t_wait_to_resend; +}; + +/** + * @brief This function must only be called in the subsystem init function. + * + * @param dev Pointer to the device structure for the driver instance. + */ +void pe_subsys_init(const struct device *dev); + +/** + * @brief Start the Policy Engine Layer state machine. This is only called + * from the Type-C state machine. + * + * @param dev Pointer to the device structure for the driver instance + */ +void pe_start(const struct device *dev); + +/** + * @brief Suspend the Policy Engine Layer state machine. 
This is only called + * from the Type-C state machine. + * + * @param dev Pointer to the device structure for the driver instance + */ +void pe_suspend(const struct device *dev); + +/** + * @brief Run the Policy Engine Layer state machine. This is called from the + * subsystems port stack thread + * + * @param dev Pointer to the device structure for the driver instance + * @param dpm_request Device Policy Manager request + */ +void pe_run(const struct device *dev, + const int32_t dpm_request); + +/** + * @brief Query if the Policy Engine is running + * + * @param dev Pointer to the device structure for the driver instance + * + * @retval TRUE if the Policy Engine is running + * @retval FALSE if the Policy Engine is not running + */ +bool pe_is_running(const struct device *dev); + +/** + * @brief Informs the Policy Engine that a message was successfully sent + * + * @param dev Pointer to the device structure for the driver instance + */ +void pe_message_sent(const struct device *dev); + +/** + * @brief Informs the Policy Engine of an error. + * + * @param dev Pointer to the device structure for the driver instance + * @param e policy error + * @param type port partner address where error was generated + */ +void pe_report_error(const struct device *dev, + const enum pe_error e, + const enum pd_packet_type type); + +/** + * @brief Informs the Policy Engine that a transmit message was discarded + * because of an incoming message. + * + * @param dev Pointer to the device structure for the driver instance + */ +void pe_report_discard(const struct device *dev); + +/** + * @brief Called by the Protocol Layer to informs the Policy Engine + * that a message has been received. + * + * @param dev Pointer to the device structure for the driver instance + */ +void pe_message_received(const struct device *dev); + +/** + * @brief Informs the Policy Engine that a hard reset was received. 
+ * + * @param dev Pointer to the device structure for the driver instance + */ +void pe_got_hard_reset(const struct device *dev); + +/** + * @brief Informs the Policy Engine that a soft reset was received. + * + * @param dev Pointer to the device structure for the driver instance + */ +void pe_got_soft_reset(const struct device *dev); + +/** + * @brief Informs the Policy Engine that a hard reset was sent. + * + * @param dev Pointer to the device structure for the driver instance + */ +void pe_hard_reset_sent(const struct device *dev); + +/** + * @brief Indicates if an explicit contract is in place + * + * @param dev Pointer to the device structure for the driver instance + * + * @retval true if an explicit contract is in place, else false + */ +bool pe_is_explicit_contract(const struct device *dev); + +/* + * @brief Informs the Policy Engine that it should invalidate the + * explicit contract. + * + * @param dev Pointer to the device structure for the driver instance + */ +void pe_invalidate_explicit_contract(const struct device *dev); + +/** + * @brief Return true if the PE is is within an atomic messaging sequence + * that it initiated with a SOP* port partner. 
+ * + * @note The PRL layer polls this instead of using AMS_START and AMS_END + * notification from the PE that is called out by the spec + * + * @param dev Pointer to the device structure for the driver instance + */ +bool pe_dpm_initiated_ams(const struct device *dev); + +/** + * @brief Get the current data role + * + * @param dev Pointer to the device structure for the driver instance + * + * @retval data role + */ +enum tc_data_role pe_get_data_role(const struct device *dev); + +/** + * @brief Get the current power role + * + * @param dev Pointer to the device structure for the driver instance + * + * @retval power role + */ +enum tc_power_role pe_get_power_role(const struct device *dev); + +/** + * @brief Get cable plug role + * + * @param dev Pointer to the device structure for the driver instance + * + * @retval cable plug role + */ +enum tc_cable_plug pe_get_cable_plug(const struct device *dev); + +#endif /* ZEPHYR_SUBSYS_USBC_PE_H_ */ diff --git a/subsys/usb/usb_c/usbc_prl.c b/subsys/usb/usb_c/usbc_prl.c new file mode 100644 index 00000000000..23c8a97e8ea --- /dev/null +++ b/subsys/usb/usb_c/usbc_prl.c @@ -0,0 +1,1209 @@ +/* + * Copyright (c) 2022 The Chromium OS Authors + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include +LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL); + +#include "usbc_stack.h" + +/** + * @file + * @brief USB Power Delivery Protocol Layer (PRL) + * + * The PRL implementation in this file is base on + * Specification Revision 3.1, Version 1.3 + */ + +/** + * @brief Protocol Layer Flags + * + * @note: These flags are used in multiple state machines and could have + * different meanings in each state machine. 
+ */ +enum prl_flags { + /** Flag to note message transmission completed */ + PRL_FLAGS_TX_COMPLETE = 0, + /** Flag to note message was discarded */ + PRL_FLAGS_TX_DISCARDED = 1, + /** Flag to note PRL waited for SINK_OK CC state before transmitting */ + PRL_FLAGS_WAIT_SINK_OK = 2, + /** Flag to note transmission error occurred */ + PRL_FLAGS_TX_ERROR = 3, + /** Flag to note PE triggered a hard reset */ + PRL_FLAGS_PE_HARD_RESET = 4, + /** Flag to note hard reset has completed */ + PRL_FLAGS_HARD_RESET_COMPLETE = 5, + /** Flag to note port partner sent a hard reset */ + PRL_FLAGS_PORT_PARTNER_HARD_RESET = 6, + /** + * Flag to note a message transmission has been requested. It is only + * cleared when the message is sent to the TCPC layer. + */ + PRL_FLAGS_MSG_XMIT = 7, +}; + +/** + * @brief Protocol Layer Transmission States + */ +enum usbc_prl_tx_state_t { + /** PRL_Tx_PHY_Layer_Reset */ + PRL_TX_PHY_LAYER_RESET, + /** PRL_Tx_Wait_for_Message_Request */ + PRL_TX_WAIT_FOR_MESSAGE_REQUEST, + /** PRL_Tx_Layer_Reset_for_Transmit */ + PRL_TX_LAYER_RESET_FOR_TRANSMIT, + /** PRL_Tx_Wait_for_PHY_response */ + PRL_TX_WAIT_FOR_PHY_RESPONSE, + /** PRL_Tx_Snk_Start_of_AMS */ + PRL_TX_SNK_START_AMS, + /** PRL_Tx_Snk_Pending */ + PRL_TX_SNK_PENDING, + /** PRL_Tx_Discard_Message */ + PRL_TX_DISCARD_MESSAGE, + + /** PRL_Tx_Suspend. Not part of the PD specification. */ + PRL_TX_SUSPEND, +}; + +/** + * @brief Protocol Layer Hard Reset States + */ +enum usbc_prl_hr_state_t { + /** PRL_HR_Wait_For_Request */ + PRL_HR_WAIT_FOR_REQUEST, + /** PRL_HR_Reset_Layer */ + PRL_HR_RESET_LAYER, + /** PRL_HR_Wait_For_PHY_Hard_Reset_Complete */ + PRL_HR_WAIT_FOR_PHY_HARD_RESET_COMPLETE, + /** PRL_HR_Wait_For_PE_Hard_Reset_Complete */ + PRL_HR_WAIT_FOR_PE_HARD_RESET_COMPLETE, + + /** PRL_Hr_Suspend. Not part of the PD specification. 
 */
+	PRL_HR_SUSPEND,
+};
+
+static const struct smf_state prl_tx_states[];
+static const struct smf_state prl_hr_states[];
+
+static void prl_tx_construct_message(const struct device *dev);
+static void prl_rx_wait_for_phy_message(const struct device *dev);
+static void prl_hr_set_state(const struct device *dev,
+			     const enum usbc_prl_hr_state_t state);
+static void prl_tx_set_state(const struct device *dev,
+			     const enum usbc_prl_tx_state_t state);
+static void prl_init(const struct device *dev);
+static enum usbc_prl_hr_state_t prl_hr_get_state(const struct device *dev);
+
+/**
+ * @brief Initializes the TX and HR state machines and enters the
+ * PRL_TX_SUSPEND and PRL_HR_SUSPEND states respectively.
+ */
+void prl_subsys_init(const struct device *dev)
+{
+	struct usbc_port_data *data = dev->data;
+	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
+	struct protocol_hard_reset_t *prl_hr = data->prl_hr;
+
+	/* Save the port device objects so states can access it */
+	prl_tx->dev = dev;
+	prl_hr->dev = dev;
+
+	/* Initialize the state machines */
+	smf_set_initial(SMF_CTX(prl_hr), &prl_hr_states[PRL_HR_SUSPEND]);
+	smf_set_initial(SMF_CTX(prl_tx), &prl_tx_states[PRL_TX_SUSPEND]);
+}
+
+/**
+ * @brief Test if the Protocol Layer State Machines are running
+ *
+ * @retval TRUE if the state machines are running
+ * @retval FALSE if the state machines are paused
+ */
+bool prl_is_running(const struct device *dev)
+{
+	struct usbc_port_data *data = dev->data;
+
+	return data->prl_sm_state == SM_RUN;
+}
+
+/**
+ * @brief Directs the Protocol Layer to perform a hard reset. This function
+ * is called from the Policy Engine.
+ */
+void prl_execute_hard_reset(const struct device *dev)
+{
+	struct usbc_port_data *data = dev->data;
+	struct protocol_hard_reset_t *prl_hr = data->prl_hr;
+
+	/* Only allow async. 
function calls when state machine is running */
	if (prl_is_running(dev) == false) {
		return;
	}

	atomic_set_bit(&prl_hr->flags, PRL_FLAGS_PE_HARD_RESET);
	prl_hr_set_state(dev, PRL_HR_RESET_LAYER);
}

/**
 * @brief Instructs the Protocol Layer that a hard reset is complete.
 *	  This function is called from the Policy Engine.
 */
void prl_hard_reset_complete(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_hard_reset_t *prl_hr = data->prl_hr;

	atomic_set_bit(&prl_hr->flags, PRL_FLAGS_HARD_RESET_COMPLETE);
}

/**
 * @brief Directs the Protocol Layer to construct and transmit a Power Delivery
 *	  Control message.
 */
void prl_send_ctrl_msg(const struct device *dev,
		       const enum pd_packet_type type,
		       const enum pd_ctrl_msg_type msg)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;

	/* set packet type */
	prl_tx->emsg.type = type;
	/* set message type */
	prl_tx->msg_type = msg;
	/* control message. set data len to zero */
	prl_tx->emsg.len = 0;

	/* Request transmission; the PRL_TX state machine picks this up */
	atomic_set_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT);
}

/**
 * @brief Directs the Protocol Layer to construct and transmit a Power Delivery
 *	  Data message.
 *
 * @note: Before calling this function prl_tx->emsg.data and prl_tx->emsg.len
 *	  must be set.
 */
void prl_send_data_msg(const struct device *dev,
		       const enum pd_packet_type type,
		       const enum pd_data_msg_type msg)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;

	/* set packet type */
	prl_tx->emsg.type = type;
	/* set message type */
	prl_tx->msg_type = msg;

	/* Request transmission; the PRL_TX state machine picks this up */
	atomic_set_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT);
}

/**
 * @brief Directs the Protocol Layer to reset the revision of each packet type
 *	  to its default value.
 */
void prl_set_default_pd_revision(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	/*
	 * Initialize to highest revision supported. If the port or cable
	 * partner doesn't support this revision, the Protocol Engine will
	 * lower this value to the revision supported by the partner.
	 */
	data->rev[PD_PACKET_SOP] = PD_REV30;
	data->rev[PD_PACKET_SOP_PRIME] = PD_REV30;
	data->rev[PD_PACKET_PRIME_PRIME] = PD_REV30;
	data->rev[PD_PACKET_DEBUG_PRIME] = PD_REV30;
	data->rev[PD_PACKET_DEBUG_PRIME_PRIME] = PD_REV30;
}

/**
 * @brief Start the Protocol Layer state machines
 */
void prl_start(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	data->prl_enabled = true;
}

/**
 * @brief Pause the Protocol Layer state machines
 */
void prl_suspend(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	data->prl_enabled = false;

	/*
	 * While we are paused, exit all states
	 * and wait until initialized again.
	 */
	prl_tx_set_state(dev, PRL_TX_SUSPEND);
	prl_hr_set_state(dev, PRL_HR_SUSPEND);
}

/**
 * @brief Reset the Protocol Layer state machines
 */
void prl_reset(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	/* Re-init occurs on the next prl_run() pass */
	if (data->prl_enabled) {
		data->prl_sm_state = SM_INIT;
	}
}

/**
 * @brief Run the Protocol Layer state machines
 */
void prl_run(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct protocol_hard_reset_t *prl_hr = data->prl_hr;

	switch (data->prl_sm_state) {
	case SM_PAUSED:
		if (data->prl_enabled == false) {
			break;
		}
		/* fall through */
	case SM_INIT:
		prl_init(dev);
		data->prl_sm_state = SM_RUN;
		/* fall through */
	case SM_RUN:
		if (data->prl_enabled == false) {
			data->prl_sm_state = SM_PAUSED;
			/* Disable RX */
			tcpc_set_rx_enable(data->tcpc, false);
			break;
		}

		/* Run Protocol Layer Hard Reset state machine */
		smf_run_state(SMF_CTX(prl_hr));

		/*
		 * During Hard Reset no USB Power Delivery Protocol Messages
		 * are sent or received; only Hard Reset Signaling is present
		 * after which the communication channel is assumed to have
		 * been disabled by the Physical Layer until completion of
		 * the Hard Reset.
		 */
		if (prl_hr_get_state(dev) == PRL_HR_WAIT_FOR_REQUEST) {
			/* Run Protocol Layer Message Reception */
			prl_rx_wait_for_phy_message(dev);

			/* Run Protocol Layer Message Tx state machine */
			smf_run_state(SMF_CTX(prl_tx));
		}
		break;
	}
}

/**
 * @brief Set revision for the given packet type. This function is called
 *	  from the Policy Engine.
 */
void prl_set_rev(const struct device *dev,
		 const enum pd_packet_type type,
		 const enum pd_rev_type rev)
{
	struct usbc_port_data *data = dev->data;

	data->rev[type] = rev;
}

/**
 * @brief Get the revision for the given packet type.
 * This function is called from the Policy Engine.
 */
enum pd_rev_type prl_get_rev(const struct device *dev,
			     const enum pd_packet_type type)
{
	struct usbc_port_data *data = dev->data;

	return data->rev[type];
}

/** Private Protocol Layer API below */

/**
 * @brief Alert Handler called by the TCPC driver
 */
static void alert_handler(const struct device *tcpc,
			  void *port_dev,
			  enum tcpc_alert alert)
{
	const struct device *dev = (const struct device *)port_dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct protocol_hard_reset_t *prl_hr = data->prl_hr;

	switch (alert) {
	case TCPC_ALERT_HARD_RESET_RECEIVED:
		atomic_set_bit(&prl_hr->flags,
			       PRL_FLAGS_PORT_PARTNER_HARD_RESET);
		break;
	case TCPC_ALERT_TRANSMIT_MSG_FAILED:
		atomic_set_bit(&prl_tx->flags, PRL_FLAGS_TX_ERROR);
		break;
	case TCPC_ALERT_TRANSMIT_MSG_DISCARDED:
		atomic_set_bit(&prl_tx->flags, PRL_FLAGS_TX_DISCARDED);
		break;
	case TCPC_ALERT_TRANSMIT_MSG_SUCCESS:
		atomic_set_bit(&prl_tx->flags, PRL_FLAGS_TX_COMPLETE);
		break;
	/* These alerts are ignored and will just wake the thread. */
	default:
		break;
	}

	/* Wake the thread if it's sleeping */
	k_wakeup(data->port_thread);
}

/**
 * @brief Set the Protocol Layer Message Transmission state
 */
static void prl_tx_set_state(const struct device *dev,
			     const enum usbc_prl_tx_state_t state)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;

	smf_set_state(SMF_CTX(prl_tx), &prl_tx_states[state]);
}

/**
 * @brief Set the Protocol Layer Hard Reset state
 */
static void prl_hr_set_state(const struct device *dev,
			     const enum usbc_prl_hr_state_t state)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_hard_reset_t *prl_hr = data->prl_hr;

	smf_set_state(SMF_CTX(prl_hr), &prl_hr_states[state]);
}

/**
 * @brief Get the Protocol Layer Hard Reset state
 */
static enum usbc_prl_hr_state_t prl_hr_get_state(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_hard_reset_t *prl_hr = data->prl_hr;

	/* Current state's index in the table is the enum value */
	return prl_hr->ctx.current - &prl_hr_states[0];
}

/**
 * @brief Increment the message ID counter for the last transmitted packet type
 */
static void increment_msgid_counter(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;

	/* If the last message wasn't an SOP* message, no need to increment */
	if (prl_tx->last_xmit_type >= NUM_SOP_STAR_TYPES) {
		return;
	}

	/* Wraps modulo nMessageIDCount + 1 (3-bit MessageID) */
	prl_tx->msg_id_counter[prl_tx->last_xmit_type] =
		(prl_tx->msg_id_counter[prl_tx->last_xmit_type] + 1) &
		PD_MESSAGE_ID_COUNT;
}

/**
 * @brief Get the SOP* header for the message to be transmitted
 */
static uint32_t get_sop_star_header(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	const bool is_sop_packet = prl_tx->emsg.type == PD_PACKET_SOP;
	union pd_header header;

	/* SOP vs SOP'/SOP" headers are different. Replace fields as needed */
	header.message_type = prl_tx->msg_type;
	header.port_data_role = is_sop_packet ? pe_get_data_role(dev) : 0;
	header.specification_revision = data->rev[prl_tx->emsg.type];
	header.port_power_role = is_sop_packet ?
		pe_get_power_role(dev) : pe_get_cable_plug(dev);
	header.message_id = prl_tx->msg_id_counter[prl_tx->emsg.type];
	header.number_of_data_objects =
		PD_CONVERT_BYTES_TO_PD_HEADER_COUNT(prl_tx->emsg.len);
	header.extended = false;

	return header.raw_value;
}

/**
 * @brief Construct and transmit a message
 */
static void prl_tx_construct_message(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	const struct device *tcpc = data->tcpc;

	/* The header is unused for hard reset, etc. */
	prl_tx->emsg.header.raw_value = prl_tx->emsg.type < NUM_SOP_STAR_TYPES ?
		get_sop_star_header(dev) : 0;


	/* Save SOP* so the correct msg_id_counter can be incremented */
	prl_tx->last_xmit_type = prl_tx->emsg.type;

	/*
	 * PRL_FLAGS_TX_COMPLETE could be set if this function is called before
	 * the Policy Engine is informed of the previous transmission. Clear
	 * the flag so that this message can be sent.
	 */
	atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_TX_COMPLETE);

	/* Clear PRL_FLAGS_MSG_XMIT flag */
	atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT);

	/*
	 * Pass message to PHY Layer. It handles retries in hardware as
	 * software cannot handle the required timing ~ 1ms (tReceive + tRetry)
	 */
	tcpc_transmit_data(tcpc, &prl_tx->emsg);
}

/**
 * @brief Transmit a Hard Reset Message
 */
static void prl_hr_send_msg_to_phy(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	const struct device *tcpc = data->tcpc;

	/* Header is not used for hard reset */
	prl_tx->emsg.header.raw_value = 0;
	prl_tx->emsg.type = PD_PACKET_TX_HARD_RESET;

	/*
	 * These flags could be set if this function is called before the
	 * Policy Engine is informed of the previous transmission. Clear the
	 * flags so that this message can be sent.
	 */
	data->prl_tx->flags = ATOMIC_INIT(0);

	/* Pass message to PHY Layer */
	tcpc_transmit_data(tcpc, &prl_tx->emsg);
}

/**
 * @brief Initialize the Protocol Layer State Machines
 */
static void prl_init(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct protocol_hard_reset_t *prl_hr = data->prl_hr;
	int i;

	LOG_INF("PRL_INIT");

	/* Set all packet types to default revision */
	prl_set_default_pd_revision(dev);

	/*
	 * Set TCPC alert handler so we are notified when messages
	 * are received, transmitted, etc.
 */
	tcpc_set_alert_handler_cb(data->tcpc, alert_handler, (void *)dev);

	/* Initialize the PRL_HR state machine */
	prl_hr->flags = ATOMIC_INIT(0);
	usbc_timer_init(&prl_hr->pd_t_hard_reset_complete,
			PD_T_HARD_RESET_COMPLETE_MAX_MS);
	prl_hr_set_state(dev, PRL_HR_WAIT_FOR_REQUEST);

	/* Initialize the PRL_TX state machine */
	prl_tx->flags = ATOMIC_INIT(0);
	prl_tx->last_xmit_type = PD_PACKET_SOP;
	for (i = 0; i < NUM_SOP_STAR_TYPES; i++) {
		prl_tx->msg_id_counter[i] = 0;
	}
	usbc_timer_init(&prl_tx->pd_t_tx_timeout, PD_T_TX_TIMEOUT_MS);
	prl_tx_set_state(dev, PRL_TX_PHY_LAYER_RESET);

	/* Initialize the PRL_RX state machine */
	prl_rx->flags = ATOMIC_INIT(0);
	/* -1 means no message has been received yet for that SOP* type */
	for (i = 0; i < NUM_SOP_STAR_TYPES; i++) {
		prl_rx->msg_id[i] = -1;
	}
}

/**
 * @brief PRL_Tx_PHY_Layer_Reset State
 */
static void prl_tx_phy_layer_reset_entry(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;
	struct usbc_port_data *data = dev->data;
	const struct device *tcpc = data->tcpc;

	LOG_INF("PRL_Tx_PHY_Layer_Reset");

	/* Enable communications */
	tcpc_set_rx_enable(tcpc, tc_is_in_attached_state(dev));

	/* Reset complete */
	prl_tx_set_state(dev, PRL_TX_WAIT_FOR_MESSAGE_REQUEST);
}

/**
 * @brief PRL_Tx_Wait_for_Message_Request Entry State
 */
static void prl_tx_wait_for_message_request_entry(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;

	LOG_INF("PRL_Tx_Wait_for_Message_Request");

	/* Clear outstanding messages */
	prl_tx->flags = ATOMIC_INIT(0);
}

/**
 * @brief PRL_Tx_Wait_for_Message_Request Run State
 */
static void prl_tx_wait_for_message_request_run(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;
	struct usbc_port_data *data = dev->data;

	/* Clear any AMS flags and state if we are no longer in an AMS */
	if (pe_dpm_initiated_ams(dev) == false) {
		atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_WAIT_SINK_OK);
	}

	/*
	 * Check if we are starting an AMS and need to wait and/or set the CC
	 * lines appropriately.
	 */
	if (data->rev[PD_PACKET_SOP] == PD_REV30 && pe_dpm_initiated_ams(dev)) {
		if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_WAIT_SINK_OK)) {
			/*
			 * If we are already in an AMS then allow the
			 * multi-message AMS to continue.
			 */
		} else {
			/*
			 * Start of SNK AMS notification received from
			 * Policy Engine
			 */
			atomic_set_bit(&prl_tx->flags, PRL_FLAGS_WAIT_SINK_OK);
			prl_tx_set_state(dev, PRL_TX_SNK_START_AMS);
			return;
		}
	}

	/* Handle non Rev 3.0 or subsequent messages in AMS sequence */
	if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT)) {
		/*
		 * Soft Reset Message pending
		 */
		if ((prl_tx->msg_type == PD_CTRL_SOFT_RESET) &&
		    (prl_tx->emsg.len == 0)) {
			prl_tx_set_state(dev, PRL_TX_LAYER_RESET_FOR_TRANSMIT);
		}
		/*
		 * Message pending (except Soft Reset)
		 */
		else {
			/* NOTE: PRL_TX_Construct_Message State embedded here */
			prl_tx_construct_message(dev);
			prl_tx_set_state(dev, PRL_TX_WAIT_FOR_PHY_RESPONSE);
		}
		return;
	}
}

/**
 * @brief PRL_Tx_Layer_Reset_for_Transmit Entry State
 */
static void prl_tx_layer_reset_for_transmit_entry(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	LOG_INF("PRL_Tx_Layer_Reset_for_Transmit");

	if (prl_tx->emsg.type < NUM_SOP_STAR_TYPES) {
		/*
		 * This state is only used during soft resets. Reset only the
		 * matching message type.
		 *
		 * From section 6.3.13 Soft Reset Message in the USB PD 3.0
		 * v2.0 spec, Soft_Reset Message Shall be targeted at a
		 * specific entity depending on the type of SOP* Packet used.
		 */
		prl_tx->msg_id_counter[prl_tx->emsg.type] = 0;
		/*
		 * From section 6.11.2.3.2, the MessageID should be cleared
		 * from the PRL_Rx_Layer_Reset_for_Receive state. However, we
		 * don't implement a full state machine for PRL RX states so
		 * clear the MessageID here.
		 */
		prl_rx->msg_id[prl_tx->emsg.type] = -1;
	}

	/* NOTE: PRL_Tx_Construct_Message State embedded here */
	prl_tx_construct_message(dev);
	prl_tx_set_state(dev, PRL_TX_WAIT_FOR_PHY_RESPONSE);
}

/**
 * @brief PRL_Tx_Wait_for_PHY_response Entry State
 */
static void prl_tx_wait_for_phy_response_entry(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;

	LOG_INF("PRL_Tx_Wait_for_PHY_response");
	usbc_timer_start(&prl_tx->pd_t_tx_timeout);
}

/**
 * @brief PRL_Tx_Wait_for_PHY_response Run State
 */
static void prl_tx_wait_for_phy_response_run(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;

	/* Wait until TX is complete */
	if (atomic_test_and_clear_bit(&prl_tx->flags, PRL_FLAGS_TX_DISCARDED)) {
		/* NOTE: PRL_TX_DISCARD_MESSAGE State embedded here. */
		/* Inform Policy Engine Message was discarded */
		pe_report_discard(dev);
		prl_tx_set_state(dev, PRL_TX_PHY_LAYER_RESET);
		return;
	}
	if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_TX_COMPLETE)) {
		/* NOTE: PRL_TX_Message_Sent State embedded here. */
		/* Inform Policy Engine Message was sent */
		pe_message_sent(dev);
		/*
		 * This event reduces the time of informing the policy engine
		 * of the transmission by one state machine cycle
		 */
		prl_tx_set_state(dev, PRL_TX_WAIT_FOR_MESSAGE_REQUEST);
		return;
	} else if (usbc_timer_expired(&prl_tx->pd_t_tx_timeout) ||
		   atomic_test_bit(&prl_tx->flags, PRL_FLAGS_TX_ERROR)) {
		/*
		 * NOTE: PRL_Tx_Transmission_Error State embedded
		 * here.
		 */
		/* Report Error To Policy Engine */
		pe_report_error(dev, ERR_XMIT, prl_tx->last_xmit_type);
		prl_tx_set_state(dev, PRL_TX_WAIT_FOR_MESSAGE_REQUEST);
		return;
	}
}

/**
 * @brief PRL_Tx_Wait_for_PHY_response Exit State
 */
static void prl_tx_wait_for_phy_response_exit(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;

	usbc_timer_stop(&prl_tx->pd_t_tx_timeout);

	/* Increment messageId counter */
	increment_msgid_counter(dev);
}

/**
 * @brief PRL_Tx_Snk_Start_of_AMS Entry State
 */
static void prl_tx_snk_start_ams_entry(void *obj)
{
	LOG_INF("PRL_Tx_Snk_Start_of_AMS");
}

/**
 * @brief PRL_Tx_Snk_Start_of_AMS Run State
 */
static void prl_tx_snk_start_ams_run(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;

	if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT)) {
		/*
		 * Don't clear pending XMIT flag here. Wait until we send so
		 * we can detect if we dropped this message or not.
		 */
		prl_tx_set_state(dev, PRL_TX_SNK_PENDING);
	}
}

/**
 * @brief PRL_Tx_Snk_Pending Entry State
 */
static void prl_tx_snk_pending_entry(void *obj)
{
	LOG_INF("PRL_Tx_Snk_Pending");
}

/**
 * @brief PRL_Tx_Snk_Pending Run State
 */
static void prl_tx_snk_pending_run(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;
	struct usbc_port_data *data = dev->data;
	const struct device *tcpc = data->tcpc;
	enum tc_cc_voltage_state cc1;
	enum tc_cc_voltage_state cc2;

	/*
	 * Wait until the SRC applies SINK_TX_OK so we can transmit.
	 */
	tcpc_get_cc(tcpc, &cc1, &cc2);

	/*
	 * We clear the pending XMIT flag here right before we send so
	 * we can detect if we discarded this message or not
	 */
	atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT);

	/*
	 * The Protocol Layer Shall transition to the
	 * PRL_Tx_Layer_Reset_for_Transmit state when a Soft_Reset
	 * Message is pending.
	 */
	if ((prl_tx->msg_type == PD_CTRL_SOFT_RESET) && (prl_tx->emsg.len == 0)) {
		prl_tx_set_state(dev, PRL_TX_LAYER_RESET_FOR_TRANSMIT);
	}
	/*
	 * The Protocol Layer Shall transition to the PRL_Tx_Construct_Message
	 * state when Rp is set to SinkTxOk and a Soft_Reset Message is not
	 * pending.
	 */
	else if (cc1 == TC_CC_VOLT_RP_3A0 || cc2 == TC_CC_VOLT_RP_3A0) {
		/*
		 * Message pending (except Soft Reset) &
		 * Rp = SinkTxOk
		 */
		prl_tx_construct_message(dev);
		prl_tx_set_state(dev, PRL_TX_WAIT_FOR_PHY_RESPONSE);
	}
}

/** @brief PRL_Tx_Suspend Entry State. Not part of the PD specification. */
static void prl_tx_suspend_entry(void *obj)
{
	LOG_INF("PRL_TX_SUSPEND");
}

/** @brief PRL_Tx_Suspend Run State */
static void prl_tx_suspend_run(void *obj)
{
	/* Do nothing */
}

/**
 * All necessary Protocol Hard Reset States (Section 6.12.2.4)
 */

/**
 * @brief PRL_HR_Wait_for_Request Entry State
 *
 * @note This state is not part of the PRL_HR State Diagram found in
 *	 Figure 6-66. The PRL_HR state machine waits here until a
 *	 Hard Reset is requested by either the Policy Engine or the
 *	 PHY Layer.
 */
static void prl_hr_wait_for_request_entry(void *obj)
{
	struct protocol_hard_reset_t *prl_hr = (struct protocol_hard_reset_t *)obj;

	LOG_INF("PRL_HR_Wait_for_Request");

	/* Reset all Protocol Layer Hard Reset flags */
	prl_hr->flags = ATOMIC_INIT(0);
}

/**
 * @brief PRL_HR_Wait_for_Request Run State
 */
static void prl_hr_wait_for_request_run(void *obj)
{
	struct protocol_hard_reset_t *prl_hr = (struct protocol_hard_reset_t *)obj;
	const struct device *dev = prl_hr->dev;

	/*
	 * The PRL_FLAGS_PE_HARD_RESET flag is set when a Hard Reset request is
	 * received from the Policy Engine.
	 *
	 * The PRL_FLAGS_PORT_PARTNER_HARD_RESET flag is set when Hard Reset
	 * signaling is received by the PHY Layer.
	 */
	if (atomic_test_bit(&prl_hr->flags, PRL_FLAGS_PE_HARD_RESET) ||
	    atomic_test_bit(&prl_hr->flags, PRL_FLAGS_PORT_PARTNER_HARD_RESET)) {
		/* Start Hard Reset */
		prl_hr_set_state(dev, PRL_HR_RESET_LAYER);
	}
}

/**
 * @brief PRL_HR_Reset_Layer Entry State
 */
static void prl_hr_reset_layer_entry(void *obj)
{
	struct protocol_hard_reset_t *prl_hr = (struct protocol_hard_reset_t *)obj;
	const struct device *dev = prl_hr->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	const struct device *tcpc = data->tcpc;
	int i;

	LOG_INF("PRL_HR_Reset_Layer");

	/* Reset all Protocol Layer message reception flags */
	prl_rx->flags = ATOMIC_INIT(0);
	/* Reset all Protocol Layer message transmission flags */
	prl_tx->flags = ATOMIC_INIT(0);

	/* Hard reset resets messageIDCounters for all TX types */
	for (i = 0; i < NUM_SOP_STAR_TYPES; i++) {
		prl_rx->msg_id[i] = -1;
		prl_tx->msg_id_counter[i] = 0;
	}

	/* Disable RX */
	tcpc_set_rx_enable(tcpc, false);

	/*
	 * PD r3.0 v2.0, ss6.2.1.1.5:
	 * After a physical or logical (USB Type-C Error Recovery) Attach, a
	 * Port discovers the common Specification Revision level between
	 * itself and its Port Partner and/or the Cable Plug(s), and uses this
	 * Specification Revision level until a Detach, Hard Reset or Error
	 * Recovery happens.
	 *
	 * This covers the Hard Reset case.
	 */
	prl_set_default_pd_revision(dev);

	/*
	 * Protocol Layer message transmission transitions to
	 * PRL_Tx_Wait_For_Message_Request state.
	 */
	prl_tx_set_state(dev, PRL_TX_PHY_LAYER_RESET);

	/*
	 * Protocol Layer message reception transitions to
	 * PRL_Rx_Wait_for_PHY_Message state.
	 *
	 * Note: The PRL_Rx_Wait_for_PHY_Message state is implemented
	 * as a single function, named prl_rx_wait_for_phy_message.
	 */

	/*
	 * Protocol Layer reset Complete &
	 * Hard Reset was initiated by Policy Engine
	 */
	if (atomic_test_bit(&prl_hr->flags, PRL_FLAGS_PE_HARD_RESET)) {
		/*
		 * Request PHY to perform a Hard Reset. Note
		 * PRL_HR_Request_Reset state is embedded here.
		 */
		prl_hr_send_msg_to_phy(dev);
		prl_hr_set_state(dev, PRL_HR_WAIT_FOR_PHY_HARD_RESET_COMPLETE);
	}
	/*
	 * Protocol Layer reset complete &
	 * Hard Reset was initiated by Port Partner
	 */
	else {
		/* Inform Policy Engine of the Hard Reset */
		pe_got_hard_reset(dev);
		prl_hr_set_state(dev, PRL_HR_WAIT_FOR_PE_HARD_RESET_COMPLETE);
	}
}

/**
 * @brief PRL_HR_Wait_for_PHY_Hard_Reset_Complete Entry State
 */
static void prl_hr_wait_for_phy_hard_reset_complete_entry(void *obj)
{
	struct protocol_hard_reset_t *prl_hr = (struct protocol_hard_reset_t *)obj;

	LOG_INF("PRL_HR_Wait_for_PHY_Hard_Reset_Complete");

	/*
	 * Start the HardResetCompleteTimer and wait for the PHY Layer to
	 * indicate that the Hard Reset completed.
	 */
	usbc_timer_start(&prl_hr->pd_t_hard_reset_complete);
}

/**
 * @brief PRL_HR_Wait_for_PHY_Hard_Reset_Complete Run State
 */
static void prl_hr_wait_for_phy_hard_reset_complete_run(void *obj)
{
	struct protocol_hard_reset_t *prl_hr = (struct protocol_hard_reset_t *)obj;
	const struct device *dev = prl_hr->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;

	/*
	 * Wait for hard reset from PHY or timeout
	 */
	if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_TX_COMPLETE) ||
	    usbc_timer_expired(&prl_hr->pd_t_hard_reset_complete)) {
		/* PRL_HR_PHY_Hard_Reset_Requested */
		/* Inform Policy Engine Hard Reset was sent */
		pe_hard_reset_sent(dev);
		prl_hr_set_state(dev, PRL_HR_WAIT_FOR_PE_HARD_RESET_COMPLETE);
	}
}

/**
 * @brief PRL_HR_Wait_for_PHY_Hard_Reset_Complete Exit State
 */
static void prl_hr_wait_for_phy_hard_reset_complete_exit(void *obj)
{
	struct protocol_hard_reset_t *prl_hr = (struct protocol_hard_reset_t *)obj;

	/* Stop the HardResetCompleteTimer */
	usbc_timer_stop(&prl_hr->pd_t_hard_reset_complete);
}

/**
 * @brief PRL_HR_Wait_For_PE_Hard_Reset_Complete Entry State
 */
static void prl_hr_wait_for_pe_hard_reset_complete_entry(void *obj)
{
	LOG_INF("PRL_HR_Wait_For_PE_Hard_Reset_Complete");
}

/**
 * @brief PRL_HR_Wait_For_PE_Hard_Reset_Complete Run State
 */
static void prl_hr_wait_for_pe_hard_reset_complete_run(void *obj)
{
	struct protocol_hard_reset_t *prl_hr = (struct protocol_hard_reset_t *)obj;
	const struct device *dev = prl_hr->dev;

	/* Wait for Hard Reset complete indication from Policy Engine */
	if (atomic_test_bit(&prl_hr->flags, PRL_FLAGS_HARD_RESET_COMPLETE)) {
		prl_hr_set_state(dev, PRL_HR_WAIT_FOR_REQUEST);
	}
}

/** @brief PRL_HR_Suspend Entry State. Not part of the PD specification. */
static void prl_hr_suspend_entry(void *obj)
{
	LOG_INF("PRL_HR_SUSPEND");
}

/** @brief PRL_HR_Suspend Run State */
static void prl_hr_suspend_run(void *obj)
{
	/* Do nothing */
}

/**
 * @brief This function implements the Protocol Layer Message Reception
 *	  State Machine. See Figure 6-55 Protocol layer Message reception
 *
 * The states of the two state machines can be identified by the
 * comments preceded by a NOTE:
 */
static void prl_rx_wait_for_phy_message(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct pd_msg *rx_emsg = &prl_rx->emsg;
	const struct device *tcpc = data->tcpc;
	uint8_t msg_type;
	uint8_t pkt_type;
	uint8_t ext;
	int8_t msid;
	uint8_t num_data_objs;
	uint8_t power_role;

	/* Get the message */
	if (tcpc_receive_data(tcpc, rx_emsg) <= 0) {
		/* No pending message or problem getting the message */
		return;
	}

	num_data_objs = rx_emsg->header.number_of_data_objects;
	msid = rx_emsg->header.message_id;
	msg_type = rx_emsg->header.message_type;
	ext = rx_emsg->header.extended;
	pkt_type = rx_emsg->type;
	power_role = rx_emsg->header.port_power_role;

	/* Dump the received packet content, except for Pings */
	if (msg_type != PD_CTRL_PING) {
		int p;

		LOG_INF("RECV %04x/%d ", rx_emsg->header.raw_value, num_data_objs);
		for (p = 0; p < num_data_objs; p++) {
			LOG_INF("\t[%d]%08x ", p, *((uint32_t *)rx_emsg->data + p));
		}
	}

	/* Ignore messages sent to the cable from our port partner */
	if (pkt_type != PD_PACKET_SOP && power_role == PD_PLUG_FROM_DFP_UFP) {
		return;
	}

	/* Soft Reset Message received from PHY */
	if (num_data_objs == 0 && msg_type == PD_CTRL_SOFT_RESET) {
		/* NOTE: PRL_Rx_Layer_Reset_for_Receive State embedded here */

		/* Reset MessageIdCounter */
		prl_tx->msg_id_counter[pkt_type] = 0;

		/* Clear stored MessageID value */
		prl_rx->msg_id[pkt_type] = -1;

		/*
		 * Protocol Layer message transmission transitions to
		 * PRL_Tx_PHY_Layer_Reset state
		 */
		prl_tx_set_state(dev, PRL_TX_PHY_LAYER_RESET);

		/*
		 * Inform Policy Engine of Soft Reset. Note perform this after
		 * performing the protocol layer reset, otherwise we will lose
		 * the PE's outgoing ACCEPT message to the soft reset.
		 */
		pe_got_soft_reset(dev);
		return;
	}

	/* Ignore if this is a duplicate message. Stop processing */
	if (prl_rx->msg_id[pkt_type] == msid) {
		return;
	}

	/*
	 * Discard any pending TX message if this RX message is from SOP,
	 * except for ping messages.
	 */

	/* Check if message transmit is pending */
	if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT)) {
		/* Don't discard message if a PING was received */
		if ((num_data_objs > 0) || (msg_type != PD_CTRL_PING)) {
			/* Only discard message if received from SOP */
			if (pkt_type == PD_PACKET_SOP) {
				atomic_set_bit(&prl_tx->flags, PRL_FLAGS_TX_DISCARDED);
			}
		}
	}

	/* Store Message Id */
	prl_rx->msg_id[pkt_type] = msid;

	/* Pass message to Policy Engine */
	pe_message_received(dev);
}

/**
 * @brief Protocol Layer Transmit State table
 */
static const struct smf_state prl_tx_states[] = {
	[PRL_TX_PHY_LAYER_RESET] = SMF_CREATE_STATE(
		prl_tx_phy_layer_reset_entry,
		NULL,
		NULL,
		NULL),
	[PRL_TX_WAIT_FOR_MESSAGE_REQUEST] = SMF_CREATE_STATE(
		prl_tx_wait_for_message_request_entry,
		prl_tx_wait_for_message_request_run,
		NULL,
		NULL),
	[PRL_TX_LAYER_RESET_FOR_TRANSMIT] = SMF_CREATE_STATE(
		prl_tx_layer_reset_for_transmit_entry,
		NULL,
		NULL,
		NULL),
	[PRL_TX_WAIT_FOR_PHY_RESPONSE] = SMF_CREATE_STATE(
		prl_tx_wait_for_phy_response_entry,
		prl_tx_wait_for_phy_response_run,
		prl_tx_wait_for_phy_response_exit,
		NULL),
	[PRL_TX_SNK_START_AMS] = SMF_CREATE_STATE(
		prl_tx_snk_start_ams_entry,
		prl_tx_snk_start_ams_run,
		NULL,
		NULL),
	[PRL_TX_SNK_PENDING] = SMF_CREATE_STATE(
		prl_tx_snk_pending_entry,
		prl_tx_snk_pending_run,
		NULL,
		NULL),
	[PRL_TX_SUSPEND] = SMF_CREATE_STATE(
		prl_tx_suspend_entry,
		prl_tx_suspend_run,
		NULL,
		NULL),
};

/**
 * @brief Protocol Layer Hard Reset State table
 */
static const struct smf_state prl_hr_states[] = {
	[PRL_HR_WAIT_FOR_REQUEST] = SMF_CREATE_STATE(
		prl_hr_wait_for_request_entry,
		prl_hr_wait_for_request_run,
		NULL,
		NULL),
	[PRL_HR_RESET_LAYER] = SMF_CREATE_STATE(
		prl_hr_reset_layer_entry,
		NULL,
		NULL,
		NULL),
	[PRL_HR_WAIT_FOR_PHY_HARD_RESET_COMPLETE] = SMF_CREATE_STATE(
		prl_hr_wait_for_phy_hard_reset_complete_entry,
		prl_hr_wait_for_phy_hard_reset_complete_run,
		prl_hr_wait_for_phy_hard_reset_complete_exit,
		NULL),
	[PRL_HR_WAIT_FOR_PE_HARD_RESET_COMPLETE] = SMF_CREATE_STATE(
		prl_hr_wait_for_pe_hard_reset_complete_entry,
		prl_hr_wait_for_pe_hard_reset_complete_run,
		NULL,
		NULL),
	[PRL_HR_SUSPEND] = SMF_CREATE_STATE(
		prl_hr_suspend_entry,
		prl_hr_suspend_run,
		NULL,
		NULL),
};
diff --git a/subsys/usb/usb_c/usbc_prl.h b/subsys/usb/usb_c/usbc_prl.h
new file mode 100644
index 00000000000..f7af411b885
--- /dev/null
+++ b/subsys/usb/usb_c/usbc_prl.h
@@ -0,0 +1,191 @@
/*
 * Copyright (c) 2022 The Chromium OS Authors
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_SUBSYS_USBC_PRL_H_
#define ZEPHYR_SUBSYS_USBC_PRL_H_

/*
 * NOTE(review): the angle-bracket targets of these four #include lines were
 * lost in extraction — restore them from the original patch (they bring in
 * the kernel, SMF, and USB-C/TCPC headers used below); verify against the
 * upstream file.
 */
#include
#include
#include
#include

#include "usbc_pe.h"
#include "usbc_timer.h"

/**
 * @brief PD counter definitions
 * See Table 6-63 Counter parameters
 * Parameter Name: nMessageIDCount
 */
#define PD_MESSAGE_ID_COUNT 7

/**
 * @brief Message Reception State Machine Object
 */
struct protocol_layer_rx_t {
	/** state machine flags */
	atomic_t flags;
	/** message ids for all valid port partners */
	int msg_id[NUM_SOP_STAR_TYPES];
	/** Received Power Delivery Messages are stored in emsg */
	struct pd_msg emsg;
};

/**
 * @brief Message Transmission State Machine Object
 */
struct protocol_layer_tx_t {
	/** state machine context */
	struct smf_ctx ctx;
	/** Port device */
	const struct device *dev;
	/** state machine flags */
	atomic_t flags;
	/** last packet type we transmitted */
	enum pd_packet_type last_xmit_type;
+ /** Current message type to transmit */ + uint8_t msg_type; + /** + * Power Delivery Messages meant for transmission are stored + * in emsg + */ + struct pd_msg emsg; + + /* Counters */ + + /** message id counters for all 6 port partners */ + uint32_t msg_id_counter[NUM_SOP_STAR_TYPES]; + + /* Timers */ + + /** tTxTimeout timer */ + struct usbc_timer_t pd_t_tx_timeout; +}; + +/** + * @brief Hard Reset State Machine Object + */ +struct protocol_hard_reset_t { + /** state machine context */ + struct smf_ctx ctx; + /** Port device */ + const struct device *dev; + /** state machine flags */ + atomic_t flags; + + /* Timers */ + + /** tHardResetComplete timer */ + struct usbc_timer_t pd_t_hard_reset_complete; +}; + +/** + * @brief This function must only be called in the subsystem init function. + * + * @param dev Pointer to the device structure for the driver instance. + */ +void prl_subsys_init(const struct device *dev); + +/** + * @brief Start the PRL Layer state machine. This is only called from the + * Type-C state machine. + * + * @param dev Pointer to the device structure for the driver instance + */ +void prl_start(const struct device *dev); + +/** + * @brief Suspends the PRL Layer state machine. This is only called from the + * Type-C state machine. + * + * @param dev Pointer to the device structure for the driver instance + */ +void prl_suspend(const struct device *dev); + +/** + * @brief Reset the PRL Layer state machine + * + * @param dev Pointer to the device structure for the driver instance + */ +void prl_reset(const struct device *dev); + +/** + * @brief Run the PRL Layer state machine. 
This is called from the subsystems + * port stack thread + * + * @param dev Pointer to the device structure for the driver instance + */ +void prl_run(const struct device *dev); + +/** + * @brief Called from the Policy Engine to signal that a hard reset is complete + * + * @param dev Pointer to the device structure for the driver instance + */ +void prl_hard_reset_complete(const struct device *dev); + +/** + * @brief Sets the revision received from the port partner + * + * @param dev Pointer to the device structure for the driver instance + * @param type SOP* packet sent from port partner + * @param rev Revision sent from the port partner + */ +void prl_set_rev(const struct device *dev, + const enum pd_packet_type type, + const enum pd_rev_type rev); + +/** + * @brief Gets the revision received associated with a packet type + * + * @param dev Pointer to the device structure for the driver instance + * @param type SOP* packet type to get the revision for + * + * @retval revision associated with the packet type + */ +enum pd_rev_type prl_get_rev(const struct device *dev, + const enum pd_packet_type type); + +/** + * @brief Instructs the Protocol Layer to send a Power Delivery control message + * + * @param dev Pointer to the device structure for the driver instance + * @param type The port partner to send this message to + * @param msg The control message to send + */ +void prl_send_ctrl_msg(const struct device *dev, + const enum pd_packet_type type, + const enum pd_ctrl_msg_type msg); + +/** + * @brief Instructs the Protocol Layer to send a Power Delivery data message + * + * @param dev Pointer to the device structure for the driver instance + * @param type The port partner to send this message to + * @param msg The data message to send + */ +void prl_send_data_msg(const struct device *dev, + const enum pd_packet_type type, + const enum pd_data_msg_type msg); + +/** + * @brief Instructs the Protocol Layer to execute a hard reset + * + * @param dev Pointer to the
device structure for the driver instance + */ +void prl_execute_hard_reset(const struct device *dev); + +/** + * @brief Query if the Protocol Layer is running + * + * @param dev Pointer to the device structure for the driver instance + * + * @retval TRUE if the Protocol Layer is running + * @retval FALSE if the Protocol Layer is not running + */ +bool prl_is_running(const struct device *dev); + +#endif /* ZEPHYR_SUBSYS_USBC_PRL_H_ */ diff --git a/subsys/usb/usb_c/usbc_stack.c b/subsys/usb/usb_c/usbc_stack.c new file mode 100644 index 00000000000..8b978efa529 --- /dev/null +++ b/subsys/usb/usb_c/usbc_stack.c @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2022 The Chromium OS Authors + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#define DT_DRV_COMPAT usb_c_connector + +#include +#include +#include +#include +#include +#include +LOG_MODULE_REGISTER(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL); + +#include "usbc_stack.h" + + +static int usbc_subsys_init(const struct device *dev); + +static ALWAYS_INLINE void usbc_handler(void *port_dev) +{ + const struct device *dev = (const struct device *)port_dev; + struct usbc_port_data *port = dev->data; + struct request_value *req; + int32_t request; + + req = k_fifo_get(&port->request_fifo, K_NO_WAIT); + request = (req != NULL) ? 
req->val : REQUEST_NOP; + pe_run(dev, request); + prl_run(dev); + tc_run(dev, request); + + if (request == PRIV_PORT_REQUEST_SUSPEND) { + k_thread_suspend(port->port_thread); + } + + k_msleep(CONFIG_USBC_STATE_MACHINE_CYCLE_TIME); +} + +#define USBC_SUBSYS_INIT(inst) \ + K_THREAD_STACK_DEFINE(my_stack_area_##inst, \ + CONFIG_USBC_STACK_SIZE); \ + \ + static struct tc_sm_t tc_##inst; \ + static struct policy_engine pe_##inst; \ + static struct protocol_layer_rx_t prl_rx_##inst; \ + static struct protocol_layer_tx_t prl_tx_##inst; \ + static struct protocol_hard_reset_t prl_hr_##inst; \ + \ + static void run_usbc_##inst(void *port_dev, \ + void *unused1, \ + void *unused2) \ + { \ + while (1) { \ + usbc_handler(port_dev); \ + } \ + } \ + \ + static void create_thread_##inst(const struct device *dev) \ + { \ + struct usbc_port_data *port = dev->data; \ + \ + port->port_thread = k_thread_create(&port->thread_data, \ + my_stack_area_##inst, \ + K_THREAD_STACK_SIZEOF(my_stack_area_##inst), \ + run_usbc_##inst, \ + (void *)dev, 0, 0, \ + CONFIG_USBC_THREAD_PRIORITY, \ + K_ESSENTIAL, \ + K_NO_WAIT); \ + k_thread_suspend(port->port_thread); \ + } \ + \ + static struct usbc_port_data usbc_port_data_##inst = { \ + .tc = &tc_##inst, \ + .pe = &pe_##inst, \ + .prl_rx = &prl_rx_##inst, \ + .prl_tx = &prl_tx_##inst, \ + .prl_hr = &prl_hr_##inst, \ + .tcpc = DEVICE_DT_GET(DT_INST_PROP(inst, tcpc)), \ + .vbus = DEVICE_DT_GET(DT_INST_PROP(inst, vbus)), \ + }; \ + \ +static const struct usbc_port_config usbc_port_config_##inst = { \ + .create_thread = create_thread_##inst, \ +}; \ + \ +DEVICE_DT_INST_DEFINE(inst, \ + &usbc_subsys_init, \ + NULL, \ + &usbc_port_data_##inst, \ + &usbc_port_config_##inst, \ + APPLICATION, \ + CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, \ + NULL); + +DT_INST_FOREACH_STATUS_OKAY(USBC_SUBSYS_INIT) + +/** + * @brief Called by the Device Policy Manager to start the USB-C Subsystem + */ +int usbc_start(const struct device *dev) +{ + struct usbc_port_data *data = 
dev->data; + + /* Add private start request to fifo */ + data->request.val = PRIV_PORT_REQUEST_START; + k_fifo_put(&data->request_fifo, &data->request); + + /* Start the port thread */ + k_thread_resume(data->port_thread); + + return 0; +} + +/** + * @brief Called by the Device Policy Manager to suspend the USB-C Subsystem + */ +int usbc_suspend(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + + /* Add private suspend request to fifo */ + data->request.val = PRIV_PORT_REQUEST_SUSPEND; + k_fifo_put(&data->request_fifo, &data->request); + + return 0; +} + +/** + * @brief Called by the Device Policy Manager to make a request of the + * USB-C Subsystem + */ +int usbc_request(const struct device *dev, + const enum usbc_policy_request_t req) +{ + struct usbc_port_data *data = dev->data; + + /* Add public request to fifo */ + data->request.val = req; + k_fifo_put(&data->request_fifo, &data->request); + + return 0; +} + +/** + * @brief Sets the Device Policy Manager's data + */ +void usbc_set_dpm_data(const struct device *dev, + void *dpm_data) +{ + struct usbc_port_data *data = dev->data; + + data->dpm_data = dpm_data; +} + +/** + * @brief Gets the Device Policy Manager's data + */ +void *usbc_get_dpm_data(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + + return data->dpm_data; +} + +/** + * @brief Set the callback that gets the Sink Capabilities from the + * Device Policy Manager + */ +void usbc_set_policy_cb_get_snk_cap(const struct device *dev, + const policy_cb_get_snk_cap_t policy_cb_get_snk_cap) +{ + struct usbc_port_data *data = dev->data; + + data->policy_cb_get_snk_cap = policy_cb_get_snk_cap; +} + +/** + * @brief Set the callback that sends the received Source Capabilities to the + * Device Policy Manager + */ +void usbc_set_policy_cb_set_src_cap(const struct device *dev, + const policy_cb_set_src_cap_t policy_cb_set_src_cap) +{ + struct usbc_port_data *data = dev->data; + + data->policy_cb_set_src_cap = 
policy_cb_set_src_cap; +} + +/** + * @brief Set the callback for the Device Policy Manager policy check + */ +void usbc_set_policy_cb_check(const struct device *dev, + const policy_cb_check_t policy_cb_check) +{ + struct usbc_port_data *data = dev->data; + + data->policy_cb_check = policy_cb_check; +} + +/** + * @brief Set the callback for the Device Policy Manager policy change notify + */ +void usbc_set_policy_cb_notify(const struct device *dev, + const policy_cb_notify_t policy_cb_notify) +{ + struct usbc_port_data *data = dev->data; + + data->policy_cb_notify = policy_cb_notify; +} + +/** + * @brief Set the callback for the Device Policy Manager policy change notify + */ +void usbc_set_policy_cb_wait_notify(const struct device *dev, + const policy_cb_wait_notify_t policy_cb_wait_notify) +{ + struct usbc_port_data *data = dev->data; + + data->policy_cb_wait_notify = policy_cb_wait_notify; +} + +/** + * @brief Set the callback for requesting the data object (RDO) + */ +void usbc_set_policy_cb_get_rdo(const struct device *dev, + const policy_cb_get_rdo_t policy_cb_get_rdo) +{ + struct usbc_port_data *data = dev->data; + + data->policy_cb_get_rdo = policy_cb_get_rdo; +} + +/** + * @brief Set the callback for checking if Sink Power Supply is at + * default level + */ +void usbc_set_policy_cb_is_snk_at_default(const struct device *dev, + const policy_cb_is_snk_at_default_t policy_cb_is_snk_at_default) +{ + struct usbc_port_data *data = dev->data; + + data->policy_cb_is_snk_at_default = policy_cb_is_snk_at_default; +} + +/** + * @brief Initialize the USB-C Subsystem + */ +static int usbc_subsys_init(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + const struct usbc_port_config *const config = dev->config; + const struct device *tcpc = data->tcpc; + + /* Make sure TCPC is ready */ + if (!device_is_ready(tcpc)) { + LOG_ERR("TCPC NOT READY"); + return -ENODEV; + } + + /* Initialize the state machines */ + tc_subsys_init(dev); + 
pe_subsys_init(dev); + prl_subsys_init(dev); + + /* Initialize the request fifo */ + k_fifo_init(&data->request_fifo); + + /* Create the thread for this port */ + config->create_thread(dev); + return 0; +} diff --git a/subsys/usb/usb_c/usbc_stack.h b/subsys/usb/usb_c/usbc_stack.h new file mode 100644 index 00000000000..da49b5a518d --- /dev/null +++ b/subsys/usb/usb_c/usbc_stack.h @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2022 The Chromium OS Authors + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_SUBSYS_USBC_STACK_PRIV_H_ +#define ZEPHYR_SUBSYS_USBC_STACK_PRIV_H_ + +#include +#include + +#include "usbc_tc.h" +#include "usbc_pe.h" +#include "usbc_prl.h" + +#define PRIV_PORT_REQUEST_SUSPEND -1 +#define PRIV_PORT_REQUEST_START -2 + +/** + * @brief Each layer of the stack is composed of state machines that can be + * in one of the following states. + */ +enum usbc_sm_state { + /** The state machine is paused */ + SM_PAUSED, + /** The state machine is initializing */ + SM_INIT, + /** The state machine is running */ + SM_RUN +}; + +/** + * @brief Port config + */ +struct usbc_port_config { + /** + * The usbc stack initializes this pointer that creates the + * main thread for this port + */ + void (*create_thread)(const struct device *dev); + /** The thread stack for this port's thread */ + k_thread_stack_t *stack; +}; + +/** + * @brief Request FIFO + */ +struct request_value { + /** First word is reserved for use by FIFO */ + void *fifo_reserved; + /** Request value */ + int32_t val; +}; + +/** + * @brief Port data + */ +struct usbc_port_data { + /** This port's thread */ + k_tid_t port_thread; + /** This port thread's data */ + struct k_thread thread_data; + + /* Type-C layer data */ + + /** Type-C state machine object */ + struct tc_sm_t *tc; + /** Enables or Disables the Type-C state machine */ + bool tc_enabled; + /** The state of the Type-C state machine */ + enum usbc_sm_state tc_sm_state; + + /* Policy Engine layer data */ + + /** Policy Engine 
state machine object */ + struct policy_engine *pe; + /** Enables or Disables the Policy Engine state machine */ + bool pe_enabled; + /** The state of the Policy Engine state machine */ + enum usbc_sm_state pe_sm_state; + + /* Protocol Layer data */ + + /** Protocol Receive Layer state machine object */ + struct protocol_layer_rx_t *prl_rx; + /** Protocol Transmit Layer state machine object */ + struct protocol_layer_tx_t *prl_tx; + /** Protocol Hard Reset Layer state machine object */ + struct protocol_hard_reset_t *prl_hr; + /** Enables or Disables the Protocol Layer state machine */ + bool prl_enabled; + /** The state of the Protocol Layer state machine */ + enum usbc_sm_state prl_sm_state; + + /* Common data for all layers */ + + /** Power Delivery revisions for each packet type */ + enum pd_rev_type rev[NUM_SOP_STAR_TYPES]; + /** The Type-C Port Controller on this port */ + const struct device *tcpc; + /** VBUS Measurement and control device on this port */ + const struct device *vbus; + + /** Device Policy Manager Request FIFO */ + struct k_fifo request_fifo; + /** Device Policy manager Request */ + struct request_value request; + + /* USB-C Callbacks */ + + /** + * Callback used by the Policy Engine to ask the Device Policy Manager + * if a particular policy should be allowed + */ + bool (*policy_cb_check)(const struct device *dev, + const enum usbc_policy_check_t policy_check); + /** + * Callback used by the Policy Engine to notify the Device Policy + * Manager of a policy change + */ + void (*policy_cb_notify)(const struct device *dev, + const enum usbc_policy_notify_t policy_notify); + /** + * Callback used by the Policy Engine to notify the Device Policy + * Manager of WAIT message reception + */ + bool (*policy_cb_wait_notify)(const struct device *dev, + const enum usbc_policy_wait_t policy_notify); + /** + * Callback used by the Policy Engine to get the Sink Capabilities + * from the Device Policy Manager + */ + int (*policy_cb_get_snk_cap)(const 
struct device *dev, + uint32_t **pdos, + int *num_pdos); + + /** + * Callback used by the Policy Engine to send the received Source + * Capabilities to the Device Policy Manager + */ + void (*policy_cb_set_src_cap)(const struct device *dev, + const uint32_t *pdos, + const int num_pdos); + /** + * Callback used by the Policy Engine to get the Request Data Object + * (RDO) from the Device Policy Manager + */ + uint32_t (*policy_cb_get_rdo)(const struct device *dev); + + /** + * Callback used by the Policy Engine to check if Sink Power Supply + * is at default level + */ + bool (*policy_cb_is_snk_at_default)(const struct device *dev); + + /** Device Policy Manager data */ + void *dpm_data; +}; + +#endif /* ZEPHYR_SUBSYS_USBC_STACK_PRIV_H_ */ diff --git a/subsys/usb/usb_c/usbc_tc.c b/subsys/usb/usb_c/usbc_tc.c new file mode 100644 index 00000000000..20e24caa966 --- /dev/null +++ b/subsys/usb/usb_c/usbc_tc.c @@ -0,0 +1,522 @@ +/* + * Copyright (c) 2022 The Chromium OS Authors + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL); + +#include "usbc_stack.h" + +/** + * @brief Type-C Layer Flags + */ +enum tc_flags { + /** + * Flag to track Rp resistor change when the sink attached + * sub-state runs + */ + TC_FLAGS_RP_SUBSTATE_CHANGE = 0, +}; + +/** + * @brief Type-C States + */ +enum tc_state_t { + /** Super state that opens the CC lines */ + TC_CC_OPEN_SUPER_STATE, + /** Super state that applies Rd to the CC lines */ + TC_CC_RD_SUPER_STATE, + /** Disabled State */ + TC_DISABLED_STATE, + /** Error Recovery State */ + TC_ERROR_RECOVERY_STATE, + /** Unattached Sink State */ + TC_UNATTACHED_SNK_STATE, + /** Attach Wait Sink State */ + TC_ATTACH_WAIT_SNK_STATE, + /** Attached Sink State */ + TC_ATTACHED_SNK_STATE, +}; + +static const struct smf_state tc_snk_states[]; +static void tc_init(const struct device *dev); +static void tc_set_state(const struct device *dev, + const enum tc_state_t state); +static 
enum tc_state_t tc_get_state(const struct device *dev); +static void pd_enable(const struct device *dev, + const bool enable); + +/** + * @brief Initializes the state machine and enters the Disabled state + */ +void tc_subsys_init(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + struct tc_sm_t *tc = data->tc; + + /* Save the port device object so states can access it */ + tc->dev = dev; + + /* Initialize the state machine */ + smf_set_initial(SMF_CTX(tc), &tc_snk_states[TC_DISABLED_STATE]); +} + +/** + * @brief Runs the Type-C layer + */ +void tc_run(const struct device *dev, + const int32_t dpm_request) +{ + struct usbc_port_data *data = dev->data; + const struct device *tcpc = data->tcpc; + struct tc_sm_t *tc = data->tc; + + /* These requests are implicitly set by the Device Policy Manager */ + if (dpm_request == PRIV_PORT_REQUEST_START) { + data->tc_enabled = true; + } else if (dpm_request == PRIV_PORT_REQUEST_SUSPEND) { + data->tc_enabled = false; + tc_set_state(dev, TC_DISABLED_STATE); + } + + switch (data->tc_sm_state) { + case SM_PAUSED: + if (data->tc_enabled == false) { + break; + } + /* fall through */ + case SM_INIT: + /* Initialize the Type-C layer */ + tc_init(dev); + data->tc_sm_state = SM_RUN; + /* fall through */ + case SM_RUN: + if (data->tc_enabled == false) { + pd_enable(dev, false); + data->tc_sm_state = SM_PAUSED; + break; + } + + /* Sample CC lines */ + tcpc_get_cc(tcpc, &tc->cc1, &tc->cc2); + + /* Detect polarity */ + tc->cc_polarity = (tc->cc1 > tc->cc2) ? 
+ TC_POLARITY_CC1 : TC_POLARITY_CC2; + + /* Execute any asynchronous Device Policy Manager Requests */ + if (dpm_request == REQUEST_TC_ERROR_RECOVERY) { + /* Transition to Error Recovery State */ + tc_set_state(dev, TC_ERROR_RECOVERY_STATE); + } else if (dpm_request == REQUEST_TC_DISABLED) { + /* Transition to Disabled State */ + tc_set_state(dev, TC_DISABLED_STATE); + } + + /* Run state machine */ + smf_run_state(SMF_CTX(tc)); + } +} + +/** + * @brief Checks if the TC Layer is in an Attached state + */ +bool tc_is_in_attached_state(const struct device *dev) +{ + return (tc_get_state(dev) == TC_ATTACHED_SNK_STATE); +} + +/** + * @brief Initializes the Type-C layer + */ +static void tc_init(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + struct tc_sm_t *tc = data->tc; + + /* Initialize the timers */ + usbc_timer_init(&tc->tc_t_error_recovery, TC_T_ERROR_RECOVERY_SOURCE_MIN_MS); + usbc_timer_init(&tc->tc_t_cc_debounce, TC_T_CC_DEBOUNCE_MAX_MS); + usbc_timer_init(&tc->tc_t_rp_value_change, TC_T_RP_VALUE_CHANGE_MAX_MS); + + /* Clear the flags */ + tc->flags = ATOMIC_INIT(0); + + /* Initialize the TCPC */ + tcpc_init(data->tcpc); + + /* Initialize the state machine */ + /* + * Start out in error recovery state so the CC lines are opened for a + * short while if this is a system reset. 
+ */ + tc_set_state(dev, TC_ERROR_RECOVERY_STATE); +} + +/** + * @brief Sets a Type-C state + */ +static void tc_set_state(const struct device *dev, + const enum tc_state_t state) +{ + struct usbc_port_data *data = dev->data; + struct tc_sm_t *tc = data->tc; + + smf_set_state(SMF_CTX(tc), &tc_snk_states[state]); +} + +/** + * @brief Get the Type-C current state + */ +static enum tc_state_t tc_get_state(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + + return data->tc->ctx.current - &tc_snk_states[0]; +} + +/** + * @brief Enable Power Delivery + */ +static void pd_enable(const struct device *dev, + const bool enable) +{ + if (enable) { + prl_start(dev); + pe_start(dev); + } else { + prl_suspend(dev); + pe_suspend(dev); + } +} + +/** + * @brief Sink power sub states. Only called if a PD contract is not in place + */ +static void sink_power_sub_states(const struct device *dev) +{ + struct usbc_port_data *data = dev->data; + enum tc_cc_voltage_state cc; + enum tc_cc_voltage_state new_cc_voltage; + enum usbc_policy_check_t dpm_pwr_change_notify; + struct tc_sm_t *tc = data->tc; + + /* Get the active CC line */ + cc = tc->cc_polarity ? tc->cc2 : tc->cc1; + + if (cc == TC_CC_VOLT_RP_DEF) { + /* + * This sub-state supports Sinks consuming current within the + * lowest range (default) of Source-supplied current. + */ + new_cc_voltage = TC_CC_VOLT_RP_DEF; + dpm_pwr_change_notify = POWER_CHANGE_DEF; + } else if (cc == TC_CC_VOLT_RP_1A5) { + /* + * This sub-state supports Sinks consuming current within the + * two lower ranges (default and 1.5 A) of Source-supplied + * current. + */ + new_cc_voltage = TC_CC_VOLT_RP_1A5; + dpm_pwr_change_notify = POWER_CHANGE_1A5; + } else if (cc == TC_CC_VOLT_RP_3A0) { + /* + * This sub-state supports Sinks consuming current within all + * three ranges (default, 1.5 A and 3.0 A) of Source-supplied + * current. 
+ */ + new_cc_voltage = TC_CC_VOLT_RP_3A0; + dpm_pwr_change_notify = POWER_CHANGE_3A0; + } else { + /* Disconnect detected */ + new_cc_voltage = TC_CC_VOLT_OPEN; + dpm_pwr_change_notify = POWER_CHANGE_0A0; + } + + /* Debounce the Rp state */ + if (new_cc_voltage != tc->cc_voltage) { + tc->cc_voltage = new_cc_voltage; + atomic_set_bit(&tc->flags, TC_FLAGS_RP_SUBSTATE_CHANGE); + usbc_timer_start(&tc->tc_t_rp_value_change); + } + + /* Wait for Rp debounce */ + if (usbc_timer_expired(&tc->tc_t_rp_value_change) == false) { + return; + } + + /* Notify DPM of sink sub-state power change */ + if (atomic_test_and_clear_bit(&tc->flags, + TC_FLAGS_RP_SUBSTATE_CHANGE)) { + if (data->policy_cb_notify) { + data->policy_cb_notify(dev, dpm_pwr_change_notify); + } + } +} + +/** + * @brief Unattached.SNK Entry + */ +static void tc_unattached_snk_entry(void *obj) +{ + LOG_INF("Unattached.SNK"); +} + +/** + * @brief Unattached.SNK Run + */ +static void tc_unattached_snk_run(void *obj) +{ + struct tc_sm_t *tc = (struct tc_sm_t *)obj; + const struct device *dev = tc->dev; + + /* + * Transition to AttachWait.SNK when the SNK.Rp state is present + * on at least one of its CC pins. 
+ */ + if (tcpc_is_cc_rp(tc->cc1) || tcpc_is_cc_rp(tc->cc2)) { + tc_set_state(dev, TC_ATTACH_WAIT_SNK_STATE); + } +} + +/** + * @brief AttachWait.SNK Entry + */ +static void tc_attach_wait_snk_entry(void *obj) +{ + struct tc_sm_t *tc = (struct tc_sm_t *)obj; + + LOG_INF("AttachWait.SNK"); + + tc->cc_state = TC_CC_NONE; +} + +/** + * @brief AttachWait.SNK Run + */ +static void tc_attach_wait_snk_run(void *obj) +{ + struct tc_sm_t *tc = (struct tc_sm_t *)obj; + const struct device *dev = tc->dev; + struct usbc_port_data *data = dev->data; + const struct device *vbus = data->vbus; + enum tc_cc_states new_cc_state; + bool vbus_present; + + if (tcpc_is_cc_rp(tc->cc1) || tcpc_is_cc_rp(tc->cc2)) { + new_cc_state = TC_CC_DFP_ATTACHED; + } else { + new_cc_state = TC_CC_NONE; + } + + /* Debounce the cc state */ + if (new_cc_state != tc->cc_state) { + usbc_timer_start(&tc->tc_t_cc_debounce); + tc->cc_state = new_cc_state; + } + + /* Wait for CC debounce */ + if (usbc_timer_running(&tc->tc_t_cc_debounce) && + usbc_timer_expired(&tc->tc_t_cc_debounce) == false) { + return; + } + + /* Transition to UnAttached.SNK if CC lines are open */ + if (new_cc_state == TC_CC_NONE) { + tc_set_state(dev, TC_UNATTACHED_SNK_STATE); + } + + /* + * The port shall transition to Attached.SNK after the state of only + * one of the CC1 or CC2 pins is SNK.Rp for at least tCCDebounce and + * VBUS is detected. 
+ */ + vbus_present = usbc_vbus_check_level(vbus, TC_VBUS_PRESENT); + + if (vbus_present) { + tc_set_state(dev, TC_ATTACHED_SNK_STATE); + } +} + +static void tc_attach_wait_snk_exit(void *obj) +{ + struct tc_sm_t *tc = (struct tc_sm_t *)obj; + + usbc_timer_stop(&tc->tc_t_cc_debounce); +} + +/** + * @brief Attached.SNK Entry + */ +static void tc_attached_snk_entry(void *obj) +{ + struct tc_sm_t *tc = (struct tc_sm_t *)obj; + const struct device *dev = tc->dev; + struct usbc_port_data *data = dev->data; + const struct device *tcpc = data->tcpc; + + LOG_INF("Attached.SNK"); + + /* Set CC polarity */ + tcpc_set_cc_polarity(tcpc, tc->cc_polarity); + + /* Enable PD */ + pd_enable(dev, true); +} + +/** + * @brief Attached.SNK and DebugAccessory.SNK Run + */ +static void tc_attached_snk_run(void *obj) +{ + struct tc_sm_t *tc = (struct tc_sm_t *)obj; + const struct device *dev = tc->dev; + struct usbc_port_data *data = dev->data; + const struct device *vbus = data->vbus; + + /* Detach detection */ + if (usbc_vbus_check_level(vbus, TC_VBUS_PRESENT) == false) { + tc_set_state(dev, TC_UNATTACHED_SNK_STATE); + return; + } + + /* Run Sink Power Sub-State if not in an explicit contract */ + if (pe_is_explicit_contract(dev) == false) { + sink_power_sub_states(dev); + } +} + +/** + * @brief Attached.SNK and DebugAccessory.SNK Exit + */ +static void tc_attached_snk_exit(void *obj) +{ + struct tc_sm_t *tc = (struct tc_sm_t *)obj; + const struct device *dev = tc->dev; + + /* Disable PD */ + pd_enable(dev, false); +} + +/** + * @brief CC Open Entry + */ +static void tc_cc_open_entry(void *obj) +{ + struct tc_sm_t *tc = (struct tc_sm_t *)obj; + const struct device *dev = tc->dev; + struct usbc_port_data *data = dev->data; + const struct device *tcpc = data->tcpc; + + tc->cc_voltage = TC_CC_VOLT_OPEN; + + /* Disable VCONN */ + tcpc_set_vconn(tcpc, false); + + /* Open CC lines */ + tcpc_set_cc(tcpc, TC_CC_OPEN); +} + +/** + * @brief Rd on CC lines Entry + */ +static void 
tc_cc_rd_entry(void *obj) +{ + struct tc_sm_t *tc = (struct tc_sm_t *)obj; + const struct device *dev = tc->dev; + struct usbc_port_data *data = dev->data; + const struct device *tcpc = data->tcpc; + + tcpc_set_cc(tcpc, TC_CC_RD); +} + +/** + * @brief Disabled Entry + */ +static void tc_disabled_entry(void *obj) +{ + LOG_INF("Disabled"); +} + +/** + * @brief Disabled Run + */ +static void tc_disabled_run(void *obj) +{ + /* Do nothing */ +} + +/** + * @brief ErrorRecovery Entry + */ +static void tc_error_recovery_entry(void *obj) +{ + struct tc_sm_t *tc = (struct tc_sm_t *)obj; + + LOG_INF("ErrorRecovery"); + + /* Start tErrorRecovery timer */ + usbc_timer_start(&tc->tc_t_error_recovery); +} + +/** + * @brief ErrorRecovery Run + */ +static void tc_error_recovery_run(void *obj) +{ + struct tc_sm_t *tc = (struct tc_sm_t *)obj; + const struct device *dev = tc->dev; + + /* Wait for expiry */ + if (usbc_timer_expired(&tc->tc_t_error_recovery) == false) { + return; + } + + /* Transition to Unattached.SNK */ + tc_set_state(dev, TC_UNATTACHED_SNK_STATE); +} + +/** + * @brief Type-C State Table + */ +static const struct smf_state tc_snk_states[] = { + /* Super States */ + [TC_CC_OPEN_SUPER_STATE] = SMF_CREATE_STATE( + tc_cc_open_entry, + NULL, + NULL, + NULL), + [TC_CC_RD_SUPER_STATE] = SMF_CREATE_STATE( + tc_cc_rd_entry, + NULL, + NULL, + NULL), + /* Normal States */ + [TC_UNATTACHED_SNK_STATE] = SMF_CREATE_STATE( + tc_unattached_snk_entry, + tc_unattached_snk_run, + NULL, + &tc_snk_states[TC_CC_RD_SUPER_STATE]), + [TC_ATTACH_WAIT_SNK_STATE] = SMF_CREATE_STATE( + tc_attach_wait_snk_entry, + tc_attach_wait_snk_run, + tc_attach_wait_snk_exit, + &tc_snk_states[TC_CC_RD_SUPER_STATE]), + [TC_ATTACHED_SNK_STATE] = SMF_CREATE_STATE( + tc_attached_snk_entry, + tc_attached_snk_run, + tc_attached_snk_exit, + NULL), + [TC_DISABLED_STATE] = SMF_CREATE_STATE( + tc_disabled_entry, + tc_disabled_run, + NULL, + &tc_snk_states[TC_CC_OPEN_SUPER_STATE]), + [TC_ERROR_RECOVERY_STATE] = 
SMF_CREATE_STATE( + tc_error_recovery_entry, + tc_error_recovery_run, + NULL, + &tc_snk_states[TC_CC_OPEN_SUPER_STATE]), +}; diff --git a/subsys/usb/usb_c/usbc_tc.h b/subsys/usb/usb_c/usbc_tc.h new file mode 100644 index 00000000000..1b6125803dd --- /dev/null +++ b/subsys/usb/usb_c/usbc_tc.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2022 The Chromium OS Authors + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_SUBSYS_USBC_H_ +#define ZEPHYR_SUBSYS_USBC_H_ + +#include +#include +#include +#include "usbc_timer.h" + +/** + * @brief TC Layer State Machine Object + */ +struct tc_sm_t { + /** TC layer state machine context */ + struct smf_ctx ctx; + /** Port device */ + const struct device *dev; + /** TC layer flags */ + atomic_t flags; + /** VBUS measurement device */ + const struct device *vbus_dev; + /** Port polarity */ + enum tc_cc_polarity cc_polarity; + /** The cc state */ + enum tc_cc_states cc_state; + /** Voltage on CC pin */ + enum tc_cc_voltage_state cc_voltage; + /** Current CC1 value */ + enum tc_cc_voltage_state cc1; + /** Current CC2 value */ + enum tc_cc_voltage_state cc2; + + /* Timers */ + + /** tCCDebounce timer */ + struct usbc_timer_t tc_t_cc_debounce; + /** tRpValueChange timer */ + struct usbc_timer_t tc_t_rp_value_change; + /** tErrorRecovery timer */ + struct usbc_timer_t tc_t_error_recovery; +}; + +/** + * @brief This function must only be called in the subsystem init function. + * + * @param dev Pointer to the device structure for the driver instance. + */ +void tc_subsys_init(const struct device *dev); + +/** + * @brief Run the TC Layer state machine. This is called from the subsystems + * port stack thread. + * + * @param dev Pointer to the device structure for the driver instance. 
+ * @param dpm_request Device Policy Manager request + */ +void tc_run(const struct device *dev, int32_t dpm_request); + +/** + * @brief Checks if the TC Layer is in an Attached state + * + * @param dev Pointer to the device structure for the driver instance. + * + * @retval true if TC Layer is in an Attached state, else false + */ +bool tc_is_in_attached_state(const struct device *dev); + +#endif /* ZEPHYR_SUBSYS_USBC_TC_H_ */ diff --git a/subsys/usb/usb_c/usbc_timer.c b/subsys/usb/usb_c/usbc_timer.c new file mode 100644 index 00000000000..90794631c42 --- /dev/null +++ b/subsys/usb/usb_c/usbc_timer.c @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2022 The Chromium OS Authors + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "usbc_timer.h" + +/** Timer flag to track if timer was started */ +#define TIMER_STARTED 0 +/** Timer flag to track if timer has expired */ +#define TIMER_EXPIRED 1 + +/** + * @brief The timer function that's executed when the timer expires + */ +static void usbc_timer_handler(struct k_timer *timer) +{ + struct usbc_timer_t *usbc_timer = k_timer_user_data_get(timer); + + atomic_set_bit(&usbc_timer->flags, TIMER_EXPIRED); +} + +void usbc_timer_init(struct usbc_timer_t *usbc_timer, + uint32_t timeout_ms) +{ + k_timer_init(&usbc_timer->timer, usbc_timer_handler, NULL); + k_timer_user_data_set(&usbc_timer->timer, usbc_timer); + usbc_timer->timeout_ms = timeout_ms; +} + +void usbc_timer_start(struct usbc_timer_t *usbc_timer) +{ + atomic_clear_bit(&usbc_timer->flags, TIMER_EXPIRED); + atomic_set_bit(&usbc_timer->flags, TIMER_STARTED); + k_timer_start(&usbc_timer->timer, K_MSEC(usbc_timer->timeout_ms), K_NO_WAIT); +} + +bool usbc_timer_expired(struct usbc_timer_t *usbc_timer) +{ + bool started = atomic_test_bit(&usbc_timer->flags, TIMER_STARTED); + bool expired = atomic_test_bit(&usbc_timer->flags, TIMER_EXPIRED); + + if (started & expired) { + atomic_clear_bit(&usbc_timer->flags, TIMER_STARTED); + return true; + } + + return false; +} + +bool 
usbc_timer_running(struct usbc_timer_t *usbc_timer) +{ + return atomic_test_bit(&usbc_timer->flags, TIMER_STARTED); +} + +void usbc_timer_stop(struct usbc_timer_t *usbc_timer) +{ + atomic_clear_bit(&usbc_timer->flags, TIMER_STARTED); + k_timer_stop(&usbc_timer->timer); +} diff --git a/subsys/usb/usb_c/usbc_timer.h b/subsys/usb/usb_c/usbc_timer.h new file mode 100644 index 00000000000..ca6ce038ad0 --- /dev/null +++ b/subsys/usb/usb_c/usbc_timer.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2022 The Chromium OS Authors + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_SUBSYS_USBC_TIMER_H_ +#define ZEPHYR_SUBSYS_USBC_TIMER_H_ + +#include + +/** + * @brief USB-C Timer Object + */ +struct usbc_timer_t { + /** kernel timer */ + struct k_timer timer; + /** timeout value in ms */ + uint32_t timeout_ms; + /** flags to track timer status */ + atomic_t flags; +}; + +/** + * @brief Initialize a timer + * + * @param usbc_timer timer object + * @param timeout_ms timer timeout in ms + */ +void usbc_timer_init(struct usbc_timer_t *usbc_timer, + uint32_t timeout_ms); + +/** + * @brief Start a timer + * + * @param usbc_timer timer object + */ +void usbc_timer_start(struct usbc_timer_t *usbc_timer); + +/** + * @brief Check if a timer has expired + * + * @param usbc_timer timer object + * @retval true if the timer has expired + */ +bool usbc_timer_expired(struct usbc_timer_t *usbc_timer); + +/** + * @brief Check if a timer has been started + * + * @param usbc_timer timer object + * @retval true if the timer is running + */ +bool usbc_timer_running(struct usbc_timer_t *usbc_timer); + +/** + * @brief Stop a timer + * + * @param usbc_timer timer object + */ +void usbc_timer_stop(struct usbc_timer_t *usbc_timer); + +#endif /* ZEPHYR_SUBSYS_USBC_TIMER_H_ */