usb-c: Refactor USB-C Subsystem Sink

Refactor USB-C Subsystem Sink so that Power Delivery
Source support can be easily added.

Signed-off-by: Sam Hurst <sbh1187@gmail.com>
This commit is contained in:
Sam Hurst 2022-12-14 10:56:05 -08:00 committed by Carles Cufí
commit 7910c617be
15 changed files with 2786 additions and 2385 deletions

View file

@ -2,4 +2,19 @@
zephyr_library()
zephyr_library_sources_ifdef(CONFIG_USBC_STACK usbc_timer.c usbc_stack.c usbc_tc.c usbc_prl.c usbc_pe.c)
# Common USB-C Stack files
zephyr_library_sources_ifdef(
CONFIG_USBC_STACK
usbc_timer.c
usbc_stack.c
usbc_tc_common.c
usbc_pe_common.c
usbc_prl.c
)
# Sink USB-C Stack files
zephyr_library_sources_ifdef(
CONFIG_USBC_CSM_SINK_ONLY
usbc_tc_snk_states.c
usbc_pe_snk_states.c
)

View file

@ -39,6 +39,17 @@ config BUILD_OUTPUT_VIF
help
Generate XML file containing VIF policies during project build.
choice USBC_CSM_TYPE
prompt "USB-C Connection State Machine"
default USBC_CSM_SINK_ONLY
config USBC_CSM_SINK_ONLY
bool "Sink USB-C Connection State Machine"
help
Allows the USB-C state machine to function as a Sink
endchoice
module = USBC_STACK
module-str = usbc stack
source "subsys/logging/Kconfig.template.log_config"

File diff suppressed because it is too large Load diff

View file

@ -11,8 +11,6 @@
#include <zephyr/usb_c/usbc.h>
#include <zephyr/drivers/usb_c/usbc_pd.h>
#include <zephyr/drivers/usb_c/usbc_tc.h>
#include <zephyr/smf.h>
#include "usbc_timer.h"
/**
* @brief Policy Engine Errors
@ -22,49 +20,6 @@ enum pe_error {
ERR_XMIT,
};
/**
* @brief Policy Engine State Machine Object
*/
struct policy_engine {
/** state machine context */
struct smf_ctx ctx;
/** Port device */
const struct device *dev;
/** state machine flags */
atomic_t flags;
/** current port power role (SOURCE or SINK) */
enum tc_power_role power_role;
/** current port data role (DFP or UFP) */
enum tc_data_role data_role;
/** port address where soft resets are sent */
enum pd_packet_type soft_reset_sop;
/** DPM request */
enum usbc_policy_request_t dpm_request;
/* Counters */
/**
* This counter is used to retry the Hard Reset whenever there is no
* response from the remote device.
*/
uint32_t hard_reset_counter;
/* Timers */
/** tTypeCSinkWaitCap timer */
struct usbc_timer_t pd_t_typec_sink_wait_cap;
/** tSenderResponse timer */
struct usbc_timer_t pd_t_sender_response;
/** tPSTransition timer */
struct usbc_timer_t pd_t_ps_transition;
/** tSinkRequest timer */
struct usbc_timer_t pd_t_sink_request;
/** tChunkingNotSupported timer */
struct usbc_timer_t pd_t_chunking_not_supported;
/** Time to wait before resending message after WAIT reception */
struct usbc_timer_t pd_t_wait_to_resend;
};
/**
* @brief This function must only be called in the subsystem init function.
*

View file

@ -0,0 +1,849 @@
/*
* Copyright (c) 2022 The Chromium OS Authors
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/smf.h>
#include <zephyr/usb_c/usbc.h>
#include <zephyr/drivers/usb_c/usbc_pd.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL);
#include "usbc_stack.h"
#include "usbc_pe_common_internal.h"
#include "usbc_pe_snk_states_internal.h"
static const struct smf_state pe_states[];
/**
 * @brief Handle DPM requests that are common to all port roles
 *
 * @retval true if the request was handled, else false
 */
bool common_dpm_requests(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;
	struct policy_engine *pe = port->pe;

	if (pe->dpm_request <= REQUEST_TC_END) {
		/* Not a Policy Engine level request */
		return false;
	}

	atomic_set_bit(&pe->flags, PE_FLAGS_DPM_INITIATED_AMS);

	switch (pe->dpm_request) {
	case REQUEST_PE_DR_SWAP:
		pe_set_state(dev, PE_DRS_SEND_SWAP);
		break;
	case REQUEST_PE_SOFT_RESET_SEND:
		pe_set_state(dev, PE_SEND_SOFT_RESET);
		break;
	default:
		break;
	}

	return true;
}
/**
 * @brief Initializes the PE state machine and enters the PE_SUSPEND state.
 */
void pe_subsys_init(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;
	struct policy_engine *pe = port->pe;

	/* States need access to the port device object */
	pe->dev = dev;

	/* Start the state machine suspended; pe_run() initializes it */
	smf_set_initial(SMF_CTX(pe), &pe_states[PE_SUSPEND]);
}
/**
 * @brief Starts the Policy Engine layer
 */
void pe_start(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;

	port->pe_enabled = true;
}
/**
 * @brief Suspend the Policy Engine layer
 */
void pe_suspend(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;

	port->pe_enabled = false;

	/* Exit all states and idle until the PE is initialized again */
	pe_set_state(dev, PE_SUSPEND);
}
/**
 * @brief Initialize the Policy Engine layer
 *
 * Called from pe_run() on the first run (SM_INIT) and again on every
 * resume from SM_PAUSED, so all flags, timers and counters must be
 * (re)initialized here.
 */
static void pe_init(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Clear all state machine flags */
	pe->flags = ATOMIC_INIT(0);

	/* Initialize the PD timers with their spec-defined durations */
	usbc_timer_init(&pe->pd_t_typec_sink_wait_cap, PD_T_TYPEC_SINK_WAIT_CAP_MAX_MS);
	usbc_timer_init(&pe->pd_t_sender_response, PD_T_SENDER_RESPONSE_NOM_MS);
	usbc_timer_init(&pe->pd_t_ps_transition, PD_T_SPR_PS_TRANSITION_NOM_MS);
	usbc_timer_init(&pe->pd_t_chunking_not_supported, PD_T_CHUNKING_NOT_SUPPORTED_NOM_MS);
	/* Delay before resending after a WAIT uses tSinkRequest minimum */
	usbc_timer_init(&pe->pd_t_wait_to_resend, PD_T_SINK_REQUEST_MIN_MS);
	/*
	 * NOTE(review): pd_t_sink_request is declared in struct policy_engine
	 * but not initialized here — confirm it is unused.
	 */

	/* The sink-only stack starts out as UFP */
	pe->data_role = TC_ROLE_UFP;
	pe->hard_reset_counter = 0;
	pe_set_state(dev, PE_SNK_STARTUP);
}
/**
 * @brief Tests if the Policy Engine layer is running
 */
bool pe_is_running(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;

	return (port->pe_sm_state == SM_RUN);
}
/**
 * @brief Run the Policy Engine layer
 *
 * Top-level tick for the PE: drives the SM_INIT / SM_RUN / SM_PAUSED
 * lifecycle, forwards the DPM request to the state machine, and runs
 * one iteration of the SMF state machine.
 *
 * @param dev Pointer to the device structure for the driver instance
 * @param dpm_request Device Policy Manager request to service this tick
 */
void pe_run(const struct device *dev,
	    const int32_t dpm_request)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	switch (data->pe_sm_state) {
	case SM_PAUSED:
		if (data->pe_enabled == false) {
			break;
		}
		/* fall through: re-initialize the PE when re-enabled */
	case SM_INIT:
		pe_init(dev);
		data->pe_sm_state = SM_RUN;
		/* fall through */
	case SM_RUN:
		if (data->pe_enabled == false) {
			data->pe_sm_state = SM_PAUSED;
			break;
		}

		/* The PE can only make progress while the Protocol Layer runs */
		if (prl_is_running(dev) == false) {
			break;
		}

		/*
		 * 8.3.3.3.8 PE_SNK_Hard_Reset State
		 * The Policy Engine Shall transition to the PE_SNK_Hard_Reset
		 * state from any state when:
		 * - Hard Reset request from Device Policy Manager
		 */
		if (dpm_request == REQUEST_PE_HARD_RESET_SEND) {
			pe_set_state(dev, PE_SNK_HARD_RESET);
		} else {
			/* Pass the DPM request along to the state machine */
			pe->dpm_request = dpm_request;
			common_dpm_requests(dev);
		}

		/* Run state machine */
		smf_run_state(SMF_CTX(pe));
		break;
	}
}
/**
 * @brief Gets the current data role
 */
enum tc_data_role pe_get_data_role(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;

	return port->pe->data_role;
}
/**
 * @brief Gets the current power role
 */
enum tc_power_role pe_get_power_role(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;

	return port->pe->power_role;
}
/**
 * @brief Gets the current cable plug role
 *
 * The sink-only stack is always a DFP/UFP port, never a cable plug,
 * so this is a constant.
 */
enum tc_cable_plug pe_get_cable_plug(const struct device *dev)
{
	return PD_PLUG_FROM_DFP_UFP;
}
/**
 * @brief Informs the Policy Engine that a soft reset was received.
 */
void pe_got_soft_reset(const struct device *dev)
{
	/*
	 * A Soft_Reset Message received from the Protocol Layer forces
	 * entry into the Soft Reset state from any state.
	 */
	pe_set_state(dev, PE_SOFT_RESET);
}
/**
 * @brief Informs the Policy Engine that a message was successfully sent
 */
void pe_message_sent(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;

	atomic_set_bit(&port->pe->flags, PE_FLAGS_TX_COMPLETE);
}
/**
 * @brief Informs the Policy Engine of an error.
 *
 * Decides, per the PD specification, whether an error reported by the
 * Protocol Layer leads to a Hard Reset, a Soft Reset, or a simple
 * return to PE_SNK_Ready.
 *
 * @param dev Pointer to the device structure for the driver instance
 * @param e the error that occurred (transmit or protocol)
 * @param type SOP* packet type the error occurred on
 */
void pe_report_error(const struct device *dev,
		     const enum pe_error e,
		     const enum pd_packet_type type)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/*
	 * Generate Hard Reset if Protocol Error occurred
	 * while in PE_Send_Soft_Reset state.
	 */
	if (pe_get_state(dev) == PE_SEND_SOFT_RESET) {
		pe_set_state(dev, PE_SNK_HARD_RESET);
		return;
	}

	/*
	 * See section 8.3.3.4.1.1 PE_SRC_Send_Soft_Reset State:
	 *
	 * The PE_Send_Soft_Reset state shall be entered from
	 * any state when
	 * * A Protocol Error is detected by Protocol Layer during a
	 *   Non-Interruptible AMS or
	 * * A message has not been sent after retries or
	 * * When not in an explicit contract and
	 *   * Protocol Errors occurred on SOP during an Interruptible AMS or
	 *   * Protocol Errors occurred on SOP during any AMS where the first
	 *     Message in the sequence has not yet been sent i.e. an unexpected
	 *     Message is received instead of the expected GoodCRC Message
	 *     response.
	 */
	/* All error types besides transmit errors are Protocol Errors. */
	if ((e != ERR_XMIT &&
	     atomic_test_bit(&pe->flags, PE_FLAGS_INTERRUPTIBLE_AMS) == false) ||
	    e == ERR_XMIT ||
	    (atomic_test_bit(&pe->flags, PE_FLAGS_EXPLICIT_CONTRACT) == false &&
	     type == PD_PACKET_SOP)) {
		policy_notify(dev, PROTOCOL_ERROR);
		pe_send_soft_reset(dev, type);
	}
	/*
	 * Transition to PE_Snk_Ready by a Protocol
	 * Error during an Interruptible AMS.
	 */
	else {
		pe_set_state(dev, PE_SNK_READY);
	}
}
/**
 * @brief Informs the Policy Engine of a discard.
 */
void pe_report_discard(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;
	struct policy_engine *pe = port->pe;

	/*
	 * Our AMS message was dropped in favor of an incoming message:
	 * clear the local AMS indicator and record the discard so the
	 * active state can react to it.
	 */
	atomic_clear_bit(&pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
	atomic_set_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED);
}
/**
 * @brief Called by the Protocol Layer to inform the Policy Engine
 *	  that a message has been received.
 */
void pe_message_received(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;

	atomic_set_bit(&port->pe->flags, PE_FLAGS_MSG_RECEIVED);
}
/**
 * @brief Informs the Policy Engine that a hard reset was received.
 */
void pe_got_hard_reset(const struct device *dev)
{
	/* A Hard Reset returns the sink to its default state */
	pe_set_state(dev, PE_SNK_TRANSITION_TO_DEFAULT);
}
/**
 * @brief Informs the Policy Engine that a hard reset was sent.
 */
void pe_hard_reset_sent(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;

	atomic_clear_bit(&port->pe->flags, PE_FLAGS_HARD_RESET_PENDING);
}
/**
 * @brief Indicates if an explicit contract is in place
 */
bool pe_is_explicit_contract(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;

	return atomic_test_bit(&port->pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
}
/**
 * @brief Return true if the PE is within an atomic messaging sequence
 *	  that it initiated with a SOP* port partner.
 */
bool pe_dpm_initiated_ams(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;

	return atomic_test_bit(&port->pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
}
/** Private Policy Engine Layer API below */
/**
 * @brief Sets a Policy Engine state
 */
void pe_set_state(const struct device *dev,
		  const enum usbc_pe_state state)
{
	struct usbc_port_data *port = dev->data;

	smf_set_state(SMF_CTX(port->pe), &pe_states[state]);
}
/**
 * @brief Get the Policy Engine's current state
 *
 * The SMF context stores a pointer into pe_states[], so subtracting
 * the table base recovers the enum usbc_pe_state index.
 */
enum usbc_pe_state pe_get_state(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->pe->ctx.current - &pe_states[0];
}
/**
 * @brief Get the Policy Engine's previous state
 *
 * Same pointer-arithmetic recovery as pe_get_state(), applied to the
 * SMF context's previous-state pointer.
 */
enum usbc_pe_state pe_get_last_state(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->pe->ctx.previous - &pe_states[0];
}
/**
 * @brief Send a soft reset message
 */
void pe_send_soft_reset(const struct device *dev,
			const enum pd_packet_type type)
{
	struct usbc_port_data *port = dev->data;

	/* Remember which SOP* the soft reset is addressed to */
	port->pe->soft_reset_sop = type;
	pe_set_state(dev, PE_SEND_SOFT_RESET);
}
/**
 * @brief Send a Power Delivery Data Message
 */
void pe_send_data_msg(const struct device *dev,
		      const enum pd_packet_type type,
		      const enum pd_data_msg_type msg)
{
	struct usbc_port_data *port = dev->data;

	/* Discard any stale TX status before handing off a new message */
	atomic_clear_bit(&port->pe->flags, PE_FLAGS_TX_COMPLETE);
	prl_send_data_msg(dev, type, msg);
}
/**
 * @brief Send a Power Delivery Control Message
 */
void pe_send_ctrl_msg(const struct device *dev,
		      const enum pd_packet_type type,
		      const enum pd_ctrl_msg_type msg)
{
	struct usbc_port_data *port = dev->data;

	/* Discard any stale TX status before handing off a new message */
	atomic_clear_bit(&port->pe->flags, PE_FLAGS_TX_COMPLETE);
	prl_send_ctrl_msg(dev, type, msg);
}
/**
 * @brief Request desired voltage from source.
 *
 * Builds a Request Data Object message in the Protocol Layer's TX
 * buffer and sends it to the port partner on SOP.
 *
 * @param dev Pointer to the device structure for the driver instance
 * @param rdo Request Data Object to send
 */
void pe_send_request_msg(const struct device *dev,
			 const uint32_t rdo)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct pd_msg *msg = &prl_tx->emsg;

	/*
	 * RDOs are transmitted little-endian; serialize directly into
	 * the message buffer instead of staging through a temporary
	 * array and memcpy.
	 */
	msg->len = sizeof(rdo);
	sys_put_le32(rdo, msg->data);

	pe_send_data_msg(dev, PD_PACKET_SOP, PD_DATA_REQUEST);
}
/**
 * @brief Transitions state after receiving an extended message.
 *
 * The sink does not support extended messages. If the message is a
 * chunked message whose total size exceeds one chunk, respond via the
 * PE_SNK_Chunk_Received state; otherwise reply Not_Supported.
 *
 * @param dev Pointer to the device structure for the driver instance
 */
void extended_message_not_supported(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	uint32_t *payload = (uint32_t *)prl_rx->emsg.data;
	union pd_ext_header ext_header;

	/* The first 32 bits of an extended message are its extended header */
	ext_header.raw_value = *payload;

	if (ext_header.chunked &&
	    ext_header.data_size > PD_MAX_EXTENDED_MSG_CHUNK_LEN) {
		pe_set_state(dev, PE_SNK_CHUNK_RECEIVED);
	} else {
		pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
	}
}
/**
 * @brief Check if a specific control message was received
 *
 * @retval true if the header matches the control message type, else false
 */
bool received_control_message(const struct device *dev,
			      const union pd_header header,
			      const enum pd_ctrl_msg_type mt)
{
	struct usbc_port_data *port = dev->data;
	struct protocol_layer_rx_t *prl_rx = port->prl_rx;

	/* Control messages carry no data payload and are never extended */
	return (prl_rx->emsg.len == 0) &&
	       (header.message_type == mt) &&
	       (header.extended == 0);
}
/**
 * @brief Check if a specific data message was received
 *
 * @retval true if the header matches the data message type, else false
 */
bool received_data_message(const struct device *dev,
			   const union pd_header header,
			   const enum pd_data_msg_type mt)
{
	struct usbc_port_data *port = dev->data;
	struct protocol_layer_rx_t *prl_rx = port->prl_rx;

	/* Data messages carry a payload and are never extended */
	return (prl_rx->emsg.len > 0) &&
	       (header.message_type == mt) &&
	       (header.extended == 0);
}
/**
* @brief Check a DPM policy
*/
bool policy_check(const struct device *dev,
const enum usbc_policy_check_t pc)
{
struct usbc_port_data *data = dev->data;
if (data->policy_cb_check) {
return data->policy_cb_check(dev, pc);
} else {
return false;
}
}
/**
* @brief Notify the DPM of a policy change
*/
void policy_notify(const struct device *dev,
const enum usbc_policy_notify_t notify)
{
struct usbc_port_data *data = dev->data;
if (data->policy_cb_notify) {
data->policy_cb_notify(dev, notify);
}
}
/**
* @brief Notify the DPM of a WAIT message reception
*/
bool policy_wait_notify(const struct device *dev,
const enum usbc_policy_wait_t notify)
{
struct usbc_port_data *data = dev->data;
if (data->policy_cb_wait_notify) {
return data->policy_cb_wait_notify(dev, notify);
}
return false;
}
/**
* @brief Send the received source caps to the DPM
*/
void policy_set_src_cap(const struct device *dev,
const uint32_t *pdos,
const int num_pdos)
{
struct usbc_port_data *data = dev->data;
if (data->policy_cb_set_src_cap) {
data->policy_cb_set_src_cap(dev, pdos, num_pdos);
}
}
/**
 * @brief Get a Request Data Object from the DPM
 *
 * @retval the RDO provided by the DPM
 */
uint32_t policy_get_request_data_object(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;

	/* A sink cannot operate without a way to build an RDO */
	__ASSERT(port->policy_cb_get_rdo != NULL,
		 "Callback pointer should not be NULL");

	return port->policy_cb_get_rdo(dev);
}
/**
* @brief Check if the sink is a default level
*/
bool policy_is_snk_at_default(const struct device *dev)
{
struct usbc_port_data *data = dev->data;
if (data->policy_cb_is_snk_at_default) {
return data->policy_cb_is_snk_at_default(dev);
}
return true;
}
/**
 * @brief Get sink caps from the DPM
 */
void policy_get_snk_cap(const struct device *dev,
			uint32_t **pdos,
			int *num_pdos)
{
	struct usbc_port_data *port = dev->data;

	/* Sink capabilities are mandatory for a sink port */
	__ASSERT(port->policy_cb_get_snk_cap != NULL,
		 "Callback pointer should not be NULL");

	port->policy_cb_get_snk_cap(dev, pdos, num_pdos);
}
/**
* @brief PE_DRS_Evaluate_Swap Entry state
*/
void pe_drs_evaluate_swap_entry(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
/* Get evaluation of Data Role Swap request from Device Policy Manager */
if (policy_check(dev, (pe->data_role == TC_ROLE_UFP) ?
CHECK_DATA_ROLE_SWAP_TO_DFP : CHECK_DATA_ROLE_SWAP_TO_UFP)) {
/*
* PE_DRS_DFP_UFP_Accept_Swap and PE_DRS_UFP_DFP_Accept_Swap
* State embedded here
*/
/* Send Accept message */
pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
} else {
/*
* PE_DRS_DFP_UFP_Reject_Swap and PE_DRS_UFP_DFP_Reject_Swap
* State embedded here
*/
/* Send Reject message */
pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_REJECT);
}
}
/**
 * @brief PE_DRS_Evaluate_Swap Run state
 *
 * Waits for the Accept/Reject reply (queued by the entry state) to be
 * transmitted. If Accept was sent, switches the data role; either way
 * returns to PE_SNK_Ready. A discarded reply triggers a soft reset.
 */
void pe_drs_evaluate_swap_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) {
		/* Only update data roles if last message sent was Accept */
		if (prl_tx->msg_type == PD_CTRL_ACCEPT) {
			/* Update Data Role */
			pe->data_role = (pe->data_role == TC_ROLE_UFP) ? TC_ROLE_DFP : TC_ROLE_UFP;
			/* Notify TCPC of role update */
			tcpc_set_roles(data->tcpc, pe->power_role, pe->data_role);
			/* Inform Device Policy Manager of Data Role Change */
			policy_notify(dev, (pe->data_role == TC_ROLE_UFP) ?
				      DATA_ROLE_IS_UFP : DATA_ROLE_IS_DFP);
		}
		pe_set_state(dev, PE_SNK_READY);
	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		/*
		 * Inform Device Policy Manager that the message was
		 * discarded
		 */
		policy_notify(dev, MSG_DISCARDED);
		pe_send_soft_reset(dev, prl_rx->emsg.type);
	}
}
/**
 * @brief PE_DRS_Send_Swap Entry state
 */
void pe_drs_send_swap_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	/* Request a Data Role Swap from the port partner */
	pe_send_ctrl_msg(pe->dev, PD_PACKET_SOP, PD_CTRL_DR_SWAP);
}
/**
 * @brief PE_DRS_Send_Swap Run state
 *
 * After the DR_Swap request is transmitted, waits for the partner's
 * reply within tSenderResponse: Accept switches the data role, Reject
 * is reported to the DPM, Wait arms a resend timer, and any other
 * message or a timeout is a Protocol Error handled via soft reset.
 */
void pe_drs_send_swap_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) {
		/* Start Sender Response Timer */
		usbc_timer_start(&pe->pd_t_sender_response);
	}

	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		header = prl_rx->emsg.header;
		if (received_control_message(dev, header, PD_CTRL_REJECT)) {
			/*
			 * Inform Device Policy Manager that Data Role Swap
			 * was Rejected
			 */
			policy_notify(dev, MSG_REJECTED_RECEIVED);
		} else if (received_control_message(dev, header, PD_CTRL_WAIT)) {
			/*
			 * Inform Device Policy Manager that Data Role Swap
			 * needs to Wait
			 */
			if (policy_wait_notify(dev, WAIT_DATA_ROLE_SWAP)) {
				atomic_set_bit(&pe->flags, PE_FLAGS_WAIT_DATA_ROLE_SWAP);
				usbc_timer_start(&pe->pd_t_wait_to_resend);
			}
		} else if (received_control_message(dev, header, PD_CTRL_ACCEPT)) {
			/* Update Data Role */
			pe->data_role = (pe->data_role == TC_ROLE_UFP) ? TC_ROLE_DFP : TC_ROLE_UFP;
			/* Notify TCPC of role update */
			tcpc_set_roles(data->tcpc, pe->power_role, pe->data_role);
			/* Inform Device Policy Manager of Data Role Change */
			policy_notify(dev, (pe->data_role == TC_ROLE_UFP) ?
				      DATA_ROLE_IS_UFP : DATA_ROLE_IS_DFP);
		} else {
			/* Protocol Error: unexpected reply forces a soft reset */
			policy_notify(dev, PROTOCOL_ERROR);
			pe_send_soft_reset(dev, PD_PACKET_SOP);
			return;
		}
		pe_set_state(dev, PE_SNK_READY);
		return;
	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		/*
		 * Inform Device Policy Manager that the message
		 * was discarded
		 */
		policy_notify(dev, MSG_DISCARDED);
		pe_set_state(dev, PE_SNK_READY);
		return;
	}

	/* No reply within tSenderResponse is a Protocol Error */
	if (usbc_timer_expired(&pe->pd_t_sender_response)) {
		policy_notify(dev, PROTOCOL_ERROR);
		pe_send_soft_reset(dev, PD_PACKET_SOP);
	}
}
/**
 * @brief PE_DRS_Send_Swap Exit state
 */
void pe_drs_send_swap_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	/* Stop Sender Response Timer */
	usbc_timer_stop(&pe->pd_t_sender_response);
}
/**
 * @brief PE_Suspend Entry state
 */
static void pe_suspend_entry(void *obj)
{
	LOG_INF("PE_SUSPEND");
}
/**
 * @brief PE_Suspend Run state: idle until the subsystem re-initializes the PE
 */
static void pe_suspend_run(void *obj)
{
	/* DO NOTHING */
}
/**
* @brief Policy engine State table
*/
static const struct smf_state pe_states[] = {
[PE_SNK_STARTUP] = SMF_CREATE_STATE(
pe_snk_startup_entry,
pe_snk_startup_run,
NULL,
NULL),
[PE_SNK_DISCOVERY] = SMF_CREATE_STATE(
pe_snk_discovery_entry,
pe_snk_discovery_run,
NULL,
NULL),
[PE_SNK_WAIT_FOR_CAPABILITIES] = SMF_CREATE_STATE(
pe_snk_wait_for_capabilities_entry,
pe_snk_wait_for_capabilities_run,
pe_snk_wait_for_capabilities_exit,
NULL),
[PE_SNK_EVALUATE_CAPABILITY] = SMF_CREATE_STATE(
pe_snk_evaluate_capability_entry,
NULL,
NULL,
NULL),
[PE_SNK_SELECT_CAPABILITY] = SMF_CREATE_STATE(
pe_snk_select_capability_entry,
pe_snk_select_capability_run,
pe_snk_select_capability_exit,
NULL),
[PE_SNK_READY] = SMF_CREATE_STATE(
pe_snk_ready_entry,
pe_snk_ready_run,
NULL,
NULL),
[PE_SNK_HARD_RESET] = SMF_CREATE_STATE(
pe_snk_hard_reset_entry,
pe_snk_hard_reset_run,
NULL,
NULL),
[PE_SNK_TRANSITION_TO_DEFAULT] = SMF_CREATE_STATE(
pe_snk_transition_to_default_entry,
pe_snk_transition_to_default_run,
NULL,
NULL),
[PE_SNK_GIVE_SINK_CAP] = SMF_CREATE_STATE(
pe_snk_give_sink_cap_entry,
pe_snk_give_sink_cap_run,
NULL,
NULL),
[PE_SNK_GET_SOURCE_CAP] = SMF_CREATE_STATE(
pe_snk_get_source_cap_entry,
pe_snk_get_source_cap_run,
pe_snk_get_source_cap_exit,
NULL),
[PE_SNK_TRANSITION_SINK] = SMF_CREATE_STATE(
pe_snk_transition_sink_entry,
pe_snk_transition_sink_run,
pe_snk_transition_sink_exit,
NULL),
[PE_SEND_SOFT_RESET] = SMF_CREATE_STATE(
pe_send_soft_reset_entry,
pe_send_soft_reset_run,
pe_send_soft_reset_exit,
NULL),
[PE_SOFT_RESET] = SMF_CREATE_STATE(
pe_soft_reset_entry,
pe_soft_reset_run,
NULL,
NULL),
[PE_SEND_NOT_SUPPORTED] = SMF_CREATE_STATE(
pe_send_not_supported_entry,
pe_send_not_supported_run,
NULL,
NULL),
[PE_DRS_EVALUATE_SWAP] = SMF_CREATE_STATE(
pe_drs_evaluate_swap_entry,
pe_drs_evaluate_swap_run,
NULL,
NULL),
[PE_DRS_SEND_SWAP] = SMF_CREATE_STATE(
pe_drs_send_swap_entry,
pe_drs_send_swap_run,
pe_drs_send_swap_exit,
NULL),
[PE_SNK_GET_SOURCE_CAP] = SMF_CREATE_STATE(
pe_snk_get_source_cap_entry,
pe_snk_get_source_cap_run,
pe_snk_get_source_cap_exit,
NULL),
[PE_SNK_CHUNK_RECEIVED] = SMF_CREATE_STATE(
pe_chunk_received_entry,
pe_chunk_received_run,
NULL,
NULL),
[PE_SUSPEND] = SMF_CREATE_STATE(
pe_suspend_entry,
pe_suspend_run,
NULL,
NULL),
};

View file

@ -0,0 +1,320 @@
/*
* Copyright (c) 2022 The Chromium OS Authors
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_SUBSYS_USBC_PE_COMMON_INTERNAL_H_
#define ZEPHYR_SUBSYS_USBC_PE_COMMON_INTERNAL_H_
#include <zephyr/kernel.h>
#include <zephyr/usb_c/usbc.h>
#include <zephyr/drivers/usb_c/usbc_pd.h>
#include <zephyr/drivers/usb_c/usbc_tc.h>
#include <zephyr/smf.h>
#include "usbc_timer.h"
/**
 * @brief Policy Engine Layer States
 *
 * These values index pe_states[] in usbc_pe_common.c, so the table
 * and this enum must stay in sync.
 */
enum usbc_pe_state {
	/** PE_SNK_Startup */
	PE_SNK_STARTUP,
	/** PE_SNK_Discovery */
	PE_SNK_DISCOVERY,
	/** PE_SNK_Wait_for_Capabilities */
	PE_SNK_WAIT_FOR_CAPABILITIES,
	/** PE_SNK_Evaluate_Capability */
	PE_SNK_EVALUATE_CAPABILITY,
	/** PE_SNK_Select_Capability */
	PE_SNK_SELECT_CAPABILITY,
	/** PE_SNK_Transition_Sink */
	PE_SNK_TRANSITION_SINK,
	/** PE_SNK_Ready */
	PE_SNK_READY,
	/** PE_SNK_Hard_Reset */
	PE_SNK_HARD_RESET,
	/** PE_SNK_Transition_to_default */
	PE_SNK_TRANSITION_TO_DEFAULT,
	/** PE_SNK_Give_Sink_Cap */
	PE_SNK_GIVE_SINK_CAP,
	/** PE_SNK_Get_Source_Cap */
	PE_SNK_GET_SOURCE_CAP,
	/** PE_Send_Soft_Reset */
	PE_SEND_SOFT_RESET,
	/** PE_Soft_Reset */
	PE_SOFT_RESET,
	/** PE_Send_Not_Supported */
	PE_SEND_NOT_SUPPORTED,
	/** PE_DRS_Evaluate_Swap */
	PE_DRS_EVALUATE_SWAP,
	/** PE_DRS_Send_Swap */
	PE_DRS_SEND_SWAP,
	/** PE_SNK_Chunk_Received */
	PE_SNK_CHUNK_RECEIVED,
	/** PE_Suspend. Not part of the PD specification. */
	PE_SUSPEND,
};
/**
 * @brief Policy Engine Layer Flags
 *
 * Bit positions within struct policy_engine's atomic flags word.
 */
enum pe_flags {
	/** Accept message received from port partner */
	PE_FLAGS_ACCEPT = 0,
	/**
	 * Protocol Error was determined based on error recovery
	 * current state
	 */
	PE_FLAGS_PROTOCOL_ERROR = 1,
	/** A message we requested to be sent has been transmitted */
	PE_FLAGS_TX_COMPLETE = 2,
	/** A message sent by a port partner has been received */
	PE_FLAGS_MSG_RECEIVED = 3,
	/**
	 * A hard reset has been requested by the DPM but has not been sent,
	 * not currently used
	 */
	PE_FLAGS_HARD_RESET_PENDING = 4,
	/** An explicit contract is in place with our port partner */
	PE_FLAGS_EXPLICIT_CONTRACT = 5,
	/**
	 * Waiting for Sink Capabilities timed out. Used for retry error
	 * handling
	 */
	PE_FLAGS_SNK_WAIT_CAP_TIMEOUT = 6,
	/**
	 * Flag to note current Atomic Message Sequence (AMS) is interruptible.
	 * If this flag is not set the AMS is non-interruptible. This flag must
	 * be set in the interruptible's message state entry.
	 */
	PE_FLAGS_INTERRUPTIBLE_AMS = 7,
	/** Flag to trigger sending a Data Role Swap */
	PE_FLAGS_DR_SWAP_TO_DFP = 8,
	/** Flag is set when an AMS is initiated by the Device Policy Manager */
	PE_FLAGS_DPM_INITIATED_AMS = 9,
	/** Flag to note message was discarded due to incoming message */
	PE_FLAGS_MSG_DISCARDED = 10,
	/** Flag to trigger sending a soft reset */
	PE_FLAGS_SEND_SOFT_RESET = 11,
	/**
	 * This flag is set when a Wait message is received in response to a
	 * Sink REQUEST
	 */
	PE_FLAGS_WAIT_SINK_REQUEST = 12,
	/**
	 * This flag is set when a Wait message is received in response to a
	 * Data Role Swap
	 */
	PE_FLAGS_WAIT_DATA_ROLE_SWAP = 13
};
/**
 * @brief Policy Engine State Machine Object
 *
 * The smf_ctx member must be first so the object can be passed
 * directly to the SMF API via SMF_CTX().
 */
struct policy_engine {
	/** state machine context */
	struct smf_ctx ctx;
	/** Port device */
	const struct device *dev;
	/** state machine flags (bit positions from enum pe_flags) */
	atomic_t flags;
	/** current port power role (SOURCE or SINK) */
	enum tc_power_role power_role;
	/** current port data role (DFP or UFP) */
	enum tc_data_role data_role;
	/** port address where soft resets are sent */
	enum pd_packet_type soft_reset_sop;
	/** DPM request */
	enum usbc_policy_request_t dpm_request;

	/* Counters */

	/**
	 * This counter is used to retry the Hard Reset whenever there is no
	 * response from the remote device.
	 */
	uint32_t hard_reset_counter;

	/* Timers */

	/** tTypeCSinkWaitCap timer */
	struct usbc_timer_t pd_t_typec_sink_wait_cap;
	/** tSenderResponse timer */
	struct usbc_timer_t pd_t_sender_response;
	/** tPSTransition timer */
	struct usbc_timer_t pd_t_ps_transition;
	/** tSinkRequest timer (NOTE(review): not initialized in pe_init —
	 *  confirm whether it is used)
	 */
	struct usbc_timer_t pd_t_sink_request;
	/** tChunkingNotSupported timer */
	struct usbc_timer_t pd_t_chunking_not_supported;
	/** Time to wait before resending message after WAIT reception */
	struct usbc_timer_t pd_t_wait_to_resend;
};
/**
* @brief Sets a Policy Engine state
*
* @param dev Pointer to the device structure for the driver instance
* @param state next PE State to enter
*/
void pe_set_state(const struct device *dev,
const enum usbc_pe_state state);
/**
* @brief Get the Policy Engine's current state
*
* @param dev Pointer to the device structure for the driver instance
* @retval current PE state
*/
enum usbc_pe_state pe_get_state(const struct device *dev);
/**
* @brief Get the Policy Engine's previous state
*
* @param dev Pointer to the device structure for the driver instance
* @retval last PE state
*/
enum usbc_pe_state pe_get_last_state(const struct device *dev);
/**
* @brief Send a soft reset message
*
* @param dev Pointer to the device structure for the driver instance
* @param type SOP* to send soft reset message
*/
void pe_send_soft_reset(const struct device *dev,
const enum pd_packet_type type);
/**
* @brief Send a Power Delivery Data Message
*
* @param dev Pointer to the device structure for the driver instance
* @param type SOP* to send message
* @param msg PD data message to send
*/
void pe_send_data_msg(const struct device *dev,
const enum pd_packet_type type,
const enum pd_data_msg_type msg);
/**
* @brief Send a Power Delivery Control Message
*
* @param dev Pointer to the device structure for the driver instance
* @param type SOP* to send message
* @param msg PD control message to send
*/
void pe_send_ctrl_msg(const struct device *dev,
const enum pd_packet_type type,
const enum pd_ctrl_msg_type msg);
/**
* @brief Request desired voltage from source.
*
* @param dev Pointer to the device structure for the driver instance
* @param rdo Request Data Object to send
*/
void pe_send_request_msg(const struct device *dev,
const uint32_t rdo);
/**
* @brief Transitions state after receiving an extended message.
*
* @param dev Pointer to the device structure for the driver instance
*/
void extended_message_not_supported(const struct device *dev);
/**
* @brief Check if a specific control message was received
*
* @param dev Pointer to the device structure for the driver instance
* @param header message header containing the message
* @param mt message type to check
* @retval true if the header contains the message type, else false
*/
bool received_control_message(const struct device *dev,
const union pd_header header,
const enum pd_ctrl_msg_type mt);
/**
* @brief Check if a specific data message was received
*
* @param dev Pointer to the device structure for the driver instance
* @param header message header containing the message
* @param mt message type to check
 * @retval true if the header contains the message type, else false
*/
bool received_data_message(const struct device *dev,
const union pd_header header,
const enum pd_data_msg_type mt);
/**
* @brief Check a DPM policy
*
* @param dev Pointer to the device structure for the driver instance
* @param pc The DPM policy to check
* @retval true if the DPM approves the check, else false
*/
bool policy_check(const struct device *dev,
const enum usbc_policy_check_t pc);
/**
* @brief Notify the DPM of a policy change
*
* @param dev Pointer to the device structure for the driver instance
 * @param notify The notification to send to the DPM
*/
void policy_notify(const struct device *dev,
const enum usbc_policy_notify_t notify);
/**
* @brief Notify the DPM of a WAIT message reception
*
* @param dev Pointer to the device structure for the driver instance
* @param notify Wait message to send to DPM
* @retval true if the Policy Engine should wait and try the action again
*/
bool policy_wait_notify(const struct device *dev,
const enum usbc_policy_wait_t notify);
/**
* @brief Send the received source caps to the DPM
*
* @param dev Pointer to the device structure for the driver instance
* @param pdos pointer to pdos to send
* @param num_pdos number of pdos to send
*/
void policy_set_src_cap(const struct device *dev,
const uint32_t *pdos,
const int num_pdos);
/**
* @brief Get a Request Data Object from the DPM
*
* @param dev Pointer to the device structure for the driver instance
* @retval the RDO from the DPM
*/
uint32_t policy_get_request_data_object(const struct device *dev);
/**
* @brief Check if the sink is a default level
*
* @param dev Pointer to the device structure for the driver instance
* @retval true if sink is at default value, else false
*/
bool policy_is_snk_at_default(const struct device *dev);
/**
* @brief Get sink caps from the DPM
*
* @param dev Pointer to the device structure for the driver instance
* @param pdos pointer to pdo sink caps
* @param num_pdos number of pdo sink caps
*/
void policy_get_snk_cap(const struct device *dev,
uint32_t **pdos,
int *num_pdos);
#endif /* ZEPHYR_SUBSYS_USBC_PE_COMMON_INTERNAL_H_ */

View file

@ -0,0 +1,842 @@
/*
* Copyright (c) 2022 The Chromium OS Authors
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/smf.h>
#include <zephyr/usb_c/usbc.h>
#include <zephyr/drivers/usb_c/usbc_pd.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL);
#include "usbc_pe_common_internal.h"
#include "usbc_stack.h"
/**
 * @brief Handle sink-specific Device Policy Manager requests
 *
 * @param dev Pointer to the device structure for the driver instance
 * @retval true if a Policy Engine level request was pending, else false
 */
bool sink_dpm_requests(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;
	struct policy_engine *pe = port->pe;

	/* Requests at or below REQUEST_TC_END belong to the Type-C layer */
	if (pe->dpm_request <= REQUEST_TC_END) {
		return false;
	}

	/* This Atomic Message Sequence was initiated by the DPM */
	atomic_set_bit(&pe->flags, PE_FLAGS_DPM_INITIATED_AMS);

	if (pe->dpm_request == REQUEST_PE_GET_SRC_CAPS) {
		pe_set_state(dev, PE_SNK_GET_SOURCE_CAP);
	}

	return true;
}
/**
 * @brief PE_SNK_Startup Entry State
 *
 * Resets the protocol layer, assumes the Sink power role, drops any
 * explicit contract and tells the DPM that PD is not connected.
 */
void pe_snk_startup_entry(void *obj)
{
	struct policy_engine *pe = obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_SNK_Startup");

	/* This port only operates as a Sink */
	pe->power_role = TC_ROLE_SINK;

	/* Reset the protocol layer */
	prl_reset(dev);

	/* Any previous explicit contract is no longer valid */
	atomic_clear_bit(&pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);

	policy_notify(dev, NOT_PD_CONNECTED);
}
/**
 * @brief PE_SNK_Startup Run State
 *
 * Waits for the protocol layer reset to complete, then moves on to
 * PE_SNK_Discovery.
 */
void pe_snk_startup_run(void *obj)
{
	struct policy_engine *pe = obj;
	const struct device *dev = pe->dev;

	/* Stay here until the protocol layer is running again */
	if (!prl_is_running(dev)) {
		return;
	}

	pe_set_state(dev, PE_SNK_DISCOVERY);
}
/**
 * @brief PE_SNK_Discovery Entry State
 *
 * No work is done on entry; the run state polls for VBUS.
 */
void pe_snk_discovery_entry(void *obj)
{
	LOG_INF("PE_SNK_Discovery");
}
/**
 * @brief PE_SNK_Discovery Run State
 *
 * Polls the VBUS measurement device and advances to
 * PE_SNK_Wait_For_Capabilities once VBUS is present.
 */
void pe_snk_discovery_run(void *obj)
{
	struct policy_engine *pe = obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;

	/* Remain in Discovery until VBUS is detected */
	if (!usbc_vbus_check_level(data->vbus, TC_VBUS_PRESENT)) {
		return;
	}

	pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
}
/**
 * @brief PE_SNK_Wait_For_Capabilities Entry State
 *
 * Arms the SinkWaitCapTimer; its expiry triggers a Hard Reset in the
 * run state.
 */
void pe_snk_wait_for_capabilities_entry(void *obj)
{
	struct policy_engine *pe = obj;

	LOG_INF("PE_SNK_Wait_For_Capabilities");

	/* Start the SinkWaitCapTimer */
	usbc_timer_start(&pe->pd_t_typec_sink_wait_cap);
}
/**
 * @brief PE_SNK_Wait_For_Capabilities Run State
 *
 * Waits for a Source_Capabilities message. Reception moves the PE to
 * PE_SNK_Evaluate_Capability; a SinkWaitCapTimer timeout triggers a
 * Hard Reset.
 */
void pe_snk_wait_for_capabilities_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	/*
	 * Transition to the PE_SNK_Evaluate_Capability state when:
	 *  1) A Source_Capabilities Message is received.
	 */
	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		header = prl_rx->emsg.header;

		if (received_data_message(dev, header, PD_DATA_SOURCE_CAP)) {
			pe_set_state(dev, PE_SNK_EVALUATE_CAPABILITY);
			return;
		}
	}

	/*
	 * When the SinkWaitCapTimer times out, perform a Hard Reset.
	 * The timeout flag lets PE_SNK_Hard_Reset detect a non-responsive
	 * Source once the HardResetCounter is exhausted.
	 */
	if (usbc_timer_expired(&pe->pd_t_typec_sink_wait_cap)) {
		atomic_set_bit(&pe->flags, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT);
		pe_set_state(dev, PE_SNK_HARD_RESET);
	}
}
/**
 * @brief PE_SNK_Wait_For_Capabilities Exit State
 *
 * Stops the SinkWaitCapTimer regardless of how the state was left.
 */
void pe_snk_wait_for_capabilities_exit(void *obj)
{
	struct policy_engine *pe = obj;

	usbc_timer_stop(&pe->pd_t_typec_sink_wait_cap);
}
/**
 * @brief PE_SNK_Evaluate_Capability Entry State
 *
 * Forwards the just-received Source Capabilities PDOs to the Device
 * Policy Manager and immediately transitions to
 * PE_SNK_Select_Capability. The PDOs are read directly from the
 * protocol layer RX buffer, so this must run before the buffer is
 * reused.
 */
void pe_snk_evaluate_capability_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;
	/* PDO payload lives in the RX message buffer */
	uint32_t *pdos = (uint32_t *)prl_rx->emsg.data;
	uint32_t num_pdo_objs = PD_CONVERT_BYTES_TO_PD_HEADER_COUNT(prl_rx->emsg.len);

	LOG_INF("PE_SNK_Evaluate_Capability");

	header = prl_rx->emsg.header;

	/* Reset Hard Reset counter to zero */
	pe->hard_reset_counter = 0;

	/* Set to highest revision supported by both ports */
	prl_set_rev(dev, PD_PACKET_SOP, MIN(PD_REV30, header.specification_revision));

	/* Send source caps to Device Policy Manager for saving */
	policy_set_src_cap(dev, pdos, num_pdo_objs);

	/* Transition to PE_Snk_Select_Capability */
	pe_set_state(dev, PE_SNK_SELECT_CAPABILITY);
}
/**
 * @brief PE_SNK_Select_Capability Entry State
 *
 * Asks the Device Policy Manager for a Request Data Object, sends the
 * Request message and notifies the DPM that PD is connected.
 */
void pe_snk_select_capability_entry(void *obj)
{
	struct policy_engine *pe = obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_SNK_Select_Capability");

	/* The DPM chooses which source capability to request */
	uint32_t rdo = policy_get_request_data_object(dev);

	pe_send_request_msg(dev, rdo);

	/* Inform Device Policy Manager that we are PD Connected */
	policy_notify(dev, PD_CONNECTED);
}
/**
 * @brief PE_SNK_Select_Capability Run State
 *
 * Handles the outcome of the Request message: discard, transmit
 * completion (starts SenderResponseTimer), the Source's reply
 * (Accept/Reject/Wait), or a SenderResponseTimer timeout (Hard Reset).
 */
void pe_snk_select_capability_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		/*
		 * The sent REQUEST message was discarded.  This can be at
		 * the start of an AMS or in the middle. Handle what to
		 * do based on where we came from.
		 * 1) SE_SNK_EVALUATE_CAPABILITY: sends SoftReset
		 * 2) SE_SNK_READY: goes back to SNK Ready
		 */
		if (pe_get_last_state(dev) == PE_SNK_EVALUATE_CAPABILITY) {
			pe_send_soft_reset(dev, PD_PACKET_SOP);
		} else {
			pe_set_state(dev, PE_SNK_READY);
		}
	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) {
		/* Request went out; the Source must answer within this window */
		/* Start the SenderResponseTimer */
		usbc_timer_start(&pe->pd_t_sender_response);
	}

	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		header = prl_rx->emsg.header;

		/*
		 * Transition to the PE_SNK_Transition_Sink state when:
		 *  1) An Accept Message is received from the Source.
		 *
		 * Transition to the PE_SNK_Wait_for_Capabilities state when:
		 *  1) There is no Explicit Contract in place and
		 *  2) A Reject Message is received from the Source or
		 *  3) A Wait Message is received from the Source.
		 *
		 * Transition to the PE_SNK_Ready state when:
		 *  1) There is an Explicit Contract in place and
		 *  2) A Reject Message is received from the Source or
		 *  3) A Wait Message is received from the Source.
		 *
		 * Transition to the PE_SNK_Hard_Reset state when:
		 *  1) A SenderResponseTimer timeout occurs.
		 */
		/* Only look at control messages */
		if (received_control_message(dev, header, PD_CTRL_ACCEPT)) {
			/* explicit contract is now in place */
			atomic_set_bit(&pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
			pe_set_state(dev, PE_SNK_TRANSITION_SINK);
		} else if (received_control_message(dev, header, PD_CTRL_REJECT) ||
			   received_control_message(dev, header, PD_CTRL_WAIT)) {
			/*
			 * We had a previous explicit contract, so transition to
			 * PE_SNK_Ready
			 */
			if (atomic_test_bit(&pe->flags, PE_FLAGS_EXPLICIT_CONTRACT)) {
				if (received_control_message(dev, header, PD_CTRL_WAIT)) {
					/*
					 * Inform Device Policy Manager that Sink
					 * Request needs to Wait
					 */
					if (policy_wait_notify(dev, WAIT_SINK_REQUEST)) {
						atomic_set_bit(&pe->flags,
							       PE_FLAGS_WAIT_SINK_REQUEST);
						usbc_timer_start(&pe->pd_t_wait_to_resend);
					}
				}

				pe_set_state(dev, PE_SNK_READY);
			}
			/*
			 * No previous explicit contract, so transition
			 * to PE_SNK_Wait_For_Capabilities
			 */
			else {
				pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
			}
		} else {
			/* Unexpected reply: recover with a Soft Reset */
			pe_send_soft_reset(dev, prl_rx->emsg.type);
		}
		return;
	}

	/* When the SenderResponseTimer times out, perform a Hard Reset. */
	if (usbc_timer_expired(&pe->pd_t_sender_response)) {
		policy_notify(dev, PORT_PARTNER_NOT_RESPONSIVE);
		pe_set_state(dev, PE_SNK_HARD_RESET);
	}
}
/**
 * @brief PE_SNK_Select_Capability Exit State
 *
 * Ensures the SenderResponseTimer is not left running.
 */
void pe_snk_select_capability_exit(void *obj)
{
	struct policy_engine *pe = obj;

	usbc_timer_stop(&pe->pd_t_sender_response);
}
/**
 * @brief PE_SNK_Transition_Sink Entry State
 *
 * Starts the PSTransitionTimer; the Source must send PS_RDY before it
 * expires.
 */
void pe_snk_transition_sink_entry(void *obj)
{
	struct policy_engine *pe = obj;

	LOG_INF("PE_SNK_Transition_Sink");

	usbc_timer_start(&pe->pd_t_ps_transition);
}
/**
 * @brief PE_SNK_Transition_Sink Run State
 *
 * Waits for the Source's PS_RDY. Any other message is treated as a
 * protocol error; a PSTransitionTimer timeout also forces a Hard Reset.
 */
void pe_snk_transition_sink_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	/*
	 * Transition to the PE_SNK_Ready state when:
	 *  1) A PS_RDY Message is received from the Source.
	 *
	 * Transition to the PE_SNK_Hard_Reset state when:
	 *  1) A Protocol Error occurs.
	 */
	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		header = prl_rx->emsg.header;

		/*
		 * PS_RDY message received
		 */
		if (received_control_message(dev, header, PD_CTRL_PS_RDY)) {
			/*
			 * Inform the Device Policy Manager to Transition
			 * the Power Supply
			 */
			policy_notify(dev, TRANSITION_PS);
			pe_set_state(dev, PE_SNK_READY);
		} else {
			/* Protocol Error: anything other than PS_RDY here */
			pe_set_state(dev, PE_SNK_HARD_RESET);
		}
		return;
	}

	/*
	 * Timeout will lead to a Hard Reset
	 */
	if (usbc_timer_expired(&pe->pd_t_ps_transition)) {
		pe_set_state(dev, PE_SNK_HARD_RESET);
	}
}
/**
 * @brief PE_SNK_Transition_Sink Exit State
 */
void pe_snk_transition_sink_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	/* Stop the PSTransitionTimer (the entry state started it) */
	usbc_timer_stop(&pe->pd_t_ps_transition);
}
/**
 * @brief PE_SNK_Ready Entry State
 *
 * Clears the Atomic-Message-Sequence flags left over from the previous
 * exchange.
 */
void pe_snk_ready_entry(void *obj)
{
	struct policy_engine *pe = obj;

	LOG_INF("PE_SNK_Ready");

	/* No AMS is in progress in the Ready state */
	atomic_clear_bit(&pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
	atomic_clear_bit(&pe->flags, PE_FLAGS_INTERRUPTIBLE_AMS);
}
/**
 * @brief PE_SNK_Ready Run State
 *
 * Idle state while an explicit contract is in place. Dispatches
 * incoming extended/data/control messages, retries deferred actions
 * when the WaitToResend timer expires, and finally services DPM
 * requests.
 */
void pe_snk_ready_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	/*
	 * Handle incoming messages before discovery and DPMs other than hard
	 * reset
	 */
	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		union pd_header header = prl_rx->emsg.header;

		/* Extended Message Request */
		if (header.extended) {
			extended_message_not_supported(dev);
			return;
		}
		/* Data Messages */
		else if (header.number_of_data_objects > 0) {
			switch (header.message_type) {
			case PD_DATA_SOURCE_CAP:
				pe_set_state(dev, PE_SNK_EVALUATE_CAPABILITY);
				break;
			default:
				pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
			}
			return;
		}
		/* Control Messages */
		else {
			switch (header.message_type) {
			case PD_CTRL_GOOD_CRC:
				/* Do nothing */
				break;
			case PD_CTRL_PING:
				/* Do nothing */
				break;
			case PD_CTRL_GET_SINK_CAP:
				pe_set_state(dev, PE_SNK_GIVE_SINK_CAP);
				return;
			case PD_CTRL_DR_SWAP:
				pe_set_state(dev, PE_DRS_EVALUATE_SWAP);
				return;
			case PD_CTRL_NOT_SUPPORTED:
				/* Do nothing */
				break;
			/*
			 * USB PD 3.0 6.8.1:
			 * Receiving an unexpected message shall be responded
			 * to with a soft reset message.
			 */
			case PD_CTRL_ACCEPT:
			case PD_CTRL_REJECT:
			case PD_CTRL_WAIT:
			case PD_CTRL_PS_RDY:
				pe_send_soft_reset(dev, prl_rx->emsg.type);
				return;
			/*
			 * Receiving an unknown or unsupported message
			 * shall be responded to with a not supported message.
			 */
			default:
				pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
				return;
			}
		}
	}

	/*
	 * Check if we are waiting to resend any messages
	 */
	if (usbc_timer_expired(&pe->pd_t_wait_to_resend)) {
		if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_WAIT_SINK_REQUEST)) {
			pe_set_state(dev, PE_SNK_SELECT_CAPABILITY);
			return;
		} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_WAIT_DATA_ROLE_SWAP)) {
			pe_set_state(dev, PE_DRS_SEND_SWAP);
			return;
		}
	}

	/*
	 * Handle Device Policy Manager Requests
	 */
	sink_dpm_requests(dev);
}
/**
 * @brief PE_SNK_Hard_Reset Entry State
 *
 * Requests Hard Reset signaling from the PHY, unless the Source has
 * already proven non-responsive (SinkWaitCapTimer timed out and the
 * HardResetCounter is exhausted), in which case the Policy Engine is
 * paused.
 */
void pe_snk_hard_reset_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;

	LOG_INF("PE_SNK_Hard_Reset");

	/*
	 * Note: If the SinkWaitCapTimer times out and the HardResetCounter is
	 *       greater than nHardResetCount the Sink Shall assume that the
	 *       Source is non-responsive.
	 */
	if (atomic_test_bit(&pe->flags, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT) &&
	    pe->hard_reset_counter > PD_N_HARD_RESET_COUNT) {
		/* Inform the DPM that the port partner is not responsive */
		policy_notify(dev, PORT_PARTNER_NOT_RESPONSIVE);

		/* Pause the Policy Engine */
		data->pe_enabled = false;
		return;
	}

	/* Set Hard Reset Pending Flag; cleared when the PRL finishes */
	atomic_set_bit(&pe->flags, PE_FLAGS_HARD_RESET_PENDING);

	atomic_clear_bit(&pe->flags, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT);
	atomic_clear_bit(&pe->flags, PE_FLAGS_PROTOCOL_ERROR);

	/* Request the generation of Hard Reset Signaling by the PHY Layer */
	prl_execute_hard_reset(dev);

	/* Increment the HardResetCounter */
	pe->hard_reset_counter++;
}
/**
 * @brief PE_SNK_Hard_Reset Run State
 *
 * Waits for Hard Reset signaling to finish, then transitions to
 * PE_SNK_Transition_to_default.
 */
void pe_snk_hard_reset_run(void *obj)
{
	struct policy_engine *pe = obj;
	const struct device *dev = pe->dev;

	/*
	 * The pending flag is cleared by the protocol layer once the
	 * Hard Reset has been signaled.
	 */
	if (!atomic_test_bit(&pe->flags, PE_FLAGS_HARD_RESET_PENDING)) {
		pe_set_state(dev, PE_SNK_TRANSITION_TO_DEFAULT);
	}
}
/**
 * @brief PE_SNK_Transition_to_default Entry State
 *
 * Resets the Policy Engine flags, reverts the data role to UFP and
 * instructs the DPM to return the Sink to its default level.
 */
void pe_snk_transition_to_default_entry(void *obj)
{
	struct policy_engine *pe = obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_SNK_Transition_to_default");

	/* Start from a clean flag state */
	pe->flags = ATOMIC_INIT(0);

	/* After a Hard Reset the Sink's data role is UFP */
	pe->data_role = TC_ROLE_UFP;

	/*
	 * Indicate to the Device Policy Manager that the Sink Shall
	 * transition to default
	 */
	policy_notify(dev, SNK_TRANSITION_TO_DEFAULT);

	/*
	 * Request the Device Policy Manger that the Port Data Role is
	 * set to UFP
	 */
	policy_notify(dev, DATA_ROLE_IS_UFP);
}
/**
 * @brief PE_SNK_Transition_to_default Run State
 *
 * Once the DPM reports the Sink is back at its default level, the Hard
 * Reset is declared complete and the PE restarts at PE_SNK_Startup.
 */
void pe_snk_transition_to_default_run(void *obj)
{
	struct policy_engine *pe = obj;
	const struct device *dev = pe->dev;

	/* Still waiting on the Device Policy Manager */
	if (!policy_is_snk_at_default(dev)) {
		return;
	}

	/* Inform the Protocol Layer that the Hard Reset is complete */
	prl_hard_reset_complete(dev);
	pe_set_state(dev, PE_SNK_STARTUP);
}
/**
 * @brief PE_SNK_Get_Source_Cap Entry State
 *
 * Sends a Get_Source_Cap control message to the port partner.
 */
void pe_snk_get_source_cap_entry(void *obj)
{
	struct policy_engine *pe = obj;

	LOG_INF("PE_SNK_Get_Source_Cap");

	pe_send_ctrl_msg(pe->dev, PD_PACKET_SOP, PD_CTRL_GET_SOURCE_CAP);
}
/**
 * @brief PE_SNK_Get_Source_Cap Run State
 *
 * Returns to PE_SNK_Ready once the Get_Source_Cap message has been
 * transmitted; a discarded transmission triggers a Soft Reset.
 *
 * NOTE(review): the exit state stops pd_t_sender_response, but neither
 * this state nor the entry state starts it — looks like the
 * SenderResponseTimer handling is incomplete here; confirm intent.
 */
void pe_snk_get_source_cap_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	/* Wait until message is sent or dropped */
	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) {
		pe_set_state(dev, PE_SNK_READY);
	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		pe_send_soft_reset(dev, prl_rx->emsg.type);
	}
}
/**
 * @brief PE_SNK_Get_Source_Cap Exit State
 *
 * Stops the SenderResponseTimer. NOTE(review): this timer is never
 * started in the Get_Source_Cap entry/run states above — stopping it
 * here is harmless but suggests missing start logic; verify.
 */
void pe_snk_get_source_cap_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	usbc_timer_stop(&pe->pd_t_sender_response);
}
/**
 * @brief PE_Send_Soft_Reset Entry State
 *
 * Resets the protocol layer and flags that a Soft Reset message must
 * be sent once the protocol layer is running again.
 */
void pe_send_soft_reset_entry(void *obj)
{
	struct policy_engine *pe = obj;

	LOG_INF("PE_SNK_Send_Soft_Reset");

	/* Reset Protocol Layer */
	prl_reset(pe->dev);

	/* The run state sends the Soft Reset after the PRL restarts */
	atomic_set_bit(&pe->flags, PE_FLAGS_SEND_SOFT_RESET);
}
/**
 * @brief PE_Send_Soft_Reset Run State
 *
 * Once the protocol layer is running, sends the Soft Reset and then
 * waits for an Accept (back to Wait_For_Capabilities). A discard
 * returns to PE_SNK_Ready; a protocol error or SenderResponseTimer
 * timeout escalates to a Hard Reset.
 */
void pe_send_soft_reset_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	/* The Soft Reset cannot be sent until the PRL reset completes */
	if (prl_is_running(dev) == false) {
		return;
	}

	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_SEND_SOFT_RESET)) {
		/* Send Soft Reset message */
		pe_send_ctrl_msg(dev, pe->soft_reset_sop, PD_CTRL_SOFT_RESET);
		return;
	}

	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		/* Inform Device Policy Manager that the message was discarded */
		policy_notify(dev, MSG_DISCARDED);
		pe_set_state(dev, PE_SNK_READY);
	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) {
		/* Start SenderResponse timer */
		usbc_timer_start(&pe->pd_t_sender_response);
	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		header = prl_rx->emsg.header;

		if (received_control_message(dev, header, PD_CTRL_ACCEPT)) {
			pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
		}
	} else if (atomic_test_bit(&pe->flags, PE_FLAGS_PROTOCOL_ERROR) ||
		   usbc_timer_expired(&pe->pd_t_sender_response)) {
		/*
		 * Protocol error: clear the flag and Hard Reset quietly.
		 * Timeout: additionally inform the DPM that the partner
		 * did not respond.
		 */
		if (atomic_test_bit(&pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
			atomic_clear_bit(&pe->flags, PE_FLAGS_PROTOCOL_ERROR);
		} else {
			policy_notify(dev, PORT_PARTNER_NOT_RESPONSIVE);
		}

		pe_set_state(dev, PE_SNK_HARD_RESET);
	}
}
/**
 * @brief PE_Send_Soft_Reset Exit State
 *
 * Stops the SenderResponseTimer on the way out.
 */
void pe_send_soft_reset_exit(void *obj)
{
	struct policy_engine *pe = obj;

	usbc_timer_stop(&pe->pd_t_sender_response);
}
/**
 * @brief PE_SNK_Soft_Reset Entry State
 *
 * Entered when the port partner requested a Soft Reset: resets the
 * protocol layer and flags that an Accept must be sent in response.
 */
void pe_soft_reset_entry(void *obj)
{
	struct policy_engine *pe = obj;

	LOG_INF("PE_SNK_Soft_Reset");

	/* Reset the Protocol Layer */
	prl_reset(pe->dev);

	/* The run state replies with Accept once the PRL restarts */
	atomic_set_bit(&pe->flags, PE_FLAGS_SEND_SOFT_RESET);
}
/**
 * @brief PE_SNK_Soft_Reset Run State
 *
 * Sends the Accept reply to a received Soft Reset, then moves to
 * PE_SNK_Wait_For_Capabilities on TX completion, or PE_SNK_Hard_Reset
 * on a protocol error.
 */
void pe_soft_reset_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/* Wait for the protocol layer reset to finish */
	if (prl_is_running(dev) == false) {
		return;
	}

	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_SEND_SOFT_RESET)) {
		/* Send Accept message */
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
		return;
	}

	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) {
		pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
		pe_set_state(dev, PE_SNK_HARD_RESET);
	}
}
/**
* @brief PE_Not_Supported Entry State
*/
void pe_send_not_supported_entry(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
LOG_INF("PE_Not_Supported");
/* Request the Protocol Layer to send a Not_Supported or Reject Message. */
if (prl_get_rev(dev, PD_PACKET_SOP) > PD_REV20) {
pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_NOT_SUPPORTED);
} else {
pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_REJECT);
}
}
/**
* @brief PE_Not_Supported Run State
*/
void pe_send_not_supported_run(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
if (atomic_test_bit(&pe->flags, PE_FLAGS_TX_COMPLETE) ||
atomic_test_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
atomic_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE);
atomic_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED);
pe_set_state(dev, PE_SNK_READY);
}
}
/**
 * @brief PE_Chunk_Received Entry State
 *
 * Starts the ChunkingNotSupported timer; chunked messages are not
 * supported by this stack.
 */
void pe_chunk_received_entry(void *obj)
{
	struct policy_engine *pe = obj;

	LOG_INF("PE_SNK_Chunk_Received");

	usbc_timer_start(&pe->pd_t_chunking_not_supported);
}
/**
 * @brief PE_Chunk_Received Run State
 *
 * When the ChunkingNotSupported timer expires, responds with
 * Not_Supported.
 */
void pe_chunk_received_run(void *obj)
{
	struct policy_engine *pe = obj;

	if (!usbc_timer_expired(&pe->pd_t_chunking_not_supported)) {
		return;
	}

	pe_set_state(pe->dev, PE_SEND_NOT_SUPPORTED);
}
/**
 * @brief PE_SNK_Give_Sink_Cap Entry state
 *
 * Fetches the Sink Capability PDOs from the Device Policy Manager,
 * copies them into the protocol layer TX buffer and sends a Sink_Cap
 * data message.
 *
 * NOTE(review): the memcpy trusts num_pdos from the DPM; assumes the
 * resulting byte length fits in msg->data — confirm against the DPM
 * implementation and buffer size.
 */
void pe_snk_give_sink_cap_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct pd_msg *msg = &prl_tx->emsg;
	uint32_t *pdos;
	uint32_t num_pdos;

	/* Get present sink capabilities from Device Policy Manager */
	policy_get_snk_cap(dev, &pdos, &num_pdos);

	msg->len = PD_CONVERT_PD_HEADER_COUNT_TO_BYTES(num_pdos);
	memcpy(msg->data, (uint8_t *)pdos, msg->len);
	pe_send_data_msg(dev, PD_PACKET_SOP, PD_DATA_SINK_CAP);
}
/**
 * @brief PE_SNK_Give_Sink_Cap Run state
 *
 * Returns to PE_SNK_Ready once the Sink_Cap message has been sent; a
 * discarded transmission triggers a Soft Reset.
 */
void pe_snk_give_sink_cap_run(void *obj)
{
	struct policy_engine *pe = obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *port = dev->data;

	/* Wait for the transmit outcome */
	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) {
		pe_set_state(dev, PE_SNK_READY);
		return;
	}

	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		pe_send_soft_reset(dev, port->prl_rx->emsg.type);
	}
}

View file

@ -0,0 +1,104 @@
/*
* Copyright (c) 2022 The Chromium OS Authors
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_SUBSYS_USBC_PE_SNK_STATES_INTERNAL_H_
#define ZEPHYR_SUBSYS_USBC_PE_SNK_STATES_INTERNAL_H_
/**
 * @brief PE_SNK_Startup State
 */
void pe_snk_startup_entry(void *obj);
void pe_snk_startup_run(void *obj);
/**
 * @brief PE_SNK_Discovery State
 */
void pe_snk_discovery_entry(void *obj);
void pe_snk_discovery_run(void *obj);
/**
 * @brief PE_SNK_Wait_For_Capabilities State
 */
void pe_snk_wait_for_capabilities_entry(void *obj);
void pe_snk_wait_for_capabilities_run(void *obj);
void pe_snk_wait_for_capabilities_exit(void *obj);
/**
 * @brief PE_SNK_Evaluate_Capability State
 */
void pe_snk_evaluate_capability_entry(void *obj);
/**
 * @brief PE_SNK_Select_Capability State
 */
void pe_snk_select_capability_entry(void *obj);
void pe_snk_select_capability_run(void *obj);
void pe_snk_select_capability_exit(void *obj);
/**
 * @brief PE_SNK_Transition_Sink State
 */
void pe_snk_transition_sink_entry(void *obj);
void pe_snk_transition_sink_run(void *obj);
void pe_snk_transition_sink_exit(void *obj);
/**
 * @brief PE_SNK_Ready State
 */
void pe_snk_ready_entry(void *obj);
void pe_snk_ready_run(void *obj);
/**
 * @brief PE_SNK_Hard_Reset State
 */
void pe_snk_hard_reset_entry(void *obj);
void pe_snk_hard_reset_run(void *obj);
/**
 * @brief PE_SNK_Transition_to_default State
 */
void pe_snk_transition_to_default_entry(void *obj);
void pe_snk_transition_to_default_run(void *obj);
/**
 * @brief PE_SNK_Get_Source_Cap State
 */
void pe_snk_get_source_cap_entry(void *obj);
void pe_snk_get_source_cap_run(void *obj);
void pe_snk_get_source_cap_exit(void *obj);
/**
 * @brief PE_Send_Soft_Reset State
 */
void pe_send_soft_reset_entry(void *obj);
void pe_send_soft_reset_run(void *obj);
void pe_send_soft_reset_exit(void *obj);
/**
 * @brief PE_SNK_Soft_Reset State
 */
void pe_soft_reset_entry(void *obj);
void pe_soft_reset_run(void *obj);
/**
 * @brief PE_Not_Supported State
 */
void pe_send_not_supported_entry(void *obj);
void pe_send_not_supported_run(void *obj);
/**
 * @brief PE_Chunk_Received State
 */
void pe_chunk_received_entry(void *obj);
void pe_chunk_received_run(void *obj);
/**
 * @brief PE_SNK_Give_Sink_Cap State
 */
void pe_snk_give_sink_cap_entry(void *obj);
void pe_snk_give_sink_cap_run(void *obj);
#endif /* ZEPHYR_SUBSYS_USBC_PE_SNK_STATES_INTERNAL_H_ */

View file

@ -15,7 +15,8 @@
LOG_MODULE_REGISTER(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL);
#include "usbc_stack.h"
#include "usbc_pe_common_internal.h"
#include "usbc_tc_common_internal.h"
static int usbc_subsys_init(const struct device *dev);

View file

@ -1,522 +0,0 @@
/*
* Copyright (c) 2022 The Chromium OS Authors
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL);
#include "usbc_stack.h"
/**
* @brief Type-C Layer Flags
*/
enum tc_flags {
/**
* Flag to track Rp resistor change when the sink attached
* sub-state runs
*/
TC_FLAGS_RP_SUBSTATE_CHANGE = 0,
};
/**
 * @brief Type-C States
 *
 * Index order must match the tc_snk_states[] table at the bottom of
 * this file.
 */
enum tc_state_t {
	/** Super state that opens the CC lines */
	TC_CC_OPEN_SUPER_STATE,
	/** Super state that applies Rd to the CC lines */
	TC_CC_RD_SUPER_STATE,
	/** Disabled State */
	TC_DISABLED_STATE,
	/** Error Recovery State */
	TC_ERROR_RECOVERY_STATE,
	/** Unattached Sink State */
	TC_UNATTACHED_SNK_STATE,
	/** Attach Wait Sink State */
	TC_ATTACH_WAIT_SNK_STATE,
	/** Attached Sink State */
	TC_ATTACHED_SNK_STATE,
};

/** State table, defined at the bottom of this file */
static const struct smf_state tc_snk_states[];
/** Initializes the Type-C layer (timers, flags, TCPC) */
static void tc_init(const struct device *dev);
/** Transitions the Type-C state machine to a new state */
static void tc_set_state(const struct device *dev,
			 const enum tc_state_t state);
/** Returns the current Type-C state */
static enum tc_state_t tc_get_state(const struct device *dev);
/** Starts or suspends the PRL and PE layers */
static void pd_enable(const struct device *dev,
		      const bool enable);
/**
 * @brief Initializes the state machine and enters the Disabled state
 */
void tc_subsys_init(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;
	struct tc_sm_t *tc = port->tc;

	/* States need the port device object to reach port data */
	tc->dev = dev;

	/* Begin in the Disabled state */
	smf_set_initial(SMF_CTX(tc), &tc_snk_states[TC_DISABLED_STATE]);
}
/**
 * @brief Runs the Type-C layer
 *
 * Services start/suspend requests, drives the SM_INIT -> SM_RUN ->
 * SM_PAUSED lifecycle (the switch cases deliberately fall through),
 * samples the CC lines, and runs one iteration of the state machine.
 *
 * @param dev Pointer to the device structure for the driver instance
 * @param dpm_request Device Policy Manager request to execute this pass
 */
void tc_run(const struct device *dev,
	    const int32_t dpm_request)
{
	struct usbc_port_data *data = dev->data;
	const struct device *tcpc = data->tcpc;
	struct tc_sm_t *tc = data->tc;

	/* These requests are implicitly set by the Device Policy Manager */
	if (dpm_request == PRIV_PORT_REQUEST_START) {
		data->tc_enabled = true;
	} else if (dpm_request == PRIV_PORT_REQUEST_SUSPEND) {
		data->tc_enabled = false;
		tc_set_state(dev, TC_DISABLED_STATE);
	}

	switch (data->tc_sm_state) {
	case SM_PAUSED:
		if (data->tc_enabled == false) {
			break;
		}
	/* fall through */
	case SM_INIT:
		/* Initialize the Type-C layer */
		tc_init(dev);
		data->tc_sm_state = SM_RUN;
	/* fall through */
	case SM_RUN:
		if (data->tc_enabled == false) {
			pd_enable(dev, false);
			data->tc_sm_state = SM_PAUSED;
			break;
		}

		/* Sample CC lines */
		tcpc_get_cc(tcpc, &tc->cc1, &tc->cc2);

		/* Detect polarity */
		tc->cc_polarity = (tc->cc1 > tc->cc2) ?
			TC_POLARITY_CC1 : TC_POLARITY_CC2;

		/* Execute any asyncronous Device Policy Manager Requests */
		if (dpm_request == REQUEST_TC_ERROR_RECOVERY) {
			/* Transition to Error Recovery State */
			tc_set_state(dev, TC_ERROR_RECOVERY_STATE);
		} else if (dpm_request == REQUEST_TC_DISABLED) {
			/* Transition to Disabled State */
			tc_set_state(dev, TC_DISABLED_STATE);
		}

		/* Run state machine */
		smf_run_state(SMF_CTX(tc));
	}
}
/**
 * @brief Checks if the TC Layer is in an Attached state
 *
 * @retval true while the state machine is in Attached.SNK
 */
bool tc_is_in_attached_state(const struct device *dev)
{
	return tc_get_state(dev) == TC_ATTACHED_SNK_STATE;
}
/**
 * @brief Initializes the Type-C layer
 *
 * Sets up timers, clears flags, initializes the TCPC and enters the
 * ErrorRecovery state so the CC lines are briefly opened on a system
 * reset.
 */
static void tc_init(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;
	struct tc_sm_t *tc = port->tc;

	/* Start with no flags set */
	tc->flags = ATOMIC_INIT(0);

	/* Initialize the timers */
	usbc_timer_init(&tc->tc_t_error_recovery, TC_T_ERROR_RECOVERY_SOURCE_MIN_MS);
	usbc_timer_init(&tc->tc_t_cc_debounce, TC_T_CC_DEBOUNCE_MAX_MS);
	usbc_timer_init(&tc->tc_t_rp_value_change, TC_T_RP_VALUE_CHANGE_MAX_MS);

	/* Initialize the TCPC */
	tcpc_init(port->tcpc);

	/*
	 * Start out in error recovery state so the CC lines are opened for a
	 * short while if this is a system reset.
	 */
	tc_set_state(dev, TC_ERROR_RECOVERY_STATE);
}
/**
 * @brief Sets a Type-C state
 */
static void tc_set_state(const struct device *dev,
			 const enum tc_state_t state)
{
	struct usbc_port_data *port = dev->data;

	smf_set_state(SMF_CTX(port->tc), &tc_snk_states[state]);
}
/**
 * @brief Get the Type-C current state
 *
 * Derived from the current SMF state's index within tc_snk_states[].
 */
static enum tc_state_t tc_get_state(const struct device *dev)
{
	struct usbc_port_data *port = dev->data;
	const struct smf_state *current = port->tc->ctx.current;

	return current - &tc_snk_states[0];
}
/**
 * @brief Enable or suspend Power Delivery
 *
 * Starts (or suspends) both the Protocol Layer and the Policy Engine.
 */
static void pd_enable(const struct device *dev,
		      const bool enable)
{
	if (!enable) {
		prl_suspend(dev);
		pe_suspend(dev);
		return;
	}

	prl_start(dev);
	pe_start(dev);
}
/**
 * @brief Sink power sub states. Only called if a PD contract is not in place
 *
 * Maps the active CC line's Rp level to a current range, debounces a
 * change for tRpValueChange, and then notifies the Device Policy
 * Manager of the new available current.
 */
static void sink_power_sub_states(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	enum tc_cc_voltage_state cc;
	enum tc_cc_voltage_state new_cc_voltage;
	enum usbc_policy_check_t dpm_pwr_change_notify;
	struct tc_sm_t *tc = data->tc;

	/* Get the active CC line (cc_polarity selects CC1 or CC2) */
	cc = tc->cc_polarity ? tc->cc2 : tc->cc1;

	if (cc == TC_CC_VOLT_RP_DEF) {
		/*
		 * This sub-state supports Sinks consuming current within the
		 * lowest range (default) of Source-supplied current.
		 */
		new_cc_voltage = TC_CC_VOLT_RP_DEF;
		dpm_pwr_change_notify = POWER_CHANGE_DEF;
	} else if (cc == TC_CC_VOLT_RP_1A5) {
		/*
		 * This sub-state supports Sinks consuming current within the
		 * two lower ranges (default and 1.5 A) of Source-supplied
		 * current.
		 */
		new_cc_voltage = TC_CC_VOLT_RP_1A5;
		dpm_pwr_change_notify = POWER_CHANGE_1A5;
	} else if (cc == TC_CC_VOLT_RP_3A0) {
		/*
		 * This sub-state supports Sinks consuming current within all
		 * three ranges (default, 1.5 A and 3.0 A) of Source-supplied
		 * current.
		 */
		new_cc_voltage = TC_CC_VOLT_RP_3A0;
		dpm_pwr_change_notify = POWER_CHANGE_3A0;
	} else {
		/* Disconnect detected */
		new_cc_voltage = TC_CC_VOLT_OPEN;
		dpm_pwr_change_notify = POWER_CHANGE_0A0;
	}

	/* Debounce the Rp state */
	if (new_cc_voltage != tc->cc_voltage) {
		tc->cc_voltage = new_cc_voltage;
		atomic_set_bit(&tc->flags, TC_FLAGS_RP_SUBSTATE_CHANGE);
		usbc_timer_start(&tc->tc_t_rp_value_change);
	}

	/* Wait for Rp debounce */
	if (usbc_timer_expired(&tc->tc_t_rp_value_change) == false) {
		return;
	}

	/* Notify DPM of sink sub-state power change */
	if (atomic_test_and_clear_bit(&tc->flags,
				      TC_FLAGS_RP_SUBSTATE_CHANGE)) {
		/* Notification callback is optional */
		if (data->policy_cb_notify) {
			data->policy_cb_notify(dev, dpm_pwr_change_notify);
		}
	}
}
/**
 * @brief Unattached.SNK Entry
 *
 * No work on entry; the Rd super state keeps Rd presented on CC.
 */
static void tc_unattached_snk_entry(void *obj)
{
	LOG_INF("Unattached.SNK");
}
/**
* @brief Unattached.SNK Run
*/
static void tc_unattached_snk_run(void *obj)
{
struct tc_sm_t *tc = (struct tc_sm_t *)obj;
const struct device *dev = tc->dev;
/*
* Transition to AttachWait.SNK when the SNK.Rp state is present
* on at least one of its CC pins.
*/
if (tcpc_is_cc_rp(tc->cc1) || tcpc_is_cc_rp(tc->cc2)) {
tc_set_state(dev, TC_ATTACH_WAIT_SNK_STATE);
}
}
/**
 * @brief AttachWait.SNK Entry
 *
 * Clears the debounced CC state so the run state starts a fresh
 * debounce cycle.
 */
static void tc_attach_wait_snk_entry(void *obj)
{
	struct tc_sm_t *tc = obj;

	LOG_INF("AttachWait.SNK");

	tc->cc_state = TC_CC_NONE;
}
/**
 * @brief AttachWait.SNK Run
 *
 * Debounces the CC state for tCCDebounce. After debounce, transitions
 * back to Unattached.SNK when the CC lines are open, or to
 * Attached.SNK when a Source is present and VBUS is detected.
 */
static void tc_attach_wait_snk_run(void *obj)
{
	struct tc_sm_t *tc = (struct tc_sm_t *)obj;
	const struct device *dev = tc->dev;
	struct usbc_port_data *data = dev->data;
	const struct device *vbus = data->vbus;
	enum tc_cc_states new_cc_state;
	bool vbus_present;

	if (tcpc_is_cc_rp(tc->cc1) || tcpc_is_cc_rp(tc->cc2)) {
		new_cc_state = TC_CC_DFP_ATTACHED;
	} else {
		new_cc_state = TC_CC_NONE;
	}

	/* Debounce the cc state */
	if (new_cc_state != tc->cc_state) {
		usbc_timer_start(&tc->tc_t_cc_debounce);
		tc->cc_state = new_cc_state;
	}

	/* Wait for CC debounce */
	if (usbc_timer_running(&tc->tc_t_cc_debounce) &&
	    usbc_timer_expired(&tc->tc_t_cc_debounce) == false) {
		return;
	}

	/* Transition to UnAttached.SNK if CC lines are open */
	if (new_cc_state == TC_CC_NONE) {
		tc_set_state(dev, TC_UNATTACHED_SNK_STATE);
		/*
		 * Return here: without this, the VBUS check below could
		 * immediately override the Unattached.SNK transition and
		 * call tc_set_state() twice in a single run pass.
		 */
		return;
	}

	/*
	 * The port shall transition to Attached.SNK after the state of only
	 * one of the CC1 or CC2 pins is SNK.Rp for at least tCCDebounce and
	 * VBUS is detected.
	 */
	vbus_present = usbc_vbus_check_level(vbus, TC_VBUS_PRESENT);

	if (vbus_present) {
		tc_set_state(dev, TC_ATTACHED_SNK_STATE);
	}
}
/**
 * @brief AttachWait.SNK Exit
 *
 * Stops the CC debounce timer on the way out of the state.
 */
static void tc_attach_wait_snk_exit(void *obj)
{
	struct tc_sm_t *tc = (struct tc_sm_t *)obj;

	usbc_timer_stop(&tc->tc_t_cc_debounce);
}
/**
 * @brief Attached.SNK Entry
 *
 * Latches the detected CC polarity into the TCPC and enables Power
 * Delivery.
 */
static void tc_attached_snk_entry(void *obj)
{
	struct tc_sm_t *tc = obj;
	const struct device *dev = tc->dev;
	struct usbc_port_data *data = dev->data;

	LOG_INF("Attached.SNK");

	/* Set CC polarity */
	tcpc_set_cc_polarity(data->tcpc, tc->cc_polarity);

	/* Enable PD */
	pd_enable(dev, true);
}
/**
 * @brief Attached.SNK and DebugAccessory.SNK Run
 *
 * Detects detach via VBUS loss, and runs the Sink power sub-states
 * while no explicit PD contract is in place.
 */
static void tc_attached_snk_run(void *obj)
{
	struct tc_sm_t *tc = obj;
	const struct device *dev = tc->dev;
	struct usbc_port_data *data = dev->data;

	/* VBUS removal means the Source detached */
	if (!usbc_vbus_check_level(data->vbus, TC_VBUS_PRESENT)) {
		tc_set_state(dev, TC_UNATTACHED_SNK_STATE);
		return;
	}

	/* Run Sink Power Sub-State if not in an explicit contract */
	if (!pe_is_explicit_contract(dev)) {
		sink_power_sub_states(dev);
	}
}
/**
 * @brief Attached.SNK and DebugAccessory.SNK Exit
 */
static void tc_attached_snk_exit(void *obj)
{
	struct tc_sm_t *sm = obj;

	/* Stop Power Delivery on detach */
	pd_enable(sm->dev, false);
}
/**
 * @brief CC Open Entry
 *
 * Super state entry: removes VCONN and all CC terminations so the
 * port presents as physically disconnected.
 */
static void tc_cc_open_entry(void *obj)
{
	struct tc_sm_t *tc = (struct tc_sm_t *)obj;
	const struct device *dev = tc->dev;
	struct usbc_port_data *data = dev->data;
	const struct device *tcpc = data->tcpc;

	/* Reset the cached CC voltage so sub-state tracking restarts */
	tc->cc_voltage = TC_CC_VOLT_OPEN;

	/* Disable VCONN */
	tcpc_set_vconn(tcpc, false);

	/* Open CC lines */
	tcpc_set_cc(tcpc, TC_CC_OPEN);
}
/**
 * @brief Rd on CC lines Entry
 */
static void tc_cc_rd_entry(void *obj)
{
	struct tc_sm_t *sm = obj;
	struct usbc_port_data *data = sm->dev->data;

	/* Present Rd on both CC lines to advertise a Sink */
	tcpc_set_cc(data->tcpc, TC_CC_RD);
}
/**
 * @brief Disabled Entry
 *
 * The CC lines are opened by the parent CC_OPEN super state (see the
 * state table); only logging is needed here.
 */
static void tc_disabled_entry(void *obj)
{
	LOG_INF("Disabled");
}
/**
 * @brief Disabled Run
 *
 * The port stays disabled until an external request changes the
 * state, so there is nothing to do each pass.
 */
static void tc_disabled_run(void *obj)
{
	/* Do nothing */
}
/**
 * @brief ErrorRecovery Entry
 *
 * The parent CC_OPEN super state opens the CC lines; this state just
 * starts the timer that bounds how long they stay open.
 */
static void tc_error_recovery_entry(void *obj)
{
	struct tc_sm_t *tc = (struct tc_sm_t *)obj;

	LOG_INF("ErrorRecovery");

	/* Start tErrorRecovery timer */
	usbc_timer_start(&tc->tc_t_error_recovery);
}
/**
 * @brief ErrorRecovery Run
 *
 * Holds the CC lines open until tErrorRecovery expires, then restarts
 * the connection process from Unattached.SNK.
 */
static void tc_error_recovery_run(void *obj)
{
	struct tc_sm_t *sm = obj;

	if (usbc_timer_expired(&sm->tc_t_error_recovery)) {
		/* Timer expired: transition to Unattached.SNK */
		tc_set_state(sm->dev, TC_UNATTACHED_SNK_STATE);
	}
}
/**
 * @brief Type-C State Table
 *
 * NOTE(review): argument order appears to be entry, run, exit, parent
 * — confirm against SMF_CREATE_STATE in zephyr/smf.h. Normal states
 * reference a super state so the correct CC termination is applied
 * when they are entered.
 */
static const struct smf_state tc_snk_states[] = {
	/* Super States */
	[TC_CC_OPEN_SUPER_STATE] = SMF_CREATE_STATE(
		tc_cc_open_entry,
		NULL,
		NULL,
		NULL),
	[TC_CC_RD_SUPER_STATE] = SMF_CREATE_STATE(
		tc_cc_rd_entry,
		NULL,
		NULL,
		NULL),
	/* Normal States */
	[TC_UNATTACHED_SNK_STATE] = SMF_CREATE_STATE(
		tc_unattached_snk_entry,
		tc_unattached_snk_run,
		NULL,
		&tc_snk_states[TC_CC_RD_SUPER_STATE]),
	[TC_ATTACH_WAIT_SNK_STATE] = SMF_CREATE_STATE(
		tc_attach_wait_snk_entry,
		tc_attach_wait_snk_run,
		tc_attach_wait_snk_exit,
		&tc_snk_states[TC_CC_RD_SUPER_STATE]),
	[TC_ATTACHED_SNK_STATE] = SMF_CREATE_STATE(
		tc_attached_snk_entry,
		tc_attached_snk_run,
		tc_attached_snk_exit,
		NULL),
	[TC_DISABLED_STATE] = SMF_CREATE_STATE(
		tc_disabled_entry,
		tc_disabled_run,
		NULL,
		&tc_snk_states[TC_CC_OPEN_SUPER_STATE]),
	[TC_ERROR_RECOVERY_STATE] = SMF_CREATE_STATE(
		tc_error_recovery_entry,
		tc_error_recovery_run,
		NULL,
		&tc_snk_states[TC_CC_OPEN_SUPER_STATE]),
};

View file

@ -4,46 +4,14 @@
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_SUBSYS_USBC_H_
#define ZEPHYR_SUBSYS_USBC_H_
#ifndef ZEPHYR_SUBSYS_USBC_TC_H_
#define ZEPHYR_SUBSYS_USBC_TC_H_
#include <zephyr/kernel.h>
#include <zephyr/usb_c/usbc.h>
#include <zephyr/smf.h>
#include "usbc_timer.h"
/**
* @brief TC Layer State Machine Object
*/
struct tc_sm_t {
/** TC layer state machine context */
struct smf_ctx ctx;
/** Port device */
const struct device *dev;
/** TC layer flags */
atomic_t flags;
/** VBUS measurement device */
const struct device *vbus_dev;
/** Port polarity */
enum tc_cc_polarity cc_polarity;
/** The cc state */
enum tc_cc_states cc_state;
/** Voltage on CC pin */
enum tc_cc_voltage_state cc_voltage;
/** Current CC1 value */
enum tc_cc_voltage_state cc1;
/** Current CC2 value */
enum tc_cc_voltage_state cc2;
/* Timers */
/** tCCDebounce timer */
struct usbc_timer_t tc_t_cc_debounce;
/** tRpValueChange timer */
struct usbc_timer_t tc_t_rp_value_change;
/** tErrorRecovery timer */
struct usbc_timer_t tc_t_error_recovery;
};
#include "usbc_stack.h"
/**
* @brief This function must only be called in the subsystem init function.

View file

@ -0,0 +1,265 @@
/*
* Copyright (c) 2022 The Chromium OS Authors
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL);
#include "usbc_stack.h"
#include "usbc_tc_snk_states_internal.h"
#include "usbc_tc_common_internal.h"
static const struct smf_state tc_states[];
static void tc_init(const struct device *dev);
/**
 * @brief Initializes the state machine and enters the Disabled state
 *
 * Must only be called from the subsystem init function. Full layer
 * initialization (timers, TCPC) is deferred to tc_init() on the first
 * tc_run() pass.
 */
void tc_subsys_init(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct tc_sm_t *tc = data->tc;

	/* Save the port device object so states can access it */
	tc->dev = dev;

	/* Initialize the state machine */
	smf_set_initial(SMF_CTX(tc), &tc_states[TC_DISABLED_STATE]);
}
/**
 * @brief Runs the Type-C layer
 *
 * One pass of the layer: applies start/suspend requests from the
 * Device Policy Manager, drives the SM_INIT -> SM_RUN -> SM_PAUSED
 * life cycle, samples the CC lines, and executes one state-machine
 * iteration.
 *
 * @param dev Pointer to the device structure for the driver instance
 * @param dpm_request Request posted by the Device Policy Manager
 */
void tc_run(const struct device *dev, const int32_t dpm_request)
{
	struct usbc_port_data *data = dev->data;
	const struct device *tcpc = data->tcpc;
	struct tc_sm_t *tc = data->tc;

	/* These requests are implicitly set by the Device Policy Manager */
	if (dpm_request == PRIV_PORT_REQUEST_START) {
		data->tc_enabled = true;
	} else if (dpm_request == PRIV_PORT_REQUEST_SUSPEND) {
		data->tc_enabled = false;
		tc_set_state(dev, TC_DISABLED_STATE);
	}

	/*
	 * NOTE(review): no default case — assumes tc_sm_state only ever
	 * holds SM_PAUSED, SM_INIT or SM_RUN; confirm against the enum.
	 */
	switch (data->tc_sm_state) {
	case SM_PAUSED:
		if (data->tc_enabled == false) {
			break;
		}
		/* tc_enabled was set while paused: re-initialize */
		/* fall through */
	case SM_INIT:
		/* Initialize the Type-C layer */
		tc_init(dev);
		data->tc_sm_state = SM_RUN;
		/* fall through */
	case SM_RUN:
		if (data->tc_enabled == false) {
			tc_pd_enable(dev, false);
			data->tc_sm_state = SM_PAUSED;
			break;
		}

		/* Sample CC lines */
		tcpc_get_cc(tcpc, &tc->cc1, &tc->cc2);

		/*
		 * Detect polarity — assumes the higher tc_cc_voltage_state
		 * value marks the active line; confirm enum ordering.
		 */
		tc->cc_polarity = (tc->cc1 > tc->cc2) ?
			TC_POLARITY_CC1 : TC_POLARITY_CC2;

		/* Execute any asynchronous Device Policy Manager Requests */
		if (dpm_request == REQUEST_TC_ERROR_RECOVERY) {
			/* Transition to Error Recovery State */
			tc_set_state(dev, TC_ERROR_RECOVERY_STATE);
		} else if (dpm_request == REQUEST_TC_DISABLED) {
			/* Transition to Disabled State */
			tc_set_state(dev, TC_DISABLED_STATE);
		}

		/* Run state machine */
		smf_run_state(SMF_CTX(tc));
	}
}
/**
* @brief Checks if the TC Layer is in an Attached state
*/
bool tc_is_in_attached_state(const struct device *dev)
{
return (tc_get_state(dev) == TC_ATTACHED_SNK_STATE);
}
/**
 * @brief Initializes the Type-C layer
 *
 * Arms the layer timers, clears the flags, initializes the TCPC
 * driver, and enters ErrorRecovery so the CC lines are opened briefly
 * in case this is a system reset.
 */
static void tc_init(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct tc_sm_t *tc = data->tc;

	/*
	 * Initialize the timers.
	 * NOTE(review): tErrorRecovery uses the SOURCE_MIN constant on a
	 * sink-only port — confirm that is intended.
	 */
	usbc_timer_init(&tc->tc_t_error_recovery, TC_T_ERROR_RECOVERY_SOURCE_MIN_MS);
	usbc_timer_init(&tc->tc_t_cc_debounce, TC_T_CC_DEBOUNCE_MAX_MS);
	usbc_timer_init(&tc->tc_t_rp_value_change, TC_T_RP_VALUE_CHANGE_MAX_MS);

	/* Clear the flags */
	tc->flags = ATOMIC_INIT(0);

	/* Initialize the TCPC */
	tcpc_init(data->tcpc);

	/* Initialize the state machine */
	/*
	 * Start out in error recovery state so the CC lines are opened for a
	 * short while if this is a system reset.
	 */
	tc_set_state(dev, TC_ERROR_RECOVERY_STATE);
}
/**
 * @brief Sets a Type-C state
 */
void tc_set_state(const struct device *dev, const enum tc_state_t state)
{
	struct usbc_port_data *data = dev->data;

	/* Hand the requested state to the state machine framework */
	smf_set_state(SMF_CTX(data->tc), &tc_states[state]);
}
/**
 * @brief Get the Type-C current state
 *
 * Derives the enum value from the position of the current SMF state
 * within the tc_states array (pointer difference).
 */
enum tc_state_t tc_get_state(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->tc->ctx.current - &tc_states[0];
}
/**
 * @brief Enable Power Delivery
 *
 * Starts the Protocol Layer and Policy Engine together when enabling,
 * and suspends both when disabling.
 */
void tc_pd_enable(const struct device *dev,
		  const bool enable)
{
	if (!enable) {
		prl_suspend(dev);
		pe_suspend(dev);
		return;
	}

	prl_start(dev);
	pe_start(dev);
}
/**
 * @brief CC Open Entry
 *
 * Super state entry: removes VCONN and all CC terminations so the
 * port presents as physically disconnected.
 */
static void tc_cc_open_entry(void *obj)
{
	struct tc_sm_t *tc = (struct tc_sm_t *)obj;
	const struct device *dev = tc->dev;
	struct usbc_port_data *data = dev->data;
	const struct device *tcpc = data->tcpc;

	/* Reset the cached CC voltage so sub-state tracking restarts */
	tc->cc_voltage = TC_CC_VOLT_OPEN;

	/* Disable VCONN */
	tcpc_set_vconn(tcpc, false);

	/* Open CC lines */
	tcpc_set_cc(tcpc, TC_CC_OPEN);
}
/**
 * @brief Disabled Entry
 *
 * The CC lines are opened by the parent CC_OPEN super state (see the
 * state table); only logging is needed here.
 */
static void tc_disabled_entry(void *obj)
{
	LOG_INF("Disabled");
}
/**
 * @brief Disabled Run
 *
 * The port stays disabled until an external request changes the
 * state, so there is nothing to do each pass.
 */
static void tc_disabled_run(void *obj)
{
	/* Do nothing */
}
/**
 * @brief ErrorRecovery Entry
 *
 * The parent CC_OPEN super state opens the CC lines; this state just
 * starts the timer that bounds how long they stay open.
 */
static void tc_error_recovery_entry(void *obj)
{
	struct tc_sm_t *tc = (struct tc_sm_t *)obj;

	LOG_INF("ErrorRecovery");

	/* Start tErrorRecovery timer */
	usbc_timer_start(&tc->tc_t_error_recovery);
}
/**
 * @brief ErrorRecovery Run
 *
 * Holds the CC lines open until tErrorRecovery expires, then restarts
 * the connection process from Unattached.SNK.
 */
static void tc_error_recovery_run(void *obj)
{
	struct tc_sm_t *sm = obj;

	if (usbc_timer_expired(&sm->tc_t_error_recovery)) {
		/* Timer expired: transition to Unattached.SNK */
		tc_set_state(sm->dev, TC_UNATTACHED_SNK_STATE);
	}
}
/**
 * @brief Type-C State Table
 *
 * NOTE(review): argument order appears to be entry, run, exit, parent
 * — confirm against SMF_CREATE_STATE in zephyr/smf.h. The sink state
 * handlers are provided by usbc_tc_snk_states.c via its internal
 * header.
 */
static const struct smf_state tc_states[] = {
	/* Super States */
	[TC_CC_OPEN_SUPER_STATE] = SMF_CREATE_STATE(
		tc_cc_open_entry,
		NULL,
		NULL,
		NULL),
	[TC_CC_RD_SUPER_STATE] = SMF_CREATE_STATE(
		tc_cc_rd_entry,
		NULL,
		NULL,
		NULL),
	/* Normal States */
	[TC_UNATTACHED_SNK_STATE] = SMF_CREATE_STATE(
		tc_unattached_snk_entry,
		tc_unattached_snk_run,
		NULL,
		&tc_states[TC_CC_RD_SUPER_STATE]),
	[TC_ATTACH_WAIT_SNK_STATE] = SMF_CREATE_STATE(
		tc_attach_wait_snk_entry,
		tc_attach_wait_snk_run,
		tc_attach_wait_snk_exit,
		&tc_states[TC_CC_RD_SUPER_STATE]),
	[TC_ATTACHED_SNK_STATE] = SMF_CREATE_STATE(
		tc_attached_snk_entry,
		tc_attached_snk_run,
		tc_attached_snk_exit,
		NULL),
	[TC_DISABLED_STATE] = SMF_CREATE_STATE(
		tc_disabled_entry,
		tc_disabled_run,
		NULL,
		&tc_states[TC_CC_OPEN_SUPER_STATE]),
	[TC_ERROR_RECOVERY_STATE] = SMF_CREATE_STATE(
		tc_error_recovery_entry,
		tc_error_recovery_run,
		NULL,
		&tc_states[TC_CC_OPEN_SUPER_STATE]),
};

View file

@ -0,0 +1,106 @@
/*
* Copyright (c) 2022 The Chromium OS Authors
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_SUBSYS_USBC_TC_COMMON_INTERNAL_H_
#define ZEPHYR_SUBSYS_USBC_TC_COMMON_INTERNAL_H_
#include <zephyr/kernel.h>
#include <zephyr/usb_c/usbc.h>
#include <zephyr/smf.h>
#include "usbc_timer.h"
#include "usbc_stack.h"
#include "usbc_tc.h"
/**
 * @brief Type-C States
 *
 * Indexes into the Type-C state table; super states are shared CC
 * terminations that normal states parent to.
 */
enum tc_state_t {
	/** Super state that opens the CC lines */
	TC_CC_OPEN_SUPER_STATE,
	/** Super state that applies Rd to the CC lines */
	TC_CC_RD_SUPER_STATE,
	/** Disabled State */
	TC_DISABLED_STATE,
	/** Error Recovery State */
	TC_ERROR_RECOVERY_STATE,
	/** Unattached Sink State */
	TC_UNATTACHED_SNK_STATE,
	/** Attach Wait Sink State */
	TC_ATTACH_WAIT_SNK_STATE,
	/** Attached Sink State */
	TC_ATTACHED_SNK_STATE,
};
/**
 * @brief Type-C Layer flags
 *
 * Bit positions within tc_sm_t::flags (an atomic_t).
 */
enum tc_flags {
	/**
	 * Flag to track Rp resistor change when the sink attached
	 * sub-state runs
	 */
	TC_FLAGS_RP_SUBSTATE_CHANGE = 0,
};
/**
 * @brief TC Layer State Machine Object
 *
 * One instance per USB-C port; owned by struct usbc_port_data.
 */
struct tc_sm_t {
	/** TC layer state machine context */
	struct smf_ctx ctx;
	/** Port device */
	const struct device *dev;
	/** TC layer flags (bit positions from enum tc_flags) */
	atomic_t flags;
	/** VBUS measurement device */
	const struct device *vbus_dev;
	/** Port polarity (active CC line, refreshed each tc_run pass) */
	enum tc_cc_polarity cc_polarity;
	/** The cc state (debounced in AttachWait.SNK) */
	enum tc_cc_states cc_state;
	/** Voltage on CC pin (tracked by the sink power sub-states) */
	enum tc_cc_voltage_state cc_voltage;
	/** Current CC1 value */
	enum tc_cc_voltage_state cc1;
	/** Current CC2 value */
	enum tc_cc_voltage_state cc2;

	/* Timers */

	/** tCCDebounce timer */
	struct usbc_timer_t tc_t_cc_debounce;
	/** tRpValueChange timer */
	struct usbc_timer_t tc_t_rp_value_change;
	/** tErrorRecovery timer */
	struct usbc_timer_t tc_t_error_recovery;
};
/**
* @brief Sets a Type-C State
*
* @param dev Pointer to the device structure for the driver instance
* @param state next Type-C State to enter
*/
void tc_set_state(const struct device *dev, const enum tc_state_t state);
/**
* @brief Get current Type-C State
*
* @param dev Pointer to the device structure for the driver instance
* @return current Type-C state
*/
enum tc_state_t tc_get_state(const struct device *dev);
/**
* @brief Enable Power Delivery
*
* @param dev Pointer to the device structure for the driver instance
 * @param enable set true to enable Power Delivery
*/
void tc_pd_enable(const struct device *dev, const bool enable);
#endif /* ZEPHYR_SUBSYS_USBC_TC_COMMON_INTERNAL_H_ */

View file

@ -0,0 +1,233 @@
/*
* Copyright (c) 2022 The Chromium OS Authors
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL);
#include "usbc_stack.h"
#include "usbc_tc_snk_states_internal.h"
#include "usbc_tc_common_internal.h"
/**
 * @brief Sink power sub states. Only called if a PD contract is not in place
 *
 * Maps the Rp value advertised on the active CC line to one of the
 * Type-C current levels, debounces the change for tRpValueChange, and
 * notifies the Device Policy Manager when the level settles.
 */
static void sink_power_sub_states(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	enum tc_cc_voltage_state cc;
	enum tc_cc_voltage_state new_cc_voltage;
	enum usbc_policy_check_t dpm_pwr_change_notify;
	struct tc_sm_t *tc = data->tc;

	/*
	 * Get the active CC line — assumes TC_POLARITY_CC1 is the zero
	 * value of enum tc_cc_polarity; confirm against the enum.
	 */
	cc = tc->cc_polarity ? tc->cc2 : tc->cc1;

	if (cc == TC_CC_VOLT_RP_DEF) {
		/*
		 * This sub-state supports Sinks consuming current within the
		 * lowest range (default) of Source-supplied current.
		 */
		new_cc_voltage = TC_CC_VOLT_RP_DEF;
		dpm_pwr_change_notify = POWER_CHANGE_DEF;
	} else if (cc == TC_CC_VOLT_RP_1A5) {
		/*
		 * This sub-state supports Sinks consuming current within the
		 * two lower ranges (default and 1.5 A) of Source-supplied
		 * current.
		 */
		new_cc_voltage = TC_CC_VOLT_RP_1A5;
		dpm_pwr_change_notify = POWER_CHANGE_1A5;
	} else if (cc == TC_CC_VOLT_RP_3A0) {
		/*
		 * This sub-state supports Sinks consuming current within all
		 * three ranges (default, 1.5 A and 3.0 A) of Source-supplied
		 * current.
		 */
		new_cc_voltage = TC_CC_VOLT_RP_3A0;
		dpm_pwr_change_notify = POWER_CHANGE_3A0;
	} else {
		/* Disconnect detected */
		new_cc_voltage = TC_CC_VOLT_OPEN;
		dpm_pwr_change_notify = POWER_CHANGE_0A0;
	}

	/* Debounce the Rp state */
	if (new_cc_voltage != tc->cc_voltage) {
		tc->cc_voltage = new_cc_voltage;
		atomic_set_bit(&tc->flags, TC_FLAGS_RP_SUBSTATE_CHANGE);
		usbc_timer_start(&tc->tc_t_rp_value_change);
	}

	/* Wait for Rp debounce */
	if (usbc_timer_expired(&tc->tc_t_rp_value_change) == false) {
		return;
	}

	/* Notify DPM of sink sub-state power change */
	if (atomic_test_and_clear_bit(&tc->flags,
				      TC_FLAGS_RP_SUBSTATE_CHANGE)) {
		if (data->policy_cb_notify) {
			data->policy_cb_notify(dev, dpm_pwr_change_notify);
		}
	}
}
/**
 * @brief Unattached.SNK Entry
 *
 * Rd is applied by the parent CC_RD super state; only logging is
 * needed here.
 */
void tc_unattached_snk_entry(void *obj)
{
	LOG_INF("Unattached.SNK");
}
/**
* @brief Unattached.SNK Run
*/
void tc_unattached_snk_run(void *obj)
{
struct tc_sm_t *tc = (struct tc_sm_t *)obj;
const struct device *dev = tc->dev;
/*
* Transition to AttachWait.SNK when the SNK.Rp state is present
* on at least one of its CC pins.
*/
if (tcpc_is_cc_rp(tc->cc1) || tcpc_is_cc_rp(tc->cc2)) {
tc_set_state(dev, TC_ATTACH_WAIT_SNK_STATE);
}
}
/**
 * @brief AttachWait.SNK Entry
 */
void tc_attach_wait_snk_entry(void *obj)
{
	struct tc_sm_t *sm = obj;

	LOG_INF("AttachWait.SNK");

	/* Reset the debounced CC state on entry */
	sm->cc_state = TC_CC_NONE;
}
/**
 * @brief AttachWait.SNK Run
 *
 * Debounces the CC lines for tCCDebounce, then transitions to
 * Attached.SNK when a Source presence (SNK.Rp) is stable and VBUS is
 * detected, or back to Unattached.SNK when the CC lines are open.
 */
void tc_attach_wait_snk_run(void *obj)
{
	struct tc_sm_t *tc = (struct tc_sm_t *)obj;
	const struct device *dev = tc->dev;
	struct usbc_port_data *data = dev->data;
	const struct device *vbus = data->vbus;
	enum tc_cc_states new_cc_state;
	bool vbus_present;

	if (tcpc_is_cc_rp(tc->cc1) || tcpc_is_cc_rp(tc->cc2)) {
		new_cc_state = TC_CC_DFP_ATTACHED;
	} else {
		new_cc_state = TC_CC_NONE;
	}

	/* Debounce the cc state */
	if (new_cc_state != tc->cc_state) {
		usbc_timer_start(&tc->tc_t_cc_debounce);
		tc->cc_state = new_cc_state;
	}

	/* Wait for CC debounce */
	if (usbc_timer_running(&tc->tc_t_cc_debounce) &&
	    usbc_timer_expired(&tc->tc_t_cc_debounce) == false) {
		return;
	}

	/* Transition to Unattached.SNK if CC lines are open */
	if (new_cc_state == TC_CC_NONE) {
		tc_set_state(dev, TC_UNATTACHED_SNK_STATE);
		/*
		 * Return here so the VBUS check below cannot immediately
		 * override the transition with Attached.SNK.
		 */
		return;
	}

	/*
	 * The port shall transition to Attached.SNK after the state of only
	 * one of the CC1 or CC2 pins is SNK.Rp for at least tCCDebounce and
	 * VBUS is detected.
	 */
	vbus_present = usbc_vbus_check_level(vbus, TC_VBUS_PRESENT);
	if (vbus_present) {
		tc_set_state(dev, TC_ATTACHED_SNK_STATE);
	}
}
/**
 * @brief AttachWait.SNK Exit
 */
void tc_attach_wait_snk_exit(void *obj)
{
	struct tc_sm_t *sm = obj;

	/* Halt the CC debounce timer when leaving the state */
	usbc_timer_stop(&sm->tc_t_cc_debounce);
}
/**
 * @brief Attached.SNK Entry
 *
 * Latches the CC polarity detected while in AttachWait.SNK into the
 * TCPC, then starts Power Delivery on the connection.
 */
void tc_attached_snk_entry(void *obj)
{
	struct tc_sm_t *tc = (struct tc_sm_t *)obj;
	const struct device *dev = tc->dev;
	struct usbc_port_data *data = dev->data;
	const struct device *tcpc = data->tcpc;

	LOG_INF("Attached.SNK");

	/* Set CC polarity */
	tcpc_set_cc_polarity(tcpc, tc->cc_polarity);

	/* Enable PD (starts the Protocol Layer and Policy Engine) */
	tc_pd_enable(dev, true);
}
/**
 * @brief Attached.SNK and DebugAccessory.SNK Run
 *
 * Monitors VBUS for detach and, while no explicit PD contract is in
 * place, tracks the Source's advertised current via the sink power
 * sub-states.
 */
void tc_attached_snk_run(void *obj)
{
	struct tc_sm_t *tc = (struct tc_sm_t *)obj;
	const struct device *dev = tc->dev;
	struct usbc_port_data *data = dev->data;
	const struct device *vbus = data->vbus;

	/* Detach detection: loss of VBUS means the Source disconnected */
	if (usbc_vbus_check_level(vbus, TC_VBUS_PRESENT) == false) {
		tc_set_state(dev, TC_UNATTACHED_SNK_STATE);
		return;
	}

	/* Run Sink Power Sub-State if not in an explicit contract */
	if (pe_is_explicit_contract(dev) == false) {
		sink_power_sub_states(dev);
	}
}
/**
 * @brief Attached.SNK and DebugAccessory.SNK Exit
 */
void tc_attached_snk_exit(void *obj)
{
	struct tc_sm_t *sm = obj;

	/* Suspend Power Delivery on detach */
	tc_pd_enable(sm->dev, false);
}
/**
 * @brief Rd on CC lines Entry
 */
void tc_cc_rd_entry(void *obj)
{
	struct tc_sm_t *sm = obj;
	struct usbc_port_data *data = sm->dev->data;

	/* Present Rd on both CC lines to advertise a Sink */
	tcpc_set_cc(data->tcpc, TC_CC_RD);
}

View file

@ -0,0 +1,35 @@
/*
* Copyright (c) 2022 The Chromium OS Authors
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_SUBSYS_USBC_TC_SNK_STATES_H_
#define ZEPHYR_SUBSYS_USBC_TC_SNK_STATES_H_
/**
 * @brief Unattached.SNK
 *
 * All handlers take a pointer to the Type-C state machine object
 * (struct tc_sm_t) as @p obj, per the SMF callback signature.
 */
void tc_unattached_snk_entry(void *obj);
void tc_unattached_snk_run(void *obj);

/**
 * @brief AttachWait.SNK
 */
void tc_attach_wait_snk_entry(void *obj);
void tc_attach_wait_snk_run(void *obj);
void tc_attach_wait_snk_exit(void *obj);

/**
 * @brief Attached.SNK
 */
void tc_attached_snk_entry(void *obj);
void tc_attached_snk_run(void *obj);
void tc_attached_snk_exit(void *obj);

/**
 * @brief Super state that applies Rd
 */
void tc_cc_rd_entry(void *obj);
#endif /* ZEPHYR_SUBSYS_USBC_TC_SNK_STATES_H_ */