usb_c: Refactor Sink common code

Move non-sink-specific code into common code.

Signed-off-by: Sam Hurst <sbh1187@gmail.com>
commit ee9903005a by Sam Hurst, 2023-01-07 15:32:10 -08:00; committed by Carles Cufí
9 changed files with 493 additions and 463 deletions
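
In outline, the refactor leaves only role-agnostic setup in the common pe_init() and moves the sink-only pieces into a new pe_snk_init(). A condensed sketch of the resulting init path, assembled strictly from the hunks below (nothing beyond what the diff itself shows):

static void pe_init(const struct device *dev)
{
        struct usbc_port_data *data = dev->data;
        struct policy_engine *pe = data->pe;

        /* Clear all flags */
        atomic_clear(pe->flags);

        /* Initialize common timers */
        usbc_timer_init(&pe->pd_t_sender_response, PD_T_SENDER_RESPONSE_NOM_MS);
        usbc_timer_init(&pe->pd_t_chunking_not_supported, PD_T_CHUNKING_NOT_SUPPORTED_NOM_MS);

        /* Initialize common counters */
        pe->hard_reset_counter = 0;

        /* Sink-specific timers, initial data role, and start state */
        pe_snk_init(dev);
}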


@@ -1,174 +0,0 @@
/*
* Copyright (c) 2022 The Chromium OS Authors
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_SUBSYS_USBC_PE_H_
#define ZEPHYR_SUBSYS_USBC_PE_H_
#include <zephyr/kernel.h>
#include <zephyr/usb_c/usbc.h>
#include <zephyr/drivers/usb_c/usbc_pd.h>
#include <zephyr/drivers/usb_c/usbc_tc.h>
/**
* @brief Policy Engine Errors
*/
enum pe_error {
/** Transmit error */
ERR_XMIT,
};
/**
* @brief This function must only be called in the subsystem init function.
*
* @param dev Pointer to the device structure for the driver instance.
*/
void pe_subsys_init(const struct device *dev);
/**
* @brief Start the Policy Engine Layer state machine. This is only called
* from the Type-C state machine.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_start(const struct device *dev);
/**
* @brief Suspend the Policy Engine Layer state machine. This is only called
* from the Type-C state machine.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_suspend(const struct device *dev);
/**
* @brief Run the Policy Engine Layer state machine. This is called from the
* subsystem's port stack thread.
*
* @param dev Pointer to the device structure for the driver instance
* @param dpm_request Device Policy Manager request
*/
void pe_run(const struct device *dev, const int32_t dpm_request);
/**
* @brief Query if the Policy Engine is running
*
* @param dev Pointer to the device structure for the driver instance
*
* @retval TRUE if the Policy Engine is running
* @retval FALSE if the Policy Engine is not running
*/
bool pe_is_running(const struct device *dev);
/**
* @brief Informs the Policy Engine that a message was successfully sent
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_message_sent(const struct device *dev);
/**
* @brief Informs the Policy Engine of an error.
*
* @param dev Pointer to the device structure for the driver instance
* @param e policy error
* @param type port partner address where error was generated
*/
void pe_report_error(const struct device *dev, const enum pe_error e,
const enum pd_packet_type type);
/**
* @brief Informs the Policy Engine that a transmit message was discarded
* because of an incoming message.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_report_discard(const struct device *dev);
/**
* @brief Called by the Protocol Layer to inform the Policy Engine
* that a message has been received.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_message_received(const struct device *dev);
/**
* @brief Informs the Policy Engine that a hard reset was received.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_got_hard_reset(const struct device *dev);
/**
* @brief Informs the Policy Engine that a soft reset was received.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_got_soft_reset(const struct device *dev);
/**
* @brief Informs the Policy Engine that a hard reset was sent.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_hard_reset_sent(const struct device *dev);
/**
* @brief Indicates if an explicit contract is in place
*
* @param dev Pointer to the device structure for the driver instance
*
* @retval true if an explicit contract is in place, else false
*/
bool pe_is_explicit_contract(const struct device *dev);
/**
* @brief Informs the Policy Engine that it should invalidate the
* explicit contract.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_invalidate_explicit_contract(const struct device *dev);
/**
* @brief Return true if the PE is within an atomic messaging sequence
* that it initiated with a SOP* port partner.
*
* @note The PRL layer polls this instead of using AMS_START and AMS_END
* notifications from the PE, as called out by the spec.
*
* @param dev Pointer to the device structure for the driver instance
*/
bool pe_dpm_initiated_ams(const struct device *dev);
/**
* @brief Get the current data role
*
* @param dev Pointer to the device structure for the driver instance
*
* @retval data role
*/
enum tc_data_role pe_get_data_role(const struct device *dev);
/**
* @brief Get the current power role
*
* @param dev Pointer to the device structure for the driver instance
*
* @retval power role
*/
enum tc_power_role pe_get_power_role(const struct device *dev);
/**
* @brief Get cable plug role
*
* @param dev Pointer to the device structure for the driver instance
*
* @retval cable plug role
*/
enum tc_cable_plug pe_get_cable_plug(const struct device *dev);
#endif /* ZEPHYR_SUBSYS_USBC_PE_H_ */


@@ -92,18 +92,17 @@ static void pe_init(const struct device *dev)
struct usbc_port_data *data = dev->data;
struct policy_engine *pe = data->pe;
/* Clear all flags */
atomic_clear(pe->flags);
usbc_timer_init(&pe->pd_t_typec_sink_wait_cap, PD_T_TYPEC_SINK_WAIT_CAP_MAX_MS);
/* Initialize common timers */
usbc_timer_init(&pe->pd_t_sender_response, PD_T_SENDER_RESPONSE_NOM_MS);
usbc_timer_init(&pe->pd_t_ps_transition, PD_T_SPR_PS_TRANSITION_NOM_MS);
usbc_timer_init(&pe->pd_t_chunking_not_supported, PD_T_CHUNKING_NOT_SUPPORTED_NOM_MS);
usbc_timer_init(&pe->pd_t_wait_to_resend, PD_T_SINK_REQUEST_MIN_MS);
pe->data_role = TC_ROLE_UFP;
/* Initialize common counters */
pe->hard_reset_counter = 0;
pe_set_state(dev, PE_SNK_STARTUP);
pe_snk_init(dev);
}
/**
@@ -164,6 +163,21 @@ void pe_run(const struct device *dev, const int32_t dpm_request)
}
}
/**
* @brief Sets Data Role
*/
void pe_set_data_role(const struct device *dev, enum tc_data_role dr)
{
struct usbc_port_data *data = dev->data;
struct policy_engine *pe = data->pe;
/* Update data role */
pe->data_role = dr;
/* Notify TCPC of role update */
tcpc_set_roles(data->tcpc, pe->power_role, pe->data_role);
}
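/*
 * For illustration, the two call sites this commit gives the helper
 * above (both taken from hunks later in this diff, not new code): the
 * sink init sets the initial UFP role, and the DRS evaluate-swap path
 * toggles the role after accepting a swap.
 */
pe_set_data_role(dev, TC_ROLE_UFP);
pe_set_data_role(dev, (pe->data_role == TC_ROLE_UFP) ? TC_ROLE_DFP : TC_ROLE_UFP);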
/**
* @brief Gets the current data role
*/
@@ -437,7 +451,7 @@ void extended_message_not_supported(const struct device *dev)
ext_header.raw_value = *payload;
if (ext_header.chunked && ext_header.data_size > PD_MAX_EXTENDED_MSG_CHUNK_LEN) {
pe_set_state(dev, PE_SNK_CHUNK_RECEIVED);
pe_set_state(dev, PE_CHUNK_RECEIVED);
} else {
pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
}
@@ -570,7 +584,7 @@ void policy_get_snk_cap(const struct device *dev, uint32_t **pdos, int *num_pdos
/**
* @brief PE_DRS_Evaluate_Swap Entry state
*/
void pe_drs_evaluate_swap_entry(void *obj)
static void pe_drs_evaluate_swap_entry(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
@@ -597,7 +611,7 @@ void pe_drs_evaluate_swap_entry(void *obj)
/**
* @brief PE_DRS_Evaluate_Swap Run state
*/
void pe_drs_evaluate_swap_run(void *obj)
static void pe_drs_evaluate_swap_run(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
@@ -609,9 +623,8 @@ void pe_drs_evaluate_swap_run(void *obj)
/* Only update data roles if last message sent was Accept */
if (prl_tx->msg_type == PD_CTRL_ACCEPT) {
/* Update Data Role */
pe->data_role = (pe->data_role == TC_ROLE_UFP) ? TC_ROLE_DFP : TC_ROLE_UFP;
/* Notify TCPC of role update */
tcpc_set_roles(data->tcpc, pe->power_role, pe->data_role);
pe_set_data_role(dev, (pe->data_role == TC_ROLE_UFP)
? TC_ROLE_DFP : TC_ROLE_UFP);
/* Inform Device Policy Manager of Data Role Change */
policy_notify(dev, (pe->data_role == TC_ROLE_UFP) ? DATA_ROLE_IS_UFP
: DATA_ROLE_IS_DFP);
@@ -630,7 +643,7 @@ void pe_drs_evaluate_swap_run(void *obj)
/**
* @brief PE_DRS_Send_Swap Entry state
*/
void pe_drs_send_swap_entry(void *obj)
static void pe_drs_send_swap_entry(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
@@ -642,7 +655,7 @@ void pe_drs_send_swap_entry(void *obj)
/**
* @brief PE_DRS_Send_Swap Run state
*/
void pe_drs_send_swap_run(void *obj)
static void pe_drs_send_swap_run(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
@@ -710,9 +723,9 @@ void pe_drs_send_swap_run(void *obj)
}
/**
* @brief PE_Send_Not_Supported Exit state
* @brief PE_DRS_Send_Swap Exit state
*/
void pe_drs_send_swap_exit(void *obj)
static void pe_drs_send_swap_exit(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
@@ -720,6 +733,229 @@ void pe_drs_send_swap_exit(void *obj)
usbc_timer_stop(&pe->pd_t_sender_response);
}
/**
* @brief The PE_SOFT_RESET state has two embedded states
* that handle sending an Accept message.
*/
enum pe_soft_reset_submachine_states {
/* Send Accept message sub state */
PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG,
/* Wait for Accept message to be sent or an error sub state */
PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG_COMPLETE
};
/**
* @brief 8.3.3.4.2.1 PE_SNK_Soft_Reset State
*/
static void pe_soft_reset_entry(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
/* Reset the protocol layer */
prl_reset(dev);
/* Initialize PE Submachine */
pe->submachine = PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG;
}
static void pe_soft_reset_run(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
if (!prl_is_running(dev)) {
return;
}
switch (pe->submachine) {
case PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG:
/* Send Accept message to SOP */
pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
/* Start Sender Response Timer */
usbc_timer_start(&pe->pd_t_sender_response);
/* Move to next substate */
pe->submachine = PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG_COMPLETE;
break;
case PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG_COMPLETE:
/*
* The Policy Engine Shall transition to the
* PE_SNK_Wait_for_Capabilities state when:
* 1: The Accept message was sent to SOP.
*/
if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
} else if (usbc_timer_expired(&pe->pd_t_sender_response) ||
atomic_test_and_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
/*
* The Policy Engine Shall transition to the
* PE_SNK_Hard_Reset state when:
* 1: A SenderResponseTimer timeout occurs.
* 2: OR the Protocol Layer indicates that a
* transmission error has occurred.
*/
pe_set_state(dev, PE_SNK_HARD_RESET);
}
break;
}
}
static void pe_soft_reset_exit(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
/* Stop Sender Response Timer */
usbc_timer_stop(&pe->pd_t_sender_response);
}
/**
* @brief PE_Send_Soft_Reset Entry State
*/
static void pe_send_soft_reset_entry(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
LOG_INF("PE_SNK_Send_Soft_Reset");
/* Reset Protocol Layer */
prl_reset(dev);
atomic_set_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET);
}
/**
* @brief PE_Send_Soft_Reset Run State
*/
static void pe_send_soft_reset_run(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
struct usbc_port_data *data = dev->data;
struct protocol_layer_rx_t *prl_rx = data->prl_rx;
union pd_header header;
if (prl_is_running(dev) == false) {
return;
}
if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET)) {
/* Send Soft Reset message */
pe_send_ctrl_msg(dev, pe->soft_reset_sop, PD_CTRL_SOFT_RESET);
return;
}
if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
/* Inform Device Policy Manager that the message was discarded */
policy_notify(dev, MSG_DISCARDED);
pe_set_state(dev, PE_SNK_READY);
} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
/* Start SenderResponse timer */
usbc_timer_start(&pe->pd_t_sender_response);
}
/*
* The Policy Engine Shall transition to the PE_SNK_Wait_for_Capabilities
* state when:
* 1: An Accept Message has been received on SOP
*/
else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
header = prl_rx->emsg.header;
if (received_control_message(dev, header, PD_CTRL_ACCEPT)) {
pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
}
}
/*
* The Policy Engine Shall transition to the PE_SNK_Hard_Reset state when:
* 1: A SenderResponseTimer timeout occurs (Handled in pe_report_error function)
* 2: Or the Protocol Layer indicates that a transmission error has occurred
*/
else if (usbc_timer_expired(&pe->pd_t_sender_response) ||
atomic_test_and_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
pe_set_state(dev, PE_SNK_HARD_RESET);
}
}
/**
* @brief PE_Send_Soft_Reset Exit State
*/
static void pe_send_soft_reset_exit(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
/* Stop Sender Response Timer */
usbc_timer_stop(&pe->pd_t_sender_response);
}
/**
* @brief 8.3.3.6.2.1 PE_SNK_Send_Not_Supported State
*/
static void pe_send_not_supported_entry(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
LOG_INF("PE_Not_Supported");
/* Request the Protocol Layer to send a Not_Supported or Reject Message. */
if (prl_get_rev(dev, PD_PACKET_SOP) > PD_REV20) {
pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_NOT_SUPPORTED);
} else {
pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_REJECT);
}
}
static void pe_send_not_supported_run(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
if (atomic_test_bit(pe->flags, PE_FLAGS_TX_COMPLETE) ||
atomic_test_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
atomic_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED);
pe_set_state(dev, PE_SNK_READY);
}
}
/**
* @brief 8.3.3.6.2.3 PE_SNK_Chunk_Received State
*/
static void pe_chunk_received_entry(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
LOG_INF("PE_SNK_Chunk_Received");
/*
* On entry to the PE_SNK_Chunk_Received state, the Policy Engine
* Shall initialize and run the ChunkingNotSupportedTimer.
*/
usbc_timer_start(&pe->pd_t_chunking_not_supported);
}
/**
* @brief PE_Chunk_Received Run State
*/
static void pe_chunk_received_run(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
/*
* The Policy Engine Shall transition to PE_SNK_Send_Not_Supported
* when:
* 1: The ChunkingNotSupportedTimer has timed out.
*/
if (usbc_timer_expired(&pe->pd_t_chunking_not_supported)) {
pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
}
}
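/*
 * Taken together, this state illustrates the usbc_timer pattern used
 * throughout the PE; every call below already appears in this diff:
 * init once at startup, start on state entry, poll in the run handler,
 * and stop in an exit handler where one exists (as the soft-reset
 * states above do with pd_t_sender_response).
 */
usbc_timer_init(&pe->pd_t_chunking_not_supported, PD_T_CHUNKING_NOT_SUPPORTED_NOM_MS);
usbc_timer_start(&pe->pd_t_chunking_not_supported);
if (usbc_timer_expired(&pe->pd_t_chunking_not_supported)) {
        pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
}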
/**
* @brief Suspend State
*/
static void pe_suspend_entry(void *obj)
{
LOG_INF("PE_SUSPEND");
@@ -789,6 +1025,11 @@ static const struct smf_state pe_states[] = {
pe_snk_transition_sink_run,
pe_snk_transition_sink_exit,
NULL),
[PE_SNK_GET_SOURCE_CAP] = SMF_CREATE_STATE(
pe_snk_get_source_cap_entry,
pe_snk_get_source_cap_run,
pe_snk_get_source_cap_exit,
NULL),
[PE_SEND_SOFT_RESET] = SMF_CREATE_STATE(
pe_send_soft_reset_entry,
pe_send_soft_reset_run,
@@ -797,7 +1038,7 @@ static const struct smf_state pe_states[] = {
[PE_SOFT_RESET] = SMF_CREATE_STATE(
pe_soft_reset_entry,
pe_soft_reset_run,
NULL,
pe_soft_reset_exit,
NULL),
[PE_SEND_NOT_SUPPORTED] = SMF_CREATE_STATE(
pe_send_not_supported_entry,
@@ -814,15 +1055,10 @@ static const struct smf_state pe_states[] = {
pe_drs_send_swap_run,
pe_drs_send_swap_exit,
NULL),
[PE_SNK_GET_SOURCE_CAP] = SMF_CREATE_STATE(
pe_snk_get_source_cap_entry,
pe_snk_get_source_cap_run,
pe_snk_get_source_cap_exit,
NULL),
[PE_SNK_CHUNK_RECEIVED] = SMF_CREATE_STATE(
[PE_CHUNK_RECEIVED] = SMF_CREATE_STATE(
pe_chunk_received_entry,
pe_chunk_received_run,
pe_chunk_received_exit,
NULL,
NULL),
[PE_SUSPEND] = SMF_CREATE_STATE(
pe_suspend_entry,

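For context, the pe_set_state() calls used throughout these hunks index the pe_states[] table above into Zephyr's State Machine Framework. A minimal sketch of the assumed wrapper (not shown in this commit; the body is an assumption based on the zephyr/smf.h API):

static void pe_set_state(const struct device *dev, const enum usbc_pe_state state)
{
        struct usbc_port_data *data = dev->data;

        /* Run the current state's exit action and the new state's entry action */
        smf_set_state(SMF_CTX(data->pe), &pe_states[state]);
}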

@@ -14,6 +14,14 @@
#include <zephyr/smf.h>
#include "usbc_timer.h"
/**
* @brief Policy Engine Errors
*/
enum pe_error {
/** Transmit error */
ERR_XMIT,
};
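/*
 * Hypothetical caller, for illustration only (the call site is an
 * assumption; the prototype is declared later in this header): a lower
 * layer reports a failed transmission toward the SOP partner.
 *
 *	pe_report_error(dev, ERR_XMIT, PD_PACKET_SOP);
 */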
/**
* @brief Policy Engine Layer States
*/
@@ -51,7 +59,7 @@ enum usbc_pe_state {
/** PE_DRS_Send_Swap */
PE_DRS_SEND_SWAP,
/** PE_SNK_Chunk_Received */
PE_SNK_CHUNK_RECEIVED,
PE_CHUNK_RECEIVED,
/** PE_Suspend. Not part of the PD specification. */
PE_SUSPEND,
@@ -133,6 +141,8 @@ struct policy_engine {
enum pd_packet_type soft_reset_sop;
/** DPM request */
enum usbc_policy_request_t dpm_request;
/** Generic variable used by states that embed a simple sub-state machine */
uint32_t submachine;
/* Counters */
@@ -317,4 +327,165 @@ void policy_get_snk_cap(const struct device *dev, uint32_t **pdos, int *num_pdos
*/
bool common_dpm_requests(const struct device *dev);
/**
* @brief This function must only be called in the subsystem init function.
*
* @param dev Pointer to the device structure for the driver instance.
*/
void pe_subsys_init(const struct device *dev);
/**
* @brief Start the Policy Engine Layer state machine. This is only called
* from the Type-C state machine.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_start(const struct device *dev);
/**
* @brief Suspend the Policy Engine Layer state machine. This is only called
* from the Type-C state machine.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_suspend(const struct device *dev);
/**
* @brief Run the Policy Engine Layer state machine. This is called from the
* subsystem's port stack thread.
*
* @param dev Pointer to the device structure for the driver instance
* @param dpm_request Device Policy Manager request
*/
void pe_run(const struct device *dev,
const int32_t dpm_request);
/**
* @brief Query if the Policy Engine is running
*
* @param dev Pointer to the device structure for the driver instance
*
* @retval TRUE if the Policy Engine is running
* @retval FALSE if the Policy Engine is not running
*/
bool pe_is_running(const struct device *dev);
/**
* @brief Informs the Policy Engine that a message was successfully sent
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_message_sent(const struct device *dev);
/**
* @brief Informs the Policy Engine of an error.
*
* @param dev Pointer to the device structure for the driver instance
* @param e policy error
* @param type port partner address where error was generated
*/
void pe_report_error(const struct device *dev,
const enum pe_error e,
const enum pd_packet_type type);
/**
* @brief Informs the Policy Engine that a transmit message was discarded
* because of an incoming message.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_report_discard(const struct device *dev);
/**
* @brief Called by the Protocol Layer to inform the Policy Engine
* that a message has been received.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_message_received(const struct device *dev);
/**
* @brief Informs the Policy Engine that a hard reset was received.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_got_hard_reset(const struct device *dev);
/**
* @brief Informs the Policy Engine that a soft reset was received.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_got_soft_reset(const struct device *dev);
/**
* @brief Informs the Policy Engine that a hard reset was sent.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_hard_reset_sent(const struct device *dev);
/**
* @brief Indicates if an explicit contract is in place
*
* @param dev Pointer to the device structure for the driver instance
*
* @retval true if an explicit contract is in place, else false
*/
bool pe_is_explicit_contract(const struct device *dev);
/**
* @brief Informs the Policy Engine that it should invalidate the
* explicit contract.
*
* @param dev Pointer to the device structure for the driver instance
*/
void pe_invalidate_explicit_contract(const struct device *dev);
/**
* @brief Return true if the PE is within an atomic messaging sequence
* that it initiated with a SOP* port partner.
*
* @note The PRL layer polls this instead of using AMS_START and AMS_END
* notifications from the PE, as called out by the spec.
*
* @param dev Pointer to the device structure for the driver instance
*/
bool pe_dpm_initiated_ams(const struct device *dev);
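/*
 * Per the note above, the PRL polls this function rather than relying
 * on AMS_START/AMS_END notifications. A purely hypothetical poll site,
 * for illustration (the surrounding logic is an assumption, not code
 * from this commit):
 *
 *	if (pe_dpm_initiated_ams(dev)) {
 *		// the PE initiated the current Atomic Message Sequence
 *	}
 */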
/**
* @brief Get the current data role
*
* @param dev Pointer to the device structure for the driver instance
*
* @retval data role
*/
enum tc_data_role pe_get_data_role(const struct device *dev);
/**
* @brief Sets the data role and updates the TCPC
*
* @param dev Pointer to the device structure for the driver instance
* @param dr Data Role to be set
*/
void pe_set_data_role(const struct device *dev, enum tc_data_role dr);
/**
* @brief Get the current power role
*
* @param dev Pointer to the device structure for the driver instance
*
* @retval power role
*/
enum tc_power_role pe_get_power_role(const struct device *dev);
/**
* @brief Get cable plug role
*
* @param dev Pointer to the device structure for the driver instance
*
* @retval cable plug role
*/
enum tc_cable_plug pe_get_cable_plug(const struct device *dev);
#endif /* ZEPHYR_SUBSYS_USBC_PE_COMMON_INTERNAL_H_ */


@@ -15,6 +15,26 @@ LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL);
#include "usbc_pe_common_internal.h"
#include "usbc_stack.h"
/**
* @brief Initialize the Sink Policy Engine layer
*/
void pe_snk_init(const struct device *dev)
{
struct usbc_port_data *data = dev->data;
struct policy_engine *pe = data->pe;
/* Initial role of sink is UFP */
pe_set_data_role(dev, TC_ROLE_UFP);
/* Initialize timers */
usbc_timer_init(&pe->pd_t_typec_sink_wait_cap, PD_T_TYPEC_SINK_WAIT_CAP_MAX_MS);
usbc_timer_init(&pe->pd_t_ps_transition, PD_T_SPR_PS_TRANSITION_NOM_MS);
usbc_timer_init(&pe->pd_t_wait_to_resend, PD_T_SINK_REQUEST_MIN_MS);
/* Goto startup state */
pe_set_state(dev, PE_SNK_STARTUP);
}
/**
* @brief Handle sink-specific DPM requests
*/
@@ -662,196 +682,6 @@ void pe_snk_get_source_cap_exit(void *obj)
usbc_timer_stop(&pe->pd_t_sender_response);
}
/**
* @brief PE_Send_Soft_Reset Entry State
*/
void pe_send_soft_reset_entry(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
LOG_INF("PE_SNK_Send_Soft_Reset");
/* Reset Protocol Layer */
prl_reset(dev);
atomic_set_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET);
}
/**
* @brief PE_Send_Soft_Reset Run State
*/
void pe_send_soft_reset_run(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
struct usbc_port_data *data = dev->data;
struct protocol_layer_rx_t *prl_rx = data->prl_rx;
union pd_header header;
if (prl_is_running(dev) == false) {
return;
}
if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET)) {
/* Send Soft Reset message */
pe_send_ctrl_msg(dev, pe->soft_reset_sop, PD_CTRL_SOFT_RESET);
return;
}
if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
/* Inform Device Policy Manager that the message was discarded */
policy_notify(dev, MSG_DISCARDED);
pe_set_state(dev, PE_SNK_READY);
} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
/* Start SenderResponse timer */
usbc_timer_start(&pe->pd_t_sender_response);
} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
/*
* The Policy Engine Shall transition to the PE_SNK_Wait_for_Capabilities
* state when:
* 1: An Accept Message has been received on SOP
*/
header = prl_rx->emsg.header;
if (received_control_message(dev, header, PD_CTRL_ACCEPT)) {
pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
}
} else if (usbc_timer_expired(&pe->pd_t_sender_response) ||
atomic_test_and_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
/*
* The Policy Engine Shall transition to the PE_SNK_Hard_Reset state when:
* 1: A SenderResponseTimer timeout occurs
* 2: Or the Protocol Layer indicates that a transmission error has occurred
*/
pe_set_state(dev, PE_SNK_HARD_RESET);
}
}
/**
* @brief PE_Send_Soft_Reset Exit State
*/
void pe_send_soft_reset_exit(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
/* Stop Sender Response Timer */
usbc_timer_stop(&pe->pd_t_sender_response);
}
/**
* @brief PE_SNK_Soft_Reset Entry State
*/
void pe_soft_reset_entry(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
LOG_INF("PE_SNK_Soft_Reset");
/* Reset the Protocol Layer */
prl_reset(dev);
atomic_set_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET);
}
/**
* @brief PE_SNK_Soft_Reset Run State
*/
void pe_soft_reset_run(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
if (prl_is_running(dev) == false) {
return;
}
if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET)) {
/* Send Accept message */
pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
/*
* The Policy Engine Shall transition to the PE_SNK_Wait_for_Capabilities
* state when:
* 1: The Accept Message has been sent on SOP.
*/
pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
/*
* The Policy Engine Shall transition to the PE_SNK_Hard_Reset
* state when:
* 1: The Protocol Layer indicates that a transmission error
* has occurred.
*/
pe_set_state(dev, PE_SNK_HARD_RESET);
}
}
/**
* @brief PE_Not_Supported Entry State
*/
void pe_send_not_supported_entry(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
LOG_INF("PE_Not_Supported");
/* Request the Protocol Layer to send a Not_Supported or Reject Message. */
if (prl_get_rev(dev, PD_PACKET_SOP) > PD_REV20) {
pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_NOT_SUPPORTED);
} else {
pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_REJECT);
}
}
/**
* @brief PE_Not_Supported Run State
*/
void pe_send_not_supported_run(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
if (atomic_test_bit(pe->flags, PE_FLAGS_TX_COMPLETE) ||
atomic_test_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
atomic_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED);
pe_set_state(dev, PE_SNK_READY);
}
}
/**
* @brief PE_Chunk_Received Entry State
*/
void pe_chunk_received_entry(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
LOG_INF("PE_SNK_Chunk_Received");
usbc_timer_start(&pe->pd_t_chunking_not_supported);
}
/**
* @brief PE_Chunk_Received Run State
*/
void pe_chunk_received_run(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
const struct device *dev = pe->dev;
if (usbc_timer_expired(&pe->pd_t_chunking_not_supported)) {
pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
}
}
void pe_chunk_received_exit(void *obj)
{
struct policy_engine *pe = (struct policy_engine *)obj;
usbc_timer_stop(&pe->pd_t_chunking_not_supported);
}
/**
* @brief PE_SNK_Give_Sink_Cap Entry state
*/


@@ -7,6 +7,16 @@
#ifndef ZEPHYR_SUBSYS_USBC_PE_SNK_STATES_INTERNAL_H_
#define ZEPHYR_SUBSYS_USBC_PE_SNK_STATES_INTERNAL_H_
/**
* @brief Init the PE Sink State machine
*/
void pe_snk_init(const struct device *dev);
/**
* @brief Handle Sink-specific DPM requests
*/
bool sink_dpm_requests(const struct device *dev);
/**
* @brief PE_SNK_Startup Entry State
*/
@@ -70,32 +80,6 @@ void pe_snk_get_source_cap_entry(void *obj);
void pe_snk_get_source_cap_run(void *obj);
void pe_snk_get_source_cap_exit(void *obj);
/**
* @brief PE_Send_Soft_Reset Entry State
*/
void pe_send_soft_reset_entry(void *obj);
void pe_send_soft_reset_run(void *obj);
void pe_send_soft_reset_exit(void *obj);
/**
* @brief PE_SNK_Soft_Reset Entry State
*/
void pe_soft_reset_entry(void *obj);
void pe_soft_reset_run(void *obj);
/**
* @brief PE_Not_Supported Entry State
*/
void pe_send_not_supported_entry(void *obj);
void pe_send_not_supported_run(void *obj);
/**
* @brief PE_Chunk_Received Entry State
*/
void pe_chunk_received_entry(void *obj);
void pe_chunk_received_run(void *obj);
void pe_chunk_received_exit(void *obj);
/**
* @brief PE_SNK_Give_Sink_Cap Entry state
*/


@@ -12,7 +12,7 @@
#include <zephyr/drivers/usb_c/usbc_tcpc.h>
#include <zephyr/smf.h>
#include "usbc_pe.h"
#include "usbc_pe_common_internal.h"
#include "usbc_timer.h"
/**


@@ -10,8 +10,8 @@
#include <zephyr/kernel.h>
#include <zephyr/usb_c/usbc.h>
#include "usbc_tc.h"
#include "usbc_pe.h"
#include "usbc_tc_common_internal.h"
#include "usbc_pe_common_internal.h"
#include "usbc_prl.h"
#define PRIV_PORT_REQUEST_SUSPEND -1


@@ -1,41 +0,0 @@
/*
* Copyright (c) 2022 The Chromium OS Authors
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_SUBSYS_USBC_TC_H_
#define ZEPHYR_SUBSYS_USBC_TC_H_
#include <zephyr/kernel.h>
#include <zephyr/usb_c/usbc.h>
#include <zephyr/smf.h>
#include "usbc_stack.h"
/**
* @brief This function must only be called in the subsystem init function.
*
* @param dev Pointer to the device structure for the driver instance.
*/
void tc_subsys_init(const struct device *dev);
/**
* @brief Run the TC Layer state machine. This is called from the subsystem's
* port stack thread.
*
* @param dev Pointer to the device structure for the driver instance.
* @param dpm_request Device Policy Manager request
*/
void tc_run(const struct device *dev, int32_t dpm_request);
/**
* @brief Checks if the TC Layer is in an Attached state
*
* @param dev Pointer to the device structure for the driver instance.
*
* @retval true if TC Layer is in an Attached state, else false
*/
bool tc_is_in_attached_state(const struct device *dev);
#endif /* ZEPHYR_SUBSYS_USBC_TC_H_ */


@@ -13,7 +13,6 @@
#include "usbc_timer.h"
#include "usbc_stack.h"
#include "usbc_tc.h"
/**
* @brief Type-C States
@@ -23,16 +22,16 @@ enum tc_state_t {
TC_CC_OPEN_SUPER_STATE,
/** Super state that applies Rd to the CC lines */
TC_CC_RD_SUPER_STATE,
/** Disabled State */
TC_DISABLED_STATE,
/** Error Recovery State */
TC_ERROR_RECOVERY_STATE,
/** Unattached Sink State */
TC_UNATTACHED_SNK_STATE,
/** Attach Wait Sink State */
TC_ATTACH_WAIT_SNK_STATE,
/** Attached Sink State */
TC_ATTACHED_SNK_STATE,
/** Disabled State */
TC_DISABLED_STATE,
/** Error Recovery State */
TC_ERROR_RECOVERY_STATE,
/** Number of TC States */
TC_STATE_COUNT
@@ -106,4 +105,29 @@ enum tc_state_t tc_get_state(const struct device *dev);
*/
void tc_pd_enable(const struct device *dev, const bool enable);
/**
* @brief This function must only be called in the subsystem init function.
*
* @param dev Pointer to the device structure for the driver instance.
*/
void tc_subsys_init(const struct device *dev);
/**
* @brief Run the TC Layer state machine. This is called from the subsystem's
* port stack thread.
*
* @param dev Pointer to the device structure for the driver instance.
* @param dpm_request Device Policy Manager request
*/
void tc_run(const struct device *dev, int32_t dpm_request);
/**
* @brief Checks if the TC Layer is in an Attached state
*
* @param dev Pointer to the device structure for the driver instance.
*
* @retval true if TC Layer is in an Attached state, else false
*/
bool tc_is_in_attached_state(const struct device *dev);
#endif /* ZEPHYR_SUBSYS_USBC_TC_COMMON_INTERNAL_H_ */