usb_c: Use atomic arrays for PE flags

Use atomic arrays for the PE flags because the total number of
Policy Engine flags could exceed 32, more than a single atomic_t
can hold on 32-bit platforms.

Signed-off-by: Sam Hurst <sbh1187@gmail.com>
Author:       Sam Hurst
Date:         2022-12-23 07:14:32 -08:00
Committed by: Carles Cufí
Commit:       5b3155ff90
3 changed files with 66 additions and 63 deletions
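
For orientation, a minimal before/after sketch of the pattern this diff applies, assuming Zephyr's <zephyr/sys/atomic.h> API; the abbreviated enum and example() are illustrative stand-ins, not code from the commit:

#include <zephyr/sys/atomic.h>

enum pe_flags {
	PE_FLAGS_TX_COMPLETE = 0,
	PE_FLAGS_MSG_RECEIVED = 1,
	/* ... remaining flags ... */
	PE_FLAGS_COUNT
};

struct policy_engine {
	/* Before: "atomic_t flags;" is a single word, so at most 32 flags
	 * on 32-bit targets. After: an array sized for PE_FLAGS_COUNT bits.
	 */
	ATOMIC_DEFINE(flags, PE_FLAGS_COUNT);
};

static void example(struct policy_engine *pe)
{
	/* ATOMIC_DEFINE declares an array, so the member decays to an
	 * atomic_t pointer: callers pass pe->flags, not &pe->flags.
	 */
	atomic_set_bit(pe->flags, PE_FLAGS_TX_COMPLETE);

	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		/* handle the pending message */
	}
}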


@@ -29,7 +29,7 @@ bool common_dpm_requests(const struct device *dev)
 	struct policy_engine *pe = data->pe;
 
 	if (pe->dpm_request > REQUEST_TC_END) {
-		atomic_set_bit(&pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
+		atomic_set_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
 
 		if (pe->dpm_request == REQUEST_PE_DR_SWAP) {
 			pe_set_state(dev, PE_DRS_SEND_SWAP);
@@ -91,7 +91,7 @@ static void pe_init(const struct device *dev)
 	struct usbc_port_data *data = dev->data;
 	struct policy_engine *pe = data->pe;
 
-	pe->flags = ATOMIC_INIT(0);
+	atomic_clear(pe->flags);
 
 	usbc_timer_init(&pe->pd_t_typec_sink_wait_cap, PD_T_TYPEC_SINK_WAIT_CAP_MAX_MS);
 	usbc_timer_init(&pe->pd_t_sender_response, PD_T_SENDER_RESPONSE_NOM_MS);
@@ -211,7 +211,7 @@ void pe_message_sent(const struct device *dev)
 	struct usbc_port_data *data = dev->data;
 	struct policy_engine *pe = data->pe;
 
-	atomic_set_bit(&pe->flags, PE_FLAGS_TX_COMPLETE);
+	atomic_set_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
 }
 
 /**
@@ -248,9 +248,9 @@ void pe_report_error(const struct device *dev, const enum pe_error e,
 	 * response.
 	 */
 	/* All error types besides transmit errors are Protocol Errors. */
-	if ((e != ERR_XMIT && atomic_test_bit(&pe->flags, PE_FLAGS_INTERRUPTIBLE_AMS) == false) ||
+	if ((e != ERR_XMIT && atomic_test_bit(pe->flags, PE_FLAGS_INTERRUPTIBLE_AMS) == false) ||
 	    e == ERR_XMIT ||
-	    (atomic_test_bit(&pe->flags, PE_FLAGS_EXPLICIT_CONTRACT) == false &&
+	    (atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT) == false &&
 	     type == PD_PACKET_SOP)) {
 		policy_notify(dev, PROTOCOL_ERROR);
 		pe_send_soft_reset(dev, type);
@@ -276,8 +276,8 @@ void pe_report_discard(const struct device *dev)
 	 * Clear local AMS indicator as our AMS message was discarded, and flag
 	 * the discard for the PE
 	 */
-	atomic_clear_bit(&pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
-	atomic_set_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED);
+	atomic_clear_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
+	atomic_set_bit(pe->flags, PE_FLAGS_MSG_DISCARDED);
 }
 
 /**
@@ -289,7 +289,7 @@ void pe_message_received(const struct device *dev)
 	struct usbc_port_data *data = dev->data;
 	struct policy_engine *pe = data->pe;
 
-	atomic_set_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED);
+	atomic_set_bit(pe->flags, PE_FLAGS_MSG_RECEIVED);
 }
 
 /**
@@ -308,7 +308,7 @@ void pe_hard_reset_sent(const struct device *dev)
 	struct usbc_port_data *data = dev->data;
 	struct policy_engine *pe = data->pe;
 
-	atomic_clear_bit(&pe->flags, PE_FLAGS_HARD_RESET_PENDING);
+	atomic_clear_bit(pe->flags, PE_FLAGS_HARD_RESET_PENDING);
 }
 
 /**
@@ -319,7 +319,7 @@ bool pe_is_explicit_contract(const struct device *dev)
 	struct usbc_port_data *data = dev->data;
 	struct policy_engine *pe = data->pe;
 
-	return atomic_test_bit(&pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
+	return atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
 }
 
 /**
@@ -331,7 +331,7 @@ bool pe_dpm_initiated_ams(const struct device *dev)
 	struct usbc_port_data *data = dev->data;
 	struct policy_engine *pe = data->pe;
 
-	return atomic_test_bit(&pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
+	return atomic_test_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
 }
 
 /** Private Policy Engine Layer API below */
@@ -387,7 +387,7 @@ void pe_send_data_msg(const struct device *dev, const enum pd_packet_type type,
 	struct policy_engine *pe = data->pe;
 
 	/* Clear any previous TX status before sending a new message */
-	atomic_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE);
+	atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
 
 	prl_send_data_msg(dev, type, msg);
 }
@@ -401,7 +401,7 @@ void pe_send_ctrl_msg(const struct device *dev, const enum pd_packet_type type,
 	struct policy_engine *pe = data->pe;
 
 	/* Clear any previous TX status before sending a new message */
-	atomic_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE);
+	atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
 
 	prl_send_ctrl_msg(dev, type, msg);
 }
@@ -602,7 +602,7 @@ void pe_drs_evaluate_swap_run(void *obj)
 	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
 	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
 
-	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) {
+	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
 		/* Only update data roles if last message sent was Accept */
 		if (prl_tx->msg_type == PD_CTRL_ACCEPT) {
 			/* Update Data Role */
@@ -614,7 +614,7 @@ void pe_drs_evaluate_swap_run(void *obj)
 						 : DATA_ROLE_IS_DFP);
 		}
 		pe_set_state(dev, PE_SNK_READY);
-	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
+	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		/*
		 * Inform Device Policy Manager that the message was
		 * discarded
@@ -647,12 +647,12 @@ void pe_drs_send_swap_run(void *obj)
 	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
 	union pd_header header;
 
-	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) {
+	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
 		/* Start Sender Response Timer */
 		usbc_timer_start(&pe->pd_t_sender_response);
 	}
 
-	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) {
+	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
 		header = prl_rx->emsg.header;
 		if (received_control_message(dev, header, PD_CTRL_REJECT)) {
 			/*
@@ -666,7 +666,7 @@ void pe_drs_send_swap_run(void *obj)
 			 * needs to Wait
 			 */
 			if (policy_wait_notify(dev, WAIT_DATA_ROLE_SWAP)) {
-				atomic_set_bit(&pe->flags, PE_FLAGS_WAIT_DATA_ROLE_SWAP);
+				atomic_set_bit(pe->flags, PE_FLAGS_WAIT_DATA_ROLE_SWAP);
 				usbc_timer_start(&pe->pd_t_wait_to_resend);
 			}
 		} else if (received_control_message(dev, header, PD_CTRL_ACCEPT)) {
@@ -685,7 +685,7 @@ void pe_drs_send_swap_run(void *obj)
 			}
 			pe_set_state(dev, PE_SNK_READY);
 			return;
-		} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
+		} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
 			/*
 			 * Inform Device Policy Manager that the message
 			 * was discarded


@@ -107,7 +107,10 @@ enum pe_flags {
 	 * This flag is set when a Wait message is received in response to a
 	 * Data Role Swap
 	 */
-	PE_FLAGS_WAIT_DATA_ROLE_SWAP = 13
+	PE_FLAGS_WAIT_DATA_ROLE_SWAP = 13,
+
+	/** Number of PE Flags */
+	PE_FLAGS_COUNT
 };
 
 /**
@@ -119,7 +122,7 @@ struct policy_engine {
 	/** Port device */
 	const struct device *dev;
 	/** state machine flags */
-	atomic_t flags;
+	ATOMIC_DEFINE(flags, PE_FLAGS_COUNT);
 	/** current port power role (SOURCE or SINK) */
 	enum tc_power_role power_role;
 	/** current port data role (DFP or UFP) */
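
For context (not part of the diff): ATOMIC_DEFINE(name, num_bits) declares an atomic_t array of ATOMIC_BITMAP_SIZE(num_bits) words, and the atomic bit helpers index into it, so the flag count is no longer capped at 32. With PE_FLAGS_COUNT == 14 the bitmap is still a single word, which is why the "pe->flags = ATOMIC_INIT(0)" initializers become atomic_clear(pe->flags): atomic_clear() zeroes one atomic_t, and here the first word holds every flag. A count-agnostic reset would clear each word, as in this hypothetical helper:

#include <stddef.h>
#include <zephyr/sys/atomic.h>

/* Hypothetical helper, assuming the pe_flags enum above; zeroes the whole
 * bitmap one atomic_t word at a time.
 */
static inline void pe_flags_reset(atomic_t *flags)
{
	for (size_t i = 0; i < ATOMIC_BITMAP_SIZE(PE_FLAGS_COUNT); i++) {
		(void)atomic_clear(&flags[i]);
	}
}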


@@ -24,7 +24,7 @@ bool sink_dpm_requests(const struct device *dev)
 	struct policy_engine *pe = data->pe;
 
 	if (pe->dpm_request > REQUEST_TC_END) {
-		atomic_set_bit(&pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
+		atomic_set_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
 
 		if (pe->dpm_request == REQUEST_PE_GET_SRC_CAPS) {
 			pe_set_state(dev, PE_SNK_GET_SOURCE_CAP);
@@ -52,7 +52,7 @@ void pe_snk_startup_entry(void *obj)
 	pe->power_role = TC_ROLE_SINK;
 
 	/* Invalidate explicit contract */
-	atomic_clear_bit(&pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
+	atomic_clear_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
 
 	policy_notify(dev, NOT_PD_CONNECTED);
 }
@@ -129,7 +129,7 @@ void pe_snk_wait_for_capabilities_run(void *obj)
 	 * Transition to the PE_SNK_Evaluate_Capability state when:
 	 * 1) A Source_Capabilities Message is received.
 	 */
-	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) {
+	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
 		header = prl_rx->emsg.header;
 		if (received_data_message(dev, header, PD_DATA_SOURCE_CAP)) {
 			pe_set_state(dev, PE_SNK_EVALUATE_CAPABILITY);
@@ -139,7 +139,7 @@ void pe_snk_wait_for_capabilities_run(void *obj)
 
 	/* When the SinkWaitCapTimer times out, perform a Hard Reset. */
 	if (usbc_timer_expired(&pe->pd_t_typec_sink_wait_cap)) {
-		atomic_set_bit(&pe->flags, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT);
+		atomic_set_bit(pe->flags, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT);
 		pe_set_state(dev, PE_SNK_HARD_RESET);
 	}
 }
@@ -216,7 +216,7 @@ void pe_snk_select_capability_run(void *obj)
 	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
 	union pd_header header;
 
-	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
+	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
 		/*
 		 * The sent REQUEST message was discarded. This can be at
 		 * the start of an AMS or in the middle. Handle what to
@@ -229,12 +229,12 @@ void pe_snk_select_capability_run(void *obj)
 		} else {
 			pe_set_state(dev, PE_SNK_READY);
 		}
-	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) {
+	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
 		/* Start the SenderResponseTimer */
 		usbc_timer_start(&pe->pd_t_sender_response);
 	}
 
-	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) {
+	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
 		header = prl_rx->emsg.header;
 
 		/*
@@ -257,7 +257,7 @@ void pe_snk_select_capability_run(void *obj)
 		/* Only look at control messages */
 		if (received_control_message(dev, header, PD_CTRL_ACCEPT)) {
 			/* explicit contract is now in place */
-			atomic_set_bit(&pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
+			atomic_set_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
 			pe_set_state(dev, PE_SNK_TRANSITION_SINK);
 		} else if (received_control_message(dev, header, PD_CTRL_REJECT) ||
 			   received_control_message(dev, header, PD_CTRL_WAIT)) {
@@ -265,14 +265,14 @@ void pe_snk_select_capability_run(void *obj)
 			 * We had a previous explicit contract, so transition to
 			 * PE_SNK_Ready
 			 */
-			if (atomic_test_bit(&pe->flags, PE_FLAGS_EXPLICIT_CONTRACT)) {
+			if (atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT)) {
 				if (received_control_message(dev, header, PD_CTRL_WAIT)) {
 					/*
 					 * Inform Device Policy Manager that Sink
 					 * Request needs to Wait
 					 */
 					if (policy_wait_notify(dev, WAIT_SINK_REQUEST)) {
-						atomic_set_bit(&pe->flags,
+						atomic_set_bit(pe->flags,
 							       PE_FLAGS_WAIT_SINK_REQUEST);
 						usbc_timer_start(&pe->pd_t_wait_to_resend);
 					}
@@ -342,7 +342,7 @@ void pe_snk_transition_sink_run(void *obj)
 	 * Transition to the PE_SNK_Hard_Reset state when:
 	 * 1) A Protocol Error occurs.
 	 */
-	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) {
+	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
 		header = prl_rx->emsg.header;
 
 		/*
@@ -391,8 +391,8 @@ void pe_snk_ready_entry(void *obj)
 	LOG_INF("PE_SNK_Ready");
 
 	/* Clear AMS Flags */
-	atomic_clear_bit(&pe->flags, PE_FLAGS_INTERRUPTIBLE_AMS);
-	atomic_clear_bit(&pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
+	atomic_clear_bit(pe->flags, PE_FLAGS_INTERRUPTIBLE_AMS);
+	atomic_clear_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
 }
 
 /**
@@ -409,7 +409,7 @@ void pe_snk_ready_run(void *obj)
 	 * Handle incoming messages before discovery and DPMs other than hard
 	 * reset
 	 */
-	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) {
+	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
 		union pd_header header = prl_rx->emsg.header;
 
 		/* Extended Message Request */
@@ -472,10 +472,10 @@ void pe_snk_ready_run(void *obj)
 	 * Check if we are waiting to resend any messages
 	 */
 	if (usbc_timer_expired(&pe->pd_t_wait_to_resend)) {
-		if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_WAIT_SINK_REQUEST)) {
+		if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_WAIT_SINK_REQUEST)) {
 			pe_set_state(dev, PE_SNK_SELECT_CAPABILITY);
 			return;
-		} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_WAIT_DATA_ROLE_SWAP)) {
+		} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_WAIT_DATA_ROLE_SWAP)) {
 			pe_set_state(dev, PE_DRS_SEND_SWAP);
 			return;
 		}
@@ -503,7 +503,7 @@ void pe_snk_hard_reset_entry(void *obj)
 	 * greater than nHardResetCount the Sink Shall assume that the
 	 * Source is non-responsive.
 	 */
-	if (atomic_test_bit(&pe->flags, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT) &&
+	if (atomic_test_bit(pe->flags, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT) &&
 	    pe->hard_reset_counter > PD_N_HARD_RESET_COUNT) {
 		/* Inform the DPM that the port partner is not responsive */
 		policy_notify(dev, PORT_PARTNER_NOT_RESPONSIVE);
@@ -514,10 +514,10 @@ void pe_snk_hard_reset_entry(void *obj)
 	}
 
 	/* Set Hard Reset Pending Flag */
-	atomic_set_bit(&pe->flags, PE_FLAGS_HARD_RESET_PENDING);
+	atomic_set_bit(pe->flags, PE_FLAGS_HARD_RESET_PENDING);
 
-	atomic_clear_bit(&pe->flags, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT);
-	atomic_clear_bit(&pe->flags, PE_FLAGS_PROTOCOL_ERROR);
+	atomic_clear_bit(pe->flags, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT);
+	atomic_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR);
 
 	/* Request the generation of Hard Reset Signaling by the PHY Layer */
 	prl_execute_hard_reset(dev);
@@ -537,7 +537,7 @@ void pe_snk_hard_reset_run(void *obj)
 	 * Transition to the PE_SNK_Transition_to_default state when:
 	 * 1) The Hard Reset is complete.
 	 */
-	if (atomic_test_bit(&pe->flags, PE_FLAGS_HARD_RESET_PENDING)) {
+	if (atomic_test_bit(pe->flags, PE_FLAGS_HARD_RESET_PENDING)) {
 		return;
 	}
 
@@ -555,7 +555,7 @@ void pe_snk_transition_to_default_entry(void *obj)
 	LOG_INF("PE_SNK_Transition_to_default");
 
 	/* Reset flags */
-	pe->flags = ATOMIC_INIT(0);
+	atomic_clear(pe->flags);
 	pe->data_role = TC_ROLE_UFP;
 
 	/*
@@ -614,9 +614,9 @@ void pe_snk_get_source_cap_run(void *obj)
 	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
 
 	/* Wait until message is sent or dropped */
-	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) {
+	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
 		pe_set_state(dev, PE_SNK_READY);
-	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
+	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
 		pe_send_soft_reset(dev, prl_rx->emsg.type);
 	}
 }
@@ -643,7 +643,7 @@ void pe_send_soft_reset_entry(void *obj)
 	/* Reset Protocol Layer */
 	prl_reset(dev);
 
-	atomic_set_bit(&pe->flags, PE_FLAGS_SEND_SOFT_RESET);
+	atomic_set_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET);
 }
 
 /**
@@ -661,29 +661,29 @@ void pe_send_soft_reset_run(void *obj)
 		return;
 	}
 
-	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_SEND_SOFT_RESET)) {
+	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET)) {
 		/* Send Soft Reset message */
 		pe_send_ctrl_msg(dev, pe->soft_reset_sop, PD_CTRL_SOFT_RESET);
 		return;
 	}
 
-	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
+	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
 		/* Inform Device Policy Manager that the message was discarded */
 		policy_notify(dev, MSG_DISCARDED);
 		pe_set_state(dev, PE_SNK_READY);
-	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) {
+	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
 		/* Start SenderResponse timer */
 		usbc_timer_start(&pe->pd_t_sender_response);
-	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_RECEIVED)) {
+	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
 		header = prl_rx->emsg.header;
 
 		if (received_control_message(dev, header, PD_CTRL_ACCEPT)) {
 			pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
 		}
-	} else if (atomic_test_bit(&pe->flags, PE_FLAGS_PROTOCOL_ERROR) ||
+	} else if (atomic_test_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR) ||
 		   usbc_timer_expired(&pe->pd_t_sender_response)) {
-		if (atomic_test_bit(&pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
-			atomic_clear_bit(&pe->flags, PE_FLAGS_PROTOCOL_ERROR);
+		if (atomic_test_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
+			atomic_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR);
		} else {
			policy_notify(dev, PORT_PARTNER_NOT_RESPONSIVE);
		}
@@ -714,7 +714,7 @@ void pe_soft_reset_entry(void *obj)
 	/* Reset the Protocol Layer */
 	prl_reset(dev);
 
-	atomic_set_bit(&pe->flags, PE_FLAGS_SEND_SOFT_RESET);
+	atomic_set_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET);
 }
 
 /**
@@ -729,15 +729,15 @@ void pe_soft_reset_run(void *obj)
 		return;
 	}
 
-	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_SEND_SOFT_RESET)) {
+	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET)) {
 		/* Send Accept message */
 		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
 		return;
 	}
 
-	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) {
+	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
 		pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
-	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
+	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
 		pe_set_state(dev, PE_SNK_HARD_RESET);
 	}
 }
@@ -768,10 +768,10 @@ void pe_send_not_supported_run(void *obj)
 	struct policy_engine *pe = (struct policy_engine *)obj;
 	const struct device *dev = pe->dev;
 
-	if (atomic_test_bit(&pe->flags, PE_FLAGS_TX_COMPLETE) ||
-	    atomic_test_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
-		atomic_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE);
-		atomic_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED);
+	if (atomic_test_bit(pe->flags, PE_FLAGS_TX_COMPLETE) ||
+	    atomic_test_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
+		atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
+		atomic_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED);
 		pe_set_state(dev, PE_SNK_READY);
 	}
 }
@@ -833,9 +833,9 @@ void pe_snk_give_sink_cap_run(void *obj)
 	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
 
 	/* Wait until message is sent or dropped */
-	if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_TX_COMPLETE)) {
+	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
 		pe_set_state(dev, PE_SNK_READY);
-	} else if (atomic_test_and_clear_bit(&pe->flags, PE_FLAGS_MSG_DISCARDED)) {
+	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
 		pe_send_soft_reset(dev, prl_rx->emsg.type);
 	}
 }