drivers: i3c: add i3c-rtio

This adds an RTIO API for I3C along with a default handler for drivers that do not implement submit natively.

Signed-off-by: Ryan McClelland <ryanmcclelland@meta.com>
Ryan McClelland 2024-10-21 13:41:47 -07:00 committed by Benjamin Cabé
commit 0e5916f8e4
8 changed files with 760 additions and 1 deletion

@@ -50,3 +50,9 @@ zephyr_library_sources_ifdef(
CONFIG_I3C_TEST
i3c_test.c
)
zephyr_library_sources_ifdef(
CONFIG_I3C_RTIO
i3c_rtio.c
i3c_rtio_default.c
)

@@ -117,6 +117,50 @@ config I3C_INIT_RSTACT
	  This determines whether the bus initialization routine
	  sends a reset action command to I3C targets.
config I3C_RTIO
	bool "I3C RTIO API"
	select EXPERIMENTAL
	select RTIO
	select RTIO_WORKQ
	help
	  API and implementations of I3C for RTIO.

if I3C_RTIO

config I3C_RTIO_SQ_SIZE
	int "Submission queue size for blocking calls"
	default 4
	help
	  Blocking i3c calls when I3C_RTIO is enabled are copied into a per driver
	  submission queue. The queue depth determines the number of possible i3c_msg
	  structs that may be in the array given to i3c_transfer. A sensible default
	  is 4, given the device address, register address, and a value to be read
	  or written.

config I3C_RTIO_CQ_SIZE
	int "Completion queue size for blocking calls"
	default 4
	help
	  Blocking i3c calls when I3C_RTIO is enabled consume completions from a per
	  driver completion queue. The queue depth should be at least as large as the
	  submission queue so that every i3c_msg given to i3c_transfer can report its
	  result. A sensible default is 4, matching I3C_RTIO_SQ_SIZE.

config I3C_RTIO_FALLBACK_MSGS
	int "Number of available i3c_msg structs for the default handler to use"
	default 4
	help
	  When RTIO is used with a driver that does not yet implement the submit API
	  natively, the submissions are converted back to struct i3c_msg values that
	  are given to i3c_transfer. This requires some number of msgs be available
	  to convert the submissions into on the stack. MISRA rules dictate we must
	  know this in advance.

	  In all likelihood 4 is going to work for everyone, but if you are using
	  RTIO with a driver that does not implement submit natively and your
	  transactions contain more i3c_msgs than this, increase this value.

endif # I3C_RTIO
comment "Device Drivers" comment "Device Drivers"
rsource "Kconfig.nxp" rsource "Kconfig.nxp"

drivers/i3c/i3c_rtio.c (new file)

@@ -0,0 +1,254 @@
/*
* Copyright (c) 2023 Intel Corporation
* Copyright (c) 2024 Meta Platforms
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/drivers/i3c.h>
#include <zephyr/drivers/i3c/rtio.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/sys/mpsc_lockfree.h>
#include <zephyr/sys/__assert.h>
#define LOG_LEVEL CONFIG_I3C_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(i3c_rtio);
const struct rtio_iodev_api i3c_iodev_api = {
.submit = i3c_iodev_submit,
};
struct rtio_sqe *i3c_rtio_copy(struct rtio *r, struct rtio_iodev *iodev, const struct i3c_msg *msgs,
uint8_t num_msgs)
{
__ASSERT(num_msgs > 0, "Expecting at least one message to copy");
struct rtio_sqe *sqe = NULL;
for (uint8_t i = 0; i < num_msgs; i++) {
sqe = rtio_sqe_acquire(r);
if (sqe == NULL) {
rtio_sqe_drop_all(r);
return NULL;
}
if (msgs[i].flags & I3C_MSG_READ) {
rtio_sqe_prep_read(sqe, iodev, RTIO_PRIO_NORM, msgs[i].buf, msgs[i].len,
NULL);
} else {
rtio_sqe_prep_write(sqe, iodev, RTIO_PRIO_NORM, msgs[i].buf, msgs[i].len,
NULL);
}
sqe->flags |= RTIO_SQE_TRANSACTION;
sqe->iodev_flags =
((msgs[i].flags & I3C_MSG_STOP) ? RTIO_IODEV_I3C_STOP : 0) |
((msgs[i].flags & I3C_MSG_RESTART) ? RTIO_IODEV_I3C_RESTART : 0) |
((msgs[i].flags & I3C_MSG_HDR) ? RTIO_IODEV_I3C_HDR : 0) |
((msgs[i].flags & I3C_MSG_NBCH) ? RTIO_IODEV_I3C_NBCH : 0) |
RTIO_IODEV_I3C_HDR_MODE_SET(msgs[i].hdr_mode) |
RTIO_IODEV_I3C_HDR_CMD_CODE_SET(msgs[i].hdr_cmd_code);
}
sqe->flags &= ~RTIO_SQE_TRANSACTION;
return sqe;
}
void i3c_rtio_init(struct i3c_rtio *ctx)
{
k_sem_init(&ctx->lock, 1, 1);
mpsc_init(&ctx->io_q);
ctx->txn_curr = NULL;
ctx->txn_head = NULL;
ctx->iodev.api = &i3c_iodev_api;
}
/**
* @private
* @brief Setup the next transaction (could be a single op) if needed
*
* @retval true New transaction to start with the hardware is setup
* @retval false No new transaction to start
*/
static bool i3c_rtio_next(struct i3c_rtio *ctx, bool completion)
{
k_spinlock_key_t key = k_spin_lock(&ctx->slock);
/* Already working on something, bail early */
if (!completion && ctx->txn_head != NULL) {
k_spin_unlock(&ctx->slock, key);
return false;
}
struct mpsc_node *next = mpsc_pop(&ctx->io_q);
/* Nothing left to do */
if (next == NULL) {
ctx->txn_head = NULL;
ctx->txn_curr = NULL;
k_spin_unlock(&ctx->slock, key);
return false;
}
ctx->txn_head = CONTAINER_OF(next, struct rtio_iodev_sqe, q);
ctx->txn_curr = ctx->txn_head;
k_spin_unlock(&ctx->slock, key);
return true;
}
bool i3c_rtio_complete(struct i3c_rtio *ctx, int status)
{
/* On error bail */
if (status < 0) {
rtio_iodev_sqe_err(ctx->txn_head, status);
return i3c_rtio_next(ctx, true);
}
/* Try for next submission in the transaction */
ctx->txn_curr = rtio_txn_next(ctx->txn_curr);
if (ctx->txn_curr) {
return true;
}
rtio_iodev_sqe_ok(ctx->txn_head, status);
return i3c_rtio_next(ctx, true);
}
bool i3c_rtio_submit(struct i3c_rtio *ctx, struct rtio_iodev_sqe *iodev_sqe)
{
mpsc_push(&ctx->io_q, &iodev_sqe->q);
return i3c_rtio_next(ctx, false);
}
int i3c_rtio_transfer(struct i3c_rtio *ctx, struct i3c_msg *msgs, uint8_t num_msgs,
struct i3c_device_desc *desc)
{
struct rtio_iodev *iodev = &ctx->iodev;
struct rtio *const r = ctx->r;
struct rtio_sqe *sqe = NULL;
struct rtio_cqe *cqe = NULL;
int res = 0;
k_sem_take(&ctx->lock, K_FOREVER);
ctx->i3c_desc = desc;
sqe = i3c_rtio_copy(r, iodev, msgs, num_msgs);
if (sqe == NULL) {
LOG_ERR("Not enough submission queue entries");
res = -ENOMEM;
goto out;
}
rtio_submit(r, 1);
cqe = rtio_cqe_consume(r);
while (cqe != NULL) {
res = cqe->result;
rtio_cqe_release(r, cqe);
cqe = rtio_cqe_consume(r);
}
out:
k_sem_give(&ctx->lock);
return res;
}
int i3c_rtio_configure(struct i3c_rtio *ctx, enum i3c_config_type type, void *config)
{
struct rtio_iodev *iodev = &ctx->iodev;
struct rtio *const r = ctx->r;
struct rtio_sqe *sqe = NULL;
struct rtio_cqe *cqe = NULL;
int res = 0;
k_sem_take(&ctx->lock, K_FOREVER);
sqe = rtio_sqe_acquire(r);
if (sqe == NULL) {
LOG_ERR("Not enough submission queue entries");
res = -ENOMEM;
goto out;
}
sqe->op = RTIO_OP_I3C_CONFIGURE;
sqe->iodev = iodev;
sqe->i3c_config.type = type;
sqe->i3c_config.config = config;
rtio_submit(r, 1);
cqe = rtio_cqe_consume(r);
res = cqe->result;
rtio_cqe_release(r, cqe);
out:
k_sem_give(&ctx->lock);
return res;
}
int i3c_rtio_ccc(struct i3c_rtio *ctx, struct i3c_ccc_payload *payload)
{
struct rtio_iodev *iodev = &ctx->iodev;
struct rtio *const r = ctx->r;
struct rtio_sqe *sqe = NULL;
struct rtio_cqe *cqe = NULL;
int res = 0;
k_sem_take(&ctx->lock, K_FOREVER);
sqe = rtio_sqe_acquire(r);
if (sqe == NULL) {
LOG_ERR("Not enough submission queue entries");
res = -ENOMEM;
goto out;
}
sqe->op = RTIO_OP_I3C_CCC;
sqe->iodev = iodev;
sqe->ccc_payload = payload;
rtio_submit(r, 1);
cqe = rtio_cqe_consume(r);
res = cqe->result;
rtio_cqe_release(r, cqe);
out:
k_sem_give(&ctx->lock);
return res;
}
int i3c_rtio_recover(struct i3c_rtio *ctx)
{
struct rtio_iodev *iodev = &ctx->iodev;
struct rtio *const r = ctx->r;
struct rtio_sqe *sqe = NULL;
struct rtio_cqe *cqe = NULL;
int res = 0;
k_sem_take(&ctx->lock, K_FOREVER);
sqe = rtio_sqe_acquire(r);
if (sqe == NULL) {
LOG_ERR("Not enough submission queue entries");
res = -ENOMEM;
goto out;
}
sqe->op = RTIO_OP_I3C_RECOVER;
sqe->iodev = iodev;
rtio_submit(r, 1);
cqe = rtio_cqe_consume(r);
res = cqe->result;
rtio_cqe_release(r, cqe);
out:
k_sem_give(&ctx->lock);
return res;
}
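A minimal sketch of how a controller driver could plug into this context, assuming a hypothetical my_i3c driver: my_i3c_start_xfer stands in for the hardware-specific code that programs the submission at ctx->txn_curr, and i3c_rtio_init() is expected to have been called once from the driver's init function.

#include <zephyr/drivers/i3c.h>
#include <zephyr/drivers/i3c/rtio.h>

I3C_RTIO_DEFINE(my_i3c_rtio_ctx, CONFIG_I3C_RTIO_SQ_SIZE, CONFIG_I3C_RTIO_CQ_SIZE);

/* Hardware specific: program the transfer described by ctx->txn_curr. */
static void my_i3c_start_xfer(const struct device *dev, struct i3c_rtio *ctx);

static void my_i3c_iodev_submit(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe)
{
	struct i3c_rtio *ctx = &my_i3c_rtio_ctx;

	/* Queue the submission; only touch the hardware if nothing was in flight. */
	if (i3c_rtio_submit(ctx, iodev_sqe)) {
		my_i3c_start_xfer(dev, ctx);
	}
}

static void my_i3c_isr(const struct device *dev)
{
	struct i3c_rtio *ctx = &my_i3c_rtio_ctx;

	/* Report the finished submission and start the next one if any is queued. */
	if (i3c_rtio_complete(ctx, 0)) {
		my_i3c_start_xfer(dev, ctx);
	}
}

The same my_i3c_iodev_submit function is what such a driver would expose through the iodev_submit slot added to struct i3c_driver_api further down.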

drivers/i3c/i3c_rtio_default.c (new file)

@@ -0,0 +1,164 @@
/*
* Copyright (c) 2024 Google LLC
* Copyright (c) 2024 Meta Platforms
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/drivers/i3c.h>
#include <zephyr/drivers/i3c/rtio.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/rtio/work.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(i3c_rtio, CONFIG_I3C_LOG_LEVEL);
static inline void i3c_msg_from_rx(const struct rtio_iodev_sqe *iodev_sqe, struct i3c_msg *msg)
{
__ASSERT_NO_MSG(iodev_sqe->sqe.op == RTIO_OP_RX);
msg->buf = iodev_sqe->sqe.rx.buf;
msg->len = iodev_sqe->sqe.rx.buf_len;
msg->flags =
((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_STOP) ? I3C_MSG_STOP : 0) |
((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_RESTART) ? I3C_MSG_RESTART : 0) |
((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_HDR) ? I3C_MSG_HDR : 0) |
((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_NBCH) ? I3C_MSG_NBCH : 0) |
I3C_MSG_READ;
}
static inline void i3c_msg_from_tx(const struct rtio_iodev_sqe *iodev_sqe, struct i3c_msg *msg)
{
__ASSERT_NO_MSG(iodev_sqe->sqe.op == RTIO_OP_TX);
msg->buf = (uint8_t *)iodev_sqe->sqe.tx.buf;
msg->len = iodev_sqe->sqe.tx.buf_len;
msg->flags =
((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_STOP) ? I3C_MSG_STOP : 0) |
((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_RESTART) ? I3C_MSG_RESTART : 0) |
((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_HDR) ? I3C_MSG_HDR : 0) |
((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_NBCH) ? I3C_MSG_NBCH : 0) |
I3C_MSG_WRITE;
}
static inline void i3c_msg_from_tiny_tx(const struct rtio_iodev_sqe *iodev_sqe, struct i3c_msg *msg)
{
__ASSERT_NO_MSG(iodev_sqe->sqe.op == RTIO_OP_TINY_TX);
msg->buf = (uint8_t *)iodev_sqe->sqe.tiny_tx.buf;
msg->len = iodev_sqe->sqe.tiny_tx.buf_len;
msg->flags =
((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_STOP) ? I3C_MSG_STOP : 0) |
((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_RESTART) ? I3C_MSG_RESTART : 0) |
((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_HDR) ? I3C_MSG_HDR : 0) |
((iodev_sqe->sqe.iodev_flags & RTIO_IODEV_I3C_NBCH) ? I3C_MSG_NBCH : 0) |
I3C_MSG_WRITE;
}
void i3c_iodev_submit_work_handler(struct rtio_iodev_sqe *txn_first)
{
const struct i3c_iodev_data *data =
(const struct i3c_iodev_data *)txn_first->sqe.iodev->data;
struct i3c_device_desc *desc;
LOG_DBG("Sync RTIO work item for: %p", (void *)txn_first);
uint32_t num_msgs = 0;
int rc = 0;
struct rtio_iodev_sqe *txn_last = txn_first;
/* TODO: there really needs to be a compile time way to get the i3c_device_desc */
desc = i3c_device_find(data->bus, &data->dev_id);
if (!desc) {
LOG_ERR("Cannot find I3C device descriptor");
rc = -ENODEV;
rtio_iodev_sqe_err(txn_first, rc);
return;
}
/* Allocate the i3c_msg's on the stack, to do so
* the count of messages needs to be determined.
*/
do {
switch (txn_last->sqe.op) {
case RTIO_OP_RX:
case RTIO_OP_TX:
case RTIO_OP_TINY_TX:
num_msgs++;
break;
default:
LOG_ERR("Invalid op code %d for submission %p", txn_last->sqe.op,
(void *)&txn_last->sqe);
rc = -EIO;
break;
}
txn_last = rtio_txn_next(txn_last);
} while (rc == 0 && txn_last != NULL);
if (rc != 0) {
rtio_iodev_sqe_err(txn_first, rc);
return;
}
/* Allocate msgs on the stack, MISRA doesn't like VLAs so we need a statically
* sized array here. It's pretty unlikely we have more than 4 i3c messages
* in a transaction as we typically would only have 2, one to write a
* register address, and another to read/write the register into an array
*/
if (num_msgs > CONFIG_I3C_RTIO_FALLBACK_MSGS) {
LOG_ERR("At most CONFIG_I3C_RTIO_FALLBACK_MSGS"
" submissions in a transaction are"
" allowed in the default handler");
rtio_iodev_sqe_err(txn_first, -ENOMEM);
return;
}
struct i3c_msg msgs[CONFIG_I3C_RTIO_FALLBACK_MSGS];
rc = 0;
txn_last = txn_first;
/* Copy the transaction into the stack allocated msgs */
for (int i = 0; i < num_msgs; i++) {
switch (txn_last->sqe.op) {
case RTIO_OP_RX:
i3c_msg_from_rx(txn_last, &msgs[i]);
break;
case RTIO_OP_TX:
i3c_msg_from_tx(txn_last, &msgs[i]);
break;
case RTIO_OP_TINY_TX:
i3c_msg_from_tiny_tx(txn_last, &msgs[i]);
break;
default:
rc = -EIO;
break;
}
txn_last = rtio_txn_next(txn_last);
}
if (rc == 0) {
__ASSERT_NO_MSG(num_msgs > 0);
rc = i3c_transfer(desc, msgs, num_msgs);
}
if (rc != 0) {
rtio_iodev_sqe_err(txn_first, rc);
} else {
rtio_iodev_sqe_ok(txn_first, 0);
}
}
void i3c_iodev_submit_fallback(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe)
{
LOG_DBG("Executing fallback for dev: %p, sqe: %p", (void *)dev, (void *)iodev_sqe);
struct rtio_work_req *req = rtio_work_req_alloc();
if (req == NULL) {
rtio_iodev_sqe_err(iodev_sqe, -ENOMEM);
return;
}
rtio_work_req_submit(req, iodev_sqe, i3c_iodev_submit_work_handler);
}
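Drivers that have not implemented submit natively can instead point the new iodev_submit slot at this fallback; a sketch with a hypothetical my_i3c API table:

#include <zephyr/drivers/i3c.h>

static const struct i3c_driver_api my_i3c_api = {
	/* ...the driver's existing configure/transfer/CCC ops... */
#ifdef CONFIG_I3C_RTIO
	/* No native RTIO path yet: the work-queue based fallback converts
	 * submissions back into i3c_msg arrays and calls i3c_transfer().
	 */
	.iodev_submit = i3c_iodev_submit_fallback,
#endif
};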

include/zephyr/drivers/i3c.h

@@ -29,6 +29,7 @@
#include <zephyr/drivers/i2c.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/util.h>
#include <zephyr/rtio/rtio.h>
#ifdef __cplusplus
extern "C" {
@@ -867,6 +868,21 @@ __subsystem struct i3c_driver_api {
*/
int (*target_tx_write)(const struct device *dev,
uint8_t *buf, uint16_t len, uint8_t hdr_mode);
#ifdef CONFIG_I3C_RTIO
/**
* RTIO
*
* @see i3c_iodev_submit()
*
* @param dev Pointer to the controller device driver instance.
* @param iodev_sqe Pointer to the RTIO submission queue entry.
*
* @return See i3c_iodev_submit()
*/
void (*iodev_submit)(const struct device *dev,
struct rtio_iodev_sqe *iodev_sqe);
#endif
};
/**
@@ -2130,6 +2146,80 @@ bool i3c_bus_has_sec_controller(const struct device *dev);
*/
int i3c_bus_deftgts(const struct device *dev);
#if defined(CONFIG_I3C_RTIO) || defined(__DOXYGEN__)
struct i3c_iodev_data {
const struct device *bus;
const struct i3c_device_id dev_id;
};
/**
* @brief Fallback submit implementation
*
* This implementation will schedule a blocking I3C transaction on the bus via the RTIO work
* queue. It is only used if the I3C driver did not implement the iodev_submit function.
*
* @param dev Pointer to the device structure for an I3C controller driver.
* @param iodev_sqe Prepared submissions queue entry connected to an iodev
* defined by I3C_DT_IODEV_DEFINE.
*/
void i3c_iodev_submit_fallback(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe);
/**
* @brief Submit request(s) to an I3C device with RTIO
*
* @param iodev_sqe Prepared submissions queue entry connected to an iodev
* defined by I3C_DT_IODEV_DEFINE.
*/
static inline void i3c_iodev_submit(struct rtio_iodev_sqe *iodev_sqe)
{
const struct i3c_iodev_data *data =
(const struct i3c_iodev_data *)iodev_sqe->sqe.iodev->data;
const struct i3c_driver_api *api = (const struct i3c_driver_api *)data->bus->api;
if (api->iodev_submit == NULL) {
rtio_iodev_sqe_err(iodev_sqe, -ENOSYS);
return;
}
api->iodev_submit(data->bus, iodev_sqe);
}
extern const struct rtio_iodev_api i3c_iodev_api;
/**
* @brief Define an iodev for a given dt node on the bus
*
* These do not need to be shared globally but doing so
* will save a small amount of memory.
*
* @param name Symbolic name of the iodev to define
* @param node_id Devicetree node identifier
*/
#define I3C_DT_IODEV_DEFINE(name, node_id) \
const struct i3c_iodev_data _i3c_iodev_data_##name = { \
.bus = DEVICE_DT_GET(DT_BUS(node_id)), \
.dev_id = I3C_DEVICE_ID_DT(node_id), \
}; \
RTIO_IODEV_DEFINE(name, &i3c_iodev_api, (void *)&_i3c_iodev_data_##name)
/**
* @brief Copy the i3c_msgs into a set of RTIO requests
*
* @param r RTIO context
* @param iodev RTIO IODev to target for the submissions
* @param msgs Array of messages
* @param num_msgs Number of i3c msgs in array
*
* @retval sqe Last submission in the queue added
* @retval NULL Not enough memory in the context to copy the requests
*/
struct rtio_sqe *i3c_rtio_copy(struct rtio *r,
struct rtio_iodev *iodev,
const struct i3c_msg *msgs,
uint8_t num_msgs);
#endif /* CONFIG_I3C_RTIO */
/*
* This needs to be after declaration of struct i3c_driver_api,
* or else compiler complains about undefined type inside
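As a usage sketch from the application side (not part of this patch), a register read through such an iodev could look roughly like the following; the sensor0 node label, queue sizes, and single-byte register layout are assumptions, and completion handling mirrors i3c_rtio_transfer() above.

#include <errno.h>
#include <zephyr/drivers/i3c.h>
#include <zephyr/rtio/rtio.h>

I3C_DT_IODEV_DEFINE(sensor_iodev, DT_NODELABEL(sensor0)); /* assumed node label */
RTIO_DEFINE(sensor_rtio, 4, 4);

static int sensor_read_reg(uint8_t reg, uint8_t *val)
{
	struct rtio_sqe *wr = rtio_sqe_acquire(&sensor_rtio);
	struct rtio_sqe *rd = rtio_sqe_acquire(&sensor_rtio);
	struct rtio_cqe *cqe;
	int rc = 0;

	if (wr == NULL || rd == NULL) {
		rtio_sqe_drop_all(&sensor_rtio);
		return -ENOMEM;
	}

	/* Write the register address and read the value back as one transaction. */
	rtio_sqe_prep_tiny_write(wr, &sensor_iodev, RTIO_PRIO_NORM, &reg, 1, NULL);
	wr->flags |= RTIO_SQE_TRANSACTION;
	rtio_sqe_prep_read(rd, &sensor_iodev, RTIO_PRIO_NORM, val, 1, NULL);
	rd->iodev_flags |= RTIO_IODEV_I3C_RESTART | RTIO_IODEV_I3C_STOP;

	rtio_submit(&sensor_rtio, 1);

	cqe = rtio_cqe_consume(&sensor_rtio);
	while (cqe != NULL) {
		if (cqe->result < 0) {
			rc = cqe->result;
		}
		rtio_cqe_release(&sensor_rtio, cqe);
		cqe = rtio_cqe_consume(&sensor_rtio);
	}

	return rc;
}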

include/zephyr/drivers/i3c/rtio.h (new file)

@@ -0,0 +1,126 @@
/*
* Copyright (c) 2024 Intel Corporation
* Copyright (c) 2024 Meta Platforms
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_DRIVERS_I3C_RTIO_H_
#define ZEPHYR_DRIVERS_I3C_RTIO_H_
#include <zephyr/kernel.h>
#include <zephyr/drivers/i3c.h>
#include <zephyr/rtio/rtio.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Driver context for implementing i3c with rtio
*/
struct i3c_rtio {
struct k_sem lock;
struct k_spinlock slock;
struct rtio *r;
struct mpsc io_q;
struct rtio_iodev iodev;
struct rtio_iodev_sqe *txn_head;
struct rtio_iodev_sqe *txn_curr;
struct i3c_device_desc *i3c_desc;
};
/**
* @brief Statically define an i3c_rtio context
*
* @param _name Symbolic name of the context
* @param _sq_sz Submission queue entry pool size
* @param _cq_sz Completion queue entry pool size
*/
#define I3C_RTIO_DEFINE(_name, _sq_sz, _cq_sz) \
RTIO_DEFINE(CONCAT(_name, _r), _sq_sz, _cq_sz); \
static struct i3c_rtio _name = { \
.r = &CONCAT(_name, _r), \
};
/**
* @brief Copy an array of i3c_msgs to rtio submissions and a transaction
*
* @retval sqe Last sqe setup in the copy
* @retval NULL Not enough memory to copy the transaction
*/
struct rtio_sqe *i3c_rtio_copy(struct rtio *r, struct rtio_iodev *iodev, const struct i3c_msg *msgs,
uint8_t num_msgs);
/**
* @brief Initialize an i3c rtio context
*
* @param ctx I3C RTIO driver context
*/
void i3c_rtio_init(struct i3c_rtio *ctx);
/**
* @brief Signal that the current (ctx->txn_curr) submission has been completed
*
* @param ctx I3C RTIO driver context
* @param status Completion status, negative values are errors
*
* @retval true Next submission is ready to start
* @retval false No more submissions to work on
*/
bool i3c_rtio_complete(struct i3c_rtio *ctx, int status);
/**
* @brief Submit, atomically, a submission to work on at some point
*
* @retval true Next submission is ready to start
* @retval false No new submission to start or submissions are in progress already
*/
bool i3c_rtio_submit(struct i3c_rtio *ctx, struct rtio_iodev_sqe *iodev_sqe);
/**
* @brief Configure the I3C bus controller
*
* Provides a compatible API for the existing i3c_configure API, and blocks the
* caller until the transfer completes.
*
* See i3c_configure().
*/
int i3c_rtio_configure(struct i3c_rtio *ctx, enum i3c_config_type type, void *config);
/**
* @brief Transfer i3c messages in a blocking call
*
* Provides a compatible API for the existing i3c_transfer API, and blocks the caller
* until the transfer completes.
*
* See i3c_transfer().
*/
int i3c_rtio_transfer(struct i3c_rtio *ctx, struct i3c_msg *msgs, uint8_t num_msgs,
struct i3c_device_desc *desc);
/**
* @brief Perform an I3C bus recovery in a blocking call
*
* Provides a compatible API for the existing i3c_recover API, and blocks the caller
* until the process completes.
*
* See i3c_recover().
*/
int i3c_rtio_recover(struct i3c_rtio *ctx);
/**
* @brief Perform an I3C CCC in a blocking call
*
* Provides a compatible API for the existing i3c_do_ccc API, and blocks the caller
* until the process completes.
*
* See i3c_do_ccc().
*/
int i3c_rtio_ccc(struct i3c_rtio *ctx, struct i3c_ccc_payload *payload);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_DRIVERS_I3C_RTIO_H_ */
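A driver built around this context can then implement its blocking i3c_driver_api entries as thin wrappers, as in the sketch below; my_i3c_data is hypothetical and holds a pointer to an I3C_RTIO_DEFINE() instance, and the configure/CCC paths assume the driver's submit path also handles the RTIO_OP_I3C_* op codes.

#include <zephyr/drivers/i3c.h>
#include <zephyr/drivers/i3c/rtio.h>

struct my_i3c_data {
	struct i3c_rtio *rtio_ctx; /* points at an I3C_RTIO_DEFINE() instance */
};

static int my_i3c_transfer(const struct device *dev, struct i3c_device_desc *target,
			   struct i3c_msg *msgs, uint8_t num_msgs)
{
	struct my_i3c_data *data = dev->data;

	return i3c_rtio_transfer(data->rtio_ctx, msgs, num_msgs, target);
}

static int my_i3c_configure(const struct device *dev, enum i3c_config_type type, void *config)
{
	struct my_i3c_data *data = dev->data;

	return i3c_rtio_configure(data->rtio_ctx, type, config);
}

static int my_i3c_do_ccc(const struct device *dev, struct i3c_ccc_payload *payload)
{
	struct my_i3c_data *data = dev->data;

	return i3c_rtio_ccc(data->rtio_ctx, payload);
}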

include/zephyr/rtio/rtio.h

@@ -207,6 +207,60 @@ extern "C" {
*/
#define RTIO_IODEV_I2C_10_BITS BIT(3)
/**
* @brief Equivalent to the I3C_MSG_STOP flag
*/
#define RTIO_IODEV_I3C_STOP BIT(1)
/**
* @brief Equivalent to the I3C_MSG_RESTART flag
*/
#define RTIO_IODEV_I3C_RESTART BIT(2)
/**
* @brief Equivalent to the I3C_MSG_HDR flag
*/
#define RTIO_IODEV_I3C_HDR BIT(3)
/**
* @brief Equivalent to the I3C_MSG_NBCH flag
*/
#define RTIO_IODEV_I3C_NBCH BIT(4)
/**
* @brief I3C HDR Mode Mask
*/
#define RTIO_IODEV_I3C_HDR_MODE_MASK GENMASK(15, 8)
/**
* @brief Set the I3C HDR Mode
*/
#define RTIO_IODEV_I3C_HDR_MODE_SET(flags) \
FIELD_PREP(RTIO_IODEV_I3C_HDR_MODE_MASK, flags)
/**
* @brief Get the I3C HDR Mode
*/
#define RTIO_IODEV_I3C_HDR_MODE_GET(flags) \
FIELD_GET(RTIO_IODEV_I3C_HDR_MODE_MASK, flags)
/**
* @brief I3C HDR 7b Command Code
*/
#define RTIO_IODEV_I3C_HDR_CMD_CODE_MASK GENMASK(22, 16)
/**
* @brief Set the I3C HDR 7b Command Code
*/
#define RTIO_IODEV_I3C_HDR_CMD_CODE_SET(flags) \
FIELD_PREP(RTIO_IODEV_I3C_HDR_CMD_CODE_MASK, flags)
/**
* @brief Get the I3C HDR 7b Command Code
*/
#define RTIO_IODEV_I3C_HDR_CMD_CODE_GET(flags) \
FIELD_GET(RTIO_IODEV_I3C_HDR_CMD_CODE_MASK, flags)
/** @cond ignore */
struct rtio;
struct rtio_cqe;
@@ -236,7 +290,7 @@ struct rtio_sqe {
uint16_t flags; /**< Op Flags */
- uint16_t iodev_flags; /**< Op iodev flags */
+ uint32_t iodev_flags; /**< Op iodev flags */
uint16_t _resv0;
@@ -286,6 +340,17 @@ struct rtio_sqe {
/** OP_I2C_CONFIGURE */
uint32_t i2c_config;
/** OP_I3C_CONFIGURE */
struct {
/* enum i3c_config_type type; */
int type;
void *config;
} i3c_config;
/** OP_I3C_CCC */
/* struct i3c_ccc_payload *ccc_payload; */
void *ccc_payload;
};
};
@@ -483,6 +548,15 @@ struct rtio_iodev {
/** An operation to configure I2C buses */
#define RTIO_OP_I2C_CONFIGURE (RTIO_OP_I2C_RECOVER+1)
/** An operation to recover I3C buses */
#define RTIO_OP_I3C_RECOVER (RTIO_OP_I2C_CONFIGURE+1)
/** An operation to configure I3C buses */
#define RTIO_OP_I3C_CONFIGURE (RTIO_OP_I3C_RECOVER+1)
/** An operation to send an I3C CCC */
#define RTIO_OP_I3C_CCC (RTIO_OP_I3C_CONFIGURE+1)
/**
* @brief Prepare a nop (no op) submission
*/
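For reference, the HDR helpers compose as in this illustrative pair of functions, mirroring what i3c_rtio_copy() and the default handler do with an i3c_msg:

#include <zephyr/drivers/i3c.h>
#include <zephyr/rtio/rtio.h>

static uint32_t my_i3c_encode_hdr(const struct i3c_msg *msg)
{
	/* Pack the message's HDR mode and command code into the per-op iodev flags. */
	return RTIO_IODEV_I3C_HDR |
	       RTIO_IODEV_I3C_HDR_MODE_SET(msg->hdr_mode) |
	       RTIO_IODEV_I3C_HDR_CMD_CODE_SET(msg->hdr_cmd_code);
}

static void my_i3c_decode_hdr(uint32_t iodev_flags, uint8_t *hdr_mode, uint8_t *hdr_cmd_code)
{
	/* Drivers recover the fields on the receiving side with the GET macros. */
	*hdr_mode = RTIO_IODEV_I3C_HDR_MODE_GET(iodev_flags);
	*hdr_cmd_code = RTIO_IODEV_I3C_HDR_CMD_CODE_GET(iodev_flags);
}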

@@ -3,3 +3,4 @@ CONFIG_TEST_USERSPACE=y
CONFIG_I3C=y
CONFIG_SHELL=y
CONFIG_I3C_SHELL=y
CONFIG_I3C_RTIO=y