bluetooth: controller: Implemented dynamic re-scheduling in ticker

As a part of a ticker extension interface, it is now possible to specify
a slot_window when starting a ticker. When setting the
ticks_slot_window to a non-zero value, it is requested that the node
timeout is re-located to a position within the window, where the node
does not collide with other nodes - aligning to the end of the window.
The solution takes into consideration if a node has already been
updated with drift correction (e.g. ADV randomization), subtracting this
from the window.

Signed-off-by: Morten Priess <mtpr@oticon.com>
This commit is contained in:
Morten Priess 2020-01-22 15:02:49 +01:00 committed by Alberto Escolar
commit 64c89d682f
5 changed files with 402 additions and 18 deletions

View file

@ -818,6 +818,15 @@ config BT_TICKER_COMPATIBILITY_MODE
radio RX/TX. Enabling this option disables the ticker priority- and
'must expire' features.
config BT_TICKER_EXT
bool "Ticker extensions"
default y if !BT_TICKER_COMPATIBILITY_MODE
help
This option enables ticker extensions such as re-scheduling of
ticker nodes with slot_window set to non-zero. Ticker extensions
are invoked by using available '_ext' versions of ticker interface
functions.
config BT_CTLR_USER_EXT
prompt "Enable proprietary extensions in Controller"
depends on BT_LL_SW_SPLIT

View file

@ -47,3 +47,7 @@
*/
#define HAL_TICKER_REMAINDER_RANGE \
HAL_TICKER_TICKS_TO_US(1000000)
/* Macro defining the margin for positioning re-scheduled nodes */
#define HAL_TICKER_RESCHEDULE_MARGIN \
HAL_TICKER_US_TO_TICKS(150)

View file

@ -38,3 +38,7 @@
*/
#define HAL_TICKER_REMAINDER_RANGE \
HAL_TICKER_TICKS_TO_US(1000000)
/* Macro defining the margin for positioning re-scheduled nodes */
#define HAL_TICKER_RESCHEDULE_MARGIN \
HAL_TICKER_US_TO_TICKS(150)

View file

@ -62,6 +62,9 @@ struct ticker_node {
* remainder
*/
#if !defined(CONFIG_BT_TICKER_COMPATIBILITY_MODE)
#if defined(CONFIG_BT_TICKER_EXT)
struct ticker_ext *ext_data; /* Ticker extension data */
#endif /* CONFIG_BT_TICKER_EXT */
u8_t must_expire; /* Node must expire, even if it
* collides with other nodes
*/
@ -82,6 +85,19 @@ struct ticker_node {
#define TICKER_USER_OP_TYPE_UPDATE 5
#define TICKER_USER_OP_TYPE_STOP 6
/* Slot window re-schedule states */
#define TICKER_RESCHEDULE_STATE_NONE 0
#define TICKER_RESCHEDULE_STATE_PENDING 1
#define TICKER_RESCHEDULE_STATE_DONE 2
#if defined(CONFIG_BT_TICKER_EXT)
#define TICKER_RESCHEDULE_PENDING(_ticker) \
(_ticker->ext_data && (_ticker->ext_data->reschedule_state == \
TICKER_RESCHEDULE_STATE_PENDING))
#else
#define TICKER_RESCHEDULE_PENDING(_ticker) 0
#endif
/* User operation data structure for start opcode. Used for passing start
* requests to ticker_job
*/
@ -96,6 +112,9 @@ struct ticker_user_op_start {
u32_t ticks_slot; /* Air-time reservation ticks */
ticker_timeout_func fp_timeout_func; /* Timeout callback function */
void *context; /* Context passed in timeout callback */
#if defined(CONFIG_BT_TICKER_EXT)
struct ticker_ext *ext_data; /* Ticker extension data instance */
#endif /* CONFIG_BT_TICKER_EXT */
};
/* User operation data structure for update opcode. Used for passing update
@ -694,6 +713,7 @@ static u8_t ticker_resolve_collision(struct ticker_node *nodes,
next_is_critical ||
(next_has_priority && !current_is_older) ||
(equal_priority && next_is_older))) {
/* This node must be skipped - check window */
skipped = 1;
break;
}
@ -791,17 +811,46 @@ void ticker_worker(void *param)
*/
if (ticker->ticks_slot != 0U &&
(slot_reserved || ticker_resolve_collision(node, ticker))) {
#if defined(CONFIG_BT_TICKER_EXT)
struct ticker_ext *ext_data = ticker->ext_data;
if (ext_data &&
ext_data->ticks_slot_window != 0U &&
ext_data->reschedule_state ==
TICKER_RESCHEDULE_STATE_NONE &&
(ticker->lazy_periodic <= ticker->lazy_current)) {
/* Mark node for re-scheduling in ticker_job */
ext_data->reschedule_state =
TICKER_RESCHEDULE_STATE_PENDING;
} else if (ext_data) {
/* Mark node as not re-scheduling */
ext_data->reschedule_state =
TICKER_RESCHEDULE_STATE_NONE;
}
#endif /* CONFIG_BT_TICKER_EXT */
ticker->lazy_current++;
if ((ticker->must_expire == 0U) ||
(ticker->lazy_periodic >= ticker->lazy_current)) {
/* Not a must-expire case or this is programmed
* latency. Skip this ticker node
(ticker->lazy_periodic >= ticker->lazy_current) ||
TICKER_RESCHEDULE_PENDING(ticker)) {
/* Not a must-expire case, this is programmed
* latency or pending re-schedule. Skip this
* ticker node.
*/
continue;
}
/* Continue but perform shallow expiry */
must_expire_skip = 1U;
}
#if defined(CONFIG_BT_TICKER_EXT)
if (ticker->ext_data) {
ticker->ext_data->ticks_drift = 0U;
/* Mark node as not re-scheduling */
ticker->ext_data->reschedule_state =
TICKER_RESCHEDULE_STATE_NONE;
}
#endif /* CONFIG_BT_TICKER_EXT */
#endif /* !CONFIG_BT_TICKER_COMPATIBILITY_MODE */
/* Skip if not scheduled to execute */
if (((ticker->req - ticker->ack) & 0xff) != 1U) {
@ -1056,6 +1105,21 @@ static inline void ticker_job_node_update(struct ticker_node *ticker,
ticker->ticks_to_expire_minus +=
user_op->params.update.ticks_drift_minus;
#if defined(CONFIG_BT_TICKER_EXT)
/* TODO: An improvement on this could be to only consider the drift
* (ADV => randomization) if re-scheduling fails. We would still store
* the drift ticks here, but not actually update the node. That would
* allow the ticker to use the full window for re-scheduling.
*/
struct ticker_ext *ext_data = ticker->ext_data;
if (ext_data && ext_data->ticks_slot_window != 0U) {
ext_data->ticks_drift =
user_op->params.update.ticks_drift_plus -
user_op->params.update.ticks_drift_minus;
}
#endif /* CONFIG_BT_TICKER_EXT */
ticks_to_expire_prep(ticker, ticks_current, ticks_now);
/* Update ticks_slot parameter from plus/minus input */
@ -1288,7 +1352,8 @@ static inline void ticker_job_worker_bh(struct ticker_instance *instance,
ticks_expired += ticks_to_expire;
#if !defined(CONFIG_BT_TICKER_COMPATIBILITY_MODE)
if (ticker->lazy_current != 0U) {
if (ticker->lazy_current != 0U &&
!TICKER_RESCHEDULE_PENDING(ticker)) {
instance->ticker_id_slot_previous = TICKER_NULL;
instance->ticks_slot_previous = 0U;
} else
@ -1303,9 +1368,13 @@ static inline void ticker_job_worker_bh(struct ticker_instance *instance,
instance->ticks_slot_previous = 0U;
}
/* save current ticks_slot_previous */
if (ticker->ticks_slot != 0U) {
instance->ticker_id_slot_previous = id_expired;
/* If a reschedule is set pending, we will need to keep
* the slot_previous information
*/
if (!TICKER_RESCHEDULE_PENDING(ticker) &&
ticker->ticks_slot != 0U) {
instance->ticker_id_slot_previous =
id_expired;
instance->ticks_slot_previous =
ticker->ticks_slot;
}
@ -1317,11 +1386,21 @@ static inline void ticker_job_worker_bh(struct ticker_instance *instance,
/* remove the expired ticker from head */
instance->ticker_id_head = ticker->next;
/* ticker will be restarted if periodic */
if (ticker->ticks_periodic != 0U) {
/* Ticker will be restarted if periodic or to be re-scheduled */
if ((ticker->ticks_periodic != 0U) ||
TICKER_RESCHEDULE_PENDING(ticker)) {
#if !defined(CONFIG_BT_TICKER_COMPATIBILITY_MODE)
if (TICKER_RESCHEDULE_PENDING(ticker)) {
/* Set the re-scheduled node to now. Will be
* collision resolved after all nodes are
* restarted
*/
ticker->ticks_to_expire = ticks_elapsed;
} else {
/* Reload ticks_to_expire with one period */
ticker->ticks_to_expire = ticker->ticks_periodic;
ticker->ticks_to_expire =
ticker->ticks_periodic;
}
ticker->ticks_to_expire += ticker_remainder_inc(ticker);
ticks_to_expire_prep(ticker, instance->ticks_current,
@ -1381,6 +1460,9 @@ static inline void ticker_job_op_start(struct ticker_node *ticker,
#else
ticker->must_expire = (start->lazy == TICKER_LAZY_MUST_EXPIRE) ? 1U :
0U;
#if defined(CONFIG_BT_TICKER_EXT)
ticker->ext_data = start->ext_data;
#endif /* CONFIG_BT_TICKER_EXT */
#endif /* CONFIG_BT_TICKER_COMPATIBILITY_MODE */
ticker->ticks_periodic = start->ticks_periodic;
@ -1432,6 +1514,220 @@ static inline u32_t ticker_job_insert(struct ticker_instance *instance,
return TICKER_STATUS_SUCCESS;
}
#if defined(CONFIG_BT_TICKER_EXT)
/**
 * @brief Re-schedule ticker nodes within slot_window
 *
 * @details This function is responsible for re-scheduling ticker nodes
 * which have been marked for re-scheduling in ticker_worker. These nodes
 * have a non-zero ticks_slot_window configuration, which indicates a
 * valid range in which to re-schedule the node.
 * The function iterates over nodes, and handles one re-schedule at a
 * time. After a re-schedule, nodes are once again iterated until no more
 * nodes are marked for re-scheduling.
 *
 * @param instance Pointer to ticker instance
 * @param ticks_elapsed Number of ticks elapsed since last ticker job
 *
 * @return Non-zero if at least one node was re-scheduled (caller must
 *         update the compare value), zero otherwise
 *
 * @internal
 */
static u8_t ticker_job_reschedule_in_window(struct ticker_instance *instance,
					    u32_t ticks_elapsed)
{
	struct ticker_node *nodes;
	struct ticker_node *ticker;
	struct ticker_node *node;
	struct ticker_ext *ext_data;
	u32_t window_start_ticks;
	u32_t window_end_ticks;
	u32_t ticks_to_expire;
	u32_t ticks_start_offset;
	u32_t ticks_slot_window = 0;
	u8_t ticker_id_head;
	u8_t ticker_id_iter;
	u8_t ticker_id_prev;
	u8_t rescheduling = 1U;
	u8_t rescheduled = 0U;

	nodes = &instance->nodes[0];

	/* Do until all pending re-schedules handled. Each outer iteration
	 * re-schedules at most one node, then restarts the scan from the
	 * list head, since moving a node changes the delta times of its
	 * neighbors.
	 */
	while (rescheduling) {
		ticker_id_head = instance->ticker_id_head;
		rescheduling = 0U;

		ticks_start_offset = 0U;
		window_start_ticks = 0U;
		window_end_ticks = 0U;
		ticks_to_expire = 0U;

		/* Find first pending re-schedule (list is sorted by expiry,
		 * so this is the earliest-expiring pending node)
		 */
		while (ticker_id_head != TICKER_NULL) {
			ticker = &nodes[ticker_id_head];
			ext_data = ticker->ext_data;
			if (ext_data && ext_data->reschedule_state ==
			    TICKER_RESCHEDULE_STATE_PENDING) {
				/* Pending reschedule found */
				break;
			}
			ticker_id_head = ticker->next;
		}
		if (ticker_id_head == TICKER_NULL) {
			/* Done - no more nodes marked pending */
			break;
		}

		/* Check for intersection with already active node */
		if (instance->ticker_id_slot_previous != TICKER_NULL &&
		    instance->ticks_slot_previous > ticks_elapsed) {
			/* Active node intersects - window starts after end of
			 * active slot
			 */
			window_start_ticks = instance->ticks_slot_previous -
					     ticks_elapsed;
		}

		ticker_id_iter = nodes[ticker_id_head].next;
		ticker_id_prev = TICKER_NULL;

		/* If drift was applied to this node, this must be
		 * taken into consideration. Reduce the window with
		 * the amount of drift already applied.
		 *
		 * NOTE(review): ticks_drift is s32_t compared against a
		 * u32_t window; a negative drift converts to a large
		 * unsigned value and takes the "exhausted" branch below -
		 * confirm this is the intended handling of negative drift.
		 *
		 * TODO: An improvement on this could be to only consider the
		 * drift (ADV => randomization) if re-scheduling fails. Then
		 * the ticker would have the best possible window to
		 * re-schedule in and not be restricted to
		 * ticks_slot_window - ticks_drift.
		 */
		if (ext_data->ticks_drift < ext_data->ticks_slot_window) {
			ticks_slot_window = ext_data->ticks_slot_window -
					    ext_data->ticks_drift;
		} else {
			/* Window has been exhausted - we can't reschedule */
			ticker_id_iter = TICKER_NULL;
		}

		/* Try to find available slot for re-scheduling by walking
		 * the subsequent nodes and probing the gap before each one
		 */
		while (ticker_id_iter != TICKER_NULL &&
		       ticks_start_offset + ticker->ticks_slot <=
		       ticks_slot_window) {
			node = &nodes[ticker_id_iter];

			if (node->ext_data &&
			    node->ext_data->reschedule_state ==
			    TICKER_RESCHEDULE_STATE_PENDING) {
				/* Skip other pending re-schedule nodes; they
				 * will be placed in a later outer iteration
				 */
				ticker_id_iter = node->next;
				continue;
			}

			/* Calculate end of window. Since window may be aligned
			 * with expiry of next node, we add a margin
			 */
			if (node->ticks_to_expire >
			    HAL_TICKER_RESCHEDULE_MARGIN) {
				window_end_ticks =
					MIN(ticks_slot_window,
					    ticks_start_offset +
					    node->ticks_to_expire -
					    HAL_TICKER_RESCHEDULE_MARGIN);
			} else {
				/* Next expiry is too close - try the next
				 * node
				 */
				window_end_ticks = 0;
			}

			/* Calculate new ticks_to_expire as end of window minus
			 * slot size => place at end of window.
			 */
			if (window_end_ticks > ticks_start_offset +
			    ticker->ticks_slot) {
				ticks_to_expire = window_end_ticks -
						  ticker->ticks_slot;
			} else {
				/* No space in window - try the next node */
				ticks_to_expire = 0;
			}

			/* Decide if the re-scheduling ticker node fits in the
			 * slot found - break if it fits
			 */
			if (ticks_to_expire != 0U &&
			    ticks_to_expire >= window_start_ticks &&
			    ticks_to_expire <= window_end_ticks -
			    ticker->ticks_slot) {
				/* Re-scheduled node fits before this node */
				break;
			}

			/* We didn't find a valid slot for re-scheduling - try
			 * the next node
			 */
			ticks_start_offset += node->ticks_to_expire;
			window_start_ticks = node->ticks_to_expire +
					     node->ticks_slot;
			/* Fallback position: end of the (drift-reduced)
			 * window, used if the iteration terminates without
			 * a break
			 */
			ticks_to_expire = ticks_slot_window -
					  ticker->ticks_slot;

			ticker_id_prev = ticker_id_iter;
			ticker_id_iter = node->next;
		}

		ticker->ticks_to_expire = ticks_to_expire;
		ticker_id_iter = nodes[ticker_id_head].next;
		ticker_id_prev = TICKER_NULL;

		/* Place the ticker node sorted by expiration time and adjust
		 * delta times
		 */
		while (ticker_id_iter != TICKER_NULL) {
			node = &nodes[ticker_id_iter];
			if (ticker->ticks_to_expire > node->ticks_to_expire) {
				/* Node is after this - adjust delta */
				ticker->ticks_to_expire -=
					node->ticks_to_expire;
			} else {
				/* Node is before this one - absorb our delta
				 * into its expiry and stop
				 */
				node->ticks_to_expire -=
					ticker->ticks_to_expire;
				break;
			}
			ticker_id_prev = ticker_id_iter;
			ticker_id_iter = node->next;
		}

		if (ticker_id_prev != TICKER_NULL) {
			/* Node did not become the first - update head and
			 * insert node after 'previous'
			 */
			instance->ticker_id_head = nodes[ticker_id_head].next;

			/* Link inserted node */
			nodes[ticker_id_head].next = nodes[ticker_id_prev].next;
			nodes[ticker_id_prev].next = ticker_id_head;
		}

		/* Remove latency added in ticker_worker */
		ticker->lazy_current--;

		/* Prevent repeated re-scheduling */
		ext_data->reschedule_state =
			TICKER_RESCHEDULE_STATE_DONE;

		/* One node was moved: iterate again to handle any other
		 * pending re-schedules, and report a change to the caller
		 */
		rescheduling = 1U;
		rescheduled = 1U;
	}

	return rescheduled;
}
#endif /* CONFIG_BT_TICKER_EXT */
#else /* !CONFIG_BT_TICKER_COMPATIBILITY_MODE */
/**
@ -1873,16 +2169,26 @@ void ticker_job(void *param)
ticker_job_worker_bh(instance, ticks_previous, ticks_elapsed,
&insert_head);
/* detect change in head of the list */
/* Detect change in head of the list */
if (instance->ticker_id_head != ticker_id_old_head) {
flag_compare_update = 1U;
}
}
/* Handle insertions */
ticker_job_list_insert(instance, insert_head);
/* detect change in head of the list */
#if defined(CONFIG_BT_TICKER_EXT)
/* Re-schedule any pending nodes with slot_window */
if (ticker_job_reschedule_in_window(instance, ticks_elapsed)) {
flag_compare_update = 1U;
}
#endif /* CONFIG_BT_TICKER_EXT */
} else {
/* Handle insertions */
ticker_job_list_insert(instance, insert_head);
}
/* Detect change in head of the list */
if (instance->ticker_id_head != ticker_id_old_head) {
flag_compare_update = 1U;
}
@ -2043,6 +2349,9 @@ void ticker_trigger(u8_t instance_index)
* @param lazy Number of periods to skip (latency). A value of 1
* causes skipping every other timeout
* @param ticks_slot Slot reservation ticks for node (air-time)
* @param ticks_slot_window Window in which the slot reservation may be
* re-scheduled to avoid collision. Set to 0 for
* legacy behavior
* @param fp_timeout_func Function pointer of function to call at timeout
* @param context Context passed in timeout call
* @param fp_op_func Function pointer of user operation completion
@ -2054,11 +2363,35 @@ void ticker_trigger(u8_t instance_index)
* available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to
* run before exiting ticker_start
*/
#if defined(CONFIG_BT_TICKER_EXT)
u32_t ticker_start(u8_t instance_index, u8_t user_id, u8_t ticker_id,
		   u32_t ticks_anchor, u32_t ticks_first, u32_t ticks_periodic,
		   u32_t remainder_periodic, u16_t lazy, u32_t ticks_slot,
		   ticker_timeout_func fp_timeout_func, void *context,
		   ticker_op_func fp_op_func, void *op_context)
{
	/* Legacy (non-extended) start API: delegate to ticker_start_ext
	 * with NULL extension data, i.e. no slot_window re-scheduling -
	 * preserves pre-existing behavior for callers not using '_ext'.
	 */
	return ticker_start_ext(instance_index, user_id, ticker_id,
				ticks_anchor, ticks_first, ticks_periodic,
				remainder_periodic, lazy, ticks_slot,
				fp_timeout_func, context,
				fp_op_func, op_context,
				NULL);
}
u32_t ticker_start_ext(u8_t instance_index, u8_t user_id, u8_t ticker_id,
u32_t ticks_anchor, u32_t ticks_first,
u32_t ticks_periodic, u32_t remainder_periodic,
u16_t lazy, u32_t ticks_slot,
ticker_timeout_func fp_timeout_func, void *context,
ticker_op_func fp_op_func, void *op_context,
struct ticker_ext *ext_data)
#else
u32_t ticker_start(u8_t instance_index, u8_t user_id, u8_t ticker_id,
u32_t ticks_anchor, u32_t ticks_first, u32_t ticks_periodic,
u32_t remainder_periodic, u16_t lazy, u32_t ticks_slot,
ticker_timeout_func fp_timeout_func, void *context,
ticker_op_func fp_op_func, void *op_context)
#endif
{
struct ticker_instance *instance = &_instance[instance_index];
struct ticker_user_op *user_op;
@ -2085,6 +2418,9 @@ u32_t ticker_start(u8_t instance_index, u8_t user_id, u8_t ticker_id,
user_op->params.start.remainder_periodic = remainder_periodic;
user_op->params.start.ticks_slot = ticks_slot;
user_op->params.start.lazy = lazy;
#if defined(CONFIG_BT_TICKER_EXT)
user_op->params.start.ext_data = ext_data;
#endif
user_op->params.start.fp_timeout_func = fp_timeout_func;
user_op->params.start.context = context;
user_op->status = TICKER_STATUS_BUSY;

View file

@ -39,8 +39,12 @@
#if defined(CONFIG_BT_TICKER_COMPATIBILITY_MODE)
#define TICKER_NODE_T_SIZE 40
#else
#if defined(CONFIG_BT_TICKER_EXT)
#define TICKER_NODE_T_SIZE 48
#else
#define TICKER_NODE_T_SIZE 44
#endif
#endif /* CONFIG_BT_TICKER_EXT */
#endif /* CONFIG_BT_TICKER_COMPATIBILITY_MODE*/
/** \brief Timer user type size.
*/
@ -48,7 +52,11 @@
/** \brief Timer user operation type size.
*/
#if defined(CONFIG_BT_TICKER_EXT)
#define TICKER_USER_OP_T_SIZE 52
#else
#define TICKER_USER_OP_T_SIZE 48
#endif /* CONFIG_BT_TICKER_EXT */
#define TICKER_CALL_ID_NONE 0
#define TICKER_CALL_ID_ISR 1
@ -81,6 +89,20 @@ typedef void (*ticker_timeout_func) (u32_t ticks_at_expire, u32_t remainder,
*/
typedef void (*ticker_op_func) (u32_t status, void *op_context);
#if defined(CONFIG_BT_TICKER_EXT)
/* Extension data for a ticker node, passed via ticker_start_ext. Enables
 * dynamic re-scheduling of the node's slot within ticks_slot_window.
 */
struct ticker_ext {
	u32_t ticks_slot_window; /* Window in which the slot
				  * reservation may be re-scheduled
				  * to avoid collision. Set to 0 to
				  * disable re-scheduling (legacy
				  * behavior)
				  */
	s32_t ticks_drift;       /* Applied drift since last expiry;
				  * subtracted from the window when
				  * re-scheduling (e.g. ADV delay
				  * randomization)
				  */
	u8_t reschedule_state;   /* State of re-scheduling of the
				  * node. See defines
				  * TICKER_RESCHEDULE_STATE_XXX
				  */
};
#endif /* CONFIG_BT_TICKER_EXT */
/** \brief Timer module initialization.
*
* \param[in] instance_index Timer mode instance 0 or 1 (uses RTC0 CMP0 or
@ -125,4 +147,13 @@ u32_t ticker_ticks_diff_get(u32_t ticks_now, u32_t ticks_old);
u32_t ticker_priority_set(u8_t instance_index, u8_t user_id, u8_t ticker_id,
s8_t priority, ticker_op_func fp_op_func,
void *op_context);
#if defined(CONFIG_BT_TICKER_EXT)
u32_t ticker_start_ext(u8_t instance_index, u8_t user_id, u8_t ticker_id,
u32_t ticks_anchor, u32_t ticks_first,
u32_t ticks_periodic, u32_t remainder_periodic,
u16_t lazy, u32_t ticks_slot,
ticker_timeout_func fp_timeout_func, void *context,
ticker_op_func fp_op_func, void *op_context,
struct ticker_ext *ext_data);
#endif /* CONFIG_BT_TICKER_EXT */
#endif /* !CONFIG_BT_TICKER_COMPATIBILITY_MODE */