/*
 * Copyright (c) 2018-2019 Nordic Semiconductor ASA
 * Copyright 2019 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include <stdbool.h>
#include <errno.h>

#include <zephyr/toolchain.h>

#include <soc.h>
#include <zephyr/device.h>

#include <zephyr/drivers/entropy.h>
#include <zephyr/irq.h>

#include "hal/swi.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"

#include "ticker/ticker.h"

#include "lll.h"
#include "lll_vendor.h"
#include "lll_internal.h"

#include "hal/debug.h"

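/* Context of the currently running LLL event and, when ULL done events
 * are processed at low latency, the LLL/ULL done counters used to keep
 * the two layers in sync (see done_inc(), lll_done_ull_inc() and
 * is_done_sync() below).
 */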
static struct {
	struct {
		void *param;
		lll_is_abort_cb_t is_abort_cb;
		lll_abort_cb_t abort_cb;
	} curr;

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	struct {
		uint8_t volatile lll_count;
		uint8_t ull_count;
	} done;
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
} event;

/* Entropy device */
static const struct device *const dev_entropy = DEVICE_DT_GET(DT_CHOSEN(zephyr_entropy));

static int init_reset(void);
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
static inline void done_inc(void);
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
static struct lll_event *resume_enqueue(lll_prepare_cb_t resume_cb);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
static void ticker_stop_op_cb(uint32_t status, void *param);
static void ticker_start_op_cb(uint32_t status, void *param);
static void ticker_start_next_op_cb(uint32_t status, void *param);
static uint32_t preempt_ticker_start(struct lll_event *evt,
				     ticker_op_func op_cb);
static void preempt_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			      uint32_t remainder, uint16_t lazy, uint8_t force,
			      void *param);
static void preempt(void *param);
#else /* CONFIG_BT_CTLR_LOW_LAT */
#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void mfy_ticker_job_idle_get(void *param);
static void ticker_op_job_disable(uint32_t status, void *op_context);
#endif
#endif /* CONFIG_BT_CTLR_LOW_LAT */

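/* Ticker worker ISR: handles the low-power timer (LPTMR1) compare flag
 * and runs the ULL_HIGH mayfly queue; when ULL_HIGH and ULL_LOW share
 * the same priority in non-low-latency builds, the ULL_LOW queue is
 * run here as well.
 */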
static void rtc0_rv32m1_isr(const void *arg)
{
	DEBUG_TICKER_ISR(1);

	/* On compare0 run ticker worker instance0 */
	if (LPTMR1->CSR & LPTMR_CSR_TCF(1)) {
		LPTMR1->CSR |= LPTMR_CSR_TCF(1);

		ticker_trigger(0);
	}

	mayfly_run(TICKER_USER_ID_ULL_HIGH);

#if !defined(CONFIG_BT_CTLR_LOW_LAT) && \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	mayfly_run(TICKER_USER_ID_ULL_LOW);
#endif

	DEBUG_TICKER_ISR(0);
}

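/* Software-triggered ISR at LLL (radio) priority; executes the LLL
 * mayfly queue.
 */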
static void swi_lll_rv32m1_isr(const void *arg)
{
	DEBUG_RADIO_ISR(1);

	mayfly_run(TICKER_USER_ID_LLL);

	DEBUG_RADIO_ISR(0);
}

#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
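/* Software-triggered ISR at ULL_LOW priority; executes the ULL_LOW
 * mayfly queue (e.g. the deferred ticker job).
 */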
static void swi_ull_low_rv32m1_isr(const void *arg)
{
	DEBUG_TICKER_JOB(1);

	mayfly_run(TICKER_USER_ID_ULL_LOW);

	DEBUG_TICKER_JOB(0);
}
#endif

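/* Bring up the LLL: verify the entropy driver is ready, reset internal
 * state, connect and enable the radio, ticker and software IRQs, and
 * call radio_setup() last so that ISR latency can be measured.
 */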
int lll_init(void)
{
	int err;

	/* Check if entropy device is ready */
	if (!device_is_ready(dev_entropy)) {
		return -ENODEV;
	}

	/* Initialise LLL internals */
	event.curr.abort_cb = NULL;

	err = init_reset();
	if (err) {
		return err;
	}

	/* Initialize SW IRQ structure */
	hal_swi_init();

	/* Connect ISRs */
	IRQ_CONNECT(LL_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO, isr_radio, NULL, 0);
	IRQ_CONNECT(LL_RTC0_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO,
		    rtc0_rv32m1_isr, NULL, 0);
	IRQ_CONNECT(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO,
		    swi_lll_rv32m1_isr, NULL, 0);
#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
	IRQ_CONNECT(HAL_SWI_JOB_IRQ, CONFIG_BT_CTLR_ULL_LOW_PRIO,
		    swi_ull_low_rv32m1_isr, NULL, 0);
#endif

	/* Enable IRQs */
	irq_enable(LL_RADIO_IRQn);
	irq_enable(LL_RTC0_IRQn);
	irq_enable(HAL_SWI_RADIO_IRQ);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) ||
	    (CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
		irq_enable(HAL_SWI_JOB_IRQ);
	}

	/* Call it after IRQ enable so that ISR latency can be measured */
	radio_setup();

	return 0;
}

int lll_deinit(void)
{
	/* Disable IRQs */
	irq_disable(LL_RADIO_IRQn);
	irq_disable(LL_RTC0_IRQn);
	irq_disable(HAL_SWI_RADIO_IRQ);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) ||
	    (CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
		irq_disable(HAL_SWI_JOB_IRQ);
	}

	return 0;
}

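/* Cryptographically-secure random numbers are sourced from the Zephyr
 * entropy driver; the _isr variant uses the ISR-safe API with flags = 0,
 * i.e. without busy-waiting for more entropy to accumulate.
 */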
int lll_csrand_get(void *buf, size_t len)
{
	return entropy_get_entropy(dev_entropy, buf, len);
}

int lll_csrand_isr_get(void *buf, size_t len)
{
	return entropy_get_entropy_isr(dev_entropy, buf, len, 0);
}

int lll_rand_get(void *buf, size_t len)
{
	/* Stub: no low-grade random source on this port, buffer untouched */
	return 0;
}

int lll_rand_isr_get(void *buf, size_t len)
{
	/* Stub: no low-grade random source on this port, buffer untouched */
	return 0;
}

int lll_reset(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

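/* Abort the current event (when param is NULL or matches it) and flush
 * any matching prepares still queued in the prepare pipeline.
 */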
void lll_disable(void *param)
{
	/* LLL disable of current event, done is generated */
	if (!param || (param == event.curr.param)) {
		if (event.curr.abort_cb && event.curr.param) {
			event.curr.abort_cb(NULL, event.curr.param);
		} else {
			LL_ASSERT(!param);
		}
	}
	{
		struct lll_event *next;
		uint8_t idx;

		idx = UINT8_MAX;
		next = ull_prepare_dequeue_iter(&idx);
		while (next) {
			if (!next->is_aborted &&
			    (!param || (param == next->prepare_param.param))) {
				next->is_aborted = 1;
				next->abort_cb(&next->prepare_param,
					       next->prepare_param.param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
				/* NOTE: abort_cb called lll_done which modifies
				 * the prepare pipeline hence re-iterate
				 * through the prepare pipeline.
				 */
				idx = UINT8_MAX;
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
			}

			next = ull_prepare_dequeue_iter(&idx);
		}
	}
}

int lll_prepare_done(void *param)
{
#if defined(CONFIG_BT_CTLR_LOW_LAT) && \
	(CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, mfy_ticker_job_idle_get};
	uint32_t ret;

	ret = mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_LOW,
			     1, &mfy);
	if (ret) {
		return -EFAULT;
	}

	return 0;
#else
	return 0;
#endif /* CONFIG_BT_CTLR_LOW_LAT */
}

int lll_done(void *param)
{
	struct lll_event *next;
	struct ull_hdr *ull;
	void *evdone;

	/* Assert if param supplied without a pending prepare to cancel. */
	next = ull_prepare_dequeue_get();
	LL_ASSERT(!param || next);

	/* check if current LLL event is done */
	ull = NULL;
	if (!param) {
		/* Reset current event instance */
		LL_ASSERT(event.curr.abort_cb);
		event.curr.abort_cb = NULL;

		param = event.curr.param;
		event.curr.param = NULL;

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
		done_inc();
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

		if (param) {
			ull = HDR_LLL2ULL(param);
		}

		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) &&
		    (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
			mayfly_enable(TICKER_USER_ID_LLL,
				      TICKER_USER_ID_ULL_LOW,
				      1);
		}

		DEBUG_RADIO_CLOSE(0);
	} else {
		ull = HDR_LLL2ULL(param);
	}

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	ull_prepare_dequeue(TICKER_USER_ID_LLL);
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	lll_done_score(param, 0, 0); /* TODO */
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */

	/* Let ULL know about LLL event done */
	evdone = ull_event_done(ull);
	LL_ASSERT(evdone);

	return 0;
}

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
void lll_done_ull_inc(void)
{
	LL_ASSERT(event.done.ull_count != event.done.lll_count);
	event.done.ull_count++;
}
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

bool lll_is_done(void *param)
{
	/* FIXME: use param to check */
	return !event.curr.abort_cb;
}

int lll_is_abort_cb(void *next, void *curr, lll_prepare_cb_t *resume_cb)
{
	return -ECANCELED;
}

int lll_clk_on(void)
{
	int err;

	/* turn on radio clock in non-blocking mode. */
	err = radio_wake();
	if (!err) {
		DEBUG_RADIO_XTAL(1);
	}

	return err;
}

int lll_clk_on_wait(void)
{
	int err;

	/* turn on radio clock in blocking mode. */
	err = radio_wake();

	while (radio_is_off()) {
		k_cpu_idle();
	}

	DEBUG_RADIO_XTAL(1);

	return err;
}

int lll_clk_off(void)
{
	int err;

	/* turn off radio clock in non-blocking mode. */
	err = radio_sleep();
	if (!err) {
		DEBUG_RADIO_XTAL(0);
	}

	return err;
}

uint32_t lll_event_offset_get(struct ull_hdr *ull)
{
	if (0) {
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
	} else if (ull->ticks_prepare_to_start & XON_BITMASK) {
		return MAX(ull->ticks_active_to_start,
			   ull->ticks_preempt_to_start);
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
	} else {
		return MAX(ull->ticks_active_to_start,
			   ull->ticks_prepare_to_start);
	}
}

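/* Return 1 if the ticker has already moved past the event's start time
 * by more than the permitted start overhead (i.e. the event is running
 * late), 0 otherwise.
 */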
uint32_t lll_preempt_calc(struct ull_hdr *ull, uint8_t ticker_id,
			  uint32_t ticks_at_event)
{
	uint32_t ticks_now;
	uint32_t diff;

	ticks_now = ticker_ticks_now_get();
	diff = ticks_now - ticks_at_event;
	if (diff & BIT(HAL_TICKER_CNTR_MSBIT)) {
		return 0;
	}

	diff += HAL_TICKER_CNTR_CMP_OFFSET_MIN;
	if (diff > HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US)) {
		/* TODO: for Low Latency Feature with Advanced XTAL feature.
		 * 1. Release retained HF clock.
		 * 2. Advance the radio event to accommodate normal prepare
		 *    duration.
		 * 3. Increase the preempt to start ticks for future events.
		 */
		return 1;
	}

	return 0;
}

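/* Map a Bluetooth LE channel index to its RF channel. Advertising
 * channels 37, 38 and 39 sit at 2402, 2426 and 2480 MHz; data channels
 * 0-10 occupy 2404-2424 MHz and 11-36 occupy 2428-2478 MHz. The values
 * passed to radio_freq_chan_set() are MHz offsets from 2400 MHz. The
 * channel index also seeds the whitening IV.
 */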
void lll_chan_set(uint32_t chan)
{
	switch (chan) {
	case 37:
		radio_freq_chan_set(2);
		break;

	case 38:
		radio_freq_chan_set(26);
		break;

	case 39:
		radio_freq_chan_set(80);
		break;

	default:
		if (chan < 11) {
			radio_freq_chan_set(4 + (chan * 2U));
		} else if (chan < 40) {
			radio_freq_chan_set(28 + ((chan - 11) * 2U));
		} else {
			LL_ASSERT(0);
		}
		break;
	}

	radio_whiten_iv_set(chan);
}

uint32_t lll_radio_is_idle(void)
{
	return radio_is_idle();
}

uint32_t lll_radio_tx_ready_delay_get(uint8_t phy, uint8_t flags)
{
	return radio_tx_ready_delay_get(phy, flags);
}

uint32_t lll_radio_rx_ready_delay_get(uint8_t phy, uint8_t flags)
{
	return radio_rx_ready_delay_get(phy, flags);
}

void lll_isr_status_reset(void)
{
	radio_status_reset();
	radio_tmr_status_reset();
	radio_filter_status_reset();
	if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY)) {
		radio_ar_status_reset();
	}
	radio_rssi_status_reset();
}

static int init_reset(void)
{
	return 0;
}

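/* done_inc() runs in LLL context and lll_done_ull_inc() in ULL context;
 * the two wrapping counters are equal exactly when every LLL done event
 * has been consumed by ULL, which is what is_done_sync() tests.
 */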
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
static inline void done_inc(void)
{
	event.done.lll_count++;
	LL_ASSERT(event.done.lll_count != event.done.ull_count);
}
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

static inline bool is_done_sync(void)
{
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	return event.done.lll_count == event.done.ull_count;
#else /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
	return true;
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
}

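/* Resolve a prepare request: if no event is active and no other prepare
 * is ready ahead of it in the pipeline, run prepare_cb() immediately;
 * otherwise enqueue it for a deferred call (returning -EINPROGRESS) and,
 * in non-low-latency builds, arm the preempt timeout for the queued
 * prepare.
 */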
int lll_prepare_resolve(lll_is_abort_cb_t is_abort_cb, lll_abort_cb_t abort_cb,
			lll_prepare_cb_t prepare_cb,
			struct lll_prepare_param *prepare_param,
			uint8_t is_resume, uint8_t is_dequeue)
{
	struct lll_event *p;
	uint8_t idx;
	int err;

	/* Find the ready prepare in the pipeline */
	idx = UINT8_MAX;
	p = ull_prepare_dequeue_iter(&idx);
	while (p && (p->is_aborted || p->is_resume)) {
		p = ull_prepare_dequeue_iter(&idx);
	}

	/* Current event active or another prepare is ready in the pipeline */
	if ((!is_dequeue && !is_done_sync()) ||
	    event.curr.abort_cb ||
	    (p && is_resume)) {
#if defined(CONFIG_BT_CTLR_LOW_LAT)
		lll_prepare_cb_t resume_cb;
#endif /* CONFIG_BT_CTLR_LOW_LAT */
		struct lll_event *next;

		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) && event.curr.param) {
			/* early abort */
			event.curr.abort_cb(NULL, event.curr.param);
		}

		/* Store the next prepare for deferred call */
		next = ull_prepare_enqueue(is_abort_cb, abort_cb, prepare_param,
					   prepare_cb, is_resume);
		LL_ASSERT(next);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
		if (is_resume) {
			return -EINPROGRESS;
		}

		/* Start the preempt timeout */
		uint32_t ret;

		ret = preempt_ticker_start(next, ticker_start_op_cb);
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (ret == TICKER_STATUS_BUSY));

#else /* CONFIG_BT_CTLR_LOW_LAT */
		next = NULL;
		while (p) {
			if (!p->is_aborted) {
				if (event.curr.param ==
				    p->prepare_param.param) {
					p->is_aborted = 1;
					p->abort_cb(&p->prepare_param,
						    p->prepare_param.param);
				} else {
					next = p;
				}
			}

			p = ull_prepare_dequeue_iter(&idx);
		}

		if (next) {
			/* check if resume requested by curr */
			err = event.curr.is_abort_cb(NULL, event.curr.param,
						     &resume_cb);
			LL_ASSERT(err);

			if (err == -EAGAIN) {
				next = resume_enqueue(resume_cb);
				LL_ASSERT(next);
			} else {
				LL_ASSERT(err == -ECANCELED);
			}
		}
#endif /* CONFIG_BT_CTLR_LOW_LAT */

		return -EINPROGRESS;
	}

	LL_ASSERT(!p || &p->prepare_param == prepare_param);

	event.curr.param = prepare_param->param;
	event.curr.is_abort_cb = is_abort_cb;
	event.curr.abort_cb = abort_cb;

	err = prepare_cb(prepare_param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
	uint32_t ret;

	/* Stop any scheduled preempt ticker */
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
			  TICKER_USER_ID_LLL,
			  TICKER_ID_LLL_PREEMPT,
			  ticker_stop_op_cb, NULL);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_FAILURE) ||
		  (ret == TICKER_STATUS_BUSY));

	/* Find the next prepare needing its preempt timeout to be set up */
	do {
		p = ull_prepare_dequeue_iter(&idx);
		if (!p) {
			return err;
		}
	} while (p->is_aborted || p->is_resume);

	/* Start the preempt timeout */
	ret = preempt_ticker_start(p, ticker_start_next_op_cb);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
#endif /* !CONFIG_BT_CTLR_LOW_LAT */

	return err;
}

static struct lll_event *resume_enqueue(lll_prepare_cb_t resume_cb)
{
	struct lll_prepare_param prepare_param = {0};

	prepare_param.param = event.curr.param;
	event.curr.param = NULL;

	return ull_prepare_enqueue(event.curr.is_abort_cb, event.curr.abort_cb,
				   &prepare_param, resume_cb, 1);
}

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
static void ticker_stop_op_cb(uint32_t status, void *param)
{
	/* NOTE: this callback is present only for addition of debug messages
	 * when needed, else can be dispensed with.
	 */
	ARG_UNUSED(param);

	LL_ASSERT((status == TICKER_STATUS_SUCCESS) ||
		  (status == TICKER_STATUS_FAILURE));
}

static void ticker_start_op_cb(uint32_t status, void *param)
{
	/* NOTE: this callback is present only for addition of debug messages
	 * when needed, else can be dispensed with.
	 */
	ARG_UNUSED(param);

	LL_ASSERT((status == TICKER_STATUS_SUCCESS) ||
		  (status == TICKER_STATUS_FAILURE));
}

static void ticker_start_next_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

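/* Arm the single-shot preempt ticker relative to the queued prepare's
 * expiry anchor, offset so that it fires ticks_preempt_to_start early,
 * giving preempt() time to abort the current event if needed.
 */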
static uint32_t preempt_ticker_start(struct lll_event *evt,
				     ticker_op_func op_cb)
{
	struct lll_prepare_param *p;
	uint32_t preempt_anchor;
	struct ull_hdr *ull;
	uint32_t preempt_to;
	uint32_t ret;

	/* Calc the preempt timeout */
	p = &evt->prepare_param;
	ull = HDR_LLL2ULL(p->param);
	preempt_anchor = p->ticks_at_expire;
	preempt_to = MAX(ull->ticks_active_to_start,
			 ull->ticks_prepare_to_start) -
		     ull->ticks_preempt_to_start;

	/* Set up the preempt timeout */
	ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
			   TICKER_USER_ID_LLL,
			   TICKER_ID_LLL_PREEMPT,
			   preempt_anchor,
			   preempt_to,
			   TICKER_NULL_PERIOD,
			   TICKER_NULL_REMAINDER,
			   TICKER_NULL_LAZY,
			   TICKER_NULL_SLOT,
			   preempt_ticker_cb, evt,
			   op_cb, evt);

	return ret;
}

static void preempt_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			      uint32_t remainder, uint16_t lazy, uint8_t force,
			      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, preempt};
	uint32_t ret;

	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);
}

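/* Preempt ticker expiry handler, run as a mayfly in LLL context: find
 * the ready prepare that scheduled it, ask the current event whether it
 * may be aborted, and either cancel the newcomer, abort the current
 * event outright (-ECANCELED), or abort it and queue a resume (-EAGAIN).
 */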
static void preempt(void *param)
{
	lll_prepare_cb_t resume_cb;
	struct lll_event *next;
	uint8_t idx;
	int err;

	/* No event to abort */
	if (!event.curr.abort_cb || !event.curr.param) {
		return;
	}

	/* Check if any prepare in pipeline */
	idx = UINT8_MAX;
	next = ull_prepare_dequeue_iter(&idx);
	if (!next) {
		return;
	}

	/* Find a prepare that is ready and not a resume */
	while (next && (next->is_aborted || next->is_resume)) {
		next = ull_prepare_dequeue_iter(&idx);
	}

	/* No ready prepare */
	if (!next) {
		return;
	}

	/* Preemptor not in pipeline */
	if (next != param) {
		uint32_t ret;

		/* Start the preempt timeout */
		ret = preempt_ticker_start(next, ticker_start_next_op_cb);
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (ret == TICKER_STATUS_BUSY));

		return;
	}

	/* Check if the current event wants to continue */
	err = event.curr.is_abort_cb(next->prepare_param.param,
				     event.curr.param,
				     &resume_cb);
	if (!err) {
		/* Let preemptor LLL know about the cancelled prepare */
		next->is_aborted = 1;
		next->abort_cb(&next->prepare_param, next->prepare_param.param);

		return;
	}

	/* Abort the current event */
	event.curr.abort_cb(NULL, event.curr.param);

	/* Check if resume requested */
	if (err == -EAGAIN) {
		struct lll_event *iter;
		uint8_t iter_idx;

		/* Abort any duplicates so that they get dequeued */
		iter_idx = UINT8_MAX;
		iter = ull_prepare_dequeue_iter(&iter_idx);
		while (iter) {
			if (!iter->is_aborted &&
			    event.curr.param == iter->prepare_param.param) {
				iter->is_aborted = 1;
				iter->abort_cb(&iter->prepare_param,
					       iter->prepare_param.param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
				/* NOTE: abort_cb called lll_done which modifies
				 * the prepare pipeline hence re-iterate
				 * through the prepare pipeline.
				 */
				iter_idx = UINT8_MAX;
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
			}

			iter = ull_prepare_dequeue_iter(&iter_idx);
		}

		/* Enqueue as resume event */
		iter = resume_enqueue(resume_cb);
		LL_ASSERT(iter);
	} else {
		LL_ASSERT(err == -ECANCELED);
	}
}

#else /* CONFIG_BT_CTLR_LOW_LAT */

#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void mfy_ticker_job_idle_get(void *param)
{
	uint32_t ret;

	/* Ticker Job Silence */
	ret = ticker_job_idle_get(TICKER_INSTANCE_ID_CTLR,
				  TICKER_USER_ID_ULL_LOW,
				  ticker_op_job_disable, NULL);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

static void ticker_op_job_disable(uint32_t status, void *op_context)
{
	ARG_UNUSED(status);
	ARG_UNUSED(op_context);

	/* FIXME: */
	if (1 /* _radio.state != STATE_NONE */) {
		mayfly_enable(TICKER_USER_ID_ULL_LOW,
			      TICKER_USER_ID_ULL_LOW, 0);
	}
}
#endif

#endif /* CONFIG_BT_CTLR_LOW_LAT */