/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 * Copyright (c) 2017 Exati Tecnologia Ltda.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <drivers/entropy.h>
#include <sys/atomic.h>
#include <soc.h>
#include <hal/nrf_rng.h>

#define DT_DRV_COMPAT nordic_nrf_rng

#define IRQN DT_INST_IRQN(0)
#define IRQ_PRIO DT_INST_IRQ(0, priority)
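
/*
 * For context only: DT_DRV_COMPAT above binds this driver to the SoC's
 * "nordic,nrf-rng" devicetree node, and IRQN/IRQ_PRIO resolve against it.
 * A minimal sketch of such a node is shown below; the register address and
 * interrupt number are assumptions based on typical nRF52 .dtsi files and
 * may differ per SoC.
 *
 *	rng: random@4000d000 {
 *		compatible = "nordic,nrf-rng";
 *		reg = <0x4000d000 0x1000>;
 *		interrupts = <13 1>;
 *		status = "okay";
 *	};
 */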

/*
 * The nRF5 RNG HW has several characteristics that need to be taken
 * into account by the driver to achieve energy efficient generation
 * of entropy.
 *
 * The RNG does not support continuously DMA'ing entropy into RAM;
 * values must be read out by the CPU byte-by-byte. But once started,
 * it will continue to generate bytes until stopped.
 *
 * The generation time for byte 0 after starting generation (with BIAS
 * correction) is:
 *
 * nRF51822 - 677us
 * nRF52810 - 248us
 * nRF52840 - 248us
 *
 * The generation time for byte N >= 1 after starting generation (with
 * BIAS correction) is:
 *
 * nRF51822 - 677us
 * nRF52810 - 120us
 * nRF52840 - 120us
 *
 * Because the first byte in a stream of bytes is more costly on some
 * platforms, a "water system" inspired algorithm is used to amortize
 * the cost of the first byte.
 *
 * The algorithm will delay generation of entropy until the amount of
 * bytes goes below THRESHOLD, at which point it will generate entropy
 * until the BUF_LEN limit is reached.
 *
 * The entropy level is checked at the end of every consumption of
 * entropy.
 *
 * The algorithm and HW together have these characteristics:
 *
 * Setting a low threshold will highly amortize the extra 120us cost
 * of the first byte on nRF52.
 *
 * Setting a high threshold will minimize the time spent waiting for
 * entropy.
 *
 * To minimize power consumption the threshold should be set either
 * low or high depending on the HFCLK-usage pattern of other
 * components.
 *
 * If the threshold is set close to the BUF_LEN, and the system happens
 * to be using the HFCLK anyway for several hundred us after entropy is
 * requested, there will be no extra current consumption for keeping
 * the clocks running for entropy generation.
 */
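
/*
 * For illustration, the pool sizes and thresholds described above map to
 * the Kconfig options consumed in entropy_nrf5_init() below. A sketch of
 * how an application might tune them in prj.conf (the values here are
 * assumptions for illustration, not defaults):
 *
 *	CONFIG_ENTROPY_NRF5_THR_POOL_SIZE=64
 *	CONFIG_ENTROPY_NRF5_THR_THRESHOLD=48
 *	CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE=16
 *	CONFIG_ENTROPY_NRF5_ISR_THRESHOLD=12
 */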

struct rng_pool {
	uint8_t first_alloc; /* Index of the next byte a reader may reserve. */
	uint8_t first_read;  /* Read index the producer must not overwrite. */
	uint8_t last;        /* Write index of the next produced byte. */
	uint8_t mask;        /* Pool size - 1; pool sizes are powers of 2. */
	uint8_t threshold;   /* Generation restarts at or below this level. */
	uint8_t buffer[0];   /* Ring storage, sized via RNG_POOL_DEFINE(). */
};

#define RNG_POOL_DEFINE(name, len) uint8_t name[sizeof(struct rng_pool) + (len)]

BUILD_ASSERT((CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE &
	      (CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE - 1)) == 0,
	     "The CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE must be a power of 2!");

BUILD_ASSERT((CONFIG_ENTROPY_NRF5_THR_POOL_SIZE &
	      (CONFIG_ENTROPY_NRF5_THR_POOL_SIZE - 1)) == 0,
	     "The CONFIG_ENTROPY_NRF5_THR_POOL_SIZE must be a power of 2!");

struct entropy_nrf5_dev_data {
	struct k_sem sem_lock;
	struct k_sem sem_sync;

	RNG_POOL_DEFINE(isr, CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE);
	RNG_POOL_DEFINE(thr, CONFIG_ENTROPY_NRF5_THR_POOL_SIZE);
};

static struct entropy_nrf5_dev_data entropy_nrf5_data;

static int random_byte_get(void)
{
	int retval = -EAGAIN;
	unsigned int key;

	key = irq_lock();

	if (nrf_rng_event_check(NRF_RNG, NRF_RNG_EVENT_VALRDY)) {
		retval = nrf_rng_random_value_get(NRF_RNG);
		nrf_rng_event_clear(NRF_RNG, NRF_RNG_EVENT_VALRDY);
	}

	irq_unlock(key);

	return retval;
}

#pragma GCC push_options
#if defined(CONFIG_BT_CTLR_FAST_ENC)
#pragma GCC optimize ("Ofast")
#endif
static uint16_t rng_pool_get(struct rng_pool *rngp, uint8_t *buf, uint16_t len)
{
	uint32_t last = rngp->last;
	uint32_t mask = rngp->mask;
	uint8_t *dst = buf;
	uint32_t first, available;
	uint32_t other_read_in_progress;
	unsigned int key;

	key = irq_lock();
	first = rngp->first_alloc;

	/*
	 * The other_read_in_progress is non-zero if rngp->first_read != first,
	 * which means that lower-priority code (which was interrupted by this
	 * call) has already allocated an area for reading.
	 */
	other_read_in_progress = (rngp->first_read ^ first);

	available = (last - first) & mask;
	if (available < len) {
		len = available;
	}

	/*
	 * Move the alloc index forward to signal that this part of the buffer
	 * is now reserved for this call.
	 */
	rngp->first_alloc = (first + len) & mask;
	irq_unlock(key);

	while (likely(len--)) {
		*dst++ = rngp->buffer[first];
		first = (first + 1) & mask;
	}

	/*
	 * If this call is the last one accessing the pool, move the read index
	 * to signal that all allocated regions are now read and can be
	 * overwritten.
	 */
	if (likely(!other_read_in_progress)) {
		key = irq_lock();
		rngp->first_read = rngp->first_alloc;
		irq_unlock(key);
	}

	len = dst - buf;
	available = available - len;
	if (available <= rngp->threshold) {
		nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_START);
	}

	return len;
}
#pragma GCC pop_options

static int rng_pool_put(struct rng_pool *rngp, uint8_t byte)
{
	uint8_t first = rngp->first_read;
	uint8_t last = rngp->last;
	uint8_t mask = rngp->mask;

	/* Signal error if the pool is full. */
	if (((last - first) & mask) == mask) {
		return -ENOBUFS;
	}

	rngp->buffer[last] = byte;
	rngp->last = (last + 1) & mask;

	return 0;
}

static void rng_pool_init(struct rng_pool *rngp, uint16_t size, uint8_t threshold)
{
	rngp->first_alloc = 0U;
	rngp->first_read = 0U;
	rngp->last = 0U;
	rngp->mask = size - 1;
	rngp->threshold = threshold;
}
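
/*
 * Note on the casts used below: RNG_POOL_DEFINE() reserves a raw byte
 * array large enough for a struct rng_pool header plus the ring storage,
 * so entropy_nrf5_data.isr and .thr are cast back to struct rng_pool *
 * before use. A minimal sketch of the pattern:
 *
 *	struct rng_pool *isr_pool = (struct rng_pool *)(entropy_nrf5_data.isr);
 *
 *	rng_pool_init(isr_pool, CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE,
 *		      CONFIG_ENTROPY_NRF5_ISR_THRESHOLD);
 */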

static void isr(const void *arg)
{
	int byte, ret;

	ARG_UNUSED(arg);

	byte = random_byte_get();
	if (byte < 0) {
		return;
	}

	ret = rng_pool_put((struct rng_pool *)(entropy_nrf5_data.isr), byte);
	if (ret < 0) {
		ret = rng_pool_put((struct rng_pool *)(entropy_nrf5_data.thr),
				   byte);
		if (ret < 0) {
			nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_STOP);
		}

		k_sem_give(&entropy_nrf5_data.sem_sync);
	}
}

static int entropy_nrf5_get_entropy(const struct device *dev, uint8_t *buf,
				    uint16_t len)
{
	/* Check if this API is called on the correct driver instance. */
	__ASSERT_NO_MSG(&entropy_nrf5_data == dev->data);

	while (len) {
		uint16_t bytes;

		k_sem_take(&entropy_nrf5_data.sem_lock, K_FOREVER);
		bytes = rng_pool_get((struct rng_pool *)(entropy_nrf5_data.thr),
				     buf, len);
		k_sem_give(&entropy_nrf5_data.sem_lock);

		if (bytes == 0U) {
			/* Pool is empty: sleep until the next interrupt. */
			k_sem_take(&entropy_nrf5_data.sem_sync, K_FOREVER);
			continue;
		}

		len -= bytes;
		buf += bytes;
	}

	return 0;
}
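
/*
 * Illustrative usage sketch (not part of this driver): applications reach
 * the function above through the generic Zephyr entropy API. The "rng"
 * node label is an assumption and depends on the SoC devicetree.
 *
 *	const struct device *const rng_dev = DEVICE_DT_GET(DT_NODELABEL(rng));
 *	uint8_t seed[32];
 *
 *	if (device_is_ready(rng_dev)) {
 *		(void)entropy_get_entropy(rng_dev, seed, sizeof(seed));
 *	}
 */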

static int entropy_nrf5_get_entropy_isr(const struct device *dev,
					uint8_t *buf, uint16_t len,
					uint32_t flags)
{
	uint16_t cnt = len;

	/* Check if this API is called on the correct driver instance. */
	__ASSERT_NO_MSG(&entropy_nrf5_data == dev->data);

	if (likely((flags & ENTROPY_BUSYWAIT) == 0U)) {
		return rng_pool_get((struct rng_pool *)(entropy_nrf5_data.isr),
				    buf, len);
	}

	if (len) {
		unsigned int key;
		int irq_enabled;

		key = irq_lock();
		irq_enabled = irq_is_enabled(IRQN);
		irq_disable(IRQN);
		irq_unlock(key);

		nrf_rng_event_clear(NRF_RNG, NRF_RNG_EVENT_VALRDY);
		nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_START);

		/* Clear the NVIC pending bit. This ensures that a subsequent
		 * RNG event will set the Cortex-M single-bit event register
		 * to 1 (the bit is set when the NVIC pending IRQ status
		 * changes from 0 to 1).
		 */
		NVIC_ClearPendingIRQ(IRQN);

		do {
			int byte;

			while (!nrf_rng_event_check(NRF_RNG,
						    NRF_RNG_EVENT_VALRDY)) {
				/*
				 * To guarantee waking up from the event, the
				 * SEV-On-Pend feature must be enabled (it is
				 * enabled during ARCH initialization).
				 *
				 * DSB is recommended by the spec before WFE
				 * (to guarantee completion of memory
				 * transactions).
				 */
				__DSB();
				__WFE();
				__SEV();
				__WFE();
			}

			byte = random_byte_get();
			NVIC_ClearPendingIRQ(IRQN);

			if (byte < 0) {
				continue;
			}

			buf[--len] = byte;
		} while (len);

		if (irq_enabled) {
			irq_enable(IRQN);
		}
	}

	return cnt;
}
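
/*
 * Illustrative usage sketch (assumes the rng_dev handle from the previous
 * example): in interrupt context the ISR-safe API is used instead, and
 * ENTROPY_BUSYWAIT forces generation of the full amount:
 *
 *	uint8_t nonce[8];
 *	int n = entropy_get_entropy_isr(rng_dev, nonce, sizeof(nonce),
 *					ENTROPY_BUSYWAIT);
 *
 * Without the flag, the call returns only what the ISR pool can supply
 * immediately and never blocks.
 */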

static int entropy_nrf5_init(const struct device *dev);

static const struct entropy_driver_api entropy_nrf5_api_funcs = {
	.get_entropy = entropy_nrf5_get_entropy,
	.get_entropy_isr = entropy_nrf5_get_entropy_isr
};

DEVICE_DT_INST_DEFINE(0,
		      entropy_nrf5_init, NULL,
		      &entropy_nrf5_data, NULL,
		      PRE_KERNEL_1, CONFIG_ENTROPY_INIT_PRIORITY,
		      &entropy_nrf5_api_funcs);

static int entropy_nrf5_init(const struct device *dev)
{
	/* Check if this API is called on the correct driver instance. */
	__ASSERT_NO_MSG(&entropy_nrf5_data == dev->data);

	/* Locking semaphore initialized to 1 (unlocked) */
	k_sem_init(&entropy_nrf5_data.sem_lock, 1, 1);

	/* Synchronization semaphore */
	k_sem_init(&entropy_nrf5_data.sem_sync, 0, 1);

	rng_pool_init((struct rng_pool *)(entropy_nrf5_data.thr),
		      CONFIG_ENTROPY_NRF5_THR_POOL_SIZE,
		      CONFIG_ENTROPY_NRF5_THR_THRESHOLD);
	rng_pool_init((struct rng_pool *)(entropy_nrf5_data.isr),
		      CONFIG_ENTROPY_NRF5_ISR_POOL_SIZE,
		      CONFIG_ENTROPY_NRF5_ISR_THRESHOLD);

	/* Enable or disable bias correction */
	if (IS_ENABLED(CONFIG_ENTROPY_NRF5_BIAS_CORRECTION)) {
		nrf_rng_error_correction_enable(NRF_RNG);
	} else {
		nrf_rng_error_correction_disable(NRF_RNG);
	}

	nrf_rng_event_clear(NRF_RNG, NRF_RNG_EVENT_VALRDY);
	nrf_rng_int_enable(NRF_RNG, NRF_RNG_INT_VALRDY_MASK);
	nrf_rng_task_trigger(NRF_RNG, NRF_RNG_TASK_START);

	IRQ_CONNECT(IRQN, IRQ_PRIO, isr, &entropy_nrf5_data, 0);
	irq_enable(IRQN);

	return 0;
}