kernel: subsys: lib: drivers: Use k_heap instead of z_mem_pool wrappers
Use the core k_heap API pervasively within our tree instead of the z_mem_pool wrapper that provided compatibility with the older mempool implementation. Almost all of this is straightforward swapping of one alloc/free call for another. In a few cases where code was holding onto an old-style "mem_block", a local compatibility struct with a single field has been swapped in to keep the invasiveness of the changes down. Note that not all the relevant changes in this patch have in-tree test coverage, though I validated that it all builds.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
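The conversion follows one mechanical pattern throughout the patch. Below is a minimal, hedged sketch of the before/after shape; the pool name, block-struct name, and sizing macros are hypothetical illustrations, not taken from any one file in this commit:

#include <zephyr.h>
#include <errno.h>

#define BLOCK_MAX_SZ 64	/* hypothetical: the old pool's max block size */
#define BLOCK_COUNT  4	/* hypothetical: the old pool's block count */

/* Before: Z_MEM_POOL_DEFINE(my_pool, BLOCK_MIN_SZ, BLOCK_MAX_SZ,
 *                           BLOCK_COUNT, BLOCK_ALIGN);
 * After: a k_heap sized to roughly the old pool's worst case, as the
 * hunks below do with their BLOCK_MAX * BLOCK_NUM products.
 */
K_HEAP_DEFINE(my_pool, BLOCK_MAX_SZ * BLOCK_COUNT);

/* Code that held a struct k_mem_block gets a one-field compatibility
 * struct so the surrounding member accesses stay untouched.
 */
struct my_mem_block {
	void *data;
};

int my_alloc_example(void)
{
	struct my_mem_block block;

	/* k_heap_alloc() returns the pointer directly (NULL on failure)
	 * rather than filling in a block descriptor and returning an int.
	 */
	block.data = k_heap_alloc(&my_pool, BLOCK_MAX_SZ, K_NO_WAIT);
	if (block.data == NULL) {
		return -ENOMEM;
	}

	/* ... use block.data ... */

	/* Freeing takes the heap plus the pointer, not a block descriptor. */
	k_heap_free(&my_pool, block.data);
	return 0;
}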
parent 78614bf271
commit fcd392f6ce
25 changed files with 137 additions and 207 deletions
@@ -17,11 +17,9 @@
 
 LOG_MODULE_REGISTER(display_mcux_elcdif, CONFIG_DISPLAY_LOG_LEVEL);
 
-K_MEM_POOL_DEFINE(mcux_elcdif_pool,
-		  CONFIG_MCUX_ELCDIF_POOL_BLOCK_MIN,
-		  CONFIG_MCUX_ELCDIF_POOL_BLOCK_MAX,
-		  CONFIG_MCUX_ELCDIF_POOL_BLOCK_NUM,
-		  CONFIG_MCUX_ELCDIF_POOL_BLOCK_ALIGN);
+K_HEAP_DEFINE(mcux_elcdif_pool,
+	      CONFIG_MCUX_ELCDIF_POOL_BLOCK_MAX *
+	      CONFIG_MCUX_ELCDIF_POOL_BLOCK_NUM);
 
 struct mcux_elcdif_config {
 	LCDIF_Type *base;
@@ -31,8 +29,12 @@ struct mcux_elcdif_config {
 	uint8_t bits_per_pixel;
 };
 
+struct mcux_mem_block {
+	void *data;
+};
+
 struct mcux_elcdif_data {
-	struct k_mem_block fb[2];
+	struct mcux_mem_block fb[2];
 	struct k_sem sem;
 	size_t pixel_bytes;
 	size_t fb_bytes;
@@ -190,8 +192,10 @@ static int mcux_elcdif_init(const struct device *dev)
 	data->write_idx = 1U;
 
 	for (i = 0; i < ARRAY_SIZE(data->fb); i++) {
-		if (k_mem_pool_alloc(&mcux_elcdif_pool, &data->fb[i],
-				     data->fb_bytes, K_NO_WAIT) != 0) {
+		data->fb[i].data = k_heap_alloc(&mcux_elcdif_pool,
+						data->fb_bytes,
+						K_NO_WAIT);
+		if (data->fb[i].data == NULL) {
 			LOG_ERR("Could not allocate frame buffer %d", i);
 			return -ENOMEM;
 		}
@@ -80,7 +80,11 @@ static struct buf_descriptor __aligned(512) bdt[(NUM_OF_EP_MAX) * 2 * 2];
 
 #define EP_BUF_NUMOF_BLOCKS	(NUM_OF_EP_MAX / 2)
 
-Z_MEM_POOL_DEFINE(ep_buf_pool, 16, 512, EP_BUF_NUMOF_BLOCKS, 4);
+K_HEAP_DEFINE(ep_buf_pool, 512 * EP_BUF_NUMOF_BLOCKS + 128);
+
+struct ep_mem_block {
+	void *data;
+};
 
 struct usb_ep_ctrl_data {
 	struct ep_status {
@@ -95,8 +99,8 @@ struct usb_ep_ctrl_data {
 	} status;
 	uint16_t mps_in;
 	uint16_t mps_out;
-	struct k_mem_block mblock_in;
-	struct k_mem_block mblock_out;
+	struct ep_mem_block mblock_in;
+	struct ep_mem_block mblock_out;
 	usb_dc_ep_callback cb_in;
 	usb_dc_ep_callback cb_out;
 };
@@ -325,7 +329,7 @@ int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data * const cfg)
 {
 	uint8_t ep_idx = USB_EP_GET_IDX(cfg->ep_addr);
 	struct usb_ep_ctrl_data *ep_ctrl;
-	struct k_mem_block *block;
+	struct ep_mem_block *block;
 	uint8_t idx_even;
 	uint8_t idx_odd;
 
@@ -353,14 +357,15 @@ int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data * const cfg)
 	}
 
 	if (bdt[idx_even].buf_addr) {
-		z_mem_pool_free(block);
+		k_heap_free(&ep_buf_pool, block->data);
 	}
 
 	USB0->ENDPOINT[ep_idx].ENDPT = 0;
 	(void)memset(&bdt[idx_even], 0, sizeof(struct buf_descriptor));
 	(void)memset(&bdt[idx_odd], 0, sizeof(struct buf_descriptor));
 
-	if (z_mem_pool_alloc(&ep_buf_pool, block, cfg->ep_mps * 2U, K_MSEC(10)) == 0) {
+	block->data = k_heap_alloc(&ep_buf_pool, cfg->ep_mps * 2U, K_MSEC(10));
+	if (block->data != NULL) {
 		(void)memset(block->data, 0, cfg->ep_mps * 2U);
 	} else {
 		LOG_ERR("Memory allocation time-out");
@@ -67,9 +67,9 @@ extern void USB_DeviceEhciIsrFunction(void *deviceHandle);
 /* The max MPS is 1023 for FS, 1024 for HS. */
 #if defined(CONFIG_NOCACHE_MEMORY)
 #define EP_BUF_NONCACHED
-__nocache K_MEM_POOL_DEFINE(ep_buf_pool, 16, 1024, EP_BUF_NUMOF_BLOCKS, 4);
+__nocache K_HEAP_DEFINE(ep_buf_pool, 1024 * EP_BUF_NUMOF_BLOCKS);
 #else
-K_MEM_POOL_DEFINE(ep_buf_pool, 16, 1024, EP_BUF_NUMOF_BLOCKS, 4);
+K_HEAP_DEFINE(ep_buf_pool, 1024 * EP_BUF_NUMOF_BLOCKS);
 #endif
 
 static usb_ep_ctrl_data_t s_ep_ctrl[NUM_OF_EP_MAX];
@@ -78,8 +78,8 @@ static usb_device_struct_t dev_data;
 #if ((defined(USB_DEVICE_CONFIG_EHCI)) && (USB_DEVICE_CONFIG_EHCI > 0U))
 /* EHCI device driver interface */
 static const usb_device_controller_interface_struct_t ehci_iface = {
 	USB_DeviceEhciInit, USB_DeviceEhciDeinit, USB_DeviceEhciSend,
 	USB_DeviceEhciRecv, USB_DeviceEhciCancel, USB_DeviceEhciControl
 };
 #endif
 
@@ -207,11 +207,12 @@ int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data *const cfg)
 
 	block = &(eps->block);
 	if (block->data) {
-		k_mem_pool_free(block);
+		k_heap_free(&ep_buf_pool, block->data);
 		block->data = NULL;
 	}
 
-	if (k_mem_pool_alloc(&ep_buf_pool, block, cfg->ep_mps, K_MSEC(10))) {
+	block->data = k_heap_alloc(&ep_buf_pool, cfg->ep_mps, K_MSEC(10));
+	if (block->data == NULL) {
 		LOG_ERR("Memory allocation time-out");
 		return -ENOMEM;
 	}
@@ -85,6 +85,10 @@ struct nrf_usbd_ep_cfg {
 
 };
 
+struct usbd_mem_block {
+	void *data;
+};
+
 /**
  * @brief Endpoint buffer
  *
@@ -95,7 +99,7 @@ struct nrf_usbd_ep_cfg {
  */
 struct nrf_usbd_ep_buf {
 	uint32_t len;
-	struct k_mem_block block;
+	struct usbd_mem_block block;
 	uint8_t *data;
 	uint8_t *curr;
 };
@@ -155,7 +159,7 @@ struct usbd_pwr_event {
  */
 struct usbd_event {
 	sys_snode_t node;
-	struct k_mem_block block;
+	struct usbd_mem_block block;
 	union {
 		struct usbd_ep_event ep_evt;
 		struct usbd_pwr_event pwr_evt;
@@ -178,8 +182,8 @@ struct usbd_event {
 #error Invalid USBD event queue size (CONFIG_USB_NRFX_EVT_QUEUE_SIZE).
 #endif
 
-Z_MEM_POOL_DEFINE(fifo_elem_pool, FIFO_ELEM_MIN_SZ, FIFO_ELEM_MAX_SZ,
-		  CONFIG_USB_NRFX_EVT_QUEUE_SIZE, FIFO_ELEM_ALIGN);
+K_HEAP_DEFINE(fifo_elem_pool,
+	      FIFO_ELEM_MAX_SZ * CONFIG_USB_NRFX_EVT_QUEUE_SIZE);
 
 /**
  * @brief Endpoint buffer pool
@@ -233,9 +237,8 @@ Z_MEM_POOL_DEFINE(fifo_elem_pool, FIFO_ELEM_MIN_SZ, FIFO_ELEM_MAX_SZ,
 /** 4 Byte Buffer alignment required by hardware */
 #define EP_BUF_POOL_ALIGNMENT sizeof(unsigned int)
 
-Z_MEM_POOL_DEFINE(ep_buf_pool, EP_BUF_POOL_BLOCK_MIN_SZ,
-		  EP_BUF_POOL_BLOCK_MAX_SZ, EP_BUF_POOL_BLOCK_COUNT,
-		  EP_BUF_POOL_ALIGNMENT);
+K_HEAP_DEFINE(ep_buf_pool,
+	      EP_BUF_POOL_BLOCK_MAX_SZ * EP_BUF_POOL_BLOCK_COUNT);
 
 /**
  * @brief USBD control structure
@@ -406,7 +409,7 @@ static inline void usbd_work_schedule(void)
  */
 static inline void usbd_evt_free(struct usbd_event *ev)
 {
-	z_mem_pool_free(&ev->block);
+	k_heap_free(&fifo_elem_pool, ev->block.data);
 }
 
 /**
@@ -451,15 +454,14 @@ static inline void usbd_evt_flush(void)
  */
 static inline struct usbd_event *usbd_evt_alloc(void)
 {
-	int ret;
 	struct usbd_event *ev;
-	struct k_mem_block block;
+	struct usbd_mem_block block;
 
-	ret = z_mem_pool_alloc(&fifo_elem_pool, &block,
-			       sizeof(struct usbd_event),
-			       K_NO_WAIT);
+	block.data = k_heap_alloc(&fifo_elem_pool,
+				  sizeof(struct usbd_event),
+				  K_NO_WAIT);
 
-	if (ret < 0) {
+	if (block.data == NULL) {
 		LOG_ERR("USBD event allocation failed!");
 
 		/*
@@ -470,10 +472,10 @@ static inline struct usbd_event *usbd_evt_alloc(void)
 		 */
 		usbd_evt_flush();
 
-		ret = z_mem_pool_alloc(&fifo_elem_pool, &block,
-				       sizeof(struct usbd_event),
-				       K_NO_WAIT);
-		if (ret < 0) {
+		block.data = k_heap_alloc(&fifo_elem_pool,
+					  sizeof(struct usbd_event),
+					  K_NO_WAIT);
+		if (block.data == NULL) {
 			LOG_ERR("USBD event memory corrupted");
 			__ASSERT_NO_MSG(0);
 			return NULL;
@@ -629,7 +631,6 @@ static void ep_ctx_reset(struct nrf_usbd_ep_ctx *ep_ctx)
 static int eps_ctx_init(void)
 {
 	struct nrf_usbd_ep_ctx *ep_ctx;
-	int err;
 	uint32_t i;
 
 	for (i = 0U; i < CFG_EPIN_CNT; i++) {
@@ -643,9 +644,10 @@ static int eps_ctx_init(void)
 		__ASSERT_NO_MSG(ep_ctx);
 
 		if (!ep_ctx->buf.block.data) {
-			err = z_mem_pool_alloc(&ep_buf_pool, &ep_ctx->buf.block,
-					       EP_BUF_MAX_SZ, K_NO_WAIT);
-			if (err < 0) {
+			ep_ctx->buf.block.data =
+				k_heap_alloc(&ep_buf_pool,
+					     EP_BUF_MAX_SZ, K_NO_WAIT);
+			if (ep_ctx->buf.block.data == NULL) {
 				LOG_ERR("Buffer alloc failed for EP 0x%02x", i);
 				return -ENOMEM;
 			}
@@ -665,10 +667,10 @@ static int eps_ctx_init(void)
 		__ASSERT_NO_MSG(ep_ctx);
 
 		if (!ep_ctx->buf.block.data) {
-			err = z_mem_pool_alloc(&ep_buf_pool, &ep_ctx->buf.block,
-					       ISO_EP_BUF_MAX_SZ,
-					       K_NO_WAIT);
-			if (err < 0) {
+			ep_ctx->buf.block.data = k_heap_alloc(&ep_buf_pool,
+							      ISO_EP_BUF_MAX_SZ,
+							      K_NO_WAIT);
+			if (ep_ctx->buf.block.data == NULL) {
 				LOG_ERR("EP buffer alloc failed for ISOOUT");
 				return -ENOMEM;
 			}
@@ -694,7 +696,7 @@ static void eps_ctx_uninit(void)
 	for (i = 0U; i < CFG_EPOUT_CNT; i++) {
 		ep_ctx = out_endpoint_ctx(i);
 		__ASSERT_NO_MSG(ep_ctx);
-		z_mem_pool_free(&ep_ctx->buf.block);
+		k_heap_free(&ep_buf_pool, ep_ctx->buf.block.data);
 		memset(ep_ctx, 0, sizeof(*ep_ctx));
 	}
 
@@ -707,7 +709,7 @@ static void eps_ctx_uninit(void)
 	if (CFG_EP_ISOOUT_CNT) {
 		ep_ctx = out_endpoint_ctx(NRF_USBD_EPOUT(8));
 		__ASSERT_NO_MSG(ep_ctx);
-		z_mem_pool_free(&ep_ctx->buf.block);
+		k_heap_free(&ep_buf_pool, ep_ctx->buf.block.data);
 		memset(ep_ctx, 0, sizeof(*ep_ctx));
 	}
 }
@@ -7,14 +7,17 @@
 
 #include <drivers/video.h>
 
-K_MEM_POOL_DEFINE(video_buffer_pool,
-		  CONFIG_VIDEO_BUFFER_POOL_ALIGN,
-		  CONFIG_VIDEO_BUFFER_POOL_SZ_MAX,
-		  CONFIG_VIDEO_BUFFER_POOL_NUM_MAX,
-		  CONFIG_VIDEO_BUFFER_POOL_ALIGN);
+K_HEAP_DEFINE(video_buffer_pool,
+	      CONFIG_VIDEO_BUFFER_POOL_SZ_MAX *
+	      CONFIG_VIDEO_BUFFER_POOL_NUM_MAX);
 
 static struct video_buffer video_buf[CONFIG_VIDEO_BUFFER_POOL_NUM_MAX];
-static struct k_mem_block video_block[CONFIG_VIDEO_BUFFER_POOL_NUM_MAX];
+
+struct mem_block {
+	void *data;
+};
+
+static struct mem_block video_block[CONFIG_VIDEO_BUFFER_POOL_NUM_MAX];
 
 struct video_buffer *video_buffer_alloc(size_t size)
 {
@@ -36,7 +39,8 @@ struct video_buffer *video_buffer_alloc(size_t size)
 	}
 
 	/* Alloc buffer memory */
-	if (k_mem_pool_alloc(&video_buffer_pool, block, size, K_FOREVER)) {
+	block->data = k_heap_alloc(&video_buffer_pool, size, K_FOREVER);
+	if (block->data == NULL) {
 		return NULL;
 	}
 
@@ -49,7 +53,7 @@ struct video_buffer *video_buffer_alloc(size_t size)
 
 void video_buffer_release(struct video_buffer *vbuf)
 {
-	struct k_mem_block *block = NULL;
+	struct mem_block *block = NULL;
 	int i;
 
 	/* vbuf to block */
@@ -61,5 +65,5 @@ void video_buffer_release(struct video_buffer *vbuf)
 	}
 
 	vbuf->buffer = NULL;
-	k_mem_pool_free(block);
+	k_heap_free(&video_buffer_pool, block->data);
 }
@@ -979,7 +979,7 @@ extern const struct net_buf_data_cb net_buf_var_cb;
  */
 #define NET_BUF_POOL_VAR_DEFINE(_name, _count, _data_size, _destroy)          \
 	static struct net_buf _net_buf_##_name[_count] __noinit;              \
-	Z_MEM_POOL_DEFINE(net_buf_mem_pool_##_name, 16, _data_size, 1, 4);    \
+	K_HEAP_DEFINE(net_buf_mem_pool_##_name, _data_size);                  \
 	static const struct net_buf_data_alloc net_buf_data_alloc_##_name = { \
 		.cb = &net_buf_var_cb,                                        \
 		.alloc_data = &net_buf_mem_pool_##_name,                      \
@@ -8,17 +8,15 @@
 #include <zephyr.h>
 #include <init.h>
 
-Z_MEM_POOL_DEFINE(lvgl_mem_pool,
-		  CONFIG_LVGL_MEM_POOL_MIN_SIZE,
-		  CONFIG_LVGL_MEM_POOL_MAX_SIZE,
-		  CONFIG_LVGL_MEM_POOL_NUMBER_BLOCKS, 4);
+K_HEAP_DEFINE(lvgl_mem_pool, CONFIG_LVGL_MEM_POOL_MAX_SIZE *
+	      CONFIG_LVGL_MEM_POOL_NUMBER_BLOCKS);
 
 void *lvgl_malloc(size_t size)
 {
-	return z_mem_pool_malloc(&lvgl_mem_pool, size);
+	return k_heap_alloc(&lvgl_mem_pool, size, K_NO_WAIT);
 }
 
 void lvgl_free(void *ptr)
 {
-	k_free(ptr);
+	k_heap_free(&lvgl_mem_pool, ptr);
 }
@@ -511,8 +511,7 @@ K_THREAD_DEFINE(app_thread, STACK_SIZE,
 		start_app, NULL, NULL, NULL,
 		THREAD_PRIORITY, K_USER, -1);
 
-static Z_MEM_POOL_DEFINE(app_mem_pool, sizeof(uintptr_t), 1024,
-			 2, sizeof(uintptr_t));
+static K_HEAP_DEFINE(app_mem_pool, 1024 * 2);
 #endif
 
 void main(void)
@@ -534,7 +533,7 @@ void main(void)
 
 	k_mem_domain_init(&app_domain, ARRAY_SIZE(parts), parts);
 	k_mem_domain_add_thread(&app_domain, app_thread);
-	z_thread_resource_pool_assign(app_thread, &app_mem_pool);
+	k_thread_heap_assign(app_thread, &app_mem_pool);
 
 	k_thread_start(app_thread);
 	k_thread_join(app_thread, K_FOREVER);
@@ -76,7 +76,6 @@ enum evt_t {
 
 struct app_evt_t {
 	sys_snode_t node;
-	struct k_mem_block block;
 	enum evt_t event_type;
 };
 
@@ -85,12 +84,11 @@ struct app_evt_t {
 #define FIFO_ELEM_COUNT		255
 #define FIFO_ELEM_ALIGN		sizeof(unsigned int)
 
-Z_MEM_POOL_DEFINE(event_elem_pool, FIFO_ELEM_MIN_SZ, FIFO_ELEM_MAX_SZ,
-		  FIFO_ELEM_COUNT, FIFO_ELEM_ALIGN);
+K_HEAP_DEFINE(event_elem_pool, FIFO_ELEM_MAX_SZ * FIFO_ELEM_COUNT + 256);
 
 static inline void app_evt_free(struct app_evt_t *ev)
 {
-	z_mem_pool_free(&ev->block);
+	k_heap_free(&event_elem_pool, ev);
 }
 
 static inline void app_evt_put(struct app_evt_t *ev)
@@ -117,21 +115,19 @@ static inline void app_evt_flush(void)
 
 static inline struct app_evt_t *app_evt_alloc(void)
 {
-	int ret;
 	struct app_evt_t *ev;
-	struct k_mem_block block;
 
-	ret = z_mem_pool_alloc(&event_elem_pool, &block,
-			       sizeof(struct app_evt_t),
-			       K_NO_WAIT);
-	if (ret < 0) {
+	ev = k_heap_alloc(&event_elem_pool,
+			  sizeof(struct app_evt_t),
+			  K_NO_WAIT);
+	if (ev == NULL) {
 		LOG_ERR("APP event allocation failed!");
 		app_evt_flush();
 
-		ret = z_mem_pool_alloc(&event_elem_pool, &block,
-				       sizeof(struct app_evt_t),
-				       K_NO_WAIT);
-		if (ret < 0) {
+		ev = k_heap_alloc(&event_elem_pool,
+				  sizeof(struct app_evt_t),
+				  K_NO_WAIT);
+		if (ev == NULL) {
 			LOG_ERR("APP event memory corrupted.");
 			__ASSERT_NO_MSG(0);
 			return NULL;
@@ -139,9 +135,6 @@ static inline struct app_evt_t *app_evt_alloc(void)
 		return NULL;
 	}
 
-	ev = (struct app_evt_t *)block.data;
-	ev->block = block;
-
 	return ev;
 }
 
@@ -21,7 +21,7 @@ LOG_MODULE_REGISTER(app_a);
 /* Resource pool for allocations made by the kernel on behalf of system
  * calls. Needed for k_queue_alloc_append()
  */
-Z_MEM_POOL_DEFINE(app_a_resource_pool, 32, 256, 5, 4);
+K_HEAP_DEFINE(app_a_resource_pool, 256 * 5 + 128);
 
 /* Define app_a_partition, where all globals for this app will be routed.
  * The partition starting address and size are populated by build system
@@ -213,7 +213,7 @@ void app_a_entry(void *p1, void *p2, void *p3)
 	/* Assign a resource pool to serve for kernel-side allocations on
 	 * behalf of application A. Needed for k_queue_alloc_append().
 	 */
-	z_thread_resource_pool_assign(k_current_get(), &app_a_resource_pool);
+	k_thread_heap_assign(k_current_get(), &app_a_resource_pool);
 
 	/* Set the callback function for the sample driver. This has to be
 	 * done from supervisor mode, as this code will run in supervisor
@@ -16,7 +16,7 @@ LOG_MODULE_REGISTER(app_b);
 /* Resource pool for allocations made by the kernel on behalf of system
 * calls. Needed for k_queue_alloc_append()
 */
-Z_MEM_POOL_DEFINE(app_b_resource_pool, 32, 256, 4, 4);
+K_HEAP_DEFINE(app_b_resource_pool, 256 * 4 + 128);
 
 /* Define app_b_partition, where all globals for this app will be routed.
 * The partition starting address and size are populated by build system
@@ -86,7 +86,7 @@ void app_b_entry(void *p1, void *p2, void *p3)
 	/* Assign a resource pool to serve for kernel-side allocations on
 	 * behalf of application A. Needed for k_queue_alloc_append().
 	 */
-	z_thread_resource_pool_assign(k_current_get(), &app_b_resource_pool);
+	k_thread_heap_assign(k_current_get(), &app_b_resource_pool);
 
 	/* We are about to drop to user mode and become the monitor thread.
 	 * Grant ourselves access to the kernel objects we need for
@@ -26,7 +26,7 @@
 struct lfs_file_data {
 	struct lfs_file file;
 	struct lfs_file_config config;
-	struct k_mem_block cache_block;
+	void *cache_block;
 };
 
 #define LFS_FILEP(fp) (&((struct lfs_file_data *)(fp->filep))->file)
@@ -47,10 +47,9 @@ BUILD_ASSERT(CONFIG_FS_LITTLEFS_CACHE_SIZE >= 4);
 #define CONFIG_FS_LITTLEFS_FC_MEM_POOL_NUM_BLOCKS CONFIG_FS_LITTLEFS_NUM_FILES
 #endif
 
-Z_MEM_POOL_DEFINE(file_cache_pool,
-		  CONFIG_FS_LITTLEFS_FC_MEM_POOL_MIN_SIZE,
-		  CONFIG_FS_LITTLEFS_FC_MEM_POOL_MAX_SIZE,
-		  CONFIG_FS_LITTLEFS_FC_MEM_POOL_NUM_BLOCKS, 4);
+K_HEAP_DEFINE(file_cache_pool,
+	      CONFIG_FS_LITTLEFS_FC_MEM_POOL_MAX_SIZE *
+	      CONFIG_FS_LITTLEFS_FC_MEM_POOL_NUM_BLOCKS);
 
 static inline void fs_lock(struct fs_littlefs *fs)
 {
@@ -175,7 +174,7 @@ static void release_file_data(struct fs_file_t *fp)
 	struct lfs_file_data *fdp = fp->filep;
 
 	if (fdp->config.buffer) {
-		z_mem_pool_free(&fdp->cache_block);
+		k_heap_free(&file_cache_pool, fdp->cache_block);
 	}
 
 	k_mem_slab_free(&file_data_pool, &fp->filep);
@@ -213,14 +212,14 @@ static int littlefs_open(struct fs_file_t *fp, const char *path,
 
 	memset(fdp, 0, sizeof(*fdp));
 
-	ret = z_mem_pool_alloc(&file_cache_pool, &fdp->cache_block,
-			       lfs->cfg->cache_size, K_NO_WAIT);
-	LOG_DBG("alloc %u file cache: %d", lfs->cfg->cache_size, ret);
-	if (ret != 0) {
+	fdp->cache_block = k_heap_alloc(&file_cache_pool,
+					lfs->cfg->cache_size, K_NO_WAIT);
+	if (fdp->cache_block == NULL) {
+		ret = -ENOMEM;
 		goto out;
 	}
 
-	fdp->config.buffer = fdp->cache_block.data;
+	fdp->config.buffer = fdp->cache_block;
 	path = fs_impl_strip_prefix(path, fp->mp);
 
 	fs_lock(fs);
@@ -96,21 +96,17 @@ static uint8_t *mem_pool_data_alloc(struct net_buf *buf, size_t *size,
 				    k_timeout_t timeout)
 {
 	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
-	struct k_mem_pool *pool = buf_pool->alloc->alloc_data;
-	struct k_mem_block block;
+	struct k_heap *pool = buf_pool->alloc->alloc_data;
 	uint8_t *ref_count;
 
-	/* Reserve extra space for k_mem_block_id and ref-count (uint8_t) */
-	if (z_mem_pool_alloc(pool, &block,
-			     sizeof(struct k_mem_block_id) + 1 + *size,
-			     timeout)) {
+	/* Reserve extra space for a ref-count (uint8_t) */
+	void *b = k_heap_alloc(pool, 1 + *size, timeout);
+
+	if (b == NULL) {
 		return NULL;
 	}
 
-	/* save the block descriptor info at the start of the actual block */
-	memcpy(block.data, &block.id, sizeof(block.id));
-
-	ref_count = (uint8_t *)block.data + sizeof(block.id);
+	ref_count = (uint8_t *)b;
 	*ref_count = 1U;
 
 	/* Return pointer to the byte following the ref count */
@@ -119,7 +115,8 @@ static uint8_t *mem_pool_data_alloc(struct net_buf *buf, size_t *size,
 
 static void mem_pool_data_unref(struct net_buf *buf, uint8_t *data)
 {
-	struct k_mem_block_id id;
+	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
+	struct k_heap *pool = buf_pool->alloc->alloc_data;
 	uint8_t *ref_count;
 
 	ref_count = data - 1;
@@ -128,8 +125,7 @@ static void mem_pool_data_unref(struct net_buf *buf, uint8_t *data)
 	}
 
 	/* Need to copy to local variable due to alignment */
-	memcpy(&id, ref_count - sizeof(id), sizeof(id));
-	z_mem_pool_free_id(&id);
+	k_heap_free(pool, ref_count);
 }
 
 const struct net_buf_data_cb net_buf_var_cb = {
@@ -20,9 +20,7 @@
 #endif
 
 
-Z_MEM_POOL_DEFINE(gcov_heap_mem_pool,
-		  MALLOC_MIN_BLOCK_SIZE,
-		  MALLOC_MAX_HEAP_SIZE, 1, 4);
+K_HEAP_DEFINE(gcov_heap, MALLOC_MAX_HEAP_SIZE);
 
 
 static struct gcov_info *gcov_info_head;
@@ -233,7 +231,7 @@ void gcov_coverage_dump(void)
 
 		size = calculate_buff_size(gcov_list);
 
-		buffer = (uint8_t *) z_mem_pool_malloc(&gcov_heap_mem_pool, size);
+		buffer = k_heap_alloc(&gcov_heap, size, K_NO_WAIT);
 		if (!buffer) {
 			printk("No Mem available to continue dump\n");
 			goto coverage_dump_end;
@@ -247,7 +245,7 @@ void gcov_coverage_dump(void)
 
 		dump_on_console(gcov_list->filename, buffer, size);
 
-		k_free(buffer);
+		k_heap_free(&gcov_heap, buffer);
 		gcov_list = gcov_list->next;
 	}
 coverage_dump_end:
@@ -10,9 +10,6 @@
 #define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)
 #define MAIL_LEN 64
 
-Z_MEM_POOL_DEFINE(mpooltx, 8, MAIL_LEN, 1, 4);
-Z_MEM_POOL_DEFINE(mpoolrx, 8, MAIL_LEN, 1, 4);
-
 static K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
 
 static struct k_thread tdata;
@@ -414,13 +414,13 @@ void test_syscall_context(void)
 	k_thread_user_mode_enter(test_syscall_context_user, NULL, NULL, NULL);
 }
 
-Z_MEM_POOL_DEFINE(test_pool, BUF_SIZE, BUF_SIZE, 4 * NR_THREADS, 4);
+K_HEAP_DEFINE(test_heap, BUF_SIZE * (4 * NR_THREADS));
 
 void test_main(void)
 {
 	sprintf(kernel_string, "this is a kernel string");
 	sprintf(user_string, "this is a user string");
-	z_thread_resource_pool_assign(k_current_get(), &test_pool);
+	k_thread_heap_assign(k_current_get(), &test_heap);
 
 	ztest_test_suite(syscalls,
 			 ztest_unit_test(test_string_nlen),
@@ -50,7 +50,8 @@ dummy_test(test_msgq_user_purge_when_put);
 #else
 #define MAX_SZ	128
 #endif
-Z_MEM_POOL_DEFINE(test_pool, 128, MAX_SZ, 2, 4);
+
+K_HEAP_DEFINE(test_pool, MAX_SZ * 2);
 
 extern struct k_msgq kmsgq;
 extern struct k_msgq msgq;
@@ -64,7 +65,7 @@ void test_main(void)
 	k_thread_access_grant(k_current_get(), &kmsgq, &msgq, &end_sema,
 			      &tdata, &tstack);
 
-	z_thread_resource_pool_assign(k_current_get(), &test_pool);
+	k_thread_heap_assign(k_current_get(), &test_pool);
 
 	ztest_test_suite(msgq_api,
 			 ztest_1cpu_unit_test(test_msgq_thread),
@@ -38,18 +38,6 @@ extern struct k_sem end_sema;
 extern struct k_stack tstack;
 extern struct k_thread tdata;
 
-#ifndef CONFIG_USERSPACE
-#define dummy_test(_name) \
-	static void _name(void) \
-	{ \
-		ztest_test_skip(); \
-	}
-
-dummy_test(test_pipe_user_thread2thread);
-dummy_test(test_pipe_user_put_fail);
-dummy_test(test_pipe_user_get_fail);
-#endif /* !CONFIG_USERSPACE */
-
 /*test case main entry*/
 void test_main(void)
 {
@@ -59,13 +47,9 @@ void test_main(void)
 
 	ztest_test_suite(pipe_api,
 			 ztest_1cpu_unit_test(test_pipe_thread2thread),
-			 ztest_1cpu_user_unit_test(test_pipe_user_thread2thread),
-			 ztest_1cpu_user_unit_test(test_pipe_user_put_fail),
-			 ztest_user_unit_test(test_pipe_user_get_fail),
 			 ztest_1cpu_unit_test(test_pipe_put_fail),
 			 ztest_unit_test(test_pipe_get_fail),
 			 ztest_unit_test(test_half_pipe_get_put),
-			 ztest_1cpu_unit_test(test_pipe_alloc),
 			 ztest_unit_test(test_pipe_reader_wait),
 			 ztest_unit_test(test_pipe_avail_r_lt_w),
 			 ztest_unit_test(test_pipe_avail_w_lt_r),
@@ -90,23 +90,6 @@ static void tpipe_thread_thread(struct k_pipe *ppipe)
 	k_thread_abort(tid);
 }
 
-static void tpipe_kthread_to_kthread(struct k_pipe *ppipe)
-{
-	/**TESTPOINT: thread-thread data passing via pipe*/
-	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
-				      tThread_entry, ppipe, NULL, NULL,
-				      K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
-
-	tpipe_put(ppipe, K_NO_WAIT);
-	k_sem_take(&end_sema, K_FOREVER);
-
-	k_sem_take(&end_sema, K_FOREVER);
-	tpipe_get(ppipe, K_FOREVER);
-
-	/* clear the spawned thread avoid side effect */
-	k_thread_abort(tid);
-}
-
 static void tpipe_put_no_wait(struct k_pipe *ppipe)
 {
 	size_t to_wt, wt_byte = 0;
@@ -229,28 +212,6 @@ void test_half_pipe_get_put(void)
 	k_thread_abort(tid);
 }
 
-/**
- * @brief Test Initialization and buffer allocation of pipe,
- * with various parameters
- * @see k_pipe_alloc_init(), k_pipe_cleanup()
- */
-void test_pipe_alloc(void)
-{
-	int ret;
-
-	zassert_false(k_pipe_alloc_init(&pipe_test_alloc, PIPE_LEN), NULL);
-
-	tpipe_kthread_to_kthread(&pipe_test_alloc);
-	k_pipe_cleanup(&pipe_test_alloc);
-
-	zassert_false(k_pipe_alloc_init(&pipe_test_alloc, 0), NULL);
-	k_pipe_cleanup(&pipe_test_alloc);
-
-	ret = k_pipe_alloc_init(&pipe_test_alloc, 2048);
-	zassert_true(ret == -ENOMEM,
-		     "resource pool max block size is not smaller then requested buffer");
-}
-
 /**
  * @brief Test pending reader in pipe
  * @see k_pipe_put(), k_pipe_get()
@@ -20,14 +20,14 @@ extern void test_poll_grant_access(void);
 #define MAX_SZ	128
 #endif
 
-Z_MEM_POOL_DEFINE(test_pool, 128, MAX_SZ, 4, 4);
+K_HEAP_DEFINE(test_heap, MAX_SZ * 4);
 
 /*test case main entry*/
 void test_main(void)
 {
 	test_poll_grant_access();
 
-	z_thread_resource_pool_assign(k_current_get(), &test_pool);
+	k_thread_heap_assign(k_current_get(), &test_heap);
 
 	ztest_test_suite(poll_api,
 			 ztest_1cpu_user_unit_test(test_poll_no_wait),
@@ -25,12 +25,12 @@ dummy_test(test_auto_free);
 #else
 #define MAX_SZ	96
 #endif
-Z_MEM_POOL_DEFINE(test_pool, 16, MAX_SZ, 4, 4);
+K_HEAP_DEFINE(test_pool, MAX_SZ * 4 + 128);
 
 /*test case main entry*/
 void test_main(void)
 {
-	z_thread_resource_pool_assign(k_current_get(), &test_pool);
+	k_thread_heap_assign(k_current_get(), &test_pool);
 
 	ztest_test_suite(queue_api,
 			 ztest_1cpu_unit_test(test_queue_supv_to_user),
|
||||||
extern void test_multiple_queues(void);
|
extern void test_multiple_queues(void);
|
||||||
extern void test_access_kernel_obj_with_priv_data(void);
|
extern void test_access_kernel_obj_with_priv_data(void);
|
||||||
|
|
||||||
extern struct k_mem_pool test_pool;
|
extern struct k_heap test_pool;
|
||||||
|
|
||||||
typedef struct qdata {
|
typedef struct qdata {
|
||||||
sys_snode_t snode;
|
sys_snode_t snode;
|
||||||
|
|
|
@@ -11,8 +11,8 @@
 /**TESTPOINT: init via K_QUEUE_DEFINE*/
 K_QUEUE_DEFINE(kqueue);
 
-Z_MEM_POOL_DEFINE(mem_pool_fail, 4, 8, 1, 4);
-Z_MEM_POOL_DEFINE(mem_pool_pass, 4, 64, 4, 4);
+K_HEAP_DEFINE(mem_pool_fail, 8 + 128);
+K_HEAP_DEFINE(mem_pool_pass, 64 * 4 + 128);
 
 struct k_queue queue;
 static qdata_t data[LIST_LEN];
@@ -260,7 +260,7 @@ void test_queue_get_2threads(void)
 
 static void tqueue_alloc(struct k_queue *pqueue)
 {
-	z_thread_resource_pool_assign(k_current_get(), NULL);
+	k_thread_heap_assign(k_current_get(), NULL);
 
 	/* Alloc append without resource pool */
 	k_queue_alloc_append(pqueue, (void *)&data_append);
@@ -269,7 +269,7 @@ static void tqueue_alloc(struct k_queue *pqueue)
 	zassert_false(k_queue_remove(pqueue, &data_append), NULL);
 
 	/* Assign resource pool of lower size */
-	z_thread_resource_pool_assign(k_current_get(), &mem_pool_fail);
+	k_thread_heap_assign(k_current_get(), &mem_pool_fail);
 
 	/* Prepend to the queue, but fails because of
 	 * insufficient memory
@@ -284,8 +284,7 @@ static void tqueue_alloc(struct k_queue *pqueue)
 	zassert_true(k_queue_is_empty(pqueue), NULL);
 
 	/* Assign resource pool of sufficient size */
-	z_thread_resource_pool_assign(k_current_get(),
-				      &mem_pool_pass);
+	k_thread_heap_assign(k_current_get(), &mem_pool_pass);
 
 	zassert_false(k_queue_alloc_prepend(pqueue, (void *)&data_prepend),
 		      NULL);
@@ -306,14 +305,12 @@ static void tqueue_alloc(struct k_queue *pqueue)
  */
 void test_queue_alloc(void)
 {
-	struct k_mem_block block;
-
 	/* The mem_pool_fail pool is supposed to be too small to
 	 * succeed any allocations, but in fact with the heap backend
 	 * there's some base minimal memory in there that can be used.
 	 * Make sure it's really truly full.
 	 */
-	while (z_mem_pool_alloc(&mem_pool_fail, &block, 1, K_NO_WAIT) == 0) {
+	while (k_heap_alloc(&mem_pool_fail, 1, K_NO_WAIT) != NULL) {
 	}
 
 	k_queue_init(&queue);
@@ -185,7 +185,6 @@ void test_queue_alloc_append_user(void)
 /**
 * @brief Test to verify free of allocated elements of queue
 * @ingroup kernel_queue_tests
-* @see z_mem_pool_alloc(), z_mem_pool_free()
 */
 void test_auto_free(void)
 {
@@ -196,20 +195,12 @@ void test_auto_free(void)
 	 * threads with permissions exit.
 	 */
 
-	struct k_mem_block b[4];
+	void *b[4];
 	int i;
 
 	for (i = 0; i < 4; i++) {
-		zassert_false(z_mem_pool_alloc(&test_pool, &b[i], 64,
-					       K_FOREVER),
-			      "memory not auto released!");
-	}
-
-	/* Free everything so that the pool is back to a pristine state in
-	 * case we want to use it again.
-	 */
-	for (i = 0; i < 4; i++) {
-		z_mem_pool_free(&b[i]);
+		b[i] = k_heap_alloc(&test_pool, 64, K_FOREVER);
+		zassert_true(b[i] != NULL, "memory not auto released!");
 	}
 }
 
@@ -66,7 +66,7 @@ static struct k_sem end_sema;
 
 
 
-Z_MEM_POOL_DEFINE(test_pool, 128, 128, 2, 4);
+K_HEAP_DEFINE(test_pool, 128 * 3);
 
 extern struct k_stack kstack;
 extern struct k_stack stack;
@@ -335,7 +335,7 @@ void test_main(void)
 			      &end_sema, &threadstack, &kstack, &stack, &thread_data1,
 			      &end_sema1, &threadstack1);
 
-	z_thread_resource_pool_assign(k_current_get(), &test_pool);
+	k_thread_heap_assign(k_current_get(), &test_pool);
 
 	ztest_test_suite(test_stack_usage,
 			 ztest_unit_test(test_stack_thread2thread),