cache: Fix libraries and drivers
Fix the usage to be compliant with the new cache API.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
commit cc427b4bb0
parent 74e1b1781a
6 changed files with 27 additions and 37 deletions
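The change is mechanical: each flag-based sys_cache_data_range(addr, size, K_CACHE_WB/K_CACHE_INVD) call becomes an operation-specific sys_cache_data_flush_range() or sys_cache_data_invd_range() call, and cache_instr_enable() becomes sys_cache_instr_enable(). A minimal sketch of the pattern, assuming a driver-owned buffer shared with a DMA-capable device; the buffer buf and the function dma_example() are illustrative only and not part of this diff:

#include <zephyr/kernel.h>
#include <zephyr/cache.h>

static uint8_t buf[64]; /* hypothetical buffer shared with a DMA-capable device */

void dma_example(void)
{
	/* Before handing buf to the device: write back dirty cache lines so
	 * the device sees what the CPU wrote.
	 * Old API: sys_cache_data_range(buf, sizeof(buf), K_CACHE_WB);
	 */
	sys_cache_data_flush_range(buf, sizeof(buf));

	/* ... device writes into buf ... */

	/* Before the CPU reads buf back: invalidate stale cache lines so the
	 * CPU sees what the device wrote.
	 * Old API: sys_cache_data_range(buf, sizeof(buf), K_CACHE_INVD);
	 */
	sys_cache_data_invd_range(buf, sizeof(buf));
}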
@@ -517,7 +517,7 @@ int can_mcan_init(const struct device *dev)
 	can->txbtie = CAN_MCAN_TXBTIE_TIE;
 
 	memset32_volatile(msg_ram, 0, sizeof(struct can_mcan_msg_sram));
-	sys_cache_data_range(msg_ram, sizeof(struct can_mcan_msg_sram), K_CACHE_WB);
+	sys_cache_data_flush_range(msg_ram, sizeof(struct can_mcan_msg_sram));
 
 	return 0;
 }
@@ -550,9 +550,8 @@ static void can_mcan_tc_event_handler(const struct device *dev)
 	while (can->txefs & CAN_MCAN_TXEFS_EFFL) {
 		event_idx = (can->txefs & CAN_MCAN_TXEFS_EFGI) >>
 			    CAN_MCAN_TXEFS_EFGI_POS;
-		sys_cache_data_range((void *)&msg_ram->tx_event_fifo[event_idx],
-				     sizeof(struct can_mcan_tx_event_fifo),
-				     K_CACHE_INVD);
+		sys_cache_data_invd_range((void *)&msg_ram->tx_event_fifo[event_idx],
+					  sizeof(struct can_mcan_tx_event_fifo));
 		tx_event = &msg_ram->tx_event_fifo[event_idx];
 		tx_idx = tx_event->mm.idx;
 		/* Acknowledge TX event */
@@ -624,9 +623,8 @@ static void can_mcan_get_message(const struct device *dev,
 	get_idx = (*fifo_status_reg & CAN_MCAN_RXF0S_F0GI) >>
 		  CAN_MCAN_RXF0S_F0GI_POS;
 
-	sys_cache_data_range((void *)&fifo[get_idx].hdr,
-			     sizeof(struct can_mcan_rx_fifo_hdr),
-			     K_CACHE_INVD);
+	sys_cache_data_invd_range((void *)&fifo[get_idx].hdr,
+				  sizeof(struct can_mcan_rx_fifo_hdr));
 	memcpy32_volatile(&hdr, &fifo[get_idx].hdr,
 			  sizeof(struct can_mcan_rx_fifo_hdr));
 
@@ -670,9 +668,8 @@ static void can_mcan_get_message(const struct device *dev,
 	data_length = can_dlc_to_bytes(frame.dlc);
 	if (data_length <= sizeof(frame.data)) {
 		/* Data needs to be written in 32 bit blocks! */
-		sys_cache_data_range((void *)fifo[get_idx].data_32,
-				     ROUND_UP(data_length, sizeof(uint32_t)),
-				     K_CACHE_INVD);
+		sys_cache_data_invd_range((void *)fifo[get_idx].data_32,
+					  ROUND_UP(data_length, sizeof(uint32_t)));
 		memcpy32_volatile(frame.data_32, fifo[get_idx].data_32,
 				  ROUND_UP(data_length, sizeof(uint32_t)));
 
@@ -898,9 +895,9 @@ int can_mcan_send(const struct device *dev,
 	memcpy32_volatile(&msg_ram->tx_buffer[put_idx].hdr, &tx_hdr, sizeof(tx_hdr));
 	memcpy32_volatile(msg_ram->tx_buffer[put_idx].data_32, frame->data_32,
 			  ROUND_UP(data_length, 4));
-	sys_cache_data_range((void *)&msg_ram->tx_buffer[put_idx].hdr, sizeof(tx_hdr), K_CACHE_WB);
-	sys_cache_data_range((void *)&msg_ram->tx_buffer[put_idx].data_32, ROUND_UP(data_length, 4),
-			     K_CACHE_WB);
+	sys_cache_data_flush_range((void *)&msg_ram->tx_buffer[put_idx].hdr, sizeof(tx_hdr));
+	sys_cache_data_flush_range((void *)&msg_ram->tx_buffer[put_idx].data_32,
+				   ROUND_UP(data_length, 4));
 
 	data->tx_fin_cb[put_idx] = callback;
 	data->tx_fin_cb_arg[put_idx] = user_data;
@@ -971,9 +968,8 @@ int can_mcan_add_rx_filter_std(const struct device *dev,
 
 	memcpy32_volatile(&msg_ram->std_filt[filter_id], &filter_element,
 			  sizeof(struct can_mcan_std_filter));
-	sys_cache_data_range((void *)&msg_ram->std_filt[filter_id],
-			     sizeof(struct can_mcan_std_filter),
-			     K_CACHE_WB);
+	sys_cache_data_flush_range((void *)&msg_ram->std_filt[filter_id],
+				   sizeof(struct can_mcan_std_filter));
 
 	k_mutex_unlock(&data->inst_mutex);
 
@@ -1036,9 +1032,8 @@ static int can_mcan_add_rx_filter_ext(const struct device *dev,
 
 	memcpy32_volatile(&msg_ram->ext_filt[filter_id], &filter_element,
 			  sizeof(struct can_mcan_ext_filter));
-	sys_cache_data_range((void *)&msg_ram->ext_filt[filter_id],
-			     sizeof(struct can_mcan_ext_filter),
-			     K_CACHE_WB);
+	sys_cache_data_flush_range((void *)&msg_ram->ext_filt[filter_id],
+				   sizeof(struct can_mcan_ext_filter));
 
 	k_mutex_unlock(&data->inst_mutex);
 
@@ -1100,15 +1095,13 @@ void can_mcan_remove_rx_filter(const struct device *dev, int filter_id)
 
 		memset32_volatile(&msg_ram->ext_filt[filter_id], 0,
 				  sizeof(struct can_mcan_ext_filter));
-		sys_cache_data_range((void *)&msg_ram->ext_filt[filter_id],
-				     sizeof(struct can_mcan_ext_filter),
-				     K_CACHE_WB);
+		sys_cache_data_flush_range((void *)&msg_ram->ext_filt[filter_id],
+					   sizeof(struct can_mcan_ext_filter));
 	} else {
 		memset32_volatile(&msg_ram->std_filt[filter_id], 0,
 				  sizeof(struct can_mcan_std_filter));
-		sys_cache_data_range((void *)&msg_ram->std_filt[filter_id],
-				     sizeof(struct can_mcan_std_filter),
-				     K_CACHE_WB);
+		sys_cache_data_flush_range((void *)&msg_ram->std_filt[filter_id],
+					   sizeof(struct can_mcan_std_filter));
 	}
 
 	k_mutex_unlock(&data->inst_mutex);
@@ -161,7 +161,7 @@ static int dwmac_send(const struct device *dev, struct net_pkt *pkt)
 			k_sem_give(&p->free_tx_descs);
 			goto abort;
 		}
-		sys_cache_data_range(pinned->data, pinned->len, K_CACHE_WB);
+		sys_cache_data_flush_range(pinned->data, pinned->len);
 		p->tx_frags[d_idx] = pinned;
 		LOG_DBG("d[%d]: frag %p pinned %p len %d", d_idx,
 			frag->data, pinned->data, pinned->len);
@@ -367,7 +367,7 @@ static void dwmac_rx_refill_thread(void *arg1, void *unused1, void *unused2)
 			}
 			LOG_DBG("new frag[%d] at %p", d_idx, frag->data);
 			__ASSERT(frag->size == RX_FRAG_SIZE, "");
-			sys_cache_data_range(frag->data, frag->size, K_CACHE_INVD);
+			sys_cache_data_invd_range(frag->data, frag->size);
 			p->rx_frags[d_idx] = frag;
 		} else {
 			LOG_DBG("reusing frag[%d] at %p", d_idx, frag->data);
@@ -44,9 +44,8 @@ void dwmac_platform_init(struct dwmac_priv *p)
 	uintptr_t desc_phys_addr;
 
 	/* make sure no valid cache lines map to the descriptor area */
-	sys_cache_data_range(dwmac_tx_rx_descriptors,
-			     sizeof(dwmac_tx_rx_descriptors),
-			     K_CACHE_INVD);
+	sys_cache_data_invd_range(dwmac_tx_rx_descriptors,
+				  sizeof(dwmac_tx_rx_descriptors));
 
 	desc_phys_addr = z_mem_phys_addr(dwmac_tx_rx_descriptors);
 
@@ -47,7 +47,7 @@ static inline void cache_wb(void *data, size_t len, uint32_t flags)
 {
 	if (IS_ENABLED(CONFIG_SPSC_PBUF_CACHE_ALWAYS) ||
 	    (IS_ENABLED(CONFIG_SPSC_PBUF_CACHE_FLAG) && (flags & SPSC_PBUF_CACHE))) {
-		sys_cache_data_range(data, len, K_CACHE_WB);
+		sys_cache_data_flush_range(data, len);
 	}
 }
 
@@ -55,7 +55,7 @@ static inline void cache_inv(void *data, size_t len, uint32_t flags)
 {
 	if (IS_ENABLED(CONFIG_SPSC_PBUF_CACHE_ALWAYS) ||
 	    (IS_ENABLED(CONFIG_SPSC_PBUF_CACHE_FLAG) && (flags & SPSC_PBUF_CACHE))) {
-		sys_cache_data_range(data, len, K_CACHE_INVD);
+		sys_cache_data_invd_range(data, len);
 	}
 }
 
@@ -99,7 +99,7 @@ void z_arm_platform_init(void)
 		(void)memset(__bss_nc_start__, 0, __bss_nc_end__ - __bss_nc_start__);
 	}
 
-	cache_instr_enable();
+	sys_cache_instr_enable();
}
 
 void aspeed_print_abr_wdt_mode(void)
@@ -39,8 +39,7 @@ static void virtio_set_status(struct virtio_device *p_vdev, unsigned char status
 	vr = CONTAINER_OF(p_vdev, struct ipc_static_vrings, vdev);
 
 	sys_write8(status, vr->status_reg_addr);
-	sys_cache_data_range((void *) vr->status_reg_addr,
-			     sizeof(status), K_CACHE_WB);
+	sys_cache_data_flush_range((void *) vr->status_reg_addr, sizeof(status));
 }
 
 static uint32_t virtio_get_features(struct virtio_device *vdev)
@@ -58,8 +57,7 @@ static unsigned char virtio_get_status(struct virtio_device *p_vdev)
 	ret = VIRTIO_CONFIG_STATUS_DRIVER_OK;
 
 	if (p_vdev->role == VIRTIO_DEV_DEVICE) {
-		sys_cache_data_range((void *) vr->status_reg_addr,
-				     sizeof(ret), K_CACHE_INVD);
+		sys_cache_data_invd_range((void *) vr->status_reg_addr, sizeof(ret));
 		ret = sys_read8(vr->status_reg_addr);
 	}
 