xtensa: use sys_cache API instead of custom interfaces

Use sys_cache instead of custom and internal APIs.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>

commit 6388f5f106
parent aa4f2bc81e
25 changed files with 83 additions and 74 deletions
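The change is mechanical: every Xtensa-specific cache call from <zephyr/arch/xtensa/cache.h> is replaced by its generic data-cache equivalent from <zephyr/cache.h>. A minimal sketch of the mapping follows; the helper functions, buf, and len are placeholder names for illustration and are not part of the diff.

/*
 * Mapping applied throughout this commit (sketch, not part of the diff):
 *
 *   z_xtensa_cache_flush(p, n)      ->  sys_cache_data_flush_range(p, n)
 *   z_xtensa_cache_inv(p, n)        ->  sys_cache_data_invd_range(p, n)
 *   z_xtensa_cache_flush_inv(p, n)  ->  sys_cache_data_flush_and_invd_range(p, n)
 *   z_xtensa_cache_flush_all()      ->  sys_cache_data_flush_all()
 *   z_xtensa_cache_inv_all()        ->  sys_cache_data_invd_all()
 *   z_xtensa_cache_flush_inv_all()  ->  sys_cache_data_flush_and_invd_all()
 */
#include <zephyr/kernel.h>
#include <zephyr/cache.h>   /* replaces <zephyr/arch/xtensa/cache.h> */

/* Hypothetical example: hand a buffer to a non-coherent DMA engine. */
static void publish_dma_buffer(void *buf, size_t len)
{
	/* Write dirty lines back so the DMA engine sees the data. */
	sys_cache_data_flush_range(buf, len);
}

/* Hypothetical example: read back a buffer the DMA engine just wrote. */
static void consume_dma_buffer(void *buf, size_t len)
{
	/* Drop stale lines before reading. */
	sys_cache_data_invd_range(buf, len);
}

On these SoCs the generic calls resolve to the underlying Xtensa cache operations, so the change is intended to be behavior-neutral; only the spelling and the include path move to the generic API.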
@@ -88,7 +88,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 #ifdef CONFIG_KERNEL_COHERENCE
 __ASSERT((((size_t)stack) % XCHAL_DCACHE_LINESIZE) == 0, "");
 __ASSERT((((size_t)stack_ptr) % XCHAL_DCACHE_LINESIZE) == 0, "");
-z_xtensa_cache_flush_inv(stack, (char *)stack_ptr - (char *)stack);
+sys_cache_data_flush_and_invd_range(stack, (char *)stack_ptr - (char *)stack);
 #endif
 }
@@ -13,7 +13,7 @@
 #ifndef _ASMLANGUAGE
 #include <kernel_internal.h>
 #include <string.h>
-#include <zephyr/arch/xtensa/cache.h>
+#include <zephyr/cache.h>
 #include <zsr.h>

 #ifdef __cplusplus
@@ -33,7 +33,7 @@ static ALWAYS_INLINE void arch_kernel_init(void)
 /* Make sure we don't have live data for unexpected cached
 * regions due to boot firmware
 */
-z_xtensa_cache_flush_inv_all();
+sys_cache_data_flush_and_invd_all();

 /* Our cache top stash location might have junk in it from a
 * pre-boot environment. Must be zero or valid!
@@ -115,7 +115,7 @@ static ALWAYS_INLINE void arch_cohere_stacks(struct k_thread *old_thread,
 * automatically overwritten as needed.
 */
 if (curr_cpu != new_thread->arch.last_cpu) {
-z_xtensa_cache_inv((void *)nsp, (nstack + nsz) - nsp);
+sys_cache_data_invd_range((void *)nsp, (nstack + nsz) - nsp);
 }
 old_thread->arch.last_cpu = curr_cpu;

@@ -143,8 +143,8 @@ static ALWAYS_INLINE void arch_cohere_stacks(struct k_thread *old_thread,
 * to the stack top stashed in a special register.
 */
 if (old_switch_handle != NULL) {
-z_xtensa_cache_flush((void *)osp, (ostack + osz) - osp);
-z_xtensa_cache_inv((void *)ostack, osp - ostack);
+sys_cache_data_flush_range((void *)osp, (ostack + osz) - osp);
+sys_cache_data_invd_range((void *)ostack, osp - ostack);
 } else {
 /* When in a switch, our current stack is the outbound
 * stack. Flush the single line containing the stack
@@ -155,8 +155,8 @@ static ALWAYS_INLINE void arch_cohere_stacks(struct k_thread *old_thread,
 */
 __asm__ volatile("mov %0, a1" : "=r"(osp));
 osp -= 16;
-z_xtensa_cache_flush((void *)osp, 1);
-z_xtensa_cache_inv((void *)ostack, osp - ostack);
+sys_cache_data_flush_range((void *)osp, 1);
+sys_cache_data_invd_range((void *)ostack, osp - ostack);

 uint32_t end = ostack + osz;
@@ -25,6 +25,7 @@
 #include <zephyr/drivers/mm/mm_drv_intel_adsp_mtl_tlb.h>
 #include <zephyr/drivers/mm/mm_drv_bank.h>
 #include <zephyr/debug/sparse.h>
+#include <zephyr/cache.h>

 static struct k_spinlock tlb_lock;
 extern struct k_spinlock sys_mm_drv_common_lock;
@@ -269,7 +270,7 @@ int sys_mm_drv_map_page(void *virt, uintptr_t phys, uint32_t flags)
 * Invalid the cache of the newly mapped virtual page to
 * avoid stale data.
 */
-z_xtensa_cache_inv(virt, CONFIG_MM_DRV_PAGE_SIZE);
+sys_cache_data_invd_range(virt, CONFIG_MM_DRV_PAGE_SIZE);

 k_spin_unlock(&tlb_lock, key);

@@ -356,7 +357,7 @@ int sys_mm_drv_unmap_page(void *virt)
 * Flush the cache to make sure the backing physical page
 * has the latest data.
 */
-z_xtensa_cache_flush(virt, CONFIG_MM_DRV_PAGE_SIZE);
+sys_cache_data_flush_range(virt, CONFIG_MM_DRV_PAGE_SIZE);

 entry_idx = get_tlb_entry_idx(va);

@@ -581,8 +582,8 @@ out:
 * flush the cache to make sure the backing physical
 * pages have the new data.
 */
-z_xtensa_cache_flush(virt_new, size);
-z_xtensa_cache_flush_inv(virt_old, size);
+sys_cache_data_flush_range(virt_new, size);
+sys_cache_data_flush_and_invd_range(virt_old, size);

 return ret;
 }
@@ -603,7 +604,7 @@ int sys_mm_drv_move_array(void *virt_old, size_t size, void *virt_new,
 * flush the cache to make sure the backing physical
 * pages have the new data.
 */
-z_xtensa_cache_flush(va_new, size);
+sys_cache_data_flush_range(va_new, size);

 return ret;
 }
@@ -722,7 +723,8 @@ static void adsp_mm_save_context(void *storage_buffer)
 * all cache data has been flushed before
 * do this for pages to remap only
 */
-z_xtensa_cache_inv(UINT_TO_POINTER(phys_addr), CONFIG_MM_DRV_PAGE_SIZE);
+sys_cache_data_invd_range(UINT_TO_POINTER(phys_addr),
+CONFIG_MM_DRV_PAGE_SIZE);

 /* Enable the translation in the TLB entry */
 entry |= TLB_ENABLE_BIT;
@@ -746,7 +748,7 @@ static void adsp_mm_save_context(void *storage_buffer)
 *((uint32_t *) location) = 0;
 location += sizeof(uint32_t);

-z_xtensa_cache_flush(
+sys_cache_data_flush_range(
 storage_buffer,
 (uint32_t)location - (uint32_t)storage_buffer);

@@ -788,7 +790,7 @@ __imr void adsp_mm_restore_context(void *storage_buffer)
 bmemcpy(UINT_TO_POINTER(phys_addr_uncached),
 location,
 CONFIG_MM_DRV_PAGE_SIZE);
-z_xtensa_cache_inv(UINT_TO_POINTER(phys_addr), CONFIG_MM_DRV_PAGE_SIZE);
+sys_cache_data_invd_range(UINT_TO_POINTER(phys_addr), CONFIG_MM_DRV_PAGE_SIZE);

 location += CONFIG_MM_DRV_PAGE_SIZE;
 phys_addr = *((uint32_t *) location);
@@ -29,6 +29,7 @@
 #include <zephyr/sys/check.h>
 #include <zephyr/sys/mem_manage.h>
 #include <zephyr/sys/util.h>
+#include <zephyr/cache.h>

 #include <soc.h>
 #include <adsp_memory.h>
@@ -132,7 +133,7 @@ int sys_mm_drv_map_page(void *virt, uintptr_t phys, uint32_t flags)
 * Invalid the cache of the newly mapped virtual page to
 * avoid stale data.
 */
-z_xtensa_cache_inv(virt, CONFIG_MM_DRV_PAGE_SIZE);
+sys_cache_data_invd_range(virt, CONFIG_MM_DRV_PAGE_SIZE);

 k_spin_unlock(&tlb_lock, key);

@@ -185,7 +186,7 @@ int sys_mm_drv_unmap_page(void *virt)
 * Flush the cache to make sure the backing physical page
 * has the latest data.
 */
-z_xtensa_cache_flush(virt, CONFIG_MM_DRV_PAGE_SIZE);
+sys_cache_data_flush_range(virt, CONFIG_MM_DRV_PAGE_SIZE);

 entry_idx = get_tlb_entry_idx(va);

@@ -302,7 +303,7 @@ int sys_mm_drv_move_region(void *virt_old, size_t size, void *virt_new,
 * flush the cache to make sure the backing physical
 * pages have the new data.
 */
-z_xtensa_cache_flush(va_new, size);
+sys_cache_data_flush_range(va_new, size);

 return ret;
 }
@@ -323,7 +324,7 @@ int sys_mm_drv_move_array(void *virt_old, size_t size, void *virt_new,
 * flush the cache to make sure the backing physical
 * pages have the new data.
 */
-z_xtensa_cache_flush(va_new, size);
+sys_cache_data_flush_range(va_new, size);

 return ret;
 }
@@ -79,7 +79,7 @@ static void intel_gna_interrupt_handler(const struct device *dev)
 if (k_msgq_get(&gna->request_queue, &pending_req, K_NO_WAIT) != 0) {
 LOG_ERR("Pending request queue is empty");
 } else {
-z_xtensa_cache_inv(pending_req.model->output,
+sys_cache_data_invd_range(pending_req.model->output,
 pending_req.output_len);
 /* copy output from the model buffer to application buffer */
 memcpy(pending_req.output, pending_req.model->output,
@@ -194,7 +194,7 @@ static int intel_gna_initialize(const struct device *dev)
 dev->name, gna_config_desc.vamaxaddr);

 /* flush cache */
-z_xtensa_cache_flush((void *)&gna_config_desc, sizeof(gna_config_desc));
+sys_cache_data_flush_range((void *)&gna_config_desc, sizeof(gna_config_desc));

 LOG_INF("%s: initialized (max %u models & max %u pending requests)",
 dev->name, GNA_MAX_NUM_MODELS,
@@ -334,7 +334,7 @@ static int intel_gna_register_model(const struct device *dev,

 intel_gna_setup_page_table(model->rw_region, rw_size,
 virtual_base);
-z_xtensa_cache_flush(model->rw_region, rw_size);
+sys_cache_data_flush_range(model->rw_region, rw_size);
 }

 if (model->ro_region == NULL) {
@@ -352,8 +352,8 @@ static int intel_gna_register_model(const struct device *dev,
 intel_gna_setup_page_table(ro_region, ro_size,
 (void *)((uint32_t)virtual_base + rw_size));

-z_xtensa_cache_flush(ro_region, ro_size);
-z_xtensa_cache_flush(gna_page_table, sizeof(gna_page_table));
+sys_cache_data_flush_range(ro_region, ro_size);
+sys_cache_data_flush_range(gna_page_table, sizeof(gna_page_table));

 /* copy the model pointers */
 gna_model->model = *model;
@@ -461,12 +461,12 @@ static int intel_gna_infer(const struct device *dev,

 /* copy input */
 memcpy(handle->input, req->input, input_size);
-z_xtensa_cache_flush(handle->input, input_size);
+sys_cache_data_flush_range(handle->input, input_size);

 /* assign layer descriptor base address to configuration descriptor */
 gna_config_desc.labase = (uint32_t)handle->vabase;
 gna_config_desc.lacnt = (uint16_t)header->layer_count;
-z_xtensa_cache_flush(&gna_config_desc, sizeof(gna_config_desc));
+sys_cache_data_flush_range(&gna_config_desc, sizeof(gna_config_desc));

 gna->state = GNA_STATE_ACTIVE;
 regs->gnactrl = (regs->gnactrl & ~GNA_CTRL_INTR_DISABLE) |
@@ -28,6 +28,7 @@
 #include <xtensa/config/core.h>
 #include <zephyr/arch/common/addr_types.h>
 #include <zephyr/arch/xtensa/gdbstub.h>
+#include <zephyr/debug/sparse.h>

 #ifdef CONFIG_KERNEL_COHERENCE
 #define ARCH_STACK_PTR_ALIGN XCHAL_DCACHE_LINESIZE
@@ -10,7 +10,7 @@
 #include <zephyr/kernel.h>
 #include <zephyr/init.h>
 #include <soc.h>
-#include <zephyr/arch/xtensa/cache.h>
+#include <zephyr/cache.h>
 #include <adsp_shim.h>
 #include <adsp_memory.h>
 #include <cpu_init.h>
@@ -5,7 +5,7 @@
 #ifndef INTEL_COMM_WIDGET_H
 #define INTEL_COMM_WIDGET_H

-#include <zephyr/arch/xtensa/cache.h>
+#include <zephyr/cache.h>
 #include <zephyr/kernel.h>

 #define CW_DT_NODE DT_NODELABEL(ace_comm_widget)
@@ -15,6 +15,7 @@
 #include <adsp_memory.h>
 #include <adsp_interrupt.h>
 #include <zephyr/irq.h>
+#include <zephyr/cache.h>

 #define CORE_POWER_CHECK_NUM 32
 #define ACE_INTC_IRQ DT_IRQN(DT_NODELABEL(ace_intc))
@@ -85,7 +86,7 @@ void soc_start_core(int cpu_num)
 /* Initialize the ROM jump address */
 uint32_t *rom_jump_vector = (uint32_t *) ROM_JUMP_ADDR;
 *rom_jump_vector = (uint32_t) z_soc_mp_asm_entry;
-z_xtensa_cache_flush(rom_jump_vector, sizeof(*rom_jump_vector));
+sys_cache_data_flush_range(rom_jump_vector, sizeof(*rom_jump_vector));
 ACE_PWRCTL->wpdsphpxpg |= BIT(cpu_num);

 while ((ACE_PWRSTS->dsphpxpgs & BIT(cpu_num)) == 0) {
@@ -7,6 +7,7 @@
 #include <zephyr/pm/pm.h>
 #include <zephyr/device.h>
 #include <zephyr/debug/sparse.h>
+#include <zephyr/cache.h>
 #include <cpu_init.h>

 #include <adsp_boot.h>
@@ -142,7 +143,7 @@ void power_gate_entry(uint32_t core_id)
 lpsheader->adsp_lpsram_magic = LPSRAM_MAGIC_VALUE;
 lpsheader->lp_restore_vector = &dsp_restore_vector;
 soc_cpus_active[core_id] = false;
-z_xtensa_cache_flush_inv_all();
+sys_cache_data_flush_and_invd_all();
 z_xt_ints_on(ALL_USED_INT_LEVELS_MASK);
 k_cpu_idle();
 z_xt_ints_off(0xffffffff);
@@ -211,7 +212,7 @@ __weak void pm_state_set(enum pm_state state, uint8_t substate_id)
 core_desc[cpu].bctl = DSPCS.bootctl[cpu].bctl;
 DSPCS.bootctl[cpu].bctl &= ~DSPBR_BCTL_WAITIPCG;
 soc_cpus_active[cpu] = false;
-z_xtensa_cache_flush_inv_all();
+sys_cache_data_flush_and_invd_all();
 if (cpu == 0) {
 #ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
 /* save storage and restore information to imr */
@@ -224,7 +225,7 @@ __weak void pm_state_set(enum pm_state state, uint8_t substate_id)
 imr_layout->imr_state.header.imr_restore_vector =
 (void *)boot_entry_d3_restore;
 imr_layout->imr_state.header.imr_ram_storage = global_imr_ram_storage;
-z_xtensa_cache_flush(imr_layout, sizeof(*imr_layout));
+sys_cache_data_flush_range(imr_layout, sizeof(*imr_layout));

 /* save CPU context here
 * when _restore_core_context() is called, it will return directly to
@@ -255,7 +256,7 @@ __weak void pm_state_set(enum pm_state state, uint8_t substate_id)
 #else
 imr_layout->imr_state.header.imr_restore_vector =
 (void *)rom_entry;
-z_xtensa_cache_flush(imr_layout, sizeof(*imr_layout));
+sys_cache_data_flush_range(imr_layout, sizeof(*imr_layout));
 #endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */
 /* turn off all HPSRAM banks - get a full bitmap */
 uint32_t ebb_banks = ace_hpsram_get_bank_count();
@@ -305,7 +306,7 @@ __weak void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
 struct imr_layout *imr_layout = (struct imr_layout *)(IMR_LAYOUT_ADDRESS);

 /* clean storage and restore information */
-z_xtensa_cache_inv(imr_layout, sizeof(*imr_layout));
+sys_cache_data_invd_range(imr_layout, sizeof(*imr_layout));
 imr_layout->imr_state.header.adsp_imr_magic = 0;
 imr_layout->imr_state.header.imr_restore_vector = NULL;
 imr_layout->imr_state.header.imr_ram_storage = NULL;
@@ -313,7 +314,7 @@ __weak void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
 #endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */

 soc_cpus_active[cpu] = true;
-z_xtensa_cache_flush_inv_all();
+sys_cache_data_flush_and_invd_all();
 z_xt_ints_on(core_desc[cpu].intenable);
 } else if (state == PM_STATE_RUNTIME_IDLE) {
 if (cpu != 0) {
@@ -340,7 +341,7 @@ __weak void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
 }

 soc_cpus_active[cpu] = true;
-z_xtensa_cache_flush_inv_all();
+sys_cache_data_flush_and_invd_all();
 z_xt_ints_on(core_desc[cpu].intenable);
 } else {
 __ASSERT(false, "invalid argument - unsupported power state");
@@ -7,7 +7,7 @@

 #include <zephyr/devicetree.h>
 #include <soc_util.h>
-#include <zephyr/arch/xtensa/cache.h>
+#include <zephyr/cache.h>
 #include <adsp_shim.h>
 #include <adsp_memory.h>
 #include <cpu_init.h>
@@ -78,7 +78,7 @@ __weak void pm_state_set(enum pm_state state, uint8_t substate_id)
 core_desc[cpu].intenable = XTENSA_RSR("INTENABLE");
 z_xt_ints_off(0xffffffff);
 soc_cpus_active[cpu] = false;
-z_xtensa_cache_flush_inv_all();
+sys_cache_data_flush_and_invd_all();
 if (cpu == 0) {
 uint32_t ebb = EBB_BANKS_IN_SEGMENT;
 /* turn off all HPSRAM banks - get a full bitmap */
@@ -102,7 +102,7 @@ __weak void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)

 if (state == PM_STATE_SOFT_OFF) {
 soc_cpus_active[cpu] = true;
-z_xtensa_cache_flush_inv_all();
+sys_cache_data_flush_and_invd_all();
 z_xt_ints_on(core_desc[cpu].intenable);
 } else {
 __ASSERT(false, "invalid argument - unsupported power state");
@@ -7,7 +7,7 @@

 #include <zephyr/devicetree.h>
 #include <soc_util.h>
-#include <zephyr/arch/xtensa/cache.h>
+#include <zephyr/cache.h>
 #include <adsp_shim.h>
 #include <adsp_memory.h>
 #include <cpu_init.h>
@@ -10,7 +10,7 @@
 #include <zephyr/kernel.h>
 #include <zephyr/init.h>
 #include <soc_util.h>
-#include <zephyr/arch/xtensa/cache.h>
+#include <zephyr/cache.h>
 #include <adsp_shim.h>
 #include <adsp_memory.h>
 #include <cpu_init.h>
@@ -122,13 +122,13 @@ __imr void parse_manifest(void)
 struct sof_man_module *mod;
 int i;

-z_xtensa_cache_inv(hdr, sizeof(*hdr));
+sys_cache_data_invd_range(hdr, sizeof(*hdr));

 /* copy module to SRAM - skip bootloader module */
 for (i = MAN_SKIP_ENTRIES; i < hdr->num_module_entries; i++) {
 mod = desc->man_module + i;

-z_xtensa_cache_inv(mod, sizeof(*mod));
+sys_cache_data_invd_range(mod, sizeof(*mod));
 parse_module(hdr, mod);
 }
 }
@@ -152,7 +152,7 @@ __imr void boot_core0(void)
 hp_sram_init(L2_SRAM_SIZE);
 lp_sram_init();
 parse_manifest();
-z_xtensa_cache_flush_all();
+sys_cache_data_flush_all();

 /* Zephyr! */
 extern FUNC_NORETURN void z_cstart(void);
@@ -5,7 +5,7 @@
 #ifndef ZEPHYR_INCLUDE_INTEL_ADSP_HDA_H
 #define ZEPHYR_INCLUDE_INTEL_ADSP_HDA_H

-#include <zephyr/arch/xtensa/cache.h>
+#include <zephyr/cache.h>
 #include <zephyr/kernel.h>
 #include <zephyr/device.h>
 #include <adsp_shim.h>
@@ -8,7 +8,6 @@

 #include <string.h>
 #include <errno.h>
-#include <zephyr/arch/xtensa/cache.h>
 #include <zephyr/linker/sections.h>

 #include <adsp_interrupt.h>
@@ -29,9 +28,9 @@ extern bool soc_cpus_active[CONFIG_MP_MAX_NUM_CPUS];

 /* Legacy cache APIs still used in a few places */
 #define SOC_DCACHE_FLUSH(addr, size) \
-z_xtensa_cache_flush((addr), (size))
+sys_cache_data_flush_range((addr), (size))
 #define SOC_DCACHE_INVALIDATE(addr, size) \
-z_xtensa_cache_inv((addr), (size))
+sys_cache_data_invd_range((addr), (size))
 #define z_soc_cached_ptr(p) arch_xtensa_cached_ptr(p)
 #define z_soc_uncached_ptr(p) arch_xtensa_uncached_ptr(p)
@@ -2,7 +2,7 @@
 * SPDX-License-Identifier: Apache-2.0
 */

-#include <zephyr/arch/xtensa/cache.h>
+#include <zephyr/cache.h>
 #include <zephyr/device.h>
 #include <zephyr/devicetree.h>
 #include <zephyr/arch/cpu.h>
@@ -20,7 +20,7 @@ LOG_MODULE_REGISTER(soc_mp, CONFIG_SOC_LOG_LEVEL);
 #include <zsr.h>
 #include <cavs-idc.h>
 #include <soc.h>
-#include <zephyr/arch/xtensa/cache.h>
+#include <zephyr/cache.h>
 #include <adsp_shim.h>
 #include <adsp_memory.h>
 #include <cpu_init.h>
@@ -11,8 +11,8 @@

 /* Macros for data cache operations */
 #define SOC_DCACHE_FLUSH(addr, size) \
-z_xtensa_cache_flush((addr), (size))
+sys_cache_data_flush_range((addr), (size))
 #define SOC_DCACHE_INVALIDATE(addr, size) \
-z_xtensa_cache_inv((addr), (size))
+sys_cache_data_invd_range((addr), (size))

 #endif
@@ -4,7 +4,7 @@
 * SPDX-License-Identifier: Apache-2.0
 */

-#include <zephyr/arch/xtensa/cache.h>
+#include <zephyr/cache.h>
 #include <zephyr/logging/log_backend.h>
 #include <zephyr/logging/log_core.h>
 #include <zephyr/logging/log_output.h>
@@ -68,7 +68,7 @@ static uint32_t hda_log_flush(void)
 #endif

 #if !(IS_ENABLED(CONFIG_KERNEL_COHERENCE))
-z_xtensa_cache_flush(hda_log_buf, CONFIG_LOG_BACKEND_ADSP_HDA_SIZE);
+sys_cache_data_flush_range(hda_log_buf, CONFIG_LOG_BACKEND_ADSP_HDA_SIZE);
 #endif
 dma_reload(hda_log_dev, hda_log_chan, 0, 0, nearest128);
tests/boards/intel_adsp/cache/src/main.c
@@ -5,6 +5,7 @@
 */

 #include <zephyr/ztest.h>
+#include <zephyr/cache.h>
 #include <adsp_memory.h>

 ZTEST(adsp_cache, test_adsp_cache_flush_inv_all)
@@ -21,14 +22,14 @@ ZTEST(adsp_cache, test_adsp_cache_flush_inv_all)
 zassert_equal(*cached, 42, NULL);
 zassert_equal(*uncached, 40, NULL);

-z_xtensa_cache_flush_inv_all();
+sys_cache_data_flush_and_invd_all();

-/* After z_xtensa_cache_flush_inv_all(), uncached should be updated */
+/* After sys_cache_data_flush_and_invd_all(), uncached should be updated */
 zassert_equal(*cached, 42, NULL);
 zassert_equal(*uncached, 42, NULL);

 /* Flush and invalidate again, this time to check the invalidate part */
-z_xtensa_cache_flush_inv_all();
+sys_cache_data_flush_and_invd_all();
 *uncached = 80;

 /* As cache is invalid, cached should be updated with uncached new value */
@@ -41,9 +42,9 @@ ZTEST(adsp_cache, test_adsp_cache_flush_inv_all)
 zassert_equal(*cached, 82, NULL);
 zassert_equal(*uncached, 80, NULL);

-z_xtensa_cache_flush_all();
+sys_cache_data_flush_all();

-/* After z_xtensa_cache_flush_all(), uncached should be updated */
+/* After sys_cache_data_flush_all(), uncached should be updated */
 zassert_equal(*cached, 82, NULL);
 zassert_equal(*uncached, 82, NULL);

@@ -53,7 +54,7 @@ ZTEST(adsp_cache, test_adsp_cache_flush_inv_all)
 zassert_equal(*cached, 82, NULL);
 zassert_equal(*uncached, 100, NULL);

-z_xtensa_cache_inv_all();
+sys_cache_data_invd_all();

 /* Now, cached should be updated */
 zassert_equal(*cached, 100, NULL);
@@ -2,7 +2,7 @@
 * SPDX-License-Identifier: Apache-2.0
 */

-#include <zephyr/arch/xtensa/cache.h>
+#include <zephyr/cache.h>
 #include <zephyr/kernel.h>
 #include <zephyr/ztest.h>
 #include <intel_adsp_ipc.h>
@@ -64,7 +64,7 @@ ZTEST(intel_adsp_hda_dma, test_hda_host_in_dma)
 #else
 /* The buffer is in the cached address range and must be flushed */
 zassert_false(arch_mem_coherent(dma_buf), "Buffer is unexpectedly coherent!");
-z_xtensa_cache_flush(dma_buf, DMA_BUF_SIZE);
+sys_cache_data_flush_range(dma_buf, DMA_BUF_SIZE);
 #endif

 dma = DEVICE_DT_GET(DT_NODELABEL(hda_host_in));
@@ -210,7 +210,7 @@ void test_hda_host_out_dma(void)
 * prior to reading.
 */
 zassert_false(arch_mem_coherent(dma_buf), "Buffer is unexpectedly coherent!");
-z_xtensa_cache_inv(dma_buf, DMA_BUF_SIZE);
+sys_cache_data_invd_range(dma_buf, DMA_BUF_SIZE);
 #endif

 is_ramp = true;
@@ -2,7 +2,7 @@
 * SPDX-License-Identifier: Apache-2.0
 */

-#include <zephyr/arch/xtensa/cache.h>
+#include <zephyr/cache.h>
 #include <zephyr/kernel.h>
 #include <zephyr/ztest.h>
 #include <intel_adsp_ipc.h>
@@ -63,7 +63,7 @@ ZTEST(intel_adsp_hda, test_hda_host_in_smoke)
 #else
 /* The buffer is in the cached address range and must be flushed */
 zassert_false(arch_mem_coherent(hda_buf), "Buffer is unexpectedly coherent!");
-z_xtensa_cache_flush(hda_buf, HDA_BUF_SIZE);
+sys_cache_data_flush_range(hda_buf, HDA_BUF_SIZE);
 #endif

 intel_adsp_hda_init(HDA_HOST_IN_BASE, HDA_REGBLOCK_SIZE, STREAM_ID);
@@ -172,7 +172,7 @@ ZTEST(intel_adsp_hda, test_hda_host_out_smoke)
 * prior to reading.
 */
 zassert_false(arch_mem_coherent(hda_buf), "Buffer is unexpectedly coherent!");
-z_xtensa_cache_inv(hda_buf, HDA_BUF_SIZE);
+sys_cache_data_invd_range(hda_buf, HDA_BUF_SIZE);
 #endif

 is_ramp = true;
@@ -6,6 +6,7 @@

 #include <zephyr/ztest.h>
 #include <zephyr/kernel.h>
+#include <zephyr/cache.h>

 #include <zephyr/toolchain.h>
 #include <zephyr/sys/printk.h>
@@ -59,7 +60,7 @@ ZTEST(adsp_mem, test_adsp_mem_map_region)
 * Make sure it is written back to the mapped
 * physical memory.
 */
-z_xtensa_cache_flush(&vps[i].mem[0], PAGE_SZ);
+sys_cache_data_flush_range(&vps[i].mem[0], PAGE_SZ);

 /*
 * pa[i] is a cached address which means that the cached
@@ -67,7 +68,7 @@ ZTEST(adsp_mem, test_adsp_mem_map_region)
 * above. So we need to invalidate the cache to reload
 * the new value.
 */
-z_xtensa_cache_inv(UINT_TO_POINTER(pa[i]), PAGE_SZ);
+sys_cache_data_invd_range(UINT_TO_POINTER(pa[i]), PAGE_SZ);
 }

 /* Verify the originals reflect the change */
@@ -132,7 +133,7 @@ ZTEST(adsp_mem, test_adsp_mem_map_region)
 * Make sure it is written back to the mapped
 * physical memory.
 */
-z_xtensa_cache_flush(&vps3[i].mem[0], PAGE_SZ);
+sys_cache_data_flush_range(&vps3[i].mem[0], PAGE_SZ);

 zassert_equal(*(int *)pa[i], markers[i],
 "page copy modified original");
@@ -185,7 +186,7 @@ ZTEST(adsp_mem, test_adsp_mem_map_array)
 * Make sure it is written back to the mapped
 * physical memory.
 */
-z_xtensa_cache_flush(&vps[i].mem[0], PAGE_SZ);
+sys_cache_data_flush_range(&vps[i].mem[0], PAGE_SZ);

 /*
 * pa[i] is a cached address which means that the cached
@@ -193,7 +194,7 @@ ZTEST(adsp_mem, test_adsp_mem_map_array)
 * above. So we need to invalidate the cache to reload
 * the new value.
 */
-z_xtensa_cache_inv(UINT_TO_POINTER(pa[i]), PAGE_SZ);
+sys_cache_data_invd_range(UINT_TO_POINTER(pa[i]), PAGE_SZ);
 }

 /* Verify the originals reflect the change */
@@ -258,7 +259,7 @@ ZTEST(adsp_mem, test_adsp_mem_map_array)
 * Make sure it is written back to the mapped
 * physical memory.
 */
-z_xtensa_cache_flush(&vps3[i].mem[0], PAGE_SZ);
+sys_cache_data_flush_range(&vps3[i].mem[0], PAGE_SZ);

 zassert_equal(*(int *)pa[i], markers[i],
 "page copy modified original");
@@ -4,6 +4,8 @@
 #include <stdlib.h>
 #include <zephyr/kernel.h>
 #include <zephyr/ztest.h>
+#include <zephyr/cache.h>
+
 #include <intel_adsp_ipc.h>
 #include "tests.h"

@@ -100,7 +102,7 @@ static void core_smoke(void *arg)
 *utag = 42;
 zassert_true(*ctag == 99, "uncached assignment unexpectedly affected cache");
 zassert_true(*utag == 42, "uncached memory affected unexpectedly");
-z_xtensa_cache_flush((void *)ctag, sizeof(*ctag));
+sys_cache_data_flush_range((void *)ctag, sizeof(*ctag));
 zassert_true(*utag == 99, "cache flush didn't work");

 /* Calibrate clocks */