/*
 * Copyright (c) 2015 Wind River Systems, Inc.
 * Copyright (c) 2022 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_CACHE_H_
#define ZEPHYR_INCLUDE_CACHE_H_
|
2015-09-18 22:36:57 +02:00
|
|
|
|
2022-10-05 10:08:29 +02:00
|
|
|
/**
|
|
|
|
* @file
|
|
|
|
* @brief cache API interface
|
|
|
|
*/
|
|
|
|
|
2022-05-06 10:30:42 +02:00
|
|
|
#include <zephyr/kernel.h>
|
2022-10-05 10:08:29 +02:00
|
|
|
#include <zephyr/arch/cpu.h>
|
2024-01-25 03:46:14 +01:00
|
|
|
#include <zephyr/debug/sparse.h>
|
2015-09-18 22:36:57 +02:00
|
|
|
|
2016-01-22 18:38:49 +01:00
|
|
|
#ifdef __cplusplus
|
|
|
|
extern "C" {
|
|
|
|
#endif
|
|
|
|
|
2022-10-05 10:08:29 +02:00
|
|
|
#if defined(CONFIG_EXTERNAL_CACHE)
|
2023-04-19 22:10:48 +02:00
|
|
|
#include <zephyr/drivers/cache.h>
|
2020-12-02 12:38:58 +01:00
|
|
|
|
2023-04-19 22:10:48 +02:00
|
|
|
#elif defined(CONFIG_ARCH_CACHE)
|
|
|
|
#include <zephyr/arch/cache.h>
|
2022-07-25 10:42:08 +02:00
|
|
|
|
2023-04-19 22:10:48 +02:00
|
|
|
#endif
|
2022-12-12 18:23:50 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @defgroup cache_interface Cache Interface
|
2023-05-31 12:06:20 +02:00
|
|
|
* @ingroup os_services
|
2022-12-12 18:23:50 +01:00
|
|
|
* @{
|
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @cond INTERNAL_HIDDEN
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#define _CPU DT_PATH(cpus, cpu_0)
|
|
|
|
|
|
|
|
/** @endcond */
|
|
|
|
|
2022-10-05 10:08:29 +02:00
|
|
|
/**
 * @brief Enable the d-cache
 *
 * Enable the data cache
 *
 * @note No-op when CONFIG_CACHE_MANAGEMENT or CONFIG_DCACHE is disabled.
 *
 */
static ALWAYS_INLINE void sys_cache_data_enable(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	cache_data_enable();
#endif
}
|
2020-12-02 12:38:58 +01:00
|
|
|
|
2022-10-05 10:08:29 +02:00
|
|
|
/**
 * @brief Disable the d-cache
 *
 * Disable the data cache
 *
 * @note No-op when CONFIG_CACHE_MANAGEMENT or CONFIG_DCACHE is disabled.
 *
 */
static ALWAYS_INLINE void sys_cache_data_disable(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	cache_data_disable();
#endif
}
|
|
|
|
|
|
|
|
/**
 * @brief Enable the i-cache
 *
 * Enable the instruction cache
 *
 * @note No-op when CONFIG_CACHE_MANAGEMENT or CONFIG_ICACHE is disabled.
 *
 */
static ALWAYS_INLINE void sys_cache_instr_enable(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	cache_instr_enable();
#endif
}
|
|
|
|
|
|
|
|
/**
 * @brief Disable the i-cache
 *
 * Disable the instruction cache
 *
 * @note No-op when CONFIG_CACHE_MANAGEMENT or CONFIG_ICACHE is disabled.
 *
 */
static ALWAYS_INLINE void sys_cache_instr_disable(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	cache_instr_disable();
#endif
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Flush the d-cache
|
|
|
|
*
|
|
|
|
* Flush the whole data cache.
|
|
|
|
*
|
|
|
|
* @retval 0 If succeeded.
|
|
|
|
* @retval -ENOTSUP If not supported.
|
|
|
|
* @retval -errno Negative errno for other failures.
|
|
|
|
*/
|
2023-04-19 22:10:48 +02:00
|
|
|
static ALWAYS_INLINE int sys_cache_data_flush_all(void)
|
2020-12-02 12:38:58 +01:00
|
|
|
{
|
2022-07-25 10:42:08 +02:00
|
|
|
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
|
2022-10-05 10:08:29 +02:00
|
|
|
return cache_data_flush_all();
|
2021-04-28 10:38:27 +02:00
|
|
|
#endif
|
2022-10-05 10:08:29 +02:00
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
2021-05-24 11:51:00 +02:00
|
|
|
|
2022-10-05 10:08:29 +02:00
|
|
|
/**
|
|
|
|
* @brief Flush the i-cache
|
|
|
|
*
|
|
|
|
* Flush the whole instruction cache.
|
|
|
|
*
|
|
|
|
* @retval 0 If succeeded.
|
|
|
|
* @retval -ENOTSUP If not supported.
|
|
|
|
* @retval -errno Negative errno for other failures.
|
|
|
|
*/
|
2023-04-19 22:10:48 +02:00
|
|
|
static ALWAYS_INLINE int sys_cache_instr_flush_all(void)
|
2022-10-05 10:08:29 +02:00
|
|
|
{
|
|
|
|
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
|
|
|
|
return cache_instr_flush_all();
|
|
|
|
#endif
|
2020-12-02 12:38:58 +01:00
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
2022-10-05 10:08:29 +02:00
|
|
|
/**
|
|
|
|
* @brief Invalidate the d-cache
|
|
|
|
*
|
|
|
|
* Invalidate the whole data cache.
|
|
|
|
*
|
|
|
|
* @retval 0 If succeeded.
|
|
|
|
* @retval -ENOTSUP If not supported.
|
|
|
|
* @retval -errno Negative errno for other failures.
|
|
|
|
*/
|
2023-04-19 22:10:48 +02:00
|
|
|
static ALWAYS_INLINE int sys_cache_data_invd_all(void)
|
2020-12-02 12:38:58 +01:00
|
|
|
{
|
2022-07-25 10:42:08 +02:00
|
|
|
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
|
2022-10-05 10:08:29 +02:00
|
|
|
return cache_data_invd_all();
|
|
|
|
#endif
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Invalidate the i-cache
|
|
|
|
*
|
|
|
|
* Invalidate the whole instruction cache.
|
|
|
|
*
|
|
|
|
* @retval 0 If succeeded.
|
|
|
|
* @retval -ENOTSUP If not supported.
|
|
|
|
* @retval -errno Negative errno for other failures.
|
|
|
|
*/
|
2023-04-19 22:10:48 +02:00
|
|
|
static ALWAYS_INLINE int sys_cache_instr_invd_all(void)
|
2022-10-05 10:08:29 +02:00
|
|
|
{
|
|
|
|
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
|
|
|
|
return cache_instr_invd_all();
|
|
|
|
#endif
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Flush and Invalidate the d-cache
|
|
|
|
*
|
|
|
|
* Flush and Invalidate the whole data cache.
|
|
|
|
*
|
|
|
|
* @retval 0 If succeeded.
|
|
|
|
* @retval -ENOTSUP If not supported.
|
|
|
|
* @retval -errno Negative errno for other failures.
|
|
|
|
*/
|
2023-04-19 22:10:48 +02:00
|
|
|
static ALWAYS_INLINE int sys_cache_data_flush_and_invd_all(void)
|
2022-10-05 10:08:29 +02:00
|
|
|
{
|
|
|
|
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
|
|
|
|
return cache_data_flush_and_invd_all();
|
|
|
|
#endif
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Flush and Invalidate the i-cache
|
|
|
|
*
|
|
|
|
* Flush and Invalidate the whole instruction cache.
|
|
|
|
*
|
|
|
|
* @retval 0 If succeeded.
|
|
|
|
* @retval -ENOTSUP If not supported.
|
|
|
|
* @retval -errno Negative errno for other failures.
|
|
|
|
*/
|
2023-04-19 22:10:48 +02:00
|
|
|
static ALWAYS_INLINE int sys_cache_instr_flush_and_invd_all(void)
|
2022-10-05 10:08:29 +02:00
|
|
|
{
|
|
|
|
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
|
|
|
|
return cache_instr_flush_and_invd_all();
|
|
|
|
#endif
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Flush an address range in the d-cache
|
|
|
|
*
|
|
|
|
* Flush the specified address range of the data cache.
|
|
|
|
*
|
2023-06-21 13:35:13 +02:00
|
|
|
* @note the cache operations act on cache line. When multiple data structures
|
|
|
|
* share the same cache line being flushed, all the portions of the
|
|
|
|
* data structures sharing the same line will be flushed. This is usually
|
|
|
|
* not a problem because writing back is a non-destructive process that
|
|
|
|
* could be triggered by hardware at any time, so having an aligned
|
|
|
|
* @p addr or a padded @p size is not strictly necessary.
|
|
|
|
*
|
2022-10-05 10:08:29 +02:00
|
|
|
* @param addr Starting address to flush.
|
|
|
|
* @param size Range size.
|
|
|
|
*
|
|
|
|
* @retval 0 If succeeded.
|
|
|
|
* @retval -ENOTSUP If not supported.
|
|
|
|
* @retval -errno Negative errno for other failures.
|
|
|
|
*/
|
2023-04-19 22:10:48 +02:00
|
|
|
__syscall_always_inline int sys_cache_data_flush_range(void *addr, size_t size);
|
|
|
|
|
|
|
|
static ALWAYS_INLINE int z_impl_sys_cache_data_flush_range(void *addr, size_t size)
|
2022-10-05 10:08:29 +02:00
|
|
|
{
|
|
|
|
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
|
|
|
|
return cache_data_flush_range(addr, size);
|
2021-04-28 10:38:27 +02:00
|
|
|
#endif
|
2021-05-24 11:51:00 +02:00
|
|
|
ARG_UNUSED(addr);
|
|
|
|
ARG_UNUSED(size);
|
|
|
|
|
2020-12-02 12:38:58 +01:00
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
2022-10-05 10:08:29 +02:00
|
|
|
/**
|
|
|
|
* @brief Flush an address range in the i-cache
|
|
|
|
*
|
|
|
|
* Flush the specified address range of the instruction cache.
|
|
|
|
*
|
2023-06-21 13:35:13 +02:00
|
|
|
* @note the cache operations act on cache line. When multiple data structures
|
|
|
|
* share the same cache line being flushed, all the portions of the
|
|
|
|
* data structures sharing the same line will be flushed. This is usually
|
|
|
|
* not a problem because writing back is a non-destructive process that
|
|
|
|
* could be triggered by hardware at any time, so having an aligned
|
|
|
|
* @p addr or a padded @p size is not strictly necessary.
|
|
|
|
*
|
2022-10-05 10:08:29 +02:00
|
|
|
* @param addr Starting address to flush.
|
|
|
|
* @param size Range size.
|
|
|
|
*
|
|
|
|
* @retval 0 If succeeded.
|
|
|
|
* @retval -ENOTSUP If not supported.
|
|
|
|
* @retval -errno Negative errno for other failures.
|
|
|
|
*/
|
2023-04-19 22:10:48 +02:00
|
|
|
static ALWAYS_INLINE int sys_cache_instr_flush_range(void *addr, size_t size)
|
2020-12-02 12:38:58 +01:00
|
|
|
{
|
2022-07-25 10:42:08 +02:00
|
|
|
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
|
2022-10-05 10:08:29 +02:00
|
|
|
return cache_instr_flush_range(addr, size);
|
|
|
|
#endif
|
|
|
|
ARG_UNUSED(addr);
|
|
|
|
ARG_UNUSED(size);
|
|
|
|
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Invalidate an address range in the d-cache
|
|
|
|
*
|
|
|
|
* Invalidate the specified address range of the data cache.
|
|
|
|
*
|
2023-06-21 13:35:13 +02:00
|
|
|
* @note the cache operations act on cache line. When multiple data structures
|
|
|
|
* share the same cache line being invalidated, all the portions of the
|
|
|
|
* non-read-only data structures sharing the same line will be
|
|
|
|
* invalidated as well. This is a destructive process that could lead to
|
|
|
|
* data loss and/or corruption. When @p addr is not aligned to the cache
|
|
|
|
* line and/or @p size is not a multiple of the cache line size the
|
|
|
|
* behaviour is undefined.
|
|
|
|
*
|
2022-10-05 10:08:29 +02:00
|
|
|
* @param addr Starting address to invalidate.
|
|
|
|
* @param size Range size.
|
|
|
|
*
|
|
|
|
* @retval 0 If succeeded.
|
|
|
|
* @retval -ENOTSUP If not supported.
|
|
|
|
* @retval -errno Negative errno for other failures.
|
|
|
|
*/
|
2023-04-19 22:10:48 +02:00
|
|
|
__syscall_always_inline int sys_cache_data_invd_range(void *addr, size_t size);
|
|
|
|
|
|
|
|
static ALWAYS_INLINE int z_impl_sys_cache_data_invd_range(void *addr, size_t size)
|
2022-10-05 10:08:29 +02:00
|
|
|
{
|
|
|
|
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
|
|
|
|
return cache_data_invd_range(addr, size);
|
2021-04-28 10:38:27 +02:00
|
|
|
#endif
|
2022-10-05 10:08:29 +02:00
|
|
|
ARG_UNUSED(addr);
|
|
|
|
ARG_UNUSED(size);
|
2021-05-24 11:51:00 +02:00
|
|
|
|
2020-12-02 12:38:58 +01:00
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
2020-07-16 21:32:24 +02:00
|
|
|
|
2022-10-05 10:08:29 +02:00
|
|
|
/**
|
|
|
|
* @brief Invalidate an address range in the i-cache
|
|
|
|
*
|
|
|
|
* Invalidate the specified address range of the instruction cache.
|
|
|
|
*
|
2023-06-21 13:35:13 +02:00
|
|
|
* @note the cache operations act on cache line. When multiple data structures
|
|
|
|
* share the same cache line being invalidated, all the portions of the
|
|
|
|
* non-read-only data structures sharing the same line will be
|
|
|
|
* invalidated as well. This is a destructive process that could lead to
|
|
|
|
* data loss and/or corruption. When @p addr is not aligned to the cache
|
|
|
|
* line and/or @p size is not a multiple of the cache line size the
|
|
|
|
* behaviour is undefined.
|
|
|
|
*
|
2022-10-05 10:08:29 +02:00
|
|
|
* @param addr Starting address to invalidate.
|
|
|
|
* @param size Range size.
|
|
|
|
*
|
|
|
|
* @retval 0 If succeeded.
|
|
|
|
* @retval -ENOTSUP If not supported.
|
|
|
|
* @retval -errno Negative errno for other failures.
|
|
|
|
*/
|
2023-04-19 22:10:48 +02:00
|
|
|
static ALWAYS_INLINE int sys_cache_instr_invd_range(void *addr, size_t size)
|
2020-04-28 22:14:54 +02:00
|
|
|
{
|
2022-07-25 10:42:08 +02:00
|
|
|
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
|
2022-10-05 10:08:29 +02:00
|
|
|
return cache_instr_invd_range(addr, size);
|
2021-04-28 10:38:27 +02:00
|
|
|
#endif
|
2021-05-24 11:51:00 +02:00
|
|
|
ARG_UNUSED(addr);
|
|
|
|
ARG_UNUSED(size);
|
|
|
|
|
2020-12-02 12:38:58 +01:00
|
|
|
return -ENOTSUP;
|
2020-04-28 22:14:54 +02:00
|
|
|
}
|
2015-09-18 22:36:57 +02:00
|
|
|
|
2022-10-05 10:08:29 +02:00
|
|
|
/**
|
|
|
|
* @brief Flush and Invalidate an address range in the d-cache
|
|
|
|
*
|
|
|
|
* Flush and Invalidate the specified address range of the data cache.
|
|
|
|
*
|
2023-06-21 13:35:13 +02:00
|
|
|
* @note the cache operations act on cache line. When multiple data structures
|
|
|
|
* share the same cache line being flushed, all the portions of the
|
|
|
|
* data structures sharing the same line will be flushed before being
|
|
|
|
* invalidated. This is usually not a problem because writing back is a
|
|
|
|
* non-destructive process that could be triggered by hardware at any
|
|
|
|
* time, so having an aligned @p addr or a padded @p size is not strictly
|
|
|
|
* necessary.
|
|
|
|
*
|
2022-10-05 10:08:29 +02:00
|
|
|
* @param addr Starting address to flush and invalidate.
|
|
|
|
* @param size Range size.
|
|
|
|
*
|
|
|
|
* @retval 0 If succeeded.
|
|
|
|
* @retval -ENOTSUP If not supported.
|
|
|
|
* @retval -errno Negative errno for other failures.
|
|
|
|
*/
|
2023-04-19 22:10:48 +02:00
|
|
|
__syscall_always_inline int sys_cache_data_flush_and_invd_range(void *addr, size_t size);
|
|
|
|
|
|
|
|
static ALWAYS_INLINE int z_impl_sys_cache_data_flush_and_invd_range(void *addr, size_t size)
|
2020-12-02 21:42:42 +01:00
|
|
|
{
|
2022-10-05 10:08:29 +02:00
|
|
|
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
|
|
|
|
return cache_data_flush_and_invd_range(addr, size);
|
|
|
|
#endif
|
|
|
|
ARG_UNUSED(addr);
|
|
|
|
ARG_UNUSED(size);
|
|
|
|
|
|
|
|
return -ENOTSUP;
|
2020-12-02 21:42:42 +01:00
|
|
|
}
|
2022-10-05 10:08:29 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Flush and Invalidate an address range in the i-cache
|
|
|
|
*
|
|
|
|
* Flush and Invalidate the specified address range of the instruction cache.
|
|
|
|
*
|
2023-06-21 13:35:13 +02:00
|
|
|
* @note the cache operations act on cache line. When multiple data structures
|
|
|
|
* share the same cache line being flushed, all the portions of the
|
|
|
|
* data structures sharing the same line will be flushed before being
|
|
|
|
* invalidated. This is usually not a problem because writing back is a
|
|
|
|
* non-destructive process that could be triggered by hardware at any
|
|
|
|
* time, so having an aligned @p addr or a padded @p size is not strictly
|
|
|
|
* necessary.
|
|
|
|
*
|
2022-10-05 10:08:29 +02:00
|
|
|
* @param addr Starting address to flush and invalidate.
|
|
|
|
* @param size Range size.
|
|
|
|
*
|
|
|
|
* @retval 0 If succeeded.
|
|
|
|
* @retval -ENOTSUP If not supported.
|
|
|
|
* @retval -errno Negative errno for other failures.
|
|
|
|
*/
|
2023-04-19 22:10:48 +02:00
|
|
|
static ALWAYS_INLINE int sys_cache_instr_flush_and_invd_range(void *addr, size_t size)
|
2022-10-05 10:08:29 +02:00
|
|
|
{
|
|
|
|
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
|
|
|
|
return cache_instr_flush_and_invd_range(addr, size);
|
2020-12-02 21:42:42 +01:00
|
|
|
#endif
|
2022-10-05 10:08:29 +02:00
|
|
|
ARG_UNUSED(addr);
|
|
|
|
ARG_UNUSED(size);
|
|
|
|
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
2020-12-02 21:42:42 +01:00
|
|
|
|
2020-04-28 22:14:54 +02:00
|
|
|
/**
|
|
|
|
*
|
2022-10-05 10:08:29 +02:00
|
|
|
* @brief Get the the d-cache line size.
|
|
|
|
*
|
|
|
|
* The API is provided to get the data cache line.
|
2020-04-28 22:14:54 +02:00
|
|
|
*
|
2022-10-05 10:08:29 +02:00
|
|
|
* The cache line size is calculated (in order of priority):
|
2020-04-28 22:14:54 +02:00
|
|
|
*
|
2022-12-12 18:23:50 +01:00
|
|
|
* - At run-time when @kconfig{CONFIG_DCACHE_LINE_SIZE_DETECT} is set.
|
|
|
|
* - At compile time using the value set in @kconfig{CONFIG_DCACHE_LINE_SIZE}.
|
2022-10-05 10:08:29 +02:00
|
|
|
* - At compile time using the `d-cache-line-size` CPU0 property of the DT.
|
|
|
|
* - 0 otherwise
|
|
|
|
*
|
|
|
|
* @retval size Size of the d-cache line.
|
|
|
|
* @retval 0 If the d-cache is not enabled.
|
2020-04-28 22:14:54 +02:00
|
|
|
*/
|
2023-04-19 22:10:48 +02:00
|
|
|
static ALWAYS_INLINE size_t sys_cache_data_line_size_get(void)
|
2020-04-28 22:14:54 +02:00
|
|
|
{
|
2020-12-02 12:38:58 +01:00
|
|
|
#ifdef CONFIG_DCACHE_LINE_SIZE_DETECT
|
2021-05-04 16:26:10 +02:00
|
|
|
return cache_data_line_size_get();
|
2020-12-02 12:38:58 +01:00
|
|
|
#elif (CONFIG_DCACHE_LINE_SIZE != 0)
|
|
|
|
return CONFIG_DCACHE_LINE_SIZE;
|
2015-10-30 22:04:00 +01:00
|
|
|
#else
|
2022-12-12 18:23:50 +01:00
|
|
|
return DT_PROP_OR(_CPU, d_cache_line_size, 0);
|
2020-12-02 12:38:58 +01:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2022-10-05 10:08:29 +02:00
|
|
|
/**
|
2020-12-02 12:38:58 +01:00
|
|
|
*
|
2022-10-05 10:08:29 +02:00
|
|
|
* @brief Get the the i-cache line size.
|
2020-12-02 12:38:58 +01:00
|
|
|
*
|
2022-10-05 10:08:29 +02:00
|
|
|
* The API is provided to get the instruction cache line.
|
2020-12-02 12:38:58 +01:00
|
|
|
*
|
2022-10-05 10:08:29 +02:00
|
|
|
* The cache line size is calculated (in order of priority):
|
|
|
|
*
|
2022-12-12 18:23:50 +01:00
|
|
|
* - At run-time when @kconfig{CONFIG_ICACHE_LINE_SIZE_DETECT} is set.
|
|
|
|
* - At compile time using the value set in @kconfig{CONFIG_ICACHE_LINE_SIZE}.
|
2022-10-05 10:08:29 +02:00
|
|
|
* - At compile time using the `i-cache-line-size` CPU0 property of the DT.
|
|
|
|
* - 0 otherwise
|
|
|
|
*
|
|
|
|
* @retval size Size of the d-cache line.
|
|
|
|
* @retval 0 If the d-cache is not enabled.
|
2020-12-02 12:38:58 +01:00
|
|
|
*/
|
2023-04-19 22:10:48 +02:00
|
|
|
static ALWAYS_INLINE size_t sys_cache_instr_line_size_get(void)
|
2020-12-02 12:38:58 +01:00
|
|
|
{
|
|
|
|
#ifdef CONFIG_ICACHE_LINE_SIZE_DETECT
|
2021-05-04 16:26:10 +02:00
|
|
|
return cache_instr_line_size_get();
|
2020-12-02 12:38:58 +01:00
|
|
|
#elif (CONFIG_ICACHE_LINE_SIZE != 0)
|
|
|
|
return CONFIG_ICACHE_LINE_SIZE;
|
2020-04-28 22:14:54 +02:00
|
|
|
#else
|
2022-12-12 18:23:50 +01:00
|
|
|
return DT_PROP_OR(_CPU, i_cache_line_size, 0);
|
2020-12-02 12:38:58 +01:00
|
|
|
#endif
|
2020-04-28 22:14:54 +02:00
|
|
|
}
|
2015-09-18 22:36:57 +02:00
|
|
|
|
2024-01-25 03:46:14 +01:00
|
|
|
/**
 * @brief Test if a pointer is in cached region.
 *
 * Some hardware may map the same physical memory twice
 * so that it can be seen in both (incoherent) cached mappings
 * and a coherent "shared" area. This tests if a particular
 * pointer is within the cached, coherent area.
 *
 * @note Always returns false when CONFIG_CACHE_MANAGEMENT or
 *       CONFIG_CACHE_DOUBLEMAP is disabled.
 *
 * @param ptr Pointer
 *
 * @retval True if pointer is in cached region.
 * @retval False if pointer is not in cached region.
 */
static ALWAYS_INLINE bool sys_cache_is_ptr_cached(void *ptr)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_CACHE_DOUBLEMAP)
	return cache_is_ptr_cached(ptr);
#else
	ARG_UNUSED(ptr);

	return false;
#endif
}
|
|
|
|
|
|
|
|
/**
 * @brief Test if a pointer is in un-cached region.
 *
 * Some hardware may map the same physical memory twice
 * so that it can be seen in both (incoherent) cached mappings
 * and a coherent "shared" area. This tests if a particular
 * pointer is within the un-cached, incoherent area.
 *
 * @note Always returns false when CONFIG_CACHE_MANAGEMENT or
 *       CONFIG_CACHE_DOUBLEMAP is disabled.
 *
 * @param ptr Pointer
 *
 * @retval True if pointer is not in cached region.
 * @retval False if pointer is in cached region.
 */
static ALWAYS_INLINE bool sys_cache_is_ptr_uncached(void *ptr)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_CACHE_DOUBLEMAP)
	return cache_is_ptr_uncached(ptr);
#else
	ARG_UNUSED(ptr);

	return false;
#endif
}
|
|
|
|
|
|
|
|
/**
 * @brief Return cached pointer to a RAM address
 *
 * This function takes a pointer to any addressable object (either in
 * cacheable memory or not) and returns a pointer that can be used to
 * refer to the same memory through the L1 data cache. Data read
 * through the resulting pointer will reflect locally cached values on
 * the current CPU if they exist, and writes will go first into the
 * cache and be written back later.
 *
 * @note This API returns the same pointer if CONFIG_CACHE_DOUBLEMAP is not
 *	 enabled.
 *
 * @see arch_uncached_ptr()
 *
 * @param ptr A pointer to a valid C object
 * @return A pointer to the same object via the L1 dcache
 */
static ALWAYS_INLINE void __sparse_cache *sys_cache_cached_ptr_get(void *ptr)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_CACHE_DOUBLEMAP)
	return cache_cached_ptr(ptr);
#else
	/* No double-mapping: the identity pointer is returned, only the
	 * sparse address-space annotation changes.
	 */
	return (__sparse_force void __sparse_cache *)ptr;
#endif
}
|
|
|
|
|
|
|
|
/**
 * @brief Return uncached pointer to a RAM address
 *
 * This function takes a pointer to any addressable object (either in
 * cacheable memory or not) and returns a pointer that can be used to
 * refer to the same memory while bypassing the L1 data cache. Data
 * in the L1 cache will not be inspected nor modified by the access.
 *
 * @note This API returns the same pointer if CONFIG_CACHE_DOUBLEMAP is not
 *	 enabled.
 *
 * @see arch_cached_ptr()
 *
 * @param ptr A pointer to a valid C object
 * @return A pointer to the same object bypassing the L1 dcache
 */
static ALWAYS_INLINE void *sys_cache_uncached_ptr_get(void __sparse_cache *ptr)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_CACHE_DOUBLEMAP)
	return cache_uncached_ptr(ptr);
#else
	/* No double-mapping: the identity pointer is returned, only the
	 * sparse address-space annotation changes.
	 */
	return (__sparse_force void *)ptr;
#endif
}
|
|
|
|
|
|
|
|
|
2022-10-05 10:08:29 +02:00
|
|
|
#ifdef CONFIG_LIBMETAL
/**
 * @brief Compatibility wrapper used by libmetal.
 *
 * Flushes the given data-cache address range; the status code of the
 * underlying flush is intentionally discarded.
 *
 * @param addr Starting address to flush.
 * @param size Range size.
 */
static ALWAYS_INLINE void sys_cache_flush(void *addr, size_t size)
{
	(void)sys_cache_data_flush_range(addr, size);
}
#endif
|
|
|
|
|
2024-01-24 10:35:04 +01:00
|
|
|
#include <zephyr/syscalls/cache.h>
|
2016-01-22 18:38:49 +01:00
|
|
|
#ifdef __cplusplus
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2022-12-12 18:23:50 +01:00
|
|
|
/**
|
|
|
|
* @}
|
|
|
|
*/
|
|
|
|
|
2018-09-14 19:43:44 +02:00
|
|
|
#endif /* ZEPHYR_INCLUDE_CACHE_H_ */
|