diff --git a/CODEOWNERS b/CODEOWNERS index c9060dfbd7f..9225e294f19 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -303,6 +303,7 @@ /drivers/memc/ @gmarull /drivers/misc/ @tejlmand /drivers/misc/ft8xx/ @hubertmis +/drivers/mm/ @dcpleung /drivers/modem/hl7800.c @LairdCP/zephyr /drivers/modem/simcom-sim7080.c @lgehreke /drivers/modem/simcom-sim7080.h @lgehreke diff --git a/drivers/CMakeLists.txt b/drivers/CMakeLists.txt index 528d45b70c9..1387f599695 100644 --- a/drivers/CMakeLists.txt +++ b/drivers/CMakeLists.txt @@ -64,3 +64,4 @@ add_subdirectory_ifdef(CONFIG_FPGA fpga) add_subdirectory_ifdef(CONFIG_PINCTRL pinctrl) add_subdirectory_ifdef(CONFIG_MBOX mbox) add_subdirectory_ifdef(CONFIG_BOARD_XENVM xen) +add_subdirectory_ifdef(CONFIG_MM_DRV mm) diff --git a/drivers/Kconfig b/drivers/Kconfig index 8390fcf0806..29696003497 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -127,4 +127,6 @@ source "drivers/pinctrl/Kconfig" source "drivers/mbox/Kconfig" +source "drivers/mm/Kconfig" + endmenu diff --git a/drivers/mm/CMakeLists.txt b/drivers/mm/CMakeLists.txt new file mode 100644 index 00000000000..143c5be5cfc --- /dev/null +++ b/drivers/mm/CMakeLists.txt @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: Apache-2.0 + +zephyr_library() + +zephyr_library_sources(mm_drv_common.c) diff --git a/drivers/mm/Kconfig b/drivers/mm/Kconfig new file mode 100644 index 00000000000..3b9b5c458ce --- /dev/null +++ b/drivers/mm/Kconfig @@ -0,0 +1,20 @@ +# Copyright (c) 2021 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +menuconfig MM_DRV + bool "Memory Management Drivers [EXPERIMENTAL]" + select EXPERIMENTAL + select KERNEL_VM_SUPPORT + help + Include Memory Management drivers in system config + +if MM_DRV + +config MM_DRV_PAGE_SIZE + hex "Memory Page Size" + default 0x1000 + help + Size of memory pages. 
+ +endif # MM_DRV diff --git a/drivers/mm/mm_drv_common.c b/drivers/mm/mm_drv_common.c new file mode 100644 index 00000000000..2913a9dae43 --- /dev/null +++ b/drivers/mm/mm_drv_common.c @@ -0,0 +1,436 @@ +/* + * Copyright (c) 2021 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * @brief Common Memory Management Driver Code + * + * This file provides common implementation of memory management driver + * functions, for example, sys_mm_drv_map_region() can use + * sys_mm_drv_map_page() to map page by page for the whole region. + * This avoids duplicate implementations of same functionality in + * different drivers. The implementations here are marked as + * weak functions so they can be overridden by the driver. + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "mm_drv_common.h" + +struct k_spinlock sys_mm_drv_common_lock; + +bool sys_mm_drv_is_addr_array_aligned(uintptr_t *addr, size_t cnt) +{ + size_t idx; + bool ret = true; + + for (idx = 0; idx < cnt; idx++) { + if (!sys_mm_drv_is_addr_aligned(addr[idx])) { + ret = false; + break; + } + } + + return ret; +} + +bool sys_mm_drv_is_virt_region_mapped(void *virt, size_t size) +{ + size_t offset; + bool ret = true; + + for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { + uint8_t *va = (uint8_t *)virt + offset; + + if (sys_mm_drv_page_phys_get(va, NULL) != 0) { + ret = false; + break; + } + } + + return ret; +} + +bool sys_mm_drv_is_virt_region_unmapped(void *virt, size_t size) +{ + size_t offset; + bool ret = true; + + for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { + uint8_t *va = (uint8_t *)virt + offset; + + if (sys_mm_drv_page_phys_get(va, NULL) != -EFAULT) { + ret = false; + break; + } + } + + return ret; +} + +int sys_mm_drv_simple_map_region(void *virt, uintptr_t phys, + size_t size, uint32_t flags) +{ + k_spinlock_key_t key; + int ret = 0; + size_t offset; + + 
CHECKIF(!sys_mm_drv_is_addr_aligned(phys) || + !sys_mm_drv_is_virt_addr_aligned(virt) || + !sys_mm_drv_is_size_aligned(size)) { + ret = -EINVAL; + goto out; + } + + key = k_spin_lock(&sys_mm_drv_common_lock); + + for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { + uint8_t *va = (uint8_t *)virt + offset; + uintptr_t pa = phys + offset; + + int ret2 = sys_mm_drv_map_page(va, pa, flags); + + if (ret2 != 0) { + __ASSERT(false, "cannot map 0x%lx to %p\n", pa, va); + + ret = ret2; + } + } + + k_spin_unlock(&sys_mm_drv_common_lock, key); + +out: + return ret; +} + +__weak FUNC_ALIAS(sys_mm_drv_simple_map_region, + sys_mm_drv_map_region, int); + +int sys_mm_drv_simple_map_array(void *virt, uintptr_t *phys, + size_t cnt, uint32_t flags) +{ + k_spinlock_key_t key; + int ret = 0; + size_t idx, offset; + + CHECKIF(!sys_mm_drv_is_addr_array_aligned(phys, cnt) || + !sys_mm_drv_is_virt_addr_aligned(virt)) { + ret = -EINVAL; + goto out; + } + + key = k_spin_lock(&sys_mm_drv_common_lock); + + offset = 0; + idx = 0; + while (idx < cnt) { + uint8_t *va = (uint8_t *)virt + offset; + + int ret2 = sys_mm_drv_map_page(va, phys[idx], flags); + + if (ret2 != 0) { + __ASSERT(false, "cannot map 0x%lx to %p\n", phys[idx], va); + + ret = ret2; + } + + offset += CONFIG_MM_DRV_PAGE_SIZE; + idx++; + } + + k_spin_unlock(&sys_mm_drv_common_lock, key); + +out: + return ret; +} + +__weak FUNC_ALIAS(sys_mm_drv_simple_map_array, sys_mm_drv_map_array, int); + +int sys_mm_drv_simple_unmap_region(void *virt, size_t size) +{ + k_spinlock_key_t key; + int ret = 0; + size_t offset; + + CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt) || + !sys_mm_drv_is_size_aligned(size)) { + ret = -EINVAL; + goto out; + } + + key = k_spin_lock(&sys_mm_drv_common_lock); + + for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { + uint8_t *va = (uint8_t *)virt + offset; + + int ret2 = sys_mm_drv_unmap_page(va); + + if (ret2 != 0) { + __ASSERT(false, "cannot unmap %p\n", va); + + ret = ret2; + } 
+ } + + k_spin_unlock(&sys_mm_drv_common_lock, key); + +out: + return ret; +} + +__weak FUNC_ALIAS(sys_mm_drv_simple_unmap_region, + sys_mm_drv_unmap_region, int); + +int sys_mm_drv_simple_remap_region(void *virt_old, size_t size, + void *virt_new) +{ + k_spinlock_key_t key; + size_t offset; + int ret = 0; + + CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt_old) || + !sys_mm_drv_is_virt_addr_aligned(virt_new) || + !sys_mm_drv_is_size_aligned(size)) { + ret = -EINVAL; + goto out; + } + + if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) && + (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) { + ret = -EINVAL; /* overlaps */ + goto out; + } + + key = k_spin_lock(&sys_mm_drv_common_lock); + + if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) || + !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) { + ret = -EINVAL; + goto unlock_out; + } + + for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { + uint8_t *va_old = (uint8_t *)virt_old + offset; + uint8_t *va_new = (uint8_t *)virt_new + offset; + uintptr_t pa; + uint32_t flags; + int ret2; + bool to_map; + + /* + * va_old is mapped as checked above, so no need + * to check for return value here. + */ + (void)sys_mm_drv_page_phys_get(va_old, &pa); + + to_map = true; + ret2 = sys_mm_drv_page_flag_get(va_old, &flags); + if (ret2 != 0) { + __ASSERT(false, "cannot query page %p\n", va_old); + + ret = ret2; + to_map = false; + } + + ret2 = sys_mm_drv_unmap_page(va_old); + if (ret2 != 0) { + __ASSERT(false, "cannot unmap %p\n", va_old); + + ret = ret2; + } + + if (!to_map) { + /* + * Cannot retrieve flags of mapped virtual memory. + * Skip mapping this page as we don't want to map + * with unknown random flags. 
+ */ + continue; + } + + ret2 = sys_mm_drv_map_page(va_new, pa, flags); + if (ret2 != 0) { + __ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new); + + ret = ret2; + } + } + +unlock_out: + k_spin_unlock(&sys_mm_drv_common_lock, key); + +out: + return ret; +} + +__weak FUNC_ALIAS(sys_mm_drv_simple_remap_region, + sys_mm_drv_remap_region, int); + +int sys_mm_drv_simple_move_region(void *virt_old, size_t size, + void *virt_new, uintptr_t phys_new) +{ + k_spinlock_key_t key; + size_t offset; + int ret = 0; + + CHECKIF(!sys_mm_drv_is_addr_aligned(phys_new) || + !sys_mm_drv_is_virt_addr_aligned(virt_old) || + !sys_mm_drv_is_virt_addr_aligned(virt_new) || + !sys_mm_drv_is_size_aligned(size)) { + ret = -EINVAL; + goto out; + } + + if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) && + (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) { + ret = -EINVAL; /* overlaps */ + goto out; + } + + key = k_spin_lock(&sys_mm_drv_common_lock); + + if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) || + !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) { + ret = -EINVAL; + goto unlock_out; + } + + for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { + uint8_t *va_old = (uint8_t *)virt_old + offset; + uint8_t *va_new = (uint8_t *)virt_new + offset; + uintptr_t pa = phys_new + offset; + uint32_t flags; + int ret2; + + ret2 = sys_mm_drv_page_flag_get(va_old, &flags); + if (ret2 != 0) { + __ASSERT(false, "cannot query page %p\n", va_old); + + ret = ret2; + } else { + /* + * Only map the new page when we can retrieve + * flags of the old mapped page as We don't + * want to map with unknown random flags. 
+ */ + ret2 = sys_mm_drv_map_page(va_new, pa, flags); + if (ret2 != 0) { + __ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new); + + ret = ret2; + } else { + (void)memcpy(va_new, va_old, + CONFIG_MM_DRV_PAGE_SIZE); + } + } + + ret2 = sys_mm_drv_unmap_page(va_old); + if (ret2 != 0) { + __ASSERT(false, "cannot unmap %p\n", va_old); + + ret = ret2; + } + } + +unlock_out: + k_spin_unlock(&sys_mm_drv_common_lock, key); + +out: + return ret; +} + +__weak FUNC_ALIAS(sys_mm_drv_simple_move_region, + sys_mm_drv_move_region, int); + +int sys_mm_drv_simple_move_array(void *virt_old, size_t size, + void *virt_new, + uintptr_t *phys_new, size_t phys_cnt) +{ + k_spinlock_key_t key; + size_t idx, offset; + int ret = 0; + + CHECKIF(!sys_mm_drv_is_addr_array_aligned(phys_new, phys_cnt) || + !sys_mm_drv_is_virt_addr_aligned(virt_old) || + !sys_mm_drv_is_virt_addr_aligned(virt_new) || + !sys_mm_drv_is_size_aligned(size)) { + ret = -EINVAL; + goto out; + } + + if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) && + (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) { + ret = -EINVAL; /* overlaps */ + goto out; + } + + key = k_spin_lock(&sys_mm_drv_common_lock); + + if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) || + !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) { + ret = -EINVAL; + goto unlock_out; + } + + offset = 0; + idx = 0; + while (idx < phys_cnt) { + uint8_t *va_old = (uint8_t *)virt_old + offset; + uint8_t *va_new = (uint8_t *)virt_new + offset; + uint32_t flags; + int ret2; + + ret2 = sys_mm_drv_page_flag_get(va_old, &flags); + if (ret2 != 0) { + __ASSERT(false, "cannot query page %p\n", va_old); + + ret = ret2; + } else { + /* + * Only map the new page when we can retrieve + * flags of the old mapped page as We don't + * want to map with unknown random flags. 
+ */ + ret2 = sys_mm_drv_map_page(va_new, phys_new[idx], flags); + if (ret2 != 0) { + __ASSERT(false, "cannot map 0x%lx to %p\n", + phys_new[idx], va_new); + + ret = ret2; + } else { + (void)memcpy(va_new, va_old, + CONFIG_MM_DRV_PAGE_SIZE); + } + } + + ret2 = sys_mm_drv_unmap_page(va_old); + + if (ret2 != 0) { + __ASSERT(false, "cannot unmap %p\n", va_old); + + ret = ret2; + } + + offset += CONFIG_MM_DRV_PAGE_SIZE; + idx++; + } + +unlock_out: + k_spin_unlock(&sys_mm_drv_common_lock, key); + +out: + return ret; +} + +__weak FUNC_ALIAS(sys_mm_drv_simple_move_array, + sys_mm_drv_move_array, int); diff --git a/drivers/mm/mm_drv_common.h b/drivers/mm/mm_drv_common.h new file mode 100644 index 00000000000..beee192e6dc --- /dev/null +++ b/drivers/mm/mm_drv_common.h @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2021 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_DRIVERS_SYSTEM_MM_DRV_COMMON_H_ +#define ZEPHYR_DRIVERS_SYSTEM_MM_DRV_COMMON_H_ + +#include +#include + +#include + +extern struct k_spinlock sys_mm_drv_common_lock; + +/** + * @brief Get the flags of mapped virtual address. + * + * The function queries the translation tables to find the flags of + * a mapped virtual address. This is used internally for remapping. + * + * Behavior when providing unaligned address is undefined, this + * is assumed to be page aligned. 
+ *
+ * @param virt Page-aligned virtual address
+ * @param[out] flags flags of mapped virtual address
+ *
+ * @retval 0 if mapping is found and valid
+ * @retval -EINVAL if invalid arguments are provided
+ * @retval -EFAULT if virtual address is not mapped
+ */
+int sys_mm_drv_page_flag_get(void *virt, uint32_t *flags);
+
+/**
+ * @brief Test if address is page-aligned
+ *
+ * @param addr address to be tested
+ *
+ * @retval true if page-aligned
+ * @retval false if not page-aligned
+ */
+static inline bool sys_mm_drv_is_addr_aligned(uintptr_t addr)
+{
+ return ((addr & (CONFIG_MM_DRV_PAGE_SIZE - 1)) == 0U);
+}
+
+/**
+ * @brief Test if virtual address is page-aligned
+ *
+ * @param virt virtual address to be tested
+ *
+ * @retval true if page-aligned
+ * @retval false if not page-aligned
+ */
+static inline bool sys_mm_drv_is_virt_addr_aligned(void *virt)
+{
+ return sys_mm_drv_is_addr_aligned(POINTER_TO_UINT(virt));
+}
+
+/**
+ * @brief Test if size is page-aligned
+ *
+ * @param size size to be tested
+ *
+ * @retval true if page-aligned
+ * @retval false if not page-aligned
+ */
+static inline bool sys_mm_drv_is_size_aligned(size_t size)
+{
+ if ((size & (CONFIG_MM_DRV_PAGE_SIZE - 1)) == 0U) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/**
+ * @brief Test if all physical addresses in array are page-aligned
+ *
+ * @param addr Array of physical addresses
+ * @param cnt Number of elements in the array
+ *
+ * @retval true if all are page-aligned
+ * @retval false if at least one is not page-aligned
+ */
+bool sys_mm_drv_is_addr_array_aligned(uintptr_t *addr, size_t cnt);
+
+/**
+ * @brief Test if the virtual memory region is mapped
+ *
+ * @param virt Page-aligned base virtual address
+ * @param size Size of the virtual memory region
+ *
+ * @retval true if all pages in the region are mapped
+ * @retval false if at least one page is not mapped
+ */
+bool sys_mm_drv_is_virt_region_mapped(void *virt, size_t size);
+
+/**
+ * @brief Test if the virtual memory region is
unmapped
+ *
+ * @param virt Page-aligned base virtual address
+ * @param size Size of the virtual memory region
+ *
+ * @retval true if all pages in the region are unmapped
+ * @retval false if at least one page is mapped
+ */
+bool sys_mm_drv_is_virt_region_unmapped(void *virt, size_t size);
+
+/**
+ * @brief Simple implementation of sys_mm_drv_map_region()
+ *
+ * This provides a simple implementation for sys_mm_drv_map_region()
+ * which is marked as a weak alias of sys_mm_drv_simple_map_region().
+ *
+ * Drivers do not have to implement their own sys_mm_drv_map_region()
+ * if this works for them. Or they can override sys_mm_drv_map_region()
+ * and call sys_mm_drv_simple_map_region() with some pre-processing done.
+ * Or the drivers can implement their own sys_mm_drv_map_region(), then
+ * this function will not be used.
+ *
+ * @see sys_mm_drv_map_region
+ *
+ * @param virt Page-aligned destination virtual address to map
+ * @param phys Page-aligned source physical address to map
+ * @param size Page-aligned size of the mapped memory region in bytes
+ * @param flags Caching, access and control flags, see SYS_MM_MEM_* macros
+ *
+ * @retval 0 if successful
+ * @retval -EINVAL if invalid arguments are provided
+ * @retval -EFAULT if any virtual addresses have already been mapped
+ */
+int sys_mm_drv_simple_map_region(void *virt, uintptr_t phys,
+ size_t size, uint32_t flags);
+
+/**
+ * @brief Simple implementation of sys_mm_drv_map_array()
+ *
+ * This provides a simple implementation for sys_mm_drv_map_array()
+ * which is marked as a weak alias of sys_mm_drv_simple_map_array().
+ *
+ * Drivers do not have to implement their own sys_mm_drv_map_array()
+ * if this works for them. Or they can override sys_mm_drv_map_array()
+ * and call sys_mm_drv_simple_map_array() with some pre-processing done.
+ * Or the drivers can implement their own sys_mm_drv_map_array(), then
+ * this function will not be used.
+ *
+ * @see sys_mm_drv_map_array
+ *
+ * @param virt Page-aligned destination virtual address to map
+ * @param phys Array of page-aligned source physical address to map
+ * @param cnt Number of elements in the physical page array
+ * @param flags Caching, access and control flags, see SYS_MM_MEM_* macros
+ *
+ * @retval 0 if successful
+ * @retval -EINVAL if invalid arguments are provided
+ * @retval -EFAULT if any virtual addresses have already been mapped
+ */
+int sys_mm_drv_simple_map_array(void *virt, uintptr_t *phys,
+ size_t cnt, uint32_t flags);
+
+/**
+ * @brief Simple implementation of sys_mm_drv_unmap_region()
+ *
+ * This provides a simple implementation for sys_mm_drv_unmap_region()
+ * which is marked as a weak alias of sys_mm_drv_simple_unmap_region().
+ *
+ * Drivers do not have to implement their own sys_mm_drv_unmap_region()
+ * if this works for them. Or they can override sys_mm_drv_unmap_region()
+ * and call sys_mm_drv_simple_unmap_region() with some pre-processing done.
+ * Or the drivers can implement their own sys_mm_drv_unmap_region(), then
+ * this function will not be used.
+ *
+ * @see sys_mm_drv_unmap_region
+ *
+ * @param virt Page-aligned base virtual address to un-map
+ * @param size Page-aligned region size
+ *
+ * @retval 0 if successful
+ * @retval -EINVAL if invalid arguments are provided
+ * @retval -EFAULT if virtual addresses have not been mapped
+ */
+int sys_mm_drv_simple_unmap_region(void *virt, size_t size);
+
+/**
+ * @brief Simple implementation of sys_mm_drv_remap_region()
+ *
+ * This provides a simple implementation for sys_mm_drv_remap_region()
+ * which is marked as a weak alias of sys_mm_drv_simple_remap_region().
+ *
+ * Drivers do not have to implement their own sys_mm_drv_remap_region()
+ * if this works for them. Or they can override sys_mm_drv_remap_region()
+ * and call sys_mm_drv_simple_remap_region() with some pre-processing done.
+ Or the drivers can implement their own sys_mm_drv_remap_region(), then
+ * this function will not be used.
+ *
+ * @see sys_mm_drv_remap_region
+ *
+ * @param virt_old Page-aligned base virtual address of existing memory
+ * @param size Page-aligned size of the mapped memory region in bytes
+ * @param virt_new Page-aligned base virtual address to which to remap
+ * the memory
+ *
+ * @retval 0 if successful
+ * @retval -EINVAL if invalid arguments are provided
+ * @retval -EFAULT if old virtual addresses are not all mapped or
+ * new virtual addresses are not all unmapped
+ */
+int sys_mm_drv_simple_remap_region(void *virt_old, size_t size,
+ void *virt_new);
+
+/**
+ * @brief Simple implementation of sys_mm_drv_move_region()
+ *
+ * This provides a simple implementation for sys_mm_drv_move_region()
+ * which is marked as a weak alias of sys_mm_drv_simple_move_region().
+ *
+ * Drivers do not have to implement their own sys_mm_drv_move_region()
+ * if this works for them. Or they can override sys_mm_drv_move_region()
+ * and call sys_mm_drv_simple_move_region() with some pre-processing done.
+ * Or the drivers can implement their own sys_mm_drv_move_region(), then
+ * this function will not be used.
+ *
+ * @see sys_mm_drv_move_region
+ *
+ * @param virt_old Page-aligned base virtual address of existing memory
+ * @param size Page-aligned size of the mapped memory region in bytes
+ * @param virt_new Page-aligned base virtual address to which to map
+ * new physical pages
+ * @param phys_new Page-aligned base physical address to contain
+ * the moved memory
+ *
+ * @retval 0 if successful
+ * @retval -EINVAL if invalid arguments are provided
+ * @retval -EFAULT if old virtual addresses are not all mapped or
+ * new virtual addresses are not all unmapped
+ */
+int sys_mm_drv_simple_move_region(void *virt_old, size_t size,
+ void *virt_new, uintptr_t phys_new);
+
+/**
+ * @brief Simple implementation of sys_mm_drv_move_array()
+ *
+ * This provides a simple implementation for sys_mm_drv_move_array()
+ * which is marked as a weak alias of sys_mm_drv_simple_move_array().
+ *
+ * Drivers do not have to implement their own sys_mm_drv_move_array()
+ * if this works for them. Or they can override sys_mm_drv_move_array()
+ * and call sys_mm_drv_simple_move_array() with some pre-processing done.
+ * Or the drivers can implement their own sys_mm_drv_move_array(), then
+ * this function will not be used.
+ * + * @see sys_mm_drv_move_array + * + * @param virt_old Page-aligned base virtual address of existing memory + * @param size Page-aligned size of the mapped memory region in bytes + * @param virt_new Page-aligned base virtual address to which to map + * new physical pages + * @param phys_new Array of page-aligned physical address to contain + * the moved memory + * @param phys_cnt Number of elements in the physical page array + * + * @retval 0 if successful + * @retval -EINVAL if invalid arguments are provided + * @retval -EFAULT if old virtual addresses are not all mapped or + * new virtual addresses are not all unmapped + */ +int sys_mm_drv_simple_move_array(void *virt_old, size_t size, + void *virt_new, + uintptr_t *phys_new, size_t phys_cnt); + +#endif /* ZEPHYR_DRIVERS_SYSTEM_MM_DRV_COMMON_H_ */