ext: Import OpenAMP for IPC

Origin:
   https://github.com/OpenAMP/open-amp

Status:
   de361adee09cd31793c60218a0ec49bc307a7410 [v2018.04]

   When importing open-amp, we removed the apps dir to reduce the amount
   of code imported.

Purpose:
   IPC layer that implements rpmsg communication between cores.

Description:

This repository is the home for the Open Asymmetric Multi Processing
(OpenAMP) framework project. The OpenAMP framework provides software
components that enable development of software applications for
Asymmetric Multiprocessing (AMP) systems. The framework provides the
following key capabilities.

* Provides Life Cycle Management and Inter Processor Communication
  capabilities for management of remote compute resources and their
  associated software contexts.
* Provides a stand-alone library usable with RTOS and bare-metal software
  environments
* Compatibility with upstream Linux remoteproc and rpmsg components
* The following AMP configurations are supported:
  a. Linux master/Generic(Baremetal) remote
  b. Generic(Baremetal) master/Linux remote
* Proxy infrastructure and supplied demos showcase the ability of a proxy on
  the master to handle printf, scanf, open, close, read, and write calls from
  bare-metal based remote contexts.

Dependencies:
   libmetal (https://github.com/OpenAMP/libmetal) - provides HAL layer
   between OpenAMP and RTOS or OS environment.

URL:
   https://github.com/OpenAMP/open-amp/

commit:
   de361adee09cd31793c60218a0ec49bc307a7410

Maintained-by:
   External

License:
   BSD-3-Clause
   BSD-2-Clause

Signed-off-by: Kumar Gala <kumar.gala@linaro.org>
Kumar Gala 2018-04-19 14:27:55 -05:00, committed by Maureen Helm
commit 17b64bafae
63 changed files with 10677 additions and 0 deletions

@ -0,0 +1,54 @@
OpenAMP
#####################

Origin:
   https://github.com/OpenAMP/open-amp

Status:
   de361adee09cd31793c60218a0ec49bc307a7410 [v2018.04]

   When importing open-amp, we removed the apps dir to reduce the amount of
   code imported.

Purpose:
   IPC layer that implements rpmsg communication between cores.

Description:

This repository is the home for the Open Asymmetric Multi Processing (OpenAMP)
framework project. The OpenAMP framework provides software components that
enable development of software applications for Asymmetric Multiprocessing
(AMP) systems. The framework provides the following key capabilities.

* Provides Life Cycle Management and Inter Processor Communication
  capabilities for management of remote compute resources and their
  associated software contexts.
* Provides a stand-alone library usable with RTOS and bare-metal software
  environments
* Compatibility with upstream Linux remoteproc and rpmsg components
* The following AMP configurations are supported:
  a. Linux master/Generic(Baremetal) remote
  b. Generic(Baremetal) master/Linux remote
* Proxy infrastructure and supplied demos showcase the ability of a proxy on
  the master to handle printf, scanf, open, close, read, and write calls from
  bare-metal based remote contexts.

Dependencies:
   libmetal (https://github.com/OpenAMP/libmetal) - provides HAL layer
   between OpenAMP and RTOS or OS environment.

URL:
   https://github.com/OpenAMP/open-amp/

commit:
   de361adee09cd31793c60218a0ec49bc307a7410

Maintained-by:
   External

License:
   BSD-3-Clause
   BSD-2-Clause

License Link:
   https://github.com/OpenAMP/open-amp/blob/master/LICENSE.md

@ -0,0 +1,17 @@
include(ExternalProject)
include($ENV{ZEPHYR_BASE}/ext/lib/ipc/libmetal.cmake)
ExternalProject_Add(
open-amp
SOURCE_DIR $ENV{ZEPHYR_BASE}/ext/lib/ipc/open-amp/
DEPENDS libmetal
INSTALL_COMMAND "" # This particular build system has no install command
CMAKE_ARGS -DWITH_ZEPHYR=ON -DWITH_PROXY=OFF -DBOARD=${BOARD} -DLIBMETAL_INCLUDE_DIR=${LIBMETAL_INCLUDE_DIR} -DLIBMETAL_LIB=${LIBMETAL_LIBRARY}
)
ExternalProject_Get_property(open-amp SOURCE_DIR)
set(OPENAMP_INCLUDE_DIR ${SOURCE_DIR}/lib/include CACHE PATH "Path to the OpenAMP header files")
ExternalProject_Get_property(open-amp BINARY_DIR)
set(OPENAMP_LIBRARY ${BINARY_DIR}/lib/libopen_amp.a CACHE FILEPATH "Path to the OpenAMP library")

ext/lib/ipc/open-amp/.gitignore vendored Normal file
@ -0,0 +1,15 @@
*.o
*~
!libs/system/zc702evk/linux/lib/*/*.a
*.bin
*.map
*.out
*.log
*.d
/tags
/TAGS
# cscope files
cscope.*
ncscope.*

@ -0,0 +1,36 @@
cmake_minimum_required (VERSION 2.6)
# The version number
set (OPENAMP_VERSION_MAJOR 1)
set (OPENAMP_VERSION_MINOR 0)
list (APPEND CMAKE_MODULE_PATH
"${CMAKE_SOURCE_DIR}/cmake"
"${CMAKE_SOURCE_DIR}/cmake/modules"
"${CMAKE_SOURCE_DIR}/cmake/platforms")
include (syscheck)
project (open_amp C)
include (CheckIncludeFiles)
include (CheckCSourceCompiles)
include (collect)
include (options)
include (depends)
enable_testing ()
set (OPENAMP_ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
set (OPENAMP_BIN_ROOT "${CMAKE_CURRENT_BINARY_DIR}")
if (WITH_OBSOLETE)
add_subdirectory (obsolete)
endif (WITH_OBSOLETE)
add_subdirectory (lib)
if (WITH_APPS)
add_subdirectory (apps)
endif (WITH_APPS)
# vim: expandtab:ts=2:sw=2:smartindent

@ -0,0 +1,33 @@
Software License Agreement (BSD License)
========================================
Copyright (c) 2014, Mentor Graphics Corporation. All rights reserved.
Copyright (c) 2015 - 2016 Xilinx, Inc. All rights reserved.
Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. The names of its contributors may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

@ -0,0 +1,22 @@
# OpenAMP Maintainers
OpenAMP project is maintained by the OpenAMP open source community. Everyone
is encouraged to submit issues and changes to improve OpenAMP.
The intention of this file is to provide a set of names that developers can
consult when they have a question about OpenAMP and to provide a set of
names to be CC'd when submitting a patch.
## Project Administration
Wendy Liang <wendy.liang@xilinx.com>
### All patches CC here
open-amp@googlegroups.com
## Machines
### Xilinx Platform - Zynq-7000
Wendy Liang <wendy.liang@xilinx.com>
### Xilinx Platform - Zynq UltraScale+ MPSoC
Wendy Liang <wendy.liang@xilinx.com>

@ -0,0 +1,248 @@
# open-amp
This repository is the home for the Open Asymmetric Multi Processing (OpenAMP)
framework project. The OpenAMP framework provides software components that
enable development of software applications for Asymmetric Multiprocessing
(AMP) systems. The framework provides the following key capabilities.
1. Provides Life Cycle Management, and Inter Processor Communication
capabilities for management of remote compute resources and their associated
software contexts.
2. Provides a stand-alone library usable with RTOS and bare-metal software
environments
3. Compatibility with upstream Linux remoteproc and rpmsg components
4. The following AMP configurations are supported:
a. Linux master/Generic(Baremetal) remote
b. Generic(Baremetal) master/Linux remote
5. Proxy infrastructure and supplied demos showcase the ability of a proxy on the
master to handle printf, scanf, open, close, read, and write calls from
bare-metal based remote contexts.
## OpenAMP Source Structure
```
|- lib/
| |- common/ # common helper functions
| |- virtio/ # virtio implementation
| |- rpmsg/ # rpmsg implementation
| |- remoteproc/ # remoteproc implementation
| | |- drivers # remoteproc drivers
| |- proxy/ # implement one processor access device on the
| | # other processor with file operations
|- apps/ # demonstration/testing applications
| |- machine/ # common files for machine can be shared by applications
| # It is up to each app to decide whether to use these files.
| |- system/ # common files for system can be shared by applications
| # It is up to each app to decide whether to use these files.
|- obsolete # It is used to build libs which may also be required when
| # building the apps. It will be removed in the future since the
| # user can specify which libs to use when compiling the apps.
|- cmake # CMake files
```
OpenAMP library libopen_amp is composed of the following directories in `lib/`:
* `common/`
* `virtio/`
* `rpmsg/`
* `remoteproc/`
* `proxy/`
OpenAMP system/machine support has been moved to libmetal; the system/machine
layer in the `apps/` directory is for system application initialization and
resource table definition.
### libmetal APIs used in OpenAMP
Here are the libmetal APIs used by OpenAMP. If you want to port OpenAMP to your
system, you will need to implement the following libmetal APIs in libmetal's
`lib/system/<SYS>` directory:
* alloc, for memory allocation and memory free
* cache, for flushing cache and invalidating cache
* io, for memory mapping. OpenAMP requires memory mapping in order to access
vrings and carved-out memory.
* irq, for IRQ handler registration, IRQ disable/enable and global IRQ handling.
* mutex
* shmem (For RTOS, you can usually use the implementation from
`lib/system/generic/`)
* sleep; at the moment, OpenAMP only requires a microsecond sleep: when OpenAMP
fails to get a buffer to send messages, it calls this function to sleep and
then tries again.
* time, for timestamp
* init, for libmetal initialization.
* atomic
Please refer to `lib/system/generic` when you port libmetal to your system.
If you use a compiler other than GNU gcc, please refer to `lib/compiler/gcc/` to
port libmetal for your compiler. At the moment, OpenAMP needs the atomic
operations defined in `lib/compiler/gcc/atomic.h`.
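The fragment below is only an illustration (it is not part of the upstream tree); it exercises the init, alloc and mutex primitives the same way `lib/common/hil.c` and `lib/common/sh_mem.c` in this import use them, so a port is in reasonable shape once something like this builds and runs against your `lib/system/<SYS>` layer:
```
#include <metal/sys.h>    /* metal_init()/metal_finish() */
#include <metal/alloc.h>  /* metal_allocate_memory()/metal_free_memory() */
#include <metal/mutex.h>  /* metal_mutex_*() */

int libmetal_port_smoke_test(void)
{
	struct metal_init_params params = METAL_INIT_DEFAULTS;
	metal_mutex_t lock;
	void *buf;

	if (metal_init(&params))           /* "init" */
		return -1;

	buf = metal_allocate_memory(64);   /* "alloc" */
	if (!buf) {
		metal_finish();
		return -1;
	}

	metal_mutex_init(&lock);           /* "mutex" */
	metal_mutex_acquire(&lock);
	metal_mutex_release(&lock);
	metal_mutex_deinit(&lock);

	metal_free_memory(buf);
	metal_finish();
	return 0;
}
```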
## OpenAMP Compilation
OpenAMP uses CMake to compile both the library and the demonstration applications.
OpenAMP requires the libmetal library. For now, you will need to download and
compile the libmetal library separately before compiling the OpenAMP library.
In the future, we will try to make libmetal a submodule of OpenAMP to make this
flow easier.
### Example to compile OpenAMP for Zephyr
You can compile the OpenAMP library for Zephyr.
As OpenAMP uses libmetal, please refer to the libmetal README to build libmetal
for Zephyr before building the OpenAMP library for Zephyr.
As Zephyr uses CMake, we build the OpenAMP library as a target of a Zephyr CMake
project. Here is how to build OpenAMP for Zephyr:
```
$ export ZEPHYR_GCC_VARIANT=zephyr
$ export ZEPHYR_SDK_INSTALL_DIR=<where Zephyr SDK is installed>
$ source <git_clone_zephyr_project_source_root>/zephyr-env.sh
$ cmake <OpenAMP_source_root> \
-DWITH_ZEPHYR=on -DBOARD=qemu_cortex_m3 \
-DCMAKE_INCLUDE_PATH="<libmetal_zephyr_build_dir>/lib/include" \
-DCMAKE_LIBRARY_PATH="<libmetal_zephyr_build_dir>/lib"
$ make VERBOSE=1 all
```
### Example to compile OpenAMP for communication between Linux processes:
* Install libsysfs devel and libhugetlbfs devel packages on your Linux host.
* build libmetal library on your host as follows:
```
$ mkdir -p build-libmetal
$ cd build-libmetal
$ cmake <libmetal_source>
$ make VERBOSE=1 DESTDIR=<libmetal_install> install
```
* build OpenAMP library on your host as follows:
```
$ mkdir -p build-openamp
$ cd build-openamp
$ cmake <openamp_source> -DCMAKE_INCLUDE_PATH=<libmetal_built_include_dir> \
      -DCMAKE_LIBRARY_PATH=<libmetal_built_lib_dir> [-DWITH_APPS=ON]
$ make VERBOSE=1 DESTDIR=$(pwd) install
```
The OpenAMP library will be generated in the `build/usr/local/lib` directory,
the headers in the `build/usr/local/include` directory, and the application
executables in the `build/usr/local/bin` directory.
* The cmake option `-DWITH_APPS=ON` builds the demonstration applications.
* If you have used `-DWITH_APPS=ON` to build the demos, you can try them on
your Linux host as follows:
```
# Start echo test server to wait for message to echo
$ sudo LD_LIBRARY_PATH=<openamp_built>/usr/local/lib:<libmetal_built>/usr/local/lib \
build/usr/local/bin/echo_testd-shared
# Run echo test to send message to echo test server
$ sudo LD_LIBRARY_PATH=<openamp_built>/usr/local/lib:<libmetal_built>/usr/local/lib \
build/usr/local/bin/echo_test-shared 1
```
### Example to compile Zynq UltraScale+ MPSoC R5 generic(baremetal) remote:
* build libmetal library on your host as follows:
* Create your own cmake toolchain file to compile libmetal for your generic
(baremetal) platform. Here is an example toolchain file:
```
set (CMAKE_SYSTEM_PROCESSOR "arm" CACHE STRING "")
set (MACHINE "zynqmp_r5" CACHE STRING "")
set (CROSS_PREFIX "armr5-none-eabi-" CACHE STRING "")
set (CMAKE_C_FLAGS "-mfloat-abi=soft -mcpu=cortex-r5 -Wall -Werror -Wextra \
-flto -Os -I/ws/xsdk/r5_0_bsp/psu_cortexr5_0/include" CACHE STRING "")
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -flto")
SET(CMAKE_AR "gcc-ar" CACHE STRING "")
SET(CMAKE_C_ARCHIVE_CREATE "<CMAKE_AR> qcs <TARGET> <LINK_FLAGS> <OBJECTS>")
SET(CMAKE_C_ARCHIVE_FINISH true)
include (cross-generic-gcc)
```
* Compile libmetal library:
```
$ mkdir -p build-libmetal
$ cd build-libmetal
$ cmake <libmetal_source> -DCMAKE_TOOLCHAIN_FILE=<toolchain_file>
$ make VERBOSE=1 DESTDIR=<libmetal_install> install
```
* build OpenAMP library on your host as follows:
* Create your own cmake toolchain file to compile openamp for your generic
(baremetal) platform. Here is an example toolchain file:
```
set (CMAKE_SYSTEM_PROCESSOR "arm" CACHE STRING "")
set (MACHINE "zynqmp_r5" CACHE STRING "")
set (CROSS_PREFIX "armr5-none-eabi-" CACHE STRING "")
set (CMAKE_C_FLAGS "-mfloat-abi=soft -mcpu=cortex-r5 -Os -flto \
-I/ws/libmetal-r5-generic/usr/local/include \
-I/ws/xsdk/r5_0_bsp/psu_cortexr5_0/include" CACHE STRING "")
set (CMAKE_ASM_FLAGS "-mfloat-abi=soft -mcpu=cortex-r5" CACHE STRING "")
set (PLATFORM_LIB_DEPS "-lxil -lc -lm" CACHE STRING "")
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -flto")
SET(CMAKE_AR "gcc-ar" CACHE STRING "")
SET(CMAKE_C_ARCHIVE_CREATE "<CMAKE_AR> qcs <TARGET> <LINK_FLAGS> <OBJECTS>")
SET(CMAKE_C_ARCHIVE_FINISH true)
set (CMAKE_FIND_ROOT_PATH /ws/libmetal-r5-generic/usr/local/lib \
/ws/xsdk/r5_bsp/psu_cortexr5_0/lib )
include (cross_generic_gcc)
```
* We use cmake `find_path` and `find_library` to check whether the libmetal
headers and library are in the include and library search paths. However,
for non-Linux systems this does not work with the `CMAKE_INCLUDE_PATH` and
`CMAKE_LIBRARY_PATH` variables, so we need to specify those paths
in the toolchain file with `CMAKE_C_FLAGS` and `CMAKE_FIND_ROOT_PATH`.
* Compile the OpenAMP library:
```
$ mkdir -p build-openamp
$ cd build-openamp
$ cmake <openamp_source> -DCMAKE_TOOLCHAIN_FILE=<toolchain_file>
$ make VERBOSE=1 DESTDIR=$(pwd) install
```
The OpenAMP library will be generated in the `build/usr/local/lib` directory,
the headers in the `build/usr/local/include` directory, and the application
executables in the `build/usr/local/bin` directory.
### Example to compile OpenAMP Linux Userspace for Zynq UltraScale+ MPSoC
We can use yocto to build the OpenAMP Linux userspace library and applications.
The open-amp and libmetal recipes are in this yocto layer:
https://github.com/OpenAMP/meta-openamp
* Add the `meta-openamp` layer to your layers in your yocto build project's `bblayers.conf` file.
* Add `libmetal` and `open-amp` to your packages list. E.g. add `libmetal` and `open-amp` to the
`IMAGE_INSTALL_append` in the `local.conf` file.
* You can also add the OpenAMP demo Linux application packages to your yocto package list. The OpenAMP
demo example recipes are also in `meta-openamp`:
https://github.com/OpenAMP/meta-openamp/tree/master/recipes-openamp/openamp-examples
In order to use OpenAMP (RPMsg) in Linux userspace, you will need to add the IPI device,
vring memory and shared buffer memory to your Linux kernel device tree. A device tree example
can be found here:
https://github.com/OpenAMP/open-amp/blob/master/apps/machine/zynqmp/openamp-linux-userspace.dtsi
## Supported System and Machines
For now, it supports:
* Zynq generic slave
* Zynq UltraScale+ MPSoC R5 generic slave
* Linux host OpenAMP between Linux userspace processes
* Linux userspace OpenAMP RPMsg master
* Linux userspace OpenAMP RPMsg slave
## Known Limitations:
1. The OpenAMP framework supports OpenAMP firmware running as master; however,
the example to show this ability is not ready yet.
2. In the case of OpenAMP in Linux userspace for inter-processor communication,
life cycle management with remoteproc is not supported yet; that is, for now
it is not possible to load the remote firmware with OpenAMP running in Linux
userspace.
3. In the case of OpenAMP in Linux userspace for inter-processor communication,
only static vrings and shared buffers are supported.
4. `sudo` is required to run the OpenAMP demos between Linux processes, as on
some systems they do not work when run as a normal user.
For information on using the framework, please refer to the wiki of the OpenAMP repo.
Subscribe to the open-amp mailing list at https://groups.google.com/group/open-amp.

@ -0,0 +1,40 @@
function (collector_create name base)
set_property (GLOBAL PROPERTY "COLLECT_${name}_LIST")
set_property (GLOBAL PROPERTY "COLLECT_${name}_BASE" "${base}")
endfunction (collector_create)
function (collector_list var name)
get_property (_list GLOBAL PROPERTY "COLLECT_${name}_LIST")
set (${var} "${_list}" PARENT_SCOPE)
endfunction (collector_list)
function (collector_base var name)
get_property (_base GLOBAL PROPERTY "COLLECT_${name}_BASE")
set (${var} "${_base}" PARENT_SCOPE)
endfunction (collector_base)
function (collect name)
collector_base (_base ${name})
string(COMPARE NOTEQUAL "${_base}" "" _is_rel)
set (_list)
foreach (s IN LISTS ARGN)
if (_is_rel)
get_filename_component (s "${s}" ABSOLUTE)
file (RELATIVE_PATH s "${_base}" "${s}")
else (_is_rel)
get_filename_component (ts "${s}" ABSOLUTE)
if (EXISTS "${ts}")
set (s "${ts}")
endif (EXISTS "${ts}")
endif (_is_rel)
list (APPEND _list "${s}")
endforeach ()
set_property (GLOBAL APPEND PROPERTY "COLLECT_${name}_LIST" "${_list}")
endfunction (collect)
# Create global collectors
collector_create (PROJECT_INC_DIRS "")
collector_create (PROJECT_LIB_DIRS "")
collector_create (PROJECT_LIB_DEPS "")
# vim: expandtab:ts=2:sw=2:smartindent

@ -0,0 +1,23 @@
if (WITH_LIBMETAL_FIND)
find_package (Libmetal REQUIRED)
collect (PROJECT_INC_DIRS "${LIBMETAL_INCLUDE_DIR}")
collect (PROJECT_LIB_DIRS "${LIBMETAL_LIB_DIR}")
collect (PROJECT_LIB_DEPS "${LIBMETAL_LIB}")
endif (WITH_LIBMETAL_FIND)
if ("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux")
check_include_files (stdatomic.h HAVE_STDATOMIC_H)
check_include_files (fcntl.h HAVE_FCNTL_H)
else ("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux")
set (_saved_cmake_required_flags ${CMAKE_REQUIRED_FLAGS})
set (CMAKE_REQUIRED_FLAGS "-c")
check_include_files (stdatomic.h HAVE_STDATOMIC_H)
check_include_files (fcntl.h HAVE_FCNTL_H)
set (CMAKE_REQUIRED_FLAGS ${_saved_cmake_required_flags})
endif ("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux")
if (NOT HAVE_FCNTL_H)
unset (WITH_PROXY CACHE)
endif (NOT HAVE_FCNTL_H)
# vim: expandtab:ts=2:sw=2:smartindent

@ -0,0 +1,31 @@
# FindLibmetal
# --------
#
# Find Libmetal
#
# Find the native Libmetal includes and library. This module defines:
#
# ::
#
#   LIBMETAL_INCLUDE_DIR, where to find metal/sys.h, etc.
#   LIBMETAL_LIB_DIR, where to find the libmetal library.
# FIX ME, CMAKE_FIND_ROOT_PATH doesn't work
# even use the following
# set (CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH)
# set (CMAKE_FIND_ROOT_PATH_MODE_INCLUDE BOTH)
# set (CMAKE_FIND_ROOT_PATH_MODE_PROGRAM BOTH)
find_path(LIBMETAL_INCLUDE_DIR NAMES metal/sys.h PATHS ${CMAKE_FIND_ROOT_PATH})
find_library(LIBMETAL_LIB NAMES metal PATHS ${CMAKE_FIND_ROOT_PATH})
get_filename_component(LIBMETAL_LIB_DIR ${LIBMETAL_LIB} DIRECTORY)
# handle the QUIETLY and REQUIRED arguments and set LIBMETAL_FOUND to TRUE if
# all listed variables are TRUE
include (FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS (LIBMETAL DEFAULT_MSG LIBMETAL_LIB LIBMETAL_INCLUDE_DIR)
if (LIBMETAL_FOUND)
set (LIBMETAL_LIBS ${LIBMETAL_LIB})
endif (LIBMETAL_FOUND)
mark_as_advanced (LIBMETAL_LIB LIBMETAL_INCLUDE_DIR LIBMETAL_LIB_DIR)

@ -0,0 +1,77 @@
set (PROJECT_VER_MAJOR 0)
set (PROJECT_VER_MINOR 1)
set (PROJECT_VER_PATCH 0)
set (PROJECT_VER 0.1.0)
if (NOT CMAKE_BUILD_TYPE)
set (CMAKE_BUILD_TYPE Debug)
endif (NOT CMAKE_BUILD_TYPE)
if (NOT CMAKE_INSTALL_LIBDIR)
set (CMAKE_INSTALL_LIBDIR "lib")
endif (NOT CMAKE_INSTALL_LIBDIR)
if (NOT CMAKE_INSTALL_BINDIR)
set (CMAKE_INSTALL_BINDIR "bin")
endif (NOT CMAKE_INSTALL_BINDIR)
set (_host "${CMAKE_HOST_SYSTEM_NAME}/${CMAKE_HOST_SYSTEM_PROCESSOR}")
message ("-- Host: ${_host}")
set (_target "${CMAKE_SYSTEM_NAME}/${CMAKE_SYSTEM_PROCESSOR}")
message ("-- Target: ${_target}")
if (NOT DEFINED MACHINE)
set (MACHINE "Generic")
endif (NOT DEFINED MACHINE)
message ("-- Machine: ${MACHINE}")
string (TOLOWER ${CMAKE_SYSTEM_NAME} PROJECT_SYSTEM)
string (TOUPPER ${CMAKE_SYSTEM_NAME} PROJECT_SYSTEM_UPPER)
string (TOLOWER ${CMAKE_SYSTEM_PROCESSOR} PROJECT_PROCESSOR)
string (TOUPPER ${CMAKE_SYSTEM_PROCESSOR} PROJECT_PROCESSOR_UPPER)
string (TOLOWER ${MACHINE} PROJECT_MACHINE)
string (TOUPPER ${MACHINE} PROJECT_MACHINE_UPPER)
# Select to build Remote proc master
option (WITH_REMOTEPROC_MASTER "Build as remoteproc master" OFF)
if (WITH_REMOTEPROC_MASTER)
option (WITH_LINUXREMOTE "The remote is Linux" ON)
endif (WITH_REMOTEPROC_MASTER)
# Select which components are in the openamp lib
option (WITH_PROXY "Build with proxy(access device controlled by other processor)" ON)
option (WITH_APPS "Build with sample applications" OFF)
option (WITH_PROXY_APPS "Build with proxy sample applications" OFF)
if (WITH_APPS)
if (WITH_PROXY)
set (WITH_PROXY_APPS ON)
elseif ("${PROJECT_SYSTEM}" STREQUAL "linux")
set (WITH_PROXY_APPS ON)
endif (WITH_PROXY)
option (WITH_BENCHMARK "Build benchmark app" OFF)
endif (WITH_APPS)
option (WITH_OBSOLETE "Build obsolete system libs" OFF)
# Set the compilation flags
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra")
if (WITH_LINUXREMOTE)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DOPENAMP_REMOTE_LINUX_ENABLE")
endif (WITH_LINUXREMOTE)
if (WITH_BENCHMARK)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DOPENAMP_BENCHMARK_ENABLE")
endif (WITH_BENCHMARK)
option (WITH_STATIC_LIB "Build with a static library" ON)
if ("${PROJECT_SYSTEM}" STREQUAL "linux")
option (WITH_SHARED_LIB "Build with a shared library" ON)
endif ("${PROJECT_SYSTEM}" STREQUAL "linux")
option (WITH_LIBMETAL_FIND "Check Libmetal library can be found" ON)
option (WITH_EXT_INCLUDES_FIND "Check other external includes are found" ON)
message ("-- C_FLAGS : ${CMAKE_C_FLAGS}")
# vim: expandtab:ts=2:sw=2:smartindent

@ -0,0 +1,12 @@
set (CMAKE_SYSTEM_NAME "Generic" CACHE STRING "")
include (CMakeForceCompiler)
CMAKE_FORCE_C_COMPILER ("${CROSS_PREFIX}gcc" GNU)
CMAKE_FORCE_CXX_COMPILER ("${CROSS_PREFIX}g++" GNU)
set (CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER CACHE STRING "")
set (CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER CACHE STRING "")
set (CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER CACHE STRING "")
# vim: expandtab:ts=2:sw=2:smartindent

@ -0,0 +1,9 @@
set (CMAKE_SYSTEM_NAME "Linux")
set (CMAKE_C_COMPILER "${CROSS_PREFIX}gcc")
set (CMAKE_CXX_COMPILER "${CROSS_PREFIX}g++")
set (CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set (CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER)
set (CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER)
# vim: expandtab:ts=2:sw=2:smartindent

@ -0,0 +1,8 @@
set (CMAKE_SYSTEM_PROCESSOR "arm" CACHE STRING "")
set (MACHINE "zynq7" CACHE STRING "")
set (CROSS_PREFIX "arm-none-eabi-" CACHE STRING "")
set (CMAKE_C_FLAGS "-mcpu=cortex-a9 -mfpu=vfpv3 -mfloat-abi=hard" CACHE STRING "")
include (cross_generic_gcc)
# vim: expandtab:ts=2:sw=2:smartindent

@ -0,0 +1,6 @@
set (CMAKE_SYSTEM_PROCESSOR "arm")
set (CROSS_PREFIX "arm-xilinx-linux-gnueabi-")
include (cross-linux-gcc)
# vim: expandtab:ts=2:sw=2:smartindent

@ -0,0 +1,6 @@
set (CMAKE_SYSTEM_PROCESSOR "arm64")
set (CROSS_PREFIX "aarch64-none-elf-")
include (cross_generic_gcc)
# vim: expandtab:ts=2:sw=2:smartindent

@ -0,0 +1,7 @@
set (CMAKE_SYSTEM_PROCESSOR "arm64")
set (CROSS_PREFIX "aarch64-linux-gnu-")
set (MACHINE "zynqmp" CACHE STRING "")
include (cross_linux_gcc)
# vim: expandtab:ts=2:sw=2:smartindent

@ -0,0 +1,10 @@
set (CMAKE_SYSTEM_PROCESSOR "arm" CACHE STRING "")
set (MACHINE "zynqmp_r5" CACHE STRING "")
set (CROSS_PREFIX "armr5-none-eabi-" CACHE STRING "")
# Xilinx SDK versions earlier than 2017.2 use mfloat-abi=soft by default to generate libxil
set (CMAKE_C_FLAGS "-mfloat-abi=hard -mfpu=vfpv3-d16 -mcpu=cortex-r5" CACHE STRING "")
include (cross_generic_gcc)
# vim: expandtab:ts=2:sw=2:smartindent

@ -0,0 +1,12 @@
# use "Generic" as CMAKE_SYSTEM_NAME
if (WITH_ZEPHYR)
set (CMAKE_SYSTEM_NAME "Generic" CACHE STRING "")
string (TOLOWER "Zephyr" PROJECT_SYSTEM)
string (TOUPPER "Zephyr" PROJECT_SYSTEM_UPPER)
set(IS_TEST 1)
include($ENV{ZEPHYR_BASE}/cmake/app/boilerplate.cmake NO_POLICY_SCOPE)
if (CONFIG_CPU_CORTEX_M)
set (MACHINE "cortexm" CACHE STRING "")
endif (CONFIG_CPU_CORTEX_M)
endif (WITH_ZEPHYR)

@ -0,0 +1,58 @@
# echo_test
This readme is about the OpenAMP echo_test demo.
In the echo_test demo, one processor sends a message to the other, and the other one echoes the message back. The processor which sends the message verifies the echoed message.
For now, Linux sends the message and the baremetal side echoes it back.
## Compilation
### Baremetal Compilation
The option `WITH_ECHO_TEST` controls whether the application will be built.
By default this option is `ON` when `WITH_APPS` is on.
Here is an example:
```
$ cmake ../open-amp -DCMAKE_TOOLCHAIN_FILE=zynq7_generic -DWITH_OBSOLETE=on -DWITH_APPS=ON
```
### Linux Compilation
#### Linux Kernel Compilation
You will need to manually compile the following kernel modules with your Linux kernel (please refer to the Linux kernel documentation for how to add a kernel module):
* Your machine's remoteproc kernel driver
* `obsolete/apps/echo_test/system/linux/kernelspace/rpmsg_user_dev_driver` if you want to run the echo_test app in Linux user space.
* `obsolete/system/linux/kernelspace/rpmsg_echo_test_kern_app` if you want to run the echo_test app in Linux kernel space.
#### Linux Userspace Compilation
* Compile `obsolete/apps/echo_test/system/linux/userspace/echo_test` into your Linux OS.
* If you are running the generic (baremetal) system as the remoteproc slave and Linux as the remoteproc master, please also add the built generic `echo_test` executable to the firmware of your Linux OS.
## Run the Demo
### Load the Demo
After Linux boots,
* Load the machine remoteproc driver. If Linux runs as remoteproc master, you will need to pass the other processor's echo_test binary as the firmware argument to the remoteproc module.
* If you run the Linux kernel application demo, load the `rpmsg_echo_test_kern_app` module. You will see the kernel application send the message to the remote, the remote echo it back, and the kernel application verify the result.
* If you run the userspace application demo, load the `rpmsg_user_dev_driver` module.
* If you run the userspace application demo, you will see output similar to the following on the console:
```
****************************************
Please enter command and press enter key
****************************************
1 - Send data to remote core, retrieve the echo and validate its integrity ..
2 - Quit this application ..
CMD>
```
* Input `1` to send packages.
* Input `2` to exit the application.
After you run the demo, you will need to unload the kernel modules.
### Unload the Demo
* If you run the userspace application demo, unload the `rpmsg_user_dev_driver` module.
* If you run the kernelspace application demo, unload the `rpmsg_echo_test_kern_app` module.
* Unload the machine remoteproc driver.

@ -0,0 +1,59 @@
# matrix_multiply
This readme is about the OpenAMP matrix_multiply demo.
In the matrix_multiply demo, one processor generates two matrices and sends them to the other, and the other one calculates the matrix multiplication and returns the result matrix.
For now, Linux generates the matrices, and the baremetal side calculates the matrix multiplication and sends back the result.
## Compilation
### Baremetal Compilation
The option `WITH_MATRIX_MULTIPLY` controls whether the application will be built.
By default this option is `ON` when `WITH_APPS` is on.
Here is an example:
```
$ cmake ../open-amp -DCMAKE_TOOLCHAIN_FILE=zynq7_generic -DWITH_OBSOLETE=on -DWITH_APPS=ON
```
### Linux Compilation
#### Linux Kernel Compilation
You will need to manually compile the following kernel modules with your Linux kernel (please refer to the Linux kernel documentation for how to add a kernel module):
* Your machine's remoteproc kernel driver
* `obsolete/system/linux/kernelspace/rpmsg_user_dev_driver` if you want to run the matrix_multiply app in Linux user space.
* `obsolete/apps/matrix_multiply/system/linux/kernelspace/rpmsg_mat_mul_kern_app` if you want to run the matrix_multiply app in Linux kernel space.
#### Linux Userspace Compilation
* Compile `obsolete/apps/matrix_multiply/system/linux/userspace/mat_mul_demo` into your Linux OS.
* If you are running the generic (baremetal) system as the remoteproc slave and Linux as the remoteproc master, please also add the built generic `matrix_multiply` executable to the firmware of your Linux OS.
## Run the Demo
### Load the Demo
After Linux boots,
* Load the machine remoteproc driver. If Linux runs as remoteproc master, you will need to pass the other processor's matrix_multiply binary as the firmware argument to the remoteproc module.
* If you run the Linux kernel application demo, load the `rpmsg_mat_mul_kern_app` module; you will see the kernel app generate two matrices, send them to the other processor, and output the result matrix returned by the other processor.
* If you run the userspace application demo, load the `rpmsg_user_dev_driver` module.
* If you run the userspace application demo `mat_mul_demo`, you will see output similar to the following on the console:
```
****************************************
Please enter command and press enter key
****************************************
1 - Generates random 6x6 matrices and transmits them to remote core over rpmsg
..
2 - Quit this application ..
CMD>
```
* Input `1` to run the matrix multiplication.
* Input `2` to exit the application.
After you run the demo, you will need to unload the kernel modules.
### Unload the Demo
* If you run the userspace application demo, unload the `rpmsg_user_dev_driver` module.
* If you run the kernelspace application demo, unload the `rpmsg_mat_mul_kern_app` module.
* Unload the machine remoteproc driver.

@ -0,0 +1,38 @@
# rpc_demo
This readme is about the OpenAMP rpc_demo demo.
In the rpc_demo, one processor uses the UART on the other processor and creates a file on the other processor's filesystem with file operations.
For now, the processor running the generic (baremetal) application accesses the devices on the Linux side.
## Compilation
### Baremetal Compilation
The option `WITH_RPC_DEMO` controls whether the application will be built.
By default this option is `ON` when `WITH_APPS` is on.
Here is an example:
```
$ cmake ../open-amp -DCMAKE_TOOLCHAIN_FILE=zynq7_generic -DWITH_OBSOLETE=on -DWITH_APPS=ON
```
### Linux Compilation
#### Linux Kernel Compilation
You will need to manually compile the following kernel modules with your Linux kernel (please refer to the Linux kernel documentation for how to add a kernel module):
* Your machine's remoteproc kernel driver
* `obsolete/apps/rpc_demo/system/linux/kernelspace/rpmsg_proxy_dev_driver`
#### Linux Userspace Compilation
* Compile `obsolete/apps/rpc_demo/system/linux/userspace/proxy_app` into your Linux OS.
* Add the built generic `rpc_demo` executable to the firmware of your Linux OS.
## Run the Demo
After Linux boots, run `proxy_app` as follows:
```
# proxy_app [-m REMOTEPROC_MODULE] [-f PATH_OF_THE_RPC_DEMO_FIRMWARE]
```
The demo application will load the remoteproc module and then the proxy rpmsg module; it will output messages sent from the other processor and send the console input back to the other processor. When the demo application exits, it will unload the kernel modules.

Binary file not shown.

@ -0,0 +1,65 @@
set_property (GLOBAL PROPERTY "PROJECT_LIB_EXTRA_CFLAGS")
collector_create (PROJECT_LIB_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}")
collect (PROJECT_LIB_DIRS "${CMAKE_CURRENT_BINARY_DIR}")
collect (PROJECT_INC_DIRS "${CMAKE_CURRENT_SOURCE_DIR}/include")
add_subdirectory (common)
add_subdirectory (virtio)
add_subdirectory (rpmsg)
add_subdirectory (remoteproc)
if (WITH_PROXY)
add_subdirectory (proxy)
endif (WITH_PROXY)
set (OPENAMP_LIB open_amp)
if (NOT CMAKE_INSTALL_LIBDIR)
set (CMAKE_INSTALL_LIBDIR "lib")
endif (NOT CMAKE_INSTALL_LIBDIR)
collector_list (_include PROJECT_INC_DIRS)
include_directories (${_include})
collector_list (_deps PROJECT_LIB_DEPS)
get_property (_ecflags GLOBAL PROPERTY "PROJECT_LIB_EXTRA_CFLAGS")
collector_list (_sources PROJECT_LIB_SOURCES)
set_property (SOURCE ${_sources}
APPEND_STRING PROPERTY COMPILE_FLAGS " ${_ecflags}")
# Build a shared library if so configured.
if (WITH_ZEPHYR)
zephyr_library_named(${OPENAMP_LIB})
add_dependencies(${OPENAMP_LIB} offsets_h)
target_sources (${OPENAMP_LIB} PRIVATE ${_sources})
else (WITH_ZEPHYR)
if (WITH_SHARED_LIB)
set (_lib ${OPENAMP_LIB}-shared)
add_library (${_lib} SHARED ${_sources})
target_link_libraries (${_lib} ${_deps})
install (TARGETS ${_lib} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
set_target_properties (${_lib} PROPERTIES
OUTPUT_NAME "${OPENAMP_LIB}"
VERSION "${PROJECT_VER}"
SOVERSION "${PROJECT_VER_MAJOR}"
)
endif (WITH_SHARED_LIB)
if (WITH_STATIC_LIB)
set (_lib ${OPENAMP_LIB}-static)
add_library (${_lib} STATIC ${_sources})
install (TARGETS ${_lib} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
set_target_properties (${_lib} PROPERTIES
OUTPUT_NAME "${OPENAMP_LIB}"
)
endif (WITH_STATIC_LIB)
endif (WITH_ZEPHYR)
install (DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include/openamp" DESTINATION include)
# vim: expandtab:ts=2:sw=2:smartindent

@ -0,0 +1,3 @@
collect (PROJECT_LIB_SOURCES hil.c)
collect (PROJECT_LIB_SOURCES sh_mem.c)
collect (PROJECT_LIB_SOURCES firmware.c)

@ -0,0 +1,56 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* firmware.c
*
* COMPONENT
*
* OpenAMP stack.
*
* DESCRIPTION
*
*
**************************************************************************/
#include <string.h>
#include <openamp/firmware.h>
/**
* config_get_firmware
*
* Searches the given firmware in firmware table list and provides
* it to caller.
*
* @param fw_name - name of the firmware
* @param start_addr - pointer to hold start address of firmware
* @param size - pointer to hold size of firmware
*
* returns - status of function execution
*
*/
extern struct firmware_info fw_table[];
extern int fw_table_size;
int config_get_firmware(char *fw_name, uintptr_t *start_addr,
unsigned int *size)
{
int idx;
for (idx = 0; idx < fw_table_size; idx++) {
if (!strncmp((char *)fw_table[idx].name, fw_name, sizeof(fw_table[idx].name))) {
*start_addr = fw_table[idx].start_addr;
*size =
fw_table[idx].end_addr - fw_table[idx].start_addr +
1;
return 0;
}
}
return -1;
}
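/*
 * Illustrative usage sketch (not part of the upstream file): an application
 * supplies fw_table/fw_table_size and looks up a firmware image by name.
 * The names and addresses below are placeholders; the field names and their
 * assumed integer types follow the accesses above, and the real definition
 * of struct firmware_info lives in openamp/firmware.h.
 */
struct firmware_info fw_table[] = {
	{ .name = "firmware1", .start_addr = 0x3e800000, .end_addr = 0x3e8fffff },
};
int fw_table_size = sizeof(fw_table) / sizeof(fw_table[0]);

static int example_firmware_lookup(void)
{
	uintptr_t start;
	unsigned int size;

	if (config_get_firmware((char *)"firmware1", &start, &size))
		return -1;	/* no table entry with that name */
	/* start/size now describe the requested firmware image */
	return 0;
}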

@ -0,0 +1,803 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* hil.c
*
* COMPONENT
*
* OpenAMP Stack.
*
* DESCRIPTION
*
* This file is the implementation of the generic part of the HIL.
*
*
*
**************************************************************************/
#include <openamp/hil.h>
#include <openamp/remoteproc.h>
#include <metal/io.h>
#include <metal/alloc.h>
#include <metal/assert.h>
#include <metal/device.h>
#include <metal/shmem.h>
#include <metal/utilities.h>
#include <metal/time.h>
#include <metal/cache.h>
#include <stdio.h>
#define DEFAULT_VRING_MEM_SIZE 0x10000
#define HIL_DEV_NAME_PREFIX "hil-dev."
/*--------------------------- Globals ---------------------------------- */
static METAL_DECLARE_LIST (procs);
#if defined (OPENAMP_BENCHMARK_ENABLE)
unsigned long long boot_time_stamp;
unsigned long long shutdown_time_stamp;
#endif
struct hil_mem_device {
struct metal_device device;
char name[64];
metal_phys_addr_t pa;
};
metal_phys_addr_t hil_generic_start_paddr = 0;
static int hil_shm_block_write(struct metal_io_region *io,
unsigned long offset,
const void *restrict src,
memory_order order,
int len)
{
void *va = metal_io_virt(io, offset);
(void)order;
memcpy(va, src, len);
metal_cache_flush(va, (unsigned int)len);
return len;
}
static void hil_shm_block_set(struct metal_io_region *io,
unsigned long offset,
unsigned char value,
memory_order order,
int len)
{
void *va = metal_io_virt(io, offset);
(void)order;
memset(va, (int)value, len);
metal_cache_flush(va, (unsigned int)len);
}
static struct metal_io_region hil_shm_generic_io = {
0,
&hil_generic_start_paddr,
(size_t)(-1),
(sizeof(metal_phys_addr_t) << 3),
(metal_phys_addr_t)(-1),
0,
{NULL, NULL,
NULL, hil_shm_block_write, hil_shm_block_set, NULL},
};
struct metal_device *hil_create_generic_mem_dev(
metal_phys_addr_t pa,
size_t size, unsigned int flags)
{
struct hil_mem_device *dev;
struct metal_device *mdev;
int ret;
/* If no generic bus is found in libmetal
* there is no need to create the generic device
*/
ret = metal_bus_find("generic", NULL);
if (ret)
return NULL;
dev = metal_allocate_memory(sizeof(*dev));
metal_assert(dev);
memset(dev, 0, sizeof(*dev));
sprintf(dev->name, "%s%lx.%lx", HIL_DEV_NAME_PREFIX, pa,
(unsigned long)size);
dev->pa = pa;
mdev = &dev->device;
mdev->name = dev->name;
mdev->num_regions = 1;
metal_io_init(&mdev->regions[0], (void *)pa, &dev->pa, size,
sizeof(pa) << 3, flags, NULL);
ret = metal_register_generic_device(mdev);
metal_assert(!ret);
ret = metal_device_open("generic", dev->name, &mdev);
metal_assert(!ret);
return mdev;
}
void hil_close_generic_mem_dev(struct metal_device *dev)
{
struct hil_mem_device *mdev;
if (strncmp(HIL_DEV_NAME_PREFIX, dev->name,
strlen(HIL_DEV_NAME_PREFIX))) {
metal_device_close(dev);
} else {
metal_list_del(&dev->node);
mdev = metal_container_of(dev, struct hil_mem_device, device);
metal_free_memory(mdev);
}
}
static struct metal_io_region *hil_get_mem_io(
struct metal_device *dev,
metal_phys_addr_t pa,
size_t size)
{
struct metal_io_region *io;
unsigned int i;
for (i = 0; i < dev->num_regions; i++) {
io = &dev->regions[i];
if (!pa && io->size >= size)
return io;
if (metal_io_phys_to_offset(io, pa) == METAL_BAD_OFFSET)
continue;
if (metal_io_phys_to_offset(io, (pa + size)) ==
METAL_BAD_OFFSET)
continue;
return io;
}
return NULL;
}
struct hil_proc *hil_create_proc(struct hil_platform_ops *ops,
unsigned long cpu_id, void *pdata)
{
struct hil_proc *proc = 0;
proc = metal_allocate_memory(sizeof(struct hil_proc));
if (!proc)
return NULL;
memset(proc, 0, sizeof(struct hil_proc));
proc->ops = ops;
proc->num_chnls = 1;
proc->cpu_id = cpu_id;
proc->pdata = pdata;
/* Setup generic shared memory I/O region */
proc->sh_buff.io = &hil_shm_generic_io;
metal_mutex_init(&proc->lock);
metal_list_add_tail(&procs, &proc->node);
return proc;
}
/**
* hil_delete_proc
*
* This function deletes the given proc instance and frees the
* associated resources.
*
* @param proc - pointer to hil remote_proc instance
*
*/
void hil_delete_proc(struct hil_proc *proc)
{
struct metal_list *node;
struct metal_device *dev;
struct metal_io_region *io;
struct proc_vring *vring;
int i;
metal_list_for_each(&procs, node) {
if (proc ==
metal_container_of(node, struct hil_proc, node)) {
metal_list_del(&proc->node);
metal_mutex_acquire(&proc->lock);
proc->ops->release(proc);
/* Close shmem device */
dev = proc->sh_buff.dev;
io = proc->sh_buff.io;
if (dev)
proc->ops->release_shm(proc, dev, io);
else if (io && io->ops.close)
io->ops.close(io);
/* Close resource table device */
dev = proc->rsc_dev;
io = proc->rsc_io;
if (dev)
proc->ops->release_shm(proc, dev, io);
else if (io && io->ops.close)
io->ops.close(io);
/* Close vring device */
for (i = 0; i < HIL_MAX_NUM_VRINGS; i++) {
vring = &proc->vdev.vring_info[i];
dev = vring->dev;
io = vring->io;
if (dev)
proc->ops->release_shm(proc, dev, io);
else if (io && io->ops.close)
io->ops.close(io);
}
metal_mutex_release(&proc->lock);
metal_mutex_deinit(&proc->lock);
metal_free_memory(proc);
return;
}
}
}
int hil_init_proc(struct hil_proc *proc)
{
int ret = 0;
if (!proc->is_initialized && proc->ops->initialize) {
ret = proc->ops->initialize(proc);
if (!ret)
proc->is_initialized = 1;
else
return -1;
}
return 0;
}
/**
* hil_get_chnl_info
*
* This function returns channels info for given proc.
*
* @param proc - pointer to proc info struct
* @param num_chnls - pointer to integer variable to hold
* number of available channels
*
* @return - pointer to channel info control block
*
*/
struct proc_chnl *hil_get_chnl_info(struct hil_proc *proc, int *num_chnls)
{
*num_chnls = proc->num_chnls;
return (proc->chnls);
}
void hil_notified(struct hil_proc *proc, uint32_t notifyid)
{
struct proc_vdev *pvdev = &proc->vdev;
struct fw_rsc_vdev *vdev_rsc = pvdev->vdev_info;
int i;
if (vdev_rsc->status & VIRTIO_CONFIG_STATUS_NEEDS_RESET) {
if (pvdev->rst_cb)
pvdev->rst_cb(proc, 0);
} else {
for(i = 0; i < (int)pvdev->num_vrings; i++) {
struct fw_rsc_vdev_vring *vring_rsc;
vring_rsc = &vdev_rsc->vring[i];
if (notifyid == (uint32_t)(-1) ||
notifyid == vring_rsc->notifyid)
virtqueue_notification(
pvdev->vring_info[i].vq);
}
}
}
/**
* hil_get_vdev_info
*
* This function returns the virtio device for the remote core.
*
* @param proc - pointer to remote proc
*
* @return - pointer to virtio HW device.
*
*/
struct proc_vdev *hil_get_vdev_info(struct hil_proc *proc)
{
return (&proc->vdev);
}
/**
* hil_get_vring_info
*
* This function returns vring_info_table. The caller will use
* this table to get the vring HW info which will be subsequently
* used to create virtqueues.
*
* @param vdev - pointer to virtio HW device
* @param num_vrings - pointer to hold number of vrings
*
* @return - pointer to vring hardware info table
*/
struct proc_vring *hil_get_vring_info(struct proc_vdev *vdev, int *num_vrings)
{
struct fw_rsc_vdev *vdev_rsc;
struct fw_rsc_vdev_vring *vring_rsc;
struct proc_vring *vring;
int i, ret;
vdev_rsc = vdev->vdev_info;
*num_vrings = vdev->num_vrings;
if (vdev_rsc) {
vring = &vdev->vring_info[0];
for (i = 0; i < vdev_rsc->num_of_vrings; i++) {
struct hil_proc *proc = metal_container_of(
vdev, struct hil_proc, vdev);
void *vaddr = METAL_BAD_VA;
/* Initialize vring with vring resource */
vring_rsc = &vdev_rsc->vring[i];
vring[i].num_descs = vring_rsc->num;
vring[i].align = vring_rsc->align;
/* Check if vring needs to reinitialize.
* Vring needs reinitialization if the vdev
* master restarts.
*/
if (vring[i].io) {
vaddr = metal_io_phys_to_virt(vring[i].io,
(metal_phys_addr_t)vring_rsc->da);
}
if (vaddr == (void *)METAL_BAD_VA) {
ret = hil_set_vring(proc, i, NULL, NULL,
(metal_phys_addr_t)vring_rsc->da,
vring_size(vring_rsc->num,
vring_rsc->align));
if (ret)
return NULL;
vaddr = metal_io_phys_to_virt(vring[i].io,
(metal_phys_addr_t)vring_rsc->da);
}
vring[i].vaddr = vaddr;
}
}
return (vdev->vring_info);
}
/**
* hil_get_shm_info
*
* This function returns shared memory info control block. The caller
* will use this information to create and manage memory buffers for
* vring descriptor table.
*
* @param proc - pointer to proc instance
*
* @return - pointer to shared memory region used for buffers
*
*/
struct proc_shm *hil_get_shm_info(struct hil_proc *proc)
{
return (&proc->sh_buff);
}
void hil_free_vqs(struct virtio_device *vdev)
{
struct hil_proc *proc = vdev->device;
struct proc_vdev *pvdev = &proc->vdev;
int num_vrings = (int)pvdev->num_vrings;
int i;
metal_mutex_acquire(&proc->lock);
for(i = 0; i < num_vrings; i++) {
struct proc_vring *pvring = &pvdev->vring_info[i];
struct virtqueue *vq = pvring->vq;
if (vq) {
virtqueue_free(vq);
pvring->vq = 0;
}
}
metal_mutex_release(&proc->lock);
}
int hil_enable_vdev_notification(struct hil_proc *proc, int id)
{
/* We only support single vdev in hil_proc */
(void)id;
if (!proc)
return -1;
if (proc->ops->enable_interrupt)
proc->ops->enable_interrupt(&proc->vdev.intr_info);
return 0;
}
/**
* hil_enable_vring_notifications()
*
* This function is called after successful creation of virtqueues.
* This function saves the queue handle in the vring_info_table, which
* will be used during interrupt handling. This function also sets up the
* interrupt handlers.
*
* @param vring_index - index to vring HW table
* @param vq - pointer to virtqueue to save in vring HW table
*
* @return - execution status
*/
int hil_enable_vring_notifications(int vring_index, struct virtqueue *vq)
{
struct hil_proc *proc_hw = (struct hil_proc *)vq->vq_dev->device;
struct proc_vring *vring_hw = &proc_hw->vdev.vring_info[vring_index];
/* Save virtqueue pointer for later reference */
vring_hw->vq = vq;
if (proc_hw->ops->enable_interrupt) {
proc_hw->ops->enable_interrupt(&vring_hw->intr_info);
}
return 0;
}
/**
* hil_vdev_notify()
*
* This function generates an IPI to let the other side know that there is
* an update in the virtio dev configs.
*
* @param vdev - pointer to the virtio device
*
*/
void hil_vdev_notify(struct virtio_device *vdev)
{
struct hil_proc *proc = vdev->device;
struct proc_vdev *pvdev = &proc->vdev;
if (proc->ops->notify) {
proc->ops->notify(proc, &pvdev->intr_info);
}
}
/**
* hil_vring_notify()
*
* This function generates an IPI to let the other side know that there is
* a job available for it. The required information to achieve this, like the
* interrupt vector, CPU id, etc., is obtained from the proc_vring table.
*
* @param vq - pointer to virtqueue
*
*/
void hil_vring_notify(struct virtqueue *vq)
{
struct hil_proc *proc_hw = (struct hil_proc *)vq->vq_dev->device;
struct proc_vring *vring_hw =
&proc_hw->vdev.vring_info[vq->vq_queue_index];
if (proc_hw->ops->notify) {
proc_hw->ops->notify(proc_hw, &vring_hw->intr_info);
}
}
/**
* hil_get_status
*
* This function is used to check if the given core is up and running.
* This call will return after it is confirmed that remote core has
* started.
*
* @param proc - pointer to proc instance
*
* @return - execution status
*/
int hil_get_status(struct hil_proc *proc)
{
(void)proc;
/* For future use only. */
return 0;
}
/**
* hil_set_status
*
* This function is used to update the status
* of the given core i.e it is ready for IPC.
*
* @param proc - pointer to remote proc
*
* @return - execution status
*/
int hil_set_status(struct hil_proc *proc)
{
(void)proc;
/* For future use only. */
return 0;
}
/**
* hil_boot_cpu
*
* This function boots the remote processor.
*
* @param proc - pointer to remote proc
* @param start_addr - start address of remote cpu
*
* @return - execution status
*/
int hil_boot_cpu(struct hil_proc *proc, unsigned int start_addr)
{
if (proc->ops->boot_cpu) {
proc->ops->boot_cpu(proc, start_addr);
}
#if defined (OPENAMP_BENCHMARK_ENABLE)
boot_time_stamp = metal_get_timestamp();
#endif
return 0;
}
/**
* hil_shutdown_cpu
*
* This function shuts down the remote processor.
*
* @param proc - pointer to remote proc
*
*/
void hil_shutdown_cpu(struct hil_proc *proc)
{
if (proc->ops->shutdown_cpu) {
proc->ops->shutdown_cpu(proc);
}
#if defined (OPENAMP_BENCHMARK_ENABLE)
shutdown_time_stamp = metal_get_timestamp();
#endif
}
/**
* hil_get_firmware
*
* This function returns the address and size of the firmware with the given
* name.
*
* @param fw_name - name of the firmware
* @param start_addr - pointer to hold start address of firmware
* @param size - pointer to hold size of firmware
*
* returns - status of function execution
*
*/
int hil_get_firmware(char *fw_name, uintptr_t *start_addr,
unsigned int *size)
{
return (config_get_firmware(fw_name, start_addr, size));
}
int hil_poll (struct hil_proc *proc, int nonblock)
{
return proc->ops->poll(proc, nonblock);
}
int hil_set_shm (struct hil_proc *proc,
const char *bus_name, const char *name,
metal_phys_addr_t paddr, size_t size)
{
struct metal_device *dev;
struct metal_io_region *io;
int ret;
if (!proc)
return -1;
if (name && bus_name) {
ret = metal_device_open(bus_name, name, &dev);
if (ret)
return ret;
proc->sh_buff.dev = dev;
proc->sh_buff.io = NULL;
} else if (name) {
ret = metal_shmem_open(name, size, &io);
if (ret)
return ret;
proc->sh_buff.io = io;
}
if (!size) {
if (proc->sh_buff.io) {
io = proc->sh_buff.io;
proc->sh_buff.start_paddr = metal_io_phys(io, 0);
proc->sh_buff.size = io->size;
} else if (proc->sh_buff.dev) {
dev = proc->sh_buff.dev;
io = &dev->regions[0];
proc->sh_buff.io = io;
proc->sh_buff.start_paddr = metal_io_phys(io, 0);
proc->sh_buff.size = io->size;
}
} else if (!paddr) {
if (proc->sh_buff.io) {
io = proc->sh_buff.io;
if (io->size != size)
return -1;
proc->sh_buff.start_paddr = metal_io_phys(io, 0);
proc->sh_buff.size = io->size;
} else if (proc->sh_buff.dev) {
dev = proc->sh_buff.dev;
io = &dev->regions[0];
proc->sh_buff.io = io;
proc->sh_buff.start_paddr = metal_io_phys(io, 0);
proc->sh_buff.size = size;
}
} else {
if (proc->sh_buff.io) {
io = proc->sh_buff.io;
if (size > io->size)
return -1;
if (metal_io_phys_to_offset(io, paddr) ==
METAL_BAD_OFFSET)
return -1;
proc->sh_buff.start_paddr = paddr;
proc->sh_buff.size = size;
} else if (proc->sh_buff.dev) {
dev = proc->sh_buff.dev;
io = hil_get_mem_io(dev, paddr, size);
if (!io)
return -1;
proc->sh_buff.io = io;
proc->sh_buff.start_paddr = metal_io_phys(io, 0);
proc->sh_buff.size = size;
} else {
io = proc->ops->alloc_shm(proc, paddr, size, &dev);
metal_assert(io);
proc->sh_buff.dev = dev;
proc->sh_buff.io = io;
proc->sh_buff.start_paddr = paddr;
proc->sh_buff.size = size;
}
}
proc->sh_buff.start_addr = metal_io_phys_to_virt(proc->sh_buff.io,
proc->sh_buff.start_paddr);
return 0;
}
int hil_set_rsc (struct hil_proc *proc,
const char *bus_name, const char *name,
metal_phys_addr_t paddr, size_t size)
{
struct metal_device *dev;
struct metal_io_region *io;
int ret;
if (!proc)
return -1;
if (name && bus_name) {
ret = metal_device_open(bus_name, name, &dev);
if (ret)
return ret;
proc->rsc_dev = dev;
io = hil_get_mem_io(dev, 0, size);
if (!io)
return -1;
proc->rsc_io = io;
} else if (name) {
ret = metal_shmem_open(name, size, &io);
if (ret)
return ret;
proc->rsc_io = io;
} else {
if (proc->rsc_dev || proc->rsc_io)
return 0;
io = proc->ops->alloc_shm(proc, paddr, size, &dev);
if (dev) {
proc->rsc_dev = dev;
proc->rsc_io = io;
}
}
return 0;
}
int hil_set_vring (struct hil_proc *proc, int index,
const char *bus_name, const char *name,
metal_phys_addr_t paddr, size_t size)
{
struct metal_device *dev;
struct metal_io_region *io;
struct proc_vring *vring;
int ret;
if (!proc)
return -1;
if (index >= HIL_MAX_NUM_VRINGS)
return -1;
vring = &proc->vdev.vring_info[index];
if (name && bus_name) {
ret = metal_device_open(bus_name, name, &dev);
if (ret)
return ret;
vring->dev = dev;
} else if (name) {
ret = metal_shmem_open(name, size, &io);
if (ret)
return ret;
vring->io = io;
} else {
if (vring->dev) {
dev = vring->dev;
io = hil_get_mem_io(dev, paddr, size);
if (io) {
vring->io = io;
return 0;
}
proc->ops->release_shm(proc, dev, NULL);
} else if (vring->io) {
io = vring->io;
if (size <= io->size &&
metal_io_phys_to_offset(io, paddr) !=
METAL_BAD_OFFSET)
return 0;
}
io = proc->ops->alloc_shm(proc, paddr, size, &dev);
if (!io)
return -1;
vring->io = io;
}
return 0;
}
int hil_set_vdev_ipi (struct hil_proc *proc, int index,
unsigned int irq, void *data)
{
struct proc_intr *vring_intr;
/* As we support only one vdev for now */
(void)index;
if (!proc)
return -1;
vring_intr = &proc->vdev.intr_info;
vring_intr->vect_id = irq;
vring_intr->data = data;
return 0;
}
int hil_set_vring_ipi (struct hil_proc *proc, int index,
unsigned int irq, void *data)
{
struct proc_intr *vring_intr;
if (!proc)
return -1;
vring_intr = &proc->vdev.vring_info[index].intr_info;
vring_intr->vect_id = irq;
vring_intr->data = data;
return 0;
}
int hil_set_rpmsg_channel (struct hil_proc *proc, int index,
char *name)
{
if (!proc)
return -1;
if (index >= HIL_MAX_NUM_CHANNELS)
return -1;
strcpy(proc->chnls[index].name, name);
return 0;
}
int hil_set_vdev_rst_cb (struct hil_proc *proc, int index,
hil_proc_vdev_rst_cb_t cb)
{
(void)index;
proc->vdev.rst_cb = cb;
return 0;
}
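/*
 * Illustrative setup sketch (not part of the upstream file): how a platform
 * layer typically chains the hil_proc setters above. The ops table comes
 * from the platform driver; the addresses, sizes, IRQ numbers and channel
 * name are placeholders, not values taken from this import.
 */
static struct hil_proc *example_hil_setup(struct hil_platform_ops *ops)
{
	static char chnl_name[] = "rpmsg-openamp-demo-channel";
	struct hil_proc *proc = hil_create_proc(ops, 0 /* cpu_id */, NULL);

	if (!proc)
		return NULL;
	/* shared buffer region, then one vring pair with their kick IRQs */
	hil_set_shm(proc, NULL, NULL, 0x3e800000, 0x80000);
	hil_set_vring(proc, 0, NULL, NULL, 0x3e000000, 0x8000);
	hil_set_vring(proc, 1, NULL, NULL, 0x3e008000, 0x8000);
	hil_set_vring_ipi(proc, 0, 15 /* irq */, NULL);
	hil_set_vring_ipi(proc, 1, 14 /* irq */, NULL);
	hil_set_rpmsg_channel(proc, 0, chnl_name);
	return proc;	/* released later with hil_delete_proc() */
}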

@ -0,0 +1,206 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* sh_mem.c
*
* COMPONENT
*
* OpenAMP stack.
*
* DESCRIPTION
*
* Source file for fixed buffer size memory management service. Currently
* it is only being used to manage shared memory.
*
**************************************************************************/
#include <string.h>
#include <openamp/sh_mem.h>
#include <metal/alloc.h>
/**
* sh_mem_create_pool
*
* Creates new memory pool with the given parameters.
*
* @param start_addr - start address of the memory region
* @param size - size of the memory
* @param buff_size - fixed buffer size
*
* @return - pointer to memory pool
*
*/
struct sh_mem_pool *sh_mem_create_pool(void *start_addr, unsigned int size,
unsigned int buff_size)
{
struct sh_mem_pool *mem_pool;
int pool_size;
int num_buffs, bmp_size;
if (!start_addr || !size || !buff_size)
return NULL;
/* Word align the buffer size */
buff_size = WORD_ALIGN(buff_size);
/* Get number of buffers. */
num_buffs = (size / buff_size) + ((size % buff_size) == 0 ? 0 : 1);
/*
* Size of the bitmap required to maintain buffers info. One word(32 bit) can
* keep track of 32 buffers.
*/
bmp_size = (num_buffs / BITMAP_WORD_SIZE)
+ ((num_buffs % BITMAP_WORD_SIZE) == 0 ? 0 : 1);
/* Total size required for pool control block. */
pool_size = sizeof(struct sh_mem_pool) + BITMAP_WORD_SIZE * bmp_size;
/* Create pool control block. */
mem_pool = metal_allocate_memory(pool_size);
if (mem_pool) {
/* Initialize pool parameters */
memset(mem_pool, 0x00, pool_size);
metal_mutex_init(&mem_pool->lock);
mem_pool->start_addr = start_addr;
mem_pool->buff_size = buff_size;
mem_pool->bmp_size = bmp_size;
mem_pool->total_buffs = num_buffs;
}
return mem_pool;
}
/**
* sh_mem_get_buffer
*
* Allocates fixed size buffer from the given memory pool.
*
* @param pool - pointer to memory pool
*
* @return - pointer to allocated buffer
*
*/
void *sh_mem_get_buffer(struct sh_mem_pool *pool)
{
void *buff = NULL;
int bit_idx;
unsigned int idx;
if (!pool)
return NULL;
metal_mutex_acquire(&pool->lock);
if (pool->used_buffs >= pool->total_buffs) {
metal_mutex_release(&pool->lock);
return NULL;
}
for (idx = 0; idx < pool->bmp_size; idx++) {
/*
* Find the first 0 bit in the buffers bitmap. A 0 bit
* represents a free buffer.
*/
bit_idx = get_first_zero_bit(
*(unsigned long*)SH_MEM_POOL_LOCATE_BITMAP(pool,idx));
if (bit_idx >= 0) {
/* Set bit to mark it as consumed. */
*(unsigned long*)(SH_MEM_POOL_LOCATE_BITMAP(pool,idx))
|= ((unsigned long)1 << (unsigned long)bit_idx);
buff = (char *)pool->start_addr +
pool->buff_size * (idx * BITMAP_WORD_SIZE +
bit_idx);
pool->used_buffs++;
break;
}
}
metal_mutex_release(&pool->lock);
return buff;
}
/**
* sh_mem_free_buffer
*
* Frees the given buffer.
*
* @param pool - pointer to memory pool
* @param buff - pointer to buffer
*
* @return - none
*/
void sh_mem_free_buffer(void *buff, struct sh_mem_pool *pool)
{
unsigned long *bitmask;
int bmp_idx, bit_idx, buff_idx;
if (!pool || !buff)
return;
/* Acquire the pool lock */
metal_mutex_acquire(&pool->lock);
/* Map the buffer address to its index. */
buff_idx = ((char *)buff - (char *)pool->start_addr) / pool->buff_size;
/* Translate the buffer index to bitmap index. */
bmp_idx = buff_idx / BITMAP_WORD_SIZE;
bit_idx = buff_idx % BITMAP_WORD_SIZE;
bitmask = (unsigned long*)(SH_MEM_POOL_LOCATE_BITMAP(pool, bmp_idx));
/* Mark the buffer as free */
*bitmask ^= ((unsigned long)1 << bit_idx);
pool->used_buffs--;
/* Release the pool lock. */
metal_mutex_release(&pool->lock);
}
/**
* sh_mem_delete_pool
*
* Deletes the given memory pool.
*
* @param pool - pointer to memory pool
*
* @return - none
*/
void sh_mem_delete_pool(struct sh_mem_pool *pool)
{
if (pool) {
metal_mutex_deinit(&pool->lock);
metal_free_memory(pool);
}
}
/**
* get_first_zero_bit
*
* Provides position of first 0 bit in a 32 bit value
*
* @param value - given value
*
* @return - zero-based position of the first 0 bit, or -1 if there is none
*/
int get_first_zero_bit(unsigned long value)
{
unsigned int idx = 0;
value = ((~value) & (value + 1));
while (value) {
idx++;
value >>= 1;
}
return ((int)idx-1);
}
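
The pool API above is small enough to show end to end. The sketch below is illustrative only and not part of the import: the shared-memory base address, region size and buffer size are made-up placeholders.

#include <openamp/sh_mem.h>

#define EXAMPLE_SHM_BASE ((void *)0x3ED00000) /* placeholder base address */
#define EXAMPLE_SHM_SIZE (16 * 1024)          /* placeholder region size */
#define EXAMPLE_BUF_SIZE 512                  /* fixed buffer size */

static int shm_pool_example(void)
{
	struct sh_mem_pool *pool;
	void *buf;

	/* Carve the region into fixed-size, word-aligned buffers. */
	pool = sh_mem_create_pool(EXAMPLE_SHM_BASE, EXAMPLE_SHM_SIZE,
				  EXAMPLE_BUF_SIZE);
	if (!pool)
		return -1;

	/* Take one buffer; NULL is returned when every buffer is in use. */
	buf = sh_mem_get_buffer(pool);
	if (buf) {
		/* ... fill the buffer, hand it to a vring, etc. ... */
		sh_mem_free_buffer(buf, pool);
	}

	sh_mem_delete_pool(pool);
	return 0;
}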

View file

@ -0,0 +1,68 @@
#ifndef _COMPILER_H_
#define _COMPILER_H_
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* compiler.h
*
* DESCRIPTION
*
* This file defines compiler-specific macros.
*
***************************************************************************/
#if defined __cplusplus
extern "C" {
#endif
/* IAR ARM build tools */
#if defined(__ICCARM__)
#ifndef OPENAMP_PACKED_BEGIN
#define OPENAMP_PACKED_BEGIN __packed
#endif
#ifndef OPENAMP_PACKED_END
#define OPENAMP_PACKED_END
#endif
/* GNUC */
#elif defined(__GNUC__)
#ifndef OPENAMP_PACKED_BEGIN
#define OPENAMP_PACKED_BEGIN
#endif
#ifndef OPENAMP_PACKED_END
#define OPENAMP_PACKED_END __attribute__((__packed__))
#endif
/* ARM Compiler (armcc) */
#elif defined(__CC_ARM)
#ifndef OPENAMP_PACKED_BEGIN
#define OPENAMP_PACKED_BEGIN _Pragma("pack(1U)")
#endif
#ifndef OPENAMP_PACKED_END
#define OPENAMP_PACKED_END _Pragma("pack()")
#endif
#else
/* No default definition here, to avoid incorrect structure packing with an unsupported compiler */
#error Please implement the structure packing macros for your compiler here!
#endif
#if defined __cplusplus
}
#endif
#endif /* _COMPILER_H_ */
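
As an illustration of how these macros are meant to be used, a wire structure is declared with OPENAMP_PACKED_BEGIN before the struct keyword and OPENAMP_PACKED_END after the closing brace, the same pattern OpenAMP uses for rpmsg_hdr and the resource-table entries further down. The struct below is an example only, not one defined by the import, and the <openamp/compiler.h> include path is assumed to match the other OpenAMP headers.

#include <stdint.h>
#include <openamp/compiler.h>

/* Example wire structure: packed on IAR, GCC and armcc via the macros above. */
OPENAMP_PACKED_BEGIN
struct example_wire_hdr {
	uint32_t src;
	uint32_t dst;
	uint16_t len;
	uint16_t flags;
} OPENAMP_PACKED_END;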

View file

@ -0,0 +1,212 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef ELF_LOADER_H_
#define ELF_LOADER_H_
#include <openamp/remoteproc_loader.h>
#if defined __cplusplus
extern "C" {
#endif
/* ELF base types - 32-bit. */
typedef uintptr_t Elf32_Addr;
typedef unsigned short Elf32_Half;
typedef unsigned int Elf32_Off;
typedef signed int Elf32_Sword;
typedef unsigned int Elf32_Word;
/* Size of ELF identifier field in the ELF file header. */
#define EI_NIDENT 16
/* ELF file header */
typedef struct {
unsigned char e_ident[EI_NIDENT];
Elf32_Half e_type;
Elf32_Half e_machine;
Elf32_Word e_version;
Elf32_Addr e_entry;
Elf32_Off e_phoff;
Elf32_Off e_shoff;
Elf32_Word e_flags;
Elf32_Half e_ehsize;
Elf32_Half e_phentsize;
Elf32_Half e_phnum;
Elf32_Half e_shentsize;
Elf32_Half e_shnum;
Elf32_Half e_shstrndx;
} Elf32_Ehdr;
/* e_ident */
#define ET_NONE 0
#define ET_REL 1 /* Re-locatable file */
#define ET_EXEC 2 /* Executable file */
#define ET_DYN 3 /* Shared object file */
#define ET_CORE 4 /* Core file */
#define ET_LOOS 0xfe00 /* Operating system-specific */
#define ET_HIOS 0xfeff /* Operating system-specific */
#define ET_LOPROC 0xff00 /* Processor-specific */
#define ET_HIPROC 0xffff /* Processor-specific */
/* e_machine */
#define EM_ARM 40 /* ARM/Thumb Architecture */
/* e_version */
#define EV_CURRENT 1 /* Current version */
/* e_ident[] Identification Indexes */
#define EI_MAG0 0 /* File identification */
#define EI_MAG1 1 /* File identification */
#define EI_MAG2 2 /* File identification */
#define EI_MAG3 3 /* File identification */
#define EI_CLASS 4 /* File class */
#define EI_DATA 5 /* Data encoding */
#define EI_VERSION 6 /* File version */
#define EI_OSABI 7 /* Operating system/ABI identification */
#define EI_ABIVERSION 8 /* ABI version */
#define EI_PAD 9 /* Start of padding bytes */
#define EI_NIDENT 16 /* Size of e_ident[] */
/* EI_MAG0 to EI_MAG3 - A file's first 4 bytes hold a magic number, identifying the file as an ELF object file */
#define ELFMAG0 0x7f /* e_ident[EI_MAG0] */
#define ELFMAG1 'E' /* e_ident[EI_MAG1] */
#define ELFMAG2 'L' /* e_ident[EI_MAG2] */
#define ELFMAG3 'F' /* e_ident[EI_MAG3] */
/* EI_CLASS - The next byte, e_ident[EI_CLASS], identifies the file's class, or capacity. */
#define ELFCLASSNONE 0 /* Invalid class */
#define ELFCLASS32 1 /* 32-bit objects */
#define ELFCLASS64 2 /* 64-bit objects */
/* EI_DATA - Byte e_ident[EI_DATA] specifies the data encoding of the processor-specific data in the object
file. The following encodings are currently defined. */
#define ELFDATANONE 0 /* Invalid data encoding */
#define ELFDATA2LSB 1 /* See Data encodings, below */
#define ELFDATA2MSB 2 /* See Data encodings, below */
/* EI_OSABI - We do not define an OS specific ABI */
#define ELFOSABI_NONE 0
/* ELF section header. */
typedef struct {
Elf32_Word sh_name;
Elf32_Word sh_type;
Elf32_Word sh_flags;
Elf32_Addr sh_addr;
Elf32_Off sh_offset;
Elf32_Word sh_size;
Elf32_Word sh_link;
Elf32_Word sh_info;
Elf32_Word sh_addralign;
Elf32_Word sh_entsize;
} Elf32_Shdr;
/* sh_type */
#define SHT_NULL 0
#define SHT_PROGBITS 1
#define SHT_SYMTAB 2
#define SHT_STRTAB 3
#define SHT_RELA 4
#define SHT_HASH 5
#define SHT_DYNAMIC 6
#define SHT_NOTE 7
#define SHT_NOBITS 8
#define SHT_REL 9
#define SHT_SHLIB 10
#define SHT_DYNSYM 11
#define SHT_INIT_ARRAY 14
#define SHT_FINI_ARRAY 15
#define SHT_PREINIT_ARRAY 16
#define SHT_GROUP 17
#define SHT_SYMTAB_SHNDX 18
#define SHT_LOOS 0x60000000
#define SHT_HIOS 0x6fffffff
#define SHT_LOPROC 0x70000000
#define SHT_HIPROC 0x7fffffff
#define SHT_LOUSER 0x80000000
#define SHT_HIUSER 0xffffffff
/* sh_flags */
#define SHF_WRITE 0x1
#define SHF_ALLOC 0x2
#define SHF_EXECINSTR 0x4
#define SHF_MASKPROC 0xf0000000
/* Relocation entry (without addend) */
typedef struct {
Elf32_Addr r_offset;
Elf32_Word r_info;
} Elf32_Rel;
/* Relocation entry with addend */
typedef struct {
Elf32_Addr r_offset;
Elf32_Word r_info;
Elf32_Sword r_addend;
} Elf32_Rela;
/* Macros to extract information from 'r_info' field of relocation entries */
#define ELF32_R_SYM(i) ((i)>>8)
#define ELF32_R_TYPE(i) ((unsigned char)(i))
/* Symbol table entry */
typedef struct {
Elf32_Word st_name;
Elf32_Addr st_value;
Elf32_Word st_size;
unsigned char st_info;
unsigned char st_other;
Elf32_Half st_shndx;
} Elf32_Sym;
/* ARM specific dynamic relocation codes */
#define R_ARM_GLOB_DAT 21 /* 0x15 */
#define R_ARM_JUMP_SLOT 22 /* 0x16 */
#define R_ARM_RELATIVE 23 /* 0x17 */
#define R_ARM_ABS32 2 /* 0x02 */
/* ELF decoding information */
struct elf_decode_info {
Elf32_Ehdr elf_header;
unsigned char *section_headers_start;
char *shstrtab;
Elf32_Shdr *dynsym;
Elf32_Shdr *dynstr;
Elf32_Shdr *rel_plt;
Elf32_Shdr *rel_dyn;
Elf32_Shdr *rsc;
unsigned char *dynsym_addr;
unsigned char *dynstr_addr;
char *firmware;
};
/* ELF Loader functions. */
int elf_loader_init(struct remoteproc_loader *loader);
void *elf_loader_retrieve_entry_point(struct remoteproc_loader *loader);
void *elf_loader_retrieve_resource_section(struct remoteproc_loader *loader,
unsigned int *size);
int elf_loader_load_remote_firmware(struct remoteproc_loader *loader);
int elf_loader_attach_firmware(struct remoteproc_loader *loader,
void *firmware);
int elf_loader_detach_firmware(struct remoteproc_loader *loader);
void *elf_get_load_address(struct remoteproc_loader *loader);
#if defined __cplusplus
}
#endif
#endif /* ELF_LOADER_H_ */
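
A small sketch, not part of the import, of how the constants and macros above are typically used: validating the e_ident bytes of an ELF header and splitting a relocation entry's r_info field.

/* Check that a header carries the ELF magic and is a 32-bit object. */
static int elf_ident_ok(const Elf32_Ehdr *ehdr)
{
	return ehdr->e_ident[EI_MAG0] == ELFMAG0 &&
	       ehdr->e_ident[EI_MAG1] == ELFMAG1 &&
	       ehdr->e_ident[EI_MAG2] == ELFMAG2 &&
	       ehdr->e_ident[EI_MAG3] == ELFMAG3 &&
	       ehdr->e_ident[EI_CLASS] == ELFCLASS32;
}

/* Split r_info into the symbol index and relocation type (e.g. R_ARM_ABS32). */
static void rel_info_example(const Elf32_Rel *rel,
			     Elf32_Word *sym, unsigned char *type)
{
	*sym = ELF32_R_SYM(rel->r_info);
	*type = ELF32_R_TYPE(rel->r_info);
}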

View file

@ -0,0 +1,33 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef FIRMWARE_H
#define FIRMWARE_H
#include <stdint.h>
#if defined __cplusplus
extern "C" {
#endif
/* Max supported firmwares */
#define FW_COUNT 4
struct firmware_info {
char name[32];
unsigned int start_addr;
unsigned int end_addr;
};
int config_get_firmware(char *fw_name, uintptr_t *start_addr,
unsigned int *size);
#if defined __cplusplus
}
#endif
#endif /* FIRMWARE_H */
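
A usage sketch for the lookup declared above; the firmware name is a placeholder and the zero-on-success return convention is an assumption, not something this header guarantees.

static void firmware_lookup_example(void)
{
	uintptr_t fw_addr;
	unsigned int fw_size;

	/* "remote_fw" is a placeholder name; 0 is assumed to mean "found". */
	if (config_get_firmware((char *)"remote_fw", &fw_addr, &fw_size) == 0) {
		/* fw_addr/fw_size now describe the image handed to the loader. */
	}
}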

View file

@ -0,0 +1,712 @@
#ifndef _HIL_H_
#define _HIL_H_
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* hil.h
*
* DESCRIPTION
*
* This file defines interface layer to access hardware features. This
* interface is used by both RPMSG and remoteproc components.
*
***************************************************************************/
#include <openamp/virtio.h>
#include <openamp/firmware.h>
#include <metal/list.h>
#include <metal/io.h>
#include <metal/device.h>
#include <metal/mutex.h>
#if defined __cplusplus
extern "C" {
#endif
/* Configurable parameters */
#define HIL_MAX_CORES 2
#define HIL_MAX_NUM_VRINGS 2
#define HIL_MAX_NUM_CHANNELS 1
/* Reserved CPU id */
#define HIL_RSVD_CPU_ID 0xffffffff
struct hil_proc;
typedef void (*hil_proc_vdev_rst_cb_t)(struct hil_proc *proc, int id);
/**
* struct proc_shm
*
* This structure is maintained by hardware interface layer for
* shared memory information. The shared memory provides buffers
* for use by the vring to exchange messages between the cores.
*
*/
struct proc_shm {
/* Start address of shared memory used for buffers. */
void *start_addr;
/* Start physical address of shared memory used for buffers. */
metal_phys_addr_t start_paddr;
/* shared memory I/O region */
struct metal_io_region *io;
/* shared memory metal device */
struct metal_device *dev;
/* Size of shared memory. */
unsigned long size;
};
/**
* struct proc_intr
*
* This structure is maintained by the hardware interface layer for the
* notification (interrupt) mechanism. The most common notification mechanism
* is the Inter-Processor Interrupt (IPI). There can be other mechanisms
* depending on the SoC architecture.
*
*/
struct proc_intr {
/* Interrupt number for vring - use for IPI */
unsigned int vect_id;
/* Interrupt priority */
unsigned int priority;
/* Interrupt trigger type */
unsigned int trigger_type;
/* IPI metal device */
struct metal_device *dev;
/* IPI device I/O */
struct metal_io_region *io;
/* Private data */
void *data;
};
/**
* struct proc_vring
*
* This structure is maintained by hardware interface layer to keep
* vring physical memory and notification info.
*
*/
struct proc_vring {
/* Pointer to virtqueue encapsulating the vring */
struct virtqueue *vq;
/* Vring logical address */
void *vaddr;
/* Vring metal device */
struct metal_device *dev;
/* Vring I/O region */
struct metal_io_region *io;
/* Number of vring descriptors */
unsigned short num_descs;
/* Vring alignment */
unsigned long align;
/* Vring interrupt control block */
struct proc_intr intr_info;
};
/**
* struct proc_vdev
*
* This structure represents a virtio HW device for remote processor.
* Currently only one virtio device per processor is supported.
*
*/
struct proc_vdev {
/* Address for the vdev info */
void *vdev_info;
/* Vdev interrupt control block */
struct proc_intr intr_info;
/* Vdev reset callback */
hil_proc_vdev_rst_cb_t rst_cb;
/* Number of vrings */
unsigned int num_vrings;
/* Virtio device features */
unsigned int dfeatures;
/* Virtio gen features */
unsigned int gfeatures;
/* Vring info control blocks */
struct proc_vring vring_info[HIL_MAX_NUM_VRINGS];
};
/**
* struct proc_chnl
*
* This structure represents channel IDs that would be used by
* the remote in the name service message. This will be extended
* further to support static channel creation.
*
*/
struct proc_chnl {
/* Channel ID */
char name[32];
};
/**
* struct hil_proc
*
* This structure represents a remote processor and encapsulates shared
* memory and notification info required for IPC.
*
*/
struct hil_proc {
/* HIL CPU ID */
unsigned long cpu_id;
/* HIL platform ops table */
struct hil_platform_ops *ops;
/* Resource table metal device */
struct metal_device *rsc_dev;
/* Resource table I/O region */
struct metal_io_region *rsc_io;
/* Shared memory info */
struct proc_shm sh_buff;
/* Virtio device hardware info */
struct proc_vdev vdev;
/* Number of RPMSG channels */
unsigned long num_chnls;
/* RPMsg channels array */
struct proc_chnl chnls[HIL_MAX_NUM_CHANNELS];
/* Initialized status */
int is_initialized;
/* hil_proc lock */
metal_mutex_t lock;
/* private data */
void *pdata;
/* List node */
struct metal_list node;
};
/**
* hil_create_proc
*
* This function creates a HIL proc instance
*
* @param ops - hil proc platform operations
* @param cpu_id - remote CPU ID.
* E.g. the CPU ID of the remote processor in its
* cluster.
* @param pdata - private data
* @return - pointer to proc instance
*
*/
struct hil_proc *hil_create_proc(struct hil_platform_ops *ops,
unsigned long cpu_id, void *pdata);
/**
* hil_delete_proc
*
* This function deletes the given proc instance and frees the
* associated resources.
*
* @param proc - pointer to HIL proc instance
*
*/
void hil_delete_proc(struct hil_proc *proc);
/**
* hil_init_proc
*
* This function initializes a HIL proc instance with the given platform data.
* @param proc - pointer to the hil_proc to initialize
*
* @return - 0 on success, non-0 on failure
*
*/
int hil_init_proc(struct hil_proc *proc);
/**
* hil_notified()
*
* This function is called when a notification is received.
* It gets the corresponding virtqueue and invokes the
* callback for it.
*
* @param proc - pointer to hil_proc
* @param notifyid - notifyid
*
*/
void hil_notified(struct hil_proc *proc, uint32_t notifyid);
/**
* hil_get_vdev_info
*
* This function returns the virtio device for the remote core.
*
* @param proc - pointer to remote proc
*
* @return - pointer to virtio HW device.
*
*/
struct proc_vdev *hil_get_vdev_info(struct hil_proc *proc);
/**
* hil_get_chnl_info
*
* This function returns channels info for given proc.
*
* @param proc - pointer to proc info struct
* @param num_chnls - pointer to integer variable to hold
* number of available channels
*
* @return - pointer to channel info control block
*
*/
struct proc_chnl *hil_get_chnl_info(struct hil_proc *proc, int *num_chnls);
/**
* hil_get_vring_info
*
* This function returns vring_info_table. The caller will use
* this table to get the vring HW info which will be subsequently
* used to create virtqueues.
*
* @param vdev - pointer to virtio HW device
* @param num_vrings - pointer to hold number of vrings
*
* @return - pointer to vring hardware info table
*/
struct proc_vring *hil_get_vring_info(struct proc_vdev *vdev, int *num_vrings);
/**
* hil_get_shm_info
*
* This function returns shared memory info control block. The caller
* will use this information to create and manage memory buffers for
* vring descriptor table.
*
* @param proc - pointer to proc instance
*
* @return - pointer to shared memory region used for buffers
*
*/
struct proc_shm *hil_get_shm_info(struct hil_proc *proc);
/**
* hil_free_vqs
*
* This function removes the virtqueues of the vdev.
* @param vdev - pointer to the vdev whose virtqueues are to be removed
*/
void hil_free_vqs(struct virtio_device *vdev);
/**
* hil_enable_vdev_notification()
*
* This function enables the handler for vdev notification.
*
* @param proc - pointer to hil_proc
* @param id - vdev index
*
* @return - execution status
*/
int hil_enable_vdev_notification(struct hil_proc *proc, int id);
/**
* hil_enable_vring_notifications()
*
* This function is called after successful creation of virtqueues.
* This function saves the queue handle in the vring_info_table, which
* will be used during interrupt handling. It also sets up the
* interrupt handlers.
*
* @param vring_index - index to vring HW table
* @param vq - pointer to virtqueue to save in vring HW table
*
* @return - execution status
*/
int hil_enable_vring_notifications(int vring_index, struct virtqueue *vq);
/**
* hil_vdev_notify()
*
* This function generates IPI to let the other side know that there is
* change to virtio device configs.
*
* @param vdev - pointer to virtio device
*
*/
void hil_vdev_notify(struct virtio_device *vdev);
/**
* hil_vring_notify()
*
* This function generates IPI to let the other side know that there is
* job available for it. The required information to achieve this, such as the
* interrupt vector and CPU id, is obtained from the proc_vring table.
*
* @param vq - pointer to virtqueue
*
*/
void hil_vring_notify(struct virtqueue *vq);
/**
* hil_get_status
*
* This function is used to check if the given core is up and running.
* This call will return after it is confirmed that remote core has
* started.
*
* @param proc - pointer to proc instance
*
* @return - execution status
*/
int hil_get_status(struct hil_proc *proc);
/**
* hil_set_status
*
* This function is used to update the status
* of the given core, i.e. it is ready for IPC.
*
* @param proc - pointer to remote proc
*
* @return - execution status
*/
int hil_set_status(struct hil_proc *proc);
/** hil_create_generic_mem_dev
*
* This function creates generic memory device.
* This is a helper function.
*
* @param pa - physical base address
* @param size - size of the memory
* @param flags - flags of the memory region
*
* @return - pointer to the memory device
*/
struct metal_device *hil_create_generic_mem_dev( metal_phys_addr_t pa,
size_t size, unsigned int flags);
/** hil_close_generic_mem_dev
*
* This function closes the generic memory device.
*
* @param dev - pointer to the memory device.
*/
void hil_close_generic_mem_dev(struct metal_device *dev);
/**
* hil_boot_cpu
*
* This function starts remote processor at given address.
*
* @param proc - pointer to remote proc
* @param load_addr - load address of remote firmware
*
* @return - execution status
*/
int hil_boot_cpu(struct hil_proc *proc, unsigned int load_addr);
/**
* hil_shutdown_cpu
*
* This function shuts down the remote processor.
*
* @param proc - pointer to remote proc
*
*/
void hil_shutdown_cpu(struct hil_proc *proc);
/**
* hil_get_firmware
*
* This function returns address and size of given firmware name passed as
* parameter.
*
* @param fw_name - name of the firmware
* @param start_addr - pointer to hold start address of firmware
* @param size - pointer to hold size of firmware
*
* returns - status of function execution
*
*/
int hil_get_firmware(char *fw_name, uintptr_t *start_addr,
unsigned int *size);
/**
* hil_poll
*
* This function polls the remote processor.
* In blocking mode, it does not return until the remoteproc
* is signaled. In non-blocking mode, it returns 0
* if the remoteproc has pending signals, and non-0
* otherwise.
*
* @param proc - hil_proc to poll
* @param nonblock - 0 for blocking, non-0 for non-blocking.
*
* @return - 0 for no errors, non-0 for errors.
*/
int hil_poll (struct hil_proc *proc, int nonblock);
/**
* hil_set_shm
*
* This function sets the HIL proc shared memory.
*
* @param proc - hil_proc to set
* @param bus_name - bus name of the shared memory device
* @param name - name of the shared memory, or platform device
* mandatory for Linux system.
* @param paddr - physical address of the memory
* @param size - size of the shared memory
*
* If the name argument exists, it opens the specified libmetal
* shared memory, or the specified libmetal device if bus_name
* is specified, to get the I/O region of the shared memory.
* If the memory name doesn't exist, it creates a metal device
* for the shared memory.
*
* @return - 0 for no errors, non-0 for errors.
*/
int hil_set_shm (struct hil_proc *proc,
const char *bus_name, const char *name,
metal_phys_addr_t paddr, size_t size);
/**
* hil_set_rsc
*
* This function sets the HIL proc RSC I/O region.
*
* @param proc - hil_proc to set the vdev I/O region for
* @param bus_name - bus name of the vdev device
* @param name - name of the shared memory, or platform device
* mandatory for Linux system.
* @param paddr - physical address of the memory
* @param size - size of the shared memory
*
* If the name argument exists, it opens the specified libmetal
* shared memory, or the specified libmetal device if bus_name
* is specified, to get the I/O region of the shared memory.
* If the memory name doesn't exist, it creates a metal device
* for the shared memory.
*
* @return - 0 for no errors, non-0 for errors.
*/
int hil_set_rsc (struct hil_proc *proc,
const char *bus_name, const char *name,
metal_phys_addr_t paddr, size_t size);
/**
* hil_set_vring
*
* This function sets the HIL proc vring.
*
* @param proc - hil_proc to set
* @param index - vring index
* @param bus_name - bus name of the vring device
* @param name - name of the shared memory, or platform device
* mandatory for Linux system.
* @param paddr - physical address of the memory
* @param size - size of the shared memory
*
* If the name argument exists, it opens the specified libmetal
* shared memory, or the specified libmetal device if bus_name
* is specified, to get the I/O region of the shared memory.
* If the memory name doesn't exist, it creates a metal device
* for the shared memory.
*
* @return - 0 for no errors, non-0 for errors.
*/
int hil_set_vring (struct hil_proc *proc, int index,
const char *bus_name, const char *name,
metal_phys_addr_t paddr, size_t size);
/**
* hil_set_vdev_ipi
*
* This function sets the HIL proc vdev IPI.
*
* @param proc - hil_proc to set
* @param index - vring index for the IPI
* @param irq - IPI irq vector ID
* @param data - IPI data
*
* @return - 0 for no errors, non-0 for errors.
*/
int hil_set_vdev_ipi (struct hil_proc *proc, int index,
unsigned int irq, void *data);
/**
* hil_set_vring_ipi
*
* This function sets the HIL proc vring IPI.
*
* @param proc - hil_proc to set
* @param index - vring index for the IPI
* @param irq - IPI irq vector ID
* @param data - IPI data
*
* @return - 0 for no errors, non-0 for errors.
*/
int hil_set_vring_ipi (struct hil_proc *proc, int index,
unsigned int irq, void *data);
/**
* hil_set_rpmsg_channel
*
* This function sets the HIL proc rpmsg_channel.
*
* @param proc - hil_proc to set
* @param index - vring index for the rpmsg_channel
* @param name - RPMsg channel name
*
* @return - 0 for no errors, non-0 for errors.
*/
int hil_set_rpmsg_channel (struct hil_proc *proc, int index,
char *name);
/**
* hil_set_vdev_rst_cb
*
* This function sets the HIL proc vdev reset callback.
*
* @param proc - hil_proc to set
* @param index - vdev index
* @param cb - reset callback
*
* @return - 0 for no errors, non-0 for errors.
*/
int hil_set_vdev_rst_cb (struct hil_proc *proc, int index,
hil_proc_vdev_rst_cb_t cb);
/**
*
* This structure is an interface between HIL and platform porting
* component. It is required for the user to provide definitions of
* these functions when framework is ported to new hardware platform.
*
*/
struct hil_platform_ops {
/**
* enable_interrupt()
*
* This function enables interrupt(IPI)
*
* @param intr - pointer to intr information
*
* @return - execution status
*/
int (*enable_interrupt) (struct proc_intr *intr);
/**
* notify()
*
* This function generates IPI to let the other side know that there is
* job available for it.
*
* @param proc - pointer to the hil_proc
* @param intr_info - pointer to interrupt info control block
*/
void (*notify) (struct hil_proc *proc, struct proc_intr * intr_info);
/**
* boot_cpu
*
* This function boots the remote processor.
*
* @param proc - pointer to the hil_proc
* @param start_addr - start address of remote cpu
*
* @return - execution status
*/
int (*boot_cpu) (struct hil_proc *proc, unsigned int start_addr);
/**
* shutdown_cpu
*
* This function shuts down the remote processor.
*
* @param proc - pointer to the hil_proc
*
*/
void (*shutdown_cpu) (struct hil_proc *proc);
/**
* poll
*
* This function polls the remote processor.
*
* @param proc - hil_proc to poll
* @param nonblock - 0 for blocking, non-0 for non-blocking.
*
* @return - 0 for no errors, non-0 for errors.
*/
int (*poll) (struct hil_proc *proc, int nonblock);
/**
* alloc_shm
*
* This function allocates shared memory.
*
* @param[in] proc - pointer to the remote processor
* @param[in] pa - physical address
* @param[in] size - size of the shared memory
* @param[out] dev - pointer to the mem dev pointer
*
* @return - NULL on failure, pointer to the I/O region otherwise
*
*/
struct metal_io_region *(*alloc_shm) (struct hil_proc *proc,
metal_phys_addr_t pa,
size_t size,
struct metal_device **dev);
/**
* release_shm
*
* This function releases shared memory.
*
* @param[in] proc - pointer to the remote processor
* @param[in] dev - pointer to the mem dev
* @param[in] io - pointer to the I/O region
*
*/
void (*release_shm) (struct hil_proc *proc,
struct metal_device *dev,
struct metal_io_region *io);
/**
* initialize
*
* This function initializes the remote processor with platform data.
*
* @param proc - hil_proc to initialize
*
* @return - execution status
*
*/
int (*initialize) (struct hil_proc *proc);
/**
* release
*
* This function releases remote processor resources.
*
* @param[in] proc - pointer to the remote processor
*
*/
void (*release) (struct hil_proc *proc);
};
/* Utility macros for register read/write */
#define HIL_MEM_READ8(addr) *(volatile unsigned char *)(addr)
#define HIL_MEM_READ16(addr) *(volatile unsigned short *)(addr)
#define HIL_MEM_READ32(addr) *(volatile unsigned long *)(addr)
#define HIL_MEM_WRITE8(addr,data) *(volatile unsigned char *)(addr) = (unsigned char)(data)
#define HIL_MEM_WRITE16(addr,data) *(volatile unsigned short *)(addr) = (unsigned short)(data)
#define HIL_MEM_WRITE32(addr,data) *(volatile unsigned long *)(addr) = (unsigned long)(data)
#if defined __cplusplus
}
#endif
#endif /* _HIL_H_ */
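
To make the porting interface above concrete, here is a bare skeleton of a hil_platform_ops table. It is a sketch only: the function names and bodies are placeholders, and the ops a real port must fill in (boot_cpu, poll, alloc_shm, ...) depend entirely on the target SoC.

static int my_enable_interrupt(struct proc_intr *intr)
{
	(void)intr; /* e.g. register/enable the IPI described by intr */
	return 0;
}

static void my_notify(struct hil_proc *proc, struct proc_intr *intr_info)
{
	(void)proc;
	(void)intr_info; /* e.g. trigger the IPI towards the other core */
}

static int my_initialize(struct hil_proc *proc)
{
	(void)proc; /* map shared memory, set up vring devices, ... */
	return 0;
}

static void my_release(struct hil_proc *proc)
{
	(void)proc; /* undo whatever initialize() did */
}

static struct hil_platform_ops my_platform_ops = {
	.enable_interrupt = my_enable_interrupt,
	.notify           = my_notify,
	.initialize       = my_initialize,
	.release          = my_release,
	/* .boot_cpu, .shutdown_cpu, .poll, .alloc_shm, .release_shm as needed */
};

/* The generic layer then drives the port through:
 *   struct hil_proc *proc = hil_create_proc(&my_platform_ops, 0, NULL);
 *   hil_init_proc(proc);
 */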

View file

@ -0,0 +1,15 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef OPEN_AMP_H_
#define OPEN_AMP_H_
#include <openamp/rpmsg.h>
#include <openamp/remoteproc.h>
#endif /* OPEN_AMP_H_ */

View file

@ -0,0 +1,503 @@
/*
* Remote Processor Framework
*
* Copyright(c) 2011 Texas Instruments, Inc.
* Copyright(c) 2011 Google, Inc.
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef REMOTEPROC_H
#define REMOTEPROC_H
#include <openamp/rpmsg.h>
#include <openamp/firmware.h>
#if defined __cplusplus
extern "C" {
#endif
/**
* struct resource_table - firmware resource table header
* @ver: version number
* @num: number of resource entries
* @reserved: reserved (must be zero)
* @offset: array of offsets pointing at the various resource entries
*
* A resource table is essentially a list of system resources required
* by the remote processor. It may also include configuration entries.
* If needed, the remote processor firmware should contain this table
* as a dedicated ".resource_table" ELF section.
*
* Some resources entries are mere announcements, where the host is informed
* of specific remoteproc configuration. Other entries require the host to
* do something (e.g. allocate a system resource). Sometimes a negotiation
* is expected, where the firmware requests a resource, and once allocated,
* the host should provide back its details (e.g. address of an allocated
* memory region).
*
* The header of the resource table, as expressed by this structure,
* contains a version number (should we need to change this format in the
* future), the number of available resource entries, and their offsets
* in the table.
*
* Immediately following this header are the resource entries themselves,
* each of which begins with a resource entry header (as described below).
*/
OPENAMP_PACKED_BEGIN
struct resource_table {
uint32_t ver;
uint32_t num;
uint32_t reserved[2];
uint32_t offset[0];
} OPENAMP_PACKED_END;
/**
* struct fw_rsc_hdr - firmware resource entry header
* @type: resource type
* @data: resource data
*
* Every resource entry begins with a 'struct fw_rsc_hdr' header providing
* its @type. The content of the entry itself will immediately follow
* this header, and it should be parsed according to the resource type.
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_hdr {
uint32_t type;
uint8_t data[0];
} OPENAMP_PACKED_END;
/**
* enum fw_resource_type - types of resource entries
*
* @RSC_CARVEOUT: request for allocation of a physically contiguous
* memory region.
* @RSC_DEVMEM: request to iommu_map a memory-based peripheral.
* @RSC_TRACE: announces the availability of a trace buffer into which
* the remote processor will be writing logs.
* @RSC_VDEV: declare support for a virtio device, and serve as its
* virtio header.
* @RSC_LAST: just keep this one at the end
*
* For more details regarding a specific resource type, please see its
* dedicated structure below.
*
* Please note that these values are used as indices to the rproc_handle_rsc
* lookup table, so please keep them sane. Moreover, @RSC_LAST is used to
* check the validity of an index before the lookup table is accessed, so
* please update it as needed.
*/
enum fw_resource_type {
RSC_CARVEOUT = 0,
RSC_DEVMEM = 1,
RSC_TRACE = 2,
RSC_VDEV = 3,
RSC_RPROC_MEM = 4,
RSC_FW_CHKSUM = 5,
RSC_LAST = 6,
};
#define FW_RSC_ADDR_ANY (0xFFFFFFFFFFFFFFFF)
/**
* struct fw_rsc_carveout - physically contiguous memory request
* @da: device address
* @pa: physical address
* @len: length (in bytes)
* @flags: iommu protection flags
* @reserved: reserved (must be zero)
* @name: human-readable name of the requested memory region
*
* This resource entry requests the host to allocate a physically contiguous
* memory region.
*
* These request entries should precede other firmware resource entries,
* as other entries might request placing other data objects inside
* these memory regions (e.g. data/code segments, trace resource entries, ...).
*
* Allocating memory this way helps utilizing the reserved physical memory
* (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
* needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
* pressure is important; it may have a substantial impact on performance.
*
* If the firmware is compiled with static addresses, then @da should specify
* the expected device address of this memory region. If @da is set to
* FW_RSC_ADDR_ANY, then the host will dynamically allocate it, and then
* overwrite @da with the dynamically allocated address.
*
* We will always use @da to negotiate the device addresses, even if it
* isn't using an iommu. In that case, though, it will obviously contain
* physical addresses.
*
* Some remote processors need to know the allocated physical address
* even if they do use an iommu. This is needed, e.g., if they control
* hardware accelerators which access the physical memory directly (this
* is the case with OMAP4 for instance). In that case, the host will
* overwrite @pa with the dynamically allocated physical address.
* Generally we don't want to expose physical addresses if we don't have to
* (remote processors are generally _not_ trusted), so we might want to
* change this to happen _only_ when explicitly required by the hardware.
*
* @flags is used to provide IOMMU protection flags, and @name should
* (optionally) contain a human readable name of this carveout region
* (mainly for debugging purposes).
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_carveout {
uint32_t type;
uint32_t da;
uint32_t pa;
uint32_t len;
uint32_t flags;
uint32_t reserved;
uint8_t name[32];
} OPENAMP_PACKED_END;
/**
* struct fw_rsc_devmem - iommu mapping request
* @da: device address
* @pa: physical address
* @len: length (in bytes)
* @flags: iommu protection flags
* @reserved: reserved (must be zero)
* @name: human-readable name of the requested region to be mapped
*
* This resource entry requests the host to iommu map a physically contiguous
* memory region. This is needed in case the remote processor requires
* access to certain memory-based peripherals; _never_ use it to access
* regular memory.
*
* This is obviously only needed if the remote processor is accessing memory
* via an iommu.
*
* @da should specify the required device address, @pa should specify
* the physical address we want to map, @len should specify the size of
* the mapping and @flags is the IOMMU protection flags. As always, @name may
* (optionally) contain a human readable name of this mapping (mainly for
* debugging purposes).
*
* Note: at this point we just "trust" those devmem entries to contain valid
* physical addresses, but this isn't safe and will be changed: eventually we
* want remoteproc implementations to provide us ranges of physical addresses
* the firmware is allowed to request, and not allow firmwares to request
* access to physical addresses that are outside those ranges.
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_devmem {
uint32_t type;
uint32_t da;
uint32_t pa;
uint32_t len;
uint32_t flags;
uint32_t reserved;
uint8_t name[32];
} OPENAMP_PACKED_END;
/**
* struct fw_rsc_trace - trace buffer declaration
* @da: device address
* @len: length (in bytes)
* @reserved: reserved (must be zero)
* @name: human-readable name of the trace buffer
*
* This resource entry provides the host information about a trace buffer
* into which the remote processor will write log messages.
*
* @da specifies the device address of the buffer, @len specifies
* its size, and @name may contain a human readable name of the trace buffer.
*
* After booting the remote processor, the trace buffers are exposed to the
* user via debugfs entries (called trace0, trace1, etc..).
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_trace {
uint32_t type;
uint32_t da;
uint32_t len;
uint32_t reserved;
uint8_t name[32];
} OPENAMP_PACKED_END;
/**
* struct fw_rsc_vdev_vring - vring descriptor entry
* @da: device address
* @align: the alignment between the consumer and producer parts of the vring
* @num: num of buffers supported by this vring (must be power of two)
* @notifyid is a unique rproc-wide notify index for this vring. This notify
* index is used when kicking a remote processor, to let it know that this
* vring is triggered.
* @reserved: reserved (must be zero)
*
* This descriptor is not a resource entry by itself; it is part of the
* vdev resource type (see below).
*
* Note that @da should either contain the device address where
* the remote processor is expecting the vring, or indicate that
* dynamic allocation of the vring's device address is supported.
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_vdev_vring {
uint32_t da;
uint32_t align;
uint32_t num;
uint32_t notifyid;
uint32_t reserved;
} OPENAMP_PACKED_END;
/**
* struct fw_rsc_vdev - virtio device header
* @id: virtio device id (as in virtio_ids.h)
* @notifyid is a unique rproc-wide notify index for this vdev. This notify
* index is used when kicking a remote processor, to let it know that the
* status/features of this vdev have changed.
* @dfeatures specifies the virtio device features supported by the firmware
* @gfeatures is a place holder used by the host to write back the
* negotiated features that are supported by both sides.
* @config_len is the size of the virtio config space of this vdev. The config
* space lies in the resource table immediate after this vdev header.
* @status is a place holder where the host will indicate its virtio progress.
* @num_of_vrings indicates how many vrings are described in this vdev header
* @reserved: reserved (must be zero)
* @vring is an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'.
*
* This resource is a virtio device header: it provides information about
* the vdev, and is then used by the host and its peer remote processors
* to negotiate and share certain virtio properties.
*
* By providing this resource entry, the firmware essentially asks remoteproc
* to statically allocate a vdev upon registration of the rproc (dynamic vdev
* allocation is not yet supported).
*
* Note: unlike virtualization systems, the term 'host' here means
* the Linux side which is running remoteproc to control the remote
* processors. We use the name 'gfeatures' to comply with virtio's terms,
* though there isn't really any virtualized guest OS here: it's the host
* which is responsible for negotiating the final features.
* Yeah, it's a bit confusing.
*
* Note: immediately following this structure is the virtio config space for
* this vdev (which is specific to the vdev; for more info, read the virtio
* spec). the size of the config space is specified by @config_len.
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_vdev {
uint32_t type;
uint32_t id;
uint32_t notifyid;
uint32_t dfeatures;
uint32_t gfeatures;
uint32_t config_len;
uint8_t status;
uint8_t num_of_vrings;
uint8_t reserved[2];
struct fw_rsc_vdev_vring vring[0];
} OPENAMP_PACKED_END;
/**
* struct fw_rsc_rproc_mem - remote processor memory
* @da: device address
* @pa: physical address
* @len: length (in bytes)
* @reserved: reserved (must be zero)
*
* This resource entry tells the host about remote processor
* memory that the host can use as shared memory.
*
* These request entries should precede other shared resource entries
* such as vdevs, vrings.
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_rproc_mem {
uint32_t type;
uint32_t da;
uint32_t pa;
uint32_t len;
uint32_t reserved;
} OPENAMP_PACKED_END;
/*
* struct fw_rsc_fw_chksum - firmware checksum
* @algo: algorithm used to generate the checksum
* @chksum: checksum of the firmware loadable sections.
*
* This resource entry provides checksum for the firmware loadable sections.
* It is used to check whether the remote is already running the expected
* firmware, so the host can decide whether it needs to (re)start the remote.
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_fw_chksum {
uint32_t type;
uint8_t algo[16];
uint8_t chksum[64];
} OPENAMP_PACKED_END;
/**
* struct remote_proc
*
* This structure is maintained by the remoteproc to represent the remote
* processor instance. This structure acts as a prime parameter to use
* the remoteproc APIs.
*
* @proc : hardware interface layer processor control
* @rdev : remote device , used by RPMSG "messaging" framework.
* @loader : pointer to remoteproc loader
* @channel_created : create channel callback
* @channel_destroyed : delete channel callback
* @default_cb : default callback for channel
* @role : remote proc role , RPROC_MASTER/RPROC_REMOTE
*
*/
struct remote_proc {
struct hil_proc *proc;
struct remote_device *rdev;
struct remoteproc_loader *loader;
rpmsg_chnl_cb_t channel_created;
rpmsg_chnl_cb_t channel_destroyed;
rpmsg_rx_cb_t default_cb;
int role;
};
/**
* struct resc_table_info
*
* This structure is maintained by the remoteproc to allow applications
* to pass resource table info during remote initialization.
*
* @rsc_tab : pointer to resource table control block
* @size : size of resource table.
*
*/
struct rsc_table_info {
struct resource_table *rsc_tab;
int size;
};
/* Definitions for device types , null pointer, etc.*/
#define RPROC_SUCCESS 0
#define RPROC_NULL (void *)0
#define RPROC_TRUE 1
#define RPROC_FALSE 0
#define RPROC_MASTER 1
#define RPROC_REMOTE 0
/* Number of msecs to wait for remote context to come up */
#define RPROC_BOOT_DELAY 500
/* Remoteproc error codes */
#define RPROC_ERR_BASE -4000
#define RPROC_ERR_CPU_INIT (RPROC_ERR_BASE -1)
#define RPROC_ERR_NO_RSC_TABLE (RPROC_ERR_BASE -2)
#define RPROC_ERR_NO_MEM (RPROC_ERR_BASE -3)
#define RPROC_ERR_RSC_TAB_TRUNC (RPROC_ERR_BASE -4)
#define RPROC_ERR_RSC_TAB_VER (RPROC_ERR_BASE -5)
#define RPROC_ERR_RSC_TAB_RSVD (RPROC_ERR_BASE -6)
#define RPROC_ERR_RSC_TAB_VDEV_NRINGS (RPROC_ERR_BASE -7)
#define RPROC_ERR_RSC_TAB_NP (RPROC_ERR_BASE -8)
#define RPROC_ERR_RSC_TAB_NS (RPROC_ERR_BASE -9)
#define RPROC_ERR_INVLD_FW (RPROC_ERR_BASE -10)
#define RPROC_ERR_LOADER (RPROC_ERR_BASE -11)
#define RPROC_ERR_PARAM (RPROC_ERR_BASE -12)
#define RPROC_ERR_PTR (void*)0xDEADBEAF
/**
* remoteproc_resource_init
*
* Initializes resources for remoteproc remote configuration. Only
* remoteproc remote applications are allowed to call this function.
*
* @param rsc_info - pointer to resource table info control
* block
* @param proc - pointer to the hil_proc
* @param channel_created - callback function for channel creation
* @param channel_destroyed - callback function for channel deletion
* @param default_cb - default callback for channel I/O
* @param rproc_handle - pointer to new remoteproc instance
* @param rpmsg_role - 1 for rpmsg master, or 0 for rpmsg slave
*
* @return - status of execution
*
*/
int remoteproc_resource_init(struct rsc_table_info *rsc_info,
struct hil_proc *proc,
rpmsg_chnl_cb_t channel_created,
rpmsg_chnl_cb_t channel_destroyed,
rpmsg_rx_cb_t default_cb,
struct remote_proc **rproc_handle,
int rpmsg_role);
/**
* remoteproc_resource_deinit
*
* Uninitializes resources for remoteproc remote configuration.
*
* @param rproc - pointer to remoteproc instance
*
* @return - status of execution
*
*/
int remoteproc_resource_deinit(struct remote_proc *rproc);
/**
* remoteproc_init
*
* Initializes resources for remoteproc master configuration. Only
* remoteproc master applications are allowed to call this function.
*
* @param fw_name - name of firmware
* @param proc - pointer to hil_proc
* @param channel_created - callback function for channel creation
* @param channel_destroyed - callback function for channel deletion
* @param default_cb - default callback for channel I/O
* @param rproc_handle - pointer to new remoteproc instance
*
* @return - status of function execution
*
*/
int remoteproc_init(char *fw_name, struct hil_proc *proc,
rpmsg_chnl_cb_t channel_created,
rpmsg_chnl_cb_t channel_destroyed,
rpmsg_rx_cb_t default_cb,
struct remote_proc **rproc_handle);
/**
* remoteproc_deinit
*
* Uninitializes resources for remoteproc "master" configuration.
*
* @param rproc - pointer to remoteproc instance
*
* @return - status of function execution
*
*/
int remoteproc_deinit(struct remote_proc *rproc);
/**
* remoteproc_boot
*
* This function loads the image on the remote processor and starts
* its execution from image load address.
*
* @param rproc - pointer to remoteproc instance to boot
*
* @return - status of function execution
*/
int remoteproc_boot(struct remote_proc *rproc);
/**
* remoteproc_shutdown
*
* This function shutdowns the remote execution context.
*
* @param rproc - pointer to remoteproc instance to shutdown
*
* @return - status of function execution
*/
int remoteproc_shutdown(struct remote_proc *rproc);
#if defined __cplusplus
}
#endif
#endif /* REMOTEPROC_H_ */
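
A sketch of the remote-side bring-up flow implied by the declarations above. The callbacks, the hil_proc and the resource table are assumed to be supplied by the application and platform port; whether RPROC_REMOTE or RPROC_MASTER is passed for the rpmsg_role argument depends on which side owns the vrings.

/* app_* callbacks are assumed to match the rpmsg_chnl_cb_t / rpmsg_rx_cb_t
 * typedefs from rpmsg_core.h; they are placeholders, not part of this import.
 */
extern rpmsg_chnl_cb_t app_channel_created;
extern rpmsg_chnl_cb_t app_channel_destroyed;
extern rpmsg_rx_cb_t app_rx_cb;

static int start_ipc(struct hil_proc *proc,
		     struct resource_table *rsc_tab, int rsc_size)
{
	struct rsc_table_info rsc_info = {
		.rsc_tab = rsc_tab,
		.size = rsc_size,
	};
	struct remote_proc *rproc;
	int status;

	status = remoteproc_resource_init(&rsc_info, proc,
					  app_channel_created,
					  app_channel_destroyed,
					  app_rx_cb,
					  &rproc,
					  RPROC_REMOTE /* or RPROC_MASTER */);
	if (status != RPROC_SUCCESS)
		return status;

	/* ... exchange rpmsg messages on the created channel(s) ... */

	return remoteproc_resource_deinit(rproc);
}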

View file

@ -0,0 +1,80 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* remoteproc_loader.h
*
* COMPONENT
*
* OpenAMP stack.
*
* DESCRIPTION
*
* This file provides definitions for remoteproc loader
*
*
**************************************************************************/
#ifndef REMOTEPROC_LOADER_H_
#define REMOTEPROC_LOADER_H_
#include <openamp/remoteproc.h>
#if defined __cplusplus
extern "C" {
#endif
/**
* enum loader_type - supported remoteproc firmware loader types
*
* @ELF_LOADER: an ELF loader
* @FIT_LOADER: a loader for Flattened Image Trees
*/
enum loader_type {
ELF_LOADER = 0, FIT_LOADER = 1, LAST_LOADER = 2,
};
/* Loader structure definition. */
struct remoteproc_loader {
enum loader_type type;
void *remote_firmware;
/* Pointer to firmware decoded info control block */
void *fw_decode_info;
/* Loader callbacks. */
void *(*retrieve_entry) (struct remoteproc_loader * loader);
void *(*retrieve_rsc) (struct remoteproc_loader * loader,
unsigned int *size);
int (*load_firmware) (struct remoteproc_loader * loader);
int (*attach_firmware) (struct remoteproc_loader * loader,
void *firmware);
int (*detach_firmware) (struct remoteproc_loader * loader);
void *(*retrieve_load_addr) (struct remoteproc_loader * loader);
};
/* RemoteProc Loader functions. */
struct remoteproc_loader *remoteproc_loader_init(enum loader_type type);
int remoteproc_loader_delete(struct remoteproc_loader *loader);
int remoteproc_loader_attach_firmware(struct remoteproc_loader *loader,
void *firmware_image);
void *remoteproc_loader_retrieve_entry_point(struct remoteproc_loader *loader);
void *remoteproc_loader_retrieve_resource_section(struct remoteproc_loader
*loader, unsigned int *size);
int remoteproc_loader_load_remote_firmware(struct remoteproc_loader *loader);
void *remoteproc_get_load_address(struct remoteproc_loader *loader);
/* Supported loaders */
extern int elf_loader_init(struct remoteproc_loader *loader);
#if defined __cplusplus
}
#endif
#endif /* REMOTEPROC_LOADER_H_ */
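
A loader-flow sketch based only on the declarations above; fw_image is a placeholder for a firmware blob already resident in memory, and treating RPROC_SUCCESS as the success return of the attach/load calls is an assumption.

static void *elf_load_example(void *fw_image)
{
	struct remoteproc_loader *loader;
	void *entry = NULL;

	loader = remoteproc_loader_init(ELF_LOADER);
	if (!loader)
		return NULL;

	/* Attach the in-memory image, load it, then look up the entry point. */
	if (remoteproc_loader_attach_firmware(loader, fw_image) == RPROC_SUCCESS &&
	    remoteproc_loader_load_remote_firmware(loader) == RPROC_SUCCESS)
		entry = remoteproc_loader_retrieve_entry_point(loader);

	remoteproc_loader_delete(loader);
	return entry;
}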

View file

@ -0,0 +1,575 @@
/*
* Remote processor messaging
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
* All rights reserved.
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _RPMSG_H_
#define _RPMSG_H_
#include <openamp/rpmsg_core.h>
#if defined __cplusplus
extern "C" {
#endif
/* The feature bitmap for virtio rpmsg */
#define VIRTIO_RPMSG_F_NS 0 /* RP supports name service notifications */
#define RPMSG_NAME_SIZE 32
#define RPMSG_BUF_HELD (1U << 31) /* Flag to suggest to hold the buffer */
#define RPMSG_LOCATE_DATA(p) ((unsigned char *) p + sizeof (struct rpmsg_hdr))
/**
* struct rpmsg_hdr - common header for all rpmsg messages
* @src: source address
* @dst: destination address
* @reserved: reserved for future use
* @len: length of payload (in bytes)
* @flags: message flags
*
* Every message sent(/received) on the rpmsg bus begins with this header.
*/
OPENAMP_PACKED_BEGIN
struct rpmsg_hdr {
uint32_t src;
uint32_t dst;
uint32_t reserved;
uint16_t len;
uint16_t flags;
} OPENAMP_PACKED_END;
/**
* struct rpmsg_hdr_reserved - this is the "union" of the rpmsg_hdr->reserved
* @rfu: reserved for future usage
* @idx: index of a buffer (not to be returned back to the buffer's pool)
*
* This structure has been introduced to keep the backward compatibility.
* It could be integrated into rpmsg_hdr struct, replacing the reserved field.
*/
struct rpmsg_hdr_reserved
{
uint16_t rfu; /* reserved for future usage */
uint16_t idx;
};
/**
* struct rpmsg_ns_msg - dynamic name service announcement message
* @name: name of remote service that is published
* @addr: address of remote service that is published
* @flags: indicates whether service is created or destroyed
*
* This message is sent across to publish a new service, or announce
* about its removal. When we receive these messages, an appropriate
* rpmsg channel (i.e. device) is created/destroyed. In turn, the ->probe()
* or ->remove() handler of the appropriate rpmsg driver will be invoked
* (if/as-soon-as one is registered).
*/
OPENAMP_PACKED_BEGIN
struct rpmsg_ns_msg {
char name[RPMSG_NAME_SIZE];
uint32_t addr;
uint32_t flags;
} OPENAMP_PACKED_END;
/**
* enum rpmsg_ns_flags - dynamic name service announcement flags
*
* @RPMSG_NS_CREATE: a new remote service was just created
* @RPMSG_NS_DESTROY: a known remote service was just destroyed
*/
enum rpmsg_ns_flags {
RPMSG_NS_CREATE = 0,
RPMSG_NS_DESTROY = 1,
};
#define RPMSG_ADDR_ANY 0xFFFFFFFF
/**
* rpmsg_channel - devices that belong to the rpmsg bus are called channels
* @name: channel name
* @src: local address
* @dst: destination address
* @rdev: rpmsg remote device
* @rp_ept: the rpmsg endpoint of this channel
* @state: channel state
*/
struct rpmsg_channel {
char name[RPMSG_NAME_SIZE];
uint32_t src;
uint32_t dst;
struct remote_device *rdev;
struct rpmsg_endpoint *rp_ept;
unsigned int state;
struct metal_list node;
};
/**
* channel_info - channel info
* @name: channel name
* @src: local address
* @dest: destination address
*/
struct channel_info {
char name[RPMSG_NAME_SIZE];
uint32_t src;
uint32_t dest;
};
/**
* struct rpmsg_endpoint - binds a local rpmsg address to its user
* @rp_chnl: rpmsg channel device
* @cb: rx callback handler
* @addr: local rpmsg address
* @priv: private data for the driver's use
*
* In essence, an rpmsg endpoint represents a listener on the rpmsg bus, as
* it binds an rpmsg address with an rx callback handler.
*
* Simple rpmsg drivers shouldn't use this struct directly, because
* things just work: every rpmsg driver provides an rx callback upon
* registering to the bus, and that callback is then bound to its rpmsg
* address when the driver is probed. When relevant inbound messages arrive
* (i.e. messages whose dst address equals the src address of
* the rpmsg channel), the driver's handler is invoked to process it.
*
* More complicated drivers though, that do need to allocate additional rpmsg
* addresses, and bind them to different rx callbacks, must explicitly
* create additional endpoints by themselves (see rpmsg_create_ept()).
*/
struct rpmsg_endpoint {
struct rpmsg_channel *rp_chnl;
rpmsg_rx_cb_t cb;
uint32_t addr;
void *priv;
struct metal_list node;
};
struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rp_chnl,
rpmsg_rx_cb_t cb, void *priv,
uint32_t addr);
void rpmsg_destroy_ept(struct rpmsg_endpoint *rp_ept);
int
rpmsg_send_offchannel_raw(struct rpmsg_channel *, uint32_t, uint32_t,
const void *, int, int);
/**
* rpmsg_send() - send a message across to the remote processor
* @rpdev: the rpmsg channel
* @data: payload of message
* @len: length of payload
*
* This function sends @data of length @len on the @rpdev channel.
* The message will be sent to the remote processor which the @rpdev
* channel belongs to, using @rpdev's source and destination addresses.
* In case there are no TX buffers available, the function will block until
* one becomes available, or a timeout of 15 seconds elapses. When the latter
* happens, -ERESTARTSYS is returned.
*
* Can only be called from process context (for now).
*
* Returns number of bytes it has sent or negative error value on failure.
*/
static inline int rpmsg_send(struct rpmsg_channel *rpdev, const void *data,
int len)
{
return rpmsg_send_offchannel_raw(rpdev, rpdev->src, rpdev->dst,
data, len, RPMSG_TRUE);
}
/**
* rpmsg_sendto() - send a message across to the remote processor, specify dst
* @rpdev: the rpmsg channel
* @data: payload of message
* @len: length of payload
* @dst: destination address
*
* This function sends @data of length @len to the remote @dst address.
* The message will be sent to the remote processor which the @rpdev
* channel belongs to, using @rpdev's source address.
* In case there are no TX buffers available, the function will block until
* one becomes available, or a timeout of 15 seconds elapses. When the latter
* happens, -ERESTARTSYS is returned.
*
* Can only be called from process context (for now).
*
* Returns number of bytes it has sent or negative error value on failure.
*/
static inline int rpmsg_sendto(struct rpmsg_channel *rpdev, const void *data,
int len, uint32_t dst)
{
return rpmsg_send_offchannel_raw(rpdev, rpdev->src, dst, data,
len, RPMSG_TRUE);
}
/**
* rpmsg_send_offchannel() - send a message using explicit src/dst addresses
* @rpdev: the rpmsg channel
* @src: source address
* @dst: destination address
* @data: payload of message
* @len: length of payload
*
* This function sends @data of length @len to the remote @dst address,
* and uses @src as the source address.
* The message will be sent to the remote processor which the @rpdev
* channel belongs to.
* In case there are no TX buffers available, the function will block until
* one becomes available, or a timeout of 15 seconds elapses. When the latter
* happens, -ERESTARTSYS is returned.
*
* Can only be called from process context (for now).
*
* Returns number of bytes it has sent or negative error value on failure.
*/
static inline int rpmsg_send_offchannel(struct rpmsg_channel *rpdev,
uint32_t src, uint32_t dst,
const void *data, int len)
{
return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len,
RPMSG_TRUE);
}
/**
* rpmsg_trysend() - send a message across to the remote processor
* @rpdev: the rpmsg channel
* @data: payload of message
* @len: length of payload
*
* This function sends @data of length @len on the @rpdev channel.
* The message will be sent to the remote processor which the @rpdev
* channel belongs to, using @rpdev's source and destination addresses.
* In case there are no TX buffers available, the function will immediately
* return -ENOMEM without waiting until one becomes available.
*
* Can only be called from process context (for now).
*
* Returns number of bytes it has sent or negative error value on failure.
*/
static inline int rpmsg_trysend(struct rpmsg_channel *rpdev, const void *data,
int len)
{
return rpmsg_send_offchannel_raw(rpdev, rpdev->src, rpdev->dst,
data, len, RPMSG_FALSE);
}
/**
* rpmsg_trysendto() - send a message across to the remote processor, specify dst
* @rpdev: the rpmsg channel
* @data: payload of message
* @len: length of payload
* @dst: destination address
*
* This function sends @data of length @len to the remote @dst address.
* The message will be sent to the remote processor which the @rpdev
* channel belongs to, using @rpdev's source address.
* In case there are no TX buffers available, the function will immediately
* return -ENOMEM without waiting until one becomes available.
*
* Can only be called from process context (for now).
*
* Returns number of bytes it has sent or negative error value on failure.
*/
static inline int rpmsg_trysendto(struct rpmsg_channel *rpdev, const void *data,
int len, uint32_t dst)
{
return rpmsg_send_offchannel_raw(rpdev, rpdev->src, dst, data, len,
RPMSG_FALSE);
}
/**
* rpmsg_trysend_offchannel() - send a message using explicit src/dst addresses
* @rpdev: the rpmsg channel
* @src: source address
* @dst: destination address
* @data: payload of message
* @len: length of payload
*
* This function sends @data of length @len to the remote @dst address,
* and uses @src as the source address.
* The message will be sent to the remote processor which the @rpdev
* channel belongs to.
* In case there are no TX buffers available, the function will immediately
* return -ENOMEM without waiting until one becomes available.
*
* Can only be called from process context (for now).
*
* Returns number of bytes it has sent or negative error value on failure.
*/
static inline int rpmsg_trysend_offchannel(struct rpmsg_channel *rpdev,
uint32_t src, uint32_t dst,
const void *data, int len)
{
return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len,
RPMSG_FALSE);
}
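/* Sketch (not part of the import): the blocking and non-blocking variants
 * above, called from a hypothetical application function once a channel
 * exists. The payload is a placeholder.
 */
static int rpmsg_send_example(struct rpmsg_channel *rpdev)
{
	const char msg[] = "hello";
	int ret;

	/* Blocks until a TX buffer is free (up to the 15 s timeout). */
	ret = rpmsg_send(rpdev, msg, sizeof(msg));
	if (ret < 0)
		return ret;

	/* Returns immediately with -ENOMEM if no TX buffer is free. */
	return rpmsg_trysendto(rpdev, msg, sizeof(msg), rpdev->dst);
}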
/**
* @brief Holds the rx buffer for usage outside the receive callback.
*
* Calling this function prevents the RPMsg receive buffer from being released
* back to the pool of shmem buffers. This API can only be called at rx
* callback context (rpmsg_rx_cb_t). With this API, the application doesn't
* need to copy the message in rx callback. Instead, the rx buffer base address
* is saved in application context and further processed in application
* process. After the message is processed, the application can release the rx
* buffer for future reuse in vring by calling the rpmsg_release_rx_buffer()
* function.
*
* @param[in] rpdev The rpmsg channel
* @param[in] rxbuf RX buffer with message payload
*
* @see rpmsg_release_rx_buffer
*/
void rpmsg_hold_rx_buffer(struct rpmsg_channel *rpdev, void *rxbuf);
/**
* @brief Releases the rx buffer for future reuse in vring.
*
* This API can be called at process context when the message in rx buffer is
* processed.
*
* @param rpdev - the rpmsg channel
* @param rxbuf - rx buffer with message payload
*
* @see rpmsg_hold_rx_buffer
*/
void rpmsg_release_rx_buffer(struct rpmsg_channel *rpdev, void *rxbuf);
/**
* @brief Gets the tx buffer for message payload.
*
* This API can only be called at process context to get the tx buffer in vring.
* In this way, the application can directly put its message into the vring tx
* buffer without copying it from an application buffer.
* It is the application's responsibility to correctly fill the allocated tx
* buffer with data and to pass correct parameters to the rpmsg_send_nocopy() or
* rpmsg_sendto_nocopy() function to perform the no-copy send mechanism.
*
* @param[in] rpdev Pointer to rpmsg channel
* @param[out] size Pointer to store the tx buffer size
* @param[in] wait Boolean, wait or not for buffer to become available
*
* @return The tx buffer address on success and NULL on failure
*
* @see rpmsg_send_offchannel_nocopy
* @see rpmsg_sendto_nocopy
* @see rpmsg_send_nocopy
*/
void *rpmsg_get_tx_payload_buffer(struct rpmsg_channel *rpdev, uint32_t *size,
int wait);
/**
* @brief Sends a message in tx buffer allocated by
* rpmsg_get_tx_payload_buffer()
*
* using explicit src/dst addresses.
*
* This function sends txbuf of length len to the remote dst address,
* and uses src as the source address.
* The message will be sent to the remote processor which the rpdev
* channel belongs to.
* The application has to take the responsibility for:
* 1. tx buffer allocation (rpmsg_get_tx_payload_buffer() )
* 2. filling the data to be sent into the pre-allocated tx buffer
* 3. not exceeding the buffer size when filling the data
* 4. data cache coherency
*
 * After the rpmsg_send_offchannel_nocopy() function is issued, the tx buffer is
 * no longer owned by the sending task and must not be touched anymore, unless
 * the rpmsg_send_offchannel_nocopy() function fails and returns an error. In
 * that case the application should try to re-issue
 * rpmsg_send_offchannel_nocopy() again; if it is still not possible to send the
 * message and the application wants to give up for whatever reason, the
 * rpmsg_release_rx_buffer() function can be called, passing the pointer to the
 * tx buffer to be released as a parameter.
*
* @param[in] rpdev The rpmsg channel
* @param[in] src Source address
* @param[in] dst Destination address
* @param[in] txbuf TX buffer with message filled
* @param[in] len Length of payload
*
* @return number of bytes it has sent or negative error value on failure.
*
* @see rpmsg_get_tx_payload_buffer
* @see rpmsg_sendto_nocopy
* @see rpmsg_send_nocopy
*/
int rpmsg_send_offchannel_nocopy(struct rpmsg_channel *rpdev, uint32_t src,
uint32_t dst, void *txbuf, int len);
/**
 * @brief Sends a message in a tx buffer allocated by
 * rpmsg_get_tx_payload_buffer() to the remote processor, specifying the
 * destination address.
*
* This function sends txbuf of length len to the remote dst address.
* The message will be sent to the remote processor which the rpdev
* channel belongs to, using rpdev's source address.
* The application has to take the responsibility for:
* 1. tx buffer allocation (rpmsg_get_tx_payload_buffer() )
* 2. filling the data to be sent into the pre-allocated tx buffer
* 3. not exceeding the buffer size when filling the data
* 4. data cache coherency
*
 * After the rpmsg_sendto_nocopy() function is issued, the tx buffer is no
 * longer owned by the sending task and must not be touched anymore, unless the
 * rpmsg_sendto_nocopy() function fails and returns an error. In that case the
 * application should try to re-issue rpmsg_sendto_nocopy() again; if it is
 * still not possible to send the message and the application wants to give up
 * for whatever reason, the rpmsg_release_rx_buffer() function can be called,
 * passing the pointer to the tx buffer to be released as a parameter.
*
* @param[in] rpdev The rpmsg channel
* @param[in] txbuf TX buffer with message filled
* @param[in] len Length of payload
* @param[in] dst Destination address
*
* @return number of bytes it has sent or negative error value on failure.
*
* @see rpmsg_get_tx_payload_buffer
* @see rpmsg_send_offchannel_nocopy
* @see rpmsg_send_nocopy
*/
static inline
int rpmsg_sendto_nocopy(struct rpmsg_channel *rpdev, void *txbuf, int len,
uint32_t dst)
{
if (!rpdev)
return RPMSG_ERR_PARAM;
return rpmsg_send_offchannel_nocopy(rpdev, (uint32_t)rpdev->src, dst,
txbuf, len);
}
/**
* @brief Sends a message in tx buffer allocated by
* rpmsg_get_tx_payload_buffer() across to the remote processor.
*
* This function sends txbuf of length len on the rpdev channel.
* The message will be sent to the remote processor which the rpdev
* channel belongs to, using rpdev's source and destination addresses.
* The application has to take the responsibility for:
* 1. tx buffer allocation (rpmsg_get_tx_payload_buffer() )
* 2. filling the data to be sent into the pre-allocated tx buffer
* 3. not exceeding the buffer size when filling the data
* 4. data cache coherency
*
 * After the rpmsg_send_nocopy() function is issued, the tx buffer is no longer
 * owned by the sending task and must not be touched anymore, unless the
 * rpmsg_send_nocopy() function fails and returns an error. In that case the
 * application should try to re-issue rpmsg_send_nocopy() again; if it is still
 * not possible to send the message and the application wants to give up for
 * whatever reason, the rpmsg_release_rx_buffer() function can be called,
 * passing the pointer to the tx buffer to be released as a parameter.
*
* @param[in] rpdev The rpmsg channel
* @param[in] txbuf TX buffer with message filled
* @param[in] len Length of payload
*
 * @return Number of bytes it has sent or a negative error value on failure
*
* @see rpmsg_get_tx_payload_buffer
* @see rpmsg_send_offchannel_nocopy
* @see rpmsg_sendto_nocopy
*/
static inline
int rpmsg_send_nocopy(struct rpmsg_channel *rpdev, void *txbuf, int len)
{
if (!rpdev)
return RPMSG_ERR_PARAM;
return rpmsg_send_offchannel_nocopy(rpdev, rpdev->src, rpdev->dst,
txbuf, len);
}
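/*
 * Hypothetical usage sketch (not part of the imported sources): the no-copy
 * transmit path.  A vring tx buffer is obtained, filled in place and handed
 * over with rpmsg_send_nocopy(); on success the buffer is owned by the rpmsg
 * layer.  Assumes <string.h> is available for memcpy().
 */
static inline int app_send_nocopy(struct rpmsg_channel *rpdev,
				  const void *payload, int len)
{
	uint32_t buf_size;
	void *txbuf;

	/* Block until a tx buffer becomes available (wait != 0). */
	txbuf = rpmsg_get_tx_payload_buffer(rpdev, &buf_size, 1);
	if (!txbuf || (uint32_t)len > buf_size)
		return RPMSG_ERR_NO_BUFF;

	memcpy(txbuf, payload, len);

	return rpmsg_send_nocopy(rpdev, txbuf, len);
}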
/**
* rpmsg_init
*
 * This function allocates and initializes the rpmsg driver resources for
 * the given hil_proc. A successful return from this function leaves a
 * fully enabled IPC link.
*
* @param proc - pointer to hil_proc
* @param rdev - pointer to newly created remote device
* @param channel_created - callback function for channel creation
* @param channel_destroyed - callback function for channel deletion
* @param default_cb - default callback for channel
* @param role - role of the other device, Master or Remote
* @return - status of function execution
*
*/
int rpmsg_init(struct hil_proc *proc,
struct remote_device **rdev,
rpmsg_chnl_cb_t channel_created,
rpmsg_chnl_cb_t channel_destroyed,
rpmsg_rx_cb_t default_cb, int role);
/**
* rpmsg_deinit
*
 * This function releases the rpmsg driver resources for the given remote
 * device instance.
*
* @param rdev - pointer to device de-init
*
* @return - none
*
*/
void rpmsg_deinit(struct remote_device *rdev);
/**
* rpmsg_get_buffer_size
*
* Returns buffer size available for sending messages.
*
 * @param rp_chnl - pointer to rpmsg channel/device
*
* @return - buffer size
*
*/
int rpmsg_get_buffer_size(struct rpmsg_channel *rp_chnl);
/**
* rpmsg_create_channel
*
* Creates RPMSG channel with the given name for remote device.
*
* @param rdev - pointer to rpmsg remote device
* @param name - channel name
*
* @return - pointer to new rpmsg channel
*
*/
struct rpmsg_channel *rpmsg_create_channel(struct remote_device *rdev,
char *name);
/**
* rpmsg_delete_channel
*
* Deletes the given RPMSG channel. The channel must first be created with the
* rpmsg_create_channel API.
*
* @param rp_chnl - pointer to rpmsg channel to delete
*
*/
void rpmsg_delete_channel(struct rpmsg_channel *rp_chnl);
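/*
 * Hypothetical usage sketch (not part of the imported sources): bringing up
 * the rpmsg layer.  Obtaining the struct hil_proc is platform specific and is
 * simply passed in here; the callback bodies and the chosen role (RPMSG_MASTER,
 * defined in rpmsg_core.h, meaning the peer acts as master) are illustrative
 * assumptions.
 */
static void app_chnl_created(struct rpmsg_channel *rp_chnl)
{
	/* The channel is usable from here on, e.g. remember it for later sends. */
	(void)rp_chnl;
}

static void app_chnl_destroyed(struct rpmsg_channel *rp_chnl)
{
	(void)rp_chnl;
}

static void app_default_rx(struct rpmsg_channel *rp_chnl, void *data, int len,
			   void *priv, unsigned long src)
{
	(void)rp_chnl;
	(void)data;
	(void)len;
	(void)priv;
	(void)src;
}

static inline int app_start_ipc(struct hil_proc *proc,
				struct remote_device **rdev)
{
	/* The role parameter describes the other device, per rpmsg_init(). */
	return rpmsg_init(proc, rdev, app_chnl_created, app_chnl_destroyed,
			  app_default_rx, RPMSG_MASTER);
}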
#if defined __cplusplus
}
#endif
#endif /* _RPMSG_H_ */

View file

@ -0,0 +1,183 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _RPMSG_CORE_H_
#define _RPMSG_CORE_H_
#include <openamp/compiler.h>
#include <openamp/virtio.h>
#include <openamp/hil.h>
#include <openamp/sh_mem.h>
#include <openamp/rpmsg.h>
#include <metal/mutex.h>
#include <metal/list.h>
#if defined __cplusplus
extern "C" {
#endif
/* Configurable parameters */
#define RPMSG_BUFFER_SIZE 512
#define RPMSG_MAX_VQ_PER_RDEV 2
#define RPMSG_NS_EPT_ADDR 0x35
#define RPMSG_ADDR_BMP_SIZE 4
/* Definitions for device types, null pointer, etc. */
#define RPMSG_SUCCESS 0
#define RPMSG_NULL (void *)0
#define RPMSG_REMOTE 0
#define RPMSG_MASTER 1
#define RPMSG_TRUE 1
#define RPMSG_FALSE 0
/* RPMSG channel states. */
#define RPMSG_CHNL_STATE_IDLE 0
#define RPMSG_CHNL_STATE_NS 1
#define RPMSG_CHNL_STATE_ACTIVE 2
/* Remote processor/device states. */
#define RPMSG_DEV_STATE_IDLE 0
#define RPMSG_DEV_STATE_ACTIVE 1
/* Total tick count for 15secs - 1msec tick. */
#define RPMSG_TICK_COUNT 15000
/* Time to wait - In multiple of 10 msecs. */
#define RPMSG_TICKS_PER_INTERVAL 10
/* Error macros. */
#define RPMSG_ERROR_BASE -2000
#define RPMSG_ERR_NO_MEM (RPMSG_ERROR_BASE - 1)
#define RPMSG_ERR_NO_BUFF (RPMSG_ERROR_BASE - 2)
#define RPMSG_ERR_MAX_VQ (RPMSG_ERROR_BASE - 3)
#define RPMSG_ERR_PARAM (RPMSG_ERROR_BASE - 4)
#define RPMSG_ERR_DEV_STATE (RPMSG_ERROR_BASE - 5)
#define RPMSG_ERR_BUFF_SIZE (RPMSG_ERROR_BASE - 6)
#define RPMSG_ERR_DEV_INIT (RPMSG_ERROR_BASE - 7)
#define RPMSG_ERR_DEV_ADDR (RPMSG_ERROR_BASE - 8)
/* Zero-Copy extension macros */
#define RPMSG_HDR_FROM_BUF(buf) (struct rpmsg_hdr *)((char*)buf - \
sizeof(struct rpmsg_hdr))
struct rpmsg_channel;
typedef void (*rpmsg_rx_cb_t) (struct rpmsg_channel *, void *, int, void *,
unsigned long);
typedef void (*rpmsg_chnl_cb_t) (struct rpmsg_channel * rp_chl);
/**
* remote_device
*
* This structure is maintained by RPMSG driver to represent remote device/core.
*
 * @virt_dev - virtio device for remote core
* @rvq - Rx virtqueue for virtio device
* @tvq - Tx virtqueue for virtio device
* @proc - reference to remote processor
* @rp_channels - rpmsg channels list for the device
* @rp_endpoints - rpmsg endpoints list for the device
* @mem_pool - shared memory pool
* @bitmap - bitmap for channels addresses
* @channel_created - create channel callback
* @channel_destroyed - delete channel callback
* @default_cb - default callback handler for RX data on channel
* @lock - remote device mutex
* @role - role of the remote device, RPMSG_MASTER/RPMSG_REMOTE
* @state - remote device state, IDLE/ACTIVE
* @support_ns - if device supports name service announcement
*
*/
struct remote_device {
struct virtio_device virt_dev;
struct virtqueue *rvq;
struct virtqueue *tvq;
struct hil_proc *proc;
struct metal_list rp_channels;
struct metal_list rp_endpoints;
struct sh_mem_pool *mem_pool;
unsigned long bitmap[RPMSG_ADDR_BMP_SIZE];
rpmsg_chnl_cb_t channel_created;
rpmsg_chnl_cb_t channel_destroyed;
rpmsg_rx_cb_t default_cb;
metal_mutex_t lock;
unsigned int role;
unsigned int state;
int support_ns;
};
/* Core functions */
int rpmsg_start_ipc(struct remote_device *rdev);
struct rpmsg_channel *_rpmsg_create_channel(struct remote_device *rdev,
char *name, unsigned long src,
unsigned long dst);
void _rpmsg_delete_channel(struct rpmsg_channel *rp_chnl);
struct rpmsg_endpoint *_create_endpoint(struct remote_device *rdev,
rpmsg_rx_cb_t cb, void *priv,
unsigned long addr);
void _destroy_endpoint(struct remote_device *rdev,
struct rpmsg_endpoint *rp_ept);
int rpmsg_send_ns_message(struct remote_device *rdev,
struct rpmsg_channel *rp_chnl, unsigned long flags);
int rpmsg_enqueue_buffer(struct remote_device *rdev, void *buffer,
unsigned long len, unsigned short idx);
void rpmsg_return_buffer(struct remote_device *rdev, void *buffer,
unsigned long len, unsigned short idx);
void *rpmsg_get_tx_buffer(struct remote_device *rdev, unsigned long *len,
unsigned short *idx);
void rpmsg_free_buffer(struct remote_device *rdev, void *buffer);
void rpmsg_free_channel(struct rpmsg_channel *rp_chnl);
void *rpmsg_get_rx_buffer(struct remote_device *rdev, unsigned long *len,
unsigned short *idx);
int rpmsg_get_address(unsigned long *bitmap, int size);
int rpmsg_release_address(unsigned long *bitmap, int size, int addr);
int rpmsg_is_address_set(unsigned long *bitmap, int size, int addr);
int rpmsg_set_address(unsigned long *bitmap, int size, int addr);
void rpmsg_ns_callback(struct rpmsg_channel *server_chnl,
void *data, int len, void *priv, unsigned long src);
/* Remote device functions */
int rpmsg_rdev_init(struct hil_proc *proc,
struct remote_device **rdev, int role,
rpmsg_chnl_cb_t channel_created,
rpmsg_chnl_cb_t channel_destroyed,
rpmsg_rx_cb_t default_cb);
void rpmsg_rdev_deinit(struct remote_device *rdev);
int rpmsg_rdev_remote_ready(struct remote_device *rdev);
struct rpmsg_channel *rpmsg_rdev_get_chnl_from_id(struct remote_device *rdev,
char *rp_chnl_id);
struct rpmsg_endpoint *rpmsg_rdev_get_endpoint_from_addr(
struct remote_device *rdev,
unsigned long addr);
int rpmsg_rdev_notify(struct remote_device *rdev);
int rpmsg_rdev_create_virtqueues(struct virtio_device *dev, int flags, int nvqs,
const char *names[], vq_callback * callbacks[],
struct virtqueue *vqs[]);
unsigned char rpmsg_rdev_get_status(struct virtio_device *dev);
void rpmsg_rdev_set_status(struct virtio_device *dev, unsigned char status);
uint32_t rpmsg_rdev_get_feature(struct virtio_device *dev);
void rpmsg_rdev_set_feature(struct virtio_device *dev, uint32_t feature);
uint32_t rpmsg_rdev_negotiate_feature(struct virtio_device *dev,
uint32_t features);
/*
* Read/write a variable amount from the device specific (ie, network)
* configuration region. This region is encoded in the same endian as
* the guest.
*/
void rpmsg_rdev_read_config(struct virtio_device *dev, uint32_t offset,
void *dst, int length);
void rpmsg_rdev_write_config(struct virtio_device *dev, uint32_t offset,
void *src, int length);
void rpmsg_rdev_reset(struct virtio_device *dev);
#if defined __cplusplus
}
#endif
#endif /* _RPMSG_CORE_H_ */

View file

@ -0,0 +1,62 @@
#include <openamp/open_amp.h>
#include <metal/mutex.h>
#include <metal/atomic.h>
#ifndef RPMSG_RETARGET_H
#define RPMSG_RETARGET_H
#if defined __cplusplus
extern "C" {
#endif
/* RPC response buffer size */
#define RPC_BUFF_SIZE 512
/* System call definitions */
#define OPEN_SYSCALL_ID 1
#define CLOSE_SYSCALL_ID 2
#define WRITE_SYSCALL_ID 3
#define READ_SYSCALL_ID 4
#define ACK_STATUS_ID 5
#define TERM_SYSCALL_ID 6
#define FILE_NAME_LEN 50
/* Proxy device endpoint ID */
#define PROXY_ENDPOINT 127
typedef void (*rpc_shutdown_cb) (struct rpmsg_channel *);
struct _rpc_data {
struct rpmsg_channel *rpmsg_chnl;
struct rpmsg_endpoint *rp_ept;
metal_mutex_t rpc_lock;
atomic_int sync;
struct _sys_rpc *rpc;
struct _sys_rpc *rpc_response;
rpc_shutdown_cb shutdown_cb;
};
struct _sys_call_args {
int int_field1;
int int_field2;
unsigned int data_len;
char data[0];
};
/* System call rpc data structure */
struct _sys_rpc {
unsigned int id;
struct _sys_call_args sys_call_args;
};
/* API prototypes */
int rpmsg_retarget_init(struct rpmsg_channel *rp_chnl, rpc_shutdown_cb cb);
int rpmsg_retarget_deinit(struct rpmsg_channel *rp_chnl);
int rpmsg_retarget_send(void *data, int len);
#if defined __cplusplus
}
#endif
#endif /* RPMSG_RETARGET_H */

View file

@ -0,0 +1,40 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef RSC_TABLE_PARSER_H
#define RSC_TABLE_PARSER_H
#include <openamp/remoteproc.h>
#include <openamp/hil.h>
#if defined __cplusplus
extern "C" {
#endif
#define RSC_TAB_SUPPORTED_VERSION 1
#define RSC_TAB_HEADER_SIZE 12
#define RSC_TAB_MAX_VRINGS 2
/* Standard control request handling. */
typedef int (*rsc_handler) (struct remote_proc * rproc, void *rsc);
/* Function prototypes */
int handle_rsc_table(struct remote_proc *rproc,
struct resource_table *rsc_table, int len);
int handle_carve_out_rsc(struct remote_proc *rproc, void *rsc);
int handle_trace_rsc(struct remote_proc *rproc, void *rsc);
int handle_dev_mem_rsc(struct remote_proc *rproc, void *rsc);
int handle_vdev_rsc(struct remote_proc *rproc, void *rsc);
int handle_rproc_mem_rsc(struct remote_proc *rproc, void *rsc);
int handle_fw_chksum_rsc(struct remote_proc *rproc, void *rsc);
int handle_mmu_rsc(struct remote_proc *rproc, void *rsc);
#if defined __cplusplus
}
#endif
#endif /* RSC_TABLE_PARSER_H */

View file

@ -0,0 +1,77 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* sh_mem.c
*
* COMPONENT
*
* IPC Stack for uAMP systems.
*
* DESCRIPTION
*
* Header file for fixed buffer size memory management service. Currently
* it is being used to manage shared memory.
*
**************************************************************************/
#ifndef SH_MEM_H_
#define SH_MEM_H_
#include <metal/mutex.h>
#if defined __cplusplus
extern "C" {
#endif
/* Macros */
#define BITMAP_WORD_SIZE (sizeof(unsigned long) << 3)
#define WORD_SIZE sizeof(unsigned long)
#define WORD_ALIGN(a) (((a) & (WORD_SIZE-1)) != 0)? \
(((a) & (~(WORD_SIZE-1))) + sizeof(unsigned long)):(a)
#define SH_MEM_POOL_LOCATE_BITMAP(pool,idx) ((unsigned char *) pool \
+ sizeof(struct sh_mem_pool) \
+ (BITMAP_WORD_SIZE * idx))
/*
* This structure represents a shared memory pool.
*
* @start_addr - start address of shared memory region
* @lock - lock to ensure exclusive access
 * @size - size of shared memory
* @buff_size - size of each buffer
* @total_buffs - total number of buffers in shared memory region
* @used_buffs - number of used buffers
* @bmp_size - size of bitmap array
*
*/
struct sh_mem_pool {
void *start_addr;
metal_mutex_t lock;
unsigned int size;
unsigned int buff_size;
unsigned int total_buffs;
unsigned int used_buffs;
unsigned int bmp_size;
};
/* APIs */
struct sh_mem_pool *sh_mem_create_pool(void *start_addr, unsigned int size,
unsigned int buff_size);
void sh_mem_delete_pool(struct sh_mem_pool *pool);
void *sh_mem_get_buffer(struct sh_mem_pool *pool);
void sh_mem_free_buffer(void *ptr, struct sh_mem_pool *pool);
int get_first_zero_bit(unsigned long value);
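/*
 * Hypothetical usage sketch (not part of the imported sources): carve a pool
 * of fixed-size buffers out of a caller-provided shared-memory region, then
 * take and return one buffer.  The region and buffer sizes are example values.
 */
static inline int sh_mem_pool_example(void *shm_base)
{
	struct sh_mem_pool *pool;
	void *buf;

	/* Split a 64 KiB region into 512-byte buffers. */
	pool = sh_mem_create_pool(shm_base, 0x10000, 512);
	if (!pool)
		return -1;

	buf = sh_mem_get_buffer(pool);
	if (buf)
		sh_mem_free_buffer(buf, pool);

	sh_mem_delete_pool(pool);
	return 0;
}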
#if defined __cplusplus
}
#endif
#endif /* SH_MEM_H_ */

View file

@ -0,0 +1,138 @@
/*
* SPDX-License-Identifier: BSD-3-Clause
*
* $FreeBSD$
*/
#ifndef _VIRTIO_H_
#define _VIRTIO_H_
#include <openamp/virtqueue.h>
#if defined __cplusplus
extern "C" {
#endif
/* VirtIO device IDs. */
#define VIRTIO_ID_NETWORK 0x01
#define VIRTIO_ID_BLOCK 0x02
#define VIRTIO_ID_CONSOLE 0x03
#define VIRTIO_ID_ENTROPY 0x04
#define VIRTIO_ID_BALLOON 0x05
#define VIRTIO_ID_IOMEMORY 0x06
#define VIRTIO_ID_RPMSG 0x07 /* virtio remote processor messaging */
#define VIRTIO_ID_SCSI 0x08
#define VIRTIO_ID_9P 0x09
/* Status byte for guest to report progress. */
#define VIRTIO_CONFIG_STATUS_ACK 0x01
#define VIRTIO_CONFIG_STATUS_DRIVER 0x02
#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04
#define VIRTIO_CONFIG_STATUS_NEEDS_RESET 0x40
#define VIRTIO_CONFIG_STATUS_FAILED 0x80
/*
* Generate interrupt when the virtqueue ring is
* completely used, even if we've suppressed them.
*/
#define VIRTIO_F_NOTIFY_ON_EMPTY (1 << 24)
/*
* The guest should never negotiate this feature; it
* is used to detect faulty drivers.
*/
#define VIRTIO_F_BAD_FEATURE (1 << 30)
/*
* Some VirtIO feature bits (currently bits 28 through 31) are
* reserved for the transport being used (eg. virtio_ring), the
* rest are per-device feature bits.
*/
#define VIRTIO_TRANSPORT_F_START 28
#define VIRTIO_TRANSPORT_F_END 32
typedef struct _virtio_dispatch_ virtio_dispatch;
struct virtio_feature_desc {
uint32_t vfd_val;
const char *vfd_str;
};
/*
* Structure definition for virtio devices for use by the
* applications/drivers
*
*/
struct virtio_device {
/*
	 * Since there is no generic device structure, keep
	 * its type as void. The driver layer will take
	 * care of it.
*/
void *device;
/* Device name */
char *name;
/* List of virtqueues encapsulated by virtio device. */
//TODO : Need to implement a list service for ipc stack.
void *vq_list;
/* Virtio device specific features */
uint32_t features;
/* Virtio dispatch table */
virtio_dispatch *func;
/*
* Pointer to hold some private data, useful
* in callbacks.
*/
void *data;
};
/*
* Helper functions.
*/
const char *virtio_dev_name(uint16_t devid);
void virtio_describe(struct virtio_device *dev, const char *msg,
uint32_t features,
struct virtio_feature_desc *feature_desc);
/*
* Functions for virtio device configuration as defined in Rusty Russell's paper.
* Drivers are expected to implement these functions in their respective codes.
*
*/
struct _virtio_dispatch_ {
int (*create_virtqueues) (struct virtio_device * dev, int flags,
int nvqs, const char *names[],
vq_callback * callbacks[],
struct virtqueue * vqs[]);
uint8_t(*get_status) (struct virtio_device * dev);
void (*set_status) (struct virtio_device * dev, uint8_t status);
uint32_t(*get_features) (struct virtio_device * dev);
void (*set_features) (struct virtio_device * dev, uint32_t feature);
uint32_t(*negotiate_features) (struct virtio_device * dev,
uint32_t features);
/*
* Read/write a variable amount from the device specific (ie, network)
* configuration region. This region is encoded in the same endian as
* the guest.
*/
void (*read_config) (struct virtio_device * dev, uint32_t offset,
void *dst, int length);
void (*write_config) (struct virtio_device * dev, uint32_t offset,
void *src, int length);
void (*reset_device) (struct virtio_device * dev);
};
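/*
 * Hypothetical usage sketch (not part of the imported sources): a driver
 * filling in the dispatch table.  Only two operations are stubbed here; a
 * real driver would provide every callback its transport needs, and the
 * example_* names are illustrative only.
 */
static uint8_t example_get_status(struct virtio_device *dev)
{
	(void)dev;
	return 0;
}

static void example_set_status(struct virtio_device *dev, uint8_t status)
{
	(void)dev;
	(void)status;
}

static const virtio_dispatch example_dispatch = {
	.get_status = example_get_status,
	.set_status = example_set_status,
	/* remaining callbacks are left NULL in this sketch */
};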
#if defined __cplusplus
}
#endif
#endif /* _VIRTIO_H_ */

View file

@ -0,0 +1,148 @@
/*
* Copyright Rusty Russell IBM Corporation 2007.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* $FreeBSD$
*/
#ifndef VIRTIO_RING_H
#define VIRTIO_RING_H
#if defined __cplusplus
extern "C" {
#endif
/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4
/* The Host uses this in used->flags to advise the Guest: don't kick me
* when you add a buffer. It's unreliable, so it's simply an
* optimization. Guest will still kick if it's out of buffers. */
#define VRING_USED_F_NO_NOTIFY 1
/* The Guest uses this in avail->flags to advise the Host: don't
* interrupt me when you consume a buffer. It's unreliable, so it's
* simply an optimization. */
#define VRING_AVAIL_F_NO_INTERRUPT 1
/* VirtIO ring descriptors: 16 bytes.
* These can chain together via "next". */
struct vring_desc {
/* Address (guest-physical). */
uint64_t addr;
/* Length. */
uint32_t len;
/* The flags as indicated above. */
uint16_t flags;
/* We chain unused descriptors via this, too. */
uint16_t next;
};
struct vring_avail {
uint16_t flags;
uint16_t idx;
uint16_t ring[0];
};
/* uint32_t is used here for ids for padding reasons. */
struct vring_used_elem {
/* Index of start of used descriptor chain. */
uint32_t id;
/* Total length of the descriptor chain which was written to. */
uint32_t len;
};
struct vring_used {
uint16_t flags;
uint16_t idx;
struct vring_used_elem ring[0];
};
struct vring {
unsigned int num;
struct vring_desc *desc;
struct vring_avail *avail;
struct vring_used *used;
};
/* The standard layout for the ring is a continuous chunk of memory which
* looks like this. We assume num is a power of 2.
*
* struct vring {
* // The actual descriptors (16 bytes each)
* struct vring_desc desc[num];
*
* // A ring of available descriptor heads with free-running index.
* __u16 avail_flags;
* __u16 avail_idx;
* __u16 available[num];
* __u16 used_event_idx;
*
* // Padding to the next align boundary.
* char pad[];
*
* // A ring of used descriptor heads with free-running index.
* __u16 used_flags;
* __u16 used_idx;
* struct vring_used_elem used[num];
* __u16 avail_event_idx;
* };
*
* NOTE: for VirtIO PCI, align is 4096.
*/
/*
* We publish the used event index at the end of the available ring, and vice
* versa. They are at the end for backwards compatibility.
*/
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) ((vr)->used->ring[(vr)->num].id & 0xFFFF)
static inline int vring_size(unsigned int num, unsigned long align)
{
int size;
size = num * sizeof(struct vring_desc);
size += sizeof(struct vring_avail) + (num * sizeof(uint16_t)) +
sizeof(uint16_t);
size = (size + align - 1) & ~(align - 1);
size += sizeof(struct vring_used) +
(num * sizeof(struct vring_used_elem)) + sizeof(uint16_t);
return (size);
}
static inline void
vring_init(struct vring *vr, unsigned int num, uint8_t * p, unsigned long align)
{
vr->num = num;
vr->desc = (struct vring_desc *)p;
vr->avail = (struct vring_avail *)(p + num * sizeof(struct vring_desc));
vr->used = (struct vring_used *)
(((unsigned long)&vr->avail->ring[num] + align - 1) & ~(align - 1));
}
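/*
 * Hypothetical usage sketch (not part of the imported sources): size and lay
 * out a 256-descriptor ring with a 4096-byte alignment inside caller-provided
 * memory.  The memory is assumed to be large enough and suitably aligned.
 */
static inline void vring_layout_example(uint8_t *ring_mem)
{
	struct vring vr;
	int needed = vring_size(256, 4096);	/* bytes required for the ring */

	(void)needed;	/* the caller must have reserved at least this much */
	vring_init(&vr, 256, ring_mem, 4096);
}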
/*
* The following is used with VIRTIO_RING_F_EVENT_IDX.
*
 * Assuming a given event_idx value from the other side, if we have
* just incremented index from old to new_idx, should we trigger an
* event?
*/
static inline int
vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
return (uint16_t) (new_idx - event_idx - 1) <
(uint16_t) (new_idx - old);
}
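/*
 * Hypothetical usage sketch (not part of the imported sources): after adding
 * buffers and bumping the avail index from old_idx to new_idx, only notify
 * the other side if it asked to be woken somewhere in that range.  The notify
 * callback is caller-supplied.
 */
static inline void vring_maybe_notify(struct vring *vr, uint16_t old_idx,
				      uint16_t new_idx, void (*notify)(void))
{
	if (vring_need_event(vring_avail_event(vr), new_idx, old_idx))
		notify();
}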
#if defined __cplusplus
}
#endif
#endif /* VIRTIO_RING_H */

View file

@ -0,0 +1,217 @@
#ifndef VIRTQUEUE_H_
#define VIRTQUEUE_H_
/*-
* Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* SPDX-License-Identifier: BSD-2-Clause
*
* $FreeBSD$
*/
#include <stdbool.h>
#include <stdint.h>
#if defined __cplusplus
extern "C" {
#endif
typedef uint8_t boolean;
#include <openamp/virtio_ring.h>
#include <metal/dma.h>
#include <metal/io.h>
/*Error Codes*/
#define VQ_ERROR_BASE -3000
#define ERROR_VRING_FULL (VQ_ERROR_BASE - 1)
#define ERROR_INVLD_DESC_IDX (VQ_ERROR_BASE - 2)
#define ERROR_EMPTY_RING (VQ_ERROR_BASE - 3)
#define ERROR_NO_MEM (VQ_ERROR_BASE - 4)
#define ERROR_VRING_MAX_DESC (VQ_ERROR_BASE - 5)
#define ERROR_VRING_ALIGN (VQ_ERROR_BASE - 6)
#define ERROR_VRING_NO_BUFF (VQ_ERROR_BASE - 7)
#define ERROR_VQUEUE_INVLD_PARAM (VQ_ERROR_BASE - 8)
#define VQUEUE_SUCCESS 0
#define VQUEUE_DEBUG false
/* The maximum virtqueue size is 2^15. Use that value as the end of
* descriptor chain terminator since it will never be a valid index
* in the descriptor table. This is used to verify we are correctly
* handling vq_free_cnt.
*/
#define VQ_RING_DESC_CHAIN_END 32768
#define VIRTQUEUE_FLAG_INDIRECT 0x0001
#define VIRTQUEUE_FLAG_EVENT_IDX 0x0002
#define VIRTQUEUE_MAX_NAME_SZ 32
/* Support for indirect buffer descriptors. */
#define VIRTIO_RING_F_INDIRECT_DESC (1 << 28)
/* Support to suppress interrupt until specific index is reached. */
#define VIRTIO_RING_F_EVENT_IDX (1 << 29)
/*
* Hint on how long the next interrupt should be postponed. This is
* only used when the EVENT_IDX feature is negotiated.
*/
typedef enum {
VQ_POSTPONE_SHORT,
VQ_POSTPONE_LONG,
VQ_POSTPONE_EMPTIED /* Until all available desc are used. */
} vq_postpone_t;
struct virtqueue {
struct virtio_device *vq_dev;
char vq_name[VIRTQUEUE_MAX_NAME_SZ];
uint16_t vq_queue_index;
uint16_t vq_nentries;
uint32_t vq_flags;
void (*callback) (struct virtqueue * vq);
void (*notify) (struct virtqueue * vq);
struct vring vq_ring;
uint16_t vq_free_cnt;
uint16_t vq_queued_cnt;
/** Shared memory I/O region */
struct metal_io_region *shm_io;
/*
* Head of the free chain in the descriptor table. If
* there are no free descriptors, this will be set to
* VQ_RING_DESC_CHAIN_END.
*/
uint16_t vq_desc_head_idx;
/*
* Last consumed descriptor in the used table,
* trails vq_ring.used->idx.
*/
uint16_t vq_used_cons_idx;
/*
* Last consumed descriptor in the available table -
* used by the consumer side.
*/
uint16_t vq_available_idx;
#if (VQUEUE_DEBUG == true)
boolean vq_inuse;
#endif
/*
	 * Used by the host side during the callback. Cookie
	 * holds the address of the buffer received from the other side.
	 * Other fields in this structure are not used currently.
*/
struct vq_desc_extra {
void *cookie;
uint16_t ndescs;
} vq_descx[0];
};
/* struct to hold vring specific information */
struct vring_alloc_info {
void *vaddr;
uint32_t align;
uint16_t num_descs;
uint16_t pad;
};
typedef void vq_callback(struct virtqueue *);
typedef void vq_notify(struct virtqueue *);
#if (VQUEUE_DEBUG == true)
#include <metal/log.h>
#include <metal/assert.h>
#define VQASSERT(_vq, _exp, _msg) \
do{ \
if (!(_exp)){ \
metal_log(METAL_LOG_EMERGENCY, \
"%s: %s - "_msg, \
__func__, \
(_vq)->vq_name); \
metal_assert(_exp); \
} \
} while(0)
#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx) \
VQASSERT((_vq), (_idx) < (_vq)->vq_nentries, \
"invalid ring index")
#define VQ_RING_ASSERT_CHAIN_TERM(_vq) \
VQASSERT((_vq), (_vq)->vq_desc_head_idx == \
VQ_RING_DESC_CHAIN_END, "full ring terminated incorrectly: invalid head")
#define VQ_PARAM_CHK(condition, status_var, status_err) \
if ((status_var == 0) && (condition)) \
{ \
status_var = status_err; \
}
#define VQUEUE_BUSY(vq) if ((vq)->vq_inuse == false) \
(vq)->vq_inuse = true; \
else \
VQASSERT(vq, (vq)->vq_inuse == false, \
"VirtQueue already in use")
#define VQUEUE_IDLE(vq) ((vq)->vq_inuse = false)
#else
#define KASSERT(cond, str)
#define VQASSERT(_vq, _exp, _msg)
#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)
#define VQ_RING_ASSERT_CHAIN_TERM(_vq)
#define VQ_PARAM_CHK(condition, status_var, status_err)
#define VQUEUE_BUSY(vq)
#define VQUEUE_IDLE(vq)
#endif
int virtqueue_create(struct virtio_device *device, unsigned short id,
const char *name, struct vring_alloc_info *ring,
void (*callback) (struct virtqueue * vq),
void (*notify) (struct virtqueue * vq),
struct metal_io_region *shm_io,
struct virtqueue **v_queue);
int virtqueue_add_buffer(struct virtqueue *vq, struct metal_sg *sg,
int readable, int writable, void *cookie);
int virtqueue_add_single_buffer(struct virtqueue *vq, void *cookie,
struct metal_sg *sg, int writable,
boolean has_next);
void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t * len, uint16_t *idx);
void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t * avail_idx,
uint32_t * len);
int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx,
uint32_t len);
void virtqueue_disable_cb(struct virtqueue *vq);
int virtqueue_enable_cb(struct virtqueue *vq);
void virtqueue_kick(struct virtqueue *vq);
void virtqueue_free(struct virtqueue *vq);
void virtqueue_dump(struct virtqueue *vq);
void virtqueue_notification(struct virtqueue *vq);
uint32_t virtqueue_get_desc_size(struct virtqueue *vq);
uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx);
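/*
 * Hypothetical usage sketch (not part of the imported sources): create a
 * virtqueue on top of a pre-allocated, suitably aligned ring.  The queue
 * index, name, descriptor count and alignment are example values; cb and
 * notify are caller-supplied handlers.
 */
static inline int vq_create_example(struct virtio_device *vdev, void *ring_mem,
				    struct metal_io_region *shm_io,
				    vq_callback *cb, vq_notify *notify)
{
	struct vring_alloc_info ring = {
		.vaddr = ring_mem,
		.align = 4096,
		.num_descs = 256,
		.pad = 0,
	};
	struct virtqueue *vq;

	return virtqueue_create(vdev, 0, "vq0", &ring, cb, notify, shm_io, &vq);
}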
#if defined __cplusplus
}
#endif
#endif /* VIRTQUEUE_H_ */

View file

@ -0,0 +1 @@
collect (PROJECT_LIB_SOURCES rpmsg_retarget.c)

View file

@ -0,0 +1,270 @@
#include <openamp/open_amp.h>
#include <openamp/rpmsg_retarget.h>
#include <metal/alloc.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
/*************************************************************************
* Description
 * This file contains rpmsg-based redefinitions of the C RTL system calls
 * _open, _read, _write, and _close.
*************************************************************************/
static struct _rpc_data *rpc_data = 0;
int send_rpc(void *data, int len);
void rpc_cb(struct rpmsg_channel *rtl_rp_chnl, void *data, int len, void *priv,
unsigned long src)
{
(void)priv;
(void)src;
memcpy(rpc_data->rpc_response, data, len);
atomic_flag_clear(&rpc_data->sync);
if (rpc_data->rpc_response->id == TERM_SYSCALL_ID) {
		/* The application terminate signal has been received from the
		 * proxy app, so notify the application of the terminate message.
*/
rpc_data->shutdown_cb(rtl_rp_chnl);
}
}
int send_rpc(void *data, int len)
{
int retval;
retval = rpmsg_sendto(rpc_data->rpmsg_chnl, data, len, PROXY_ENDPOINT);
return retval;
}
int rpmsg_retarget_init(struct rpmsg_channel *rp_chnl, rpc_shutdown_cb cb)
{
/* Allocate memory for rpc control block */
rpc_data = (struct _rpc_data *)metal_allocate_memory(sizeof(struct _rpc_data));
/* Create a mutex for synchronization */
metal_mutex_init(&rpc_data->rpc_lock);
	/* Initialize the synchronization flag */
atomic_store(&rpc_data->sync, 1);
	/* Create an endpoint to handle rpc responses from the master */
rpc_data->rpmsg_chnl = rp_chnl;
rpc_data->rp_ept = rpmsg_create_ept(rpc_data->rpmsg_chnl, rpc_cb,
RPMSG_NULL, PROXY_ENDPOINT);
rpc_data->rpc = metal_allocate_memory(RPC_BUFF_SIZE);
rpc_data->rpc_response = metal_allocate_memory(RPC_BUFF_SIZE);
rpc_data->shutdown_cb = cb;
return 0;
}
int rpmsg_retarget_deinit(struct rpmsg_channel *rp_chnl)
{
(void)rp_chnl;
metal_free_memory(rpc_data->rpc);
metal_free_memory(rpc_data->rpc_response);
metal_mutex_deinit(&rpc_data->rpc_lock);
rpmsg_destroy_ept(rpc_data->rp_ept);
metal_free_memory(rpc_data);
rpc_data = NULL;
return 0;
}
int rpmsg_retarget_send(void *data, int len)
{
return send_rpc(data, len);
}
static inline void rpmsg_retarget_wait(struct _rpc_data *rpc)
{
struct hil_proc *proc = rpc->rpmsg_chnl->rdev->proc;
while (atomic_flag_test_and_set(&rpc->sync)) {
hil_poll(proc, 0);
}
}
/*************************************************************************
*
* FUNCTION
*
* _open
*
* DESCRIPTION
*
* Open a file. Minimal implementation
*
*************************************************************************/
int _open(const char *filename, int flags, int mode)
{
	int filename_len;
	int payload_size;
	int retval = -1;

	/* Validate the filename before dereferencing it. */
	if (!filename)
		return -1;
	filename_len = strlen(filename) + 1;
	if (filename_len > FILE_NAME_LEN)
		return -1;
	payload_size = sizeof(struct _sys_rpc) + filename_len;
	if (!rpc_data)
		return retval;
/* Construct rpc payload */
rpc_data->rpc->id = OPEN_SYSCALL_ID;
rpc_data->rpc->sys_call_args.int_field1 = flags;
rpc_data->rpc->sys_call_args.int_field2 = mode;
rpc_data->rpc->sys_call_args.data_len = filename_len;
memcpy(&rpc_data->rpc->sys_call_args.data, filename, filename_len);
/* Transmit rpc request */
metal_mutex_acquire(&rpc_data->rpc_lock);
send_rpc((void *)rpc_data->rpc, payload_size);
metal_mutex_release(&rpc_data->rpc_lock);
/* Wait for response from proxy on master */
rpmsg_retarget_wait(rpc_data);
/* Obtain return args and return to caller */
if (rpc_data->rpc_response->id == OPEN_SYSCALL_ID) {
retval = rpc_data->rpc_response->sys_call_args.int_field1;
}
return retval;
}
/*************************************************************************
*
* FUNCTION
*
* _read
*
* DESCRIPTION
*
* Low level function to redirect IO to serial.
*
*************************************************************************/
int _read(int fd, char *buffer, int buflen)
{
int payload_size = sizeof(struct _sys_rpc);
int retval = -1;
if (!buffer || !buflen)
return retval;
if (!rpc_data)
return retval;
/* Construct rpc payload */
rpc_data->rpc->id = READ_SYSCALL_ID;
rpc_data->rpc->sys_call_args.int_field1 = fd;
rpc_data->rpc->sys_call_args.int_field2 = buflen;
rpc_data->rpc->sys_call_args.data_len = 0; /*not used */
/* Transmit rpc request */
metal_mutex_acquire(&rpc_data->rpc_lock);
send_rpc((void *)rpc_data->rpc, payload_size);
metal_mutex_release(&rpc_data->rpc_lock);
/* Wait for response from proxy on master */
rpmsg_retarget_wait(rpc_data);
/* Obtain return args and return to caller */
if (rpc_data->rpc_response->id == READ_SYSCALL_ID) {
if (rpc_data->rpc_response->sys_call_args.int_field1 > 0) {
memcpy(buffer,
rpc_data->rpc_response->sys_call_args.data,
rpc_data->rpc_response->sys_call_args.data_len);
}
retval = rpc_data->rpc_response->sys_call_args.int_field1;
}
return retval;
}
/*************************************************************************
*
* FUNCTION
*
* _write
*
* DESCRIPTION
*
* Low level function to redirect IO to serial.
*
*************************************************************************/
int _write(int fd, const char *ptr, int len)
{
int retval = -1;
int payload_size = sizeof(struct _sys_rpc) + len;
int null_term = 0;
if (fd == 1) {
null_term = 1;
}
if (!rpc_data)
return retval;
rpc_data->rpc->id = WRITE_SYSCALL_ID;
rpc_data->rpc->sys_call_args.int_field1 = fd;
rpc_data->rpc->sys_call_args.int_field2 = len;
rpc_data->rpc->sys_call_args.data_len = len + null_term;
memcpy(rpc_data->rpc->sys_call_args.data, ptr, len);
	if (null_term) {
		/* Terminate stdout data; the extra byte is included in data_len. */
		*((char *)rpc_data->rpc->sys_call_args.data + len) = 0;
	}
metal_mutex_acquire(&rpc_data->rpc_lock);
send_rpc((void *)rpc_data->rpc, payload_size);
metal_mutex_release(&rpc_data->rpc_lock);
/* Wait for response from proxy on master */
rpmsg_retarget_wait(rpc_data);
if (rpc_data->rpc_response->id == WRITE_SYSCALL_ID) {
retval = rpc_data->rpc_response->sys_call_args.int_field1;
}
return retval;
}
/*************************************************************************
*
* FUNCTION
*
* _close
*
* DESCRIPTION
*
* Close a file. Minimal implementation
*
*************************************************************************/
int _close(int fd)
{
int payload_size = sizeof(struct _sys_rpc);
int retval = -1;
if (!rpc_data)
return retval;
rpc_data->rpc->id = CLOSE_SYSCALL_ID;
rpc_data->rpc->sys_call_args.int_field1 = fd;
rpc_data->rpc->sys_call_args.int_field2 = 0; /*not used */
rpc_data->rpc->sys_call_args.data_len = 0; /*not used */
metal_mutex_acquire(&rpc_data->rpc_lock);
send_rpc((void *)rpc_data->rpc, payload_size);
metal_mutex_release(&rpc_data->rpc_lock);
/* Wait for response from proxy on master */
rpmsg_retarget_wait(rpc_data);
if (rpc_data->rpc_response->id == CLOSE_SYSCALL_ID) {
retval = rpc_data->rpc_response->sys_call_args.int_field1;
}
return retval;
}

View file

@ -0,0 +1,5 @@
collect (PROJECT_LIB_SOURCES elf_loader.c)
collect (PROJECT_LIB_SOURCES remoteproc.c)
collect (PROJECT_LIB_SOURCES remoteproc_loader.c)
collect (PROJECT_LIB_SOURCES rsc_table_parser.c)
add_subdirectory (drivers)

View file

@ -0,0 +1,16 @@
if ("${MACHINE}" STREQUAL "zynqmp_r5")
collect (PROJECT_LIB_SOURCES zynqmp_remoteproc_a53.c)
endif ("${MACHINE}" STREQUAL "zynqmp_r5")
if ("${MACHINE}" STREQUAL "zynq7")
collect (PROJECT_LIB_SOURCES zynq_remoteproc_a9.c)
collect (PROJECT_LIB_SOURCES zynq_a9_trampoline.S)
endif ("${MACHINE}" STREQUAL "zynq7")
if ("${MACHINE}" STREQUAL "zynqmp")
collect (PROJECT_LIB_SOURCES zynqmp_remoteproc_r5.c)
endif ("${MACHINE}" STREQUAL "zynqmp")
if ("${PROJECT_SYSTEM}" STREQUAL "linux")
collect (PROJECT_LIB_SOURCES linux_remoteproc.c)
endif ("${PROJECT_SYSTEM}" STREQUAL "linux")

View file

@ -0,0 +1,352 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2016 Xilinx, Inc.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* zynqmp_remoteproc_r5.c
*
* DESCRIPTION
*
 * This file is the implementation of the IPC hardware layer interface
 * for the Xilinx Zynq UltraScale+ MPSoC system.
*
**************************************************************************/
#include <errno.h>
#include <string.h>
#include <stdio.h>
#include <poll.h>
#include <metal/io.h>
#include <metal/device.h>
#include <metal/utilities.h>
#include <metal/atomic.h>
#include <metal/irq.h>
#include <metal/cpu.h>
#include <metal/alloc.h>
#include <metal/assert.h>
#include <metal/shmem.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <openamp/hil.h>
#include <openamp/virtqueue.h>
#define MAX_VRING_MEM_SIZE 0x20000
#define _rproc_wait() metal_cpu_yield()
#define UNIX_PREFIX "unix:"
#define UNIXS_PREFIX "unixs:"
struct vring_ipi_info {
/* Socket file path */
char *path;
int fd;
struct metal_io_region *vring_io;
atomic_int sync;
};
/*--------------------------- Declare Functions ------------------------ */
static int _ipi_handler(int vect_id, void *data);
static int _enable_interrupt(struct proc_intr *intr);
static void _notify(struct hil_proc *proc, struct proc_intr *intr_info);
static int _boot_cpu(struct hil_proc *proc, unsigned int load_addr);
static void _shutdown_cpu(struct hil_proc *proc);
static int _poll(struct hil_proc *proc, int nonblock);
static int _initialize(struct hil_proc *proc);
static void _release(struct hil_proc *proc);
static struct metal_io_region* _alloc_shm(struct hil_proc *proc,
metal_phys_addr_t pa,
size_t size,
struct metal_device **dev);
static void _release_shm(struct hil_proc *proc,
struct metal_device *dev,
struct metal_io_region *io);
/*--------------------------- Globals ---------------------------------- */
struct hil_platform_ops linux_proc_ops = {
.enable_interrupt = _enable_interrupt,
.notify = _notify,
.boot_cpu = _boot_cpu,
.shutdown_cpu = _shutdown_cpu,
.poll = _poll,
.alloc_shm = _alloc_shm,
.release_shm = _release_shm,
.initialize = _initialize,
.release = _release,
};
static int sk_unix_client(const char *descr)
{
struct sockaddr_un addr;
int fd;
fd = socket(AF_UNIX, SOCK_STREAM, 0);
memset(&addr, 0, sizeof addr);
addr.sun_family = AF_UNIX;
strncpy(addr.sun_path, descr + strlen(UNIX_PREFIX),
sizeof addr.sun_path);
if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) >= 0) {
printf("connected to %s\n", descr + strlen(UNIX_PREFIX));
return fd;
}
close(fd);
return -1;
}
static int sk_unix_server(const char *descr)
{
struct sockaddr_un addr;
int fd, nfd;
fd = socket(AF_UNIX, SOCK_STREAM, 0);
addr.sun_family = AF_UNIX;
strncpy(addr.sun_path, descr + strlen(UNIXS_PREFIX),
sizeof addr.sun_path);
unlink(addr.sun_path);
if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
goto fail;
}
listen(fd, 5);
printf("Waiting for connection on %s\n", addr.sun_path);
nfd = accept(fd, NULL, NULL);
close(fd);
return nfd;
fail:
close(fd);
return -1;
}
static int event_open(const char *descr)
{
int fd = -1;
int i;
if (descr == NULL) {
return fd;
}
if (memcmp(UNIX_PREFIX, descr, strlen(UNIX_PREFIX)) == 0) {
/* UNIX. Retry to connect a few times to give the peer a
* chance to setup. */
for (i = 0; i < 100 && fd == -1; i++) {
fd = sk_unix_client(descr);
if (fd == -1)
usleep(i * 10 * 1000);
}
}
if (memcmp(UNIXS_PREFIX, descr, strlen(UNIXS_PREFIX)) == 0) {
/* UNIX. */
fd = sk_unix_server(descr);
}
printf("Open IPI: %s\n", descr);
return fd;
}
static int _ipi_handler(int vect_id, void *data)
{
char dummy_buf[32];
struct proc_intr *intr = data;
struct vring_ipi_info *ipi = intr->data;
(void) vect_id;
read(vect_id, dummy_buf, sizeof(dummy_buf));
atomic_flag_clear(&ipi->sync);
return 0;
}
static int _enable_interrupt(struct proc_intr *intr)
{
struct vring_ipi_info *ipi = intr->data;
ipi->fd = event_open(ipi->path);
if (ipi->fd < 0) {
fprintf(stderr, "ERROR: Failed to open sock %s for IPI.\n",
ipi->path);
return -1;
}
intr->vect_id = ipi->fd;
/* Register ISR */
metal_irq_register(ipi->fd, _ipi_handler,
NULL, intr);
return 0;
}
static void _notify(struct hil_proc *proc, struct proc_intr *intr_info)
{
(void)proc;
struct vring_ipi_info *ipi = (struct vring_ipi_info *)(intr_info->data);
if (ipi == NULL)
return;
char dummy = 1;
send(ipi->fd, &dummy, 1, MSG_NOSIGNAL);
}
static int _boot_cpu(struct hil_proc *proc, unsigned int load_addr)
{
(void)proc;
(void)load_addr;
return -1;
}
static void _shutdown_cpu(struct hil_proc *proc)
{
(void)proc;
return;
}
static struct metal_io_region* _alloc_shm(struct hil_proc *proc,
metal_phys_addr_t pa,
size_t size,
struct metal_device **dev)
{
(void)proc;
(void)pa;
(void)size;
*dev = NULL;
return NULL;
}
static void _release_shm(struct hil_proc *proc,
struct metal_device *dev,
struct metal_io_region *io)
{
(void)proc;
(void)io;
hil_close_generic_mem_dev(dev);
}
static int _poll(struct hil_proc *proc, int nonblock)
{
(void) nonblock;
struct proc_vring *vring;
struct vring_ipi_info *ipi;
unsigned int flags;
int num_vrings = proc->vdev.num_vrings;
int ret = 0;
int notified;
int i;
metal_assert(proc);
notified = 0;
while (1) {
for (i = 0; i < num_vrings; i++) {
vring = &proc->vdev.vring_info[i];
ipi = (struct vring_ipi_info *)(vring->intr_info.data);
flags = metal_irq_save_disable();
if (!(atomic_flag_test_and_set(&ipi->sync))) {
metal_irq_restore_enable(flags);
virtqueue_notification(vring->vq);
notified = 1;
} else {
metal_irq_restore_enable(flags);
}
}
if (notified)
return 0;
if (nonblock)
return -EAGAIN;
_rproc_wait();
}
return ret;
}
/**
 * @brief _create_vring_io - Create a vring I/O region mapped to the
 * specified start device address.
 * @param[in] in_io - vring I/O region to adjust
 * @param[in] start_phy - start device address of the vring; this is
 * not the actual physical address.
 * @return newly allocated, adjusted I/O region
*/
static struct metal_io_region *_create_vring_io(struct metal_io_region *in_io,
int start_phy)
{
struct metal_io_region *io = 0;
metal_phys_addr_t *phys;
io = metal_allocate_memory(sizeof(struct metal_io_region));
if (!io) {
fprintf(stderr, "ERROR: Failed to allocation I/O for vring.\n");
return NULL;
}
phys = metal_allocate_memory(sizeof(metal_phys_addr_t));
if (!phys) {
fprintf(stderr, "ERROR: Failed to allocation phys for vring.\n");
metal_free_memory(io);
return NULL;
}
*phys = (metal_phys_addr_t)start_phy;
metal_io_init(io, in_io->virt, phys, in_io->size,
sizeof(metal_phys_addr_t)*8 - 1, 0, NULL);
return io;
}
static int _initialize(struct hil_proc *proc)
{
struct proc_vring *vring;
struct vring_ipi_info *ipi;
struct metal_io_region *io;
int i;
if (proc) {
for (i = 0; i < 2; i++) {
vring = &proc->vdev.vring_info[i];
ipi = (struct vring_ipi_info *)vring->intr_info.data;
if (ipi && !ipi->vring_io && vring->io) {
io = _create_vring_io(vring->io, 0);
if (!io)
return -1;
ipi->vring_io = vring->io;
vring->io = io;
atomic_store(&ipi->sync, 1);
}
}
}
return 0;
}
static void _release(struct hil_proc *proc)
{
struct proc_vring *vring;
struct vring_ipi_info *ipi;
int i;
if (proc) {
for (i = 0; i < 2; i++) {
vring = &proc->vdev.vring_info[i];
ipi = (struct vring_ipi_info *)vring->intr_info.data;
if (ipi) {
if (ipi->fd >= 0) {
metal_irq_unregister(ipi->fd, 0, NULL,
vring);
close(ipi->fd);
}
if (ipi->vring_io) {
metal_free_memory(vring->io->physmap);
metal_free_memory(vring->io);
vring->io = NULL;
if (ipi->vring_io->ops.close)
ipi->vring_io->ops.close(ipi->vring_io);
ipi->vring_io = NULL;
}
}
}
}
}

View file

@ -0,0 +1,16 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
.global zynq_trampoline
zynq_trampoline:
ldr r0, [pc]
bx r0
.global zynq_trampoline_jump
zynq_trampoline_jump:
.word
.global zynq_trampoline_end
zynq_trampoline_end:

View file

@ -0,0 +1,312 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* platform.c
*
* DESCRIPTION
*
 * This file is the implementation of the IPC hardware layer interface
 * for the Xilinx Zynq ZC702EVK platform.
*
**************************************************************************/
#include <string.h>
#include <errno.h>
#include <openamp/hil.h>
#include <metal/alloc.h>
#include <metal/irq.h>
#include <metal/atomic.h>
/* ------------------------- Macros --------------------------*/
#define SCUGIC_PERIPH_BASE 0xF8F00000
#define SCUGIC_DIST_BASE (SCUGIC_PERIPH_BASE + 0x00001000)
#define ESAL_DP_SLCR_BASE 0xF8000000
#define GIC_DIST_SOFTINT 0xF00
#define GIC_SFI_TRIG_CPU_MASK 0x00FF0000
#define GIC_SFI_TRIG_SATT_MASK 0x00008000
#define GIC_SFI_TRIG_INTID_MASK 0x0000000F
#define GIC_CPU_ID_BASE (1 << 4)
#define A9_CPU_SLCR_RESET_CTRL 0x244
#define A9_CPU_SLCR_CLK_STOP (1 << 4)
#define A9_CPU_SLCR_RST (1 << 0)
#define unlock_slcr() HIL_MEM_WRITE32(ESAL_DP_SLCR_BASE + 0x08, 0xDF0DDF0D)
#define lock_slcr() HIL_MEM_WRITE32(ESAL_DP_SLCR_BASE + 0x04, 0x767B767B)
/* L2Cpl310 L2 cache controller base address. */
#define HIL_PL130_BASE 0xF8F02000
/********************/
/* Register offsets */
/********************/
#define HIL_PL130_INVALLINE 0x770
#define HIL_PL130_CLEANINVLINE 0x7F0
#define HIL_PA_SBZ_MASK ~(HIL_CACHE_LINE_SIZE - 1UL)
#define HIL_CACHE_LINE_SIZE 32
#define HIL_CACHE_INV_ALL_WAYS 0xFF
#define HIL_CACHE_UNLOCK_ALL_WAYS 0xFFFF0000
#define HIL_CACHE_CLEAR_INT 0x1FF
/* Memory attributes */
#define NORM_NONCACHE 0x11DE2 /* Normal Non-cacheable */
#define STRONG_ORDERED 0xC02 /* Strongly ordered */
#define DEVICE_MEMORY 0xC06 /* Device memory */
#define RESERVED 0x0 /* reserved memory */
#define HIL_DEV_NAME_PREFIX "hil-dev."
#define _rproc_wait() asm volatile("wfi")
/*--------------------------- Declare Functions ------------------------ */
static int _enable_interrupt(struct proc_intr *intr);
static void _notify(struct hil_proc *proc, struct proc_intr *intr_info);
static int _boot_cpu(struct hil_proc *proc, unsigned int load_addr);
static void _shutdown_cpu(struct hil_proc *proc);
static int _poll(struct hil_proc *proc, int nonblock);
static int _initialize(struct hil_proc *proc);
static void _release(struct hil_proc *proc);
static int _ipi_handler(int vect_id, void *data);
static struct metal_io_region* _alloc_shm(struct hil_proc *proc,
metal_phys_addr_t pa,
size_t size,
struct metal_device **dev);
static void _release_shm(struct hil_proc *proc,
struct metal_device *dev,
struct metal_io_region *io);
/*--------------------------- Globals ---------------------------------- */
struct hil_platform_ops zynq_a9_proc_ops = {
.enable_interrupt = _enable_interrupt,
.notify = _notify,
.boot_cpu = _boot_cpu,
.shutdown_cpu = _shutdown_cpu,
.poll = _poll,
.alloc_shm = _alloc_shm,
.release_shm = _release_shm,
.initialize = _initialize,
.release = _release,
};
struct hil_mem_device {
struct metal_device device;
char name[64];
metal_phys_addr_t pa;
};
static metal_phys_addr_t git_dist_base_addr = SCUGIC_DIST_BASE;
static struct metal_io_region gic_dist_io = {
(void *)SCUGIC_DIST_BASE,
&git_dist_base_addr,
0x1000,
(sizeof(metal_phys_addr_t) << 3),
(metal_phys_addr_t)(-1),
0,
{NULL},
};
//volatile unsigned int ipi_counter = 0;
//volatile unsigned int enableirq_counter = 0;
int _ipi_handler(int vect_id, void *data)
{
struct proc_intr *intr_info = data;
(void) vect_id;
atomic_flag_clear((atomic_uint *)&(intr_info->data));
//ipi_counter++;
return 0;
}
static int _enable_interrupt(struct proc_intr *intr)
{
//enableirq_counter++;
/* Register ISR */
metal_irq_register(intr->vect_id, _ipi_handler,
intr->dev, intr);
/* Enable the interrupts */
metal_irq_enable(intr->vect_id);
	/* FIXME: This is a workaround for Zynq, as Linux may have
	 * already generated the soft IRQ.
	 */
atomic_flag_clear((atomic_uint *)&(intr->data));
return 0;
}
static void _notify(struct hil_proc *proc, struct proc_intr *intr_info)
{
unsigned long mask = 0;
mask = ((1 << (GIC_CPU_ID_BASE + proc->cpu_id)) | (intr_info->vect_id))
& (GIC_SFI_TRIG_CPU_MASK | GIC_SFI_TRIG_INTID_MASK);
/* Trigger IPI */
metal_io_write32(&gic_dist_io, GIC_DIST_SOFTINT, mask);
}
static int _poll(struct hil_proc *proc, int nonblock)
{
struct proc_vring *vring;
unsigned int flags;
struct proc_intr *intr_info;
int i = 0;
int kicked = 0;
while(1) {
vring = &proc->vdev.vring_info[i];
intr_info = &(vring->intr_info);
flags = metal_irq_save_disable();
if (!(atomic_flag_test_and_set(
(atomic_uint *)&(intr_info->data)))) {
metal_irq_restore_enable(flags);
virtqueue_notification(vring->vq);
kicked = 1;
if (i)
return 0;
i++;
} else if (!i) {
metal_irq_restore_enable(flags);
i++;
} else {
if (kicked) {
metal_irq_restore_enable(flags);
return 0;
} else if (nonblock) {
metal_irq_restore_enable(flags);
return -EAGAIN;
} else {
_rproc_wait();
metal_irq_restore_enable(flags);
i--;
continue;
}
}
}
}
extern char zynq_trampoline;
extern char zynq_trampoline_jump;
extern char zynq_trampoline_end;
static int _boot_cpu(struct hil_proc *proc, unsigned int load_addr)
{
/* FIXME: Will need to add the boot_cpu implementation back */
#if 0
unsigned int reg;
unsigned int tramp_size;
unsigned int tramp_addr = 0;
if (load_addr) {
tramp_size = zynq_trampoline_end - zynq_trampoline;
if ((load_addr < tramp_size) || (load_addr & 0x3)) {
return -1;
}
tramp_size = &zynq_trampoline_jump - &zynq_trampoline;
		/*
		 * Trampoline code is copied to address 0, from where the remote core is
		 * expected to fetch its first instruction after reset. If the master is
		 * using address 0, this memory copy would corrupt the system; it is the
		 * user's responsibility not to copy the trampoline code in such cases.
		 */
memcpy((char *)tramp_addr, &zynq_trampoline, tramp_size);
/* Write image address at the word reserved at the trampoline end */
HIL_MEM_WRITE32((char *)(tramp_addr + tramp_size), load_addr);
}
unlock_slcr();
reg = HIL_MEM_READ32(ESAL_DP_SLCR_BASE + A9_CPU_SLCR_RESET_CTRL);
reg &= ~(A9_CPU_SLCR_CLK_STOP << cpu_id);
HIL_MEM_WRITE32(ESAL_DP_SLCR_BASE + A9_CPU_SLCR_RESET_CTRL, reg);
/* De-assert reset signal and start clock to start the core */
reg &= ~(A9_CPU_SLCR_RST << cpu_id);
HIL_MEM_WRITE32(ESAL_DP_SLCR_BASE + A9_CPU_SLCR_RESET_CTRL, reg);
lock_slcr();
#else
(void)proc;
(void)load_addr;
#endif
return 0;
}
static void _shutdown_cpu(struct hil_proc *proc)
{
/* FIXME: Will need to add the shutdown CPU implementation back */
#if 0
unsigned int reg;
unlock_slcr();
reg = HIL_MEM_READ32(ESAL_DP_SLCR_BASE + A9_CPU_SLCR_RESET_CTRL);
/* Assert reset signal and stop clock to halt the core */
reg |= (A9_CPU_SLCR_CLK_STOP | A9_CPU_SLCR_RST) << cpu_id;
HIL_MEM_WRITE32(ESAL_DP_SLCR_BASE + A9_CPU_SLCR_RESET_CTRL, reg);
lock_slcr();
#else
(void)proc;
#endif
}
static struct metal_io_region* _alloc_shm(struct hil_proc *proc,
metal_phys_addr_t pa,
size_t size,
struct metal_device **dev)
{
(void)proc;
*dev = hil_create_generic_mem_dev(pa, size,
NORM_NONCACHE | STRONG_ORDERED);
if ((*dev))
return &((*dev)->regions[0]);
return NULL;
}
static void _release_shm(struct hil_proc *proc,
struct metal_device *dev,
struct metal_io_region *io)
{
(void)proc;
(void)io;
hil_close_generic_mem_dev(dev);
}
static int _initialize(struct hil_proc *proc)
{
int i;
struct proc_intr *intr_info;
for (i = 0; i < 2; i++) {
intr_info = &(proc->vdev.vring_info[i].intr_info);
atomic_store((atomic_uint *)&(intr_info->data), 1);
}
return 0;
}
static void _release(struct hil_proc *proc)
{
(void)proc;
return;
}

View file

@ -0,0 +1,273 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2015 Xilinx, Inc.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* platform.c
*
* DESCRIPTION
*
 * This file is the implementation of the IPC hardware layer interface
 * for the Xilinx Zynq ZC702EVK platform.
*
**************************************************************************/
#include <errno.h>
#include <string.h>
#include <metal/io.h>
#include <metal/device.h>
#include <metal/utilities.h>
#include <metal/atomic.h>
#include <metal/irq.h>
#include <metal/alloc.h>
#include <openamp/hil.h>
#include <openamp/virtqueue.h>
/* IPI REGs OFFSET */
#define IPI_TRIG_OFFSET 0x00000000 /* IPI trigger register offset */
#define IPI_OBS_OFFSET 0x00000004 /* IPI observation register offset */
#define IPI_ISR_OFFSET 0x00000010 /* IPI interrupt status register offset */
#define IPI_IMR_OFFSET 0x00000014 /* IPI interrupt mask register offset */
#define IPI_IER_OFFSET 0x00000018 /* IPI interrupt enable register offset */
#define IPI_IDR_OFFSET 0x0000001C /* IPI interrupt disable register offset */
/* memory attributes */
#define DEVICE_SHARED 0x00000001U /*device, shareable*/
#define DEVICE_NONSHARED 0x00000010U /*device, non shareable*/
#define NORM_NSHARED_NCACHE 0x00000008U /* Non cacheable non shareable */
#define NORM_SHARED_NCACHE 0x0000000CU /* Non cacheable shareable */
#define PRIV_RW_USER_RW (0x00000003U<<8U) /*Full Access*/
#define _rproc_wait() asm volatile("wfi")
/* -- FIX ME: ipi info is to be defined -- */
struct ipi_info {
const char *name;
const char *bus_name;
struct metal_device *dev;
struct metal_io_region *io;
metal_phys_addr_t paddr;
uint32_t ipi_chn_mask;
int registered;
atomic_int sync;
};
/*--------------------------- Declare Functions ------------------------ */
static int _enable_interrupt(struct proc_intr *intr);
static void _notify(struct hil_proc *proc, struct proc_intr *intr_info);
static int _boot_cpu(struct hil_proc *proc, unsigned int load_addr);
static void _shutdown_cpu(struct hil_proc *proc);
static int _poll(struct hil_proc *proc, int nonblock);
static int _initialize(struct hil_proc *proc);
static void _release(struct hil_proc *proc);
static int _ipi_handler(int vect_id, void *data);
static struct metal_io_region* _alloc_shm(struct hil_proc *proc,
metal_phys_addr_t pa,
size_t size,
struct metal_device **dev);
static void _release_shm(struct hil_proc *proc,
struct metal_device *dev,
struct metal_io_region *io);
/*--------------------------- Globals ---------------------------------- */
struct hil_platform_ops zynqmp_r5_a53_proc_ops = {
.enable_interrupt = _enable_interrupt,
.notify = _notify,
.boot_cpu = _boot_cpu,
.shutdown_cpu = _shutdown_cpu,
.poll = _poll,
.alloc_shm = _alloc_shm,
.release_shm = _release_shm,
.initialize = _initialize,
.release = _release,
};
int _ipi_handler(int vect_id, void *data)
{
struct proc_intr *intr = data;
struct ipi_info *ipi = intr->data;
struct metal_io_region *io = ipi->io;
unsigned int ipi_intr_status =
(unsigned int)metal_io_read32(io, IPI_ISR_OFFSET);
(void) vect_id;
if (ipi_intr_status & ipi->ipi_chn_mask) {
atomic_flag_clear(&ipi->sync);
metal_io_write32(io, IPI_ISR_OFFSET,
ipi->ipi_chn_mask);
return 0;
}
return -1;
}
static int _enable_interrupt(struct proc_intr *intr)
{
struct ipi_info *ipi =
(struct ipi_info *)(intr->data);
struct metal_io_region *io = ipi->io;
if (ipi->registered) {
return 0;
}
/* Register ISR */
metal_irq_register(intr->vect_id, _ipi_handler,
intr->dev, intr);
/* Enable IPI interrupt */
metal_irq_enable(intr->vect_id);
metal_io_write32(io, IPI_IER_OFFSET, ipi->ipi_chn_mask);
ipi->registered = 1;
return 0;
}
static void _notify(struct hil_proc *proc, struct proc_intr *intr_info)
{
(void)proc;
struct ipi_info *ipi = (struct ipi_info *)(intr_info->data);
if (ipi == NULL)
return;
/* Trigger IPI */
metal_io_write32(ipi->io, IPI_TRIG_OFFSET, ipi->ipi_chn_mask);
}
static int _boot_cpu(struct hil_proc *proc, unsigned int load_addr)
{
(void)proc;
(void)load_addr;
return -1;
}
static void _shutdown_cpu(struct hil_proc *proc)
{
(void)proc;
return;
}
static int _poll(struct hil_proc *proc, int nonblock)
{
struct proc_vdev *vdev;
struct ipi_info *ipi;
unsigned int flags;
vdev = &proc->vdev;
ipi = (struct ipi_info *)(vdev->intr_info.data);
while(1) {
flags = metal_irq_save_disable();
if (!(atomic_flag_test_and_set(&ipi->sync))) {
metal_irq_restore_enable(flags);
hil_notified(proc, (uint32_t)(-1));
return 0;
}
if (nonblock) {
metal_irq_restore_enable(flags);
return -EAGAIN;
}
_rproc_wait();
metal_irq_restore_enable(flags);
}
}
static struct metal_io_region* _alloc_shm(struct hil_proc *proc,
metal_phys_addr_t pa,
size_t size,
struct metal_device **dev)
{
(void)proc;
*dev = hil_create_generic_mem_dev(pa, size,
NORM_SHARED_NCACHE | PRIV_RW_USER_RW);
if ((*dev))
return &((*dev)->regions[0]);
return NULL;
}
static void _release_shm(struct hil_proc *proc,
struct metal_device *dev,
struct metal_io_region *io)
{
(void)proc;
(void)io;
hil_close_generic_mem_dev(dev);
}
static int _initialize(struct hil_proc *proc)
{
int ret;
struct proc_intr *intr_info;
struct ipi_info *ipi;
if (!proc)
return -1;
intr_info = &(proc->vdev.intr_info);
ipi = intr_info->data;
if (ipi && ipi->name && ipi->bus_name) {
ret = metal_device_open(ipi->bus_name, ipi->name,
&ipi->dev);
if (ret)
return -ENODEV;
ipi->io = metal_device_io_region(ipi->dev, 0);
} else if (ipi->paddr) {
ipi->io = metal_allocate_memory(
sizeof(struct metal_io_region));
if (!ipi->io)
goto error;
metal_io_init(ipi->io, (void *)ipi->paddr,
&ipi->paddr, 0x1000,
sizeof(metal_phys_addr_t) << 3,
0,
NULL);
}
if (ipi->io) {
metal_io_write32(ipi->io, IPI_IDR_OFFSET,
ipi->ipi_chn_mask);
atomic_store(&ipi->sync, 1);
}
ipi->registered = 0;
return 0;
error:
_release(proc);
return -1;
}
static void _release(struct hil_proc *proc)
{
struct proc_intr *intr_info;
struct ipi_info *ipi;
if (!proc)
return;
intr_info = &(proc->vdev.intr_info);
ipi = (struct ipi_info *)(intr_info->data);
if (ipi) {
if (ipi->io) {
metal_io_write32(ipi->io, IPI_IDR_OFFSET,
ipi->ipi_chn_mask);
if (ipi->dev) {
metal_device_close(ipi->dev);
ipi->dev = NULL;
} else {
metal_free_memory(ipi->io);
}
ipi->io = NULL;
}
}
}
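
/*
 * Illustrative sketch only (not part of the imported source): how a board
 * port might describe its IPI block for this driver through a struct
 * ipi_info hooked into proc->vdev.intr_info.data. The libmetal device name,
 * bus name, physical address and channel mask below are hypothetical.
 * _initialize() then opens the device (or maps paddr), masks the IPI via
 * IPI_IDR_OFFSET, and _enable_interrupt() registers _ipi_handler and
 * unmasks the channel via IPI_IER_OFFSET.
 */
static struct ipi_info example_ipi_info = {
	.name = "ff310000.ipi",		/* hypothetical libmetal device name */
	.bus_name = "generic",		/* hypothetical libmetal bus name */
	.paddr = 0xff310000,		/* fallback if no named device is used */
	.ipi_chn_mask = 0x00000100,	/* hypothetical IPI channel bit */
};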

View file

@ -0,0 +1,237 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2015 Xilinx, Inc.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* zynqmp_remoteproc_r5.c
*
* DESCRIPTION
*
* This file is the Implementation of IPC hardware layer interface
* for Xilinx Zynq UltraScale+ MPSoC system.
*
**************************************************************************/
#include <errno.h>
#include <string.h>
#include <stdio.h>
#include <metal/io.h>
#include <metal/device.h>
#include <metal/utilities.h>
#include <metal/atomic.h>
#include <metal/irq.h>
#include <metal/cpu.h>
#include <metal/alloc.h>
#include <openamp/hil.h>
#include <openamp/virtqueue.h>
/* IPI REGs OFFSET */
#define IPI_TRIG_OFFSET 0x00000000 /* IPI trigger register offset */
#define IPI_OBS_OFFSET 0x00000004 /* IPI observation register offset */
#define IPI_ISR_OFFSET 0x00000010 /* IPI interrupt status register offset */
#define IPI_IMR_OFFSET 0x00000014 /* IPI interrupt mask register offset */
#define IPI_IER_OFFSET 0x00000018 /* IPI interrupt enable register offset */
#define IPI_IDR_OFFSET 0x0000001C /* IPI interrupt disable register offset */
#define _rproc_wait() metal_cpu_yield()
/* -- FIX ME: ipi info is to be defined -- */
struct ipi_info {
const char *name;
const char *bus_name;
struct metal_device *dev;
struct metal_io_region *io;
metal_phys_addr_t paddr;
uint32_t ipi_chn_mask;
atomic_int sync;
};
/*--------------------------- Declare Functions ------------------------ */
static int _enable_interrupt(struct proc_intr *intr);
static void _notify(struct hil_proc *proc, struct proc_intr *intr_info);
static int _boot_cpu(struct hil_proc *proc, unsigned int load_addr);
static void _shutdown_cpu(struct hil_proc *proc);
static int _poll(struct hil_proc *proc, int nonblock);
static int _initialize(struct hil_proc *proc);
static void _release(struct hil_proc *proc);
static struct metal_io_region* _alloc_shm(struct hil_proc *proc,
metal_phys_addr_t pa,
size_t size,
struct metal_device **dev);
static void _release_shm(struct hil_proc *proc,
struct metal_device *dev,
struct metal_io_region *io);
/*--------------------------- Globals ---------------------------------- */
struct hil_platform_ops zynqmp_a53_r5_proc_ops = {
.enable_interrupt = _enable_interrupt,
.notify = _notify,
.boot_cpu = _boot_cpu,
.shutdown_cpu = _shutdown_cpu,
.poll = _poll,
.alloc_shm = _alloc_shm,
.release_shm = _release_shm,
.initialize = _initialize,
.release = _release,
};
static int _enable_interrupt(struct proc_intr *intr)
{
(void)intr;
return 0;
}
static void _notify(struct hil_proc *proc, struct proc_intr *intr_info)
{
(void)proc;
struct ipi_info *ipi = (struct ipi_info *)(intr_info->data);
if (ipi == NULL)
return;
/* Trigger IPI */
metal_io_write32(ipi->io, IPI_TRIG_OFFSET, ipi->ipi_chn_mask);
}
static int _boot_cpu(struct hil_proc *proc, unsigned int load_addr)
{
(void)proc;
(void)load_addr;
return -1;
}
static void _shutdown_cpu(struct hil_proc *proc)
{
(void)proc;
return;
}
static int _poll(struct hil_proc *proc, int nonblock)
{
struct proc_vdev *vdev;
struct ipi_info *ipi;
struct metal_io_region *io;
vdev = &proc->vdev;
ipi = (struct ipi_info *)(vdev->intr_info.data);
io = ipi->io;
while(1) {
unsigned int ipi_intr_status =
(unsigned int)metal_io_read32(io, IPI_ISR_OFFSET);
if (ipi_intr_status & ipi->ipi_chn_mask) {
metal_io_write32(io, IPI_ISR_OFFSET,
ipi->ipi_chn_mask);
hil_notified(proc, (uint32_t)(-1));
return 0;
} else if (nonblock) {
return -EAGAIN;
}
_rproc_wait();
}
}
static struct metal_io_region* _alloc_shm(struct hil_proc *proc,
metal_phys_addr_t pa,
size_t size,
struct metal_device **dev)
{
(void)proc;
(void)pa;
(void)size;
*dev = NULL;
return NULL;
}
static void _release_shm(struct hil_proc *proc,
struct metal_device *dev,
struct metal_io_region *io)
{
(void)proc;
(void)io;
hil_close_generic_mem_dev(dev);
}
static int _initialize(struct hil_proc *proc)
{
int ret;
struct proc_intr *intr_info;
struct ipi_info *ipi;
unsigned int ipi_intr_status;
if (!proc)
return -1;
intr_info = &(proc->vdev.intr_info);
ipi = intr_info->data;
if (ipi && ipi->name && ipi->bus_name) {
ret = metal_device_open(ipi->bus_name, ipi->name,
&ipi->dev);
if (ret)
return -ENODEV;
ipi->io = metal_device_io_region(ipi->dev, 0);
intr_info->vect_id = (uintptr_t)ipi->dev->irq_info;
} else if (ipi->paddr) {
ipi->io = metal_allocate_memory(
sizeof(struct metal_io_region));
if (!ipi->io)
goto error;
metal_io_init(ipi->io, (void *)ipi->paddr,
&ipi->paddr, 0x1000,
(unsigned)(-1),
0,
NULL);
}
if (ipi->io) {
ipi_intr_status = (unsigned int)metal_io_read32(
ipi->io, IPI_ISR_OFFSET);
if (ipi_intr_status & ipi->ipi_chn_mask)
metal_io_write32(ipi->io, IPI_ISR_OFFSET,
ipi->ipi_chn_mask);
metal_io_write32(ipi->io, IPI_IDR_OFFSET,
ipi->ipi_chn_mask);
atomic_store(&ipi->sync, 1);
}
return 0;
error:
_release(proc);
return -1;
}
static void _release(struct hil_proc *proc)
{
struct proc_intr *intr_info;
struct ipi_info *ipi;
if (!proc)
return;
intr_info = &(proc->vdev.intr_info);
ipi = (struct ipi_info *)(intr_info->data);
if (ipi) {
if (ipi->io) {
metal_io_write32(ipi->io, IPI_IDR_OFFSET,
ipi->ipi_chn_mask);
if (ipi->dev) {
metal_device_close(ipi->dev);
ipi->dev = NULL;
} else {
metal_free_memory(ipi->io);
}
ipi->io = NULL;
}
}
}

View file

@ -0,0 +1,662 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string.h>
#include <metal/alloc.h>
#include <openamp/elf_loader.h>
/* Local functions. */
static int elf_loader_get_needed_sections(struct elf_decode_info *elf_info);
static int elf_loader_relocs_specific(struct elf_decode_info *elf_info,
Elf32_Shdr * section);
static void *elf_loader_get_entry_point_address(struct elf_decode_info
*elf_info);
static int elf_loader_relocate_link(struct elf_decode_info *elf_info);
static int elf_loader_seek_and_read(void *firmware, void *destination,
Elf32_Off offset, Elf32_Word size);
static int elf_loader_read_headers(void *firmware,
struct elf_decode_info *elf_info);
static int elf_loader_load_sections(void *firmware,
struct elf_decode_info *elf_info);
static int elf_loader_get_decode_info(void *firmware,
struct elf_decode_info *elf_info);
static int elf_loader_reloc_entry(struct elf_decode_info *elf_info,
Elf32_Rel * rel_entry);
static Elf32_Addr elf_loader_get_dynamic_symbol_addr(struct elf_decode_info
*elf_info, int index);
/**
* elf_loader_init
*
* Initializes ELF loader.
*
* @param loader - pointer to remoteproc loader
*
* @return - 0 if success, error otherwise
*/
int elf_loader_init(struct remoteproc_loader *loader)
{
/* Initialize loader function table */
loader->load_firmware = elf_loader_load_remote_firmware;
loader->retrieve_entry = elf_loader_retrieve_entry_point;
loader->retrieve_rsc = elf_loader_retrieve_resource_section;
loader->attach_firmware = elf_loader_attach_firmware;
loader->detach_firmware = elf_loader_detach_firmware;
loader->retrieve_load_addr = elf_get_load_address;
return RPROC_SUCCESS;
}
/**
* elf_loader_attach_firmware
*
* Attaches an ELF firmware to the loader
*
* @param loader - pointer to remoteproc loader
* @param firmware - pointer to the firmware start location
*
* @return - 0 if success, error otherwise
*/
int elf_loader_attach_firmware(struct remoteproc_loader *loader, void *firmware)
{
struct elf_decode_info *elf_info;
int status;
/* Allocate memory for decode info structure. */
elf_info = metal_allocate_memory(sizeof(struct elf_decode_info));
if (!elf_info) {
return RPROC_ERR_NO_MEM;
}
/* Clear the ELF decode struct. */
memset(elf_info, 0, sizeof(struct elf_decode_info));
/* Get the essential information to decode the ELF. */
status = elf_loader_get_decode_info(firmware, elf_info);
if (status) {
/* Free memory. */
metal_free_memory(elf_info);
return status;
}
elf_info->firmware = firmware;
loader->fw_decode_info = elf_info;
return status;
}
/**
* elf_loader_detach_firmware
*
* Detaches ELF firmware from the loader
*
* @param loader - pointer to remoteproc loader
*
* @return - 0 if success, error otherwise
*/
int elf_loader_detach_firmware(struct remoteproc_loader *loader)
{
struct elf_decode_info *elf_info =
(struct elf_decode_info *)loader->fw_decode_info;
if (elf_info) {
/* Free memory. */
metal_free_memory(elf_info->shstrtab);
metal_free_memory(elf_info->section_headers_start);
metal_free_memory(elf_info);
}
return RPROC_SUCCESS;
}
/**
* elf_loader_retrieve_entry_point
*
* Retrieves the ELF entrypoint.
*
* @param loader - pointer to remoteproc loader
*
* @return - entrypoint
*/
void *elf_loader_retrieve_entry_point(struct remoteproc_loader *loader)
{
return elf_loader_get_entry_point_address((struct elf_decode_info *)
loader->fw_decode_info);
}
/**
* elf_loader_retrieve_resource_section
*
* Retrieves the resource section.
*
* @param loader - pointer to remoteproc loader
* @param size - pointer to contain the size of the section
*
* @return - pointer to resource section
*/
void *elf_loader_retrieve_resource_section(struct remoteproc_loader *loader,
unsigned int *size)
{
Elf32_Shdr *rsc_header;
void *resource_section = NULL;
struct elf_decode_info *elf_info =
(struct elf_decode_info *)loader->fw_decode_info;
if (elf_info->rsc) {
/* Retrieve resource section header. */
rsc_header = elf_info->rsc;
/* Retrieve resource section size. */
*size = rsc_header->sh_size;
/* Locate the start of resource section. */
resource_section = (void *)((uintptr_t)elf_info->firmware
+ rsc_header->sh_offset);
}
/* Return the address of resource section. */
return resource_section;
}
/**
* elf_loader_load_remote_firmware
*
* Loads the ELF firmware.
*
* @param loader - pointer to remoteproc loader
*
* @return - 0 if success, error otherwise
*/
int elf_loader_load_remote_firmware(struct remoteproc_loader *loader)
{
struct elf_decode_info *elf_info =
(struct elf_decode_info *)loader->fw_decode_info;
int status;
/* Load ELF sections. */
status = elf_loader_load_sections(elf_info->firmware, elf_info);
if (!status) {
/* Perform dynamic relocations if needed. */
status = elf_loader_relocate_link(elf_info);
}
return status;
}
/**
* elf_get_load_address
*
* Provides firmware load address.
*
* @param loader - pointer to remoteproc loader
*
* @return - load address pointer
*/
void *elf_get_load_address(struct remoteproc_loader *loader)
{
struct elf_decode_info *elf_info =
(struct elf_decode_info *)loader->fw_decode_info;
int status = 0;
Elf32_Shdr *current = (Elf32_Shdr *) (elf_info->section_headers_start);
/* Traverse all sections except the reserved null section. */
int section_count = elf_info->elf_header.e_shnum - 1;
while ((section_count > 0) && (status == 0)) {
/* Compute the pointer to section header. */
current = (Elf32_Shdr *) (((unsigned char *)current)
+ elf_info->elf_header.e_shentsize);
/* Get the name of current section. */
char *current_name = elf_info->shstrtab + current->sh_name;
if (!strcmp(current_name, ".text")) {
return ((void *)(current->sh_addr));
}
/* Move to the next section. */
section_count--;
}
return (RPROC_ERR_PTR);
}
/**
* elf_loader_get_needed_sections
*
* Retrieves the sections we need during the load and link from the
* section headers list.
*
* @param elf_info - ELF object decode info container.
*
 * @return - number of needed sections that were not found (0 if all were found).
*/
static int elf_loader_get_needed_sections(struct elf_decode_info *elf_info)
{
Elf32_Shdr *current = (Elf32_Shdr *) (elf_info->section_headers_start);
/* We are interested in the following sections:
.dynsym
.dynstr
.rel.plt
.rel.dyn
.resource_table
*/
int sections_to_find = 5;
/* Search for sections but skip the reserved null section. */
int section_count = elf_info->elf_header.e_shnum - 1;
while ((section_count > 0) && (sections_to_find > 0)) {
/* Compute the section header pointer. */
current = (Elf32_Shdr *) (((unsigned char *)current)
+ elf_info->elf_header.e_shentsize);
/* Get the name of current section. */
char *current_name = elf_info->shstrtab + current->sh_name;
/* Proceed if the section is allocatable and is not executable. */
if ((current->sh_flags & SHF_ALLOC)
&& !(current->sh_flags & SHF_EXECINSTR)) {
/* Check for '.dynsym' or '.dynstr' or '.rel.plt' or '.rel.dyn'. */
if (*current_name == '.') {
current_name++;
/* Check for '.dynsym' or 'dynstr'. */
if (*current_name == 'd') {
current_name++;
/* Check for '.dynsym'. */
if (strncmp(current_name, "ynsym", 5) == 0) {
elf_info->dynsym = current;
sections_to_find--;
}
/* Check for '.dynstr'. */
else if (strncmp(current_name, "ynstr", 5) == 0) {
elf_info->dynstr = current;
sections_to_find--;
}
}
/* Check for '.rel.plt' or '.rel.dyn'. */
else if (*current_name == 'r') {
current_name++;
/* Check for '.rel.plt'. */
if (strncmp(current_name, "el.plt", 6) == 0) {
elf_info->rel_plt = current;
sections_to_find--;
}
/* Check for '.rel.dyn'. */
else if (strncmp(current_name, "el.dyn", 6) == 0) {
elf_info->rel_dyn = current;
sections_to_find--;
}
/* Check for '.resource_table'. */
else if (strncmp(current_name, "esource_table", 13)
== 0) {
elf_info->rsc = current;
sections_to_find--;
}
}
}
}
/* Move to the next section. */
section_count--;
}
/* Return the number of sections still not found. */
return (sections_to_find);
}
/**
* elf_loader_relocs_specific
*
* Processes the relocations contained in the specified section.
*
* @param elf_info - elf decoding information.
* @param section - header of the specified relocation section.
*
* @return - 0 if success, error otherwise
*/
static int elf_loader_relocs_specific(struct elf_decode_info *elf_info,
Elf32_Shdr * section)
{
unsigned char *section_load_addr = (unsigned char *)section->sh_addr;
int status = 0;
unsigned int i;
/* Check the section type. */
if (section->sh_type == SHT_REL) {
/* Traverse the list of relocation entries contained in the section. */
for (i = 0; (i < section->sh_size) && (status == 0);
i += section->sh_entsize) {
/* Compute the relocation entry address. */
Elf32_Rel *rel_entry =
(Elf32_Rel *) (section_load_addr + i);
/* Process the relocation entry. */
status = elf_loader_reloc_entry(elf_info, rel_entry);
}
}
/* Return status to caller. */
return (status);
}
/**
* elf_loader_get_entry_point_address
*
* Retrieves the entry point address from the specified ELF object.
*
* @param elf_info - elf object decode info container.
*
* @return - entry point address of the specified ELF object.
*/
static void *elf_loader_get_entry_point_address(struct elf_decode_info
*elf_info)
{
return ((void *)elf_info->elf_header.e_entry);
}
/**
* elf_loader_relocate_link
*
* Relocates and links the given ELF object.
*
* @param elf_info - elf object decode info container.
*
* @return - 0 if success, error otherwise
*/
static int elf_loader_relocate_link(struct elf_decode_info *elf_info)
{
int status = 0;
/* Check if the .rel.dyn section exists in the ELF. */
if (elf_info->rel_dyn) {
/* Relocate and link .rel.dyn section. */
status =
elf_loader_relocs_specific(elf_info, elf_info->rel_dyn);
}
/* Proceed to check if .rel.plt section exists, if no error encountered yet. */
if (status == 0 && elf_info->rel_plt) {
/* Relocate and link .rel.plt section. */
status =
elf_loader_relocs_specific(elf_info, elf_info->rel_plt);
}
/* Return status to caller */
return (status);
}
/**
* elf_loader_seek_and_read
*
* Seeks to the specified offset in the given file and reads the data
* into the specified destination location.
*
* @param firmware - firmware to read from.
* @param destination - Location into which the data should be read.
* @param offset - Offset to seek in the file.
* @param size - Size of the data to read.
*
* @return - 0 if success, error otherwise
*/
static int elf_loader_seek_and_read(void *firmware, void *destination,
Elf32_Off offset, Elf32_Word size)
{
char *src = (char *)firmware;
/* Seek to the specified offset. */
src = src + offset;
/* Read the data. */
memcpy((char *)destination, src, size);
/* Return status to caller. */
return (0);
}
/**
* elf_loader_read_headers
*
* Reads the ELF headers (ELF header, section headers and the section
* headers string table) essential to access further information from
* the file containing the ELF object.
*
* @param firmware - firmware to read from.
* @param elf_info - ELF object decode info container.
*
* @return - 0 if success, error otherwise
*/
static int elf_loader_read_headers(void *firmware,
struct elf_decode_info *elf_info)
{
int status = 0;
unsigned int section_count;
/* Read the ELF header. */
status = elf_loader_seek_and_read(firmware, &(elf_info->elf_header), 0,
sizeof(Elf32_Ehdr));
/* Ensure the read was successful. */
if (!status) {
/* Get section count from the ELF header. */
section_count = elf_info->elf_header.e_shnum;
/* Allocate memory to read in the section headers. */
elf_info->section_headers_start = metal_allocate_memory(section_count * elf_info->elf_header.e_shentsize);
/* Check if the allocation was successful. */
if (elf_info->section_headers_start) {
/* Read the section headers list. */
status = elf_loader_seek_and_read(firmware,
elf_info->
section_headers_start,
elf_info->elf_header.
e_shoff,
section_count *
elf_info->elf_header.
e_shentsize);
/* Ensure the read was successful. */
if (!status) {
/* Compute the pointer to section header string table section. */
Elf32_Shdr *section_header_string_table =
(Elf32_Shdr *) (elf_info->
section_headers_start +
elf_info->elf_header.
e_shstrndx *
elf_info->elf_header.
e_shentsize);
/* Allocate the memory for section header string table. */
elf_info->shstrtab = metal_allocate_memory(section_header_string_table->sh_size);
/* Ensure the allocation was successful. */
if (elf_info->shstrtab) {
/* Read the section headers string table. */
status =
elf_loader_seek_and_read(firmware,
elf_info->
shstrtab,
section_header_string_table->
sh_offset,
section_header_string_table->
sh_size);
}
}
}
}
/* Return status to caller. */
return (status);
}
/**
 * elf_loader_load_sections
 *
 * Loads the ELF section contents from the specified firmware image
 * containing the ELF object.
*
* @param firmware - firmware to read from.
* @param elf_info - ELF object decode info container.
*
* @return - 0 if success, error otherwise
*/
static int elf_loader_load_sections(void *firmware,
struct elf_decode_info *elf_info)
{
int status = 0;
Elf32_Shdr *current = (Elf32_Shdr *) (elf_info->section_headers_start);
/* Traverse all sections except the reserved null section. */
int section_count = elf_info->elf_header.e_shnum - 1;
while ((section_count > 0) && (status == 0)) {
/* Compute the pointer to section header. */
current = (Elf32_Shdr *) (((unsigned char *)current)
+ elf_info->elf_header.e_shentsize);
/* Make sure the section can be allocated and is not empty. */
if ((current->sh_flags & SHF_ALLOC) && (current->sh_size)) {
char *destination = NULL;
/* Check if the section is part of runtime and is not section with
* no-load attributes such as BSS or heap. */
if ((current->sh_type & SHT_NOBITS) == 0) {
/* Compute the destination address where the section should
* be copied. */
destination = (char *)(current->sh_addr);
status =
elf_loader_seek_and_read(firmware,
destination,
current->sh_offset,
current->sh_size);
}
}
/* Move to the next section. */
section_count--;
}
/* Return status to caller. */
return (status);
}
/**
* elf_loader_get_decode_info
*
* Retrieves the information necessary to decode the ELF object for
* loading, relocating and linking.
*
* @param firmware - firmware to read from.
* @param elf_info - ELF object decode info container.
*
* @return - 0 if success, error otherwise
*/
static int elf_loader_get_decode_info(void *firmware,
struct elf_decode_info *elf_info)
{
int status;
/* Read the ELF headers (ELF header and section headers including
* the section header string table). */
status = elf_loader_read_headers(firmware, elf_info);
/* Ensure that ELF headers were read successfully. */
if (!status) {
/* Retrieve the sections required for load. */
elf_loader_get_needed_sections(elf_info);
}
/* Return status to caller. */
return (status);
}
/**
* elf_loader_get_dynamic_symbol_addr
*
* Retrieves the (relocatable) address of the symbol specified as
* index from the given ELF object.
*
* @param elf_info - ELF object decode info container.
* @param index - Index of the desired symbol in the dynamic symbol table.
*
* @return - Address of the specified symbol.
*/
static Elf32_Addr elf_loader_get_dynamic_symbol_addr(struct elf_decode_info
*elf_info, int index)
{
Elf32_Sym *symbol_entry = (Elf32_Sym *) (elf_info->dynsym_addr
+
index *
elf_info->dynsym->sh_entsize);
/* Return the symbol address. */
return (symbol_entry->st_value);
}
/**
* elf_loader_reloc_entry
*
 * Processes the specified relocation entry. It handles both the relocation
 * and linking cases.
 *
 * @param elf_info - ELF object decode info container.
 * @param rel_entry - pointer to the relocation entry to process.
*
* @return - 0 if success, error otherwise
*/
static int elf_loader_reloc_entry(struct elf_decode_info *elf_info,
Elf32_Rel * rel_entry)
{
unsigned char rel_type = ELF32_R_TYPE(rel_entry->r_info);
int status = 0;
switch (rel_type) {
case R_ARM_ABS32: /* 0x02 */
{
Elf32_Addr sym_addr =
elf_loader_get_dynamic_symbol_addr(elf_info,
ELF32_R_SYM
(rel_entry->
r_info));
if (sym_addr) {
*((unsigned int *)(rel_entry->r_offset)) =
(unsigned int)sym_addr;
break;
}
}
break;
default:
break;
}
return status;
}
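
/*
 * Illustrative sketch only (not part of the imported source): a hypothetical
 * caller driving this ELF back end directly, without the generic
 * remoteproc_loader front end that ships in this import. fw_image points to
 * an ELF image already resident in memory.
 */
static int example_load_elf(void *fw_image)
{
	struct remoteproc_loader loader = { 0 };
	unsigned int rsc_size = 0;
	int status;

	elf_loader_init(&loader);		/* fill in the ops table */
	status = elf_loader_attach_firmware(&loader, fw_image);
	if (status)
		return status;
	/* Resource table and entry point are read straight from the image. */
	(void)elf_loader_retrieve_resource_section(&loader, &rsc_size);
	(void)elf_loader_retrieve_entry_point(&loader);
	/* Copy SHF_ALLOC sections to their sh_addr and apply relocations. */
	status = elf_loader_load_remote_firmware(&loader);
	elf_loader_detach_firmware(&loader);
	return status;
}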

View file

@ -0,0 +1,335 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2015 Xilinx, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string.h>
#include <openamp/remoteproc.h>
#include <openamp/remoteproc_loader.h>
#include <openamp/rsc_table_parser.h>
#include <openamp/hil.h>
#include <metal/sys.h>
#include <metal/alloc.h>
#include <metal/sleep.h>
/**
* remoteproc_resource_init
*
* Initializes resources for remoteproc remote configuration. Only
* remoteproc remote applications are allowed to call this function.
*
* @param rsc_info - pointer to resource table info control
* block
* @param proc - pointer to the hil_proc
* @param channel_created - callback function for channel creation
* @param channel_destroyed - callback function for channel deletion
* @param default_cb - default callback for channel I/O
* @param rproc_handle - pointer to new remoteproc instance
* @param rpmsg_role - 1 for rpmsg master, or 0 for rpmsg slave
*
* @param returns - status of function execution
*
*/
int remoteproc_resource_init(struct rsc_table_info *rsc_info,
struct hil_proc *proc,
rpmsg_chnl_cb_t channel_created,
rpmsg_chnl_cb_t channel_destroyed,
rpmsg_rx_cb_t default_cb,
struct remote_proc **rproc_handle,
int rpmsg_role)
{
struct remote_proc *rproc;
int status;
int remote_rpmsg_role;
if (!rsc_info || !proc) {
return RPROC_ERR_PARAM;
}
rproc = metal_allocate_memory(sizeof(struct remote_proc));
if (rproc) {
memset(rproc, 0x00, sizeof(struct remote_proc));
/* There can be only one master for remote configuration so use the
* rsvd cpu id for creating hil proc */
rproc->proc = proc;
status = hil_init_proc(proc);
if (!status) {
/* Parse resource table */
status =
handle_rsc_table(rproc, rsc_info->rsc_tab,
rsc_info->size);
if (status == RPROC_SUCCESS) {
/* Initialize RPMSG "messaging" component */
*rproc_handle = rproc;
remote_rpmsg_role = (rpmsg_role == RPMSG_MASTER?
RPMSG_REMOTE : RPMSG_MASTER);
status =
rpmsg_init(proc,
&rproc->rdev, channel_created,
channel_destroyed, default_cb,
remote_rpmsg_role);
} else {
status = RPROC_ERR_NO_RSC_TABLE;
}
} else {
status = RPROC_ERR_CPU_INIT;
}
} else {
status = RPROC_ERR_NO_MEM;
}
/* Cleanup in case of error */
if (status != RPROC_SUCCESS) {
*rproc_handle = 0;
(void)remoteproc_resource_deinit(rproc);
return status;
}
return status;
}
/**
* remoteproc_resource_deinit
*
* Uninitializes resources for remoteproc "remote" configuration.
*
* @param rproc - pointer to rproc instance
*
* @param returns - status of function execution
*
*/
int remoteproc_resource_deinit(struct remote_proc *rproc)
{
if (rproc) {
if (rproc->rdev) {
rpmsg_deinit(rproc->rdev);
}
if (rproc->proc) {
hil_delete_proc(rproc->proc);
rproc->proc = NULL;
}
metal_free_memory(rproc);
}
return RPROC_SUCCESS;
}
/**
* remoteproc_init
*
* Initializes resources for remoteproc master configuration. Only
* remoteproc master applications are allowed to call this function.
*
 * @param fw_name - name of firmware
* @param proc - pointer to hil_proc
* @param channel_created - callback function for channel creation
* @param channel_destroyed - callback function for channel deletion
* @param default_cb - default callback for channel I/O
* @param rproc_handle - pointer to new remoteproc instance
*
* @param returns - status of function execution
*
*/
int remoteproc_init(char *fw_name, struct hil_proc *proc,
rpmsg_chnl_cb_t channel_created,
rpmsg_chnl_cb_t channel_destroyed, rpmsg_rx_cb_t default_cb,
struct remote_proc **rproc_handle)
{
struct remote_proc *rproc;
struct resource_table *rsc_table;
unsigned int fw_size, rsc_size;
uintptr_t fw_addr;
int status;
if (!fw_name) {
return RPROC_ERR_PARAM;
}
rproc = metal_allocate_memory(sizeof(struct remote_proc));
if (rproc) {
memset((void *)rproc, 0x00, sizeof(struct remote_proc));
/* Create proc instance */
rproc->proc = proc;
status = hil_init_proc(proc);
if (!status) {
/* Retrieve firmware attributes */
status =
hil_get_firmware(fw_name, &fw_addr,
&fw_size);
if (!status) {
/* Initialize ELF loader - currently only ELF format is supported */
rproc->loader =
remoteproc_loader_init(ELF_LOADER);
if (rproc->loader) {
/* Attach the given firmware with the ELF parser/loader */
status =
remoteproc_loader_attach_firmware
(rproc->loader,
(void *)fw_addr);
} else {
status = RPROC_ERR_LOADER;
}
}
} else {
status = RPROC_ERR_CPU_INIT;
}
} else {
status = RPROC_ERR_NO_MEM;
}
if (!status) {
rproc->role = RPROC_MASTER;
/* Get resource table from firmware */
rsc_table =
remoteproc_loader_retrieve_resource_section(rproc->loader,
&rsc_size);
if (rsc_table) {
/* Parse resource table */
status = handle_rsc_table(rproc, rsc_table, rsc_size);
} else {
status = RPROC_ERR_NO_RSC_TABLE;
}
}
/* Cleanup in case of error */
if (status != RPROC_SUCCESS) {
(void)remoteproc_deinit(rproc);
return status;
}
rproc->channel_created = channel_created;
rproc->channel_destroyed = channel_destroyed;
rproc->default_cb = default_cb;
*rproc_handle = rproc;
return status;
}
/**
* remoteproc_deinit
*
* Uninitializes resources for remoteproc "master" configuration.
*
* @param rproc - pointer to remote proc instance
*
* @param returns - status of function execution
*
*/
int remoteproc_deinit(struct remote_proc *rproc)
{
if (rproc) {
if (rproc->loader) {
(void)remoteproc_loader_delete(rproc->loader);
rproc->loader = RPROC_NULL;
}
if (rproc->proc) {
hil_delete_proc(rproc->proc);
rproc->proc = RPROC_NULL;
}
metal_free_memory(rproc);
}
return RPROC_SUCCESS;
}
/**
* remoteproc_boot
*
* This function loads the image on the remote processor and starts
* its execution from image load address.
*
* @param rproc - pointer to remoteproc instance to boot
*
* @param returns - status of function execution
*/
int remoteproc_boot(struct remote_proc *rproc)
{
void *load_addr;
int status;
if (!rproc) {
return RPROC_ERR_PARAM;
}
/* Stop the remote CPU */
hil_shutdown_cpu(rproc->proc);
/* Load the firmware */
status = remoteproc_loader_load_remote_firmware(rproc->loader);
if (status == RPROC_SUCCESS) {
load_addr = remoteproc_get_load_address(rproc->loader);
if (load_addr != RPROC_ERR_PTR) {
/* Start the remote cpu */
status = hil_boot_cpu(rproc->proc,
(uintptr_t)load_addr);
if (status == RPROC_SUCCESS) {
/* Wait for remote side to come up. This delay is arbitrary and may
 * need adjustment for different configurations of remote systems */
metal_sleep_usec(RPROC_BOOT_DELAY);
/* Initialize RPMSG "messaging" component */
/* It is a work-around to work with remote Linux context.
Since the upstream Linux rpmsg implementation always
assumes itself to be an rpmsg master, we initialize
the remote device as an rpmsg master for remote Linux
configuration only. */
#if defined (OPENAMP_REMOTE_LINUX_ENABLE)
status =
rpmsg_init(rproc->proc,
&rproc->rdev,
rproc->channel_created,
rproc->channel_destroyed,
rproc->default_cb, RPMSG_MASTER);
#else
status =
rpmsg_init(rproc->proc,
&rproc->rdev,
rproc->channel_created,
rproc->channel_destroyed,
rproc->default_cb, RPMSG_REMOTE);
#endif
}
} else {
status = RPROC_ERR_LOADER;
}
} else {
status = RPROC_ERR_LOADER;
}
return status;
}
/**
* remoteproc_shutdown
*
* This function shutdowns the remote execution context
*
* @param rproc - pointer to remote proc instance to shutdown
*
* @param returns - status of function execution
*/
int remoteproc_shutdown(struct remote_proc *rproc)
{
if (rproc) {
if (rproc->proc) {
hil_shutdown_cpu(rproc->proc);
}
if (rproc->rdev) {
rpmsg_deinit(rproc->rdev);
rproc->rdev = RPROC_NULL;
}
}
return RPROC_SUCCESS;
}
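
/*
 * Illustrative sketch only (not part of the imported source): the typical
 * call sequence for the remoteproc "master" configuration implemented above.
 * The firmware name, hil_proc pointer and callbacks are hypothetical
 * placeholders; the callback typedefs come from the rpmsg headers.
 */
static rpmsg_chnl_cb_t example_channel_created;	/* hypothetical callbacks */
static rpmsg_chnl_cb_t example_channel_destroyed;
static rpmsg_rx_cb_t example_rx_cb;

static int example_start_remote(struct hil_proc *proc,
				struct remote_proc **rproc)
{
	static char fw_name[] = "firmware.elf";	/* hypothetical image name */
	int status;

	status = remoteproc_init(fw_name, proc,
				 example_channel_created,
				 example_channel_destroyed,
				 example_rx_cb, rproc);
	if (status != RPROC_SUCCESS)
		return status;
	/* Loads the image, starts the remote CPU and brings up rpmsg. */
	return remoteproc_boot(*rproc);
}

static void example_stop_remote(struct remote_proc *rproc)
{
	(void)remoteproc_shutdown(rproc);
	(void)remoteproc_deinit(rproc);
}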

View file

@ -0,0 +1,229 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string.h>
#include <metal/alloc.h>
#include <openamp/remoteproc_loader.h>
/**
* remoteproc_loader_init
*
* Initializes the remoteproc loader.
*
* @param type - loader type
*
* @return - remoteproc_loader
*/
struct remoteproc_loader *remoteproc_loader_init(enum loader_type type)
{
struct remoteproc_loader *loader;
/* Check for valid loader type. */
if (type >= LAST_LOADER) {
return RPROC_NULL;
}
/* Allocate a loader handle. */
loader = metal_allocate_memory(sizeof(struct remoteproc_loader));
if (!loader) {
return RPROC_NULL;
}
/* Clear loader handle. */
memset(loader, 0, sizeof(struct remoteproc_loader));
/* Save loader type. */
loader->type = type;
switch (type) {
case ELF_LOADER:
elf_loader_init(loader);
break;
default:
/* Loader not supported. */
metal_free_memory(loader);
loader = RPROC_NULL;
break;
}
return loader;
}
/**
* remoteproc_loader_delete
*
* Deletes the remoteproc loader.
*
* @param loader - pointer to remoteproc loader
*
* @return - 0 if success, error otherwise
*/
int remoteproc_loader_delete(struct remoteproc_loader *loader)
{
int status = 0;
if (!loader) {
return RPROC_ERR_PARAM;
}
/* Check if a firmware is attached. */
if (loader->remote_firmware) {
/* Detach firmware first. */
status = loader->detach_firmware(loader);
}
/* Recover the allocated memory. */
metal_free_memory(loader);
return status;
}
/**
* remoteproc_loader_attach_firmware
*
 * Attaches a firmware image to the loader
*
* @param loader - pointer to remoteproc loader
* @param firmware - pointer to the firmware start location
*
* @return - 0 if success, error otherwise
*/
int remoteproc_loader_attach_firmware(struct remoteproc_loader *loader,
void *firmware_image)
{
int status = RPROC_SUCCESS;
if (!loader || !firmware_image) {
return RPROC_ERR_PARAM;
}
if (loader->attach_firmware) {
/* Check if a firmware is already attached. */
if (loader->remote_firmware) {
/* Detach firmware first. */
status = loader->detach_firmware(loader);
}
/* Attach firmware. */
if (!status) {
status =
loader->attach_firmware(loader, firmware_image);
/* Save firmware address. */
if (!status) {
loader->remote_firmware = firmware_image;
}
}
} else {
status = RPROC_ERR_LOADER;
}
return status;
}
/**
* remoteproc_loader_retrieve_entry_point
*
* Provides entry point address.
*
* @param loader - pointer to remoteproc loader
*
* @return - entrypoint
*/
void *remoteproc_loader_retrieve_entry_point(struct remoteproc_loader *loader)
{
if (!loader) {
return RPROC_NULL;
}
if (loader->retrieve_entry) {
return loader->retrieve_entry(loader);
} else {
return RPROC_NULL;
}
}
/**
* remoteproc_loader_retrieve_resource_section
*
* Provides resource section address.
*
* @param loader - pointer to remoteproc loader
* @param size - pointer to hold size of resource section
*
* @return - pointer to resource section
*/
void *remoteproc_loader_retrieve_resource_section(struct remoteproc_loader
*loader, unsigned int *size)
{
if (!loader) {
return RPROC_NULL;
}
if (loader->retrieve_rsc) {
return loader->retrieve_rsc(loader, size);
} else {
return RPROC_NULL;
}
}
/**
* remoteproc_loader_load_remote_firmware
*
* Loads the firmware in memory
*
* @param loader - pointer to remoteproc loader
*
* @return - 0 if success, error otherwise
*/
int remoteproc_loader_load_remote_firmware(struct remoteproc_loader *loader)
{
if (!loader) {
return RPROC_ERR_PARAM;
}
if (loader->load_firmware) {
return loader->load_firmware(loader);
} else {
return RPROC_ERR_LOADER;
}
}
/**
* remoteproc_get_load_address
*
* Provides firmware load address.
*
* @param loader - pointer to remoteproc loader
*
* @return - load address pointer
*/
void *remoteproc_get_load_address(struct remoteproc_loader *loader)
{
if (!loader) {
return RPROC_ERR_PTR;
}
if (loader->retrieve_load_addr) {
return loader->retrieve_load_addr(loader);
} else {
return RPROC_ERR_PTR;
}
}
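
/*
 * Illustrative sketch only (not part of the imported source): using the
 * generic loader front end above with the ELF back end. fw_image is a
 * hypothetical pointer to a firmware blob already present in memory.
 */
static int example_parse_and_load(void *fw_image)
{
	struct remoteproc_loader *loader;
	unsigned int rsc_size = 0;
	int status;

	loader = remoteproc_loader_init(ELF_LOADER);
	if (!loader)
		return RPROC_ERR_LOADER;
	status = remoteproc_loader_attach_firmware(loader, fw_image);
	if (!status) {
		/* The resource table can be parsed before loading sections. */
		(void)remoteproc_loader_retrieve_resource_section(loader,
								  &rsc_size);
		status = remoteproc_loader_load_remote_firmware(loader);
	}
	(void)remoteproc_loader_delete(loader);
	return status;
}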

View file

@ -0,0 +1,266 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <openamp/rsc_table_parser.h>
#include <metal/io.h>
/* Resources handler */
rsc_handler rsc_handler_table[] = {
handle_carve_out_rsc,
handle_dev_mem_rsc,
handle_trace_rsc,
handle_vdev_rsc,
handle_rproc_mem_rsc,
handle_fw_chksum_rsc,
handle_mmu_rsc
};
/**
* handle_rsc_table
*
* This function parses resource table.
*
 * @param rproc - pointer to the remote_proc instance
* @param rsc_table - resource table to parse
* @param size - size of rsc table
*
* @returns - execution status
*
*/
int handle_rsc_table(struct remote_proc *rproc,
struct resource_table *rsc_table, int size)
{
unsigned char *rsc_start;
unsigned int *rsc_offset;
unsigned int rsc_type;
unsigned int idx;
int status = 0;
/* Validate rsc table header fields */
/* Minimum rsc table size */
if (sizeof(struct resource_table) > (unsigned int)size) {
return (RPROC_ERR_RSC_TAB_TRUNC);
}
/* Supported version */
if (rsc_table->ver != RSC_TAB_SUPPORTED_VERSION) {
return (RPROC_ERR_RSC_TAB_VER);
}
/* Offset array */
if (sizeof(struct resource_table)
+ rsc_table->num * sizeof(rsc_table->offset[0]) > (unsigned int)size) {
return (RPROC_ERR_RSC_TAB_TRUNC);
}
/* Reserved fields - must be zero */
if (rsc_table->reserved[0] != 0 || rsc_table->reserved[1] != 0) {
return RPROC_ERR_RSC_TAB_RSVD;
}
rsc_start = (unsigned char *)rsc_table;
/* FIX ME: need a clearer solution to set the I/O region for
* resource table */
status = hil_set_rsc(rproc->proc, NULL, NULL,
(metal_phys_addr_t)((uintptr_t)rsc_table), size);
if (status)
return status;
/* Loop through the offset array and parse each resource entry */
for (idx = 0; idx < rsc_table->num; idx++) {
rsc_offset =
(unsigned int *)(rsc_start + rsc_table->offset[idx]);
rsc_type = *rsc_offset;
status =
rsc_handler_table[rsc_type] (rproc, (void *)rsc_offset);
if (status != RPROC_SUCCESS) {
break;
}
}
return status;
}
/**
* handle_carve_out_rsc
*
* Carveout resource handler.
*
 * @param rproc - pointer to the remote_proc instance
* @param rsc - pointer to carveout resource
*
* @returns - execution status
*
*/
int handle_carve_out_rsc(struct remote_proc *rproc, void *rsc)
{
struct fw_rsc_carveout *carve_rsc = (struct fw_rsc_carveout *)rsc;
/* Validate resource fields */
if (!carve_rsc) {
return RPROC_ERR_RSC_TAB_NP;
}
if (carve_rsc->reserved) {
return RPROC_ERR_RSC_TAB_RSVD;
}
if (rproc->role == RPROC_MASTER) {
/* FIX ME: TO DO */
return RPROC_SUCCESS;
}
return RPROC_SUCCESS;
}
/**
* handle_trace_rsc
*
* Trace resource handler.
*
 * @param rproc - pointer to the remote_proc instance
* @param rsc - pointer to trace resource
*
* @returns - execution status
*
*/
int handle_trace_rsc(struct remote_proc *rproc, void *rsc)
{
(void)rproc;
(void)rsc;
return RPROC_ERR_RSC_TAB_NS;
}
/**
* handle_dev_mem_rsc
*
* Device memory resource handler.
*
 * @param rproc - pointer to the remote_proc instance
* @param rsc - pointer to device memory resource
*
* @returns - execution status
*
*/
int handle_dev_mem_rsc(struct remote_proc *rproc, void *rsc)
{
(void)rproc;
(void)rsc;
return RPROC_ERR_RSC_TAB_NS;
}
/**
* handle_vdev_rsc
*
* Virtio device resource handler
*
 * @param rproc - pointer to the remote_proc instance
* @param rsc - pointer to virtio device resource
*
* @returns - execution status
*
*/
int handle_vdev_rsc(struct remote_proc *rproc, void *rsc)
{
struct fw_rsc_vdev *vdev_rsc = (struct fw_rsc_vdev *)rsc;
struct proc_vdev *vdev;
if (!vdev_rsc) {
return RPROC_ERR_RSC_TAB_NP;
}
/* Maximum supported vrings per Virtio device */
if (vdev_rsc->num_of_vrings > RSC_TAB_MAX_VRINGS) {
return RPROC_ERR_RSC_TAB_VDEV_NRINGS;
}
/* Reserved fields - must be zero */
if (vdev_rsc->reserved[0] || vdev_rsc->reserved[1]) {
return RPROC_ERR_RSC_TAB_RSVD;
}
/* Get the Virtio device from HIL proc */
vdev = hil_get_vdev_info(rproc->proc);
/* Initialize HIL Virtio device resources */
vdev->num_vrings = vdev_rsc->num_of_vrings;
vdev->dfeatures = vdev_rsc->dfeatures;
vdev->gfeatures = vdev_rsc->gfeatures;
vdev->vdev_info = vdev_rsc;
return RPROC_SUCCESS;
}
/**
* handle_rproc_mem_rsc
*
 * This function parses the rproc_mem resource.
 * This is the resource with which the remote processor
 * tells the host which memory can be used as
 * shared memory.
 *
 * @param rproc - pointer to the remote_proc instance
 * @param rsc - pointer to the rproc_mem resource
*
* @returns - execution status
*
*/
int handle_rproc_mem_rsc(struct remote_proc *rproc, void *rsc)
{
(void)rproc;
(void)rsc;
/* TODO: the firmware side should handle this resource properly
* when it is the master or when it is the remote. */
return RPROC_SUCCESS;
}
/**
 * handle_fw_chksum_rsc
 *
 * This function parses the firmware checksum resource.
 *
 * @param rproc - pointer to the remote_proc instance
 * @param rsc - pointer to the firmware checksum resource
*
* @returns - execution status
*
*/
int handle_fw_chksum_rsc(struct remote_proc *rproc, void *rsc)
{
(void)rproc;
(void)rsc;
/* TODO: the firmware side should handle this resource properly
* when it is the master or when it is the remote. */
return RPROC_SUCCESS;
}
/**
* handle_mmu_rsc
*
 * This function parses the mmu resource requested by the peripheral.
 *
 * @param rproc - pointer to the remote_proc instance
 * @param rsc - pointer to the mmu resource
*
* @returns - execution status
*
*/
int handle_mmu_rsc(struct remote_proc *rproc, void *rsc)
{
(void)rproc;
(void)rsc;
return RPROC_ERR_RSC_TAB_NS;
}
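
/*
 * Illustrative sketch only (not part of the imported source): the table
 * layout handle_rsc_table() above expects, expressed with hypothetical
 * local types. The real entry structures (fw_rsc_carveout, fw_rsc_vdev, ...)
 * come from openamp/remoteproc.h.
 */
struct example_rsc_table {
	uint32_t ver;		/* checked against RSC_TAB_SUPPORTED_VERSION */
	uint32_t num;		/* number of entries in offset[] */
	uint32_t reserved[2];	/* both must be zero */
	uint32_t offset[2];	/* table-relative byte offset of each entry */
	/*
	 * Entries follow. The first 32-bit word of each entry is its type,
	 * used as an index into rsc_handler_table above
	 * (e.g. 3 selects handle_vdev_rsc).
	 */
};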

View file

@ -0,0 +1,3 @@
collect (PROJECT_LIB_SOURCES remote_device.c)
collect (PROJECT_LIB_SOURCES rpmsg.c)
collect (PROJECT_LIB_SOURCES rpmsg_core.c)

View file

@ -0,0 +1,563 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2015 Xilinx, Inc. All rights reserved.
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* remote_device.c
*
* COMPONENT
*
* OpenAMP Stack
*
* DESCRIPTION
*
 * This file provides services to manage the remote devices. It also implements
 * the interface defined by virtio and provides a few other utility functions.
*
*
**************************************************************************/
#include <string.h>
#include <openamp/rpmsg.h>
#include <openamp/remoteproc.h>
#include <metal/utilities.h>
#include <metal/alloc.h>
#include <metal/atomic.h>
#include <metal/cpu.h>
/* Macro to initialize vring HW info */
#define INIT_VRING_ALLOC_INFO(ring_info,vring_hw) \
(ring_info).vaddr = (vring_hw).vaddr; \
(ring_info).align = (vring_hw).align; \
(ring_info).num_descs = (vring_hw).num_descs
/* Local functions */
static int rpmsg_rdev_init_channels(struct remote_device *rdev);
/* Ops table for virtio device */
virtio_dispatch rpmsg_rdev_config_ops = {
rpmsg_rdev_create_virtqueues,
rpmsg_rdev_get_status,
rpmsg_rdev_set_status,
rpmsg_rdev_get_feature,
rpmsg_rdev_set_feature,
rpmsg_rdev_negotiate_feature,
rpmsg_rdev_read_config,
rpmsg_rdev_write_config,
rpmsg_rdev_reset
};
/**
* rpmsg_memb_match
*
 * This internal function checks whether the contents of two memory regions
 * match byte by byte. It is needed because memcmp() or strcmp() do not
 * always work across different memory regions.
*
* @param ptr1 - pointer to memory
* @param ptr2 - pointer to memory
* @param n - number of bytes to compare
*
 * @return 0 if the contents of the two memory regions match, otherwise -1.
*/
static int rpmsg_memb_match(const void *ptr1, const void *ptr2, size_t n)
{
size_t i;
const unsigned char *tmp1, *tmp2;
tmp1 = ptr1;
tmp2 = ptr2;
for (i = 0; i < n; i++, tmp1++, tmp2++) {
if (*tmp1 != *tmp2)
return -1;
}
return 0;
}
/**
* rpmsg_rdev_init
*
* This function creates and initializes the remote device. The remote device
 * encapsulates a virtio device.
*
* @param proc - pointer to hil_proc
* @param rdev - pointer to newly created remote device
* @param role - role of the other device, Master or Remote
* @param channel_created - callback function for channel creation
* @param channel_destroyed - callback function for channel deletion
* @param default_cb - default callback for channel
*
* @return - status of function execution
*
*/
int rpmsg_rdev_init(struct hil_proc *proc,
struct remote_device **rdev, int role,
rpmsg_chnl_cb_t channel_created,
rpmsg_chnl_cb_t channel_destroyed, rpmsg_rx_cb_t default_cb)
{
struct remote_device *rdev_loc;
struct virtio_device *virt_dev;
struct proc_shm *shm;
int status;
if (!proc)
return RPMSG_ERR_PARAM;
/* Initialize HIL data structures for given device */
if (hil_init_proc(proc))
return RPMSG_ERR_DEV_INIT;
/* Create software representation of remote processor. */
rdev_loc = (struct remote_device *)metal_allocate_memory(sizeof(struct remote_device));
if (!rdev_loc) {
return RPMSG_ERR_NO_MEM;
}
memset(rdev_loc, 0x00, sizeof(struct remote_device));
metal_mutex_init(&rdev_loc->lock);
rdev_loc->proc = proc;
rdev_loc->role = role;
rdev_loc->channel_created = channel_created;
rdev_loc->channel_destroyed = channel_destroyed;
rdev_loc->default_cb = default_cb;
/* Restrict the ept address - zero address can't be assigned */
rdev_loc->bitmap[0] = 1;
/* Initialize the virtio device */
virt_dev = &rdev_loc->virt_dev;
virt_dev->device = proc;
virt_dev->func = &rpmsg_rdev_config_ops;
if (virt_dev->func->set_features != RPMSG_NULL) {
virt_dev->func->set_features(virt_dev, proc->vdev.dfeatures);
}
if (rdev_loc->role == RPMSG_REMOTE) {
/*
 * Since the device is the RPMSG Remote, we need to manage the
 * shared buffers. Create a shared memory pool to handle the buffers.
*/
shm = hil_get_shm_info(proc);
rdev_loc->mem_pool =
sh_mem_create_pool(shm->start_addr, shm->size,
RPMSG_BUFFER_SIZE);
if (!rdev_loc->mem_pool) {
return RPMSG_ERR_NO_MEM;
}
}
if (!rpmsg_rdev_remote_ready(rdev_loc))
return RPMSG_ERR_DEV_INIT;
/* Initialize endpoints list */
metal_list_init(&rdev_loc->rp_endpoints);
/* Initialize channels for RPMSG Remote */
status = rpmsg_rdev_init_channels(rdev_loc);
if (status != RPMSG_SUCCESS) {
return status;
}
*rdev = rdev_loc;
return RPMSG_SUCCESS;
}
/**
* rpmsg_rdev_deinit
*
* This function un-initializes the remote device.
*
* @param rdev - pointer to remote device to deinit.
*
* @return - none
*
*/
void rpmsg_rdev_deinit(struct remote_device *rdev)
{
struct metal_list *node;
struct rpmsg_channel *rp_chnl;
struct rpmsg_endpoint *rp_ept;
while(!metal_list_is_empty(&rdev->rp_channels)) {
node = rdev->rp_channels.next;
rp_chnl = metal_container_of(node, struct rpmsg_channel, node);
if (rdev->channel_destroyed) {
rdev->channel_destroyed(rp_chnl);
}
if ((rdev->support_ns) && (rdev->role == RPMSG_MASTER)) {
rpmsg_send_ns_message(rdev, rp_chnl, RPMSG_NS_DESTROY);
}
/* Delete default endpoint for channel */
if (rp_chnl->rp_ept) {
rpmsg_destroy_ept(rp_chnl->rp_ept);
}
_rpmsg_delete_channel(rp_chnl);
}
/* Delete name service endpoint */
metal_mutex_acquire(&rdev->lock);
rp_ept = rpmsg_rdev_get_endpoint_from_addr(rdev, RPMSG_NS_EPT_ADDR);
metal_mutex_release(&rdev->lock);
if (rp_ept) {
_destroy_endpoint(rdev, rp_ept);
}
metal_mutex_acquire(&rdev->lock);
rdev->rvq = 0;
rdev->tvq = 0;
if (rdev->mem_pool) {
sh_mem_delete_pool(rdev->mem_pool);
rdev->mem_pool = 0;
}
metal_mutex_release(&rdev->lock);
hil_free_vqs(&rdev->virt_dev);
metal_mutex_deinit(&rdev->lock);
metal_free_memory(rdev);
}
/**
* rpmsg_rdev_get_chnl_from_id
*
* This function returns channel node based on channel name. It must be called
* with mutex locked.
*
 * @param rdev - pointer to remote device
* @param rp_chnl_id - rpmsg channel name
*
* @return - rpmsg channel
*
*/
struct rpmsg_channel *rpmsg_rdev_get_chnl_from_id(struct remote_device *rdev,
char *rp_chnl_id)
{
struct rpmsg_channel *rp_chnl;
struct metal_list *node;
metal_list_for_each(&rdev->rp_channels, node) {
rp_chnl = metal_container_of(node, struct rpmsg_channel, node);
if (!rpmsg_memb_match(rp_chnl->name, rp_chnl_id,
sizeof(rp_chnl->name))) {
return rp_chnl;
}
}
return RPMSG_NULL;
}
/**
* rpmsg_rdev_get_endpoint_from_addr
*
* This function returns endpoint node based on src address. It must be called
* with mutex locked.
*
* @param rdev - pointer remote device control block
* @param addr - src address
*
* @return - rpmsg endpoint
*
*/
struct rpmsg_endpoint *rpmsg_rdev_get_endpoint_from_addr(struct remote_device *rdev,
unsigned long addr)
{
struct rpmsg_endpoint *rp_ept;
struct metal_list *node;
metal_list_for_each(&rdev->rp_endpoints, node) {
rp_ept = metal_container_of(node,
struct rpmsg_endpoint, node);
if (rp_ept->addr == addr) {
return rp_ept;
}
}
return RPMSG_NULL;
}
/*
* rpmsg_rdev_notify
*
* This function checks whether remote device is up or not. If it is up then
* notification is sent based on device role to start IPC.
*
* @param rdev - pointer to remote device
*
* @return - status of function execution
*
*/
int rpmsg_rdev_notify(struct remote_device *rdev)
{
struct virtio_device *vdev = &rdev->virt_dev;
hil_vdev_notify(vdev);
return RPMSG_SUCCESS;
}
/**
* rpmsg_rdev_init_channels
*
* This function is only applicable to RPMSG remote. It obtains channel IDs
* from the HIL and creates RPMSG channels corresponding to each ID.
*
* @param rdev - pointer to remote device
*
* @return - status of function execution
*
*/
int rpmsg_rdev_init_channels(struct remote_device *rdev)
{
struct rpmsg_channel *rp_chnl;
struct proc_chnl *chnl_info;
int num_chnls, idx;
metal_list_init(&rdev->rp_channels);
if (rdev->role == RPMSG_MASTER) {
chnl_info = hil_get_chnl_info(rdev->proc, &num_chnls);
for (idx = 0; idx < num_chnls; idx++) {
rp_chnl =
_rpmsg_create_channel(rdev, chnl_info[idx].name,
0x00, RPMSG_NS_EPT_ADDR);
if (!rp_chnl) {
return RPMSG_ERR_NO_MEM;
}
rp_chnl->rp_ept =
rpmsg_create_ept(rp_chnl, rdev->default_cb, rdev,
RPMSG_ADDR_ANY);
if (!rp_chnl->rp_ept) {
return RPMSG_ERR_NO_MEM;
}
rp_chnl->src = rp_chnl->rp_ept->addr;
}
}
return RPMSG_SUCCESS;
}
/**
* check if the remote is ready to start RPMsg communication
*/
int rpmsg_rdev_remote_ready(struct remote_device *rdev)
{
struct virtio_device *vdev = &rdev->virt_dev;
uint8_t status;
if (rdev->role == RPMSG_MASTER) {
while (1) {
status = vdev->func->get_status(vdev);
/* Busy wait until the remote is ready */
if (status & VIRTIO_CONFIG_STATUS_NEEDS_RESET) {
rpmsg_rdev_set_status(vdev, 0);
hil_vdev_notify(vdev);
} else if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) {
return true;
}
metal_cpu_yield();
}
} else {
return true;
}
/* Never reached */
return false;
}
/**
*------------------------------------------------------------------------
* The rest of the file implements the virtio device interface as defined
* by the virtio.h file.
*------------------------------------------------------------------------
*/
int rpmsg_rdev_create_virtqueues(struct virtio_device *dev, int flags, int nvqs,
const char *names[], vq_callback * callbacks[],
struct virtqueue *vqs_[])
{
struct remote_device *rdev;
struct vring_alloc_info ring_info;
struct virtqueue *vqs[RPMSG_MAX_VQ_PER_RDEV];
struct proc_vring *vring_table;
void *buffer;
struct metal_sg sg;
int idx, num_vrings, status;
(void)flags;
(void)vqs_;
rdev = (struct remote_device *)dev;
/* Get the vring HW info for the given virtio device */
vring_table = hil_get_vring_info(&rdev->proc->vdev, &num_vrings);
if (num_vrings > nvqs) {
return RPMSG_ERR_MAX_VQ;
}
/* Create virtqueue for each vring. */
for (idx = 0; idx < num_vrings; idx++) {
INIT_VRING_ALLOC_INFO(ring_info, vring_table[idx]);
if (rdev->role == RPMSG_REMOTE) {
metal_io_block_set(vring_table[idx].io,
metal_io_virt_to_offset(vring_table[idx].io,
ring_info.vaddr),
0x00,
vring_size(vring_table[idx].num_descs,
vring_table[idx].align));
}
status =
virtqueue_create(dev, idx, (char *)names[idx], &ring_info,
callbacks[idx], hil_vring_notify,
rdev->proc->sh_buff.io,
&vqs[idx]);
if (status != RPMSG_SUCCESS) {
return status;
}
}
/* FIXME: find a better way to handle this; tx for the master is rx for the remote and vice versa. */
if (rdev->role == RPMSG_MASTER) {
rdev->tvq = vqs[0];
rdev->rvq = vqs[1];
} else {
rdev->tvq = vqs[1];
rdev->rvq = vqs[0];
}
if (rdev->role == RPMSG_REMOTE) {
sg.io = rdev->proc->sh_buff.io;
sg.len = RPMSG_BUFFER_SIZE;
for (idx = 0; ((idx < rdev->rvq->vq_nentries)
&& ((unsigned)idx < rdev->mem_pool->total_buffs / 2));
idx++) {
/* Initialize TX virtqueue buffers for remote device */
buffer = sh_mem_get_buffer(rdev->mem_pool);
if (!buffer) {
return RPMSG_ERR_NO_BUFF;
}
sg.virt = buffer;
metal_io_block_set(sg.io,
metal_io_virt_to_offset(sg.io, buffer),
0x00,
RPMSG_BUFFER_SIZE);
status =
virtqueue_add_buffer(rdev->rvq, &sg, 0, 1,
buffer);
if (status != RPMSG_SUCCESS) {
return status;
}
}
}
return RPMSG_SUCCESS;
}
unsigned char rpmsg_rdev_get_status(struct virtio_device *dev)
{
struct hil_proc *proc = dev->device;
struct proc_vdev *pvdev = &proc->vdev;
struct fw_rsc_vdev *vdev_rsc = pvdev->vdev_info;
if (!vdev_rsc)
return -1;
atomic_thread_fence(memory_order_seq_cst);
return vdev_rsc->status;
}
void rpmsg_rdev_set_status(struct virtio_device *dev, unsigned char status)
{
struct hil_proc *proc = dev->device;
struct proc_vdev *pvdev = &proc->vdev;
struct fw_rsc_vdev *vdev_rsc = pvdev->vdev_info;
if (!vdev_rsc)
return;
vdev_rsc->status = status;
atomic_thread_fence(memory_order_seq_cst);
}
uint32_t rpmsg_rdev_get_feature(struct virtio_device *dev)
{
return dev->features;
}
void rpmsg_rdev_set_feature(struct virtio_device *dev, uint32_t feature)
{
dev->features |= feature;
}
uint32_t rpmsg_rdev_negotiate_feature(struct virtio_device *dev,
uint32_t features)
{
(void)dev;
(void)features;
return 0;
}
/*
 * Read/write a variable amount from the device-specific (i.e., network)
* configuration region. This region is encoded in the same endian as
* the guest.
*/
void rpmsg_rdev_read_config(struct virtio_device *dev, uint32_t offset,
void *dst, int length)
{
(void)dev;
(void)offset;
(void)dst;
(void)length;
return;
}
void rpmsg_rdev_write_config(struct virtio_device *dev, uint32_t offset,
void *src, int length)
{
(void)dev;
(void)offset;
(void)src;
(void)length;
return;
}
void rpmsg_rdev_reset(struct virtio_device *dev)
{
(void)dev;
return;
}
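
/*
 * Illustrative sketch only (not part of the imported source): the lookup
 * helpers above must be called with the device lock held. The channel name
 * used here is a hypothetical example.
 */
static struct rpmsg_channel *example_find_channel(struct remote_device *rdev)
{
	static char name[] = "rpmsg-openamp-demo-channel";
	struct rpmsg_channel *rp_chnl;

	metal_mutex_acquire(&rdev->lock);
	rp_chnl = rpmsg_rdev_get_chnl_from_id(rdev, name);
	metal_mutex_release(&rdev->lock);
	return rp_chnl;
}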

View file

@ -0,0 +1,531 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* rpmsg.c
*
* COMPONENT
*
* OpenAMP stack.
*
* DESCRIPTION
*
 * Main file for the RPMSG driver. This file implements APIs as defined by the
 * RPMSG documentation (Linux docs) and also provides some utility functions.
*
 * The RPMSG driver represents each processor/core with which it communicates
 * by a remote_device control block.
 * Each remote device (processor) defines its role in the communication, i.e.
 * whether it is the RPMSG Master or Remote. If the device (processor) the
 * driver is talking to is the RPMSG Master, then the RPMSG driver implicitly
 * behaves as the Remote, and vice versa.
 * The RPMSG Master is responsible for initiating communication with the
 * Remote and for shared buffer management. The terms remote device/core/proc
 * are used interchangeably for the processor with which the RPMSG driver is
 * communicating, irrespective of whether it is the RPMSG Remote or Master.
*
**************************************************************************/
#include <string.h>
#include <openamp/rpmsg.h>
#include <metal/sys.h>
#include <metal/assert.h>
#include <metal/cache.h>
#include <metal/sleep.h>
/**
* rpmsg_init
*
 * This function allocates and initializes the rpmsg driver resources for
 * the given hil_proc. A successful return from this function leaves a
 * fully enabled IPC link.
*
* @param proc - pointer to hil_proc
* @param rdev - pointer to newly created remote device
* @param channel_created - callback function for channel creation
* @param channel_destroyed - callback function for channel deletion
* @param default_cb - default callback for channel I/O
* @param role - role of the other device, Master or Remote
*
* @return - status of function execution
*
*/
int rpmsg_init(struct hil_proc *proc,
struct remote_device **rdev,
rpmsg_chnl_cb_t channel_created,
rpmsg_chnl_cb_t channel_destroyed,
rpmsg_rx_cb_t default_cb, int role)
{
int status;
/* Initialize the remote device for given cpu id */
status = rpmsg_rdev_init(proc, rdev, role,
channel_created,
channel_destroyed, default_cb);
if (status == RPMSG_SUCCESS) {
/* Kick off IPC with the remote device */
status = rpmsg_start_ipc(*rdev);
}
/* Deinit system in case of error */
if (status != RPMSG_SUCCESS) {
rpmsg_deinit(*rdev);
}
return status;
}
/**
* rpmsg_deinit
*
 * This function frees the rpmsg driver resources for the given remote device.
*
* @param rdev - pointer to device to de-init
*
*/
void rpmsg_deinit(struct remote_device *rdev)
{
if (rdev) {
rpmsg_rdev_deinit(rdev);
}
}
/**
 * This function sends an rpmsg "message" to the remote device.
*
* @param rp_chnl - pointer to rpmsg channel
* @param src - source address of channel
* @param dst - destination address of channel
* @param data - data to transmit
* @param size - size of data
* @param wait - boolean, wait or not for buffer to become
* available
*
* @return - size of data sent or negative value for failure.
*
*/
int rpmsg_send_offchannel_raw(struct rpmsg_channel *rp_chnl, uint32_t src,
uint32_t dst, const void *data,
int size, int wait)
{
struct remote_device *rdev;
struct rpmsg_hdr rp_hdr;
void *buffer;
unsigned short idx;
int tick_count = 0;
unsigned long buff_len;
int ret;
struct metal_io_region *io;
if (!rp_chnl || !data) {
return RPMSG_ERR_PARAM;
}
/* Get the associated remote device for channel. */
rdev = rp_chnl->rdev;
/* Validate device state */
if (rp_chnl->state != RPMSG_CHNL_STATE_ACTIVE
|| rdev->state != RPMSG_DEV_STATE_ACTIVE) {
return RPMSG_ERR_DEV_STATE;
}
if (size > (rpmsg_get_buffer_size(rp_chnl))) {
return RPMSG_ERR_BUFF_SIZE;
}
/* Lock the device to enable exclusive access to virtqueues */
metal_mutex_acquire(&rdev->lock);
/* Get rpmsg buffer for sending message. */
buffer = rpmsg_get_tx_buffer(rdev, &buff_len, &idx);
/* Unlock the device */
metal_mutex_release(&rdev->lock);
if (!buffer && !wait) {
return RPMSG_ERR_NO_BUFF;
}
while (!buffer) {
/*
 * The wait parameter is true - poll for a buffer for up to
 * 15 secs as defined by the APIs.
*/
metal_sleep_usec(RPMSG_TICKS_PER_INTERVAL);
metal_mutex_acquire(&rdev->lock);
buffer = rpmsg_get_tx_buffer(rdev, &buff_len, &idx);
metal_mutex_release(&rdev->lock);
tick_count += RPMSG_TICKS_PER_INTERVAL;
if (!buffer && (tick_count >=
(RPMSG_TICK_COUNT / RPMSG_TICKS_PER_INTERVAL))) {
return RPMSG_ERR_NO_BUFF;
}
}
/* Initialize RPMSG header. */
rp_hdr.dst = dst;
rp_hdr.src = src;
rp_hdr.len = size;
rp_hdr.reserved = 0;
/* Copy data to rpmsg buffer. */
io = rdev->proc->sh_buff.io;
metal_io_block_write(io,
metal_io_virt_to_offset(io, buffer),
&rp_hdr, sizeof(rp_hdr));
metal_io_block_write(io,
metal_io_virt_to_offset(io, RPMSG_LOCATE_DATA(buffer)),
data, size);
metal_mutex_acquire(&rdev->lock);
/* Enqueue buffer on virtqueue. */
ret = rpmsg_enqueue_buffer(rdev, buffer, buff_len, idx);
metal_assert(ret == VQUEUE_SUCCESS);
/* Let the other side know that there is a job to process. */
virtqueue_kick(rdev->tvq);
metal_mutex_release(&rdev->lock);
return size;
}
/**
* rpmsg_get_buffer_size
*
* Returns buffer size available for sending messages.
*
* @param rp_chnl - pointer to rpmsg channel
*
* @return - buffer size
*
*/
int rpmsg_get_buffer_size(struct rpmsg_channel *rp_chnl)
{
struct remote_device *rdev;
int length;
/* Get associated remote device for channel. */
rdev = rp_chnl->rdev;
metal_mutex_acquire(&rdev->lock);
if (rdev->role == RPMSG_REMOTE) {
/*
* If the other device's role is Remote then buffers are provided by us
* (the RPMSG Master), so just return the statically defined buffer size.
*/
length = RPMSG_BUFFER_SIZE - sizeof(struct rpmsg_hdr);
} else {
/*
* If other core is Master then buffers are provided by it,
* so get the buffer size from the virtqueue.
*/
length =
(int)virtqueue_get_desc_size(rdev->tvq) -
sizeof(struct rpmsg_hdr);
}
metal_mutex_release(&rdev->lock);
return length;
}
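/*
* Usage sketch (illustrative only): send a payload on an active channel,
* blocking for a transmit buffer when none is immediately available. The
* convenience rpmsg_send()-style wrappers are not defined in this file, so
* the raw call is shown with the channel's own source/destination addresses.
*
*   const char msg[] = "hello";
*
*   if ((int)sizeof(msg) <= rpmsg_get_buffer_size(chnl)) {
*       int sent = rpmsg_send_offchannel_raw(chnl, chnl->src, chnl->dst,
*                                            msg, sizeof(msg), RPMSG_TRUE);
*       if (sent < 0)
*           ;  // negative value indicates an RPMSG_ERR_* failure
*   }
*/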
void rpmsg_hold_rx_buffer(struct rpmsg_channel *rpdev, void *rxbuf)
{
struct rpmsg_hdr *rp_hdr = NULL;
if (!rpdev || !rxbuf)
return;
rp_hdr = RPMSG_HDR_FROM_BUF(rxbuf);
/* set held status to keep buffer */
rp_hdr->reserved |= RPMSG_BUF_HELD;
}
void rpmsg_release_rx_buffer(struct rpmsg_channel *rpdev, void *rxbuf)
{
struct rpmsg_hdr *hdr;
struct remote_device *rdev;
struct rpmsg_hdr_reserved * reserved = NULL;
unsigned int len;
if (!rpdev || !rxbuf)
return;
rdev = rpdev->rdev;
hdr = RPMSG_HDR_FROM_BUF(rxbuf);
/* Get the pointer to the reserved field that contains buffer size
* and the index */
reserved = (struct rpmsg_hdr_reserved*)&hdr->reserved;
hdr->reserved &= (~RPMSG_BUF_HELD);
len = (unsigned int)virtqueue_get_buffer_length(rdev->rvq,
reserved->idx);
metal_mutex_acquire(&rdev->lock);
/* Return used buffer, with total length
(header length + buffer size). */
rpmsg_return_buffer(rdev, hdr, (unsigned long)len, reserved->idx);
metal_mutex_release(&rdev->lock);
}
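/*
* Usage sketch (illustrative only): keep a received buffer alive past the Rx
* callback and return it later. The hold must happen inside the callback,
* before rpmsg_rx_callback() decides whether to hand the buffer back to the
* virtqueue.
*
*   static void *deferred_buf;
*
*   static void app_rx_cb(struct rpmsg_channel *chnl, void *data, int len,
*                         void *priv, unsigned long src)
*   {
*       rpmsg_hold_rx_buffer(chnl, data);  // buffer stays valid after return
*       deferred_buf = data;
*   }
*
*   // later, once the data has been consumed:
*   rpmsg_release_rx_buffer(chnl, deferred_buf);
*/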
void *rpmsg_get_tx_payload_buffer(struct rpmsg_channel *rpdev, uint32_t *size,
int wait)
{
struct rpmsg_hdr *hdr;
struct remote_device *rdev;
struct rpmsg_hdr_reserved *reserved;
unsigned short idx;
unsigned long buff_len, tick_count = 0;
if (!rpdev || !size)
return NULL;
rdev = rpdev->rdev;
metal_mutex_acquire(&rdev->lock);
/* Get tx buffer from vring */
hdr = (struct rpmsg_hdr *) rpmsg_get_tx_buffer(rdev, &buff_len, &idx);
metal_mutex_release(&rdev->lock);
if (!hdr && !wait) {
return NULL;
} else {
while (!hdr) {
/*
* The wait parameter is true - poll for a buffer for up to
* 15 secs as defined by the APIs.
*/
metal_sleep_usec(RPMSG_TICKS_PER_INTERVAL);
metal_mutex_acquire(&rdev->lock);
hdr = (struct rpmsg_hdr *) rpmsg_get_tx_buffer(rdev, &buff_len, &idx);
metal_mutex_release(&rdev->lock);
tick_count += RPMSG_TICKS_PER_INTERVAL;
if (tick_count >= (RPMSG_TICK_COUNT / RPMSG_TICKS_PER_INTERVAL)) {
return NULL;
}
}
/* Store the index into the reserved field to be used when sending */
reserved = (struct rpmsg_hdr_reserved*)&hdr->reserved;
reserved->idx = (uint16_t)idx;
/* Actual data buffer size is vring buffer size minus rpmsg header length */
*size = (uint32_t)(buff_len - sizeof(struct rpmsg_hdr));
return (void *)RPMSG_LOCATE_DATA(hdr);
}
}
int rpmsg_send_offchannel_nocopy(struct rpmsg_channel *rpdev, uint32_t src,
uint32_t dst, void *txbuf, int len)
{
struct rpmsg_hdr *hdr;
struct remote_device *rdev;
struct rpmsg_hdr_reserved * reserved = NULL;
int status;
if (!rpdev || !txbuf)
return RPMSG_ERR_PARAM;
rdev = rpdev->rdev;
hdr = RPMSG_HDR_FROM_BUF(txbuf);
/* Initialize RPMSG header. */
hdr->dst = dst;
hdr->src = src;
hdr->len = len;
hdr->flags = 0;
hdr->reserved &= (~RPMSG_BUF_HELD);
/* Get the pointer to the reserved field that contains buffer size and
* the index */
reserved = (struct rpmsg_hdr_reserved*)&hdr->reserved;
metal_mutex_acquire(&rdev->lock);
status = rpmsg_enqueue_buffer(rdev, hdr,
(unsigned long)virtqueue_get_buffer_length(
rdev->tvq, reserved->idx),
reserved->idx);
if (status == RPMSG_SUCCESS) {
/* Let the other side know that there is a job to process. */
virtqueue_kick(rdev->tvq);
/* Return size of data sent */
status = len;
}
metal_mutex_release(&rdev->lock);
return status;
}
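/*
* Usage sketch (illustrative only): zero-copy transmit path. A transmit
* buffer is reserved first, filled in place, and then handed over with the
* no-copy send, so the payload is never copied through an intermediate
* buffer.
*
*   uint32_t avail;
*   void *payload = rpmsg_get_tx_payload_buffer(chnl, &avail, RPMSG_TRUE);
*
*   if (payload) {
*       const char msg[] = "sample";
*       uint32_t len = sizeof(msg) <= avail ? sizeof(msg) : avail;
*
*       memcpy(payload, msg, len);
*       int ret = rpmsg_send_offchannel_nocopy(chnl, chnl->src, chnl->dst,
*                                              payload, (int)len);
*       // ret is the number of bytes sent on success,
*       // otherwise an RPMSG_ERR_* code
*   }
*/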
/**
* rpmsg_create_ept
*
* This function creates rpmsg endpoint for the rpmsg channel.
*
* @param rp_chnl - pointer to rpmsg channel
* @param cb - Rx completion call back
* @param priv - private data
* @param addr - endpoint src address
*
* @return - pointer to endpoint control block
*
*/
struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rp_chnl,
rpmsg_rx_cb_t cb, void *priv,
uint32_t addr)
{
struct remote_device *rdev = RPMSG_NULL;
struct rpmsg_endpoint *rp_ept = RPMSG_NULL;
if (!rp_chnl || !cb) {
return RPMSG_NULL;
}
rdev = rp_chnl->rdev;
metal_mutex_acquire(&rdev->lock);
rp_ept = rpmsg_rdev_get_endpoint_from_addr(rdev, addr);
metal_mutex_release(&rdev->lock);
if (!rp_ept) {
rp_ept = _create_endpoint(rdev, cb, priv, addr);
if (rp_ept) {
rp_ept->rp_chnl = rp_chnl;
}
} else {
return RPMSG_NULL;
}
return rp_ept;
}
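/*
* Usage sketch (illustrative only): bind an additional endpoint, with its own
* receive callback, to an existing channel and let the library pick a free
* source address from the address bitmap.
*
*   static void sensor_rx_cb(struct rpmsg_channel *chnl, void *data, int len,
*                            void *priv, unsigned long src) { }
*
*   struct rpmsg_endpoint *ept =
*       rpmsg_create_ept(chnl, sensor_rx_cb, NULL, RPMSG_ADDR_ANY);
*   if (!ept)
*       ;  // address already in use or out of memory
*
*   // when the endpoint is no longer needed:
*   rpmsg_destroy_ept(ept);
*/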
/**
* rpmsg_destroy_ept
*
* This function deletes rpmsg endpoint and performs cleanup.
*
* @param rp_ept - pointer to endpoint to destroy
*
*/
void rpmsg_destroy_ept(struct rpmsg_endpoint *rp_ept)
{
struct remote_device *rdev;
struct rpmsg_channel *rp_chnl;
if (!rp_ept)
return;
rp_chnl = rp_ept->rp_chnl;
rdev = rp_chnl->rdev;
_destroy_endpoint(rdev, rp_ept);
}
/**
* rpmsg_create_channel
*
* This function provides a facility to create a channel dynamically. It sends
* a Name Service announcement to the remote device to let it know about the
* channel creation. There must be active communication between the cores (or
* at least one rpmsg channel must already exist) before using this API to
* create new channels.
*
* @param rdev - pointer to remote device
* @param name - channel name
*
* @return - pointer to new rpmsg channel
*
*/
struct rpmsg_channel *rpmsg_create_channel(struct remote_device *rdev,
char *name)
{
struct rpmsg_channel *rp_chnl;
struct rpmsg_endpoint *rp_ept;
if (!rdev || !name) {
return RPMSG_NULL;
}
/* Create channel instance */
rp_chnl = _rpmsg_create_channel(rdev, name, RPMSG_NS_EPT_ADDR,
RPMSG_NS_EPT_ADDR);
if (!rp_chnl) {
return RPMSG_NULL;
}
/* Create default endpoint for the channel */
rp_ept = rpmsg_create_ept(rp_chnl, rdev->default_cb, rdev,
RPMSG_ADDR_ANY);
if (!rp_ept) {
_rpmsg_delete_channel(rp_chnl);
return RPMSG_NULL;
}
rp_chnl->rp_ept = rp_ept;
rp_chnl->src = rp_ept->addr;
rp_chnl->state = RPMSG_CHNL_STATE_NS;
/* Notify the application of channel creation event */
if (rdev->channel_created) {
rdev->channel_created(rp_chnl);
}
/* Send NS announcement to remote processor */
rpmsg_send_ns_message(rdev, rp_chnl, RPMSG_NS_CREATE);
return rp_chnl;
}
/**
* rpmsg_delete_channel
*
* Deletes the given RPMSG channel. The channel must first be created with the
* rpmsg_create_channel API.
*
* @param rp_chnl - pointer to rpmsg channel to delete
*
*/
void rpmsg_delete_channel(struct rpmsg_channel *rp_chnl)
{
struct remote_device *rdev;
if (!rp_chnl) {
return;
}
rdev = rp_chnl->rdev;
if (rp_chnl->state > RPMSG_CHNL_STATE_IDLE) {
/* Notify the other processor that channel no longer exists */
rpmsg_send_ns_message(rdev, rp_chnl, RPMSG_NS_DESTROY);
}
/* Notify channel deletion to application */
if (rdev->channel_destroyed) {
rdev->channel_destroyed(rp_chnl);
}
rpmsg_destroy_ept(rp_chnl->rp_ept);
_rpmsg_delete_channel(rp_chnl);
return;
}
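/*
* Usage sketch (illustrative only): dynamically announce a new channel once
* the link is up (at least one channel must already exist, as noted above)
* and tear it down again later. The channel name must match what the peer's
* name service handling expects; "rpmsg-sample" is only a placeholder.
*
*   char name[] = "rpmsg-sample";
*   struct rpmsg_channel *chnl = rpmsg_create_channel(rdev, name);
*   if (!chnl)
*       ;  // allocation failed or no endpoint address available
*
*   // ... exchange messages on chnl ...
*
*   rpmsg_delete_channel(chnl);
*/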

View file

@ -0,0 +1,821 @@
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2015 Xilinx, Inc. All rights reserved.
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* rpmsg_core.c
*
* COMPONENT
*
* OpenAMP
*
* DESCRIPTION
*
* This file provides the core functionality of the RPMSG messaging layer,
* such as message parsing, Rx/Tx callback handling, channel creation/deletion
* and address management.
*
*
**************************************************************************/
#include <string.h>
#include <openamp/rpmsg.h>
#include <metal/utilities.h>
#include <metal/io.h>
#include <metal/cache.h>
#include <metal/alloc.h>
#include <metal/cpu.h>
/* Internal functions */
static void rpmsg_rx_callback(struct virtqueue *vq);
static void rpmsg_tx_callback(struct virtqueue *vq);
/**
* rpmsg_memb_cpy
*
* This function copies data from one memory region to another byte by byte.
*
* RPMsg can be used across different memory regions, where
* memcpy/strncpy do not always work.
*
* @param dest - pointer to target memory
* @param src - pointer to source memory
* @param n - number of bytes to copy
*
* @return pointer to dest
*/
static void *rpmsg_memb_cpy(void *dest, const void *src, size_t n)
{
size_t i;
unsigned char *tmp_dest;
const unsigned char *tmp_src;
tmp_dest = dest;
tmp_src = src;
for (i = 0; i < n; i++, tmp_dest++, tmp_src++)
*tmp_dest = *tmp_src;
return dest;
}
/**
* rpmsg_start_ipc
*
* This function creates communication links (virtqueues) for the remote
* device and notifies it to start IPC.
*
* @param rdev - remote device handle
*
* @return - status of function execution
*
*/
int rpmsg_start_ipc(struct remote_device *rdev)
{
struct virtio_device *virt_dev;
struct rpmsg_endpoint *ns_ept;
void (*callback[2]) (struct virtqueue * vq);
const char *vq_names[2];
unsigned long dev_features;
int status;
struct virtqueue *vqs[2];
int i;
virt_dev = &rdev->virt_dev;
/* Initialize names and callbacks based on the device role */
if (rdev->role == RPMSG_MASTER) {
vq_names[0] = "tx_vq";
vq_names[1] = "rx_vq";
callback[0] = rpmsg_tx_callback;
callback[1] = rpmsg_rx_callback;
} else {
vq_names[0] = "rx_vq";
vq_names[1] = "tx_vq";
callback[0] = rpmsg_rx_callback;
callback[1] = rpmsg_tx_callback;
}
/* Create virtqueues for remote device */
status = virt_dev->func->create_virtqueues(virt_dev, 0,
RPMSG_MAX_VQ_PER_RDEV,
vq_names, callback,
RPMSG_NULL);
if (status != RPMSG_SUCCESS)
return status;
dev_features = virt_dev->func->get_features(virt_dev);
/*
* Create name service announcement endpoint if device supports name
* service announcement feature.
*/
if ((dev_features & (1 << VIRTIO_RPMSG_F_NS))) {
rdev->support_ns = RPMSG_TRUE;
ns_ept = _create_endpoint(rdev, rpmsg_ns_callback, rdev,
RPMSG_NS_EPT_ADDR);
if (!ns_ept) {
return RPMSG_ERR_NO_MEM;
}
}
/* Initialize notifications for vring. */
if (rdev->role == RPMSG_MASTER) {
vqs[0] = rdev->tvq;
vqs[1] = rdev->rvq;
} else {
vqs[0] = rdev->rvq;
vqs[1] = rdev->tvq;
}
for (i = 0; i <= 1; i++) {
status = hil_enable_vring_notifications(i, vqs[i]);
if (status != RPMSG_SUCCESS) {
return status;
}
}
if (rdev->role == RPMSG_REMOTE) {
virt_dev->func->set_status(virt_dev,
VIRTIO_CONFIG_STATUS_DRIVER_OK);
status = rpmsg_rdev_notify(rdev);
}
if (status == RPMSG_SUCCESS)
rdev->state = RPMSG_DEV_STATE_ACTIVE;
return status;
}
/**
* _rpmsg_create_channel
*
* Creates new rpmsg channel with the given parameters.
*
* @param rdev - pointer to remote device which contains the channel
* @param name - channel name
* @param src - source address for the rpmsg channel
* @param dst - destination address for the rpmsg channel
*
* @return - pointer to new rpmsg channel
*
*/
struct rpmsg_channel *_rpmsg_create_channel(struct remote_device *rdev,
char *name, unsigned long src,
unsigned long dst)
{
struct rpmsg_channel *rp_chnl;
rp_chnl = metal_allocate_memory(sizeof(struct rpmsg_channel));
if (rp_chnl) {
memset(rp_chnl, 0x00, sizeof(struct rpmsg_channel));
rpmsg_memb_cpy(rp_chnl->name, name, sizeof(rp_chnl->name)-1);
rp_chnl->src = src;
rp_chnl->dst = dst;
rp_chnl->rdev = rdev;
/* Place channel on channels list */
metal_mutex_acquire(&rdev->lock);
metal_list_add_tail(&rdev->rp_channels, &rp_chnl->node);
metal_mutex_release(&rdev->lock);
}
return rp_chnl;
}
/**
* _rpmsg_delete_channel
*
* Deletes given rpmsg channel.
*
* @param rp_chnl - pointer to rpmsg channel to delete
*
* return - none
*/
void _rpmsg_delete_channel(struct rpmsg_channel *rp_chnl)
{
if (rp_chnl) {
metal_mutex_acquire(&rp_chnl->rdev->lock);
metal_list_del(&rp_chnl->node);
metal_mutex_release(&rp_chnl->rdev->lock);
metal_free_memory(rp_chnl);
}
}
/**
* _create_endpoint
*
* This function creates rpmsg endpoint.
*
* @param rdev - pointer to remote device
* @param cb - Rx completion call back
* @param priv - private data
* @param addr - endpoint src address
*
* @return - pointer to endpoint control block
*
*/
struct rpmsg_endpoint *_create_endpoint(struct remote_device *rdev,
rpmsg_rx_cb_t cb, void *priv,
unsigned long addr)
{
struct rpmsg_endpoint *rp_ept;
int status = RPMSG_SUCCESS;
rp_ept = metal_allocate_memory(sizeof(struct rpmsg_endpoint));
if (!rp_ept) {
return RPMSG_NULL;
}
memset(rp_ept, 0x00, sizeof(struct rpmsg_endpoint));
metal_mutex_acquire(&rdev->lock);
if (addr != RPMSG_ADDR_ANY) {
/*
* Application has requested a particular src address for endpoint,
* first check if address is available.
*/
if (!rpmsg_is_address_set
(rdev->bitmap, RPMSG_ADDR_BMP_SIZE, addr)) {
/* Mark the address as used in the address bitmap. */
rpmsg_set_address(rdev->bitmap, RPMSG_ADDR_BMP_SIZE,
addr);
} else {
status = RPMSG_ERR_DEV_ADDR;
}
} else {
addr = rpmsg_get_address(rdev->bitmap, RPMSG_ADDR_BMP_SIZE);
if ((int)addr < 0) {
status = RPMSG_ERR_DEV_ADDR;
}
}
/* Do cleanup in case of error and return */
if (RPMSG_SUCCESS != status) {
metal_free_memory(rp_ept);
metal_mutex_release(&rdev->lock);
return RPMSG_NULL;
}
rp_ept->addr = addr;
rp_ept->cb = cb;
rp_ept->priv = priv;
metal_list_add_tail(&rdev->rp_endpoints, &rp_ept->node);
metal_mutex_release(&rdev->lock);
return rp_ept;
}
/**
* _destroy_endpoint
*
* This function deletes rpmsg endpoint and performs cleanup.
*
* @param rdev - pointer to remote device
* @param rp_ept - pointer to endpoint to destroy
*
*/
void _destroy_endpoint(struct remote_device *rdev,
struct rpmsg_endpoint *rp_ept)
{
metal_mutex_acquire(&rdev->lock);
rpmsg_release_address(rdev->bitmap, RPMSG_ADDR_BMP_SIZE,
rp_ept->addr);
metal_list_del(&rp_ept->node);
metal_mutex_release(&rdev->lock);
/* free node and rp_ept */
metal_free_memory(rp_ept);
}
/**
* rpmsg_send_ns_message
*
* Sends name service announcement to remote device
*
* @param rdev - pointer to remote device
* @param rp_chnl - pointer to rpmsg channel
* @param flags - Channel creation/deletion flags
*
* @return - status of function execution
*
*/
int rpmsg_send_ns_message(struct remote_device *rdev,
struct rpmsg_channel *rp_chnl, unsigned long flags)
{
struct rpmsg_hdr rp_hdr;
struct rpmsg_ns_msg ns_msg;
unsigned short idx;
unsigned long len;
struct metal_io_region *io;
void *shbuf;
metal_mutex_acquire(&rdev->lock);
/* Get Tx buffer. */
shbuf = rpmsg_get_tx_buffer(rdev, &len, &idx);
if (!shbuf) {
metal_mutex_release(&rdev->lock);
return -RPMSG_ERR_NO_BUFF;
}
/* Fill out name service data. */
rp_hdr.dst = RPMSG_NS_EPT_ADDR;
rp_hdr.len = sizeof(ns_msg);
ns_msg.flags = flags;
ns_msg.addr = rp_chnl->src;
strncpy(ns_msg.name, rp_chnl->name, sizeof(ns_msg.name));
io = rdev->proc->sh_buff.io;
metal_io_block_write(io, metal_io_virt_to_offset(io, shbuf),
&rp_hdr, sizeof(rp_hdr));
metal_io_block_write(io,
metal_io_virt_to_offset(io, RPMSG_LOCATE_DATA(shbuf)),
&ns_msg, rp_hdr.len);
/* Place the buffer on virtqueue. */
rpmsg_enqueue_buffer(rdev, shbuf, len, idx);
/* Notify the other side that it has data to process. */
virtqueue_kick(rdev->tvq);
metal_mutex_release(&rdev->lock);
return RPMSG_SUCCESS;
}
/**
* rpmsg_enqueue_buffer
*
* Places buffer on the virtqueue for consumption by the other side.
*
* @param rdev - pointer to remote core
* @param buffer - buffer pointer
* @param len - buffer length
* @param idx - buffer index
*
* @return - status of function execution
*
*/
int rpmsg_enqueue_buffer(struct remote_device *rdev, void *buffer,
unsigned long len, unsigned short idx)
{
int status;
struct metal_sg sg;
struct metal_io_region *io;
io = rdev->proc->sh_buff.io;
if (rdev->role == RPMSG_REMOTE) {
/* Initialize buffer node */
sg.virt = buffer;
sg.len = len;
sg.io = io;
status = virtqueue_add_buffer(rdev->tvq, &sg, 0, 1, buffer);
} else {
(void)sg;
status = virtqueue_add_consumed_buffer(rdev->tvq, idx, len);
}
return status;
}
/**
* rpmsg_return_buffer
*
* Places the used buffer back on the virtqueue.
*
* @param rdev - pointer to remote core
* @param buffer - buffer pointer
* @param len - buffer length
* @param idx - buffer index
*
*/
void rpmsg_return_buffer(struct remote_device *rdev, void *buffer,
unsigned long len, unsigned short idx)
{
struct metal_sg sg;
if (rdev->role == RPMSG_REMOTE) {
/* Initialize buffer node */
sg.virt = buffer;
sg.len = len;
sg.io = rdev->proc->sh_buff.io;
virtqueue_add_buffer(rdev->rvq, &sg, 0, 1, buffer);
} else {
(void)sg;
virtqueue_add_consumed_buffer(rdev->rvq, idx, len);
}
}
/**
* rpmsg_get_tx_buffer
*
* Provides buffer to transmit messages.
*
* @param rdev - pointer to remote device
* @param len - length of returned buffer
* @param idx - buffer index
*
* return - pointer to buffer.
*/
void *rpmsg_get_tx_buffer(struct remote_device *rdev, unsigned long *len,
unsigned short *idx)
{
void *data;
if (rdev->role == RPMSG_REMOTE) {
data = virtqueue_get_buffer(rdev->tvq, (uint32_t *) len, idx);
if (data == RPMSG_NULL) {
data = sh_mem_get_buffer(rdev->mem_pool);
*len = RPMSG_BUFFER_SIZE;
}
} else {
data =
virtqueue_get_available_buffer(rdev->tvq, idx,
(uint32_t *) len);
}
return data;
}
/**
* rpmsg_get_rx_buffer
*
* Retrieves the received buffer from the virtqueue.
*
* @param rdev - pointer to remote device
* @param len - size of received buffer
* @param idx - index of buffer
*
* @return - pointer to received buffer
*
*/
void *rpmsg_get_rx_buffer(struct remote_device *rdev, unsigned long *len,
unsigned short *idx)
{
void *data;
if (rdev->role == RPMSG_REMOTE) {
data = virtqueue_get_buffer(rdev->rvq, (uint32_t *) len, idx);
} else {
data =
virtqueue_get_available_buffer(rdev->rvq, idx,
(uint32_t *) len);
}
if (data) {
/* FIX ME: library should not worry about if it needs
* to flush/invalidate cache, it is shared memory.
* The shared memory should be mapped properly before
* using it.
*/
metal_cache_invalidate(data, (unsigned int)(*len));
}
return data;
}
/**
* rpmsg_free_buffer
*
* Frees the allocated buffers.
*
* @param rdev - pointer to remote device
* @param buffer - pointer to buffer to free
*
*/
void rpmsg_free_buffer(struct remote_device *rdev, void *buffer)
{
if (rdev->role == RPMSG_REMOTE) {
sh_mem_free_buffer(buffer, rdev->mem_pool);
}
}
/**
* rpmsg_tx_callback
*
* Tx callback function.
*
* @param vq - pointer to virtqueue on which Tx has been
* completed.
*
*/
static void rpmsg_tx_callback(struct virtqueue *vq)
{
struct remote_device *rdev;
struct virtio_device *vdev;
struct rpmsg_channel *rp_chnl;
struct metal_list *node;
vdev = (struct virtio_device *)vq->vq_dev;
rdev = (struct remote_device *)vdev;
/* Check if the remote device is master. */
if (rdev->role == RPMSG_MASTER) {
/* Notification is received from the master. Now the remote (us) can
* perform one of two operations:
*
* a. If name service announcement is supported then it will send NS message.
* else
* b. It will update the channel state to active so that further communication
* can take place.
*/
metal_list_for_each(&rdev->rp_channels, node) {
rp_chnl = metal_container_of(node,
struct rpmsg_channel, node);
if (rp_chnl->state == RPMSG_CHNL_STATE_IDLE) {
if (rdev->support_ns) {
if (rpmsg_send_ns_message(rdev, rp_chnl,
RPMSG_NS_CREATE) ==
RPMSG_SUCCESS)
rp_chnl->state =
RPMSG_CHNL_STATE_NS;
} else {
rp_chnl->state =
RPMSG_CHNL_STATE_ACTIVE;
}
}
}
}
}
/**
* rpmsg_rx_callback
*
* Rx callback function.
*
* @param vq - pointer to virtqueue on which messages are received
*
*/
void rpmsg_rx_callback(struct virtqueue *vq)
{
struct remote_device *rdev;
struct virtio_device *vdev;
struct rpmsg_channel *rp_chnl;
struct rpmsg_endpoint *rp_ept;
struct rpmsg_hdr *rp_hdr;
struct rpmsg_hdr_reserved *reserved;
unsigned long len;
unsigned short idx;
vdev = (struct virtio_device *)vq->vq_dev;
rdev = (struct remote_device *)vdev;
metal_mutex_acquire(&rdev->lock);
/* Process the received data from remote node */
rp_hdr = (struct rpmsg_hdr *)rpmsg_get_rx_buffer(rdev, &len, &idx);
metal_mutex_release(&rdev->lock);
while (rp_hdr) {
/* Get the channel node from the remote device channels list. */
metal_mutex_acquire(&rdev->lock);
rp_ept = rpmsg_rdev_get_endpoint_from_addr(rdev, rp_hdr->dst);
metal_mutex_release(&rdev->lock);
if (!rp_ept) {
/* Fatal error no endpoint for the given dst addr. */
return;
}
rp_chnl = rp_ept->rp_chnl;
if ((rp_chnl) && (rp_chnl->state == RPMSG_CHNL_STATE_NS)) {
/* First message from RPMSG Master, update channel
* destination address and state */
if (rp_ept->addr == RPMSG_NS_EPT_ADDR) {
rp_ept->cb(rp_chnl,
(void *)RPMSG_LOCATE_DATA(rp_hdr),
rp_hdr->len, rdev,
rp_hdr->src);
} else {
rp_chnl->dst = rp_hdr->src;
rp_chnl->state = RPMSG_CHNL_STATE_ACTIVE;
/* Notify channel creation to application */
if (rdev->channel_created) {
rdev->channel_created(rp_chnl);
}
}
} else {
rp_ept->cb(rp_chnl, (void *)RPMSG_LOCATE_DATA(rp_hdr), rp_hdr->len,
rp_ept->priv, rp_hdr->src);
}
metal_mutex_acquire(&rdev->lock);
/* Check whether callback wants to hold buffer */
if (rp_hdr->reserved & RPMSG_BUF_HELD)
{
/* 'rp_hdr->reserved' field is now used as storage for
* 'idx' to release buffer later */
reserved = (struct rpmsg_hdr_reserved*)&rp_hdr->reserved;
reserved->idx = (uint16_t)idx;
} else {
/* Return used buffers. */
rpmsg_return_buffer(rdev, rp_hdr, len, idx);
}
rp_hdr =
(struct rpmsg_hdr *)rpmsg_get_rx_buffer(rdev, &len, &idx);
metal_mutex_release(&rdev->lock);
}
}
/**
* rpmsg_ns_callback
*
* This callback handles name service announcement from the remote device
* and creates/deletes rpmsg channels.
*
* @param server_chnl - pointer to server channel control block.
* @param data - pointer to received messages
* @param len - length of received data
* @param priv - any private data
* @param src - source address
*
* @return - none
*/
void rpmsg_ns_callback(struct rpmsg_channel *server_chnl, void *data, int len,
void *priv, unsigned long src)
{
struct remote_device *rdev;
struct rpmsg_channel *rp_chnl;
struct rpmsg_ns_msg *ns_msg;
(void)server_chnl;
(void)src;
(void)len;
rdev = (struct remote_device *)priv;
//FIXME: This assumes same name string size for channel name both on master
//and remote. If this is not the case then we will have to parse the
//message contents.
ns_msg = (struct rpmsg_ns_msg *)data;
if (ns_msg->flags & RPMSG_NS_DESTROY) {
metal_mutex_acquire(&rdev->lock);
rp_chnl = rpmsg_rdev_get_chnl_from_id(rdev, ns_msg->name);
metal_mutex_release(&rdev->lock);
if (rp_chnl) {
if (rdev->channel_destroyed) {
rdev->channel_destroyed(rp_chnl);
}
rpmsg_destroy_ept(rp_chnl->rp_ept);
_rpmsg_delete_channel(rp_chnl);
}
} else {
metal_mutex_acquire(&rdev->lock);
rp_chnl = rpmsg_rdev_get_chnl_from_id(rdev, ns_msg->name);
metal_mutex_release(&rdev->lock);
if (!rp_chnl) {
rp_chnl = _rpmsg_create_channel(rdev, ns_msg->name,
0x00,
ns_msg->addr);
}
if (rp_chnl) {
metal_mutex_acquire(&rdev->lock);
rp_chnl->state = RPMSG_CHNL_STATE_ACTIVE;
rp_chnl->dst = ns_msg->addr;
metal_mutex_release(&rdev->lock);
/* Create default endpoint for channel */
if (!rp_chnl->rp_ept) {
rp_chnl->rp_ept =
rpmsg_create_ept(rp_chnl,
rdev->default_cb, rdev,
RPMSG_ADDR_ANY);
if (rp_chnl->rp_ept) {
rp_chnl->src = rp_chnl->rp_ept->addr;
rpmsg_send_ns_message(rdev,
rp_chnl,
RPMSG_NS_CREATE);
}
}
if (rdev->channel_created)
rdev->channel_created(rp_chnl);
}
}
}
/**
* rpmsg_get_address
*
* This function provides a unique 32-bit address.
*
* @param bitmap - bit map for addresses
* @param size - size of bitmap
*
* return - a unique address
*/
int rpmsg_get_address(unsigned long *bitmap, int size)
{
int addr = -1;
int i, tmp32;
/* Find the first available address */
for (i = 0; i < size; i++) {
tmp32 = get_first_zero_bit(bitmap[i]);
if (tmp32 < 32) {
addr = tmp32 + (i*32);
bitmap[i] |= (1 << tmp32);
break;
}
}
return addr;
}
/**
* rpmsg_release_address
*
* Frees the given address.
*
* @param bitmap - bit map for addresses
* @param size - size of bitmap
* @param addr - address to free
*
* return - RPMSG_SUCCESS, or -1 if the address is out of range
*/
int rpmsg_release_address(unsigned long *bitmap, int size, int addr)
{
unsigned int i, j;
unsigned long mask = 1;
if (addr >= size * 32)
return -1;
/* Mark the addr as available */
i = addr / 32;
j = addr % 32;
mask = mask << j;
bitmap[i] = bitmap[i] & (~mask);
return RPMSG_SUCCESS;
}
/**
* rpmsg_is_address_set
*
* Checks whether address is used or free.
*
* @param bitmap - bit map for addresses
* @param size - size of bitmap
* @param addr - address to check
*
* return - non-zero if the address is in use, 0 if free, -1 if out of range
*/
int rpmsg_is_address_set(unsigned long *bitmap, int size, int addr)
{
int i, j;
unsigned long mask = 1;
if (addr >= size * 32)
return -1;
/* Check whether the address bit is set */
i = addr / 32;
j = addr % 32;
mask = mask << j;
return (bitmap[i] & mask);
}
/**
* rpmsg_set_address
*
* Marks the address as consumed.
*
* @param bitmap - bit map for addresses
* @param size - size of bitmap
* @param addr - address to mark as used
*
* return - RPMSG_SUCCESS, or -1 if the address is out of range
*/
int rpmsg_set_address(unsigned long *bitmap, int size, int addr)
{
int i, j;
unsigned long mask = 1;
if (addr >= size * 32)
return -1;
/* Mark the address as used */
i = addr / 32;
j = addr % 32;
mask = mask << j;
bitmap[i] |= mask;
return RPMSG_SUCCESS;
}
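/*
* Usage sketch (illustrative only): the helpers above manage endpoint
* addresses as a bitmap, one bit per address and 32 addresses per word, with
* RPMSG_ADDR_BMP_SIZE words per remote device. A typical round trip:
*
*   unsigned long bitmap[RPMSG_ADDR_BMP_SIZE] = {0};
*
*   int addr = rpmsg_get_address(bitmap, RPMSG_ADDR_BMP_SIZE);  // allocate
*   if (addr >= 0) {
*       // rpmsg_is_address_set() now reports addr as in use
*       rpmsg_release_address(bitmap, RPMSG_ADDR_BMP_SIZE, addr);  // free it
*   }
*/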

View file

@ -0,0 +1,2 @@
collect (PROJECT_LIB_SOURCES virtio.c)
collect (PROJECT_LIB_SOURCES virtqueue.c)

View file

@ -0,0 +1,81 @@
/*-
* Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <openamp/virtio.h>
static const char *virtio_feature_name(unsigned long feature,
const struct virtio_feature_desc *);
//TODO : This structure may change depending on the types of devices we support.
static const struct virtio_ident {
unsigned short devid;
const char *name;
} virtio_ident_table[] = {
{VIRTIO_ID_NETWORK, "Network"},
{VIRTIO_ID_BLOCK, "Block"},
{VIRTIO_ID_CONSOLE, "Console"},
{VIRTIO_ID_ENTROPY, "Entropy"},
{VIRTIO_ID_BALLOON, "Balloon"},
{VIRTIO_ID_IOMEMORY, "IOMemory"},
{VIRTIO_ID_SCSI, "SCSI"},
{VIRTIO_ID_9P, "9P Transport"},
{0, NULL}
};
/* Device independent features. */
static const struct virtio_feature_desc virtio_common_feature_desc[] = {
{VIRTIO_F_NOTIFY_ON_EMPTY, "NotifyOnEmpty"},
{VIRTIO_RING_F_INDIRECT_DESC, "RingIndirect"},
{VIRTIO_RING_F_EVENT_IDX, "EventIdx"},
{VIRTIO_F_BAD_FEATURE, "BadFeature"},
{0, NULL}
};
const char *virtio_dev_name(unsigned short devid)
{
const struct virtio_ident *ident;
for (ident = virtio_ident_table; ident->name != NULL; ident++) {
if (ident->devid == devid)
return (ident->name);
}
return (NULL);
}
static const char *virtio_feature_name(unsigned long val,
const struct virtio_feature_desc *desc)
{
int i, j;
const struct virtio_feature_desc *descs[2] = { desc,
virtio_common_feature_desc
};
for (i = 0; i < 2; i++) {
if (descs[i] == NULL)
continue;
for (j = 0; descs[i][j].vfd_val != 0; j++) {
if (val == descs[i][j].vfd_val)
return (descs[i][j].vfd_str);
}
}
return (NULL);
}
void virtio_describe(struct virtio_device *dev, const char *msg,
uint32_t features, struct virtio_feature_desc *desc)
{
(void)dev;
(void)msg;
(void)features;
// TODO: Not used currently - keeping it for future use
virtio_feature_name(0, desc);
}

View file

@ -0,0 +1,685 @@
/*-
* Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <string.h>
#include <openamp/virtqueue.h>
#include <metal/atomic.h>
#include <metal/dma.h>
#include <metal/io.h>
#include <metal/log.h>
#include <metal/alloc.h>
/* Prototype for internal functions. */
static void vq_ring_init(struct virtqueue *, void *, int);
static void vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t vq_ring_add_buffer(struct virtqueue *, struct vring_desc *,
uint16_t, struct metal_sg *, int, int);
static int vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static void vq_ring_free_chain(struct virtqueue *, uint16_t);
static int vq_ring_must_notify_host(struct virtqueue *vq);
static void vq_ring_notify_host(struct virtqueue *vq);
static int virtqueue_nused(struct virtqueue *vq);
/**
* virtqueue_create - Creates new VirtIO queue
*
* @param virt_dev - Pointer to VirtIO device
* @param id - VirtIO queue ID, must be unique
* @param name - Name of VirtIO queue
* @param ring - Pointer to vring_alloc_info control block
* @param callback - Pointer to callback function, invoked
* when message is available on VirtIO queue
* @param notify - Pointer to notify function, used to notify
* other side that there is job available for it
* @param shm_io - shared memory I/O region of the virtqueue
* @param v_queue - Created VirtIO queue.
*
* @return - Function status
*/
int virtqueue_create(struct virtio_device *virt_dev, unsigned short id,
const char *name, struct vring_alloc_info *ring,
void (*callback) (struct virtqueue * vq),
void (*notify) (struct virtqueue * vq),
struct metal_io_region *shm_io,
struct virtqueue **v_queue)
{
struct virtqueue *vq = NULL;
int status = VQUEUE_SUCCESS;
uint32_t vq_size = 0;
VQ_PARAM_CHK(ring == NULL, status, ERROR_VQUEUE_INVLD_PARAM);
VQ_PARAM_CHK(ring->num_descs == 0, status, ERROR_VQUEUE_INVLD_PARAM);
VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1), status,
ERROR_VRING_ALIGN);
if (status == VQUEUE_SUCCESS) {
vq_size = sizeof(struct virtqueue)
+ (ring->num_descs) * sizeof(struct vq_desc_extra);
vq = (struct virtqueue *)metal_allocate_memory(vq_size);
if (vq == NULL) {
return (ERROR_NO_MEM);
}
memset(vq, 0x00, vq_size);
vq->vq_dev = virt_dev;
strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ);
vq->vq_queue_index = id;
vq->vq_nentries = ring->num_descs;
vq->vq_free_cnt = vq->vq_nentries;
vq->callback = callback;
vq->notify = notify;
vq->shm_io = shm_io;
/* Initialize vring control block in virtqueue. */
vq_ring_init(vq, (void *)ring->vaddr, ring->align);
/* Disable callbacks - will be enabled by the application
* once initialization is completed.
*/
virtqueue_disable_cb(vq);
*v_queue = vq;
}
return (status);
}
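/*
* Usage sketch (illustrative only): create a virtqueue on top of a shared
* memory vring. The vring memory (vring_mem), its metal_io_region (shm_io),
* the owning virtio_device (vdev) and the notify routine all come from the
* platform/HIL layer and are assumed to exist here.
*
*   static void my_vq_callback(struct virtqueue *vq) { }
*   static void my_vq_notify(struct virtqueue *vq) { }  // kick the peer
*
*   struct vring_alloc_info ring = {
*       .vaddr = vring_mem,  // shared memory reserved for the vring
*       .align = 4096,       // vring alignment agreed with the other side
*       .num_descs = 256,    // must be a power of two
*   };
*   struct virtqueue *vq;
*
*   int ret = virtqueue_create(vdev, 0, "rx_vq", &ring, my_vq_callback,
*                              my_vq_notify, shm_io, &vq);
*   if (ret == VQUEUE_SUCCESS)
*       virtqueue_enable_cb(vq);  // callbacks start disabled (see above)
*/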
/**
* virtqueue_add_buffer() - Enqueues new buffer in vring for consumption
* by other side. Readable buffers are always
* inserted before writable buffers
*
* @param vq - Pointer to VirtIO queue control block.
* @param sg - Pointer to buffer scatter/gather list
* @param readable - Number of readable buffers
* @param writable - Number of writable buffers
* @param cookie - Pointer to hold call back data
*
* @return - Function status
*/
int virtqueue_add_buffer(struct virtqueue *vq, struct metal_sg *sg,
int readable, int writable, void *cookie)
{
struct vq_desc_extra *dxp = NULL;
int status = VQUEUE_SUCCESS;
uint16_t head_idx;
uint16_t idx;
int needed;
needed = readable + writable;
VQ_PARAM_CHK(vq == NULL, status, ERROR_VQUEUE_INVLD_PARAM);
VQ_PARAM_CHK(needed < 1, status, ERROR_VQUEUE_INVLD_PARAM);
VQ_PARAM_CHK(vq->vq_free_cnt == 0, status, ERROR_VRING_FULL);
VQUEUE_BUSY(vq);
if (status == VQUEUE_SUCCESS) {
VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
head_idx = vq->vq_desc_head_idx;
VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
dxp = &vq->vq_descx[head_idx];
VQASSERT(vq, (dxp->cookie == NULL),
"cookie already exists for index");
dxp->cookie = cookie;
dxp->ndescs = needed;
/* Enqueue buffer onto the ring. */
idx = vq_ring_add_buffer(vq, vq->vq_ring.desc, head_idx, sg,
readable, writable);
vq->vq_desc_head_idx = idx;
vq->vq_free_cnt -= needed;
if (vq->vq_free_cnt == 0) {
VQ_RING_ASSERT_CHAIN_TERM(vq);
} else {
VQ_RING_ASSERT_VALID_IDX(vq, idx);
}
/*
* Update vring_avail control block fields so that other
* side can get buffer using it.
*/
vq_ring_update_avail(vq, head_idx);
}
VQUEUE_IDLE(vq);
return (status);
}
/**
* virtqueue_add_single_buffer - Enqueues single buffer in vring
*
* @param vq - Pointer to VirtIO queue control block
* @param cookie - Pointer to hold call back data
* @param sg - metal_scatter/gather struct element
* @param writable - If buffer writable
* @param has_next - If buffers for subsequent call are
* to be chained
*
* @return - Function status
*/
int virtqueue_add_single_buffer(struct virtqueue *vq, void *cookie,
struct metal_sg *sg, int writable,
boolean has_next)
{
struct vq_desc_extra *dxp;
struct vring_desc *dp;
uint16_t head_idx;
uint16_t idx;
int status = VQUEUE_SUCCESS;
VQ_PARAM_CHK(vq == NULL, status, ERROR_VQUEUE_INVLD_PARAM);
VQ_PARAM_CHK(vq->vq_free_cnt == 0, status, ERROR_VRING_FULL);
VQUEUE_BUSY(vq);
if (status == VQUEUE_SUCCESS) {
VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
head_idx = vq->vq_desc_head_idx;
dxp = &vq->vq_descx[head_idx];
dxp->cookie = cookie;
dxp->ndescs = 1;
idx = head_idx;
dp = &vq->vq_ring.desc[idx];
dp->addr = metal_io_virt_to_phys(sg->io, sg->virt);
dp->len = sg->len;
dp->flags = 0;
idx = dp->next;
if (has_next)
dp->flags |= VRING_DESC_F_NEXT;
if (writable)
dp->flags |= VRING_DESC_F_WRITE;
vq->vq_desc_head_idx = idx;
vq->vq_free_cnt--;
if (vq->vq_free_cnt == 0) {
VQ_RING_ASSERT_CHAIN_TERM(vq);
} else {
VQ_RING_ASSERT_VALID_IDX(vq, idx);
}
vq_ring_update_avail(vq, head_idx);
}
VQUEUE_IDLE(vq);
return (status);
}
/**
* virtqueue_get_buffer - Returns used buffers from VirtIO queue
*
* @param vq - Pointer to VirtIO queue control block
* @param len - Length of consumed buffer
* @param idx - index of the buffer
*
* @return - Pointer to used buffer
*/
void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t * len, uint16_t * idx)
{
struct vring_used_elem *uep;
void *cookie;
uint16_t used_idx, desc_idx;
if ((vq == NULL) || (vq->vq_used_cons_idx == vq->vq_ring.used->idx))
return (NULL);
VQUEUE_BUSY(vq);
used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
uep = &vq->vq_ring.used->ring[used_idx];
atomic_thread_fence(memory_order_seq_cst);
desc_idx = (uint16_t) uep->id;
if (len != NULL)
*len = uep->len;
vq_ring_free_chain(vq, desc_idx);
cookie = vq->vq_descx[desc_idx].cookie;
vq->vq_descx[desc_idx].cookie = NULL;
if (idx != NULL)
*idx = used_idx;
VQUEUE_IDLE(vq);
return (cookie);
}
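/*
* Usage sketch (illustrative only): driver-side round trip on one virtqueue,
* assuming buf lies inside the shared memory region described by shm_io. The
* buffer is exposed to the peer with virtqueue_add_buffer() and, after the
* peer has consumed it, recovered from the used ring via its cookie.
*
*   struct metal_sg sg = {
*       .virt = buf,
*       .len = buf_len,
*       .io = shm_io,
*   };
*
*   virtqueue_add_buffer(vq, &sg, 0, 1, buf);  // 0 readable, 1 writable
*   virtqueue_kick(vq);                        // notify the other side
*
*   // later, typically from the virtqueue callback:
*   uint32_t len;
*   uint16_t idx;
*   void *done = virtqueue_get_buffer(vq, &len, &idx);  // == buf when used
*/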
uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx)
{
return vq->vq_ring.desc[idx].len;
}
/**
* virtqueue_free - Frees VirtIO queue resources
*
* @param vq - Pointer to VirtIO queue control block
*
*/
void virtqueue_free(struct virtqueue *vq)
{
if (vq != NULL) {
if (vq->vq_free_cnt != vq->vq_nentries) {
metal_log(METAL_LOG_WARNING,
"%s: freeing non-empty virtqueue\r\n",
vq->vq_name);
}
metal_free_memory(vq);
}
}
/**
* virtqueue_get_available_buffer - Returns buffer available for use in the
* VirtIO queue
*
* @param vq - Pointer to VirtIO queue control block
* @param avail_idx - Pointer to index used in vring desc table
* @param len - Length of buffer
*
* @return - Pointer to available buffer
*/
void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t * avail_idx,
uint32_t * len)
{
uint16_t head_idx = 0;
void *buffer;
atomic_thread_fence(memory_order_seq_cst);
if (vq->vq_available_idx == vq->vq_ring.avail->idx) {
return (NULL);
}
VQUEUE_BUSY(vq);
head_idx = vq->vq_available_idx++ & (vq->vq_nentries - 1);
*avail_idx = vq->vq_ring.avail->ring[head_idx];
buffer = metal_io_phys_to_virt(vq->shm_io, vq->vq_ring.desc[*avail_idx].addr);
*len = vq->vq_ring.desc[*avail_idx].len;
VQUEUE_IDLE(vq);
return (buffer);
}
/**
* virtqueue_add_consumed_buffer - Returns consumed buffer back to VirtIO queue
*
* @param vq - Pointer to VirtIO queue control block
* @param head_idx - Index of vring desc containing used buffer
* @param len - Length of buffer
*
* @return - Function status
*/
int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx,
uint32_t len)
{
struct vring_used_elem *used_desc = NULL;
uint16_t used_idx;
if (head_idx > vq->vq_nentries) {
return (ERROR_VRING_NO_BUFF);
}
VQUEUE_BUSY(vq);
used_idx = vq->vq_ring.used->idx & (vq->vq_nentries - 1);
used_desc = &(vq->vq_ring.used->ring[used_idx]);
used_desc->id = head_idx;
used_desc->len = len;
atomic_thread_fence(memory_order_seq_cst);
vq->vq_ring.used->idx++;
VQUEUE_IDLE(vq);
return (VQUEUE_SUCCESS);
}
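/*
* Usage sketch (illustrative only): the complementary path for the side that
* consumes buffers offered by its peer. A buffer is taken from the available
* ring, processed, and then returned through the used ring together with the
* number of bytes actually used.
*
*   uint16_t avail_idx;
*   uint32_t len;
*   void *buf = virtqueue_get_available_buffer(vq, &avail_idx, &len);
*
*   if (buf) {
*       // ... read from / write into buf, at most len bytes ...
*       virtqueue_add_consumed_buffer(vq, avail_idx, len);
*       virtqueue_kick(vq);  // tell the peer its buffer was consumed
*   }
*/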
/**
* virtqueue_enable_cb - Enables callback generation
*
* @param vq - Pointer to VirtIO queue control block
*
* @return - Function status
*/
int virtqueue_enable_cb(struct virtqueue *vq)
{
return (vq_ring_enable_interrupt(vq, 0));
}
/**
* virtqueue_disable_cb - Disables callback generation
*
* @param vq - Pointer to VirtIO queue control block
*
*/
void virtqueue_disable_cb(struct virtqueue *vq)
{
VQUEUE_BUSY(vq);
if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
vring_used_event(&vq->vq_ring) =
vq->vq_used_cons_idx - vq->vq_nentries - 1;
} else {
vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
VQUEUE_IDLE(vq);
}
/**
* virtqueue_kick - Notifies other side that there is buffer available for it.
*
* @param vq - Pointer to VirtIO queue control block
*/
void virtqueue_kick(struct virtqueue *vq)
{
VQUEUE_BUSY(vq);
/* Ensure updated avail->idx is visible to host. */
atomic_thread_fence(memory_order_seq_cst);
if (vq_ring_must_notify_host(vq))
vq_ring_notify_host(vq);
vq->vq_queued_cnt = 0;
VQUEUE_IDLE(vq);
}
/**
* virtqueue_dump - Dumps important virtqueue fields; use for debugging purposes
*
* @param vq - Pointer to VirtIO queue control block
*/
void virtqueue_dump(struct virtqueue *vq)
{
if (vq == NULL)
return;
metal_log(METAL_LOG_DEBUG,
"VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
"desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
"used.idx=%d; avail.flags=0x%x; used.flags=0x%x\r\n",
vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
vq->vq_ring.used->flags);
}
/**
* virtqueue_get_desc_size - Returns vring descriptor size
*
* @param vq - Pointer to VirtIO queue control block
*
* @return - Descriptor length
*/
uint32_t virtqueue_get_desc_size(struct virtqueue * vq)
{
uint16_t head_idx = 0;
uint16_t avail_idx = 0;
uint32_t len = 0;
if (vq->vq_available_idx == vq->vq_ring.avail->idx) {
return 0;
}
VQUEUE_BUSY(vq);
head_idx = vq->vq_available_idx & (vq->vq_nentries - 1);
avail_idx = vq->vq_ring.avail->ring[head_idx];
len = vq->vq_ring.desc[avail_idx].len;
VQUEUE_IDLE(vq);
return (len);
}
/**************************************************************************
* Helper Functions *
**************************************************************************/
/**
*
* vq_ring_add_buffer
*
*/
static uint16_t vq_ring_add_buffer(struct virtqueue *vq,
struct vring_desc *desc, uint16_t head_idx,
struct metal_sg *sg, int readable,
int writable)
{
struct vring_desc *dp;
int i, needed;
uint16_t idx;
(void)vq;
needed = readable + writable;
for (i = 0, idx = head_idx; i < needed; i++, idx = dp->next) {
VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
"premature end of free desc chain");
dp = &desc[idx];
dp->addr = metal_io_virt_to_phys(sg[i].io, sg[i].virt);
dp->len = sg[i].len;
dp->flags = 0;
if (i < needed - 1)
dp->flags |= VRING_DESC_F_NEXT;
/* Readable buffers are inserted into vring before the writable buffers. */
if (i >= readable)
dp->flags |= VRING_DESC_F_WRITE;
}
return (idx);
}
/**
*
* vq_ring_free_chain
*
*/
static void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
struct vring_desc *dp;
struct vq_desc_extra *dxp;
VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
dp = &vq->vq_ring.desc[desc_idx];
dxp = &vq->vq_descx[desc_idx];
if (vq->vq_free_cnt == 0) {
VQ_RING_ASSERT_CHAIN_TERM(vq);
}
vq->vq_free_cnt += dxp->ndescs;
dxp->ndescs--;
if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
while (dp->flags & VRING_DESC_F_NEXT) {
VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
dp = &vq->vq_ring.desc[dp->next];
dxp->ndescs--;
}
}
VQASSERT(vq, (dxp->ndescs == 0),
"failed to free entire desc chain, remaining");
/*
* We must append the existing free chain, if any, to the end of
* newly freed chain. If the virtqueue was completely used, then
* head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
*/
dp->next = vq->vq_desc_head_idx;
vq->vq_desc_head_idx = desc_idx;
}
/**
*
* vq_ring_init
*
*/
static void vq_ring_init(struct virtqueue *vq, void *ring_mem, int alignment)
{
struct vring *vr;
int i, size;
size = vq->vq_nentries;
vr = &vq->vq_ring;
vring_init(vr, size, (unsigned char *)ring_mem, alignment);
for (i = 0; i < size - 1; i++)
vr->desc[i].next = i + 1;
vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}
/**
*
* vq_ring_update_avail
*
*/
static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
uint16_t avail_idx;
/*
* Place the head of the descriptor chain into the next slot and make
* it usable to the host. The chain is made available now rather than
* deferring to virtqueue_notify() in the hopes that if the host is
* currently running on another CPU, we can keep it processing the new
* descriptor.
*/
avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
vq->vq_ring.avail->ring[avail_idx] = desc_idx;
atomic_thread_fence(memory_order_seq_cst);
vq->vq_ring.avail->idx++;
/* Keep pending count until virtqueue_notify(). */
vq->vq_queued_cnt++;
}
/**
*
* vq_ring_enable_interrupt
*
*/
static int vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{
/*
* Enable interrupts, making sure we get the latest index of
* what's already been consumed.
*/
if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
} else {
vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
}
atomic_thread_fence(memory_order_seq_cst);
/*
* Enough items may have already been consumed to meet our threshold
* since we last checked. Let our caller know so it processes the new
* entries.
*/
if (virtqueue_nused(vq) > ndesc) {
return (1);
}
return (0);
}
/**
*
* virtqueue_notification
*
*/
void virtqueue_notification(struct virtqueue *vq)
{
atomic_thread_fence(memory_order_seq_cst);
if (vq->callback != NULL)
vq->callback(vq);
}
/**
*
* vq_ring_must_notify_host
*
*/
static int vq_ring_must_notify_host(struct virtqueue *vq)
{
uint16_t new_idx, prev_idx, event_idx;
if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
new_idx = vq->vq_ring.avail->idx;
prev_idx = new_idx - vq->vq_queued_cnt;
event_idx = vring_avail_event(&vq->vq_ring);
return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
}
return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}
/**
*
* vq_ring_notify_host
*
*/
static void vq_ring_notify_host(struct virtqueue *vq)
{
if (vq->notify != NULL)
vq->notify(vq);
}
/**
*
* virtqueue_nused
*
*/
static int virtqueue_nused(struct virtqueue *vq)
{
uint16_t used_idx, nused;
used_idx = vq->vq_ring.used->idx;
nused = (uint16_t) (used_idx - vq->vq_used_cons_idx);
VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");
return (nused);
}