Compare commits


No commits in common. "6f64f77557967a41ef7df429f0a831787bb8418f" and "832261dbe5d7a52af03e3a877b007c34354d79e7" have entirely different histories.

11046 changed files with 103164 additions and 359428 deletions

View file

@ -46,7 +46,6 @@ ForEachMacros:
- 'SYS_DLIST_FOR_EACH_CONTAINER_SAFE'
- 'SYS_DLIST_FOR_EACH_NODE'
- 'SYS_DLIST_FOR_EACH_NODE_SAFE'
- 'SYS_SEM_LOCK'
- 'SYS_SFLIST_FOR_EACH_CONTAINER'
- 'SYS_SFLIST_FOR_EACH_CONTAINER_SAFE'
- 'SYS_SFLIST_FOR_EACH_NODE'
@ -80,8 +79,6 @@ ForEachMacros:
- 'HTTP_SERVER_CONTENT_TYPE_FOREACH'
- 'HTTP_SERVICE_FOREACH'
- 'HTTP_SERVICE_FOREACH_RESOURCE'
- 'I3C_BUS_FOR_EACH_I3CDEV'
- 'I3C_BUS_FOR_EACH_I2CDEV'
IfMacros:
- 'CHECKIF'
# Disabled for now, see bug https://github.com/zephyrproject-rtos/zephyr/issues/48520
@ -96,18 +93,11 @@ IncludeCategories:
- Regex: '.*'
Priority: 3
IndentCaseLabels: false
IndentGotoLabels: false
IndentWidth: 8
InsertBraces: true
SpaceBeforeInheritanceColon: False
SpaceBeforeParens: ControlStatementsExceptControlMacros
SortIncludes: Never
UseTab: ForContinuationAndIndentation
WhitespaceSensitiveMacros:
- COND_CODE_0
- COND_CODE_1
- IF_DISABLED
- IF_ENABLED
- LISTIFY
- STRINGIFY
- Z_STRINGIFY

View file

@ -1,21 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (c) 2024, Basalte bv
analyzer:
# Start by disabling all
- --disable-all
# Enable the sensitive profile
- --enable=sensitive
# Disable unused cases
- --disable=boost
- --disable=mpi
# Many identifiers in zephyr start with _
- --disable=clang-diagnostic-reserved-identifier
- --disable=clang-diagnostic-reserved-macro-identifier
# Cleanup
- --clean

View file

@ -2,20 +2,12 @@ name: Backport Issue Check
on:
pull_request_target:
types:
- edited
- opened
- reopened
- synchronize
branches:
- v*-branch
jobs:
backport:
name: Backport Issue Check
concurrency:
group: backport-issue-check-${{ github.ref }}
cancel-in-progress: true
runs-on: ubuntu-22.04
if: github.repository == 'zephyrproject-rtos/zephyr'

View file

@ -13,7 +13,7 @@ jobs:
steps:
- name: Download artifacts
uses: dawidd6/action-download-artifact@v6
uses: dawidd6/action-download-artifact@v3
with:
run_id: ${{ github.event.workflow_run.id }}

View file

@ -8,8 +8,6 @@ on:
- "west.yml"
- "subsys/bluetooth/**"
- "tests/bsim/**"
- "boards/nordic/nrf5*/*dt*"
- "dts/*/nordic/**"
- "tests/bluetooth/common/testlib/**"
- "samples/bluetooth/**"
- "boards/posix/**"
@ -18,7 +16,6 @@ on:
- "include/zephyr/arch/posix/**"
- "scripts/native_simulator/**"
- "samples/net/sockets/echo_*/**"
- "modules/mbedtls/**"
- "modules/openthread/**"
- "subsys/net/l2/openthread/**"
- "include/zephyr/net/openthread.h"
@ -37,7 +34,7 @@ jobs:
runs-on:
group: zephyr-runner-v2-linux-x64-4xlarge
container:
image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.14.20240823
image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.13.20240601
options: '--entrypoint /bin/bash'
env:
ZEPHYR_TOOLCHAIN_VARIANT: zephyr
@ -78,7 +75,6 @@ jobs:
git config --global user.name "Zephyr Bot"
rm -fr ".git/rebase-apply"
git rebase origin/${BASE_REF}
git clean -f -d
git log --pretty=oneline | head -n 10
west init -l . || true
west config manifest.group-filter -- +ci
@ -89,7 +85,7 @@ jobs:
echo "ZEPHYR_SDK_INSTALL_DIR=/opt/toolchains/zephyr-sdk-$( cat SDK_VERSION )" >> $GITHUB_ENV
- name: Check common triggering files
uses: tj-actions/changed-files@v45
uses: tj-actions/changed-files@v44
id: check-common-files
with:
files: |
@ -102,12 +98,9 @@ jobs:
include/zephyr/arch/posix/
scripts/native_simulator/
tests/bsim/*
boards/nordic/nrf5*/*dt*
dts/*/nordic/
modules/mbedtls/**
- name: Check if Bluetooth files changed
uses: tj-actions/changed-files@v45
uses: tj-actions/changed-files@v44
id: check-bluetooth-files
with:
files: |
@ -116,7 +109,7 @@ jobs:
subsys/bluetooth/
- name: Check if Networking files changed
uses: tj-actions/changed-files@v45
uses: tj-actions/changed-files@v44
id: check-networking-files
with:
files: |
@ -129,7 +122,7 @@ jobs:
include/zephyr/net/ieee802154*
- name: Check if UART files changed
uses: tj-actions/changed-files@v45
uses: tj-actions/changed-files@v44
id: check-uart-files
with:
files: |
@ -139,10 +132,10 @@ jobs:
- name: Update BabbleSim to manifest revision
if: >
steps.check-bluetooth-files.outputs.any_modified == 'true'
|| steps.check-networking-files.outputs.any_modified == 'true'
|| steps.check-uart-files.outputs.any_modified == 'true'
|| steps.check-common-files.outputs.any_modified == 'true'
steps.check-bluetooth-files.outputs.any_changed == 'true'
|| steps.check-networking-files.outputs.any_changed == 'true'
|| steps.check-uart-files.outputs.any_changed == 'true'
|| steps.check-common-files.outputs.any_changed == 'true'
run: |
export BSIM_VERSION=$( west list bsim -f {revision} )
echo "Manifest points to bsim sha $BSIM_VERSION"
@ -153,17 +146,17 @@ jobs:
make everything -s -j 8
- name: Run Bluetooth Tests with BSIM
if: steps.check-bluetooth-files.outputs.any_modified == 'true' || steps.check-common-files.outputs.any_modified == 'true'
if: steps.check-bluetooth-files.outputs.any_changed == 'true' || steps.check-common-files.outputs.any_changed == 'true'
run: |
tests/bsim/ci.bt.sh
- name: Run Networking Tests with BSIM
if: steps.check-networking-files.outputs.any_modified == 'true' || steps.check-common-files.outputs.any_modified == 'true'
if: steps.check-networking-files.outputs.any_changed == 'true' || steps.check-common-files.outputs.any_changed == 'true'
run: |
tests/bsim/ci.net.sh
- name: Run UART Tests with BSIM
if: steps.check-uart-files.outputs.any_modified == 'true' || steps.check-common-files.outputs.any_modified == 'true'
if: steps.check-uart-files.outputs.any_changed == 'true' || steps.check-common-files.outputs.any_changed == 'true'
run: |
tests/bsim/ci.uart.sh

View file

@ -12,7 +12,7 @@ jobs:
runs-on:
group: zephyr-runner-v2-linux-x64-4xlarge
container:
image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.14.20240823
image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.13.20240601
options: '--entrypoint /bin/bash'
strategy:
fail-fast: false
@ -62,7 +62,6 @@ jobs:
git config --global user.name "Zephyr Bot"
rm -fr ".git/rebase-apply"
git rebase origin/${BASE_REF}
git clean -f -d
git log --pretty=oneline | head -n 10
west init -l . || true
west config --global update.narrow true

View file

@ -14,7 +14,7 @@ jobs:
runs-on:
group: zephyr-runner-v2-linux-x64-4xlarge
container:
image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.14.20240823
image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.13.20240601
options: '--entrypoint /bin/bash'
strategy:
fail-fast: false

View file

@ -41,7 +41,6 @@ jobs:
git config --global user.name "Github Actions"
git remote -v
git rebase origin/${BASE_REF}
git clean -f -d
source zephyr-env.sh
# debug
ls -la

View file

@ -38,7 +38,7 @@ jobs:
run: |
pip3 install setuptools
pip3 install wheel
pip3 install python-magic lxml junitparser gitlint pylint pykwalify yamllint clang-format unidiff sphinx-lint
pip3 install python-magic lxml junitparser gitlint pylint pykwalify yamllint
pip3 install west
- name: west setup
@ -52,7 +52,6 @@ jobs:
[[ "$(git rev-list --merges --count origin/${BASE_REF}..)" == "0" ]] || \
(echo "::error ::Merge commits not allowed, rebase instead";false)
git rebase origin/${BASE_REF}
git clean -f -d
# debug
git log --pretty=oneline | head -n 10
west init -l . || true
@ -95,23 +94,16 @@ jobs:
exit 1;
fi
warns=("ClangFormat")
files=($(./scripts/ci/check_compliance.py -l))
for file in "${files[@]}"; do
f="${file}.txt"
if [[ -s $f ]]; then
results=$(cat $f)
results="${results//'%'/'%25'}"
results="${results//$'\n'/'%0A'}"
results="${results//$'\r'/'%0D'}"
if [[ "${warns[@]}" =~ "${file}" ]]; then
echo "::warning file=${f}::$results"
else
echo "::error file=${f}::$results"
exit=1
fi
errors=$(cat $f)
errors="${errors//'%'/'%25'}"
errors="${errors//$'\n'/'%0A'}"
errors="${errors//$'\r'/'%0D'}"
echo "::error file=${f}::$errors"
exit=1
fi
done

View file

@ -26,8 +26,13 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
python-version: ['3.10', '3.11', '3.12', '3.13']
python-version: ['3.10', '3.11', '3.12']
os: [ubuntu-22.04, macos-14, windows-2022]
exclude:
- os: macos-14
python-version: 3.6
- os: windows-2022
python-version: 3.6
steps:
- name: checkout
uses: actions/checkout@v4

View file

@ -17,7 +17,7 @@ env:
# The latest CMake available directly with apt is 3.18, but we need >=3.20
# so we fetch that through pip.
CMAKE_VERSION: 3.20.5
DOXYGEN_VERSION: 1.12.0
DOXYGEN_VERSION: 1.9.6
# Job count is set to 2 less than the vCPU count of 16 because the total available RAM is 32GiB
# and each sphinx-build process may use more than 2GiB of RAM.
JOB_COUNT: 14
@ -29,7 +29,7 @@ jobs:
if: >
github.repository_owner == 'zephyrproject-rtos'
outputs:
file_check: ${{ steps.check-doc-files.outputs.any_modified }}
file_check: ${{ steps.check-doc-files.outputs.any_changed }}
steps:
- name: checkout
uses: actions/checkout@v4
@ -37,7 +37,7 @@ jobs:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- name: Check if Documentation related files changed
uses: tj-actions/changed-files@v45
uses: tj-actions/changed-files@v44
id: check-doc-files
with:
files: |
@ -62,13 +62,20 @@ jobs:
if: >
github.repository_owner == 'zephyrproject-rtos' &&
( needs.doc-file-check.outputs.file_check == 'true' || github.event_name != 'pull_request' )
runs-on: ubuntu-22.04
runs-on:
group: zephyr-runner-v2-linux-x64-4xlarge
timeout-minutes: 90
concurrency:
group: doc-build-html-${{ github.ref }}
cancel-in-progress: true
steps:
- name: Print cloud service information
run: |
echo "ZEPHYR_RUNNER_CLOUD_PROVIDER = ${ZEPHYR_RUNNER_CLOUD_PROVIDER}"
echo "ZEPHYR_RUNNER_CLOUD_NODE = ${ZEPHYR_RUNNER_CLOUD_NODE}"
echo "ZEPHYR_RUNNER_CLOUD_POD = ${ZEPHYR_RUNNER_CLOUD_POD}"
- name: install-pkgs
run: |
sudo apt-get update
@ -94,7 +101,6 @@ jobs:
git config --global user.email "actions@zephyrproject.org"
git config --global user.name "Github Actions"
git rebase origin/${BASE_REF}
git clean -f -d
git log --graph --oneline HEAD...${PR_HEAD}
- name: cache-pip
@ -186,7 +192,8 @@ jobs:
if: |
github.event_name != 'pull_request' &&
github.repository_owner == 'zephyrproject-rtos'
runs-on: ubuntu-22.04
runs-on:
group: zephyr-runner-v2-linux-x64-4xlarge
container: texlive/texlive:latest
timeout-minutes: 120
concurrency:
@ -198,6 +205,12 @@ jobs:
run: |
git config --global --add safe.directory ${GITHUB_WORKSPACE}
- name: Print cloud service information
run: |
echo "ZEPHYR_RUNNER_CLOUD_PROVIDER = ${ZEPHYR_RUNNER_CLOUD_PROVIDER}"
echo "ZEPHYR_RUNNER_CLOUD_NODE = ${ZEPHYR_RUNNER_CLOUD_NODE}"
echo "ZEPHYR_RUNNER_CLOUD_POD = ${ZEPHYR_RUNNER_CLOUD_POD}"
- name: checkout
uses: actions/checkout@v4

View file

@ -21,20 +21,16 @@ jobs:
steps:
- name: Download artifacts
id: download-artifacts
uses: dawidd6/action-download-artifact@v6
uses: dawidd6/action-download-artifact@v3
with:
workflow: doc-build.yml
run_id: ${{ github.event.workflow_run.id }}
if_no_artifact_found: ignore
- name: Load PR number
if: steps.download-artifacts.outputs.found_artifact == 'true'
run: |
echo "PR_NUM=$(<pr_num/pr_num)" >> $GITHUB_ENV
- name: Check PR number
if: steps.download-artifacts.outputs.found_artifact == 'true'
id: check-pr
uses: carpentries/actions/check-valid-pr@v0.14.0
with:
@ -42,15 +38,12 @@ jobs:
sha: ${{ github.event.workflow_run.head_sha }}
- name: Validate PR number
if: |
steps.download-artifacts.outputs.found_artifact == 'true' &&
steps.check-pr.outputs.VALID != 'true'
if: steps.check-pr.outputs.VALID != 'true'
run: |
echo "ABORT: PR number validation failed!"
exit 1
- name: Uncompress HTML docs
if: steps.download-artifacts.outputs.found_artifact == 'true'
run: |
tar xf html-output/html-output.tar.xz -C html-output
if [ -f api-coverage/api-coverage.tar.xz ]; then
@ -58,7 +51,6 @@ jobs:
fi
- name: Configure AWS Credentials
if: steps.download-artifacts.outputs.found_artifact == 'true'
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ vars.AWS_BUILDS_ZEPHYR_PR_ACCESS_KEY_ID }}
@ -66,7 +58,6 @@ jobs:
aws-region: us-east-1
- name: Upload to AWS S3
if: steps.download-artifacts.outputs.found_artifact == 'true'
env:
HEAD_BRANCH: ${{ github.event.workflow_run.head_branch }}
run: |

View file

@ -24,7 +24,7 @@ jobs:
steps:
- name: Download artifacts
uses: dawidd6/action-download-artifact@v6
uses: dawidd6/action-download-artifact@v3
with:
workflow: doc-build.yml
run_id: ${{ github.event.workflow_run.id }}

View file

@ -26,7 +26,7 @@ jobs:
group: zephyr-runner-v2-linux-x64-4xlarge
if: github.repository_owner == 'zephyrproject-rtos'
container:
image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.14.20240823
image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.13.20240601
options: '--entrypoint /bin/bash'
strategy:
fail-fast: false
@ -93,32 +93,3 @@ jobs:
. .venv/bin/activate
pip3 install awscli
aws s3 sync --quiet footprint_data/ s3://testing.zephyrproject.org/footprint_data/
- name: Transform Footprint data to Twister JSON reports
run: |
shopt -s globstar
export ZEPHYR_BASE=${PWD}
python3 ./scripts/footprint/pack_as_twister.py -vvv \
--plan ./scripts/footprint/plan.txt \
--test-name='name.feature' \
./footprint_data/*/footprints/*/*/
- name: Upload to ElasticSearch
env:
ELASTICSEARCH_KEY: ${{ secrets.ELASTICSEARCH_KEY }}
ELASTICSEARCH_SERVER: "https://elasticsearch.zephyrproject.io:443"
ELASTICSEARCH_INDEX: ${{ vars.FOOTPRINT_TRACKING_INDEX }}
run: |
pip3 install -U elasticsearch
run_date=`date --iso-8601=minutes`
python3 ./scripts/ci/upload_test_results_es.py -r ${run_date} \
--flatten footprint \
--flatten-list-names "{'children':'name'}" \
--transform "{ 'footprint_name': '^(?P<footprint_area>([^\/]+\/){0,2})(?P<footprint_path>([^\/]*\/)*)(?P<footprint_symbol>[^\/]*)$' }" \
--run-id "${{ github.run_id }}" \
--run-attempt "${{ github.run_attempt }}" \
--run-workflow "footprint-tracking:${{ github.event_name }}" \
--run-branch "${{ github.ref_name }}" \
-i ${ELASTICSEARCH_INDEX} \
./footprint_data/**/twister_footprint.json
#

View file

@ -26,7 +26,7 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [ubuntu-22.04, ubuntu-24.04, macos-13, macos-14, windows-2022]
os: [ubuntu-22.04, macos-13, macos-14, windows-2022]
runs-on: ${{ matrix.os }}
steps:
- name: Checkout
@ -46,7 +46,6 @@ jobs:
git config --global user.email "actions@zephyrproject.org"
git config --global user.name "Github Actions"
git rebase origin/${BASE_REF}
git clean -f -d
git log --graph --oneline HEAD...${PR_HEAD}
- name: Set up Python

View file

@ -26,7 +26,7 @@ jobs:
west init -l . || true
- name: Manifest
uses: zephyrproject-rtos/action-manifest@v1.3.1
uses: zephyrproject-rtos/action-manifest@v1.3.0
with:
github-token: ${{ secrets.ZB_GITHUB_TOKEN }}
manifest-path: 'west.yml'

View file

@ -25,7 +25,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
python-version: ['3.10', '3.11', '3.12', '3.13']
python-version: ['3.10', '3.11', '3.12']
os: [ubuntu-22.04]
steps:
- name: checkout

View file

@ -21,7 +21,7 @@ jobs:
echo "TRIMMED_VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_OUTPUT
- name: REUSE Compliance Check
uses: fsfe/reuse-action@v4
uses: fsfe/reuse-action@v1
with:
args: spdx -o zephyr-${{ steps.get_version.outputs.VERSION }}.spdx

View file

@ -1,61 +0,0 @@
# This workflow uses actions that are not certified by GitHub. They are provided
# by a third-party and are governed by separate terms of service, privacy
# policy, and support documentation.
name: Scorecards supply-chain security
on:
# For Branch-Protection check. Only the default branch is supported. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
branch_protection_rule:
# To guarantee Maintained check is occasionally updated. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
schedule:
- cron: '43 7 * * 6'
push:
branches:
- main
permissions: read-all
jobs:
analysis:
name: Scorecard analysis
runs-on: ubuntu-latest
permissions:
# Needed for Code scanning upload
security-events: write
# Needed for GitHub OIDC token if publish_results is true
id-token: write
steps:
- name: "Checkout code"
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0
with:
results_file: results.sarif
results_format: sarif
# Publish results to OpenSSF REST API for easy access by consumers.
# - Allows the repository to include the Scorecard badge.
# - See https://github.com/ossf/scorecard-action#publishing-results.
publish_results: true
# Upload the results as artifacts (optional). Commenting out will disable
# uploads of run results in SARIF format to the repository Actions tab.
# https://docs.github.com/en/actions/advanced-guides/storing-workflow-data-as-artifacts
- name: "Upload artifact"
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
with:
name: SARIF file
path: results.sarif
retention-days: 5
# Upload the results to GitHub's code scanning dashboard (optional).
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15
with:
sarif_file: results.sarif

View file

@ -25,7 +25,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
python-version: ['3.10', '3.11', '3.12', '3.13']
python-version: ['3.10', '3.11', '3.12']
os: [ubuntu-20.04]
steps:
- name: checkout
@ -43,7 +43,6 @@ jobs:
git config --global user.email "actions@zephyrproject.org"
git config --global user.name "Github Actions"
git rebase origin/${BASE_REF}
git clean -f -d
git log --graph --oneline HEAD...${PR_HEAD}
- name: Set up Python ${{ matrix.python-version }}

View file

@ -9,7 +9,7 @@ jobs:
runs-on: ubuntu-22.04
if: github.repository == 'zephyrproject-rtos/zephyr'
steps:
- uses: actions/stale@v9
- uses: actions/stale@v8
with:
stale-pr-message: 'This pull request has been marked as stale because it has been open (more
than) 60 days with no activity. Remove the stale label or add a comment saying that you

View file

@ -25,7 +25,7 @@ jobs:
runs-on:
group: zephyr-runner-v2-linux-x64-4xlarge
container:
image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.14.20240823
image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.13.20240601
options: '--entrypoint /bin/bash'
outputs:
subset: ${{ steps.output-services.outputs.subset }}
@ -77,7 +77,6 @@ jobs:
git config --global user.name "Zephyr Bot"
rm -fr ".git/rebase-apply"
git rebase origin/${BASE_REF}
git clean -f -d
git log --pretty=oneline | head -n 10
west init -l . || true
west config manifest.group-filter -- +ci,+optional
@ -130,7 +129,7 @@ jobs:
needs: twister-build-prep
if: needs.twister-build-prep.outputs.size != 0
container:
image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.14.20240823
image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.26.13.20240601
options: '--entrypoint /bin/bash'
strategy:
fail-fast: false
@ -186,11 +185,9 @@ jobs:
git config --global user.name "Zephyr Builder"
rm -fr ".git/rebase-apply"
git rebase origin/${BASE_REF}
git clean -f -d
git log --pretty=oneline | head -n 10
fi
echo "$HOME/.local/bin" >> $GITHUB_PATH
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
west init -l . || true
west config manifest.group-filter -- +ci,+optional
@ -204,8 +201,6 @@ jobs:
run: |
cmake --version
gcc --version
cargo --version
rustup target list --installed
ls -la
echo "github.ref: ${{ github.ref }}"
echo "github.base_ref: ${{ github.base_ref }}"
@ -315,7 +310,7 @@ jobs:
if: success() || failure()
steps:
# Needed for elasticsearch and upload script
# Needed for opensearch and upload script
- if: github.event_name == 'push' || github.event_name == 'schedule'
name: Checkout
uses: actions/checkout@v4
@ -329,7 +324,7 @@ jobs:
path: artifacts
- if: github.event_name == 'push' || github.event_name == 'schedule'
name: Upload to elasticsearch
name: Upload to opensearch
run: |
pip3 install elasticsearch
# set run date on upload to get consistent and unified data across the matrix.

View file

@ -32,7 +32,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
python-version: ['3.10', '3.11', '3.12', '3.13']
python-version: ['3.10', '3.11', '3.12']
os: [ubuntu-22.04]
steps:
- name: checkout

View file

@ -21,7 +21,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
python-version: ['3.10', '3.11', '3.12', '3.13']
python-version: ['3.10', '3.11', '3.12']
os: [ubuntu-22.04]
container:
image: ghcr.io/zephyrproject-rtos/ci:v0.26.13
@ -43,8 +43,6 @@ jobs:
echo "$HOME/.local/bin" >> $GITHUB_PATH
west init -l . || true
# we do not depend on any hals, tools or bootloader, save some time and space...
west config manifest.group-filter -- -hal,-tools,-bootloader
west config --global update.narrow true
west update --path-cache /github/cache/zephyrproject 2>&1 1> west.update.log || west update --path-cache /github/cache/zephyrproject 2>&1 1> west.update.log || ( rm -rf ../modules ../bootloader ../tools && west update --path-cache /github/cache/zephyrproject)
west forall -c 'git reset --hard HEAD'

View file

@ -29,8 +29,13 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
python-version: ['3.10', '3.11', '3.12', '3.13']
python-version: ['3.10', '3.11', '3.12']
os: [ubuntu-22.04, macos-14, windows-2022]
exclude:
- os: macos-14
python-version: 3.6
- os: windows-2022
python-version: 3.6
steps:
- name: checkout
uses: actions/checkout@v4

.gitignore (vendored), 14 lines changed
View file

@ -59,17 +59,6 @@ venv
.clangd
new.info
# Cargo drops lock files in projects to capture resolved dependencies.
# We don't want to record these.
Cargo.lock
# Cargo encourages a .cargo/config.toml file to symlink to a generated file. Don't save these.
.cargo/
# Normal west builds will place the Rust target directory under the build directory. However,
# sometimes IDEs and such will litter these target directories as well.
target/
# CI output
compliance.xml
_error.types
@ -87,7 +76,6 @@ tags
BinaryFiles.txt
BoardYml.txt
Checkpatch.txt
ClangFormat.txt
DevicetreeBindings.txt
GitDiffCheck.txt
Gitlint.txt
@ -102,6 +90,4 @@ MaintainersFormat.txt
ModulesMaintainers.txt
Nits.txt
Pylint.txt
SphinxLint.txt
TextEncoding.txt
YAMLLint.txt

View file

@ -61,7 +61,6 @@ Lixin Guo <lixinx.guo@intel.com>
Łukasz Mazur <lukasz.mazur@hidglobal.com>
Manuel Argüelles <manuel.arguelles@nxp.com>
Manuel Argüelles <manuel.arguelles@nxp.com> <manuel.arguelles@coredumplabs.com>
Manuel Argüelles <manuel.arguelles@nxp.com> <marguelles.dev@gmail.com>
Marc Herbert <marc.herbert@intel.com> <46978960+marc-hb@users.noreply.github.com>
Marin Jurjević <marin.jurjevic@hotmail.com>
Mariusz Ryndzionek <mariusz.ryndzionek@firmwave.com>

View file

@ -152,7 +152,6 @@ zephyr_compile_options($<$<COMPILE_LANGUAGE:CXX>:$<TARGET_PROPERTY:compiler-cpp,
# Extra warnings options for twister run
if (CONFIG_COMPILER_WARNINGS_AS_ERRORS)
zephyr_compile_options($<$<COMPILE_LANGUAGE:C>:$<TARGET_PROPERTY:compiler,warnings_as_errors>>)
zephyr_compile_options($<$<COMPILE_LANGUAGE:CXX>:$<TARGET_PROPERTY:compiler,warnings_as_errors>>)
zephyr_compile_options($<$<COMPILE_LANGUAGE:ASM>:$<TARGET_PROPERTY:asm,warnings_as_errors>>)
zephyr_link_libraries($<TARGET_PROPERTY:linker,warnings_as_errors>)
endif()
@ -361,21 +360,8 @@ zephyr_compile_options(
$<$<COMPILE_LANGUAGE:ASM>:-D_ASMLANGUAGE>
)
find_package(Deprecated COMPONENTS toolchain_ld_base)
if(DEFINED TOOLCHAIN_LD_FLAGS)
zephyr_ld_options(${TOOLCHAIN_LD_FLAGS})
endif()
zephyr_link_libraries(PROPERTY base)
zephyr_link_libraries_ifndef(CONFIG_LINKER_USE_RELAX PROPERTY no_relax)
zephyr_link_libraries_ifdef(CONFIG_LINKER_USE_RELAX PROPERTY relax)
# Sort the common symbols and each input section by alignment
# in descending order to minimize padding between these symbols.
zephyr_link_libraries_ifdef(CONFIG_LINKER_SORT_BY_ALIGNMENT PROPERTY sort_alignment)
# @Intent: Set fundamental linker specific flags
toolchain_ld_base()
toolchain_ld_force_undefined_symbols(
_OffsetAbsSyms
@ -383,37 +369,13 @@ toolchain_ld_force_undefined_symbols(
)
if(NOT CONFIG_NATIVE_BUILD)
find_package(Deprecated COMPONENTS toolchain_ld_baremetal)
zephyr_link_libraries(PROPERTY baremetal)
# Note that some architectures will skip this flag if set to error, even
# though the compiler flag check passes (e.g. ARC and Xtensa). So warning
# should be the default for now.
#
# Skip this for native application as Zephyr only provides
# additions to the host toolchain linker script. The relocation
# sections (.rel*) requires us to override those provided
# by host toolchain. As we can't account for all possible
# combination of compiler and linker on all machines used
# for development, it is better to turn this off.
#
# CONFIG_LINKER_ORPHAN_SECTION_PLACE is to place the orphan sections
# without any warnings or errors, which is the default behavior.
# So there is no need to explicitly set a linker flag.
if(CONFIG_LINKER_ORPHAN_SECTION_WARN)
zephyr_link_libraries(PROPERTY orphan_warning)
elseif(CONFIG_LINKER_ORPHAN_SECTION_ERROR)
zephyr_link_libraries(PROPERTY orphan_error)
endif()
# @Intent: Set linker specific flags for bare metal target
toolchain_ld_baremetal()
endif()
if(CONFIG_CPP)
if(NOT CONFIG_MINIMAL_LIBCPP AND NOT CONFIG_NATIVE_LIBRARY)
find_package(Deprecated COMPONENTS toolchain_ld_cpp)
endif()
zephyr_link_libraries(PROPERTY cpp_base)
if(CONFIG_CPP AND NOT CONFIG_MINIMAL_LIBCPP AND NOT CONFIG_NATIVE_LIBRARY)
# @Intent: Set linker specific flags for C++
toolchain_ld_cpp()
endif()
# @Intent: Add the basic toolchain warning flags
@ -1651,14 +1613,11 @@ endif()
if(CONFIG_BUILD_OUTPUT_ADJUST_LMA)
math(EXPR adjustment "${CONFIG_BUILD_OUTPUT_ADJUST_LMA}" OUTPUT_FORMAT DECIMAL)
set(args_adjustment ${CONFIG_BUILD_OUTPUT_ADJUST_LMA_SECTIONS})
list(TRANSFORM args_adjustment PREPEND $<TARGET_PROPERTY:bintools,elfconvert_flag_lma_adjust>)
list(TRANSFORM args_adjustment APPEND +${adjustment})
list(APPEND
post_build_commands
COMMAND $<TARGET_PROPERTY:bintools,elfconvert_command>
$<TARGET_PROPERTY:bintools,elfconvert_flag_final>
${args_adjustment}
$<TARGET_PROPERTY:bintools,elfconvert_flag_lma_adjust>${adjustment}
$<TARGET_PROPERTY:bintools,elfconvert_flag_infile>${KERNEL_ELF_NAME}
$<TARGET_PROPERTY:bintools,elfconvert_flag_outfile>${KERNEL_ELF_NAME}
)
@ -2174,15 +2133,12 @@ endif()
set(llext_edk_file ${PROJECT_BINARY_DIR}/${CONFIG_LLEXT_EDK_NAME}.tar.xz)
# TODO maybe generate flags for C CXX ASM
zephyr_get_compile_definitions_for_lang(C zephyr_defs)
zephyr_get_compile_options_for_lang(C zephyr_flags)
# Filter out non LLEXT and LLEXT_EDK flags - and add required ones
llext_filter_zephyr_flags(LLEXT_REMOVE_FLAGS ${zephyr_flags} llext_filt_flags)
llext_filter_zephyr_flags(LLEXT_EDK_REMOVE_FLAGS ${llext_filt_flags} llext_filt_flags)
llext_filter_zephyr_flags(LLEXT_REMOVE_FLAGS ${zephyr_flags} llext_edk_cflags)
llext_filter_zephyr_flags(LLEXT_EDK_REMOVE_FLAGS ${llext_edk_cflags} llext_edk_cflags)
set(llext_edk_cflags ${zephyr_defs} -DLL_EXTENSION_BUILD)
list(APPEND llext_edk_cflags ${llext_filt_flags})
list(APPEND llext_edk_cflags ${LLEXT_APPEND_FLAGS})
list(APPEND llext_edk_cflags ${LLEXT_EDK_APPEND_FLAGS})
@ -2206,7 +2162,7 @@ add_custom_command(
-DAPPLICATION_SOURCE_DIR=${APPLICATION_SOURCE_DIR}
-DINTERFACE_INCLUDE_DIRECTORIES="$<TARGET_PROPERTY:zephyr_interface,INTERFACE_INCLUDE_DIRECTORIES>"
-Dllext_edk_file=${llext_edk_file}
-Dllext_edk_cflags="${llext_edk_cflags}"
-Dllext_cflags="${llext_edk_cflags}"
-Dllext_edk_name=${CONFIG_LLEXT_EDK_NAME}
-DWEST_TOPDIR=${WEST_TOPDIR}
-DZEPHYR_BASE=${ZEPHYR_BASE}
@ -2233,12 +2189,3 @@ add_subdirectory_ifdef(
CONFIG_MAKEFILE_EXPORTS
cmake/makefile_exports
)
toolchain_linker_finalize()
yaml_context(EXISTS NAME build_info result)
if(result)
build_info(zephyr version VALUE ${PROJECT_VERSION_STR})
build_info(zephyr zephyr-base VALUE ${ZEPHYR_BASE})
yaml_save(NAME build_info)
endif()

View file

@ -188,7 +188,6 @@
/drivers/dai/intel/ssp/ @kv2019i @marcinszkudlinski @abonislawski
/drivers/dai/intel/dmic/ @marcinszkudlinski @abonislawski
/drivers/dai/intel/alh/ @abonislawski
/drivers/dma/dma_dw_axi.c @pbalsundar
/drivers/dma/*dw* @tbursztyka
/drivers/dma/*dw_common* @abonislawski
/drivers/dma/*sam0* @Sizurka
@ -210,8 +209,6 @@
/drivers/ethernet/*adin2111* @GeorgeCGV
/drivers/ethernet/*oa_tc6* @lmajewski
/drivers/ethernet/*lan865x* @lmajewski
/drivers/ethernet/dwc_xgmac @Smale-12048867
/drivers/ethernet/dwc_xgmac/dwc_xgmac @Smale-12048867
/drivers/ethernet/phy/ @rlubos @tbursztyka @arvinf @jukkar
/drivers/ethernet/phy/*adin2111* @GeorgeCGV
/drivers/mdio/*adin2111* @GeorgeCGV
@ -370,6 +367,7 @@
/drivers/timer/*rcar_cmt* @aaillet
/drivers/timer/*esp32_sys* @uLipe
/drivers/timer/*sam0_rtc* @bendiscz
/drivers/timer/*arcv2* @ruuddw
/drivers/timer/*xtensa* @dcpleung
/drivers/timer/*rv32m1_lptmr* @mbolivar
/drivers/timer/*nrf_rtc* @anangl
@ -397,6 +395,7 @@
/drivers/wifi/eswifi/ @loicpoulain @nandojve
/drivers/wifi/winc1500/ @kludentwo
/drivers/virtualization/ @tbursztyka
/dts/arc/ @abrodkin @ruuddw @iriszzw @evgeniy-paltsev
/dts/arm/acsip/ @NorthernDean
/dts/arm/aspeed/ @aspeeddylan
/dts/arm/atmel/ @galak @nandojve

View file

@ -260,20 +260,6 @@ config LINKER_USE_PINNED_SECTION
Requires that pinned sections exist in the architecture, SoC,
board or custom linker script.
config LINKER_USE_ONDEMAND_SECTION
bool "Use Evictable Linker Section"
depends on DEMAND_MAPPING
depends on !LINKER_USE_PINNED_SECTION
depends on !ARCH_MAPS_ALL_RAM
help
If enabled, the symbols which may be evicted from memory
will be put into a linker section reserved for on-demand symbols.
During boot, the corresponding memory will be mapped as paged out.
This is conceptually the opposite of CONFIG_LINKER_USE_PINNED_SECTION.
Requires that on-demand sections exist in the architecture, SoC,
board or custom linker script.
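
For context, a minimal sketch of what placing code into such an evictable section can look like. The ".ondemand.text" section name below is hypothetical, chosen for illustration; the real section names come from the architecture, SoC, or custom linker scripts mentioned above:

/* Hypothetical example: a function placed in an on-demand section is
 * mapped as paged out at boot and faulted in on first execution.
 * The section name here is illustrative, not a real Zephyr section.
 */
__attribute__((__section__(".ondemand.text")))
void rarely_used_feature(void)
{
        /* first call triggers demand paging of these pages */
}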
config LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT
bool "Generic sections are present at boot" if DEMAND_PAGING && LINKER_USE_PINNED_SECTION
default y
@ -289,7 +275,7 @@ config LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT
config LINKER_LAST_SECTION_ID
bool "Last section identifier"
default y if !ARM64
default y
depends on ARM || ARM64 || RISCV
help
If enabled, the last section will contain an identifier.
@ -836,23 +822,6 @@ config BUILD_OUTPUT_ADJUST_LMA
default "$(dt_chosen_reg_addr_hex,$(DT_CHOSEN_IMAGE_M4))-\
$(dt_chosen_reg_addr_hex,$(DT_CHOSEN_Z_FLASH))"
config BUILD_OUTPUT_ADJUST_LMA_SECTIONS
def_string "*"
depends on BUILD_OUTPUT_ADJUST_LMA!=""
help
This determines the output sections to which the above LMA adjustment
will be applied.
The value can be the name of a section in the final ELF, like "text".
It can also be a pattern with wildcards, such as "*bss", which could
match more than one section name. Multiple such patterns can be given
as a ";"-separated list. It's possible to supply a 'negative' pattern
starting with "!", to exclude sections matched by a preceding pattern.
By default, all sections will have their LMA adjusted. The following
example excludes one section produced by the code relocation feature:
config BUILD_OUTPUT_ADJUST_LMA_SECTIONS
default "*;!.extflash_text_reloc"
config BUILD_OUTPUT_INFO_HEADER
bool "Create a image information header"
help
@ -963,8 +932,6 @@ config DEPRECATED
help
Symbol that must be selected by a feature or module if it is
considered to be deprecated.
When adding this to an option, remember to follow the instructions in
https://docs.zephyrproject.org/latest/develop/api/api_lifecycle.html#deprecated
config WARN_DEPRECATED
bool

File diff suppressed because it is too large.

View file

@ -10,9 +10,12 @@
</p>
</a>
<a href="https://bestpractices.coreinfrastructure.org/projects/74"><img src="https://bestpractices.coreinfrastructure.org/projects/74/badge"></a>
<a href="https://scorecard.dev/viewer/?uri=github.com/zephyrproject-rtos/zephyr"><img src="https://api.securityscorecards.dev/projects/github.com/zephyrproject-rtos/zephyr/badge"></a>
<a href="https://github.com/zephyrproject-rtos/zephyr/actions/workflows/twister.yaml?query=branch%3Amain"><img src="https://github.com/zephyrproject-rtos/zephyr/actions/workflows/twister.yaml/badge.svg?event=push"></a>
<a href="https://bestpractices.coreinfrastructure.org/projects/74"><img
src="https://bestpractices.coreinfrastructure.org/projects/74/badge"></a>
<a
href="https://github.com/zephyrproject-rtos/zephyr/actions/workflows/twister.yaml?query=branch%3Amain">
<img
src="https://github.com/zephyrproject-rtos/zephyr/actions/workflows/twister.yaml/badge.svg?event=push"></a>
The Zephyr Project is a scalable real-time operating system (RTOS) supporting

View file

@ -53,8 +53,6 @@ config ARM64
select IRQ_OFFLOAD_NESTED if IRQ_OFFLOAD
select BARRIER_OPERATIONS_ARCH
select ARCH_HAS_DIRECTED_IPIS
select ARCH_HAS_DEMAND_PAGING
select ARCH_HAS_DEMAND_MAPPING
help
ARM64 (AArch64) architecture
@ -83,7 +81,6 @@ config X86
select ARCH_IS_SET
select ATOMIC_OPERATIONS_BUILTIN
select ARCH_SUPPORTS_COREDUMP
select ARCH_SUPPORTS_COREDUMP_PRIV_STACKS
select ARCH_SUPPORTS_ROM_START if !X86_64
select CPU_HAS_MMU
select ARCH_MEM_DOMAIN_DATA if USERSPACE && !X86_COMMON_PAGE_TABLE
@ -92,14 +89,12 @@ config X86
select ARCH_HAS_TIMING_FUNCTIONS
select ARCH_HAS_THREAD_LOCAL_STORAGE
select ARCH_HAS_DEMAND_PAGING if !X86_64
select ARCH_HAS_DEMAND_MAPPING if ARCH_HAS_DEMAND_PAGING
select IRQ_OFFLOAD_NESTED if IRQ_OFFLOAD
select NEED_LIBC_MEM_PARTITION if USERSPACE && TIMING_FUNCTIONS \
&& !BOARD_HAS_TIMING_FUNCTIONS \
&& !SOC_HAS_TIMING_FUNCTIONS
select ARCH_HAS_STACK_CANARIES_TLS
select ARCH_SUPPORTS_MEM_MAPPED_STACKS if X86_MMU && !DEMAND_PAGING
select ARCH_HAS_THREAD_PRIV_STACK_SPACE_GET if USERSPACE
help
x86 architecture
@ -116,18 +111,17 @@ config RISCV
bool
select ARCH_IS_SET
select ARCH_SUPPORTS_COREDUMP
select ARCH_SUPPORTS_COREDUMP_PRIV_STACKS
select ARCH_SUPPORTS_ROM_START if !SOC_FAMILY_ESPRESSIF_ESP32
select ARCH_SUPPORTS_EMPTY_IRQ_SPURIOUS
select ARCH_HAS_CODE_DATA_RELOCATION
select ARCH_HAS_THREAD_LOCAL_STORAGE
select ARCH_HAS_STACKWALK
select IRQ_OFFLOAD_NESTED if IRQ_OFFLOAD
select USE_SWITCH_SUPPORTED
select USE_SWITCH
select SCHED_IPI_SUPPORTED if SMP
select ARCH_HAS_DIRECTED_IPIS
select BARRIER_OPERATIONS_BUILTIN
select ARCH_HAS_THREAD_PRIV_STACK_SPACE_GET if USERSPACE
imply XIP
help
RISCV architecture
@ -142,7 +136,6 @@ config XTENSA
select ARCH_MEM_DOMAIN_DATA if USERSPACE
select ARCH_HAS_DIRECTED_IPIS
select THREAD_STACK_INFO
select ARCH_HAS_THREAD_PRIV_STACK_SPACE_GET if USERSPACE
help
Xtensa architecture
@ -153,7 +146,6 @@ config ARCH_POSIX
select ARCH_HAS_CUSTOM_SWAP_TO_MAIN
select ARCH_HAS_CUSTOM_BUSY_WAIT
select ARCH_HAS_THREAD_ABORT
select ARCH_HAS_THREAD_NAME_HOOK
select NATIVE_BUILD
select HAS_COVERAGE_SUPPORT
select BARRIER_OPERATIONS_BUILTIN
@ -189,9 +181,8 @@ config BIG_ENDIAN
Little-endian architecture is the default and should leave this option
unselected. This option is selected by arch/$ARCH/Kconfig,
soc/**/Kconfig, or boards/**/Kconfig and the user should generally avoid
modifying it. The option is used to select linker script OUTPUT_FORMAT,
the toolchain flags (TOOLCHAIN_C_FLAGS, TOOLCHAIN_LD_FLAGS), and command
line option for gen_isr_tables.py.
modifying it. The option is used to select linker script OUTPUT_FORMAT
and command line option for gen_isr_tables.py.
config LITTLE_ENDIAN
# Hidden Kconfig option representing the default little-endian architecture
@ -423,17 +414,10 @@ config FRAME_POINTER
Select Y here to gain precise stack traces at the expense of slightly
increased size and decreased speed.
config ARCH_STACKWALK
bool "Compile the stack walking function"
default y
depends on ARCH_HAS_STACKWALK
help
Select Y here to compile the `arch_stack_walk()` function
config ARCH_STACKWALK_MAX_FRAMES
int "Max depth for stack walk function"
default 8
depends on ARCH_STACKWALK
depends on ARCH_HAS_STACKWALK
help
Depending on implementation, this can place a hard limit on the depth of the stack
for the stack walk function to examine.
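
As a usage sketch of the option above, assuming the arch_stack_walk() callback convention Zephyr uses (a per-frame callback whose return value can stop the walk early; the walk is bounded by ARCH_STACKWALK_MAX_FRAMES):

#include <zephyr/kernel.h>

/* Invoked once per stack frame; returning false stops the walk. */
static bool print_frame(void *cookie, unsigned long addr)
{
        int *depth = cookie;

        printk("frame %d: %#lx\n", (*depth)++, addr);
        return true;
}

void dump_current_stack(void)
{
        int depth = 0;

        /* NULL esf: walk the current (non-exception) context. */
        arch_stack_walk(print_frame, &depth, k_current_get(), NULL);
}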
@ -615,14 +599,6 @@ config SIMPLIFIED_EXCEPTION_CODES
down to the generic K_ERR_CPU_EXCEPTION, which makes testing code
much more portable.
config EMPTY_IRQ_SPURIOUS
bool "Create empty spurious interrupt handler"
depends on ARCH_SUPPORTS_EMPTY_IRQ_SPURIOUS
help
This option changes the body of the spurious interrupt handler. When
enabled, the handler contains only an infinite while loop; when
disabled, it contains the whole Zephyr fault handling procedure.
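
Illustratively, the two handler bodies contrasted above look roughly like this; z_irq_spurious() is Zephyr's spurious-IRQ entry point, and the fault path is simplified here:

/* Simplified sketch of the two spurious-handler bodies. */
void z_irq_spurious(const void *unused)
{
        ARG_UNUSED(unused);
#ifdef CONFIG_EMPTY_IRQ_SPURIOUS
        for (;;) {
                /* minimal body: spin forever */
        }
#else
        z_fatal_error(K_ERR_SPURIOUS_IRQ, NULL); /* full fault handling */
#endif
}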
endmenu # Interrupt configuration
config INIT_ARCH_HW_AT_BOOT
@ -682,24 +658,23 @@ config ARCH_SUPPORTS_COREDUMP
config ARCH_SUPPORTS_COREDUMP_THREADS
bool
config ARCH_SUPPORTS_COREDUMP_PRIV_STACKS
bool
config ARCH_SUPPORTS_ARCH_HW_INIT
bool
config ARCH_SUPPORTS_ROM_START
bool
config ARCH_SUPPORTS_EMPTY_IRQ_SPURIOUS
bool
config ARCH_HAS_EXTRA_EXCEPTION_INFO
bool
config ARCH_HAS_GDBSTUB
bool
config ARCH_HAS_STACKWALK
bool
help
This is selected when the architecture implemented the arch_stack_walk() API.
config ARCH_HAS_COHERENCE
bool
help
@ -723,11 +698,6 @@ config ARCH_SUPPORTS_MEM_MAPPED_STACKS
help
Select when the architecture supports memory mapped stacks.
config ARCH_HAS_THREAD_PRIV_STACK_SPACE_GET
bool
help
Select when the architecture implements arch_thread_priv_stack_space_get().
#
# Other architecture related options
#
@ -793,13 +763,6 @@ config ARCH_HAS_DEMAND_PAGING
This hidden configuration should be selected by the architecture if
demand paging is supported.
config ARCH_HAS_DEMAND_MAPPING
bool
help
This hidden configuration should be selected by the architecture if
demand paging is supported and arch_mem_map() supports
K_MEM_MAP_UNPAGED.
config ARCH_HAS_RESERVED_PAGE_FRAMES
bool
help
@ -825,7 +788,7 @@ config CPU_CACHE_INCOHERENT
help
This hidden configuration should be selected when the CPU has
incoherent cache. This applies to intra-CPU multiprocessing
incoherence and only makes sense when MP_MAX_NUM_CPUS > 1.
incoherence and only makes sense when MP_NUM_CPUS > 1.
config CPU_HAS_ICACHE
bool
@ -1031,7 +994,7 @@ config CACHE_DOUBLEMAP
point to the same cached/uncached memory at different locations.
This applies to intra-CPU multiprocessing incoherence and only makes
sense when MP_MAX_NUM_CPUS > 1.
sense when MP_NUM_CPUS > 1.
config CACHE_MANAGEMENT
bool "Cache management features"
@ -1115,28 +1078,9 @@ config TOOLCHAIN_HAS_BUILTIN_FFS
help
Hidden option to signal that toolchain has __builtin_ffs*().
config ARCH_HAS_CUSTOM_CPU_IDLE
bool
config ARCH_CPU_IDLE_CUSTOM
bool "Custom arch_cpu_idle implementation"
default n
help
This option allows applications to override the default arch idle implementation with
a custom one.
config ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
bool
help
This option allows applications to override the default arch idle implementation with
a custom one.
config ARCH_HAS_CUSTOM_SWAP_TO_MAIN
bool
help
It's possible that an architecture port cannot use _Swap() to swap to
the _main() thread, but instead must do something custom. It must
enable this option in that case.
config ARCH_HAS_CUSTOM_BUSY_WAIT
bool
help
It's possible that an architecture port cannot or does not want to use
the provided k_busy_wait(), but instead must do something custom. It must
enable this option in that case.
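
Where a platform selects one of the custom-idle options above, it supplies the corresponding function itself. A minimal sketch follows; the wfi instruction and the interrupt re-enable are platform assumptions, not a canonical implementation:

/* Sketch of a custom idle hook provided by an SoC that selects
 * CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE. Details vary per architecture.
 */
void arch_cpu_idle(void)
{
        sys_trace_idle();          /* keep tracing parity with generic idle */
        __asm__ volatile("wfi");   /* wait for interrupt (ARM-flavored) */
        irq_unlock(0);             /* contract: return with IRQs enabled */
}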

View file

@ -343,15 +343,6 @@ config ARC_NORMAL_FIRMWARE
resources of the ARC processors, and, therefore, it shall avoid
accessing them.
config ARC_VPX_COOPERATIVE_SHARING
bool "Cooperative sharing of ARC VPX vector registers"
select SCHED_CPU_MASK if MP_MAX_NUM_CPUS > 1
help
This option enables the cooperative sharing of the ARC VPX vector
registers. Threads that want to use those registers must successfully
call arc_vpx_lock() before using them, and call arc_vpx_unlock()
when done using them.
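
A short usage sketch of the protocol this help text describes, using the arc_vpx_lock()/arc_vpx_unlock() signatures visible further down in this diff; the calling thread is assumed pinned to one CPU, as the implementation requires:

#include <zephyr/kernel.h>

void do_vpx_work(void)
{
        /* Block up to 100 ms for this CPU's VPX registers. */
        if (arc_vpx_lock(K_MSEC(100)) != 0) {
                return; /* timed out; registers still in use */
        }

        /* ... use the VPX vector registers ... */

        arc_vpx_unlock();
}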
source "arch/arc/core/dsp/Kconfig"
menu "ARC MPU Options"
@ -383,9 +374,7 @@ config ARC_EXCEPTION_STACK_SIZE
endmenu
config ARC_EARLY_SOC_INIT
bool "Make early stage SoC-specific initialization [DEPRECATED]"
select SOC_RESET_HOOK
select DEPRECATED
bool "Make early stage SoC-specific initialization"
help
Call SoC per-core setup code on early stage initialization
(before C runtime initialization). Setup code is called in form of

View file

@ -227,8 +227,4 @@ static int init_dcache(void)
return 0;
}
void arch_cache_init(void)
{
init_dcache();
}
SYS_INIT(init_dcache, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

View file

@ -26,7 +26,6 @@ SECTION_VAR(BSS, z_arc_cpu_sleep_mode)
.align 4
.word 0
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
/*
* @brief Put the CPU in low-power mode
*
@ -49,9 +48,7 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
sleep r1
j_s [blink]
nop
#endif
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
/*
* @brief Put the CPU in low-power mode, entered with IRQs locked
*
@ -59,7 +56,6 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
*
* void arch_cpu_atomic_idle(unsigned int key)
*/
SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
#ifdef CONFIG_TRACING
@ -74,4 +70,3 @@ SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
sleep r1
j_s.d [blink]
seti r0
#endif

View file

@ -54,7 +54,7 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
}
/* need to be executed on every core in the system */
void arch_irq_offload_init(void)
int arc_irq_offload_init(void)
{
IRQ_CONNECT(IRQ_OFFLOAD_LINE, IRQ_OFFLOAD_PRIO, arc_irq_offload_handler, NULL, 0);
@ -64,4 +64,8 @@ void arch_irq_offload_init(void)
* with generic irq_enable() but via z_arc_v2_irq_unit_int_enable().
*/
z_arc_v2_irq_unit_int_enable(IRQ_OFFLOAD_LINE);
return 0;
}
SYS_INIT(arc_irq_offload_init, POST_KERNEL, 0);

View file

@ -238,7 +238,7 @@ int arc_core_mpu_buffer_validate(const void *addr, size_t size, int write)
* This function provides the default configuration mechanism for the Memory
* Protection Unit (MPU).
*/
void arc_mpu_init(void)
static int arc_mpu_init(void)
{
uint32_t num_regions = get_num_regions();
@ -246,6 +246,7 @@ void arc_mpu_init(void)
if (mpu_config.num_regions > num_regions) {
__ASSERT(0, "Request to configure: %u regions (supported: %u)\n",
mpu_config.num_regions, num_regions);
return -EINVAL;
}
/* Disable MPU */
@ -277,7 +278,10 @@ void arc_mpu_init(void)
/* Enable MPU */
arc_core_mpu_enable();
return 0;
}
SYS_INIT(arc_mpu_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_COMMON_INTERNAL_H_ */
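
One side of this hunk registers the MPU setup through SYS_INIT rather than calling it explicitly from z_prep_c(): a routine returning 0 on success (negative errno on failure) runs automatically at the chosen init level. The general pattern, with illustrative names:

#include <zephyr/init.h>

static int my_early_setup(void)
{
        /* program hardware, e.g. MPU region configuration */
        return 0; /* negative errno reports failure */
}

SYS_INIT(my_early_setup, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);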

View file

@ -814,7 +814,7 @@ int arc_core_mpu_buffer_validate(const void *addr, size_t size, int write)
* This function provides the default configuration mechanism for the Memory
* Protection Unit (MPU).
*/
void arc_mpu_init(void)
static int arc_mpu_init(void)
{
uint32_t num_regions;
uint32_t i;
@ -826,7 +826,7 @@ void arc_mpu_init(void)
__ASSERT(0,
"Request to configure: %u regions (supported: %u)\n",
mpu_config.num_regions, num_regions);
return;
return -EINVAL;
}
static_regions_num = 0U;
@ -851,7 +851,7 @@ void arc_mpu_init(void)
MPU_DYNAMIC_REGION_AREAS_NUM) {
LOG_ERR("not enough dynamic regions %d",
dynamic_regions_num);
return;
return -EINVAL;
}
dyn_reg_info[dynamic_regions_num].index = i;
@ -886,8 +886,10 @@ void arc_mpu_init(void)
/* Enable MPU */
arc_core_mpu_enable();
return;
return 0;
}
SYS_INIT(arc_mpu_init, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V4_INTERNAL_H_ */

View file

@ -23,8 +23,6 @@
#include <zephyr/arch/arc/cluster.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <zephyr/platform/hooks.h>
#include <zephyr/arch/cache.h>
/* XXX - keep for future use in full-featured cache APIs */
#if 0
@ -115,9 +113,6 @@ static void dev_state_zero(void)
#endif
extern FUNC_NORETURN void z_cstart(void);
extern void arc_mpu_init(void);
extern void arc_secureshield_init(void);
/**
* @brief Prepare to and run C code
*
@ -126,10 +121,6 @@ extern void arc_secureshield_init(void);
void z_prep_c(void)
{
#if defined(CONFIG_SOC_PREP_HOOK)
soc_prep_hook();
#endif
#ifdef CONFIG_ISA_ARCV3
arc_cluster_scm_enable();
#endif
@ -139,15 +130,6 @@ void z_prep_c(void)
dev_state_zero();
#endif
z_data_copy();
#if CONFIG_ARCH_CACHE
arch_cache_init();
#endif
#ifdef CONFIG_ARC_MPU
arc_mpu_init();
#endif
#ifdef CONFIG_ARC_SECURE_FIRMWARE
arc_secureshield_init();
#endif
z_cstart();
CODE_UNREACHABLE;
}

View file

@ -16,9 +16,8 @@
#include <zephyr/arch/cpu.h>
#include <swap_macros.h>
#include <zephyr/arch/arc/asm-compat/assembler.h>
#if defined(CONFIG_SOC_RESET_HOOK)
GTEXT(soc_reset_hook)
#ifdef CONFIG_ARC_EARLY_SOC_INIT
#include <soc_ctrl.h>
#endif
GDATA(z_interrupt_stacks)
@ -113,8 +112,8 @@ done_icache_invalidate:
done_dcache_invalidate:
#ifdef CONFIG_SOC_RESET_HOOK
bl soc_reset_hook
#ifdef CONFIG_ARC_EARLY_SOC_INIT
soc_early_asm_init_percpu
#endif
_dsp_extension_probe

View file

@ -48,7 +48,7 @@ static void sjli_table_init(void)
/*
* @brief initialization of secureshield related functions.
*/
void arc_secureshield_init(void)
static int arc_secureshield_init(void)
{
sjli_table_init();
@ -60,4 +60,9 @@ void arc_secureshield_init(void)
*
*/
__asm__ volatile("sflag 0x20");
return 0;
}
SYS_INIT(arc_secureshield_init, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

View file

@ -19,15 +19,10 @@
#include <zephyr/arch/arc/v2/mpu/arc_core_mpu.h>
#endif
#if defined(CONFIG_ARC_VPX_COOPERATIVE_SHARING) || defined(CONFIG_DSP_SHARING)
#if defined(CONFIG_ARC_DSP) && defined(CONFIG_DSP_SHARING)
#include <zephyr/arch/arc/v2/dsp/arc_dsp.h>
static struct k_spinlock lock;
#endif
#if defined(CONFIG_ARC_VPX_COOPERATIVE_SHARING)
static struct k_sem vpx_sem[CONFIG_MP_MAX_NUM_CPUS];
#endif
/* initial stack frame */
struct init_stack_frame {
uintptr_t pc;
@ -325,65 +320,3 @@ void arc_dsp_enable(struct k_thread *thread, unsigned int options)
k_spin_unlock(&lock, key);
}
#endif /* CONFIG_ARC_DSP && CONFIG_DSP_SHARING */
#if defined(CONFIG_ARC_VPX_COOPERATIVE_SHARING)
int arc_vpx_lock(k_timeout_t timeout)
{
k_spinlock_key_t key;
unsigned int id;
key = k_spin_lock(&lock);
id = _current_cpu->id;
#if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
#endif
k_spin_unlock(&lock, key);
/*
* It is assumed that the thread is (still) pinned to
* the same CPU identified by <id>.
*/
return k_sem_take(&vpx_sem[id], timeout);
}
void arc_vpx_unlock(void)
{
k_spinlock_key_t key;
unsigned int id;
key = k_spin_lock(&lock);
id = _current_cpu->id;
#if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
#endif
k_spin_unlock(&lock, key);
/*
* It is assumed that this thread is (still) pinned to
* the CPU identified by <id>, and that it is the same CPU
* used by arc_vpx_lock().
*/
k_sem_give(&vpx_sem[id]);
}
void arc_vpx_unlock_force(unsigned int id)
{
__ASSERT(id < CONFIG_MP_MAX_NUM_CPUS, "");
k_sem_give(&vpx_sem[id]);
}
static int arc_vpx_sem_init(void)
{
for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
k_sem_init(&vpx_sem[i], 1, 1);
}
return 0;
}
SYS_INIT(arc_vpx_sem_init, PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif

View file

@ -9,9 +9,11 @@
#ifdef CONFIG_IRQ_OFFLOAD
int arc_irq_offload_init(const struct device *unused);
static inline void arc_irq_offload_init_smp(void)
{
arch_irq_offload_init();
arc_irq_offload_init(NULL);
}
#else

View file

@ -166,14 +166,11 @@ config RUNTIME_NMI
needed, enable this option and attach it via z_arm_nmi_set_handler().
config PLATFORM_SPECIFIC_INIT
bool "Platform (SOC) specific startup hook [DEPRECATED]"
select DEPRECATED
bool "Platform (SOC) specific startup hook"
help
The platform specific initialization code (z_arm_platform_init) is
executed at the beginning of the startup code (__start).
This option is deprecated, use SOC_RESET_HOOK instead.
config FAULT_DUMP
int "Fault dump level"
default 2

View file

@ -93,14 +93,6 @@ config CPU_CORTEX_R7
help
This option signifies the use of a Cortex-R7 CPU
config CPU_CORTEX_R8
bool
select CPU_AARCH32_CORTEX_R
select ARMV7_R
select ARMV7_R_FP if CPU_HAS_FPU
help
This option signifies the use of a Cortex-R8 CPU
config CPU_CORTEX_R52
bool
select CPU_AARCH32_CORTEX_R

View file

@ -26,6 +26,5 @@ extern void __start(void);
#define BOOT_PARAM_UDF_SP_OFFSET 16
#define BOOT_PARAM_SVC_SP_OFFSET 20
#define BOOT_PARAM_SYS_SP_OFFSET 24
#define BOOT_PARAM_VOTING_OFFSET 28
#endif /* _BOOT_H_ */

View file

@ -217,7 +217,3 @@ int arch_icache_flush_and_invd_range(void *start_addr, size_t size)
}
#endif
void arch_cache_init(void)
{
}

View file

@ -49,7 +49,6 @@ _skip_\@:
#endif /* CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK */
.endm
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
SECTION_FUNC(TEXT, arch_cpu_idle)
#ifdef CONFIG_TRACING
push {r0, lr}
@ -69,9 +68,6 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
bx lr
#endif
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
#ifdef CONFIG_TRACING
push {r0, lr}
@ -97,4 +93,3 @@ SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
_irq_disabled:
bx lr
#endif

View file

@ -147,9 +147,8 @@ bool z_arm_fault_undef_instruction_fp(void)
* the FP was already enabled then this was an actual undefined
* instruction.
*/
if (__get_FPEXC() & FPEXC_EN) {
if (__get_FPEXC() & FPEXC_EN)
return true;
}
__set_FPEXC(FPEXC_EN);
@ -163,9 +162,8 @@ bool z_arm_fault_undef_instruction_fp(void)
struct __fpu_sf *spill_esf =
(struct __fpu_sf *)_current_cpu->fp_ctx;
if (spill_esf == NULL) {
if (spill_esf == NULL)
return false;
}
_current_cpu->fp_ctx = NULL;

View file

@ -339,15 +339,6 @@ z_arm_cortex_ar_irq_done:
str r0, [r2, #___cpu_t_nested_OFFSET]
/* Do not context switch if exiting a nested interrupt */
cmp r0, #0
/* Note that this function is only called from `z_arm_svc`,
* while handling irq_offload, with below modes set:
* ```
* if (cpu interrupts are nested)
* mode=MODE_SYS
* else
* mode=MODE_IRQ
* ```
*/
bhi __EXIT_INT
/* retrieve pointer to the current thread */

View file

@ -18,27 +18,6 @@
ubfx \rreg0, \rreg0, #0, #24
.endm
/*
* Get CPU logic id by looking up cpu_node_list
* returns
* reg0: MPID
* reg1: logic id (0 ~ CONFIG_MP_MAX_NUM_CPUS - 1)
* clobbers: reg0, reg1, reg2, reg3
*/
.macro get_cpu_logic_id reg0, reg1, reg2, reg3
get_cpu_id \reg0
ldr \reg3, =cpu_node_list
mov \reg1, #0
1: ldr \reg2, [\reg3, \reg1, lsl #2]
cmp \reg2, \reg0
beq 2f
add \reg1, \reg1, #1
cmp \reg1, #CONFIG_MP_MAX_NUM_CPUS
bne 1b
b .
2:
.endm
.macro get_cpu rreg0
/*
* Get CPU pointer.
@ -54,7 +33,8 @@
*/
srsdb sp!, #MODE_SYS
cps #MODE_SYS
push {r0-r3, r12, lr}
stmdb sp, {r0-r3, r12, lr}^
sub sp, #24
/* TODO: EXTRA_EXCEPTION_INFO */
mov r0, sp
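
The get_cpu_logic_id macro removed above is a linear search of cpu_node_list (defined in smp.c later in this diff); a rough C equivalent, for readability:

#include <stdint.h>

extern const uint32_t cpu_node_list[];

/* Map an MPID to its static "logic" id by its position in
 * cpu_node_list; mirrors the removed assembly macro, including the
 * dead-end spin ("b .") when no entry matches.
 */
static unsigned int cpu_logic_id(uint32_t mpid)
{
        for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
                if (cpu_node_list[i] == mpid) {
                        return i;
                }
        }
        for (;;) {
                /* unreachable for a valid MPID */
        }
}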

View file

@ -1,6 +1,5 @@
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
* Copyright 2024 Arm Limited and/or its affiliates <open-source-office@arm.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -22,13 +21,29 @@
#include <zephyr/linker/linker-defs.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/arch/arm/cortex_a_r/lib_helpers.h>
#include <zephyr/platform/hooks.h>
#include <zephyr/arch/cache.h>
#if defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)
#include <cortex_a_r/stack.h>
#endif
#if defined(__GNUC__)
/*
* GCC can detect if memcpy is passed a NULL argument, however one of
* the cases of relocate_vector_table() it is valid to pass NULL, so we
* suppress the warning for this case. We need to do this before
* string.h is included to get the declaration of memcpy.
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnonnull"
#endif
#include <string.h>
#if defined(CONFIG_SW_VECTOR_RELAY) || defined(CONFIG_SW_VECTOR_RELAY_CLIENT)
Z_GENERIC_SECTION(.vt_pointer_section) __attribute__((used))
void *_vector_table_pointer;
#endif
#ifdef CONFIG_ARM_MPU
extern void z_arm_mpu_init(void);
extern void z_arm_configure_static_mpu_regions(void);
@ -36,6 +51,38 @@ extern void z_arm_configure_static_mpu_regions(void);
extern int z_arm_mmu_init(void);
#endif
#if defined(CONFIG_AARCH32_ARMV8_R)
#define VECTOR_ADDRESS ((uintptr_t)_vector_start)
static inline void relocate_vector_table(void)
{
write_sctlr(read_sctlr() & ~HIVECS);
write_vbar(VECTOR_ADDRESS & VBAR_MASK);
barrier_isync_fence_full();
}
#else
#define VECTOR_ADDRESS 0
void __weak relocate_vector_table(void)
{
#if defined(CONFIG_XIP) && (CONFIG_FLASH_BASE_ADDRESS != 0) || \
!defined(CONFIG_XIP) && (CONFIG_SRAM_BASE_ADDRESS != 0)
write_sctlr(read_sctlr() & ~HIVECS);
size_t vector_size = (size_t)_vector_end - (size_t)_vector_start;
(void)memcpy(VECTOR_ADDRESS, _vector_start, vector_size);
#elif defined(CONFIG_SW_VECTOR_RELAY) || defined(CONFIG_SW_VECTOR_RELAY_CLIENT)
_vector_table_pointer = _vector_start;
#endif
}
#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
#endif /* CONFIG_AARCH32_ARMV8_R */
#if defined(CONFIG_CPU_HAS_FPU)
static inline void z_arm_floating_point_init(void)
@ -100,12 +147,10 @@ extern FUNC_NORETURN void z_cstart(void);
*/
void z_prep_c(void)
{
#if defined(CONFIG_SOC_PREP_HOOK)
soc_prep_hook();
#endif
/* Initialize tpidruro with our struct _cpu instance address */
write_tpidruro((uintptr_t)&_kernel.cpus[0]);
relocate_vector_table();
#if defined(CONFIG_CPU_HAS_FPU)
z_arm_floating_point_init();
#endif
@ -115,9 +160,6 @@ void z_prep_c(void)
z_arm_init_stacks();
#endif
z_arm_interrupt_init();
#if CONFIG_ARCH_CACHE
arch_cache_init();
#endif
#ifdef CONFIG_ARM_MPU
z_arm_mpu_init();
z_arm_configure_static_mpu_regions();

View file

@ -1,6 +1,5 @@
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
* Copyright 2024 Arm Limited and/or its affiliates <open-source-office@arm.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -13,56 +12,6 @@
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/util.h>
#include <zephyr/linker/linker-defs.h>
#if defined(CONFIG_AARCH32_ARMV8_R)
#define VECTOR_ADDRESS ((uintptr_t)_vector_start)
static inline void relocate_vector_table(void)
{
write_sctlr(read_sctlr() & ~HIVECS);
write_vbar(VECTOR_ADDRESS & VBAR_MASK);
barrier_isync_fence_full();
}
#else
#if defined(__GNUC__)
/*
* GCC can detect if memcpy is passed a NULL argument, however one of
* the cases of relocate_vector_table() it is valid to pass NULL, so we
* suppress the warning for this case. We need to do this before
* string.h is included to get the declaration of memcpy.
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnonnull"
#endif /* __GNUC__ */
#include <string.h>
#define VECTOR_ADDRESS 0
void __weak relocate_vector_table(void)
{
#if defined(CONFIG_XIP) && (CONFIG_FLASH_BASE_ADDRESS != 0) || \
!defined(CONFIG_XIP) && (CONFIG_SRAM_BASE_ADDRESS != 0)
write_sctlr(read_sctlr() & ~HIVECS);
size_t vector_size = (size_t)_vector_end - (size_t)_vector_start;
(void)memcpy(VECTOR_ADDRESS, _vector_start, vector_size);
#endif
}
#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
#endif /* !CONFIG_AARCH32_ARMV8_R */
void z_arm_relocate_vector_table(void)
{
relocate_vector_table();
}
/**
*

View file

@ -1,7 +1,6 @@
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
* Copyright (c) 2019 Stephanos Ioannidis <root@stephanos.io>
* Copyright 2024 Arm Limited and/or its affiliates <open-source-office@arm.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -31,8 +30,8 @@ GDATA(z_arm_sys_stack)
GDATA(z_arm_fiq_stack)
GDATA(z_arm_abort_stack)
GDATA(z_arm_undef_stack)
#if defined(CONFIG_SOC_RESET_HOOK)
GTEXT(soc_reset_hook)
#if defined(CONFIG_PLATFORM_SPECIFIC_INIT)
GTEXT(z_arm_platform_init)
#endif
/**
@ -201,62 +200,23 @@ EL1_Reset_Handler:
#endif /* CONFIG_DCLS */
ldr r0, =arm_cpu_boot_params
#if CONFIG_MP_MAX_NUM_CPUS > 1
/*
* This code uses voting locks, like arch/arm64/core/reset.S, to determine the primary CPU.
*/
get_cpu_id r1
/*
* Get the "logic" id defined by cpu_node_list statically for voting lock self-identify.
* It is worth noting that this is NOT the final logic id (arch_curr_cpu()->id)
*/
get_cpu_logic_id r1, r2, r3, r4 // r1: MPID, r2: logic id
add r4, r0, #BOOT_PARAM_VOTING_OFFSET
/* signal our desire to vote */
mov r5, #1
strb r5, [r4, r2]
ldr r3, [r0, #BOOT_PARAM_MPID_OFFSET]
cmn r3, #1
beq 1f
/* some core already won, release */
mov r7, #0
strb r7, [r4, r2]
b _secondary_core
/* suggest current core then release */
1: str r1, [r0, #BOOT_PARAM_MPID_OFFSET]
strb r7, [r4, r2]
dmb
/* then wait until every core else is done voting */
mov r5, #0
2: ldrb r3, [r4, r5]
tst r3, #255
/* wait */
bne 2b
add r5, r5, #1
cmp r5, #CONFIG_MP_MAX_NUM_CPUS
bne 2b
/* check if current core won */
dmb
ldr r3, [r0, #BOOT_PARAM_MPID_OFFSET]
cmp r3, r1
ldrex r2, [r0, #BOOT_PARAM_MPID_OFFSET]
cmp r2, #-1
bne 1f
strex r3, r1, [r0, #BOOT_PARAM_MPID_OFFSET]
cmp r3, #0
beq _primary_core
/* fallthrough secondary */
/* loop until our turn comes */
_secondary_core:
dmb
1:
dmb ld
ldr r2, [r0, #BOOT_PARAM_MPID_OFFSET]
cmp r1, r2
bne _secondary_core
bne 1b
/* we can now load our stack pointer values and move on */
/* we can now move on */
ldr r4, =arch_secondary_cpu_init
ldr r5, [r0, #BOOT_PARAM_FIQ_SP_OFFSET]
ldr r6, [r0, #BOOT_PARAM_IRQ_SP_OFFSET]
@ -306,9 +266,9 @@ _primary_core:
msr CPSR_c, #(MODE_SYS | I_BIT | F_BIT)
mov sp, r10
#if defined(CONFIG_SOC_RESET_HOOK)
#if defined(CONFIG_PLATFORM_SPECIFIC_INIT)
/* Execute platform-specific initialisation if applicable */
bl soc_reset_hook
bl z_arm_platform_init
#endif
#if defined(CONFIG_WDOG_INIT)
@ -320,6 +280,4 @@ _primary_core:
bl z_arm_tcm_disable_ecc
#endif
bl z_arm_relocate_vector_table
bx r4
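The voting-lock election above is easier to follow in C. Below is a minimal sketch of the same algorithm, assuming one vote byte per core and the -1 sentinel in mpid seen in boot_params; all names are illustrative, and the dmb barriers of the real assembly are reduced to comments:

#include <stdint.h>

#define NUM_CPUS 4 /* stands in for CONFIG_MP_MAX_NUM_CPUS */

struct boot_params_sketch {
	volatile int32_t mpid;             /* -1 until a core proposes itself */
	volatile uint8_t voting[NUM_CPUS]; /* one vote slot per core */
};

static int is_primary(struct boot_params_sketch *bp, int32_t my_mpid, int my_id)
{
	bp->voting[my_id] = 1;          /* signal our desire to vote */
	if (bp->mpid == -1) {
		bp->mpid = my_mpid;     /* suggest the current core */
	}
	bp->voting[my_id] = 0;          /* release our vote */
	/* barrier, then wait until every other core is done voting */
	for (int i = 0; i < NUM_CPUS; i++) {
		while (bp->voting[i] != 0) {
		}
	}
	/* barrier, then check if the current core won */
	return bp->mpid == my_mpid;
}

A losing core then falls into _secondary_core and spins on mpid until arch_cpu_start() stores that core's own MPID there as the synchronization point.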

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2024 Arm Limited (or its affiliates).
* Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/
@ -51,7 +51,6 @@ struct boot_params {
char *udf_sp;
char *svc_sp;
char *sys_sp;
uint8_t voting[CONFIG_MP_MAX_NUM_CPUS];
arch_cpustart_t fn;
void *arg;
int cpu_num;
@ -65,7 +64,6 @@ BUILD_ASSERT(offsetof(struct boot_params, abt_sp) == BOOT_PARAM_ABT_SP_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, udf_sp) == BOOT_PARAM_UDF_SP_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, svc_sp) == BOOT_PARAM_SVC_SP_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, sys_sp) == BOOT_PARAM_SYS_SP_OFFSET);
BUILD_ASSERT(offsetof(struct boot_params, voting) == BOOT_PARAM_VOTING_OFFSET);
volatile struct boot_params arm_cpu_boot_params = {
.mpid = -1,
@ -77,7 +75,7 @@ volatile struct boot_params arm_cpu_boot_params = {
.sys_sp = (char *)(z_arm_sys_stack + CONFIG_ARMV7_SYS_STACK_SIZE),
};
const uint32_t cpu_node_list[] = {
static const uint32_t cpu_node_list[] = {
DT_FOREACH_CHILD_STATUS_OKAY_SEP(DT_PATH(cpus), DT_REG_ADDR, (,))};
/* cpu_map saves the maping of core id and mpid */
@ -139,14 +137,10 @@ void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz, arch_cpustart_
arm_cpu_boot_params.arg = arg;
arm_cpu_boot_params.cpu_num = cpu_num;
/* we need the barrier here to make sure the above changes to
* arm_cpu_boot_params are completed before we set the mpid
*/
barrier_dsync_fence_full();
/* store mpid last as this is our synchronization point */
arm_cpu_boot_params.mpid = cpu_mpid;
barrier_dsync_fence_full();
sys_cache_data_invd_range(
(void *)&arm_cpu_boot_params,
sizeof(arm_cpu_boot_params));

View file

@ -41,11 +41,6 @@ SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)
GTEXT(z_arm_cortex_ar_exit_exc)
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_cortex_ar_exit_exc)
/* Note:
* This function is expected to be *always* called with
* processor mode set to MODE_SYS.
*/
/* decrement exception depth */
get_cpu r2
ldrb r1, [r2, #_cpu_offset_to_exc_depth]
@ -56,6 +51,7 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_cortex_ar_exit_exc)
* Restore r0-r3, r12, lr, lr_und and spsr_und from the exception stack
* and return to the current thread.
*/
pop {r0-r3, r12, lr}
ldmia sp, {r0-r3, r12, lr}^
add sp, #24
rfeia sp!
#endif

View file

@ -110,7 +110,3 @@ int arch_icache_flush_and_invd_range(void *start_addr, size_t size)
{
return -ENOTSUP;
}
void arch_cache_init(void)
{
}

View file

@ -53,7 +53,6 @@ void z_arm_cpu_idle_init(void)
} while (false)
#endif
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
void arch_cpu_idle(void)
{
#if defined(CONFIG_TRACING)
@ -97,9 +96,7 @@ void arch_cpu_idle(void)
__enable_irq();
__ISB();
}
#endif
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
void arch_cpu_atomic_idle(unsigned int key)
{
#if defined(CONFIG_TRACING)
@ -138,4 +135,3 @@ void arch_cpu_atomic_idle(unsigned int key)
__enable_irq();
#endif
}
#endif

View file

@ -58,7 +58,7 @@ BUILD_ASSERT(!(CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE &
(CONFIG_CORTEX_M_NULL_POINTER_EXCEPTION_PAGE_SIZE - 1)),
"the size of the partition must be power of 2");
int z_arm_debug_enable_null_pointer_detection(void)
static int z_arm_debug_enable_null_pointer_detection(void)
{
z_arm_dwt_init();
@ -118,4 +118,7 @@ int z_arm_debug_enable_null_pointer_detection(void)
return 0;
}
SYS_INIT(z_arm_debug_enable_null_pointer_detection, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT */

View file

@ -743,7 +743,7 @@ static inline bool z_arm_is_pc_valid(uintptr_t pc)
return true;
}
#if DT_NODE_HAS_STATUS_OKAY(DT_CHOSEN(zephyr_itcm))
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_itcm), okay)
/* Is it in the ITCM */
if ((((uintptr_t)&__itcm_start) <= pc) && (pc < ((uintptr_t)&__itcm_end))) {
return true;
@ -1192,7 +1192,5 @@ void z_arm_fault_init(void)
#endif /* CONFIG_BUILTIN_STACK_GUARD */
#ifdef CONFIG_TRAP_UNALIGNED_ACCESS
SCB->CCR |= SCB_CCR_UNALIGN_TRP_Msk;
#else
SCB->CCR &= ~SCB_CCR_UNALIGN_TRP_Msk;
#endif /* CONFIG_TRAP_UNALIGNED_ACCESS */
}

View file

@ -27,11 +27,6 @@ SECTION_FUNC(TEXT, arch_pm_s2ram_suspend)
* r0: address of the system_off function
*/
push {r4-r12, lr}
/* Move system_off to protected register. */
mov r4, r0
/* Store CPU context */
ldr r1, =_cpu_context
mrs r2, msp
@ -76,7 +71,7 @@ SECTION_FUNC(TEXT, arch_pm_s2ram_suspend)
* Call the system_off function passed as parameter. This should never
* return.
*/
blx r4
blx r0
/*
* The system_off function returns here only when the powering off was
@ -86,10 +81,9 @@ SECTION_FUNC(TEXT, arch_pm_s2ram_suspend)
/*
* Reset the marking of suspend to RAM, return is ignored.
*/
push {r0}
bl pm_s2ram_mark_check_and_clear
/* Move system_off back to r0 as return value */
mov r0, r4
pop {r0}
pop {r4-r12, lr}
bx lr
@ -99,14 +93,11 @@ GTEXT(arch_pm_s2ram_resume)
SECTION_FUNC(TEXT, arch_pm_s2ram_resume)
/*
* Check if reset occurred after suspending to RAM.
* Store LR to ensure we can continue boot when we are not suspended
* to RAM. In addition to LR, R0 is pushed too, to ensure "SP mod 8 = 0",
* as stated by ARM rule 6.2.1.2 for AAPCS32.
*/
push {r0, lr}
push {lr}
bl pm_s2ram_mark_check_and_clear
cmp r0, #0x1
pop {r0, lr}
pop {lr}
beq resume
bx lr

View file

@ -20,8 +20,6 @@
#include <kernel_internal.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/platform/hooks.h>
#include <zephyr/arch/cache.h>
#if defined(__GNUC__)
/*
@ -183,10 +181,6 @@ extern FUNC_NORETURN void z_cstart(void);
*/
void z_prep_c(void)
{
#if defined(CONFIG_SOC_PREP_HOOK)
soc_prep_hook();
#endif
relocate_vector_table();
#if defined(CONFIG_CPU_HAS_FPU)
z_arm_floating_point_init();
@ -199,13 +193,6 @@ void z_prep_c(void)
#else
z_arm_interrupt_init();
#endif /* CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
#if CONFIG_ARCH_CACHE
arch_cache_init();
#endif
#ifdef CONFIG_NULL_POINTER_EXCEPTION_DETECTION_DWT
z_arm_debug_enable_null_pointer_detection();
#endif
z_cstart();
CODE_UNREACHABLE;
}

View file

@ -24,8 +24,8 @@ GDATA(z_interrupt_stacks)
#if defined(CONFIG_DEBUG_THREAD_INFO)
GDATA(z_sys_post_kernel)
#endif
#if defined(CONFIG_SOC_RESET_HOOK)
GTEXT(soc_reset_hook)
#if defined(CONFIG_PLATFORM_SPECIFIC_INIT)
GTEXT(z_arm_platform_init)
#endif
#if defined(CONFIG_INIT_ARCH_HW_AT_BOOT)
GTEXT(z_arm_init_arch_hw_at_boot)
@ -93,8 +93,8 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
bl arch_pm_s2ram_resume
#endif /* CONFIG_PM_S2RAM */
#if defined(CONFIG_SOC_RESET_HOOK)
bl soc_reset_hook
#if defined(CONFIG_PLATFORM_SPECIFIC_INIT)
bl z_arm_platform_init
#endif
#if defined(CONFIG_INIT_ARCH_HW_AT_BOOT)

View file

@ -12,34 +12,6 @@
LOG_MODULE_REGISTER(elf, CONFIG_LLEXT_LOG_LEVEL);
#define R_ARM_NONE 0
#define R_ARM_PC24 1
#define R_ARM_ABS32 2
#define R_ARM_REL32 3
#define R_ARM_COPY 20
#define R_ARM_GLOB_DAT 21
#define R_ARM_JUMP_SLOT 22
#define R_ARM_RELATIVE 23
#define R_ARM_CALL 28
#define R_ARM_JUMP24 29
#define R_ARM_TARGET1 38
#define R_ARM_V4BX 40
#define R_ARM_PREL31 42
#define R_ARM_MOVW_ABS_NC 43
#define R_ARM_MOVT_ABS 44
#define R_ARM_MOVW_PREL_NC 45
#define R_ARM_MOVT_PREL 46
#define R_ARM_ALU_PC_G0_NC 57
#define R_ARM_ALU_PC_G1_NC 59
#define R_ARM_LDR_PC_G2 63
#define R_ARM_THM_CALL 10
#define R_ARM_THM_JUMP24 30
#define R_ARM_THM_MOVW_ABS_NC 47
#define R_ARM_THM_MOVT_ABS 48
#define R_ARM_THM_MOVW_PREL_NC 49
#define R_ARM_THM_MOVT_PREL 50
#define OPCODE2ARMMEM(x) ((uint32_t)(x))
#define OPCODE2THM16MEM(x) ((uint16_t)(x))
#define MEM2ARMOPCODE(x) OPCODE2ARMMEM(x)

View file

@ -42,7 +42,3 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
offload_routine = NULL;
k_sched_unlock();
}
void arch_irq_offload_init(void)
{
}

View file

@ -9,7 +9,6 @@ zephyr_library_sources(
irq_init.c
irq_manage.c
prep_c.c
reboot.c
reset.S
reset.c
switch.S
@ -29,7 +28,6 @@ if(${SRAM_LENGTH} GREATER 11 OR ${KERNEL_VM_LENGTH} GREATER 11)
zephyr_cc_option(-mcmodel=large)
endif()
zephyr_library_sources_ifdef(CONFIG_LLEXT elf.c)
zephyr_library_sources_ifdef(CONFIG_FPU_SHARING fpu.c fpu.S)
zephyr_library_sources_ifdef(CONFIG_ARM_MMU mmu.c mmu.S)
zephyr_library_sources_ifdef(CONFIG_ARM_MPU cortex_r/arm_mpu.c)

View file

@ -161,14 +161,6 @@ config ARM64_EXCEPTION_STACK_TRACE
help
Internal config to enable runtime stack traces on fatal exceptions.
config ARCH_HAS_STACKWALK
bool
default y
depends on FRAME_POINTER
help
Internal config to indicate that the arch_stack_walk() API is implemented
and it can be enabled.
config ARM64_SAFE_EXCEPTION_STACK_SIZE
int "The stack size of the safe exception stack"
default 4096

View file

@ -13,7 +13,7 @@
#include <zephyr/arch/cpu.h>
_ASM_FILE_PROLOGUE
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
GTEXT(arch_cpu_idle)
SECTION_FUNC(TEXT, arch_cpu_idle)
#ifdef CONFIG_TRACING
@ -25,9 +25,7 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
wfi
msr daifclr, #(DAIFCLR_IRQ_BIT)
ret
#endif
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
GTEXT(arch_cpu_atomic_idle)
SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
#ifdef CONFIG_TRACING
@ -43,5 +41,3 @@ SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
msr daifclr, #(DAIFCLR_IRQ_BIT)
_irq_disabled:
ret
#endif

View file

@ -1,515 +0,0 @@
/*
* Copyright (c) 2024 BayLibre SAS
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/llext/elf.h>
#include <zephyr/llext/llext.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/byteorder.h>
LOG_MODULE_REGISTER(elf, CONFIG_LLEXT_LOG_LEVEL);
#define R_ARM_NONE 0
#define R_AARCH64_NONE 256
/* Static data relocations */
#define R_AARCH64_ABS64 257
#define R_AARCH64_ABS32 258
#define R_AARCH64_ABS16 259
#define R_AARCH64_PREL64 260
#define R_AARCH64_PREL32 261
#define R_AARCH64_PREL16 262
/* Static relocations */
#define R_AARCH64_MOVW_UABS_G0 263
#define R_AARCH64_MOVW_UABS_G0_NC 264
#define R_AARCH64_MOVW_UABS_G1 265
#define R_AARCH64_MOVW_UABS_G1_NC 266
#define R_AARCH64_MOVW_UABS_G2 267
#define R_AARCH64_MOVW_UABS_G2_NC 268
#define R_AARCH64_MOVW_UABS_G3 269
#define R_AARCH64_MOVW_SABS_G0 270
#define R_AARCH64_MOVW_SABS_G1 271
#define R_AARCH64_MOVW_SABS_G2 272
#define R_AARCH64_MOVW_PREL_G0 287
#define R_AARCH64_MOVW_PREL_G0_NC 288
#define R_AARCH64_MOVW_PREL_G1 289
#define R_AARCH64_MOVW_PREL_G1_NC 290
#define R_AARCH64_MOVW_PREL_G2 291
#define R_AARCH64_MOVW_PREL_G2_NC 292
#define R_AARCH64_MOVW_PREL_G3 293
#define R_AARCH64_LD_PREL_LO19 273
#define R_AARCH64_ADR_PREL_LO21 274
#define R_AARCH64_ADR_PREL_PG_HI21 275
#define R_AARCH64_ADR_PREL_PG_HI21_NC 276
#define R_AARCH64_ADD_ABS_LO12_NC 277
#define R_AARCH64_LDST8_ABS_LO12_NC 278
#define R_AARCH64_TSTBR14 279
#define R_AARCH64_CONDBR19 280
#define R_AARCH64_JUMP26 282
#define R_AARCH64_CALL26 283
#define R_AARCH64_LDST16_ABS_LO12_NC 284
#define R_AARCH64_LDST32_ABS_LO12_NC 285
#define R_AARCH64_LDST64_ABS_LO12_NC 286
#define R_AARCH64_LDST128_ABS_LO12_NC 299
/* Masks for immediate values */
#define AARCH64_MASK_IMM12 BIT_MASK(12)
#define AARCH64_MASK_IMM14 BIT_MASK(14)
#define AARCH64_MASK_IMM16 BIT_MASK(16)
#define AARCH64_MASK_IMM19 BIT_MASK(19)
#define AARCH64_MASK_IMM26 BIT_MASK(26)
/* MOV instruction helper symbols */
#define AARCH64_MASK_MOV_OPCODE BIT_MASK(8)
#define AARCH64_SHIFT_MOV_OPCODE (23)
#define AARCH64_SHIFT_MOV_IMM16 (5)
#define AARCH64_OPCODE_MOVN (0b00100101)
#define AARCH64_OPCODE_MOVZ (0b10100101)
/* ADR instruction helper symbols */
#define AARCH64_MASK_ADR_IMMLO BIT_MASK(2)
#define AARCH64_MASK_ADR_IMMHI BIT_MASK(19)
#define AARCH64_SHIFT_ADR_IMMLO (29)
#define AARCH64_SHIFT_ADR_IMMHI (5)
#define AARCH64_ADR_IMMLO_BITS (2)
#define AARCH64_PAGE(expr) ((expr) & ~0xFFF)
enum aarch64_reloc_type {
AARCH64_RELOC_TYPE_NONE,
AARCH64_RELOC_TYPE_ABS,
AARCH64_RELOC_TYPE_PREL,
AARCH64_RELOC_TYPE_PAGE,
};
/**
* @brief Function computing a relocation (X in AArch64 ELF).
*
* @param[in] reloc_type Type of relocation operation.
* @param[in] loc Address of an opcode to rewrite (P in AArch64 ELF).
* @param[in] sym_base_addr Address of the symbol referenced by relocation (S in AArch64 ELF).
* @param[in] addend Addend from RELA relocation.
*
* @return Result of the relocation operation (X in AArch64 ELF)
*/
static uint64_t reloc(enum aarch64_reloc_type reloc_type, uintptr_t loc, uintptr_t sym_base_addr,
int64_t addend)
{
switch (reloc_type) {
case AARCH64_RELOC_TYPE_ABS:
return sym_base_addr + addend;
case AARCH64_RELOC_TYPE_PREL:
return sym_base_addr + addend - loc;
case AARCH64_RELOC_TYPE_PAGE:
return AARCH64_PAGE(sym_base_addr + addend) - AARCH64_PAGE(loc);
case AARCH64_RELOC_TYPE_NONE:
return 0;
}
CODE_UNREACHABLE;
}
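A worked example helps map the AArch64 ELF notation onto these three cases; the numbers below are purely illustrative:

/* With S = 0x10000123 (symbol), A = 4 (addend) and P = 0x10002000 (place):
 *
 *   AARCH64_RELOC_TYPE_ABS:  X = S + A            = 0x10000127
 *   AARCH64_RELOC_TYPE_PREL: X = S + A - P        = -0x1ed9
 *   AARCH64_RELOC_TYPE_PAGE: X = PAGE(S + A) - PAGE(P)
 *                              = 0x10000000 - 0x10002000 = -0x2000
 */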
/**
* @brief Handler for static data relocations.
*
* @param[in] rel Relocation data provided by ELF
* @param[in] reloc_type Type of relocation operation.
* @param[in] loc Address of an opcode to rewrite (P in AArch64 ELF).
* @param[in] sym_base_addr Address of the symbol referenced by relocation (S in AArch64 ELF).
*
* @retval -ERANGE Relocation value overflow
* @retval 0 Successful relocation
*/
static int data_reloc_handler(elf_rela_t *rel, elf_word reloc_type, uintptr_t loc,
uintptr_t sym_base_addr)
{
int64_t x;
switch (reloc_type) {
case R_AARCH64_ABS64:
*(int64_t *)loc = reloc(AARCH64_RELOC_TYPE_ABS, loc, sym_base_addr, rel->r_addend);
break;
case R_AARCH64_ABS32:
x = reloc(AARCH64_RELOC_TYPE_ABS, loc, sym_base_addr, rel->r_addend);
if (x < 0 || x > UINT32_MAX) {
return -ERANGE;
}
*(uint32_t *)loc = (uint32_t)x;
break;
case R_AARCH64_ABS16:
x = reloc(AARCH64_RELOC_TYPE_ABS, loc, sym_base_addr, rel->r_addend);
if (x < 0 || x > UINT16_MAX) {
return -ERANGE;
}
*(uint16_t *)loc = (uint16_t)x;
break;
case R_AARCH64_PREL64:
*(int64_t *)loc = reloc(AARCH64_RELOC_TYPE_PREL, loc, sym_base_addr, rel->r_addend);
break;
case R_AARCH64_PREL32:
x = reloc(AARCH64_RELOC_TYPE_PREL, loc, sym_base_addr, rel->r_addend);
if (x < INT32_MIN || x > INT32_MAX) {
return -ERANGE;
}
*(int32_t *)loc = (int32_t)x;
break;
case R_AARCH64_PREL16:
x = reloc(AARCH64_RELOC_TYPE_PREL, loc, sym_base_addr, rel->r_addend);
if (x < INT16_MIN || x > INT16_MAX) {
return -ERANGE;
}
*(int16_t *)loc = (int16_t)x;
break;
default:
CODE_UNREACHABLE;
}
return 0;
}
/**
* @brief Handler for relocations using MOV* instructions.
*
* @param[in] rel Relocation data provided by ELF
* @param[in] reloc_type Type of relocation operation.
* @param[in] loc Address of an opcode to rewrite (P in AArch64 ELF).
* @param[in] sym_base_addr Address of the symbol referenced by relocation (S in AArch64 ELF).
*
* @retval -ERANGE Relocation value overflow
* @retval 0 Successful relocation
*/
static int movw_reloc_handler(elf_rela_t *rel, elf_word reloc_type, uintptr_t loc,
uintptr_t sym_base_addr)
{
int64_t x;
uint32_t imm;
int lsb = 0; /* LSB of X to be used */
bool is_movnz = false;
enum aarch64_reloc_type type = AARCH64_RELOC_TYPE_ABS;
uint32_t opcode = sys_le32_to_cpu(*(uint32_t *)loc);
switch (reloc_type) {
case R_AARCH64_MOVW_SABS_G0:
is_movnz = true;
case R_AARCH64_MOVW_UABS_G0_NC:
case R_AARCH64_MOVW_UABS_G0:
break;
case R_AARCH64_MOVW_SABS_G1:
is_movnz = true;
case R_AARCH64_MOVW_UABS_G1_NC:
case R_AARCH64_MOVW_UABS_G1:
lsb = 16;
break;
case R_AARCH64_MOVW_SABS_G2:
is_movnz = true;
case R_AARCH64_MOVW_UABS_G2_NC:
case R_AARCH64_MOVW_UABS_G2:
lsb = 32;
break;
case R_AARCH64_MOVW_UABS_G3:
lsb = 48;
break;
case R_AARCH64_MOVW_PREL_G0:
is_movnz = true;
case R_AARCH64_MOVW_PREL_G0_NC:
type = AARCH64_RELOC_TYPE_PREL;
break;
case R_AARCH64_MOVW_PREL_G1:
is_movnz = true;
case R_AARCH64_MOVW_PREL_G1_NC:
type = AARCH64_RELOC_TYPE_PREL;
lsb = 16;
break;
case R_AARCH64_MOVW_PREL_G2:
is_movnz = true;
case R_AARCH64_MOVW_PREL_G2_NC:
type = AARCH64_RELOC_TYPE_PREL;
lsb = 32;
break;
case R_AARCH64_MOVW_PREL_G3:
is_movnz = true;
type = AARCH64_RELOC_TYPE_PREL;
lsb = 48;
break;
default:
CODE_UNREACHABLE;
}
x = reloc(type, loc, sym_base_addr, rel->r_addend);
imm = x >> lsb;
/* Manipulate opcode for signed relocations. Result depends on sign of immediate value. */
if (is_movnz) {
opcode &= ~(AARCH64_MASK_MOV_OPCODE << AARCH64_SHIFT_MOV_OPCODE);
if (x >= 0) {
opcode |= (AARCH64_OPCODE_MOVN << AARCH64_SHIFT_MOV_OPCODE);
} else {
opcode |= (AARCH64_OPCODE_MOVZ << AARCH64_SHIFT_MOV_OPCODE);
/* Need to invert immediate value for MOVZ. */
imm = ~imm;
}
}
opcode &= ~(AARCH64_MASK_IMM16 << AARCH64_SHIFT_MOV_IMM16);
opcode |= (imm & AARCH64_MASK_IMM16) << AARCH64_SHIFT_MOV_IMM16;
*(uint32_t *)loc = sys_cpu_to_le32(opcode);
if (imm > UINT16_MAX) {
return -ERANGE;
}
return 0;
}
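To make the group selection above concrete, consider a made-up value against the checked (non-_NC) relocations:

/* For x = 0x0001234589ab:
 *   G0 (lsb = 0):  x >> 0  -> field 0x89ab, overflow (value needs > 16 bits)
 *   G1 (lsb = 16): x >> 16 -> field 0x2345, overflow (0x12345 left over)
 *   G2 (lsb = 32): x >> 32 -> field 0x0001, fits
 * The encoded field is (x >> lsb) & AARCH64_MASK_IMM16; the trailing
 * "imm > UINT16_MAX" test rejects the overflowing cases, and the caller
 * ignores -ERANGE for the _NC ("no check") variants.
 */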
/**
* @brief Handler for static relocations except those related to MOV* instructions.
*
* @param[in] rel Relocation data provided by ELF
* @param[in] reloc_type Type of relocation operation.
* @param[in] loc Address of an opcode to rewrite (P in AArch64 ELF).
* @param[in] sym_base_addr Address of the symbol referenced by relocation (S in AArch64 ELF).
*
* @retval -ERANGE Relocation value overflow
* @retval 0 Successful relocation
*/
static int imm_reloc_handler(elf_rela_t *rel, elf_word reloc_type, uintptr_t loc,
uintptr_t sym_base_addr)
{
int lsb = 2; /* LSB of X to be used */
int len; /* bit length of immediate value */
int shift = 10; /* shift of the immediate in instruction encoding */
uint64_t imm;
uint32_t bitmask = AARCH64_MASK_IMM12;
int64_t x;
bool is_adr = false;
enum aarch64_reloc_type type = AARCH64_RELOC_TYPE_ABS;
uint32_t opcode = sys_le32_to_cpu(*(uint32_t *)loc);
switch (reloc_type) {
case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_LDST8_ABS_LO12_NC:
lsb = 0;
len = 12;
break;
case R_AARCH64_LDST16_ABS_LO12_NC:
lsb = 1;
len = 11;
break;
case R_AARCH64_LDST32_ABS_LO12_NC:
len = 10;
break;
case R_AARCH64_LDST64_ABS_LO12_NC:
lsb = 3;
len = 9;
break;
case R_AARCH64_LDST128_ABS_LO12_NC:
lsb = 4;
len = 8;
break;
case R_AARCH64_LD_PREL_LO19:
case R_AARCH64_CONDBR19:
type = AARCH64_RELOC_TYPE_PREL;
bitmask = AARCH64_MASK_IMM19;
shift = 5;
len = 19;
break;
case R_AARCH64_ADR_PREL_LO21:
type = AARCH64_RELOC_TYPE_PREL;
is_adr = true;
lsb = 0;
len = 21;
break;
case R_AARCH64_TSTBR14:
type = AARCH64_RELOC_TYPE_PREL;
bitmask = AARCH64_MASK_IMM14;
shift = 5;
len = 14;
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
case R_AARCH64_ADR_PREL_PG_HI21:
type = AARCH64_RELOC_TYPE_PAGE;
is_adr = true;
lsb = 12;
len = 21;
break;
case R_AARCH64_CALL26:
case R_AARCH64_JUMP26:
type = AARCH64_RELOC_TYPE_PREL;
bitmask = AARCH64_MASK_IMM26;
shift = 0;
len = 26;
break;
default:
CODE_UNREACHABLE;
}
x = reloc(type, loc, sym_base_addr, rel->r_addend);
x >>= lsb;
imm = x & BIT_MASK(len);
/* ADR instruction has immediate value split into two fields. */
if (is_adr) {
uint32_t immlo, immhi;
immlo = (imm & AARCH64_MASK_ADR_IMMLO) << AARCH64_SHIFT_ADR_IMMLO;
imm >>= AARCH64_ADR_IMMLO_BITS;
immhi = (imm & AARCH64_MASK_ADR_IMMHI) << AARCH64_SHIFT_ADR_IMMHI;
imm = immlo | immhi;
shift = 0;
bitmask = ((AARCH64_MASK_ADR_IMMLO << AARCH64_SHIFT_ADR_IMMLO) |
(AARCH64_MASK_ADR_IMMHI << AARCH64_SHIFT_ADR_IMMHI));
}
opcode &= ~(bitmask << shift);
opcode |= (imm & bitmask) << shift;
*(uint32_t *)loc = sys_cpu_to_le32(opcode);
/* Mask X sign bit and upper bits. */
x = (int64_t)(x & ~BIT_MASK(len - 1)) >> (len - 1);
/* Incrementing X will either overflow and set it to 0 or
* set it to 1. Any other case indicates an overflow in the relocation.
*/
if ((int64_t)x++ > 1) {
return -ERANGE;
}
return 0;
}
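The sign-fold overflow test at the end is terse; a worked example with len = 19 (as for R_AARCH64_CONDBR19, signed range -0x40000..0x3ffff, values illustrative):

/* x = (int64_t)(x & ~BIT_MASK(len - 1)) >> (len - 1) collapses every bit
 * above the field into one value, which must equal the field's sign bit:
 *
 *   x =  0x00012 (in range)     -> folds to  0 -> x + 1 == 1, accepted
 *   x = -0x00012 (in range)     -> folds to -1 -> x + 1 == 0, accepted
 *   x =  0x70000 (out of range) -> folds to  1 -> x + 1 == 2, -ERANGE
 */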
/**
* @brief Architecture specific function for relocating partially linked (static) elf
*
* Elf files contain a series of relocations described in a section. These relocation
* instructions are architecture specific and each architecture supporting extensions
* must implement this.
*
* The relocation codes for arm64 are well documented
* https://github.com/ARM-software/abi-aa/blob/main/aaelf64/aaelf64.rst#relocation
*
* @param[in] rel Relocation data provided by ELF
* @param[in] loc Address of an opcode to rewrite (P in AArch64 ELF)
* @param[in] sym_base_addr Address of the symbol referenced by relocation (S in AArch64 ELF)
* @param[in] sym_name Name of symbol referenced by relocation
* @param[in] load_bias `.text` load address
* @retval 0 Success
* @retval -ENOTSUP Unsupported relocation
* @retval -ENOEXEC Invalid relocation
*/
int arch_elf_relocate(elf_rela_t *rel, uintptr_t loc, uintptr_t sym_base_addr, const char *sym_name,
uintptr_t load_bias)
{
int ret = 0;
bool overflow_check = true;
elf_word reloc_type = ELF_R_TYPE(rel->r_info);
switch (reloc_type) {
case R_ARM_NONE:
case R_AARCH64_NONE:
overflow_check = false;
break;
case R_AARCH64_ABS64:
case R_AARCH64_PREL64:
overflow_check = false;
case R_AARCH64_ABS16:
case R_AARCH64_ABS32:
case R_AARCH64_PREL16:
case R_AARCH64_PREL32:
ret = data_reloc_handler(rel, reloc_type, loc, sym_base_addr);
break;
case R_AARCH64_MOVW_UABS_G0_NC:
case R_AARCH64_MOVW_UABS_G1_NC:
case R_AARCH64_MOVW_UABS_G2_NC:
case R_AARCH64_MOVW_UABS_G3:
case R_AARCH64_MOVW_PREL_G0_NC:
case R_AARCH64_MOVW_PREL_G1_NC:
case R_AARCH64_MOVW_PREL_G2_NC:
case R_AARCH64_MOVW_PREL_G3:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G0:
case R_AARCH64_MOVW_UABS_G1:
case R_AARCH64_MOVW_UABS_G2:
case R_AARCH64_MOVW_SABS_G0:
case R_AARCH64_MOVW_SABS_G1:
case R_AARCH64_MOVW_SABS_G2:
case R_AARCH64_MOVW_PREL_G0:
case R_AARCH64_MOVW_PREL_G1:
case R_AARCH64_MOVW_PREL_G2:
ret = movw_reloc_handler(rel, reloc_type, loc, sym_base_addr);
break;
case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_LDST8_ABS_LO12_NC:
case R_AARCH64_LDST16_ABS_LO12_NC:
case R_AARCH64_LDST32_ABS_LO12_NC:
case R_AARCH64_LDST64_ABS_LO12_NC:
case R_AARCH64_LDST128_ABS_LO12_NC:
overflow_check = false;
case R_AARCH64_LD_PREL_LO19:
case R_AARCH64_ADR_PREL_LO21:
case R_AARCH64_TSTBR14:
case R_AARCH64_CONDBR19:
ret = imm_reloc_handler(rel, reloc_type, loc, sym_base_addr);
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
case R_AARCH64_ADR_PREL_PG_HI21:
ret = imm_reloc_handler(rel, reloc_type, loc, sym_base_addr);
break;
case R_AARCH64_CALL26:
case R_AARCH64_JUMP26:
ret = imm_reloc_handler(rel, reloc_type, loc, sym_base_addr);
/* TODO Handle case when address exceeds +/- 128MB */
break;
default:
LOG_ERR("unknown relocation: %llu\n", reloc_type);
return -ENOEXEC;
}
if (overflow_check && ret == -ERANGE) {
LOG_ERR("sym '%s': relocation out of range (%#lx -> %#lx)\n", sym_name, loc,
sym_base_addr);
return -ENOEXEC;
}
return 0;
}

View file

@ -21,8 +21,6 @@
#include <zephyr/sys/poweroff.h>
#include <kernel_arch_func.h>
#include "paging.h"
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK
@ -196,13 +194,9 @@ static void esf_dump(const struct arch_esf *esf)
LOG_ERR("x16: 0x%016llx x17: 0x%016llx", esf->x16, esf->x17);
LOG_ERR("x18: 0x%016llx lr: 0x%016llx", esf->x18, esf->lr);
}
#endif /* CONFIG_EXCEPTION_DEBUG */
#ifdef CONFIG_ARCH_STACKWALK
typedef bool (*arm64_stacktrace_cb)(void *cookie, unsigned long addr, void *fp);
static void walk_stackframe(arm64_stacktrace_cb cb, void *cookie, const struct arch_esf *esf,
int max_frames)
#ifdef CONFIG_EXCEPTION_STACK_TRACE
static void esf_unwind(const struct arch_esf *esf)
{
/*
* For GCC:
@ -224,61 +218,30 @@ static void walk_stackframe(arm64_stacktrace_cb cb, void *cookie, const struct a
* + +-----------------+
*/
uint64_t *fp;
uint64_t *fp = (uint64_t *) esf->fp;
unsigned int count = 0;
uint64_t lr;
if (esf != NULL) {
fp = (uint64_t *) esf->fp;
} else {
return;
}
for (int i = 0; (fp != NULL) && (i < max_frames); i++) {
LOG_ERR("");
for (int i = 0; (fp != NULL) && (i < CONFIG_EXCEPTION_STACK_TRACE_MAX_FRAMES); i++) {
lr = fp[1];
if (!cb(cookie, lr, fp)) {
break;
}
#ifdef CONFIG_SYMTAB
uint32_t offset = 0;
const char *name = symtab_find_symbol_name(lr, &offset);
LOG_ERR("backtrace %2d: fp: 0x%016llx lr: 0x%016llx [%s+0x%x]",
count++, (uint64_t) fp, lr, name, offset);
#else
LOG_ERR("backtrace %2d: fp: 0x%016llx lr: 0x%016llx",
count++, (uint64_t) fp, lr);
#endif
fp = (uint64_t *) fp[0];
}
}
void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
const struct k_thread *thread, const struct arch_esf *esf)
{
ARG_UNUSED(thread);
walk_stackframe((arm64_stacktrace_cb)callback_fn, cookie, esf,
CONFIG_ARCH_STACKWALK_MAX_FRAMES);
}
#endif /* CONFIG_ARCH_STACKWALK */
#ifdef CONFIG_EXCEPTION_STACK_TRACE
static bool print_trace_address(void *arg, unsigned long lr, void *fp)
{
int *i = arg;
#ifdef CONFIG_SYMTAB
uint32_t offset = 0;
const char *name = symtab_find_symbol_name(lr, &offset);
LOG_ERR(" %d: fp: 0x%016llx lr: 0x%016lx [%s+0x%x]", (*i)++, (uint64_t)fp, lr, name,
offset);
#else
LOG_ERR(" %d: fp: 0x%016llx lr: 0x%016lx", (*i)++, (uint64_t)fp, lr);
#endif /* CONFIG_SYMTAB */
return true;
}
static void esf_unwind(const struct arch_esf *esf)
{
int i = 0;
LOG_ERR("");
LOG_ERR("call trace:");
walk_stackframe(print_trace_address, &i, esf, CONFIG_ARCH_STACKWALK_MAX_FRAMES);
LOG_ERR("");
}
#endif /* CONFIG_EXCEPTION_STACK_TRACE */
#endif
#endif /* CONFIG_EXCEPTION_DEBUG */
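The refactor above funnels both the fatal-error backtrace and any future consumers through one generic walker. A minimal usage sketch, assuming stack_trace_callback_fn keeps the (cookie, addr) shape used by print_trace_address(); count_frames() and its caller are invented for illustration:

#include <zephyr/kernel.h>

static bool count_frames(void *cookie, unsigned long addr)
{
	unsigned int *frames = cookie;

	(*frames)++;
	return true; /* returning false would stop the walk early */
}

void report_frame_count(const struct arch_esf *esf)
{
	unsigned int frames = 0;

	/* the thread argument is unused by the arm64 implementation above */
	arch_stack_walk(count_frames, &frames, NULL, esf);
	printk("unwound %u frames\n", frames);
}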
#ifdef CONFIG_ARM64_STACK_PROTECTION
static bool z_arm64_stack_corruption_check(struct arch_esf *esf, uint64_t esr, uint64_t far)
@ -324,9 +287,8 @@ static bool z_arm64_stack_corruption_check(struct arch_esf *esf, uint64_t esr, u
static bool is_recoverable(struct arch_esf *esf, uint64_t esr, uint64_t far,
uint64_t elr)
{
if (!esf) {
if (!esf)
return false;
}
#ifdef CONFIG_USERSPACE
for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
@ -375,12 +337,6 @@ void z_arm64_fatal_error(unsigned int reason, struct arch_esf *esf)
}
#endif
if (IS_ENABLED(CONFIG_DEMAND_PAGING) &&
reason != K_ERR_STACK_CHK_FAIL &&
z_arm64_do_demand_paging(esf, esr, far)) {
return;
}
if (GET_EL(el) != MODE_EL0) {
#ifdef CONFIG_EXCEPTION_DEBUG
bool dump_far = false;
@ -389,9 +345,8 @@ void z_arm64_fatal_error(unsigned int reason, struct arch_esf *esf)
dump_esr(esr, &dump_far);
if (dump_far) {
if (dump_far)
LOG_ERR("FAR_ELn: 0x%016llx", far);
}
LOG_ERR("TPIDRRO: 0x%016llx", read_tpidrro_el0());
#endif /* CONFIG_EXCEPTION_DEBUG */

View file

@ -23,7 +23,3 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
: [svid] "i" (_SVC_CALL_IRQ_OFFLOAD),
"r" (x0), "r" (x1));
}
void arch_irq_offload_init(void)
{
}

View file

@ -11,7 +11,6 @@
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel/mm/demand_paging.h>
#include <kernel_arch_func.h>
#include <kernel_arch_interface.h>
#include <kernel_internal.h>
@ -22,10 +21,8 @@
#include <zephyr/linker/linker-defs.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/util.h>
#include <mmu.h>
#include "mmu.h"
#include "paging.h"
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
@ -142,12 +139,6 @@ int arm64_mmu_tables_total_usage(void)
static inline bool is_free_desc(uint64_t desc)
{
return desc == 0;
}
static inline bool is_inval_desc(uint64_t desc)
{
/* invalid descriptors aren't necessarily free */
return (desc & PTE_DESC_TYPE_MASK) == PTE_INVALID_DESC;
}
@ -212,10 +203,8 @@ static void debug_show_pte(uint64_t *pte, unsigned int level)
if (is_block_desc(*pte)) {
MMU_DEBUG("[Block] ");
} else if (!is_inval_desc(*pte)) {
MMU_DEBUG("[Page] ");
} else {
MMU_DEBUG("[paged-out] ");
MMU_DEBUG("[Page] ");
}
uint8_t mem_type = (*pte >> 2) & MT_TYPE_MASK;
@ -227,7 +216,6 @@ static void debug_show_pte(uint64_t *pte, unsigned int level)
MMU_DEBUG((*pte & PTE_BLOCK_DESC_AP_ELx) ? "-ELx" : "-ELh");
MMU_DEBUG((*pte & PTE_BLOCK_DESC_PXN) ? "-PXN" : "-PX");
MMU_DEBUG((*pte & PTE_BLOCK_DESC_UXN) ? "-UXN" : "-UX");
MMU_DEBUG((*pte & PTE_SW_WRITABLE) ? "-WRITABLE" : "");
MMU_DEBUG("\n");
}
#else
@ -243,15 +231,8 @@ static void set_pte_table_desc(uint64_t *pte, uint64_t *table, unsigned int leve
static void set_pte_block_desc(uint64_t *pte, uint64_t desc, unsigned int level)
{
if (level != XLAT_LAST_LEVEL) {
desc |= PTE_BLOCK_DESC;
} else if (!IS_ENABLED(CONFIG_DEMAND_PAGING) || (desc & PTE_BLOCK_DESC_AF) != 0) {
desc |= PTE_PAGE_DESC;
} else {
/*
* Demand paging configured and AF unset: leave the descriptor
* type as "invalid", as in arch_mem_page_out().
*/
if (desc) {
desc |= (level == XLAT_LAST_LEVEL) ? PTE_PAGE_DESC : PTE_BLOCK_DESC;
}
*pte = desc;
debug_show_pte(pte, level);
@ -392,11 +373,6 @@ static void del_mapping(uint64_t *table, uintptr_t virt, size_t size,
continue;
}
if (step != level_size && is_block_desc(*pte)) {
/* need to split this block mapping */
expand_to_table(pte, level);
}
if (is_table_desc(*pte, level)) {
subtable = pte_desc_table(*pte);
del_mapping(subtable, virt, step, level + 1);
@ -404,6 +380,12 @@ static void del_mapping(uint64_t *table, uintptr_t virt, size_t size,
continue;
}
dec_table_ref(subtable);
} else {
/*
* We assume that block mappings will be unmapped
* as a whole and not partially.
*/
__ASSERT(step == level_size, "");
}
/* free this entry */
@ -675,8 +657,6 @@ static uint64_t get_region_desc(uint32_t attrs)
/* AP bits for Data access permission */
desc |= (attrs & MT_RW) ? PTE_BLOCK_DESC_AP_RW : PTE_BLOCK_DESC_AP_RO;
desc |= (IS_ENABLED(CONFIG_DEMAND_PAGING) && (attrs & MT_RW)) ?
PTE_SW_WRITABLE : 0;
/* Mirror permissions to EL0 */
desc |= (attrs & MT_RW_AP_ELx) ?
@ -684,11 +664,6 @@ static uint64_t get_region_desc(uint32_t attrs)
/* the access flag */
desc |= PTE_BLOCK_DESC_AF;
if (IS_ENABLED(CONFIG_DEMAND_PAGING) && (attrs & MT_PAGED_OUT) != 0) {
/* set it up for demand paging like arch_mem_page_out() */
desc &= ~PTE_BLOCK_DESC_AF;
desc |= PTE_BLOCK_DESC_AP_RO;
}
/* memory attribute index field */
mem_type = MT_TYPE(attrs);
@ -711,20 +686,17 @@ static uint64_t get_region_desc(uint32_t attrs)
case MT_NORMAL_NC:
case MT_NORMAL:
/* Make Normal RW memory as execute never */
if ((attrs & MT_RW) || (attrs & MT_P_EXECUTE_NEVER)) {
if ((attrs & MT_RW) || (attrs & MT_P_EXECUTE_NEVER))
desc |= PTE_BLOCK_DESC_PXN;
}
if (((attrs & MT_RW) && (attrs & MT_RW_AP_ELx)) ||
(attrs & MT_U_EXECUTE_NEVER)) {
(attrs & MT_U_EXECUTE_NEVER))
desc |= PTE_BLOCK_DESC_UXN;
}
if (mem_type == MT_NORMAL) {
if (mem_type == MT_NORMAL)
desc |= PTE_BLOCK_DESC_INNER_SHARE;
} else {
else
desc |= PTE_BLOCK_DESC_OUTER_SHARE;
}
}
/* non-Global bit */
@ -783,12 +755,6 @@ static void invalidate_tlb_all(void)
: : : "memory");
}
static inline void invalidate_tlb_page(uintptr_t virt)
{
/* to be refined */
invalidate_tlb_all();
}
/* zephyr execution regions with appropriate attributes */
struct arm_mmu_flat_range {
@ -878,9 +844,8 @@ static void setup_page_tables(struct arm_mmu_ptables *ptables)
uintptr_t max_va = 0, max_pa = 0;
MMU_DEBUG("xlat tables:\n");
for (index = 0U; index < CONFIG_MAX_XLAT_TABLES; index++) {
for (index = 0U; index < CONFIG_MAX_XLAT_TABLES; index++)
MMU_DEBUG("%d: %p\n", index, xlat_tables + index * Ln_XLAT_NUM_ENTRIES);
}
for (index = 0U; index < mmu_config.num_regions; index++) {
region = &mmu_config.mmu_regions[index];
@ -1102,10 +1067,6 @@ static int __arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flag
entry_flags |= MT_RW_AP_ELx;
}
if (IS_ENABLED(CONFIG_DEMAND_PAGING) && (flags & K_MEM_MAP_UNPAGED) != 0) {
entry_flags |= MT_PAGED_OUT;
}
return add_map(ptables, "generic", phys, (uintptr_t)virt, size, entry_flags);
}
@ -1368,9 +1329,8 @@ void z_arm64_thread_mem_domains_init(struct k_thread *incoming)
{
struct arm_mmu_ptables *ptables;
if ((incoming->base.user_options & K_USER) == 0) {
if ((incoming->base.user_options & K_USER) == 0)
return;
}
ptables = incoming->arch.ptables;
@ -1386,311 +1346,3 @@ void z_arm64_swap_mem_domains(struct k_thread *incoming)
}
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_DEMAND_PAGING
static uint64_t *get_pte_location(struct arm_mmu_ptables *ptables,
uintptr_t virt)
{
uint64_t *pte;
uint64_t *table = ptables->base_xlat_table;
unsigned int level = BASE_XLAT_LEVEL;
for (;;) {
pte = &table[XLAT_TABLE_VA_IDX(virt, level)];
if (level == XLAT_LAST_LEVEL) {
return pte;
}
if (is_table_desc(*pte, level)) {
level++;
table = pte_desc_table(*pte);
continue;
}
/* anything else is unexpected */
return NULL;
}
}
void arch_mem_page_out(void *addr, uintptr_t location)
{
uintptr_t virt = (uintptr_t)addr;
uint64_t *pte = get_pte_location(&kernel_ptables, virt);
uint64_t desc;
__ASSERT(pte != NULL, "");
desc = *pte;
/* mark the entry invalid to the hardware */
desc &= ~PTE_DESC_TYPE_MASK;
desc |= PTE_INVALID_DESC;
/* store the location token in place of the physical address */
__ASSERT((location & ~PTE_PHYSADDR_MASK) == 0, "");
desc &= ~PTE_PHYSADDR_MASK;
desc |= location;
/*
* The location token may be 0. Make sure the whole descriptor
* doesn't end up being zero as this would be seen as a free entry.
*/
desc |= PTE_BLOCK_DESC_AP_RO;
*pte = desc;
MMU_DEBUG("page_out: virt=%#lx location=%#lx\n", virt, location);
debug_show_pte(pte, XLAT_LAST_LEVEL);
sync_domains(virt, CONFIG_MMU_PAGE_SIZE, "page_out");
invalidate_tlb_page(virt);
}
void arch_mem_page_in(void *addr, uintptr_t phys)
{
uintptr_t virt = (uintptr_t)addr;
uint64_t *pte = get_pte_location(&kernel_ptables, virt);
uint64_t desc;
__ASSERT((phys & ~PTE_PHYSADDR_MASK) == 0, "");
__ASSERT(pte != NULL, "");
desc = *pte;
__ASSERT(!is_free_desc(desc), "");
/* mark the entry valid again to the hardware */
desc &= ~PTE_DESC_TYPE_MASK;
desc |= PTE_PAGE_DESC;
/* store the physical address */
desc &= ~PTE_PHYSADDR_MASK;
desc |= phys;
/* mark as clean */
desc |= PTE_BLOCK_DESC_AP_RO;
/* and make it initially inaccessible to track unaccessed pages */
desc &= ~PTE_BLOCK_DESC_AF;
*pte = desc;
MMU_DEBUG("page_in: virt=%#lx phys=%#lx\n", virt, phys);
debug_show_pte(pte, XLAT_LAST_LEVEL);
sync_domains(virt, CONFIG_MMU_PAGE_SIZE, "page_in");
invalidate_tlb_page(virt);
}
enum arch_page_location arch_page_location_get(void *addr, uintptr_t *location)
{
uintptr_t virt = (uintptr_t)addr;
uint64_t *pte = get_pte_location(&kernel_ptables, virt);
uint64_t desc;
enum arch_page_location status;
if (!pte) {
return ARCH_PAGE_LOCATION_BAD;
}
desc = *pte;
if (is_free_desc(desc)) {
return ARCH_PAGE_LOCATION_BAD;
}
switch (desc & PTE_DESC_TYPE_MASK) {
case PTE_PAGE_DESC:
status = ARCH_PAGE_LOCATION_PAGED_IN;
break;
case PTE_INVALID_DESC:
status = ARCH_PAGE_LOCATION_PAGED_OUT;
break;
default:
return ARCH_PAGE_LOCATION_BAD;
}
*location = desc & PTE_PHYSADDR_MASK;
return status;
}
uintptr_t arch_page_info_get(void *addr, uintptr_t *phys, bool clear_accessed)
{
uintptr_t virt = (uintptr_t)addr;
uint64_t *pte = get_pte_location(&kernel_ptables, virt);
uint64_t desc;
uintptr_t status = 0;
if (!pte) {
return ARCH_DATA_PAGE_NOT_MAPPED;
}
desc = *pte;
if (is_free_desc(desc)) {
return ARCH_DATA_PAGE_NOT_MAPPED;
}
switch (desc & PTE_DESC_TYPE_MASK) {
case PTE_PAGE_DESC:
status |= ARCH_DATA_PAGE_LOADED;
break;
case PTE_INVALID_DESC:
/* page not loaded */
break;
default:
return ARCH_DATA_PAGE_NOT_MAPPED;
}
if (phys) {
*phys = desc & PTE_PHYSADDR_MASK;
}
if ((status & ARCH_DATA_PAGE_LOADED) == 0) {
return status;
}
if ((desc & PTE_BLOCK_DESC_AF) != 0) {
status |= ARCH_DATA_PAGE_ACCESSED;
}
if ((desc & PTE_BLOCK_DESC_AP_RO) == 0) {
status |= ARCH_DATA_PAGE_DIRTY;
}
if (clear_accessed) {
desc &= ~PTE_BLOCK_DESC_AF;
*pte = desc;
MMU_DEBUG("page_info: virt=%#lx (clearing AF)\n", virt);
debug_show_pte(pte, XLAT_LAST_LEVEL);
sync_domains(virt, CONFIG_MMU_PAGE_SIZE, "unaccessible");
invalidate_tlb_page(virt);
}
return status;
}
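Read together, the status bits compose as follows (the scenario is illustrative):

/* A resident page that has been read and then written since the last
 * clear_accessed pass is a PTE_PAGE_DESC with AF set and AP_RO cleared,
 * so arch_page_info_get() returns:
 *
 *   ARCH_DATA_PAGE_LOADED | ARCH_DATA_PAGE_ACCESSED | ARCH_DATA_PAGE_DIRTY
 */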
#define MT_SCRATCH (MT_NORMAL | MT_P_RW_U_NA | MT_DEFAULT_SECURE_STATE)
void arch_mem_scratch(uintptr_t phys)
{
uintptr_t virt = (uintptr_t)K_MEM_SCRATCH_PAGE;
size_t size = CONFIG_MMU_PAGE_SIZE;
int ret = add_map(&kernel_ptables, "scratch", phys, virt, size, MT_SCRATCH);
if (ret) {
LOG_ERR("add_map() returned %d", ret);
} else {
sync_domains(virt, size, "scratch");
invalidate_tlb_page(virt);
}
}
static bool do_mem_page_fault(struct arch_esf *esf, uintptr_t virt)
{
/*
* The k_mem_page_fault() code expects to be called with IRQs enabled
* if the fault happened in a context where IRQs were enabled.
*/
if (arch_irq_unlocked(esf->spsr)) {
enable_irq();
}
bool ok = k_mem_page_fault((void *)virt);
disable_irq();
return ok;
}
/* Called from the fault handler. Returns true if the fault is resolved. */
bool z_arm64_do_demand_paging(struct arch_esf *esf, uint64_t esr, uint64_t far)
{
uintptr_t virt = far;
uint64_t *pte, desc;
uintptr_t phys;
/* filter relevant exceptions */
switch (GET_ESR_EC(esr)) {
case 0x21: /* insn abort from current EL */
case 0x25: /* data abort from current EL */
break;
default:
return false;
}
/* make sure the fault happened in the expected range */
if (!IN_RANGE(virt,
(uintptr_t)K_MEM_VIRT_RAM_START,
((uintptr_t)K_MEM_VIRT_RAM_END - 1))) {
return false;
}
virt = ROUND_DOWN(virt, CONFIG_MMU_PAGE_SIZE);
pte = get_pte_location(&kernel_ptables, virt);
if (!pte) {
/* page mapping doesn't exist, let the core code do its thing */
return do_mem_page_fault(esf, virt);
}
desc = *pte;
if ((desc & PTE_DESC_TYPE_MASK) != PTE_PAGE_DESC) {
/* page is not loaded/mapped */
return do_mem_page_fault(esf, virt);
}
/*
* From this point, we expect only 2 cases:
*
* 1) the Access Flag was not set so we set it marking the page
* as accessed;
*
* 2) the page was read-only and a write occurred so we clear the
* RO flag marking the page dirty.
*
* We bail out on anything else.
*
* Fault status codes for Data aborts (DFSC):
* 0b0010LL Access flag fault
* 0b0011LL Permission fault
*/
uint32_t dfsc = GET_ESR_ISS(esr) & GENMASK(5, 0);
bool write = (GET_ESR_ISS(esr) & BIT(6)) != 0; /* WnR */
if (dfsc == (0b001000 | XLAT_LAST_LEVEL) &&
(desc & PTE_BLOCK_DESC_AF) == 0) {
/* page is being accessed: set the access flag */
desc |= PTE_BLOCK_DESC_AF;
if (write) {
if ((desc & PTE_SW_WRITABLE) == 0) {
/* we don't actually have write permission */
return false;
}
/*
* Let's avoid another fault immediately after
* returning by making the page read-write right away
* effectively marking it "dirty" as well.
*/
desc &= ~PTE_BLOCK_DESC_AP_RO;
}
*pte = desc;
sync_domains(virt, CONFIG_MMU_PAGE_SIZE, "accessed");
/* no TLB inval needed after setting AF */
/* tell the eviction algorithm about it */
phys = desc & PTE_PHYSADDR_MASK;
k_mem_paging_eviction_accessed(phys);
return true;
}
if (dfsc == (0b001100 | XLAT_LAST_LEVEL) && write &&
(desc & PTE_BLOCK_DESC_AP_RO) != 0 &&
(desc & PTE_SW_WRITABLE) != 0) {
/* make it "dirty" i.e. read-write */
desc &= ~PTE_BLOCK_DESC_AP_RO;
*pte = desc;
sync_domains(virt, CONFIG_MMU_PAGE_SIZE, "dirtied");
invalidate_tlb_page(virt);
/* this also counts as an access refresh */
phys = desc & PTE_PHYSADDR_MASK;
k_mem_paging_eviction_accessed(phys);
return true;
}
return false;
}
#endif /* CONFIG_DEMAND_PAGING */
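For reference, with XLAT_LAST_LEVEL == 3 (the usual 4KB-granule layout) the two DFSC values accepted above decode as:

/* (0b001000 | 3) == 0b001011 (0x0b): access flag fault, level 3
 * (0b001100 | 3) == 0b001111 (0x0f): permission fault, level 3
 * Bit 6 of the ISS (WnR) then separates a first read, which only sets
 * AF, from a first write, which also clears AP_RO to mark the page dirty.
 */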

View file

@ -126,14 +126,6 @@
*/
#define PTE_PHYSADDR_MASK GENMASK64(47, PAGE_SIZE_SHIFT)
/*
* Descriptor bits 58 to 55 are defined as "Reserved for Software Use".
*
* When using demand paging, RW memory is marked RO to trap the first write
* for dirty page tracking. Bit 55 indicates if memory is actually writable.
*/
#define PTE_SW_WRITABLE (1ULL << 55)
/*
* TCR definitions.
*/

View file

@ -1,12 +0,0 @@
/*
* Copyright (c) 2024 BayLibre SAS
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef Z_ARM64_PAGING_H
#define Z_ARM64_PAGING_H
bool z_arm64_do_demand_paging(struct arch_esf *esf, uint64_t esr, uint64_t far);
#endif /* Z_ARM64_PAGING_H */

View file

@ -16,8 +16,6 @@
#include <kernel_internal.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/platform/hooks.h>
#include <zephyr/arch/cache.h>
extern void z_arm64_mm_init(bool is_primary_core);
@ -32,10 +30,6 @@ __weak void z_arm64_mm_init(bool is_primary_core) { }
*/
void z_prep_c(void)
{
#if defined(CONFIG_SOC_PREP_HOOK)
soc_prep_hook();
#endif
/* Initialize tpidrro_el0 with our struct _cpu instance address */
write_tpidrro_el0((uintptr_t)&_kernel.cpus[0]);
@ -58,9 +52,6 @@ extern FUNC_NORETURN void arch_secondary_cpu_init(void);
void z_arm64_secondary_prep_c(void)
{
arch_secondary_cpu_init();
#if CONFIG_ARCH_CACHE
arch_cache_init();
#endif
CODE_UNREACHABLE;
}

View file

@ -1,35 +0,0 @@
/*
* Copyright 2024 NXP
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/reboot.h>
#include <zephyr/drivers/pm_cpu_ops.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#ifdef CONFIG_PM_CPU_OPS_PSCI
void __weak sys_arch_reboot(int type)
{
unsigned char reset_type;
if (type == SYS_REBOOT_COLD) {
reset_type = SYS_COLD_RESET;
} else if (type == SYS_REBOOT_WARM) {
reset_type = SYS_WARM_RESET;
} else {
LOG_ERR("Invalid reboot type");
return;
}
pm_system_reset(reset_type);
}
#else
void __weak sys_arch_reboot(int type)
{
LOG_WRN("%s is not implemented", __func__);
ARG_UNUSED(type);
}
#endif

View file

@ -4,5 +4,8 @@
# Needed to separate definitions in common Xen headers
zephyr_compile_options($<$<COMPILE_LANGUAGE:ASM>:-D__ASSEMBLY__>)
# Xen interface version used in headers for correct definition
zephyr_compile_options(-D__XEN_INTERFACE_VERSION__=0x00040e00)
zephyr_library_sources(hypercall.S)
zephyr_library_sources(enlighten.c)

View file

@ -25,11 +25,3 @@ config XEN_DOM0LESS
help
Configures Zephyr as DomU, that can be started on Dom0less
setup.
config XEN_INTERFACE_VERSION
hex "Xen interface version"
default 0x00040e00
depends on XEN
help
Xen interface version to use. This is the version of the
interface that Zephyr will use to communicate with the hypervisor.

View file

@ -42,7 +42,7 @@ static int xen_map_shared_info(const shared_info_t *shared_page)
return HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
}
int xen_enlighten_init(void)
static int xen_enlighten_init(void)
{
int ret = 0;
shared_info_t *info = (shared_info_t *) shared_info_buf;
@ -66,3 +66,5 @@ int xen_enlighten_init(void)
return 0;
}
SYS_INIT(xen_enlighten_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);

View file

@ -28,13 +28,8 @@ extern "C" {
#ifndef _ASMLANGUAGE
extern void xen_enlighten_init(void);
static ALWAYS_INLINE void arch_kernel_init(void)
{
#ifdef CONFIG_XEN
xen_enlighten_init();
#endif
}
static inline void arch_switch(void *switch_to, void **switched_from)

View file

@ -17,11 +17,6 @@ if(CONFIG_GEN_ISR_TABLES)
)
endif()
zephyr_library_sources_ifdef(
CONFIG_ISR_TABLE_SHELL
isr_tables_shell.c
)
zephyr_library_sources_ifdef(
CONFIG_MULTI_LEVEL_INTERRUPTS
multilevel_irq.c

View file

@ -30,10 +30,3 @@ config LEGACY_MULTI_LEVEL_TABLE_GENERATION
help
A make-shift Kconfig to continue generating the multi-level interrupt LUT
the legacy way, using DT macros.
config ISR_TABLE_SHELL
bool "Shell command to dump the ISR tables"
depends on GEN_SW_ISR_TABLE
depends on SHELL
help
This option enables a shell command to dump the ISR tables.

View file

@ -1,67 +0,0 @@
/*
* Copyright (c) 2024 Meta Platforms.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/debug/symtab.h>
#include <zephyr/shell/shell.h>
#include <zephyr/sw_isr_table.h>
static void dump_isr_table_entry(const struct shell *sh, int idx, struct _isr_table_entry *entry)
{
if ((entry->isr == z_irq_spurious) || (entry->isr == NULL)) {
return;
}
#ifdef CONFIG_SYMTAB
const char *name = symtab_find_symbol_name((uintptr_t)entry->isr, NULL);
shell_print(sh, "%4d: %s(%p)", idx, name, entry->arg);
#else
shell_print(sh, "%4d: %p(%p)", idx, entry->isr, entry->arg);
#endif /* CONFIG_SYMTAB */
}
static int cmd_sw_isr_table(const struct shell *sh, size_t argc, char **argv)
{
shell_print(sh, "_sw_isr_table[%d]\n", IRQ_TABLE_SIZE);
for (int idx = 0; idx < IRQ_TABLE_SIZE; idx++) {
dump_isr_table_entry(sh, idx, &_sw_isr_table[idx]);
}
return 0;
}
#ifdef CONFIG_SHARED_INTERRUPTS
static int cmd_shared_sw_isr_table(const struct shell *sh, size_t argc, char **argv)
{
shell_print(sh, "z_shared_sw_isr_table[%d][%d]\n", IRQ_TABLE_SIZE,
CONFIG_SHARED_IRQ_MAX_NUM_CLIENTS);
for (int idx = 0; idx < IRQ_TABLE_SIZE; idx++) {
for (int c = 0; c < z_shared_sw_isr_table[idx].client_num; c++) {
dump_isr_table_entry(sh, idx, &z_shared_sw_isr_table[idx].clients[c]);
}
}
return 0;
}
#endif /* CONFIG_SHARED_INTERRUPTS */
SHELL_STATIC_SUBCMD_SET_CREATE(isr_table_cmds,
SHELL_CMD_ARG(sw_isr_table, NULL,
"Dump _sw_isr_table.\n"
"Usage: isr_table sw_isr_table",
cmd_sw_isr_table, 1, 0),
#ifdef CONFIG_SHARED_INTERRUPTS
SHELL_CMD_ARG(shared_sw_isr_table, NULL,
"Dump z_shared_sw_isr_table.\n"
"Usage: isr_table shared_sw_isr_table",
cmd_shared_sw_isr_table, 1, 0),
#endif /* CONFIG_SHARED_INTERRUPTS */
SHELL_SUBCMD_SET_END);
SHELL_CMD_ARG_REGISTER(isr_table, &isr_table_cmds, "ISR tables shell command",
NULL, 0, 0);
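With CONFIG_ISR_TABLE_SHELL=y (it depends on SHELL and GEN_SW_ISR_TABLE), the commands registered above are invoked from the Zephyr shell as:

isr_table sw_isr_table
isr_table shared_sw_isr_table

matching the usage strings declared in the SHELL_CMD_ARG() entries.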

View file

@ -11,12 +11,9 @@
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
BUILD_ASSERT(CONFIG_MAX_IRQ_PER_AGGREGATOR < BIT(CONFIG_2ND_LEVEL_INTERRUPT_BITS),
BUILD_ASSERT((CONFIG_NUM_2ND_LEVEL_AGGREGATORS * CONFIG_MAX_IRQ_PER_AGGREGATOR) <=
BIT(CONFIG_2ND_LEVEL_INTERRUPT_BITS),
"L2 bits not enough to cover the number of L2 IRQs");
#ifdef CONFIG_3RD_LEVEL_INTERRUPTS
BUILD_ASSERT(CONFIG_MAX_IRQ_PER_AGGREGATOR < BIT(CONFIG_3RD_LEVEL_INTERRUPT_BITS),
"L3 bits not enough to cover the number of L3 IRQs");
#endif /* CONFIG_3RD_LEVEL_INTERRUPTS */
/**
* @brief Get the aggregator that's responsible for the given irq
@ -88,8 +85,7 @@ unsigned int z_get_sw_isr_table_idx(unsigned int irq)
table_idx -= CONFIG_GEN_IRQ_START_VECTOR;
__ASSERT(table_idx < IRQ_TABLE_SIZE, "table_idx(%d) < IRQ_TABLE_SIZE(%d)", table_idx,
IRQ_TABLE_SIZE);
__ASSERT_NO_MSG(table_idx < IRQ_TABLE_SIZE);
return table_idx;
}
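The strengthened assertion is easiest to sanity-check with numbers (an illustrative configuration, not a default):

/* With CONFIG_NUM_2ND_LEVEL_AGGREGATORS = 4 and
 * CONFIG_MAX_IRQ_PER_AGGREGATOR = 64, the L2 field must be able to
 * encode 4 * 64 = 256 table slots, so CONFIG_2ND_LEVEL_INTERRUPT_BITS
 * must be at least 8 (BIT(8) == 256) for the BUILD_ASSERT to hold.
 */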

View file

@ -92,8 +92,8 @@ void z_isr_install(unsigned int irq, void (*routine)(const void *),
for (i = 0; i < shared_entry->client_num; i++) {
client = &shared_entry->clients[i];
__ASSERT((client->isr == routine && client->arg == param) == false,
"ISR/arg combination is already registered");
__ASSERT(client->isr != routine && client->arg != param,
"trying to register duplicate ISR/arg pair");
}
shared_entry->clients[shared_entry->client_num].isr = routine;

View file

@ -19,16 +19,12 @@ static ALWAYS_INLINE void mips_idle(unsigned int key)
__asm__ volatile("wait");
}
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
void arch_cpu_idle(void)
{
mips_idle(1);
}
#endif
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
void arch_cpu_atomic_idle(unsigned int key)
{
mips_idle(key);
}
#endif

View file

@ -48,7 +48,3 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
irq_unlock(key);
}
void arch_irq_offload_init(void)
{
}

View file

@ -11,8 +11,6 @@
#include <kernel_internal.h>
#include <zephyr/irq.h>
#include <zephyr/platform/hooks.h>
#include <zephyr/arch/cache.h>
static void interrupt_init(void)
{
@ -46,15 +44,9 @@ static void interrupt_init(void)
void z_prep_c(void)
{
#if defined(CONFIG_SOC_PREP_HOOK)
soc_prep_hook();
#endif
z_bss_zero();
interrupt_init();
#if CONFIG_ARCH_CACHE
arch_cache_init();
#endif
z_cstart();
CODE_UNREACHABLE;

View file

@ -7,7 +7,6 @@
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
void arch_cpu_idle(void)
{
/* Do nothing but unconditionally unlock interrupts and return to the
@ -15,9 +14,7 @@ void arch_cpu_idle(void)
*/
irq_unlock(NIOS2_STATUS_PIE_MSK);
}
#endif
#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
void arch_cpu_atomic_idle(unsigned int key)
{
/* Do nothing but restore IRQ state. This CPU does not have any
@ -25,4 +22,3 @@ void arch_cpu_atomic_idle(unsigned int key)
*/
irq_unlock(key);
}
#endif

View file

@ -41,7 +41,3 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
irq_unlock(key);
}
void arch_irq_offload_init(void)
{
}

View file

@ -21,8 +21,6 @@
#include <zephyr/linker/linker-defs.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <zephyr/platform/hooks.h>
#include <zephyr/arch/cache.h>
/**
* @brief Prepare to and run C code
@ -32,10 +30,6 @@
void z_prep_c(void)
{
#if defined(CONFIG_SOC_PREP_HOOK)
soc_prep_hook();
#endif
z_bss_zero();
z_data_copy();
/* In most XIP scenarios we copy the exception code into RAM, so need
@ -50,9 +44,6 @@ void z_prep_c(void)
*/
z_nios2_dcache_flush_all();
#endif
#endif
#if CONFIG_ARCH_CACHE
arch_cache_init();
#endif
z_cstart();
CODE_UNREACHABLE;

View file

@ -21,12 +21,10 @@ endif()
if(CONFIG_NATIVE_APPLICATION)
zephyr_include_directories(
nsi_compat/
${ZEPHYR_BASE}/scripts/native_simulator/common/src/include/
)
zephyr_library_sources(
posix_core_nsi.c
posix_core.c
nsi_compat/nsi_compat.c
${ZEPHYR_BASE}/scripts/native_simulator/common/src/nct.c
${ZEPHYR_BASE}/scripts/native_simulator/common/src/nce.c
${ZEPHYR_BASE}/scripts/native_simulator/common/src/nsi_host_trampolines.c
)

View file

@ -14,10 +14,6 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
{
posix_irq_offload(routine, parameter);
}
void arch_irq_offload_init(void)
{
}
#endif
void arch_irq_enable(unsigned int irq)

View file

@ -0,0 +1,26 @@
/*
* Copyright (c) 2023 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef NSI_COMMON_SRC_INCL_NCE_IF_H
#define NSI_COMMON_SRC_INCL_NCE_IF_H
#ifdef __cplusplus
extern "C" {
#endif
/* Native simulator CPU start/stop emulation module interface */
void *nce_init(void);
void nce_terminate(void *this);
void nce_boot_cpu(void *this, void (*start_routine)(void));
void nce_halt_cpu(void *this);
void nce_wake_cpu(void *this);
int nce_is_cpu_running(void *this);
#ifdef __cplusplus
}
#endif
#endif /* NSI_COMMON_SRC_INCL_NCE_IF_H */

View file

@ -0,0 +1,19 @@
/*
* Copyright (c) 2023 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*
* Note: This is a provisional header which exists to allow
* old POSIX arch based boards (e.g. native_posix) to provide access
* to the host C library as if the native simulator trampolines
* existed.
*
* Boards based on the native simulator do NOT use this file
*/
#ifndef ARCH_POSIX_CORE_NSI_COMPAT_NSI_HOST_TRAMPOLINES_H
#define ARCH_POSIX_CORE_NSI_COMPAT_NSI_HOST_TRAMPOLINES_H
#include "../scripts/native_simulator/common/src/include/nsi_host_trampolines.h"
#endif /* ARCH_POSIX_CORE_NSI_COMPAT_NSI_HOST_TRAMPOLINES_H */

View file

@ -0,0 +1,15 @@
/*
* Copyright (c) 2023 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ARCH_POSIX_CORE_NSI_SAFE_CALLL_H
#define ARCH_POSIX_CORE_NSI_SAFE_CALLL_H
#include "nsi_tracing.h"
#include "posix_arch_internal.h"
#define NSI_SAFE_CALL PC_SAFE_CALL
#endif /* ARCH_POSIX_CORE_NSI_SAFE_CALLL_H */

View file

@ -0,0 +1,22 @@
/*
* Copyright (c) 2023 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ARCH_POSIX_CORE_NSI_TRACING_H
#define ARCH_POSIX_CORE_NSI_TRACING_H
#ifdef __cplusplus
extern "C" {
#endif
void nsi_print_error_and_exit(const char *format, ...);
void nsi_print_warning(const char *format, ...);
void nsi_print_trace(const char *format, ...);
#ifdef __cplusplus
}
#endif
#endif /* ARCH_POSIX_CORE_NSI_TRACING_H */

View file

@ -0,0 +1,539 @@
/*
* Copyright (c) 2017 Oticon A/S
* Copyright (c) 2023 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* Here is where things actually happen for the POSIX arch
*
* We isolate all functions here, to ensure they can be compiled as
* independently as possible from the remainder of Zephyr, avoiding name
* clashes, as Zephyr provides functions with the same names as the POSIX
* threads functions.
*/
/**
* Principle of operation:
*
* The Zephyr OS and its app run as a set of native pthreads.
* The Zephyr OS only sees one of these threads executing at a time.
* Which one is running is controlled using {cond|mtx}_threads and
* currently_allowed_thread.
*
* The main part of the execution of each thread will occur in a fully
* synchronous and deterministic manner, and only when commanded by the Zephyr
* kernel.
* But the creation of a thread will spawn a new pthread whose start
* is asynchronous to the rest, until synchronized in posix_wait_until_allowed()
* below.
* Similarly, aborting and canceling threads execute their tail code in a
* fairly asynchronous manner.
*
* This implementation is meant to be portable between POSIX systems.
* A table (threads_table) is used to abstract the native pthreads.
* An index in this table is used to identify threads in the interface to the kernel.
*
*/
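The handshake that comment describes boils down to one token guarded by a single mutex/condvar pair. A standalone sketch (names invented, error handling elided) of the pattern implemented by posix_wait_until_allowed() and posix_let_run() below:

#include <pthread.h>

static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static int allowed; /* token: index of the thread allowed to run */

/* Called with mtx held; returns with mtx held once the token names us. */
static void wait_until_allowed(int me)
{
	while (allowed != me) {
		pthread_cond_wait(&cond, &mtx); /* atomically releases/retakes mtx */
	}
}

/* Called with mtx held; woken threads cannot actually proceed until the
 * caller reaches its own wait loop and releases the mutex there.
 */
static void let_run(int next)
{
	allowed = next;
	pthread_cond_broadcast(&cond);
}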
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "posix_core.h"
#include "posix_arch_internal.h"
#define PREFIX "POSIX arch core: "
#define ERPREFIX PREFIX"error on "
#define NO_MEM_ERR PREFIX"Can't allocate memory\n"
#define PC_ENABLE_CANCEL 0 /* See Note.c1 */
#define PC_ALLOC_CHUNK_SIZE 64
#define PC_REUSE_ABORTED_ENTRIES 0
/* tests/kernel/threads/scheduling/schedule_api fails when setting
* PC_REUSE_ABORTED_ENTRIES => don't set it for now
*/
static int threads_table_size;
struct threads_table_el {
enum {NOTUSED = 0, USED, ABORTING, ABORTED, FAILED} state;
bool running; /* Is this the currently running thread */
pthread_t thread; /* Actual pthread_t as returned by native kernel */
int thead_cnt; /* For debugging: Unique, consecutive, thread number */
/* Pointer to the status kept in the Zephyr thread stack */
posix_thread_status_t *t_status;
};
static struct threads_table_el *threads_table;
static int thread_create_count; /* For debugging. Thread creation counter */
/*
* Condition variable to block/wake all threads during swaps()
* (we only need 1 mutex and 1 cond variable for all threads)
*/
static pthread_cond_t cond_threads = PTHREAD_COND_INITIALIZER;
/* Mutex for the condition variable cond_threads above */
static pthread_mutex_t mtx_threads = PTHREAD_MUTEX_INITIALIZER;
/* Token which tells which thread is allowed to run now */
static int currently_allowed_thread;
static bool terminate; /* Are we terminating the program == cleaning up */
static void posix_wait_until_allowed(int this_th_nbr);
static void *posix_thread_starter(void *arg);
static void posix_preexit_cleanup(void);
extern void posix_arch_thread_entry(void *pa_thread_status);
/**
* Helper function, run by a thread that is being aborted
*/
static void abort_tail(int this_th_nbr)
{
PC_DEBUG("Thread [%i] %i: %s: Aborting (exiting) (rel mut)\n",
threads_table[this_th_nbr].thead_cnt,
this_th_nbr,
__func__);
threads_table[this_th_nbr].running = false;
threads_table[this_th_nbr].state = ABORTED;
posix_preexit_cleanup();
pthread_exit(NULL);
}
/**
* Helper function to block this thread until it is allowed again
* (somebody calls posix_let_run() with this thread number)
*
* Note that we go out of this function (the while loop below)
* with the mutex locked by this particular thread.
* In normal circumstances, the mutex is only unlocked internally in
* pthread_cond_wait() while waiting for cond_threads to be signaled
*/
static void posix_wait_until_allowed(int this_th_nbr)
{
threads_table[this_th_nbr].running = false;
PC_DEBUG("Thread [%i] %i: %s: Waiting to be allowed to run (rel mut)\n",
threads_table[this_th_nbr].thead_cnt,
this_th_nbr,
__func__);
while (this_th_nbr != currently_allowed_thread) {
pthread_cond_wait(&cond_threads, &mtx_threads);
if (threads_table &&
(threads_table[this_th_nbr].state == ABORTING)) {
abort_tail(this_th_nbr);
}
}
threads_table[this_th_nbr].running = true;
PC_DEBUG("Thread [%i] %i: %s(): I'm allowed to run! (hav mut)\n",
threads_table[this_th_nbr].thead_cnt,
this_th_nbr,
__func__);
}
/**
* Helper function to let the thread <next_allowed_th> run
* Note: posix_let_run() can only be called with the mutex locked
*/
static void posix_let_run(int next_allowed_th)
{
PC_DEBUG("%s: We let thread [%i] %i run\n",
__func__,
threads_table[next_allowed_th].thead_cnt,
next_allowed_th);
currently_allowed_thread = next_allowed_th;
/*
* We let all threads know one is able to run now (it may even be us
* again if fancied)
* Note that as we hold the mutex, they are going to be blocked until
* we reach our own posix_wait_until_allowed() while loop
*/
PC_SAFE_CALL(pthread_cond_broadcast(&cond_threads));
}
static void posix_preexit_cleanup(void)
{
/*
* Release the mutex so the next allowed thread can run
*/
PC_SAFE_CALL(pthread_mutex_unlock(&mtx_threads));
/* We detach ourselves so nobody needs to join us */
pthread_detach(pthread_self());
}
/**
* Let the ready thread run and block this thread until it is allowed again
*
* called from arch_swap() which does the picking from the kernel structures
*/
void posix_swap(int next_allowed_thread_nbr, int this_th_nbr)
{
posix_let_run(next_allowed_thread_nbr);
if (threads_table[this_th_nbr].state == ABORTING) {
PC_DEBUG("Thread [%i] %i: %s: Aborting curr.\n",
threads_table[this_th_nbr].thead_cnt,
this_th_nbr,
__func__);
abort_tail(this_th_nbr);
} else {
posix_wait_until_allowed(this_th_nbr);
}
}
/**
* Let the ready thread (main) run, and exit this thread (init)
*
* Called from arch_switch_to_main_thread() which does the picking from the
* kernel structures
*
* Note that we could have just done a swap(), but that would have left the
* init thread lingering. Instead here we exit the init thread after enabling
* the new one
*/
void posix_main_thread_start(int next_allowed_thread_nbr)
{
posix_let_run(next_allowed_thread_nbr);
PC_DEBUG("%s: Init thread dying now (rel mut)\n",
__func__);
posix_preexit_cleanup();
pthread_exit(NULL);
}
/**
* Handler called when any thread is cancelled or exits
*/
static void posix_cleanup_handler(void *arg)
{
/*
* If we are not terminating, this is just an aborted thread,
* and the mutex was already released
* Otherwise, release the mutex so other threads which may be
* caught waiting for it could terminate
*/
if (!terminate) {
return;
}
#if POSIX_ARCH_DEBUG_PRINTS
posix_thread_status_t *ptr = (posix_thread_status_t *) arg;
PC_DEBUG("Thread %i: %s: Canceling (rel mut)\n",
ptr->thread_idx,
__func__);
#endif
PC_SAFE_CALL(pthread_mutex_unlock(&mtx_threads));
/* We detach ourselves so nobody needs to join us */
pthread_detach(pthread_self());
}
/**
* Helper function to start a Zephyr thread as a POSIX thread:
* It will block the thread until an arch_swap() is called for it
*
* Spawned from posix_new_thread() below
*/
static void *posix_thread_starter(void *arg)
{
int thread_idx = (intptr_t)arg;
PC_DEBUG("Thread [%i] %i: %s: Starting\n",
threads_table[thread_idx].thead_cnt,
thread_idx,
__func__);
/*
* We block until all other running threads reach the while loop
* in posix_wait_until_allowed() and they release the mutex
*/
PC_SAFE_CALL(pthread_mutex_lock(&mtx_threads));
/*
* The program may have finished before this thread ever got to run
*/
/* LCOV_EXCL_START */ /* See Note1 */
if (!threads_table) {
posix_cleanup_handler(arg);
pthread_exit(NULL);
}
/* LCOV_EXCL_STOP */
pthread_cleanup_push(posix_cleanup_handler, arg);
PC_DEBUG("Thread [%i] %i: %s: After start mutex (hav mut)\n",
threads_table[thread_idx].thead_cnt,
thread_idx,
__func__);
/*
* The thread would try to execute immediately, so we block it
* until allowed
*/
posix_wait_until_allowed(thread_idx);
posix_thread_status_t *ptr = threads_table[thread_idx].t_status;
posix_arch_thread_entry(ptr);
/*
* We only reach this point if the thread actually returns, which should
* not happen. But we handle it gracefully just in case
*/
/* LCOV_EXCL_START */
posix_print_trace(PREFIX"Thread [%i] %i [%lu] ended!?!\n",
threads_table[thread_idx].thead_cnt,
thread_idx,
pthread_self());
threads_table[thread_idx].running = false;
threads_table[thread_idx].state = FAILED;
pthread_cleanup_pop(1);
return NULL;
/* LCOV_EXCL_STOP */
}
/**
* Return the first free entry index in the threads table
*/
static int ttable_get_empty_slot(void)
{
for (int i = 0; i < threads_table_size; i++) {
if ((threads_table[i].state == NOTUSED)
|| (PC_REUSE_ABORTED_ENTRIES
&& (threads_table[i].state == ABORTED))) {
return i;
}
}
/*
* Otherwise, we ran out of table entries without finding a free index
* => we expand the table
*/
threads_table = realloc(threads_table,
(threads_table_size + PC_ALLOC_CHUNK_SIZE)
* sizeof(struct threads_table_el));
if (threads_table == NULL) { /* LCOV_EXCL_BR_LINE */
posix_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
}
/* Clear new piece of table */
(void)memset(&threads_table[threads_table_size], 0,
PC_ALLOC_CHUNK_SIZE * sizeof(struct threads_table_el));
threads_table_size += PC_ALLOC_CHUNK_SIZE;
/* The first newly created entry is good: */
return threads_table_size - PC_ALLOC_CHUNK_SIZE;
}
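/*
* Growth example (assuming PC_ALLOC_CHUNK_SIZE were 16): a full 16-entry
* table is realloc'ed to 32 entries, entries 16..31 are zeroed (which
* marks them as free, assuming NOTUSED is the zero state), and index 16
* is returned as the first free slot.
*/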
/**
* Called from arch_new_thread() to create a new POSIX thread for the
* new Zephyr thread.
* arch_new_thread() picks from the kernel structures what it is that we need
* to call and with what parameters
*/
int posix_new_thread(void *ptr)
{
int t_slot;
t_slot = ttable_get_empty_slot();
threads_table[t_slot].state = USED;
threads_table[t_slot].running = false;
threads_table[t_slot].thead_cnt = thread_create_count++;
threads_table[t_slot].t_status = ptr;
/*
* Note: If you are here due to a valgrind reported memory leak in
* pthread_create() please use the provided valgrind.supp suppression file.
*/
PC_SAFE_CALL(pthread_create(&threads_table[t_slot].thread,
NULL,
posix_thread_starter,
(void *)(intptr_t)t_slot));
PC_DEBUG("%s created thread [%i] %i [%lu]\n",
__func__,
threads_table[t_slot].thead_cnt,
t_slot,
threads_table[t_slot].thread);
return t_slot;
}
/*
* Initialize the posix architecture
*
* Prepare whatever needs to be prepared to be able to start threads
*/
void posix_arch_init(void)
{
thread_create_count = 0;
currently_allowed_thread = -1;
threads_table = calloc(PC_ALLOC_CHUNK_SIZE,
sizeof(struct threads_table_el));
if (threads_table == NULL) { /* LCOV_EXCL_BR_LINE */
posix_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
}
threads_table_size = PC_ALLOC_CHUNK_SIZE;
PC_SAFE_CALL(pthread_mutex_lock(&mtx_threads));
}
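/*
* Typical call sequence into this file from the rest of the POSIX arch
* layer (an illustrative, simplified sketch):
*
*   posix_arch_init();            // table allocated, mtx_threads taken
*   t = posix_new_thread(status); // pthread spawned, blocks on the mutex
*   posix_main_thread_start(t);   // or posix_swap(t, this_th) later on
*   ...
*   posix_arch_clean_up();        // from the HW thread, at program exit
*/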
/*
* Free any memory allocated by the posix core and clean up.
* Note that this function cannot be called from a SW thread
* (the CPU is assumed halted; otherwise we would cancel ourselves)
*
* This function cannot guarantee the threads will be cancelled before the HW
* thread exits. The only way to do that would be to wait for each of them in
* a join (without detaching them), but that could lead to deadlocks in some
* convoluted cases. As a call to this function can come from an ASSERT or other
* error termination, we had better not assume things are working fine.
* => we prefer the spurious memory leak report from valgrind, and ensure we
* will not hang
*/
void posix_arch_clean_up(void)
{
if (!threads_table) { /* LCOV_EXCL_BR_LINE */
return; /* LCOV_EXCL_LINE */
}
terminate = true;
#if (PC_ENABLE_CANCEL)
for (int i = 0; i < threads_table_size; i++) {
if (threads_table[i].state != USED) {
continue;
}
/* LCOV_EXCL_START */
if (pthread_cancel(threads_table[i].thread)) {
posix_print_warning(
PREFIX"cleanup: could not stop thread %i\n",
i);
}
/* LCOV_EXCL_STOP */
}
#endif
free(threads_table);
threads_table = NULL;
}
void posix_abort_thread(int thread_idx)
{
if (thread_idx == currently_allowed_thread) {
PC_DEBUG("Thread [%i] %i: %s Marked myself "
"as aborting\n",
threads_table[thread_idx].thead_cnt,
thread_idx,
__func__);
} else {
if (threads_table[thread_idx].state != USED) { /* LCOV_EXCL_BR_LINE */
/* The thread may have already been aborted */
return; /* LCOV_EXCL_LINE */
}
PC_DEBUG("Aborting not scheduled thread [%i] %i\n",
threads_table[thread_idx].thead_cnt,
thread_idx);
}
threads_table[thread_idx].state = ABORTING;
/*
* Note: the native thread will linger in RAM until it catches the
* mutex or wakes on the condition.
* Note that even if we were to pthread_cancel() the thread here, that
* would still be the case, but with a pthread_cancel() the mutex state
* would be uncontrolled
*/
}
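/*
* Sketch of the cooperative abort protocol implemented above
* (illustrative):
*
*   posix_abort_thread(t)  =>  threads_table[t].state = ABORTING;
*
* The next time pthread t wakes inside posix_wait_until_allowed() (or
* posix_swap() is entered on it), it notices the ABORTING state and
* calls abort_tail(t), which releases the mutex, detaches the pthread
* and exits it. No pthread_cancel() is involved in this path.
*/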
int posix_arch_get_unique_thread_id(int thread_idx)
{
return threads_table[thread_idx].thead_cnt;
}
/*
* Notes about coverage:
*
* Note1:
*
* This condition will only be triggered in very unlikely cases
* (once every few full regression runs).
* It is therefore excluded from the coverage report to avoid confusing
* developers.
*
* Background: This arch creates a pthread as soon as the Zephyr kernel creates
* a Zephyr thread. A pthread creation is an asynchronous process handled by the
* host kernel.
*
* This architecture normally keeps only 1 thread executing at a time.
* But part of the pre-initialization during creation of a new thread
* and some cleanup at the tail of the thread termination are executed
* in parallel with other threads.
* That is, the execution of those code paths is somewhat nondeterministic.
*
* Only when the Zephyr kernel attempts to swap to a new thread does this
* architecture need to wait until its pthread is ready and initialized
* (has reached posix_wait_until_allowed())
*
* In some cases (tests) threads are created which are never actually needed
* (typically the idle thread). That means the test may finish before this
* thread's underlying pthread has reached posix_wait_until_allowed().
*
* In these unlikely cases the initialization or cleanup of the thread follows
* non-typical code paths.
* These code paths are there to ensure things always work, no matter
* the load of the host. Without them, very rare & mysterious segfault crashes
* would occur.
* But as they are very atypical and only triggered under some host loads,
* they will only rarely be covered in the coverage reports.
*
* Note2:
*
* Some other code will never or only very rarely trigger and is therefore
* excluded with LCOV_EXCL_LINE
*
*
* Notes about (memory) cleanup:
*
* Note.c1:
*
* In some very rare cases on heavily loaded machines, a race in the glibc
* pthread_cancel() seems to be triggered.
* In it, the cancelled thread's cleanup overtakes the pthread_cancel() code,
* and frees the pthread structure before pthread_cancel() has finished,
* resulting in a dereference into already-freed memory, and therefore a
* segfault.
* Calling pthread_cancel() during cleanup is not required beyond preventing
* a valgrind memory-leak report (all threads will be cancelled immediately
* on exit anyway).
* Therefore we do not do it, to avoid these very rare crashes.
*/
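/*
* For illustration, a minimal standalone sketch of the single-token
* scheduling technique this file implements (hypothetical demo code,
* not part of the build; all names below are illustrative). Two
* pthreads share one mutex and one condition variable, and only the
* thread whose number matches the token may proceed, just as
* posix_let_run() / posix_wait_until_allowed() do above:
*
*   #include <pthread.h>
*   #include <stdint.h>
*   #include <stdio.h>
*
*   static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
*   static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
*   static int token = -1; // which thread may run now
*
*   static void let_run(int next) // call only with mtx held
*   {
*       token = next;
*       pthread_cond_broadcast(&cond);
*   }
*
*   static void *worker(void *arg)
*   {
*       int me = (int)(intptr_t)arg;
*
*       pthread_mutex_lock(&mtx);
*       while (token != me) { // wait until we hold the token
*           pthread_cond_wait(&cond, &mtx);
*       }
*       printf("thread %d runs\n", me);
*       let_run(1 - me); // pass the token to the peer
*       pthread_mutex_unlock(&mtx);
*       return NULL;
*   }
*
*   int main(void)
*   {
*       pthread_t t[2];
*
*       pthread_mutex_lock(&mtx); // hold the lock while spawning
*       pthread_create(&t[0], NULL, worker, (void *)(intptr_t)0);
*       pthread_create(&t[1], NULL, worker, (void *)(intptr_t)1);
*       let_run(0); // hand the token to thread 0
*       pthread_mutex_unlock(&mtx);
*       pthread_join(t[0], NULL);
*       pthread_join(t[1], NULL);
*       return 0;
*   }
*/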

View file

@@ -57,8 +57,3 @@ int posix_arch_get_unique_thread_id(int thread_idx)
{
return nct_get_unique_thread_id(te_state, thread_idx);
}
int posix_arch_thread_name_set(int thread_idx, const char *str)
{
return nct_thread_name_set(te_state, thread_idx, str);
}

Some files were not shown because too many files have changed in this diff