Compare commits


13 commits

Author SHA1 Message Date
a8dd8e3eae samples: watchdog: add the ch32v003evt to the sample 2024-11-03 20:59:36 +01:00
1863af82c8 drivers: watchdog: add a CH32V00x Independent Watchdog (IWDT) driver 2024-11-03 20:53:35 +01:00
9ba0ccf52d runners: add minichlink
This commit adds a runner wrapper for the 'minichlink' program which
offers a free, open mechanism to use the WCH-LinkE programming dongle for
the CH32V003.

https://github.com/cnlohr/ch32v003fun/tree/master/minichlink

Signed-off-by: Michael Hope <michaelh@juju.nz>
Signed-off-by: Dhiru Kholia <dhiru.kholia@gmail.com>
2024-11-03 16:16:31 +05:30
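Note: west runners plug into the standard flash and debug flow, so a board wired up for this runner is programmed with the usual commands. A hypothetical invocation, with only the runner name taken from the commit above:

west flash --runner minichlink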
81080a1ce1 boards: add ch32v003evt
This commit adds support for the CH32V003EVT board which features a
32-bit general-purpose RISC-V MCU.

Signed-off-by: Michael Hope <michaelh@juju.nz>
Signed-off-by: Dhiru Kholia <dhiru.kholia@gmail.com>
2024-11-03 16:16:23 +05:30
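Note: a board added this way is selected by its identifier at build time. A sketch of the usual workflow, assuming the board name matches the commit title and using the stock blinky sample:

west build -b ch32v003evt samples/basic/blinky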
2ab7d4317a drivers: add the gpio driver for wch ch32v003
This commit adds the GPIO driver for the WCH CH32V003.

Signed-off-by: Michael Hope <michaelh@juju.nz>
Signed-off-by: Dhiru Kholia <dhiru.kholia@gmail.com>
2024-11-02 21:54:51 +05:30
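Note: the driver is reached through Zephyr's generic GPIO API rather than vendor-specific calls. A minimal consumer sketch, assuming (hypothetically) that the board devicetree defines an led0 alias:

#include <zephyr/drivers/gpio.h>

static const struct gpio_dt_spec led = GPIO_DT_SPEC_GET(DT_ALIAS(led0), gpios);

int main(void)
{
	if (!gpio_is_ready_dt(&led)) {
		return -ENODEV; /* GPIO controller driver not ready */
	}
	gpio_pin_configure_dt(&led, GPIO_OUTPUT_ACTIVE);
	gpio_pin_toggle_dt(&led); /* exercises the new driver through the generic API */
	return 0;
}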
bb64ab5f90 drivers: add the pfic interrupt controller
This commit adds the PFIC interrupt controller driver for the WCH CH32V003.

Signed-off-by: Michael Hope <michaelh@juju.nz>
Signed-off-by: Dhiru Kholia <dhiru.kholia@gmail.com>
2024-11-02 21:54:51 +05:30
f8bc6df2be drivers: add the ch32v00x usart driver
This commit adds the USART driver for the WCH CH32V003.

Signed-off-by: Michael Hope <michaelh@juju.nz>
Signed-off-by: Dhiru Kholia <dhiru.kholia@gmail.com>
2024-11-02 21:54:51 +05:30
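Note: likewise, applications reach the USART through the generic UART API. A minimal polled-transmit sketch, assuming the board's zephyr,console chosen node points at this controller:

#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>

int main(void)
{
	const struct device *uart = DEVICE_DT_GET(DT_CHOSEN(zephyr_console));

	if (!device_is_ready(uart)) {
		return -ENODEV;
	}
	for (const char *s = "hello\r\n"; *s != '\0'; s++) {
		uart_poll_out(uart, *s); /* byte-at-a-time polled transmit */
	}
	return 0;
}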
f52a0135d8 drivers: add the ch32v00x systick driver
This commit adds the SysTick driver for the WCH CH32V003.

Signed-off-by: Michael Hope <michaelh@juju.nz>
Signed-off-by: Dhiru Kholia <dhiru.kholia@gmail.com>
2024-11-02 21:54:51 +05:30
2af384ddfb drivers: add the ch32v00x clock controller
This commit adds the clock driver for the WCH CH32V003.

Signed-off-by: Michael Hope <michaelh@juju.nz>
Signed-off-by: Dhiru Kholia <dhiru.kholia@gmail.com>
2024-11-02 21:54:48 +05:30
5f14e9385d drivers: add ch32v00x pinctrl support
This commit adds the pinctrl driver for the WCH CH32V003.

Signed-off-by: Michael Hope <michaelh@juju.nz>
Signed-off-by: Dhiru Kholia <dhiru.kholia@gmail.com>
2024-11-02 21:46:08 +05:30
5c3c784846 soc: add wch_ch32v003 soc files
This commit adds SoC support for the WCH CH32V003.

Signed-off-by: Michael Hope <michaelh@juju.nz>
Signed-off-by: Dhiru Kholia <dhiru.kholia@gmail.com>
2024-11-02 21:46:05 +05:30
233f93aa24 dts: add the ch32v003 dtsi
This commit adds the dtsi and bindings for the WCH CH32V003 which is a
32-bit general-purpose RISC-V MCU.

Signed-off-by: Michael Hope <michaelh@juju.nz>
Signed-off-by: Dhiru Kholia <dhiru.kholia@gmail.com>
2024-11-02 21:32:21 +05:30
Dhiru Kholia
155bcc0385 west: add the hal_wch
This is used for WCH chips including the CH32V003.

Signed-off-by: Dhiru Kholia <dhiru.kholia@gmail.com>
2024-11-02 21:32:21 +05:30
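Note: HAL modules like this are pulled in through the west manifest. A hypothetical west.yml projects entry; the name comes from the commit title, while the path and revision shown here are placeholders rather than the actual manifest values:

  - name: hal_wch
    revision: <pinned-sha>
    path: modules/hal/wch
    groups:
      - hal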
3045 changed files with 18037 additions and 76489 deletions

.github/SECURITY.md
@@ -8,10 +8,9 @@ updates:
- The most recent release, and the release prior to that.
- Active LTS releases.
At this time, with the latest release of v4.0, the supported
At this time, with the latest release of v3.6, the supported
versions are:
- v4.0: Current release
- v3.7: Current LTS
- v3.6: Prior release
- v2.7: Prior LTS

@@ -107,13 +107,13 @@ jobs:
export ZEPHYR_TOOLCHAIN_VARIANT=llvm
# check if we need to run a full twister or not based on files changed
python3 ./scripts/ci/test_plan.py --no-detailed-test-id --platform ${{ matrix.platform }} -c origin/${BASE_REF}..
python3 ./scripts/ci/test_plan.py --platform ${{ matrix.platform }} -c origin/${BASE_REF}..
# We can limit scope to just what has changed
if [ -s testplan.json ]; then
echo "report_needed=1" >> $GITHUB_OUTPUT
# Full twister but with options based on changes
./scripts/twister --no-detailed-test-id --force-color --inline-logs -M -N -v --load-tests testplan.json --retry-failed 2
./scripts/twister --force-color --inline-logs -M -N -v --load-tests testplan.json --retry-failed 2
else
# if nothing is run, skip reporting step
echo "report_needed=0" >> $GITHUB_OUTPUT

@@ -34,7 +34,7 @@ jobs:
CCACHE_REMOTE_STORAGE: "redis://cache-*.keydb-cache.svc.cluster.local|shards=1,2,3"
CCACHE_REMOTE_ONLY: "true"
# `--specs` is ignored because ccache is unable to resolve the toolchain specs file path.
CCACHE_IGNOREOPTIONS: '-specs=* --specs=*'
CCACHE_IGNOREOPTIONS: '--specs=*'
steps:
- name: Apply container owner mismatch workaround
run: |

@@ -38,7 +38,7 @@ jobs:
run: |
pip3 install setuptools
pip3 install wheel
pip3 install python-magic lxml junitparser gitlint pylint pykwalify yamllint clang-format unidiff sphinx-lint ruff
pip3 install python-magic lxml junitparser gitlint pylint pykwalify yamllint clang-format unidiff sphinx-lint
pip3 install west
- name: west setup
@@ -79,7 +79,7 @@ jobs:
git log --pretty=oneline | head -n 10
# Increase rename limit to allow for large PRs
git config diff.renameLimit 10000
./scripts/ci/check_compliance.py --annotate -e KconfigBasic -e ClangFormat \
./scripts/ci/check_compliance.py --annotate -e KconfigBasic \
-c origin/${BASE_REF}..
- name: upload-results

@@ -42,7 +42,6 @@ jobs:
with:
files: |
doc/
boards/**/doc/
**.rst
include/
kernel/include/kernel_arch_interface.h

@@ -26,7 +26,7 @@ jobs:
west init -l . || true
- name: Manifest
uses: zephyrproject-rtos/action-manifest@v1.5.0
uses: zephyrproject-rtos/action-manifest@v1.3.1
with:
github-token: ${{ secrets.ZB_GITHUB_TOKEN }}
manifest-path: 'west.yml'

@@ -1,146 +0,0 @@
name: Prepare For a Twister Run
on:
workflow_call:
outputs:
subset:
description: subset
value: ${{ jobs.prep_push.outputs.subset != '' && jobs.prep_push.outputs.subset || jobs.prep_pr.outputs.subset }}
size:
description: size
value: ${{ jobs.prep_push.outputs.size != '' && jobs.prep_push.outputs.size || jobs.prep_pr.outputs.size }}
fullrun:
description: fullrun
value: ${{ jobs.prep_push.outputs.fullrun != '' && jobs.prep_push.outputs.fullrun || jobs.prep_pr.outputs.size }}
jobs:
prep_pr:
if: github.repository_owner == 'zephyrproject-rtos' && github.event_name == 'pull_request_target'
runs-on:
group: zephyr-runner-v2-linux-x64-4xlarge
container:
image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.27.4.20241026
options: '--entrypoint /bin/bash'
outputs:
subset: ${{ steps.output-services.outputs.subset }}
size: ${{ steps.output-services.outputs.size }}
fullrun: ${{ steps.output-services.outputs.fullrun }}
env:
MATRIX_SIZE: 10
PUSH_MATRIX_SIZE: 20
DAILY_MATRIX_SIZE: 80
BSIM_OUT_PATH: /opt/bsim/
BSIM_COMPONENTS_PATH: /opt/bsim/components
TESTS_PER_BUILDER: 700
COMMIT_RANGE: ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}
BASE_REF: ${{ github.base_ref }}
steps:
- name: Apply container owner mismatch workaround
run: |
# FIXME: The owner UID of the GITHUB_WORKSPACE directory may not
# match the container user UID because of the way GitHub
# Actions runner is implemented. Remove this workaround when
# GitHub comes up with a fundamental fix for this problem.
git config --global --add safe.directory ${GITHUB_WORKSPACE}
- name: Print cloud service information
run: |
echo "ZEPHYR_RUNNER_CLOUD_PROVIDER = ${ZEPHYR_RUNNER_CLOUD_PROVIDER}"
echo "ZEPHYR_RUNNER_CLOUD_NODE = ${ZEPHYR_RUNNER_CLOUD_NODE}"
echo "ZEPHYR_RUNNER_CLOUD_POD = ${ZEPHYR_RUNNER_CLOUD_POD}"
- name: Clone cached Zephyr repository
continue-on-error: true
run: |
git clone --shared /repo-cache/zephyrproject/zephyr .
git remote set-url origin ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}
- name: Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
persist-credentials: false
- name: Environment Setup
run: |
git config --global user.email "bot@zephyrproject.org"
git config --global user.name "Zephyr Bot"
rm -fr ".git/rebase-apply"
git rebase origin/${BASE_REF}
git clean -f -d
git log --pretty=oneline | head -n 10
west init -l . || true
west config manifest.group-filter -- +ci,+optional
west config --global update.narrow true
west update --path-cache /repo-cache/zephyrproject 2>&1 1> west.update.log || west update --path-cache /repo-cache/zephyrproject 2>&1 1> west.update.log || ( rm -rf ../modules ../bootloader ../tools && west update --path-cache /repo-cache/zephyrproject)
west forall -c 'git reset --hard HEAD'
echo "ZEPHYR_SDK_INSTALL_DIR=/opt/toolchains/zephyr-sdk-$( cat SDK_VERSION )" >> $GITHUB_ENV
- name: Generate Test Plan with Twister
id: test-plan
run: |
export ZEPHYR_BASE=${PWD}
export ZEPHYR_TOOLCHAIN_VARIANT=zephyr
python3 ./scripts/ci/test_plan.py -c origin/${BASE_REF}.. --no-detailed-test-id --pull-request -t $TESTS_PER_BUILDER
if [ -s .testplan ]; then
cat .testplan >> $GITHUB_ENV
else
echo "TWISTER_NODES=${MATRIX_SIZE}" >> $GITHUB_ENV
fi
rm -f testplan.json .testplan
- name: Determine matrix size
id: output-services
run: |
if [ -n "${TWISTER_NODES}" ]; then
subset="[$(seq -s',' 1 ${TWISTER_NODES})]"
else
subset="[$(seq -s',' 1 ${MATRIX_SIZE})]"
fi
size=${TWISTER_NODES}
echo "subset=${subset}" >> $GITHUB_OUTPUT
echo "size=${size}" >> $GITHUB_OUTPUT
echo "fullrun=${TWISTER_FULL}" >> $GITHUB_OUTPUT
prep_push:
if: github.repository_owner == 'zephyrproject-rtos' && (github.event_name == 'push' || github.event_name == 'schedule')
runs-on: ubuntu-22.04
outputs:
subset: ${{ steps.output-services.outputs.subset }}
size: ${{ steps.output-services.outputs.size }}
fullrun: ${{ steps.output-services.outputs.fullrun }}
env:
MATRIX_SIZE: 10
PUSH_MATRIX_SIZE: 20
DAILY_MATRIX_SIZE: 80
BSIM_OUT_PATH: /opt/bsim/
BSIM_COMPONENTS_PATH: /opt/bsim/components
TESTS_PER_BUILDER: 700
COMMIT_RANGE: ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}
BASE_REF: ${{ github.base_ref }}
steps:
- name: Print cloud service information
run: |
echo "ZEPHYR_RUNNER_CLOUD_PROVIDER = ${ZEPHYR_RUNNER_CLOUD_PROVIDER}"
echo "ZEPHYR_RUNNER_CLOUD_NODE = ${ZEPHYR_RUNNER_CLOUD_NODE}"
echo "ZEPHYR_RUNNER_CLOUD_POD = ${ZEPHYR_RUNNER_CLOUD_POD}"
- name: Determine matrix size
id: output-services
run: |
if [ "${{github.event_name}}" = "push" ]; then
subset="[$(seq -s',' 1 ${PUSH_MATRIX_SIZE})]"
size=${MATRIX_SIZE}
elif [ "${{github.event_name}}" = "schedule" -a "${{github.repository}}" = "zephyrproject-rtos/zephyr" ]; then
subset="[$(seq -s',' 1 ${DAILY_MATRIX_SIZE})]"
size=${DAILY_MATRIX_SIZE}
else
size=0
fi
echo "subset=${subset}" >> $GITHUB_OUTPUT
echo "size=${size}" >> $GITHUB_OUTPUT
echo "fullrun=${TWISTER_FULL}" >> $GITHUB_OUTPUT

@@ -1,53 +0,0 @@
name: Publish Twister Test Results
on:
workflow_run:
workflows: ["Run tests with twister"]
branches:
- main
types:
- completed
jobs:
upload-to-elasticsearch:
if: |
github.repository == 'zephyrproject-rtos/zephyr' &&
github.event.workflow_run.event != 'pull_request_target'
env:
ELASTICSEARCH_KEY: ${{ secrets.ELASTICSEARCH_KEY }}
ELASTICSEARCH_SERVER: "https://elasticsearch.zephyrproject.io:443"
runs-on: ubuntu-22.04
steps:
# Needed for elasticsearch and upload script
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Download Artifacts
id: download-artifacts
uses: dawidd6/action-download-artifact@v6
with:
path: artifacts
workflow: twister.yml
run_id: ${{ github.event.workflow_run.id }}
if_no_artifact_found: ignore
- name: Upload to elasticsearch
if: steps.download-artifacts.outputs.found_artifact == 'true'
run: |
pip3 install elasticsearch
# set run date on upload to get consistent and unified data across the matrix.
run_date=`date --iso-8601=minutes`
if [ "${{github.event.workflow_run.event}}" = "push" ]; then
python3 ./scripts/ci/upload_test_results_es.py -r ${run_date} \
--run-attempt ${{github.run_attempt}} \
--run-branch ${{github.ref_name}} \
--index zephyr-main-ci-push-1 artifacts/*/*/twister.json
elif [ "${{github.event.workflow_run.event}}" = "schedule" ]; then
python3 ./scripts/ci/upload_test_results_es.py -r ${run_date} \
--run-attempt ${{github.run_attempt}} \
--run-branch ${{github.ref_name}} \
--index zephyr-main-ci-weekly-1 artifacts/*/*/twister.json
fi

@@ -21,7 +21,108 @@ concurrency:
jobs:
twister-build-prep:
uses: ./.github/workflows/twister-prep.yaml
if: github.repository_owner == 'zephyrproject-rtos'
runs-on:
group: zephyr-runner-v2-linux-x64-4xlarge
container:
image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.27.4.20241026
options: '--entrypoint /bin/bash'
outputs:
subset: ${{ steps.output-services.outputs.subset }}
size: ${{ steps.output-services.outputs.size }}
fullrun: ${{ steps.output-services.outputs.fullrun }}
env:
MATRIX_SIZE: 10
PUSH_MATRIX_SIZE: 20
DAILY_MATRIX_SIZE: 80
BSIM_OUT_PATH: /opt/bsim/
BSIM_COMPONENTS_PATH: /opt/bsim/components
TESTS_PER_BUILDER: 700
COMMIT_RANGE: ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}
BASE_REF: ${{ github.base_ref }}
steps:
- name: Apply container owner mismatch workaround
run: |
# FIXME: The owner UID of the GITHUB_WORKSPACE directory may not
# match the container user UID because of the way GitHub
# Actions runner is implemented. Remove this workaround when
# GitHub comes up with a fundamental fix for this problem.
git config --global --add safe.directory ${GITHUB_WORKSPACE}
- name: Print cloud service information
run: |
echo "ZEPHYR_RUNNER_CLOUD_PROVIDER = ${ZEPHYR_RUNNER_CLOUD_PROVIDER}"
echo "ZEPHYR_RUNNER_CLOUD_NODE = ${ZEPHYR_RUNNER_CLOUD_NODE}"
echo "ZEPHYR_RUNNER_CLOUD_POD = ${ZEPHYR_RUNNER_CLOUD_POD}"
- name: Clone cached Zephyr repository
if: github.event_name == 'pull_request_target'
continue-on-error: true
run: |
git clone --shared /repo-cache/zephyrproject/zephyr .
git remote set-url origin ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}
- name: Checkout
if: github.event_name == 'pull_request_target'
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
persist-credentials: false
- name: Environment Setup
if: github.event_name == 'pull_request_target'
run: |
git config --global user.email "bot@zephyrproject.org"
git config --global user.name "Zephyr Bot"
rm -fr ".git/rebase-apply"
git rebase origin/${BASE_REF}
git clean -f -d
git log --pretty=oneline | head -n 10
west init -l . || true
west config manifest.group-filter -- +ci,+optional
west config --global update.narrow true
west update --path-cache /repo-cache/zephyrproject 2>&1 1> west.update.log || west update --path-cache /repo-cache/zephyrproject 2>&1 1> west.update.log || ( rm -rf ../modules ../bootloader ../tools && west update --path-cache /repo-cache/zephyrproject)
west forall -c 'git reset --hard HEAD'
echo "ZEPHYR_SDK_INSTALL_DIR=/opt/toolchains/zephyr-sdk-$( cat SDK_VERSION )" >> $GITHUB_ENV
- name: Generate Test Plan with Twister
if: github.event_name == 'pull_request_target'
id: test-plan
run: |
export ZEPHYR_BASE=${PWD}
export ZEPHYR_TOOLCHAIN_VARIANT=zephyr
python3 ./scripts/ci/test_plan.py -c origin/${BASE_REF}.. --pull-request -t $TESTS_PER_BUILDER
if [ -s .testplan ]; then
cat .testplan >> $GITHUB_ENV
else
echo "TWISTER_NODES=${MATRIX_SIZE}" >> $GITHUB_ENV
fi
rm -f testplan.json .testplan
- name: Determine matrix size
id: output-services
run: |
if [ "${{github.event_name}}" = "pull_request_target" ]; then
if [ -n "${TWISTER_NODES}" ]; then
subset="[$(seq -s',' 1 ${TWISTER_NODES})]"
else
subset="[$(seq -s',' 1 ${MATRIX_SIZE})]"
fi
size=${TWISTER_NODES}
elif [ "${{github.event_name}}" = "push" ]; then
subset="[$(seq -s',' 1 ${PUSH_MATRIX_SIZE})]"
size=${MATRIX_SIZE}
elif [ "${{github.event_name}}" = "schedule" -a "${{github.repository}}" = "zephyrproject-rtos/zephyr" ]; then
subset="[$(seq -s',' 1 ${DAILY_MATRIX_SIZE})]"
size=${DAILY_MATRIX_SIZE}
else
size=0
fi
echo "subset=${subset}" >> $GITHUB_OUTPUT
echo "size=${size}" >> $GITHUB_OUTPUT
echo "fullrun=${TWISTER_FULL}" >> $GITHUB_OUTPUT
twister-build:
runs-on:
@@ -41,13 +142,13 @@ jobs:
CCACHE_REMOTE_STORAGE: "redis://cache-*.keydb-cache.svc.cluster.local|shards=1,2,3"
CCACHE_REMOTE_ONLY: "true"
# `--specs` is ignored because ccache is unable to resolve the toolchain specs file path.
CCACHE_IGNOREOPTIONS: '-specs=* --specs=*'
CCACHE_IGNOREOPTIONS: '--specs=*'
BSIM_OUT_PATH: /opt/bsim/
BSIM_COMPONENTS_PATH: /opt/bsim/components
TWISTER_COMMON: '--no-detailed-test-id --force-color --inline-logs -v -N -M --retry-failed 3 --timeout-multiplier 2 '
WEEKLY_OPTIONS: ' -M --build-only --all --show-footprint --report-filtered'
TWISTER_COMMON: ' --force-color --inline-logs -v -N -M --retry-failed 3 --timeout-multiplier 2 '
DAILY_OPTIONS: ' -M --build-only --all --show-footprint'
PR_OPTIONS: ' --clobber-output --integration'
PUSH_OPTIONS: ' --clobber-output -M --show-footprint --report-filtered'
PUSH_OPTIONS: ' --clobber-output -M --show-footprint'
COMMIT_RANGE: ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}
BASE_REF: ${{ github.base_ref }}
steps:
@@ -129,7 +230,6 @@ jobs:
- if: github.event_name == 'push'
name: Run Tests with Twister (Push)
id: run_twister
run: |
export ZEPHYR_BASE=${PWD}
export ZEPHYR_TOOLCHAIN_VARIANT=zephyr
@@ -143,12 +243,11 @@ jobs:
- if: github.event_name == 'pull_request_target'
name: Run Tests with Twister (Pull Request)
id: run_twister_pr
run: |
rm -f testplan.json
export ZEPHYR_BASE=${PWD}
export ZEPHYR_TOOLCHAIN_VARIANT=zephyr
python3 ./scripts/ci/test_plan.py -c origin/${BASE_REF}.. --pull-request --no-detailed-test-id
python3 ./scripts/ci/test_plan.py -c origin/${BASE_REF}.. --pull-request
./scripts/twister --subset ${{matrix.subset}}/${{ strategy.job-total }} --load-tests testplan.json ${TWISTER_COMMON} ${PR_OPTIONS}
if [ "${{matrix.subset}}" = "1" -a ${{needs.twister-build-prep.outputs.fullrun}} = 'True' ]; then
./scripts/zephyr_module.py --twister-out module_tests.args
@@ -159,15 +258,14 @@ jobs:
- if: github.event_name == 'schedule'
name: Run Tests with Twister (Daily)
id: run_twister_sched
run: |
export ZEPHYR_BASE=${PWD}
export ZEPHYR_TOOLCHAIN_VARIANT=zephyr
./scripts/twister --subset ${{matrix.subset}}/${{ strategy.job-total }} ${TWISTER_COMMON} ${WEEKLY_OPTIONS}
./scripts/twister --subset ${{matrix.subset}}/${{ strategy.job-total }} ${TWISTER_COMMON} ${DAILY_OPTIONS}
if [ "${{matrix.subset}}" = "1" ]; then
./scripts/zephyr_module.py --twister-out module_tests.args
if [ -s module_tests.args ]; then
./scripts/twister +module_tests.args --outdir module_tests ${TWISTER_COMMON} ${WEEKLY_OPTIONS}
./scripts/twister +module_tests.args --outdir module_tests ${TWISTER_COMMON} ${DAILY_OPTIONS}
fi
fi
@@ -208,18 +306,42 @@ jobs:
twister-test-results:
name: "Publish Unit Tests Results"
needs:
- twister-build
env:
ELASTICSEARCH_KEY: ${{ secrets.ELASTICSEARCH_KEY }}
ELASTICSEARCH_SERVER: "https://elasticsearch.zephyrproject.io:443"
needs: twister-build
runs-on: ubuntu-22.04
# the build-and-test job might be skipped, we don't need to run this job then
if: success() || failure()
steps:
# Needed for elasticsearch and upload script
- if: github.event_name == 'push' || github.event_name == 'schedule'
name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Download Artifacts
uses: actions/download-artifact@v4
with:
path: artifacts
- if: github.event_name == 'push' || github.event_name == 'schedule'
name: Upload to elasticsearch
run: |
pip3 install elasticsearch
# set run date on upload to get consistent and unified data across the matrix.
run_date=`date --iso-8601=minutes`
if [ "${{github.event_name}}" = "push" ]; then
python3 ./scripts/ci/upload_test_results_es.py -r ${run_date} \
--index zephyr-main-ci-push-1 artifacts/*/*/twister.json
elif [ "${{github.event_name}}" = "schedule" ]; then
python3 ./scripts/ci/upload_test_results_es.py -r ${run_date} \
--index zephyr-main-ci-weekly-1 artifacts/*/*/twister.json
fi
- name: Merge Test Results
run: |
pip3 install junitparser junit2html

@@ -44,8 +44,7 @@ jobs:
west init -l . || true
# we do not depend on any hals, tools or bootloader, save some time and space...
west config manifest.group-filter -- -hal,-tools,-bootloader,-babblesim
west config manifest.project-filter -- -nrf_hw_models
west config manifest.group-filter -- -hal,-tools,-bootloader
west config --global update.narrow true
west update --path-cache /github/cache/zephyrproject 2>&1 1> west.update.log || west update --path-cache /github/cache/zephyrproject 2>&1 1> west.update.log || ( rm -rf ../modules ../bootloader ../tools && west update --path-cache /github/cache/zephyrproject)
west forall -c 'git reset --hard HEAD'

@@ -66,7 +66,7 @@ jobs:
- name: install pytest
run: |
pip3 install wheel
pip3 install pytest west pyelftools canopen natsort progress mypy intelhex psutil ply pyserial anytree
pip3 install pytest west pyelftools canopen natsort progress mypy intelhex psutil ply pyserial
- name: run pytest-win
if: runner.os == 'Windows'
run: |

.gitignore
@@ -102,7 +102,6 @@ MaintainersFormat.txt
ModulesMaintainers.txt
Nits.txt
Pylint.txt
Ruff.txt
SphinxLint.txt
TextEncoding.txt
YAMLLint.txt

File diff suppressed because it is too large

@@ -1,31 +0,0 @@
# Copyright (c) 2024 Basalte bv
# SPDX-License-Identifier: Apache-2.0
extend = ".ruff-excludes.toml"
line-length = 100
target-version = "py310"
[lint]
select = [
# zephyr-keep-sorted-start
"B", # flake8-bugbear
"E", # pycodestyle
"F", # pyflakes
"I", # isort
"SIM", # flake8-simplify
"UP", # pyupgrade
"W", # pycodestyle warnings
# zephyr-keep-sorted-stop
]
ignore = [
# zephyr-keep-sorted-start
"SIM108", # Allow if-else blocks instead of forcing ternary operator
"UP027", # deprecated pyupgrade rule
# zephyr-keep-sorted-stop
]
[format]
quote-style = "preserve"
line-ending = "lf"

@@ -598,8 +598,6 @@ add_custom_command(
COMMAND_EXPAND_LISTS
)
add_custom_target(version_h DEPENDS ${PROJECT_BINARY_DIR}/include/generated/zephyr/version.h)
zephyr_get(KERNEL_VERSION_CUSTOMIZATION SYSBUILD LOCAL)
set_property(TARGET version_h PROPERTY KERNEL_VERSION_CUSTOMIZATION ${KERNEL_VERSION_CUSTOMIZATION})
if(EXISTS ${APPLICATION_SOURCE_DIR}/VERSION)
add_custom_command(
@@ -618,8 +616,6 @@ if(EXISTS ${APPLICATION_SOURCE_DIR}/VERSION)
app_version_h
DEPENDS ${PROJECT_BINARY_DIR}/include/generated/zephyr/app_version.h)
add_dependencies(zephyr_interface app_version_h)
zephyr_get(APP_VERSION_CUSTOMIZATION SYSBUILD LOCAL)
set_property(TARGET app_version_h PROPERTY APP_VERSION_CUSTOMIZATION ${APP_VERSION_CUSTOMIZATION})
endif()
# Unfortunately, the order in which CMakeLists.txt code is processed
@@ -1039,6 +1035,14 @@ if(CONFIG_USERSPACE)
set(PROCESS_GPERF ${ZEPHYR_BASE}/scripts/build/process_gperf.py)
endif()
get_property(GLOBAL_CSTD GLOBAL PROPERTY CSTD)
if(DEFINED GLOBAL_CSTD)
message(DEPRECATION
"Global CSTD property is deprecated, see Kconfig.zephyr for C Standard options.")
set(CSTD ${GLOBAL_CSTD})
list(APPEND CMAKE_C_COMPILE_FEATURES ${compile_features_${CSTD}})
endif()
# @Intent: Obtain compiler specific flag for specifying the c standard
zephyr_compile_options(
$<$<COMPILE_LANGUAGE:C>:$<TARGET_PROPERTY:compiler,cstd>${CSTD}>

@@ -133,7 +133,6 @@
/boards/arm64/intel_socfpga_agilex_socdk/ @siclim @ngboonkhai
/boards/arm64/intel_socfpga_agilex5_socdk/ @teikheng @gdengi
/boards/arm64/rcar_*/ @lorc @xakep-amatop
/boards/amd/acp_6_0_adsp/ @dineshkumar.kalva @basavaraj.hiregoudar
# All cmake related files
/doc/develop/tools/coccinelle.rst @himanshujha199640 @JuliaLawall
/doc/services/device_mgmt/smp_protocol.rst @de-nordic @nordicjm

@@ -448,7 +448,6 @@ config CODING_GUIDELINE_CHECK
config NATIVE_LIBC
bool
select FULL_LIBC_SUPPORTED
select TC_PROVIDES_POSIX_C_LANG_SUPPORT_R
help
Zephyr will use the host system C library.
@@ -471,11 +470,9 @@ config NATIVE_APPLICATION
default y if ARCH_POSIX
depends on !NATIVE_LIBRARY
select NATIVE_BUILD
select DEPRECATED
help
Build as a native application that can run on the host and using
resources and libraries provided by the host. This option is deprecated
and will be removed in Zephyr v4.3
resources and libraries provided by the host.
config NATIVE_LIBRARY
bool
@@ -1024,6 +1021,32 @@ config IS_BOOTLOADER
This option indicates that Zephyr will act as a bootloader to execute
a separate Zephyr image payload.
config BOOTLOADER_SRAM_SIZE
int "SRAM reserved for bootloader [DEPRECATED]"
default 0
depends on !XIP || IS_BOOTLOADER
depends on ARM || XTENSA
help
This option specifies the amount of SRAM (measured in kB) reserved for
a bootloader image, when either:
- the Zephyr image itself is to act as the bootloader, or
- Zephyr is a !XIP image, which implicitly assumes existence of a
bootloader that loads the Zephyr !XIP image onto SRAM.
This option is deprecated, users should transition to using DTS to set this, if needed.
To be removed after Zephyr 3.7 release.
config BOOTLOADER_SRAM_SIZE_DEPRECATED
bool
default y
select DEPRECATED
depends on BOOTLOADER_SRAM_SIZE != 0
depends on !XIP || IS_BOOTLOADER
depends on ARM || XTENSA
help
Non-prompt symbol to indicate that the deprecated BOOTLOADER_SRAM_SIZE Kconfig has a
non-0 value. Please transition to using devicetree.
config BOOTLOADER_BOSSA
bool "BOSSA bootloader support"
select USE_DT_CODE_PARTITION

@@ -314,13 +314,12 @@ Bluetooth HCI:
- HoZHel
files:
- include/zephyr/drivers/bluetooth/
- include/zephyr/drivers/bluetooth.h
- drivers/bluetooth/
- samples/bluetooth/hci_*/
- tests/bsim/bluetooth/hci_uart/
- dts/bindings/bluetooth/
labels:
- "area: Bluetooth HCI"
- "area: Bluetooth Host"
- "area: Bluetooth"
tests:
- bluetooth
@@ -390,10 +389,8 @@ Bluetooth Host:
- doc/connectivity/bluetooth/img/ctlr*
- doc/connectivity/bluetooth/api/audio/
- doc/connectivity/bluetooth/api/mesh/
- doc/connectivity/bluetooth/api/shell/iso.rst
- doc/connectivity/bluetooth/api/controller.rst
- doc/connectivity/bluetooth/shell/classic/a2dp.rst
- doc/connectivity/bluetooth/shell/host/iso.rst
- doc/connectivity/bluetooth/shell/audio/
- samples/bluetooth/bap*/
- samples/bluetooth/cap*/
- samples/bluetooth/hap*/
@@ -477,7 +474,6 @@ Bluetooth Audio:
- tests/bluetooth/tester/overlay-le-audio.conf
- tests/bluetooth/tester/src/audio/
- doc/connectivity/bluetooth/api/audio/
- doc/connectivity/bluetooth/shell/audio/
- samples/bluetooth/bap*/
- samples/bluetooth/cap*/
- samples/bluetooth/hap*/
@@ -496,7 +492,6 @@ Bluetooth Classic:
collaborators:
- jhedberg
files:
- doc/connectivity/bluetooth/shell/classic/a2dp.rst
- subsys/bluetooth/common/
- subsys/bluetooth/host/classic/
- include/zephyr/bluetooth/classic/
@@ -516,7 +511,7 @@ Bluetooth ISO:
- rugeGerritsen
files:
- include/zephyr/bluetooth/iso.h
- doc/connectivity/bluetooth/shell/host/iso.rst
- doc/connectivity/bluetooth/api/shell/iso.rst
- samples/bluetooth/iso_*/
- subsys/bluetooth/Kconfig.iso
- subsys/bluetooth/host/iso.c
@@ -1011,8 +1006,8 @@ Documentation Infrastructure:
Release Notes:
status: maintained
maintainers:
- fabiobaltieri
- kartben
- dkalowsk
- mmahadevan108
collaborators:
- kartben
files:
@@ -1591,7 +1586,9 @@ Release Notes:
- drivers.i3c
"Drivers: IEEE 802.15.4":
status: odd fixes
status: maintained
maintainers:
- fgrandel
collaborators:
- rlubos
- ankuns
@@ -2081,11 +2078,6 @@ Release Notes:
status: maintained
maintainers:
- jilaypandya
collaborators:
- bjarki-andreasen
- dipakgmx
- fabiobaltieri
- faxe1008
files:
- drivers/stepper/
- include/zephyr/drivers/stepper.h
@@ -2176,7 +2168,6 @@ Release Notes:
- rlubos
- kludentwo
- krish2718
- MaochenWang1
files:
- drivers/wifi/
- dts/bindings/wifi/
@@ -2203,7 +2194,7 @@ Release Notes:
collaborators:
- sachinthegreen
files:
- drivers/wifi/nrf_wifi/
- drivers/wifi/nrfwifi/
- dts/bindings/wifi/nordic,nrf70.yaml
- dts/bindings/wifi/nordic,nrf70-qspi.yaml
- dts/bindings/wifi/nordic,nrf70-spi.yaml
@@ -2932,6 +2923,8 @@ Networking:
status: maintained
maintainers:
- jukkar
collaborators:
- fgrandel
files:
- doc/connectivity/networking/api/gptp.rst
- include/zephyr/net/gptp.h
@@ -2998,12 +2991,14 @@ Networking:
- subsys/net/lib/ptp/
- samples/net/ptp/
labels:
- "area: PTP"
- "area: Networking"
tests:
- sample.net.ptp
"Networking: Native IEEE 802.15.4":
status: odd fixes
status: maintained
maintainers:
- fgrandel
collaborators:
- rlubos
- jukkar
@@ -3045,7 +3040,6 @@ Networking:
collaborators:
- rlubos
- krish2718
- MaochenWang1
files:
- doc/connectivity/networking/api/wifi.rst
- include/zephyr/net/wifi*.h
@@ -3144,7 +3138,6 @@ Power management:
status: maintained
maintainers:
- ceolin
- bjarki-andreasen
collaborators:
- nashif
- teburd
@@ -3500,8 +3493,6 @@ Nuvoton NPCM Platforms:
- soc/nuvoton/npcm/
- boards/nuvoton/npcm*/
- dts/arm/nuvoton/
- drivers/*/*_npcm*
- include/zephyr/dt-bindings/*/npcm_*
labels:
- "platform: Nuvoton NPCM"
@@ -3557,7 +3548,6 @@ Silabs SiM3U Platforms:
- drivers/*/Kconfig.si32
- dts/arm/silabs/sim3u*
- dts/bindings/*/*silabs,si32*
- include/zephyr/dt-bindings/pinctrl/*si32*
- soc/silabs/silabs_sim3/
labels:
- "platform: Silabs SiM3U"
@@ -3680,7 +3670,7 @@ NXP Drivers:
- drivers/*/*.mcux
- drivers/*/*.nxp
- drivers/*/*nxp*
- drivers/*/*/*kinetis*
- drivers/*/*kinetis*
- drivers/misc/*/nxp*
- include/zephyr/dt-bindings/*/*nxp*
- include/zephyr/dt-bindings/*/*mcux*
@@ -5065,16 +5055,6 @@ West:
labels:
- "area: native port"
"West project: nrf_wifi":
status: maintained
maintainers:
- krish2718
- sachinthegreen
files:
- modules/nrf_wifi/
labels:
- "area: Wi-Fi"
"West project: open-amp":
status: odd fixes
collaborators:
@@ -5261,8 +5241,6 @@ West:
maintainers:
- krish2718
- jukkar
collaborators:
- MaochenWang1
files:
- modules/hostap/
labels:
@@ -5285,6 +5263,7 @@ Xtensa arch:
- boards/qemu/xtensa/
- boards/cdns/xt-sim/
- soc/cdns/dc233c/
- soc/cdns/dc233c/
- soc/cdns/xtensa_sample_controller/
- tests/arch/xtensa/
labels:

@@ -1,5 +1,5 @@
VERSION_MAJOR = 4
VERSION_MINOR = 0
PATCHLEVEL = 99
PATCHLEVEL = 0
VERSION_TWEAK = 0
EXTRAVERSION =
EXTRAVERSION = rc2

@@ -55,8 +55,6 @@ config ARM64
select ARCH_HAS_DIRECTED_IPIS
select ARCH_HAS_DEMAND_PAGING
select ARCH_HAS_DEMAND_MAPPING
select ARCH_SUPPORTS_EVICTION_TRACKING
select EVICTION_TRACKING if DEMAND_PAGING
help
ARM64 (AArch64) architecture
@@ -696,12 +694,6 @@ config ARCH_SUPPORTS_ROM_START
config ARCH_SUPPORTS_EMPTY_IRQ_SPURIOUS
bool
config ARCH_SUPPORTS_EVICTION_TRACKING
bool
help
Architecture code supports page tracking for eviction algorithms
when demand paging is enabled.
config ARCH_HAS_EXTRA_EXCEPTION_INFO
bool
@@ -1148,9 +1140,3 @@ config ARCH_HAS_CUSTOM_BUSY_WAIT
It's possible that an architecture port cannot or does not want to use
the provided k_busy_wait(), but instead must do something custom. It must
enable this option in that case.
config ARCH_HAS_CUSTOM_CURRENT_IMPL
bool
help
Select when architecture implements arch_current_thread() &
arch_current_thread_set().

@@ -262,7 +262,7 @@ config ARC_CURRENT_THREAD_USE_NO_TLS
RGF_NUM_BANKS the parameter is disabled by default because banks synchronization
requires significant time, and it slows down performance.
ARCMWDT works with the TLS pointer in a different way than GCC. Optimized access to
TLS pointer via arch_current_thread() does not provide significant advantages
TLS pointer via _current variable does not provide significant advantages
in case of MetaWare.
config GEN_ISR_TABLES

@@ -34,5 +34,3 @@ add_subdirectory_ifdef(CONFIG_ARC_CORE_MPU mpu)
add_subdirectory_ifdef(CONFIG_ARC_SECURE_FIRMWARE secureshield)
zephyr_linker_sources(ROM_START SORT_KEY 0x0vectors vector_table.ld)
zephyr_library_sources_ifdef(CONFIG_LLEXT elf.c)

@@ -1,80 +0,0 @@
/*
* Copyright (c) 2024 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/llext/elf.h>
#include <zephyr/llext/llext.h>
#include <zephyr/llext/loader.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/util.h>
LOG_MODULE_REGISTER(elf, CONFIG_LLEXT_LOG_LEVEL);
#define R_ARC_32 4
#define R_ARC_B26 5 /* AKA R_ARC_64 */
#define R_ARC_S25W_PCREL 17
#define R_ARC_32_ME 27
/* ARCompact insns packed in memory have Middle Endian encoding */
#define ME(x) (((x & 0xffff0000) >> 16) | ((x & 0xffff) << 16))
/**
* @brief Architecture specific function for relocating shared elf
*
* Elf files contain a series of relocations described in multiple sections.
* These relocation instructions are architecture specific and each architecture
* supporting modules must implement this.
*
* The relocation codes are well documented:
* https://github.com/foss-for-synopsys-dwc-arc-processors/arc-ABI-manual/blob/master/ARCv2_ABI.pdf
* https://github.com/zephyrproject-rtos/binutils-gdb
*/
int arch_elf_relocate(elf_rela_t *rel, uintptr_t loc, uintptr_t sym_base_addr, const char *sym_name,
uintptr_t load_bias)
{
int ret = 0;
uint32_t insn = UNALIGNED_GET((uint32_t *)loc);
uint32_t value;
sym_base_addr += rel->r_addend;
int reloc_type = ELF32_R_TYPE(rel->r_info);
switch (reloc_type) {
case R_ARC_32:
case R_ARC_B26:
UNALIGNED_PUT(sym_base_addr, (uint32_t *)loc);
break;
case R_ARC_S25W_PCREL:
/* ((S + A) - P) >> 2
* S = symbol address
* A = addend
* P = relative offset to PCL
*/
value = (sym_base_addr + rel->r_addend - (loc & ~0x3)) >> 2;
insn = ME(insn);
/* disp25w */
insn = insn & ~0x7fcffcf;
insn |= ((value >> 0) & 0x01ff) << 18;
insn |= ((value >> 9) & 0x03ff) << 6;
insn |= ((value >> 19) & 0x000f) << 0;
insn = ME(insn);
UNALIGNED_PUT(insn, (uint32_t *)loc);
break;
case R_ARC_32_ME:
UNALIGNED_PUT(ME(sym_base_addr), (uint32_t *)loc);
break;
default:
LOG_ERR("unknown relocation: %u\n", reloc_type);
ret = -ENOEXEC;
break;
}
return ret;
}

@@ -55,7 +55,7 @@ static bool z_check_thread_stack_fail(const uint32_t fault_addr, uint32_t sp)
{
#if defined(CONFIG_MULTITHREADING)
uint32_t guard_end, guard_start;
const struct k_thread *thread = arch_current_thread();
const struct k_thread *thread = _current;
if (!thread) {
/* TODO: Under what circumstances could we get here ? */

@@ -49,8 +49,8 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
__asm__ volatile("sync");
/* If arch_current_thread() was aborted in the offload routine, we shouldn't be here */
__ASSERT_NO_MSG((arch_current_thread()->base.thread_state & _THREAD_DEAD) == 0);
/* If _current was aborted in the offload routine, we shouldn't be here */
__ASSERT_NO_MSG((_current->base.thread_state & _THREAD_DEAD) == 0);
}
/* need to be executed on every core in the system */

@@ -16,7 +16,6 @@
#include <ipi.h>
#include <zephyr/init.h>
#include <zephyr/irq.h>
#include <zephyr/platform/hooks.h>
#include <arc_irq_offload.h>
volatile struct {
@@ -116,11 +115,6 @@ void arch_secondary_cpu_init(int cpu_num)
DT_IRQ(DT_NODELABEL(ici), priority), 0);
irq_enable(DT_IRQN(DT_NODELABEL(ici)));
#endif
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
/* call the function set by arch_cpu_start */
fn = arc_cpu_init[cpu_num].fn;

@@ -210,7 +210,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
#ifdef CONFIG_MULTITHREADING
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
{
*old_thread = arch_current_thread();
*old_thread = _current;
return z_get_next_switch_handle(NULL);
}
@@ -227,16 +227,16 @@ void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3)
{
setup_stack_vars(arch_current_thread());
setup_stack_vars(_current);
/* possible optimization: no need to load mem domain anymore */
/* need to lock cpu here ? */
configure_mpu_thread(arch_current_thread());
configure_mpu_thread(_current);
z_arc_userspace_enter(user_entry, p1, p2, p3,
(uint32_t)arch_current_thread()->stack_info.start,
(arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta), arch_current_thread());
(uint32_t)_current->stack_info.start,
(_current->stack_info.size -
_current->stack_info.delta), _current);
CODE_UNREACHABLE;
}
#endif
@@ -336,7 +336,7 @@ int arc_vpx_lock(k_timeout_t timeout)
id = _current_cpu->id;
#if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
__ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), "");
__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
#endif
k_spin_unlock(&lock, key);
@@ -355,7 +355,7 @@ void arc_vpx_unlock(void)
key = k_spin_lock(&lock);
#if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
__ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), "");
__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
#endif
id = _current_cpu->id;
k_spin_unlock(&lock, key);

@@ -29,7 +29,7 @@ size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr)
void *_Preserve_flags _mwget_tls(void)
{
return (void *)(arch_current_thread()->tls);
return (void *)(_current->tls);
}
#else

@@ -26,8 +26,6 @@
#include <v2/irq.h>
#include <zephyr/platform/hooks.h>
#ifdef __cplusplus
extern "C" {
#endif
@@ -35,10 +33,6 @@ extern "C" {
static ALWAYS_INLINE void arch_kernel_init(void)
{
z_irq_setup();
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
}

@@ -178,7 +178,7 @@ bool z_arm_fault_undef_instruction_fp(void)
* context because it is about to be overwritten.
*/
if (((_current_cpu->nested == 2)
&& (arch_current_thread()->base.user_options & K_FP_REGS))
&& (_current->base.user_options & K_FP_REGS))
|| ((_current_cpu->nested > 2)
&& (spill_esf->undefined & FPEXC_EN))) {
/*
@@ -196,7 +196,7 @@ bool z_arm_fault_undef_instruction_fp(void)
* means that a thread that uses the VFP does not have to,
* but should, set K_FP_REGS on thread creation.
*/
arch_current_thread()->base.user_options |= K_FP_REGS;
_current->base.user_options |= K_FP_REGS;
}
return false;

@@ -62,6 +62,7 @@ static inline void z_arm_floating_point_init(void)
__set_CPACR(reg_val);
barrier_isync_fence_full();
#if !defined(CONFIG_FPU_SHARING)
/*
* FPEXC: Floating-Point Exception Control register
* comp. ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition,
@@ -83,6 +84,7 @@ static inline void z_arm_floating_point_init(void)
*/
__set_FPEXC(FPEXC_EN);
#endif
#endif
}
#endif /* CONFIG_CPU_HAS_FPU */

@@ -12,7 +12,6 @@
#include "zephyr/cache.h"
#include "zephyr/kernel/thread_stack.h"
#include "zephyr/toolchain/gcc.h"
#include <zephyr/platform/hooks.h>
#define INV_MPID UINT32_MAX
@@ -199,10 +198,6 @@ void arch_secondary_cpu_init(void)
*/
#endif
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
fn = arm_cpu_boot_params.fn;
arg = arm_cpu_boot_params.arg;
barrier_dsync_fence_full();

@@ -17,8 +17,8 @@
int arch_swap(unsigned int key)
{
/* store off key and return value */
arch_current_thread()->arch.basepri = key;
arch_current_thread()->arch.swap_return_value = -EAGAIN;
_current->arch.basepri = key;
_current->arch.swap_return_value = -EAGAIN;
z_arm_cortex_r_svc();
irq_unlock(key);
@@ -26,5 +26,5 @@ int arch_swap(unsigned int key)
/* Context switch is performed here. Returning implies the
* thread has been context-switched-in again.
*/
return arch_current_thread()->arch.swap_return_value;
return _current->arch.swap_return_value;
}

@@ -70,7 +70,7 @@ SECTION_FUNC(TEXT, z_arm_do_swap)
#if defined(CONFIG_FPU_SHARING)
ldrb r0, [r2, #_thread_offset_to_user_options]
tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */
tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */
beq out_fp_inactive
mov ip, #FPEXC_EN
@@ -152,7 +152,7 @@ out_fp_inactive:
#if defined(CONFIG_FPU_SHARING)
ldrb r0, [r2, #_thread_offset_to_user_options]
tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */
tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */
beq in_fp_inactive
mov r3, #FPEXC_EN
@@ -336,14 +336,12 @@ _context_switch:
_oops:
/*
* Pass the exception frame to z_do_kernel_oops.
* Pass the exception frame to z_do_kernel_oops. r0 contains the
* exception reason.
*/
cps #MODE_SYS
mov r0, sp
cps #MODE_SVC
/* Zero callee_regs and exc_return (only used on Cortex-M) */
mov r1, #0
mov r2, #0
bl z_do_kernel_oops
b z_arm_int_exit

@@ -150,12 +150,10 @@ offload:
_oops:
/*
* Pass the exception frame to z_do_kernel_oops.
* Pass the exception frame to z_do_kernel_oops. r0 contains the
* exception reason.
*/
mov r0, sp
/* Zero callee_regs and exc_return (only used on Cortex-M) */
mov r1, #0
mov r2, #0
bl z_do_kernel_oops
inv:

@@ -198,8 +198,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
{
/* Set up privileged stack before entering user mode */
arch_current_thread()->arch.priv_stack_start =
(uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj);
_current->arch.priv_stack_start =
(uint32_t)z_priv_stack_find(_current->stack_obj);
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_THREAD_STACK_INFO)
/* We're dropping to user mode which means the guard area is no
@@ -208,13 +208,13 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
* which accounted for memory borrowed from the thread stack.
*/
#if FP_GUARD_EXTRA_SIZE > 0
if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE;
arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE;
if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
_current->stack_info.start -= FP_GUARD_EXTRA_SIZE;
_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
}
#endif /* FP_GUARD_EXTRA_SIZE */
arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_THREAD_STACK_INFO */
/* Stack guard area reserved at the bottom of the thread's
@@ -222,23 +222,23 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
* buffer area accordingly.
*/
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
arch_current_thread()->arch.priv_stack_start +=
((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
_current->arch.priv_stack_start +=
((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
arch_current_thread()->arch.priv_stack_end =
arch_current_thread()->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
_current->arch.priv_stack_end =
_current->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
#endif
z_arm_userspace_enter(user_entry, p1, p2, p3,
(uint32_t)arch_current_thread()->stack_info.start,
arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta);
(uint32_t)_current->stack_info.start,
_current->stack_info.size -
_current->stack_info.delta);
CODE_UNREACHABLE;
}
@@ -304,7 +304,7 @@ EXPORT_SYMBOL(z_arm_thread_is_in_user_mode);
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
{
#if defined(CONFIG_MULTITHREADING)
const struct k_thread *thread = arch_current_thread();
const struct k_thread *thread = _current;
if (thread == NULL) {
return 0;
@@ -314,7 +314,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
#if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \
defined(CONFIG_MPU_STACK_GUARD)
uint32_t guard_len =
((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
/* If MPU_STACK_GUARD is not enabled, the guard length is
@@ -377,7 +377,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
if (thread != arch_current_thread()) {
if (thread != _current) {
return -EINVAL;
}

@@ -40,6 +40,54 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#define EACD(edr) (((edr) & SYSMPU_EDR_EACD_MASK) >> SYSMPU_EDR_EACD_SHIFT)
#endif
/* Exception Return (EXC_RETURN) is provided in LR upon exception entry.
* It is used to perform an exception return and to detect possible state
* transition upon exception.
*/
/* Prefix. Indicates that this is an EXC_RETURN value.
* This field reads as 0b11111111.
*/
#define EXC_RETURN_INDICATOR_PREFIX (0xFF << 24)
/* bit[0]: Exception Secure. The security domain the exception was taken to. */
#define EXC_RETURN_EXCEPTION_SECURE_Pos 0
#define EXC_RETURN_EXCEPTION_SECURE_Msk \
BIT(EXC_RETURN_EXCEPTION_SECURE_Pos)
#define EXC_RETURN_EXCEPTION_SECURE_Non_Secure 0
#define EXC_RETURN_EXCEPTION_SECURE_Secure EXC_RETURN_EXCEPTION_SECURE_Msk
/* bit[2]: Stack Pointer selection. */
#define EXC_RETURN_SPSEL_Pos 2
#define EXC_RETURN_SPSEL_Msk BIT(EXC_RETURN_SPSEL_Pos)
#define EXC_RETURN_SPSEL_MAIN 0
#define EXC_RETURN_SPSEL_PROCESS EXC_RETURN_SPSEL_Msk
/* bit[3]: Mode. Indicates the Mode that was stacked from. */
#define EXC_RETURN_MODE_Pos 3
#define EXC_RETURN_MODE_Msk BIT(EXC_RETURN_MODE_Pos)
#define EXC_RETURN_MODE_HANDLER 0
#define EXC_RETURN_MODE_THREAD EXC_RETURN_MODE_Msk
/* bit[4]: Stack frame type. Indicates whether the stack frame is a standard
* integer only stack frame or an extended floating-point stack frame.
*/
#define EXC_RETURN_STACK_FRAME_TYPE_Pos 4
#define EXC_RETURN_STACK_FRAME_TYPE_Msk BIT(EXC_RETURN_STACK_FRAME_TYPE_Pos)
#define EXC_RETURN_STACK_FRAME_TYPE_EXTENDED 0
#define EXC_RETURN_STACK_FRAME_TYPE_STANDARD EXC_RETURN_STACK_FRAME_TYPE_Msk
/* bit[5]: Default callee register stacking. Indicates whether the default
* stacking rules apply, or whether the callee registers are already on the
* stack.
*/
#define EXC_RETURN_CALLEE_STACK_Pos 5
#define EXC_RETURN_CALLEE_STACK_Msk BIT(EXC_RETURN_CALLEE_STACK_Pos)
#define EXC_RETURN_CALLEE_STACK_SKIPPED 0
#define EXC_RETURN_CALLEE_STACK_DEFAULT EXC_RETURN_CALLEE_STACK_Msk
/* bit[6]: Secure or Non-secure stack. Indicates whether a Secure or
* Non-secure stack is used to restore stack frame on exception return.
*/
#define EXC_RETURN_RETURN_STACK_Pos 6
#define EXC_RETURN_RETURN_STACK_Msk BIT(EXC_RETURN_RETURN_STACK_Pos)
#define EXC_RETURN_RETURN_STACK_Non_Secure 0
#define EXC_RETURN_RETURN_STACK_Secure EXC_RETURN_RETURN_STACK_Msk
/* Integrity signature for an ARMv8-M implementation */
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
#define INTEGRITY_SIGNATURE_STD 0xFEFA125BUL
@@ -1064,7 +1112,9 @@ void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return,
__ASSERT(esf != NULL,
"ESF could not be retrieved successfully. Shall never occur.");
z_arm_set_fault_sp(esf, exc_return);
#ifdef CONFIG_DEBUG_COREDUMP
z_arm_coredump_fault_sp = POINTER_TO_UINT(esf);
#endif
reason = fault_handle(esf, fault, &recoverable);
if (recoverable) {

@@ -14,145 +14,6 @@
#include <zephyr/arch/cpu.h>
#include <zephyr/arch/common/pm_s2ram.h>
/**
* Macro expanding to an integer literal equal to the offset of
* field `sr_name` in `struct __cpu_context`. This macro has to
* be implemented in C, because GEN_OFFSET_SYM provides offsets
* as C preprocessor definitions - there are not visible to the
* assembler.
*
* See also: `arch/arm/core/offsets/offsets_aarch32.c`
*/
#define CPU_CTX_SR_OFFSET(sr_name) \
___cpu_context_t_ ## sr_name ## _OFFSET
/**
* Macros used to save / load a special register in __cpu_context.
* These also have to be implemented in C due to CPU_CTX_SR_OFFSET.
*/
#define SAVE_SPECIAL_REG(sr_name, cpu_ctx_reg, tmp_reg) \
mrs tmp_reg, sr_name; \
str tmp_reg, [cpu_ctx_reg, # CPU_CTX_SR_OFFSET(sr_name)];
#define RESTORE_SPECIAL_REG(sr_name, cpu_ctx_reg, tmp_reg) \
ldr tmp_reg, [cpu_ctx_reg, # CPU_CTX_SR_OFFSET(sr_name)]; \
msr sr_name, tmp_reg;
/*
* The following macros could be written as assembler macros, but C is used
* for portability (assembler macro syntax may differ between toolchains).
*/
/*
* Pushes registers r4~r12 and lr on the stack.
* r0 is unmodified but other GPRs may be overwritten.
*/
#if !defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* `push` on ARMv6-M / ARMv8-M Baseline:
* only r0~r7 and lr may be pushed
*/
#define PUSH_GPRS \
push {r4-r7}; \
mov r1, r8; \
mov r2, r9; \
mov r3, r10; \
mov r4, r11; \
mov r5, r12; \
push {r1-r5, lr}
#else
/* `push` on ARMv7-M and ARMv8-M Mainline: no limitation */
#define PUSH_GPRS \
push {r4-r12, lr}
#endif /* !CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
/*
* Pops registers r4~r12 and lr from the stack
* r0 is unmodified but other GPRs may be overwritten.
*/
#if !defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* `pop` on ARMv6-M / ARMv8-M Baseline:
* can only pop to r0~r7 and pc (not lr!)
*/
#define POP_GPRS \
pop {r1-r6}; \
mov lr, r6; \
mov r12, r5; \
mov r11, r4; \
mov r10, r3; \
mov r9, r2; \
mov r8, r1; \
pop {r4-r7}
#else
/* `pop` on ARMv7-M and ARMv8-M Mainline: no limitation */
#define POP_GPRS \
pop {r4-r12, lr}
#endif /* !CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* Registers present only on ARMv7-M and ARMv8-M Mainline */
#define SAVE_FM_BP_REGS(cpu_ctx, tmp_reg) \
SAVE_SPECIAL_REG(faultmask, cpu_ctx, tmp_reg) \
SAVE_SPECIAL_REG(basepri, cpu_ctx, tmp_reg)
#define RESTORE_FM_BP_REGS(cpu_ctx, tmp_reg) \
RESTORE_SPECIAL_REG(faultmask, cpu_ctx, tmp_reg) \
RESTORE_SPECIAL_REG(basepri, cpu_ctx, tmp_reg)
#else
/* Registers not present: do nothing */
#define SAVE_FM_BP_REGS(cpu_ctx, tmp_reg)
#define RESTORE_FM_BP_REGS(cpu_ctx, tmp_reg)
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
#if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM)
/* Registers present only on certain ARMv8-M implementations */
#define SAVE_SPLIM_REGS(cpu_ctx, tmp_reg) \
SAVE_SPECIAL_REG(msplim, cpu_ctx, tmp_reg) \
SAVE_SPECIAL_REG(psplim, cpu_ctx, tmp_reg)
#define RESTORE_SPLIM_REGS(cpu_ctx, tmp_reg) \
RESTORE_SPECIAL_REG(msplim, cpu_ctx, tmp_reg) \
RESTORE_SPECIAL_REG(psplim, cpu_ctx, tmp_reg)
#else
/* Registers not present: do nothing */
#define SAVE_SPLIM_REGS(cpu_ctx, tmp_reg)
#define RESTORE_SPLIM_REGS(cpu_ctx, tmp_reg)
#endif /* CONFIG_CPU_CORTEX_M_HAS_SPLIM */
/*
* Saves the CPU's special registers in the `struct __cpu_context`
* pointed to by the `cpu_ctx` register.
* The `tmp_reg` register is overwritten as part of this process.
*/
#define SAVE_SPECIAL_REGISTERS(cpu_ctx, tmp_reg) \
SAVE_SPECIAL_REG(msp, cpu_ctx, tmp_reg) \
SAVE_SPECIAL_REG(psp, cpu_ctx, tmp_reg) \
SAVE_SPECIAL_REG(primask, cpu_ctx, tmp_reg) \
SAVE_SPLIM_REGS( cpu_ctx, tmp_reg) \
SAVE_FM_BP_REGS( cpu_ctx, tmp_reg) \
SAVE_SPECIAL_REG(control, cpu_ctx, tmp_reg)
/*
* Restores the CPU's special registers from the `struct __cpu_context`
* pointed to by the `cpu_ctx` register.
* The `tmp_reg` register is overwritten as part of this process.
*
* N.B.: ISB at the end is required because "Software must use an ISB
* barrier instruction to ensure a write to the CONTROL register takes
* effect before the next instruction is executed."
*
* If this macro is modified, make sure CONTROL is always the last
* restored register, and that an ISB follows the MSR instruction.
*/
#define RESTORE_SPECIAL_REGISTERS(cpu_ctx, tmp_reg) \
RESTORE_SPECIAL_REG(msp, cpu_ctx, tmp_reg) \
RESTORE_SPECIAL_REG(psp, cpu_ctx, tmp_reg) \
RESTORE_SPECIAL_REG(primask, cpu_ctx, tmp_reg) \
RESTORE_SPLIM_REGS( cpu_ctx, tmp_reg) \
RESTORE_FM_BP_REGS( cpu_ctx, tmp_reg) \
RESTORE_SPECIAL_REG(control, cpu_ctx, tmp_reg) \
isb
_ASM_FILE_PROLOGUE
GTEXT(pm_s2ram_mark_set)
@@ -165,7 +26,7 @@ SECTION_FUNC(TEXT, arch_pm_s2ram_suspend)
*
* r0: address of the system_off function
*/
PUSH_GPRS
push {r4-r12, lr}
/* Move system_off to protected register. */
mov r4, r0
@@ -173,7 +34,38 @@ SECTION_FUNC(TEXT, arch_pm_s2ram_suspend)
/* Store CPU context */
ldr r1, =_cpu_context
SAVE_SPECIAL_REGISTERS(/* ctx: */ r1, /* tmp: */ r2)
mrs r2, msp
str r2, [r1, #___cpu_context_t_msp_OFFSET]
mrs r2, msplim
str r2, [r1, #___cpu_context_t_msplim_OFFSET]
mrs r2, psp
str r2, [r1, #___cpu_context_t_psp_OFFSET]
mrs r2, psplim
str r2, [r1, #___cpu_context_t_psplim_OFFSET]
mrs r2, apsr
str r2, [r1, #___cpu_context_t_apsr_OFFSET]
mrs r2, ipsr
str r2, [r1, #___cpu_context_t_ipsr_OFFSET]
mrs r2, epsr
str r2, [r1, #___cpu_context_t_epsr_OFFSET]
mrs r2, primask
str r2, [r1, #___cpu_context_t_primask_OFFSET]
mrs r2, faultmask
str r2, [r1, #___cpu_context_t_faultmask_OFFSET]
mrs r2, basepri
str r2, [r1, #___cpu_context_t_basepri_OFFSET]
mrs r2, control
str r2, [r1, #___cpu_context_t_control_OFFSET]
/*
* Mark entering suspend to RAM.
@@ -203,7 +95,7 @@ SECTION_FUNC(TEXT, arch_pm_s2ram_suspend)
/* Move system_off back to r0 as return value */
mov r0, r4
POP_GPRS
pop {r4-r12, lr}
bx lr
@@ -225,12 +117,44 @@ resume:
*/
ldr r0, =_cpu_context
RESTORE_SPECIAL_REGISTERS(/* ctx: */ r0, /* tmp: */ r1)
ldr r1, [r0, #___cpu_context_t_msp_OFFSET]
msr msp, r1
POP_GPRS
ldr r1, [r0, #___cpu_context_t_msplim_OFFSET]
msr msplim, r1
ldr r1, [r0, #___cpu_context_t_psp_OFFSET]
msr psp, r1
ldr r1, [r0, #___cpu_context_t_psplim_OFFSET]
msr psplim, r1
ldr r1, [r0, #___cpu_context_t_apsr_OFFSET]
msr apsr_nzcvq, r1
ldr r1, [r0, #___cpu_context_t_ipsr_OFFSET]
msr ipsr, r1
ldr r1, [r0, #___cpu_context_t_epsr_OFFSET]
msr epsr, r1
ldr r1, [r0, #___cpu_context_t_primask_OFFSET]
msr primask, r1
ldr r1, [r0, #___cpu_context_t_faultmask_OFFSET]
msr faultmask, r1
ldr r1, [r0, #___cpu_context_t_basepri_OFFSET]
msr basepri, r1
ldr r1, [r0, #___cpu_context_t_control_OFFSET]
msr control, r1
isb
pop {r4-r12, lr}
/*
* Set the return value and return
*/
movs r0, #0
mov r0, #0
bx lr

@@ -33,8 +33,8 @@
int arch_swap(unsigned int key)
{
/* store off key and return value */
arch_current_thread()->arch.basepri = key;
arch_current_thread()->arch.swap_return_value = -EAGAIN;
_current->arch.basepri = key;
_current->arch.swap_return_value = -EAGAIN;
/* set pending bit to make sure we will take a PendSV exception */
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
@@ -45,5 +45,5 @@ int arch_swap(unsigned int key)
/* Context switch is performed here. Returning implies the
* thread has been context-switched-in again.
*/
return arch_current_thread()->arch.swap_return_value;
return _current->arch.swap_return_value;
}

@@ -288,7 +288,7 @@ in_fp_endif:
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
/* Re-program dynamic memory map */
push {r2,lr}
mov r0, r2 /* arch_current_thread() thread */
mov r0, r2 /* _current thread */
bl z_arm_configure_dynamic_mpu_regions
pop {r2,lr}
#endif
@@ -447,7 +447,6 @@ _oops:
mov r1, sp /* pointer to _callee_saved_t */
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
#endif /* CONFIG_EXTRA_EXCEPTION_INFO */
mov r2, lr /* EXC_RETURN */
bl z_do_kernel_oops
/* return from SVC exception is done here */
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)

@@ -231,8 +231,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
{
/* Set up privileged stack before entering user mode */
arch_current_thread()->arch.priv_stack_start =
(uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj);
_current->arch.priv_stack_start =
(uint32_t)z_priv_stack_find(_current->stack_obj);
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_THREAD_STACK_INFO)
/* We're dropping to user mode which means the guard area is no
@@ -241,13 +241,13 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
* which accounted for memory borrowed from the thread stack.
*/
#if FP_GUARD_EXTRA_SIZE > 0
if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE;
arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE;
if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
_current->stack_info.start -= FP_GUARD_EXTRA_SIZE;
_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
}
#endif /* FP_GUARD_EXTRA_SIZE */
arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_THREAD_STACK_INFO */
/* Stack guard area reserved at the bottom of the thread's
@@ -255,18 +255,18 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
* buffer area accordingly.
*/
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
arch_current_thread()->arch.priv_stack_start +=
((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
_current->arch.priv_stack_start +=
((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */
z_arm_userspace_enter(user_entry, p1, p2, p3,
(uint32_t)arch_current_thread()->stack_info.start,
arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta);
(uint32_t)_current->stack_info.start,
_current->stack_info.size -
_current->stack_info.delta);
CODE_UNREACHABLE;
}
@ -379,7 +379,7 @@ void configure_builtin_stack_guard(struct k_thread *thread)
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
{
#if defined(CONFIG_MULTITHREADING)
const struct k_thread *thread = arch_current_thread();
const struct k_thread *thread = _current;
if (thread == NULL) {
return 0;
@ -389,7 +389,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
#if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \
defined(CONFIG_MPU_STACK_GUARD)
uint32_t guard_len =
((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
/* If MPU_STACK_GUARD is not enabled, the guard length is
@ -452,7 +452,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
if (thread != arch_current_thread()) {
if (thread != _current) {
return -EINVAL;
}
@ -522,7 +522,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
{
z_arm_prepare_switch_to_main();
arch_current_thread_set(main_thread);
_current = main_thread;
#if defined(CONFIG_THREAD_LOCAL_STORAGE)
/* On Cortex-M, TLS uses a global variable as pointer to


@ -27,7 +27,7 @@ void z_impl_k_thread_abort(k_tid_t thread)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);
if (arch_current_thread() == thread) {
if (_current == thread) {
if (arch_is_in_isr()) {
/* ARM is unlike most arches in that this is true
* even for non-peripheral interrupts, even though


@ -28,7 +28,8 @@
*/
static inline uint64_t z_arm_dwt_freq_get(void)
{
#if defined(CONFIG_CMSIS_CORE_HAS_SYSTEM_CORE_CLOCK)
#if defined(CONFIG_SOC_FAMILY_NORDIC_NRF) || \
defined(CONFIG_SOC_SERIES_IMXRT6XX)
/*
* DWT frequency is taken directly from the
* System Core clock (CPU) frequency, if the


@ -101,9 +101,8 @@ void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf)
*
* @param esf exception frame
* @param callee_regs Callee-saved registers (R4-R11)
* @param exc_return EXC_RETURN value present in LR after exception entry.
*/
void z_do_kernel_oops(const struct arch_esf *esf, _callee_saved_t *callee_regs, uint32_t exc_return)
void z_do_kernel_oops(const struct arch_esf *esf, _callee_saved_t *callee_regs)
{
#if !(defined(CONFIG_EXTRA_EXCEPTION_INFO) && defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE))
ARG_UNUSED(callee_regs);
@ -111,8 +110,6 @@ void z_do_kernel_oops(const struct arch_esf *esf, _callee_saved_t *callee_regs,
/* Stacked R0 holds the exception reason. */
unsigned int reason = esf->basic.r0;
z_arm_set_fault_sp(esf, exc_return);
#if defined(CONFIG_USERSPACE)
if (z_arm_preempted_thread_in_user_mode(esf)) {
/*


@ -83,21 +83,17 @@ GEN_OFFSET_SYM(_thread_stack_info_t, start);
*/
#if defined(CONFIG_PM_S2RAM)
GEN_OFFSET_SYM(_cpu_context_t, msp);
GEN_OFFSET_SYM(_cpu_context_t, msplim);
GEN_OFFSET_SYM(_cpu_context_t, psp);
GEN_OFFSET_SYM(_cpu_context_t, primask);
GEN_OFFSET_SYM(_cpu_context_t, control);
GEN_OFFSET_SYM(_cpu_context_t, psplim);
GEN_OFFSET_SYM(_cpu_context_t, apsr);
GEN_OFFSET_SYM(_cpu_context_t, ipsr);
GEN_OFFSET_SYM(_cpu_context_t, epsr);
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* Registers present only on ARMv7-M and ARMv8-M Mainline */
GEN_OFFSET_SYM(_cpu_context_t, primask);
GEN_OFFSET_SYM(_cpu_context_t, faultmask);
GEN_OFFSET_SYM(_cpu_context_t, basepri);
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */
#if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM)
/* Registers present only on certain ARMv8-M implementations */
GEN_OFFSET_SYM(_cpu_context_t, msplim);
GEN_OFFSET_SYM(_cpu_context_t, psplim);
#endif /* CONFIG_CPU_CORTEX_M_HAS_SPLIM */
GEN_OFFSET_SYM(_cpu_context_t, control);
#endif /* CONFIG_PM_S2RAM */
#endif /* _ARM_OFFSETS_INC_ */


@ -43,15 +43,6 @@ static ALWAYS_INLINE bool arch_is_in_nested_exception(const struct arch_esf *esf
return (arch_curr_cpu()->arch.exc_depth > 1U) ? (true) : (false);
}
/**
* @brief Empty stub used when core dump support is not enabled
*
* @param esf exception frame
* @param exc_return EXC_RETURN value present in LR after exception entry.
*/
static ALWAYS_INLINE void z_arm_set_fault_sp(const struct arch_esf *esf, uint32_t exc_return)
{}
#if defined(CONFIG_USERSPACE)
/*
* This function is used by privileged code to determine if the thread


@ -20,8 +20,6 @@
#ifndef ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_A_R_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_A_R_KERNEL_ARCH_FUNC_H_
#include <zephyr/platform/hooks.h>
#ifdef __cplusplus
extern "C" {
#endif
@ -30,9 +28,6 @@ extern "C" {
static ALWAYS_INLINE void arch_kernel_init(void)
{
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
}
#ifndef CONFIG_USE_SWITCH


@ -39,54 +39,6 @@ extern volatile irq_offload_routine_t offload_routine;
*/
#define AIRCR_VECT_KEY_PERMIT_WRITE 0x05FAUL
/* Exception Return (EXC_RETURN) is provided in LR upon exception entry.
* It is used to perform an exception return and to detect possible state
* transition upon exception.
*/
/* Prefix. Indicates that this is an EXC_RETURN value.
* This field reads as 0b11111111.
*/
#define EXC_RETURN_INDICATOR_PREFIX (0xFF << 24)
/* bit[0]: Exception Secure. The security domain the exception was taken to. */
#define EXC_RETURN_EXCEPTION_SECURE_Pos 0
#define EXC_RETURN_EXCEPTION_SECURE_Msk \
BIT(EXC_RETURN_EXCEPTION_SECURE_Pos)
#define EXC_RETURN_EXCEPTION_SECURE_Non_Secure 0
#define EXC_RETURN_EXCEPTION_SECURE_Secure EXC_RETURN_EXCEPTION_SECURE_Msk
/* bit[2]: Stack Pointer selection. */
#define EXC_RETURN_SPSEL_Pos 2
#define EXC_RETURN_SPSEL_Msk BIT(EXC_RETURN_SPSEL_Pos)
#define EXC_RETURN_SPSEL_MAIN 0
#define EXC_RETURN_SPSEL_PROCESS EXC_RETURN_SPSEL_Msk
/* bit[3]: Mode. Indicates the Mode that was stacked from. */
#define EXC_RETURN_MODE_Pos 3
#define EXC_RETURN_MODE_Msk BIT(EXC_RETURN_MODE_Pos)
#define EXC_RETURN_MODE_HANDLER 0
#define EXC_RETURN_MODE_THREAD EXC_RETURN_MODE_Msk
/* bit[4]: Stack frame type. Indicates whether the stack frame is a standard
* integer only stack frame or an extended floating-point stack frame.
*/
#define EXC_RETURN_STACK_FRAME_TYPE_Pos 4
#define EXC_RETURN_STACK_FRAME_TYPE_Msk BIT(EXC_RETURN_STACK_FRAME_TYPE_Pos)
#define EXC_RETURN_STACK_FRAME_TYPE_EXTENDED 0
#define EXC_RETURN_STACK_FRAME_TYPE_STANDARD EXC_RETURN_STACK_FRAME_TYPE_Msk
/* bit[5]: Default callee register stacking. Indicates whether the default
* stacking rules apply, or whether the callee registers are already on the
* stack.
*/
#define EXC_RETURN_CALLEE_STACK_Pos 5
#define EXC_RETURN_CALLEE_STACK_Msk BIT(EXC_RETURN_CALLEE_STACK_Pos)
#define EXC_RETURN_CALLEE_STACK_SKIPPED 0
#define EXC_RETURN_CALLEE_STACK_DEFAULT EXC_RETURN_CALLEE_STACK_Msk
/* bit[6]: Secure or Non-secure stack. Indicates whether a Secure or
* Non-secure stack is used to restore stack frame on exception return.
*/
#define EXC_RETURN_RETURN_STACK_Pos 6
#define EXC_RETURN_RETURN_STACK_Msk BIT(EXC_RETURN_RETURN_STACK_Pos)
#define EXC_RETURN_RETURN_STACK_Non_Secure 0
#define EXC_RETURN_RETURN_STACK_Secure EXC_RETURN_RETURN_STACK_Msk
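/*
 * A minimal decoding sketch using the masks above (hypothetical helpers,
 * not part of the tree):
 */
static inline bool exc_return_is_extended_frame(uint32_t lr)
{
	/* bit[4] reads as 0 when an extended (FP) frame was stacked */
	return (lr & EXC_RETURN_STACK_FRAME_TYPE_Msk) ==
	       EXC_RETURN_STACK_FRAME_TYPE_EXTENDED;
}

static inline bool exc_return_from_thread_mode(uint32_t lr)
{
	return (lr & EXC_RETURN_MODE_Msk) == EXC_RETURN_MODE_THREAD;
}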
/*
* The current executing vector is found in the IPSR register. All
* IRQs and system exceptions are considered as interrupt context.
@ -232,43 +184,6 @@ static ALWAYS_INLINE void z_arm_clear_faults(void)
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
}
/**
* @brief Set z_arm_coredump_fault_sp to stack pointer value expected by GDB
*
* @param esf exception frame
* @param exc_return EXC_RETURN value present in LR after exception entry.
*/
static ALWAYS_INLINE void z_arm_set_fault_sp(const struct arch_esf *esf, uint32_t exc_return)
{
#ifdef CONFIG_DEBUG_COREDUMP
z_arm_coredump_fault_sp = POINTER_TO_UINT(esf);
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) || defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* Gdb expects a stack pointer that does not include the exception stack frame in order to
* unwind. So adjust the stack pointer accordingly.
*/
z_arm_coredump_fault_sp += sizeof(esf->basic);
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/* Assess whether thread had been using the FP registers and add size of additional
* registers if necessary
*/
if ((exc_return & EXC_RETURN_STACK_FRAME_TYPE_STANDARD) ==
EXC_RETURN_STACK_FRAME_TYPE_EXTENDED) {
z_arm_coredump_fault_sp += sizeof(esf->fpu);
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#if !(defined(CONFIG_ARMV8_M_MAINLINE) || defined(CONFIG_ARMV8_M_BASELINE))
if ((esf->basic.xpsr & SCB_CCR_STKALIGN_Msk) == SCB_CCR_STKALIGN_Msk) {
/* Adjust stack alignment after PSR bit[9] detected */
z_arm_coredump_fault_sp |= 0x4;
}
#endif /* !CONFIG_ARMV8_M_MAINLINE */
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE || CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#endif /* CONFIG_DEBUG_COREDUMP */
}
/**
* @brief Assess whether a debug monitor event should be treated as an error
*


@ -20,8 +20,6 @@
#ifndef ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_KERNEL_ARCH_FUNC_H_
#include <zephyr/platform/hooks.h>
#ifdef __cplusplus
extern "C" {
#endif
@ -55,10 +53,6 @@ static ALWAYS_INLINE void arch_kernel_init(void)
*/
z_arm_configure_static_mpu_regions();
#endif /* CONFIG_ARM_MPU */
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
}
static ALWAYS_INLINE void


@ -727,7 +727,7 @@ static int configure_dynamic_mpu_regions(struct k_thread *thread)
*/
thread->arch.region_num = (uint8_t)region_num;
if (thread == arch_current_thread()) {
if (thread == _current) {
ret = flush_dynamic_regions_to_mpu(dyn_regions, region_num);
}
@ -795,7 +795,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread)
ret = configure_dynamic_mpu_regions(thread);
#ifdef CONFIG_SMP
if (ret == 0 && thread != arch_current_thread()) {
if (ret == 0 && thread != _current) {
/* the thread could be running on another CPU right now */
z_arm64_mem_cfg_ipi();
}
@ -810,7 +810,7 @@ int arch_mem_domain_thread_remove(struct k_thread *thread)
ret = configure_dynamic_mpu_regions(thread);
#ifdef CONFIG_SMP
if (ret == 0 && thread != arch_current_thread()) {
if (ret == 0 && thread != _current) {
/* the thread could be running on another CPU right now */
z_arm64_mem_cfg_ipi();
}


@ -306,9 +306,8 @@ static bool z_arm64_stack_corruption_check(struct arch_esf *esf, uint64_t esr, u
}
}
#ifdef CONFIG_USERSPACE
else if ((arch_current_thread()->base.user_options & K_USER) != 0 &&
GET_ESR_EC(esr) == 0x24) {
sp_limit = (uint64_t)arch_current_thread()->stack_info.start;
else if ((_current->base.user_options & K_USER) != 0 && GET_ESR_EC(esr) == 0x24) {
sp_limit = (uint64_t)_current->stack_info.start;
guard_start = sp_limit - Z_ARM64_STACK_GUARD_SIZE;
sp = esf->sp;
if (sp <= sp_limit || (guard_start <= far && far <= sp_limit)) {
@ -435,7 +434,7 @@ void z_arm64_do_kernel_oops(struct arch_esf *esf)
* User mode is only allowed to induce oopses and stack check
* failures via software-triggered system fatal exceptions.
*/
if (((arch_current_thread()->base.user_options & K_USER) != 0) &&
if (((_current->base.user_options & K_USER) != 0) &&
reason != K_ERR_STACK_CHK_FAIL) {
reason = K_ERR_KERNEL_OOPS;
}


@ -36,7 +36,7 @@ static void DBG(char *msg, struct k_thread *th)
strcpy(buf, "CPU# exc# ");
buf[3] = '0' + _current_cpu->id;
buf[8] = '0' + arch_exception_depth();
strcat(buf, arch_current_thread()->name);
strcat(buf, _current->name);
strcat(buf, ": ");
strcat(buf, msg);
strcat(buf, " ");
@ -125,7 +125,7 @@ static void flush_owned_fpu(struct k_thread *thread)
* replace it, and this avoids a deadlock where
* two CPUs want to pull each other's FPU context.
*/
if (thread == arch_current_thread()) {
if (thread == _current) {
arch_flush_local_fpu();
while (atomic_ptr_get(&_kernel.cpus[i].arch.fpu_owner) == thread) {
barrier_dsync_fence_full();
@ -260,15 +260,15 @@ void z_arm64_fpu_trap(struct arch_esf *esf)
* Make sure the FPU context we need isn't live on another CPU.
* The current CPU's FPU context is NULL at this point.
*/
flush_owned_fpu(arch_current_thread());
flush_owned_fpu(_current);
#endif
/* become new owner */
atomic_ptr_set(&_current_cpu->arch.fpu_owner, arch_current_thread());
atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current);
/* restore our content */
z_arm64_fpu_restore(&arch_current_thread()->arch.saved_fp_context);
DBG("restore", arch_current_thread());
z_arm64_fpu_restore(&_current->arch.saved_fp_context);
DBG("restore", _current);
}
/*
@ -287,7 +287,7 @@ static void fpu_access_update(unsigned int exc_update_level)
if (arch_exception_depth() == exc_update_level) {
/* We're about to execute non-exception code */
if (atomic_ptr_get(&_current_cpu->arch.fpu_owner) == arch_current_thread()) {
if (atomic_ptr_get(&_current_cpu->arch.fpu_owner) == _current) {
/* turn on FPU access */
write_cpacr_el1(cpacr | CPACR_EL1_FPEN_NOTRAP);
} else {


@ -1309,7 +1309,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread)
}
thread->arch.ptables = domain_ptables;
if (thread == arch_current_thread()) {
if (thread == _current) {
z_arm64_swap_ptables(thread);
} else {
#ifdef CONFIG_SMP


@ -23,7 +23,6 @@
#include <zephyr/drivers/interrupt_controller/gic.h>
#include <zephyr/drivers/pm_cpu_ops.h>
#include <zephyr/arch/arch_interface.h>
#include <zephyr/platform/hooks.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/irq.h>
#include "boot.h"
@ -164,10 +163,6 @@ void arch_secondary_cpu_init(int cpu_num)
#endif
#endif
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
fn = arm64_cpu_boot_params.fn;
arg = arm64_cpu_boot_params.arg;
barrier_dsync_fence_full();
@ -240,7 +235,7 @@ void mem_cfg_ipi_handler(const void *unused)
* This is a no-op if the page table is already the right one.
* Lock irq to prevent the interrupt during mem region switch.
*/
z_arm64_swap_mem_domains(arch_current_thread());
z_arm64_swap_mem_domains(_current);
arch_irq_unlock(key);
}


@ -159,15 +159,15 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
uint64_t tmpreg;
/* Map the thread stack */
z_arm64_thread_mem_domains_init(arch_current_thread());
z_arm64_thread_mem_domains_init(_current);
/* Top of the user stack area */
stack_el0 = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start +
arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta);
stack_el0 = Z_STACK_PTR_ALIGN(_current->stack_info.start +
_current->stack_info.size -
_current->stack_info.delta);
/* Top of the privileged non-user-accessible part of the stack */
stack_el1 = (uintptr_t)(arch_current_thread()->stack_obj + ARCH_THREAD_STACK_RESERVED);
stack_el1 = (uintptr_t)(_current->stack_obj + ARCH_THREAD_STACK_RESERVED);
register void *x0 __asm__("x0") = user_entry;
register void *x1 __asm__("x1") = p1;


@ -22,8 +22,6 @@
#include <kernel_arch_data.h>
#include <zephyr/platform/hooks.h>
#ifdef __cplusplus
extern "C" {
#endif
@ -37,10 +35,6 @@ static ALWAYS_INLINE void arch_kernel_init(void)
#ifdef CONFIG_XEN
xen_enlighten_init();
#endif
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
}
static inline void arch_switch(void *switch_to, void **switched_from)


@ -9,7 +9,6 @@
SECTION_DATA_PROLOGUE(.ramfunc,,)
{
__ramfunc_region_start = .;
MPU_ALIGN(__ramfunc_size);
__ramfunc_start = .;
*(.ramfunc)


@ -70,7 +70,6 @@ GTEXT(_Fault)
GTEXT(_k_neg_eagain)
GTEXT(z_thread_mark_switched_in)
GTEXT(z_thread_mark_switched_out)
/* exports */
GTEXT(__isr_vec)
@ -210,9 +209,6 @@ on_thread_stack:
#endif /* CONFIG_PREEMPT_ENABLED */
reschedule:
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
jal z_thread_mark_switched_out
#endif
/*
* Check if the current thread is the same as the thread on the ready Q. If
* so, do not reschedule.


@ -19,8 +19,6 @@
#include <kernel_arch_data.h>
#include <zephyr/platform/hooks.h>
#ifdef __cplusplus
extern "C" {
#endif
@ -28,9 +26,6 @@ extern "C" {
#ifndef _ASMLANGUAGE
static ALWAYS_INLINE void arch_kernel_init(void)
{
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
}
static ALWAYS_INLINE void


@ -22,8 +22,6 @@
#include <kernel_arch_data.h>
#include <zephyr/platform/hooks.h>
#ifdef __cplusplus
extern "C" {
#endif
@ -32,9 +30,6 @@ extern "C" {
static ALWAYS_INLINE void arch_kernel_init(void)
{
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
}
static ALWAYS_INLINE void


@ -23,7 +23,7 @@
int arch_swap(unsigned int key)
{
/*
* struct k_thread * arch_current_thread() is the currently running thread
* struct k_thread * _current is the currently running thread
* struct k_thread * _kernel.ready_q.cache contains the next thread to
* run (cannot be NULL)
*
@ -34,8 +34,8 @@ int arch_swap(unsigned int key)
#if CONFIG_INSTRUMENT_THREAD_SWITCHING
z_thread_mark_switched_out();
#endif
arch_current_thread()->callee_saved.key = key;
arch_current_thread()->callee_saved.retval = -EAGAIN;
_current->callee_saved.key = key;
_current->callee_saved.retval = -EAGAIN;
/* retval may be modified with a call to
* arch_thread_return_value_set()
@ -47,10 +47,10 @@ int arch_swap(unsigned int key)
posix_thread_status_t *this_thread_ptr =
(posix_thread_status_t *)
arch_current_thread()->callee_saved.thread_status;
_current->callee_saved.thread_status;
arch_current_thread_set(_kernel.ready_q.cache);
_current = _kernel.ready_q.cache;
#if CONFIG_INSTRUMENT_THREAD_SWITCHING
z_thread_mark_switched_in();
#endif
@ -66,9 +66,9 @@ int arch_swap(unsigned int key)
/* When we continue, _kernel->current points back to this thread */
irq_unlock(arch_current_thread()->callee_saved.key);
irq_unlock(_current->callee_saved.key);
return arch_current_thread()->callee_saved.retval;
return _current->callee_saved.retval;
}
@ -94,7 +94,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
z_thread_mark_switched_out();
#endif
arch_current_thread_set(_kernel.ready_q.cache);
_current = _kernel.ready_q.cache;
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
z_thread_mark_switched_in();


@ -96,25 +96,6 @@ void posix_arch_thread_entry(void *pa_thread_status)
z_thread_entry(ptr->entry_point, ptr->arg1, ptr->arg2, ptr->arg3);
}
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
ARG_UNUSED(thread);
/* Posix always has FPU enabled so cannot be disabled */
return -ENOTSUP;
}
int arch_float_enable(struct k_thread *thread, unsigned int options)
{
ARG_UNUSED(thread);
ARG_UNUSED(options);
/* Posix always has FPU enabled so nothing to do here */
return 0;
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#if defined(CONFIG_ARCH_HAS_THREAD_ABORT)
void z_impl_k_thread_abort(k_tid_t thread)
{
@ -131,7 +112,7 @@ void z_impl_k_thread_abort(k_tid_t thread)
key = irq_lock();
if (arch_current_thread() == thread) {
if (_current == thread) {
if (tstatus->aborted == 0) { /* LCOV_EXCL_BR_LINE */
tstatus->aborted = 1;
} else {


@ -12,8 +12,6 @@
#include <kernel_arch_data.h>
#include <zephyr/platform/hooks.h>
#ifndef _ASMLANGUAGE
#ifdef __cplusplus
@ -22,9 +20,7 @@ extern "C" {
static inline void arch_kernel_init(void)
{
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
/* Nothing to be done */
}
static ALWAYS_INLINE void


@ -1,5 +1,4 @@
# Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
# Copyright (c) 2024 Antmicro <www.antmicro.com>
# SPDX-License-Identifier: Apache-2.0
menu "RISCV Options"
@ -28,16 +27,6 @@ config RISCV_GP
global pointer at program start or earlier than any instruction
using GP relative addressing.
config RISCV_CURRENT_VIA_GP
bool "Store current thread into the global pointer (GP) register"
depends on !RISCV_GP && !USERSPACE
depends on MP_MAX_NUM_CPUS > 1
select ARCH_HAS_CUSTOM_CURRENT_IMPL
help
Store the current thread's pointer into the global pointer (GP) register.
When this is enabled, calls to `arch_current_thread()` and `k_sched_current_thread_query()` will
be reduced to a single register read.
config RISCV_ALWAYS_SWITCH_THROUGH_ECALL
bool "Do not use mret outside a trap handler context"
depends on MULTITHREADING
@ -48,31 +37,6 @@ config RISCV_ALWAYS_SWITCH_THROUGH_ECALL
and most people should say n here to minimize context switching
overhead.
choice RISCV_SMP_IPI_IMPL
prompt "RISC-V SMP IPI implementation"
depends on SMP
default RISCV_SMP_IPI_CLINT if DT_HAS_SIFIVE_CLINT0_ENABLED
default RISCV_SMP_IPI_CUSTOM
config RISCV_SMP_IPI_CLINT
bool "CLINT-based IPI"
depends on DT_HAS_SIFIVE_CLINT0_ENABLED
help
Use CLINT-based IPI implementation.
config RISCV_SMP_IPI_CUSTOM
bool "Custom IPI implementation"
help
Allow custom IPI implementation.
When this is selected, the following functions must be provided:
- arch_sched_directed_ipi()
- arch_flush_fpu_ipi() if CONFIG_FPU_SHARING
- arch_spin_relax() if CONFIG_FPU_SHARING
- arch_smp_init()
endchoice # RISCV_SMP_IPI_IMPL
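As a rough sketch, a backend selected via RISCV_SMP_IPI_CUSTOM supplies the
hooks listed in the help text above (placeholder bodies, illustration only):

int arch_smp_init(void)
{
	/* connect and enable the platform's IPI interrupt source */
	return 0;
}

void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
	/* pend a software IPI on every CPU whose bit is set in cpu_bitmap */
}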
menu "RISCV Processor Options"
config INCLUDE_RESET_VECTOR
@ -347,6 +311,7 @@ endif #RISCV_PMP
config PMP_STACK_GUARD
def_bool y
depends on MULTITHREADING
depends on HW_STACK_PROTECTION
config PMP_STACK_GUARD_MIN_SIZE
@ -385,15 +350,6 @@ config NULL_POINTER_EXCEPTION_REGION_SIZE
endif # NULL_POINTER_EXCEPTION_DETECTION_PMP
config RISCV_IMPRECISE_FPU_STATE_TRACKING
bool "Imprecise implementation of FPU state tracking"
depends on FPU
help
According to the RISC-V Instruction Set Manual: Volume II, Version 20240411
(Section 3.1.6.6), some implementations may choose to track the dirtiness of
the floating-point register state imprecisely by reporting the state to be
dirty even when it has not been modified. This option reflects that.
endmenu
config MAIN_STACK_SIZE


@ -17,12 +17,6 @@ if ((CONFIG_MP_MAX_NUM_CPUS GREATER 1) OR (CONFIG_SMP))
zephyr_library_sources(smp.c)
endif ()
if (CONFIG_SMP)
zephyr_library_sources(ipi.c)
zephyr_library_sources_ifdef(CONFIG_RISCV_SMP_IPI_CLINT ipi_clint.c)
endif()
zephyr_library_sources_ifdef(CONFIG_FPU_SHARING fpu.c fpu.S)
zephyr_library_sources_ifdef(CONFIG_DEBUG_COREDUMP coredump.c)
zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)


@ -158,43 +158,33 @@ static bool bad_stack_pointer(struct arch_esf *esf)
uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf);
#ifdef CONFIG_USERSPACE
if (arch_current_thread()->arch.priv_stack_start != 0 &&
sp >= arch_current_thread()->arch.priv_stack_start &&
sp < arch_current_thread()->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE) {
if (_current->arch.priv_stack_start != 0 &&
sp >= _current->arch.priv_stack_start &&
sp < _current->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE) {
return true;
}
if (z_stack_is_user_capable(arch_current_thread()->stack_obj) &&
sp >= arch_current_thread()->stack_info.start - K_THREAD_STACK_RESERVED &&
sp < arch_current_thread()->stack_info.start - K_THREAD_STACK_RESERVED
if (z_stack_is_user_capable(_current->stack_obj) &&
sp >= _current->stack_info.start - K_THREAD_STACK_RESERVED &&
sp < _current->stack_info.start - K_THREAD_STACK_RESERVED
+ Z_RISCV_STACK_GUARD_SIZE) {
return true;
}
#endif /* CONFIG_USERSPACE */
#if CONFIG_MULTITHREADING
if (sp >= arch_current_thread()->stack_info.start - K_KERNEL_STACK_RESERVED &&
sp < arch_current_thread()->stack_info.start - K_KERNEL_STACK_RESERVED
if (sp >= _current->stack_info.start - K_KERNEL_STACK_RESERVED &&
sp < _current->stack_info.start - K_KERNEL_STACK_RESERVED
+ Z_RISCV_STACK_GUARD_SIZE) {
return true;
}
#else
uintptr_t isr_stack = (uintptr_t)z_interrupt_stacks;
uintptr_t main_stack = (uintptr_t)z_main_stack;
if ((sp >= isr_stack && sp < isr_stack + Z_RISCV_STACK_GUARD_SIZE) ||
(sp >= main_stack && sp < main_stack + Z_RISCV_STACK_GUARD_SIZE)) {
return true;
}
#endif /* CONFIG_MULTITHREADING */
#endif /* CONFIG_PMP_STACK_GUARD */
#ifdef CONFIG_USERSPACE
if ((esf->mstatus & MSTATUS_MPP) == 0 &&
(esf->sp < arch_current_thread()->stack_info.start ||
esf->sp > arch_current_thread()->stack_info.start +
arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta)) {
(esf->sp < _current->stack_info.start ||
esf->sp > _current->stack_info.start +
_current->stack_info.size -
_current->stack_info.delta)) {
/* user stack pointer moved outside of its allowed stack */
return true;
}
@ -246,9 +236,9 @@ FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
void z_impl_user_fault(unsigned int reason)
{
struct arch_esf *oops_esf = arch_current_thread()->syscall_frame;
struct arch_esf *oops_esf = _current->syscall_frame;
if (((arch_current_thread()->base.user_options & K_USER) != 0) &&
if (((_current->base.user_options & K_USER) != 0) &&
reason != K_ERR_STACK_CHK_FAIL) {
reason = K_ERR_KERNEL_OOPS;
}


@ -36,8 +36,8 @@ static void DBG(char *msg, struct k_thread *th)
strcpy(buf, "CPU# exc# ");
buf[3] = '0' + _current_cpu->id;
buf[8] = '0' + arch_current_thread()->arch.exception_depth;
strcat(buf, arch_current_thread()->name);
buf[8] = '0' + _current->arch.exception_depth;
strcat(buf, _current->name);
strcat(buf, ": ");
strcat(buf, msg);
strcat(buf, " ");
@ -82,12 +82,12 @@ static void z_riscv_fpu_load(void)
"must be called with FPU access disabled");
/* become new owner */
atomic_ptr_set(&_current_cpu->arch.fpu_owner, arch_current_thread());
atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current);
/* restore our content */
csr_set(mstatus, MSTATUS_FS_INIT);
z_riscv_fpu_restore(&arch_current_thread()->arch.saved_fp_context);
DBG("restore", arch_current_thread());
z_riscv_fpu_restore(&_current->arch.saved_fp_context);
DBG("restore", _current);
}
/*
@ -168,7 +168,7 @@ static void flush_owned_fpu(struct k_thread *thread)
* replace it, and this avoids a deadlock where
* two CPUs want to pull each other's FPU context.
*/
if (thread == arch_current_thread()) {
if (thread == _current) {
z_riscv_fpu_disable();
arch_flush_local_fpu();
do {
@ -213,7 +213,7 @@ void z_riscv_fpu_trap(struct arch_esf *esf)
/* save current owner's content if any */
arch_flush_local_fpu();
if (arch_current_thread()->arch.exception_depth > 0) {
if (_current->arch.exception_depth > 0) {
/*
* We were already in exception when the FPU access trapped.
* We give it access and prevent any further IRQ recursion
@ -233,7 +233,7 @@ void z_riscv_fpu_trap(struct arch_esf *esf)
* Make sure the FPU context we need isn't live on another CPU.
* The current CPU's FPU context is NULL at this point.
*/
flush_owned_fpu(arch_current_thread());
flush_owned_fpu(_current);
#endif
/* make it accessible and clean to the returning context */
@ -256,13 +256,13 @@ static bool fpu_access_allowed(unsigned int exc_update_level)
__ASSERT((csr_read(mstatus) & MSTATUS_IEN) == 0,
"must be called with IRQs disabled");
if (arch_current_thread()->arch.exception_depth == exc_update_level) {
if (_current->arch.exception_depth == exc_update_level) {
/* We're about to execute non-exception code */
if (_current_cpu->arch.fpu_owner == arch_current_thread()) {
if (_current_cpu->arch.fpu_owner == _current) {
/* everything is already in place */
return true;
}
if (arch_current_thread()->arch.fpu_recently_used) {
if (_current->arch.fpu_recently_used) {
/*
* Before this thread was context-switched out,
* it made active use of the FPU, but someone else
@ -273,7 +273,7 @@ static bool fpu_access_allowed(unsigned int exc_update_level)
z_riscv_fpu_disable();
arch_flush_local_fpu();
#ifdef CONFIG_SMP
flush_owned_fpu(arch_current_thread());
flush_owned_fpu(_current);
#endif
z_riscv_fpu_load();
_current_cpu->arch.fpu_state = MSTATUS_FS_CLEAN;


@ -1,14 +0,0 @@
/*
* Copyright (c) 2021 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <ipi.h>
#include <zephyr/kernel.h>
void arch_sched_broadcast_ipi(void)
{
arch_sched_directed_ipi(IPI_ALL_CPUS_MASK);
}


@ -1,97 +0,0 @@
/*
* Copyright (c) 2021 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <ipi.h>
#include <ksched.h>
#include <zephyr/kernel.h>
#define MSIP_BASE 0x2000000UL
#define MSIP(hartid) ((volatile uint32_t *)MSIP_BASE)[hartid]
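/*
 * The CLINT exposes one 32-bit MSIP word per hart starting at MSIP_BASE,
 * so MSIP(hartid) indexes directly into that array; writing 1 pends the
 * software interrupt for that hart and writing 0 clears it.
 */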
static atomic_val_t cpu_pending_ipi[CONFIG_MP_MAX_NUM_CPUS];
#define IPI_SCHED 0
#define IPI_FPU_FLUSH 1
void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
unsigned int key = arch_irq_lock();
unsigned int id = _current_cpu->id;
unsigned int num_cpus = arch_num_cpus();
for (unsigned int i = 0; i < num_cpus; i++) {
if ((i != id) && _kernel.cpus[i].arch.online && ((cpu_bitmap & BIT(i)) != 0)) {
atomic_set_bit(&cpu_pending_ipi[i], IPI_SCHED);
MSIP(_kernel.cpus[i].arch.hartid) = 1;
}
}
arch_irq_unlock(key);
}
#ifdef CONFIG_FPU_SHARING
void arch_flush_fpu_ipi(unsigned int cpu)
{
atomic_set_bit(&cpu_pending_ipi[cpu], IPI_FPU_FLUSH);
MSIP(_kernel.cpus[cpu].arch.hartid) = 1;
}
#endif /* CONFIG_FPU_SHARING */
static void sched_ipi_handler(const void *unused)
{
ARG_UNUSED(unused);
MSIP(csr_read(mhartid)) = 0;
atomic_val_t pending_ipi = atomic_clear(&cpu_pending_ipi[_current_cpu->id]);
if (pending_ipi & ATOMIC_MASK(IPI_SCHED)) {
z_sched_ipi();
}
#ifdef CONFIG_FPU_SHARING
if (pending_ipi & ATOMIC_MASK(IPI_FPU_FLUSH)) {
/* disable IRQs */
csr_clear(mstatus, MSTATUS_IEN);
/* perform the flush */
arch_flush_local_fpu();
/*
* No need to re-enable IRQs here as long as
* this remains the last case.
*/
}
#endif /* CONFIG_FPU_SHARING */
}
#ifdef CONFIG_FPU_SHARING
/*
* Make sure there is no pending FPU flush request for this CPU while
* waiting for a contended spinlock to become available. This prevents
* a deadlock when the lock we need is already taken by another CPU
* that also wants its FPU content to be reinstated while such content
* is still live in this CPU's FPU.
*/
void arch_spin_relax(void)
{
atomic_val_t *pending_ipi = &cpu_pending_ipi[_current_cpu->id];
if (atomic_test_and_clear_bit(pending_ipi, IPI_FPU_FLUSH)) {
/*
* We may not be in IRQ context here hence cannot use
* arch_flush_local_fpu() directly.
*/
arch_float_disable(_current_cpu->arch.fpu_owner);
}
}
#endif /* CONFIG_FPU_SHARING */
int arch_smp_init(void)
{
IRQ_CONNECT(RISCV_IRQ_MSOFT, 0, sched_ipi_handler, NULL, 0);
irq_enable(RISCV_IRQ_MSOFT);
return 0;
}


@ -163,14 +163,6 @@ SECTION_FUNC(exception.entry, _isr_wrapper)
lr t0, ___cpu_t_current_OFFSET(s0)
lr tp, _thread_offset_to_tls(t0)
/* Make sure global pointer is sane */
#ifdef CONFIG_RISCV_GP
.option push
.option norelax
la gp, __global_pointer$
.option pop
#endif /* CONFIG_RISCV_GP */
/* Clear our per-thread usermode flag */
lui t0, %tprel_hi(is_user_mode)
add t0, t0, tp, %tprel_add(is_user_mode)
@ -297,7 +289,7 @@ is_fp: /* Process the FP trap and quickly return from exception */
mv a0, sp
tail z_riscv_fpu_trap
2:
no_fp: /* increment arch_current_thread()->arch.exception_depth */
no_fp: /* increment _current->arch.exception_depth */
lr t0, ___cpu_t_current_OFFSET(s0)
lb t1, _thread_offset_to_exception_depth(t0)
add t1, t1, 1
@ -356,7 +348,7 @@ no_fp: /* increment arch_current_thread()->arch.exception_depth */
li t1, RISCV_EXC_ECALLU
beq t0, t1, is_user_syscall
#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
#ifdef CONFIG_PMP_STACK_GUARD
/*
* Determine if we come from user space. If so, reconfigure the PMP for
* kernel mode stack guard.
@ -397,7 +389,7 @@ is_kernel_syscall:
addi t0, t0, 4
sr t0, __struct_arch_esf_mepc_OFFSET(sp)
#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
#ifdef CONFIG_PMP_STACK_GUARD
/* Re-activate PMP for m-mode */
li t1, MSTATUS_MPP
csrc mstatus, t1
@ -508,7 +500,7 @@ do_irq_offload:
#ifdef CONFIG_USERSPACE
is_user_syscall:
#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
#ifdef CONFIG_PMP_STACK_GUARD
/*
* We came from userspace and need to reconfigure the
* PMP for kernel mode stack guard.
@ -578,7 +570,7 @@ valid_syscall_id:
is_interrupt:
#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
#ifdef CONFIG_PMP_STACK_GUARD
#ifdef CONFIG_USERSPACE
/*
* If we came from userspace then we need to reconfigure the
@ -724,7 +716,7 @@ no_reschedule:
mv a0, sp
call z_riscv_fpu_exit_exc
/* decrement arch_current_thread()->arch.exception_depth */
/* decrement _current->arch.exception_depth */
lr t0, ___cpu_t_current_OFFSET(s0)
lb t1, _thread_offset_to_exception_depth(t0)
add t1, t1, -1
@ -748,7 +740,7 @@ fp_trap_exit:
and t0, t2, t1
bnez t0, 1f
#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
#ifdef CONFIG_PMP_STACK_GUARD
/* Remove kernel stack guard and Reconfigure PMP for user mode */
lr a0, ___cpu_t_current_OFFSET(s0)
call z_riscv_pmp_usermode_enable


@ -348,8 +348,8 @@ static unsigned int global_pmp_end_index;
*/
void z_riscv_pmp_init(void)
{
unsigned long pmp_addr[CONFIG_PMP_SLOTS];
unsigned long pmp_cfg[CONFIG_PMP_SLOTS / PMPCFG_STRIDE];
unsigned long pmp_addr[5];
unsigned long pmp_cfg[2];
unsigned int index = 0;
/* The read-only area is always there for every mode */
@ -370,7 +370,6 @@ void z_riscv_pmp_init(void)
#endif
#ifdef CONFIG_PMP_STACK_GUARD
#ifdef CONFIG_MULTITHREADING
/*
* Set the stack guard for this CPU's IRQ stack by making the bottom
* addresses inaccessible. This will never change so we do it here
@ -397,21 +396,6 @@ void z_riscv_pmp_init(void)
/* And forget about that last entry as we won't need it later */
index--;
#else
/* Without multithreading, set up stack guards for the IRQ and main stacks */
set_pmp_entry(&index, PMP_NONE | PMP_L,
(uintptr_t)z_interrupt_stacks,
Z_RISCV_STACK_GUARD_SIZE,
pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
set_pmp_entry(&index, PMP_NONE | PMP_L,
(uintptr_t)z_main_stack,
Z_RISCV_STACK_GUARD_SIZE,
pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
/* Write those entries to PMP regs. */
write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
#endif /* CONFIG_MULTITHREADING */
#else
/* Write those entries to PMP regs. */
write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
@ -435,6 +419,7 @@ void z_riscv_pmp_init(void)
}
#endif
__ASSERT(index <= PMPCFG_STRIDE, "provision for one global word only");
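/*
 * Each pmpcfg CSR packs PMPCFG_STRIDE 8-bit configuration fields (4 on
 * RV32, 8 on RV64), so keeping every global entry within one stride means
 * a single config word covers them all.
 */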
global_pmp_cfg[0] = pmp_cfg[0];
global_pmp_last_addr = pmp_addr[index - 1];
global_pmp_end_index = index;
@ -469,7 +454,6 @@ static inline unsigned int z_riscv_pmp_thread_init(unsigned long *pmp_addr,
#ifdef CONFIG_PMP_STACK_GUARD
#ifdef CONFIG_MULTITHREADING
/**
* @brief Prepare the PMP stackguard content for given thread.
*
@ -527,8 +511,6 @@ void z_riscv_pmp_stackguard_enable(struct k_thread *thread)
csr_set(mstatus, MSTATUS_MPRV);
}
#endif /* CONFIG_MULTITHREADING */
/**
* @brief Remove PMP stackguard content to actual PMP registers
*/
@ -752,8 +734,8 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
int ret = -1;
/* Check if this is on the stack */
if (IS_WITHIN(start, size, arch_current_thread()->stack_info.start,
arch_current_thread()->stack_info.size)) {
if (IS_WITHIN(start, size,
_current->stack_info.start, _current->stack_info.size)) {
return 0;
}
@ -768,7 +750,7 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
}
/* Look for a matching partition in our memory domain */
struct k_mem_domain *domain = arch_current_thread()->mem_domain_info.mem_domain;
struct k_mem_domain *domain = _current->mem_domain_info.mem_domain;
int p_idx, remaining_partitions;
k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);


@ -7,11 +7,11 @@
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <ksched.h>
#include <ipi.h>
#include <zephyr/irq.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/arch/riscv/irq.h>
#include <zephyr/drivers/pm_cpu_ops.h>
#include <zephyr/platform/hooks.h>
volatile struct {
arch_cpustart_t fn;
@ -79,8 +79,101 @@ void arch_secondary_cpu_init(int hartid)
/* Enable on secondary cores so that they can respond to PLIC */
irq_enable(RISCV_IRQ_MEXT);
#endif /* CONFIG_PLIC_IRQ_AFFINITY */
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
riscv_cpu_init[cpu_num].fn(riscv_cpu_init[cpu_num].arg);
}
#ifdef CONFIG_SMP
#define MSIP_BASE 0x2000000UL
#define MSIP(hartid) ((volatile uint32_t *)MSIP_BASE)[hartid]
static atomic_val_t cpu_pending_ipi[CONFIG_MP_MAX_NUM_CPUS];
#define IPI_SCHED 0
#define IPI_FPU_FLUSH 1
void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
unsigned int key = arch_irq_lock();
unsigned int id = _current_cpu->id;
unsigned int num_cpus = arch_num_cpus();
for (unsigned int i = 0; i < num_cpus; i++) {
if ((i != id) && _kernel.cpus[i].arch.online &&
((cpu_bitmap & BIT(i)) != 0)) {
atomic_set_bit(&cpu_pending_ipi[i], IPI_SCHED);
MSIP(_kernel.cpus[i].arch.hartid) = 1;
}
}
arch_irq_unlock(key);
}
void arch_sched_broadcast_ipi(void)
{
arch_sched_directed_ipi(IPI_ALL_CPUS_MASK);
}
#ifdef CONFIG_FPU_SHARING
void arch_flush_fpu_ipi(unsigned int cpu)
{
atomic_set_bit(&cpu_pending_ipi[cpu], IPI_FPU_FLUSH);
MSIP(_kernel.cpus[cpu].arch.hartid) = 1;
}
#endif
static void sched_ipi_handler(const void *unused)
{
ARG_UNUSED(unused);
MSIP(csr_read(mhartid)) = 0;
atomic_val_t pending_ipi = atomic_clear(&cpu_pending_ipi[_current_cpu->id]);
if (pending_ipi & ATOMIC_MASK(IPI_SCHED)) {
z_sched_ipi();
}
#ifdef CONFIG_FPU_SHARING
if (pending_ipi & ATOMIC_MASK(IPI_FPU_FLUSH)) {
/* disable IRQs */
csr_clear(mstatus, MSTATUS_IEN);
/* perform the flush */
arch_flush_local_fpu();
/*
* No need to re-enable IRQs here as long as
* this remains the last case.
*/
}
#endif
}
#ifdef CONFIG_FPU_SHARING
/*
* Make sure there is no pending FPU flush request for this CPU while
* waiting for a contended spinlock to become available. This prevents
* a deadlock when the lock we need is already taken by another CPU
* that also wants its FPU content to be reinstated while such content
* is still live in this CPU's FPU.
*/
void arch_spin_relax(void)
{
atomic_val_t *pending_ipi = &cpu_pending_ipi[_current_cpu->id];
if (atomic_test_and_clear_bit(pending_ipi, IPI_FPU_FLUSH)) {
/*
* We may not be in IRQ context here hence cannot use
* arch_flush_local_fpu() directly.
*/
arch_float_disable(_current_cpu->arch.fpu_owner);
}
}
#endif
int arch_smp_init(void)
{
IRQ_CONNECT(RISCV_IRQ_MSOFT, 0, sched_ipi_handler, NULL, 0);
irq_enable(RISCV_IRQ_MSOFT);
return 0;
}
#endif /* CONFIG_SMP */


@ -108,7 +108,7 @@ static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k
/* Unwind the provided exception stack frame */
fp = esf->s0;
ra = esf->mepc;
} else if ((csf == NULL) || (csf == &arch_current_thread()->callee_saved)) {
} else if ((csf == NULL) || (csf == &_current->callee_saved)) {
/* Unwind current thread (default case when nothing is provided) */
fp = (uintptr_t)__builtin_frame_address(0);
ra = (uintptr_t)walk_stackframe;
@ -181,7 +181,7 @@ static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k
/* Unwind the provided exception stack frame */
sp = z_riscv_get_sp_before_exc(esf);
ra = esf->mepc;
} else if ((csf == NULL) || (csf == &arch_current_thread()->callee_saved)) {
} else if ((csf == NULL) || (csf == &_current->callee_saved)) {
/* Unwind current thread (default case when nothing is provided) */
sp = current_stack_pointer;
ra = (uintptr_t)walk_stackframe;
@ -215,10 +215,8 @@ void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
const struct k_thread *thread, const struct arch_esf *esf)
{
if (thread == NULL) {
/* In case `thread` is NULL, default that to `arch_current_thread()`
* and try to unwind
*/
thread = arch_current_thread();
/* In case `thread` is NULL, default that to `_current` and try to unwind */
thread = _current;
}
walk_stackframe((riscv_stacktrace_cb)callback_fn, cookie, thread, esf, in_stack_bound,
@ -282,8 +280,7 @@ void z_riscv_unwind_stack(const struct arch_esf *esf, const _callee_saved_t *csf
int i = 0;
LOG_ERR("call trace:");
walk_stackframe(print_trace_address, &i, arch_current_thread(), esf, in_fatal_stack_bound,
csf);
walk_stackframe(print_trace_address, &i, _current, esf, in_fatal_stack_bound, csf);
LOG_ERR("");
}
#endif /* CONFIG_EXCEPTION_STACK_TRACE */
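/*
 * A minimal usage sketch of the arch_stack_walk() API above (hypothetical
 * callback, assuming the standard stack_trace_callback_fn signature and a
 * configuration with stack walking enabled; needs <zephyr/kernel.h>):
 */
static bool print_frame(void *cookie, unsigned long addr)
{
	int *depth = cookie;

	printk("  #%d: %#lx\n", (*depth)++, addr);
	return true; /* keep unwinding */
}

void dump_current_thread_stack(void)
{
	int depth = 0;

	/* NULL thread/esf means "unwind the current thread from here" */
	arch_stack_walk(print_frame, &depth, NULL, NULL);
}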


@ -132,29 +132,28 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
/* Set up privileged stack */
#ifdef CONFIG_GEN_PRIV_STACKS
arch_current_thread()->arch.priv_stack_start =
(unsigned long)z_priv_stack_find(arch_current_thread()->stack_obj);
_current->arch.priv_stack_start =
(unsigned long)z_priv_stack_find(_current->stack_obj);
/* remove the stack guard from the main stack */
arch_current_thread()->stack_info.start -= K_THREAD_STACK_RESERVED;
arch_current_thread()->stack_info.size += K_THREAD_STACK_RESERVED;
_current->stack_info.start -= K_THREAD_STACK_RESERVED;
_current->stack_info.size += K_THREAD_STACK_RESERVED;
#else
arch_current_thread()->arch.priv_stack_start =
(unsigned long)arch_current_thread()->stack_obj;
_current->arch.priv_stack_start = (unsigned long)_current->stack_obj;
#endif /* CONFIG_GEN_PRIV_STACKS */
top_of_priv_stack = Z_STACK_PTR_ALIGN(arch_current_thread()->arch.priv_stack_start +
top_of_priv_stack = Z_STACK_PTR_ALIGN(_current->arch.priv_stack_start +
K_KERNEL_STACK_RESERVED +
CONFIG_PRIVILEGED_STACK_SIZE);
#ifdef CONFIG_INIT_STACKS
/* Initialize the privileged stack */
(void)memset((void *)arch_current_thread()->arch.priv_stack_start, 0xaa,
(void)memset((void *)_current->arch.priv_stack_start, 0xaa,
Z_STACK_PTR_ALIGN(K_KERNEL_STACK_RESERVED + CONFIG_PRIVILEGED_STACK_SIZE));
#endif /* CONFIG_INIT_STACKS */
top_of_user_stack = Z_STACK_PTR_ALIGN(
arch_current_thread()->stack_info.start +
arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta);
_current->stack_info.start +
_current->stack_info.size -
_current->stack_info.delta);
status = csr_read(mstatus);
@ -170,12 +169,12 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
#ifdef CONFIG_PMP_STACK_GUARD
/* reconfigure as the kernel mode stack will be different */
z_riscv_pmp_stackguard_prepare(arch_current_thread());
z_riscv_pmp_stackguard_prepare(_current);
#endif
/* Set up Physical Memory Protection */
z_riscv_pmp_usermode_prepare(arch_current_thread());
z_riscv_pmp_usermode_enable(arch_current_thread());
z_riscv_pmp_usermode_prepare(_current);
z_riscv_pmp_usermode_enable(_current);
/* preserve stack pointer for next exception entry */
arch_curr_cpu()->arch.user_exc_sp = top_of_priv_stack;


@ -18,8 +18,6 @@
#include <kernel_arch_data.h>
#include <pmp.h>
#include <zephyr/platform/hooks.h>
#ifdef __cplusplus
extern "C" {
#endif
@ -55,9 +53,6 @@ static ALWAYS_INLINE void arch_kernel_init(void)
#ifdef CONFIG_RISCV_PMP
z_riscv_pmp_init();
#endif
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
}
static ALWAYS_INLINE void


@ -26,12 +26,6 @@ config NUM_IRQS
int
default 32
config SPARC_SVT
bool "Single-vector trapping"
help
Use Single-vector trapping (SVT). Defined by SPARC-V8 Embedded (V8E)
Architecture Specification and available in some LEON processors.
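# For instance, an SVT-capable LEON board would opt in from its defconfig
# (sketch): CONFIG_SPARC_SVT=y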
config SPARC_CASA
bool "CASA instructions"
help


@ -13,9 +13,8 @@ zephyr_library_sources(
thread.c
window_trap.S
sw_trap_set_pil.S
trap_table_mvt.S
)
zephyr_library_sources_ifdef(CONFIG_SPARC_SVT trap_table_svt.S)
zephyr_library_sources_ifndef(CONFIG_SPARC_SVT trap_table_mvt.S)
zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)
zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE tls.c)


@ -12,18 +12,6 @@
GTEXT(__sparc_trap_reset)
SECTION_FUNC(TEXT, __sparc_trap_reset)
#ifdef CONFIG_SPARC_SVT
#ifdef CONFIG_SOC_SPARC_LEON
/* On LEON, enable single vector trapping by setting ASR17.SV. */
rd %asr17, %g1
set (1<<13), %g2
or %g1, %g2, %g1
wr %g1, %asr17
#else
#error "Don't know how to enable SVT on this SOC"
#endif
#endif
set __sparc_trap_table, %g1
wr %g1, %tbr
wr 2, %wim


@ -61,7 +61,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
{
*old_thread = arch_current_thread();
*old_thread = _current;
return z_get_next_switch_handle(*old_thread);
}


@ -1,138 +0,0 @@
/*
* Copyright (c) 2023 Frontgrade Gaisler AB
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
* This file contains the trap entry for SPARC operating with
* single-vector trap model, defined in SPARC V8E. The processor
* redirects execution to a single entry on any trap event. From
* there, two levels of look-up tables are used to find the trap
* handler.
*
* - Execution time is constant.
* - Condition flags are not modified.
* - Provides handler with PSR in l0, TBR in l6
* - This SVT implementation is less than 400 bytes long. (An MVT
* table is always 4096 bytes long.)
*
* See trap_table_mvt.S for information about SPARC trap types.
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/sparc/sparc.h>
#ifdef CONFIG_IRQ_OFFLOAD
#define IRQ_OFFLOAD_HANDLER __sparc_trap_irq_offload
#else
#define IRQ_OFFLOAD_HANDLER __sparc_trap_fault
#endif
GTEXT(__sparc_trap_table)
GTEXT(__start)
SECTION_SUBSEC_FUNC(TEXT, traptable, __sparc_trap_table)
__start:
rd %psr, %l0
mov %tbr, %l6
and %l6, 0xf00, %l7
srl %l7, 6, %l7
set __sparc_trap_table_svt_level0, %l4
ld [%l4 + %l7], %l4
and %l6, 0x0f0, %l7
srl %l7, 2, %l7
ld [%l4 + %l7], %l4
srl %l6, 4, %l3
jmp %l4
and %l3, 0xf, %l3 /* Interrupt level */
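/*
 * For illustration, the two-level dispatch above expressed in C, where tt
 * is the 8-bit trap type held in TBR[11:4] (sketch only, not part of the
 * tree):
 *
 * typedef void (*trap_handler_t)(void);
 * extern const trap_handler_t *const __sparc_trap_table_svt_level0[16];
 *
 * static trap_handler_t svt_lookup(uint32_t tbr)
 * {
 *         uint32_t tt = (tbr >> 4) & 0xff;
 *
 *         return __sparc_trap_table_svt_level0[tt >> 4][tt & 0xf];
 * }
 */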
__sparc_trap_svt_in_trap:
ta 0x00
nop
SECTION_VAR(RODATA, __sparc_trap_table_svt_tables)
.align 4
__sparc_trap_table_svt_level0:
.word __sparc_trap_table_svt_00
.word __sparc_trap_table_svt_10
.word __sparc_trap_table_svt_allbad
.word __sparc_trap_table_svt_allbad
.word __sparc_trap_table_svt_allbad
.word __sparc_trap_table_svt_allbad
.word __sparc_trap_table_svt_allbad
.word __sparc_trap_table_svt_allbad
.word __sparc_trap_table_svt_80
.word __sparc_trap_table_svt_allbad
.word __sparc_trap_table_svt_allbad
.word __sparc_trap_table_svt_allbad
.word __sparc_trap_table_svt_allbad
.word __sparc_trap_table_svt_allbad
.word __sparc_trap_table_svt_allbad
.word __sparc_trap_table_svt_allbad
__sparc_trap_table_svt_00:
.word __sparc_trap_reset
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_window_overflow
.word __sparc_trap_window_underflow
__sparc_trap_table_svt_allbad:
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
__sparc_trap_table_svt_10:
.word __sparc_trap_fault
.word __sparc_trap_interrupt
.word __sparc_trap_interrupt
.word __sparc_trap_interrupt
.word __sparc_trap_interrupt
.word __sparc_trap_interrupt
.word __sparc_trap_interrupt
.word __sparc_trap_interrupt
.word __sparc_trap_interrupt
.word __sparc_trap_interrupt
.word __sparc_trap_interrupt
.word __sparc_trap_interrupt
.word __sparc_trap_interrupt
.word __sparc_trap_interrupt
.word __sparc_trap_interrupt
.word __sparc_trap_interrupt
__sparc_trap_table_svt_80:
.word __sparc_trap_svt_in_trap
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_flush_windows
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_sw_set_pil
.word __sparc_trap_fault
.word __sparc_trap_fault
.word __sparc_trap_fault
.word IRQ_OFFLOAD_HANDLER
.word __sparc_trap_fault
.word __sparc_trap_except_reason


@ -17,8 +17,6 @@
#include <kernel_arch_data.h>
#include <zephyr/platform/hooks.h>
#ifdef __cplusplus
extern "C" {
#endif
@ -26,9 +24,6 @@ extern "C" {
#ifndef _ASMLANGUAGE
static ALWAYS_INLINE void arch_kernel_init(void)
{
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
}
void z_sparc_context_switch(struct k_thread *newt, struct k_thread *oldt);


@ -49,7 +49,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
{
uintptr_t start, end;
if (arch_current_thread() == NULL || arch_is_in_isr()) {
if (_current == NULL || arch_is_in_isr()) {
/* We were servicing an interrupt or in early boot environment
* and are supposed to be on the interrupt stack */
int cpu_id;
@ -64,7 +64,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
end = start + CONFIG_ISR_STACK_SIZE;
#ifdef CONFIG_USERSPACE
} else if ((cs & 0x3U) == 0U &&
(arch_current_thread()->base.user_options & K_USER) != 0) {
(_current->base.user_options & K_USER) != 0) {
/* The low two bits of the CS register is the privilege
* level. It will be 0 in supervisor mode and 3 in user mode
* corresponding to ring 0 / ring 3.
@ -72,14 +72,14 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
* If we get here, we must have been doing a syscall, check
* privilege elevation stack bounds
*/
start = arch_current_thread()->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE;
end = arch_current_thread()->stack_info.start;
start = _current->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE;
end = _current->stack_info.start;
#endif /* CONFIG_USERSPACE */
} else {
/* Normal thread operation, check its stack buffer */
start = arch_current_thread()->stack_info.start;
end = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start +
arch_current_thread()->stack_info.size);
start = _current->stack_info.start;
end = Z_STACK_PTR_ALIGN(_current->stack_info.start +
_current->stack_info.size);
}
return (addr <= start) || (addr + size > end);
@ -97,7 +97,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
__pinned_func
bool z_x86_check_guard_page(uintptr_t addr)
{
struct k_thread *thread = arch_current_thread();
struct k_thread *thread = _current;
uintptr_t start, end;
/* Front guard size - before thread stack area */
@ -233,7 +233,7 @@ static inline uintptr_t get_cr3(const struct arch_esf *esf)
* switch when we took the exception via z_x86_trampoline_to_kernel
*/
if ((esf->cs & 0x3) != 0) {
return arch_current_thread()->arch.ptables;
return _current->arch.ptables;
}
#else
ARG_UNUSED(esf);


@ -207,7 +207,7 @@ void z_float_enable(struct k_thread *thread, unsigned int options)
/* Associate the new FP context with the specified thread */
if (thread == arch_current_thread()) {
if (thread == _current) {
/*
* When enabling FP support for the current thread, just claim
* ownership of the FPU and leave CR0[TS] unset.
@ -222,7 +222,7 @@ void z_float_enable(struct k_thread *thread, unsigned int options)
* of the FPU to them (unless we need it ourselves).
*/
if ((arch_current_thread()->base.user_options & _FP_USER_MASK) == 0) {
if ((_current->base.user_options & _FP_USER_MASK) == 0) {
/*
* We are not FP-capable, so mark FPU as owned by the
* thread we've just enabled FP support for, then
@ -278,7 +278,7 @@ int z_float_disable(struct k_thread *thread)
thread->base.user_options &= ~_FP_USER_MASK;
if (thread == arch_current_thread()) {
if (thread == _current) {
z_FpAccessDisable();
_kernel.current_fp = (struct k_thread *)0;
} else {
@ -314,7 +314,7 @@ void _FpNotAvailableExcHandler(struct arch_esf *pEsf)
/* Enable highest level of FP capability configured into the kernel */
k_float_enable(arch_current_thread(), _FP_USER_MASK);
k_float_enable(_current, _FP_USER_MASK);
}
_EXCEPTION_CONNECT_NOCODE(_FpNotAvailableExcHandler,
IV_DEVICE_NOT_AVAILABLE, 0);


@ -132,9 +132,9 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
/* Transition will reset stack pointer to initial, discarding
* any old context since this is a one-way operation
*/
stack_end = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start +
arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta);
stack_end = Z_STACK_PTR_ALIGN(_current->stack_info.start +
_current->stack_info.size -
_current->stack_info.delta);
#ifdef CONFIG_X86_64
/* x86_64 SysV ABI requires 16 byte stack alignment, which
@ -156,15 +156,15 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
* Note that this also needs to page in the reserved
* portion of the stack (which is usually the page just
* before the beginning of stack in
* arch_current_thread()->stack_info.start.
* _current->stack_info.start.
*/
uintptr_t stack_start;
size_t stack_size;
uintptr_t stack_aligned_start;
size_t stack_aligned_size;
stack_start = POINTER_TO_UINT(arch_current_thread()->stack_obj);
stack_size = K_THREAD_STACK_LEN(arch_current_thread()->stack_info.size);
stack_start = POINTER_TO_UINT(_current->stack_obj);
stack_size = K_THREAD_STACK_LEN(_current->stack_info.size);
#if defined(CONFIG_X86_STACK_PROTECTION)
/* With hardware stack protection, the first page of stack
@ -182,7 +182,7 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
#endif
z_x86_userspace_enter(user_entry, p1, p2, p3, stack_end,
arch_current_thread()->stack_info.start);
_current->stack_info.start);
CODE_UNREACHABLE;
}


@ -421,7 +421,7 @@ void z_x86_tlb_ipi(const void *arg)
/* We might have been moved to another memory domain, so always invoke
* z_x86_thread_page_tables_get() instead of using current CR3 value.
*/
ptables_phys = k_mem_phys_addr(z_x86_thread_page_tables_get(arch_current_thread()));
ptables_phys = k_mem_phys_addr(z_x86_thread_page_tables_get(_current));
#endif
/*
* In the future, we can consider making this smarter, such as
@ -1440,7 +1440,7 @@ static inline void bcb_fence(void)
__pinned_func
int arch_buffer_validate(const void *addr, size_t size, int write)
{
pentry_t *ptables = z_x86_thread_page_tables_get(arch_current_thread());
pentry_t *ptables = z_x86_thread_page_tables_get(_current);
uint8_t *virt;
size_t aligned_size;
int ret = 0;
@ -1958,7 +1958,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread)
* IPI takes care of this if the thread is currently running on some
* other CPU.
*/
if (thread == arch_current_thread() && thread->arch.ptables != z_x86_cr3_get()) {
if (thread == _current && thread->arch.ptables != z_x86_cr3_get()) {
z_x86_cr3_set(thread->arch.ptables);
}
#endif /* CONFIG_X86_KPTI */
@ -1980,9 +1980,8 @@ void z_x86_current_stack_perms(void)
/* Clear any previous context in the stack buffer to prevent
* unintentional data leakage.
*/
(void)memset((void *)arch_current_thread()->stack_info.start, 0xAA,
arch_current_thread()->stack_info.size -
arch_current_thread()->stack_info.delta);
(void)memset((void *)_current->stack_info.start, 0xAA,
_current->stack_info.size - _current->stack_info.delta);
/* Only now is it safe to grant access to the stack buffer since any
* previous context has been erased.
@ -1992,13 +1991,13 @@ void z_x86_current_stack_perms(void)
* This will grant stack and memory domain access if it wasn't set
* already (in which case this returns very quickly).
*/
z_x86_swap_update_common_page_table(arch_current_thread());
z_x86_swap_update_common_page_table(_current);
#else
/* Memory domain access is already programmed into the page tables.
* Need to enable access to this new user thread's stack buffer in
* its domain-specific page tables.
*/
set_stack_perms(arch_current_thread(), z_x86_thread_page_tables_get(arch_current_thread()));
set_stack_perms(_current, z_x86_thread_page_tables_get(_current));
#endif
}
#endif /* CONFIG_USERSPACE */


@ -14,17 +14,13 @@
#include <stddef.h> /* For size_t */
#include <zephyr/platform/hooks.h>
#ifdef __cplusplus
extern "C" {
#endif
static inline void arch_kernel_init(void)
{
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
/* No-op on this arch */
}
static ALWAYS_INLINE void


@ -8,8 +8,6 @@
#include <zephyr/kernel_structs.h>
#include <zephyr/platform/hooks.h>
#ifndef _ASMLANGUAGE
extern void z_x86_switch(void *switch_to, void **switched_from);
@ -29,9 +27,7 @@ extern void z_x86_ipi_setup(void);
static inline void arch_kernel_init(void)
{
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
/* nothing */;
}
FUNC_NORETURN void z_x86_cpu_init(struct x86_cpuboot *cpuboot);


@ -10,18 +10,8 @@
#include <zephyr/llext/loader.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(llext, CONFIG_LLEXT_LOG_LEVEL);
LOG_MODULE_DECLARE(llext);
/*
* ELF relocation tables on Xtensa contain relocations of different types. They
* specify how the relocation should be performed. Which relocations are used
* depends on the type of the ELF object (e.g. shared or partially linked
* object), structure of the object (single or multiple source files), compiler
* flags used (e.g. -fPIC), etc. Also not all relocation table entries should be
* acted upon. Some of them describe relocations that have already been
* resolved by the linker. We have to distinguish them from actionable
* relocations and only need to handle the latter ones.
*/
#define R_XTENSA_NONE 0
#define R_XTENSA_32 1
#define R_XTENSA_RTLD 2
@@ -29,126 +19,71 @@ LOG_MODULE_DECLARE(llext, CONFIG_LLEXT_LOG_LEVEL);
#define R_XTENSA_JMP_SLOT 4
#define R_XTENSA_RELATIVE 5
#define R_XTENSA_PLT 6
#define R_XTENSA_ASM_EXPAND 11
#define R_XTENSA_SLOT0_OP 20
static void xtensa_elf_relocate(struct llext_loader *ldr, struct llext *ext,
const elf_rela_t *rel, uint8_t *text, uintptr_t addr,
uint8_t *loc, int type, uint32_t stb)
{
elf_word *got_entry = (elf_word *)loc;
switch (type) {
case R_XTENSA_RELATIVE:
/* Relocate a local symbol: Xtensa specific. Seems to only be used with PIC */
*got_entry += (uintptr_t)text - addr;
break;
case R_XTENSA_GLOB_DAT:
case R_XTENSA_JMP_SLOT:
if (stb == STB_GLOBAL) {
*got_entry = addr;
}
break;
case R_XTENSA_32:
/* Used for both LOCAL and GLOBAL bindings */
*got_entry += addr;
break;
case R_XTENSA_SLOT0_OP:
/* Apparently only actionable with LOCAL bindings */
;
elf_sym_t rsym;
int ret = llext_seek(ldr, ldr->sects[LLEXT_MEM_SYMTAB].sh_offset +
ELF_R_SYM(rel->r_info) * sizeof(elf_sym_t));
if (!ret) {
ret = llext_read(ldr, &rsym, sizeof(elf_sym_t));
}
if (ret) {
LOG_ERR("Failed to read a symbol table entry, LLEXT linking might fail.");
return;
}
/*
* So far in all observed use-cases
* llext_loaded_sect_ptr(ldr, ext, rsym.st_shndx) was already
* available as the "addr" argument of this function, supplied
* by arch_elf_relocate_local() from its non-STT_SECTION branch.
*/
uintptr_t link_addr = (uintptr_t)llext_loaded_sect_ptr(ldr, ext, rsym.st_shndx) +
rsym.st_value + rel->r_addend;
ssize_t value = (link_addr - (((uintptr_t)got_entry + 3) & ~3)) >> 2;
/* Check the opcode */
if ((loc[0] & 0xf) == 1 && !loc[1] && !loc[2]) {
/* L32R: low nibble is 1 */
loc[1] = value & 0xff;
loc[2] = (value >> 8) & 0xff;
} else if ((loc[0] & 0xf) == 5 && !(loc[0] & 0xc0) && !loc[1] && !loc[2]) {
/* CALLn: low nibble is 5 */
loc[0] = (loc[0] & 0x3f) | ((value << 6) & 0xc0);
loc[1] = (value >> 2) & 0xff;
loc[2] = (value >> 10) & 0xff;
} else {
LOG_DBG("%p: unhandled OPC or no relocation %02x%02x%02x inf %#x offs %#x",
(void *)loc, loc[2], loc[1], loc[0],
rel->r_info, rel->r_offset);
break;
}
break;
case R_XTENSA_ASM_EXPAND:
/* Nothing to do */
break;
default:
LOG_DBG("Unsupported relocation type %u", type);
return;
}
LOG_DBG("Applied relocation to %#x type %u at %p",
*(uint32_t *)((uintptr_t)got_entry & ~3), type, (void *)got_entry);
}
/**
* @brief Architecture specific function for STB_LOCAL ELF relocations
* @brief Architecture specific function for relocating shared elf
*
* Elf files contain a series of relocations described in multiple sections.
* These relocation instructions are architecture specific and each architecture
* supporting modules must implement this.
*/
void arch_elf_relocate_local(struct llext_loader *ldr, struct llext *ext, const elf_rela_t *rel,
const elf_sym_t *sym, size_t got_offset,
const struct llext_load_param *ldr_parm)
void arch_elf_relocate_local(struct llext_loader *ldr, struct llext *ext,
const elf_rela_t *rel, const elf_sym_t *sym, size_t got_offset)
{
uint8_t *text = ext->mem[LLEXT_MEM_TEXT];
uint8_t *loc = text + got_offset;
int type = ELF32_R_TYPE(rel->r_info);
elf_word *got_entry = (elf_word *)(text + got_offset);
uintptr_t sh_addr;
if (ELF_ST_TYPE(sym->st_info) == STT_SECTION) {
elf_shdr_t *shdr = llext_peek(ldr, ldr->hdr.e_shoff +
sym->st_shndx * ldr->hdr.e_shentsize);
sh_addr = shdr->sh_addr &&
(!ldr_parm->section_detached || !ldr_parm->section_detached(shdr)) ?
shdr->sh_addr : (uintptr_t)llext_peek(ldr, shdr->sh_offset);
sh_addr = shdr->sh_addr ? : (uintptr_t)llext_peek(ldr, shdr->sh_offset);
} else {
sh_addr = ldr->sects[LLEXT_MEM_TEXT].sh_addr;
}
xtensa_elf_relocate(ldr, ext, rel, text, sh_addr, loc, type, ELF_ST_BIND(sym->st_info));
}
switch (type) {
case R_XTENSA_RELATIVE:
/* Relocate a local symbol: Xtensa specific */
*got_entry += (uintptr_t)text - sh_addr;
break;
case R_XTENSA_32:
*got_entry += sh_addr;
break;
case R_XTENSA_SLOT0_OP:
;
uint8_t *opc = (uint8_t *)got_entry;
/**
* @brief Architecture specific function for STB_GLOBAL ELF relocations
*/
void arch_elf_relocate_global(struct llext_loader *ldr, struct llext *ext, const elf_rela_t *rel,
const elf_sym_t *sym, size_t got_offset, const void *link_addr)
{
uint8_t *text = ext->mem[LLEXT_MEM_TEXT];
elf_word *got_entry = (elf_word *)(text + got_offset);
int type = ELF32_R_TYPE(rel->r_info);
/* Check the opcode: is this an L32R? And does it have to be relocated? */
if ((opc[0] & 0xf) != 1 || opc[1] || opc[2])
break;
/* For global relocations we expect the initial value for R_XTENSA_RELATIVE to be zero */
if (type == R_XTENSA_RELATIVE && *got_entry) {
LOG_WRN("global: non-zero relative value %#x", *got_entry);
elf_sym_t rsym;
int ret = llext_seek(ldr, ldr->sects[LLEXT_MEM_SYMTAB].sh_offset +
ELF_R_SYM(rel->r_info) * sizeof(elf_sym_t));
if (!ret) {
ret = llext_read(ldr, &rsym, sizeof(elf_sym_t));
}
if (ret)
return;
uintptr_t link_addr = (uintptr_t)llext_loaded_sect_ptr(ldr, ext, rsym.st_shndx) +
rsym.st_value + rel->r_addend;
ssize_t value = (link_addr - (((uintptr_t)got_entry + 3) & ~3)) >> 2;
opc[1] = value & 0xff;
opc[2] = (value >> 8) & 0xff;
break;
default:
LOG_DBG("unsupported relocation type %u", type);
return;
}
xtensa_elf_relocate(ldr, ext, rel, text, (uintptr_t)link_addr, (uint8_t *)got_entry, type,
ELF_ST_BIND(sym->st_info));
LOG_DBG("relocation to %#x type %u at %p", *got_entry, type, (void *)got_entry);
}
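An aside on the R_XTENSA_SLOT0_OP branch above: L32R loads a 32-bit literal from a PC-relative, word-aligned address, so its 16-bit immediate counts 4-byte words from the rounded-up instruction address. A standalone sketch of the same computation (function name hypothetical):

#include <stdint.h>

static void patch_l32r(uint8_t *opc, uintptr_t literal_addr)
{
    /* Round the instruction address up to the next word boundary and
     * express the distance to the literal in words; this mirrors
     * (link_addr - ((got_entry + 3) & ~3)) >> 2 in the hunk above.
     */
    intptr_t value = ((intptr_t)literal_addr -
                      (intptr_t)(((uintptr_t)opc + 3) & ~(uintptr_t)3)) >> 2;

    opc[1] = value & 0xff;        /* immediate bits 7..0 */
    opc[2] = (value >> 8) & 0xff; /* immediate bits 15..8 */
}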

View file

@@ -140,7 +140,7 @@ FUNC_NORETURN void arch_syscall_oops(void *ssf)
#ifdef CONFIG_USERSPACE
void z_impl_xtensa_user_fault(unsigned int reason)
{
if ((arch_current_thread()->base.user_options & K_USER) != 0) {
if ((_current->base.user_options & K_USER) != 0) {
if ((reason != K_ERR_KERNEL_OOPS) &&
(reason != K_ERR_STACK_CHK_FAIL)) {
reason = K_ERR_KERNEL_OOPS;

View file

@@ -1086,7 +1086,7 @@ static int mem_buffer_validate(const void *addr, size_t size, int write, int rin
int ret = 0;
uint8_t *virt;
size_t aligned_size;
const struct k_thread *thread = arch_current_thread();
const struct k_thread *thread = _current;
uint32_t *ptables = thread_page_tables_get(thread);
/* addr/size arbitrary, fix this up into an aligned region */

View file

@@ -156,7 +156,7 @@ int arch_float_enable(struct k_thread *thread, unsigned int options)
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3)
{
struct k_thread *current = arch_current_thread();
struct k_thread *current = _current;
size_t stack_end;
/* Transition will reset stack pointer to initial, discarding

View file

@@ -34,7 +34,7 @@ extern char xtensa_arch_kernel_oops_epc[];
bool xtensa_is_outside_stack_bounds(uintptr_t addr, size_t sz, uint32_t ps)
{
uintptr_t start, end;
struct k_thread *thread = arch_current_thread();
struct k_thread *thread = _current;
bool was_in_isr, invalid;
/* Without userspace, there is no privileged stack so the thread stack

View file

@@ -14,7 +14,6 @@
#include <kernel_internal.h>
#include <string.h>
#include <zephyr/cache.h>
#include <zephyr/platform/hooks.h>
#include <zephyr/zsr.h>
#ifdef __cplusplus
@@ -26,9 +25,7 @@ K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
static ALWAYS_INLINE void arch_kernel_init(void)
{
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
}
void xtensa_switch(void *switch_to, void **switched_from);

View file

@@ -9,7 +9,6 @@
#include <espressif/esp32c3/esp32c3_fx4.dtsi>
#include "esp32c3_042_oled-pinctrl.dtsi"
#include <espressif/partitions_0x0_default.dtsi>
/ {
model = "01space ESP32C3 0.42 OLED";
@@ -105,3 +104,37 @@
&esp32_bt_hci {
status = "okay";
};
&flash0 {
partitions {
compatible = "fixed-partitions";
#address-cells = <1>;
#size-cells = <1>;
boot_partition: partition@0 {
label = "mcuboot";
reg = <0x00000000 0x0000F000>;
read-only;
};
slot0_partition: partition@10000 {
label = "image-0";
reg = <0x00010000 0x00100000>;
};
slot1_partition: partition@110000 {
label = "image-1";
reg = <0x00110000 0x00100000>;
};
scratch_partition: partition@210000 {
label = "image-scratch";
reg = <0x00210000 0x00040000>;
};
storage_partition: partition@250000 {
label = "storage";
reg = <0x00250000 0x00006000>;
};
};
};
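A usage sketch for the partition table above (not part of the diff), assuming a Zephyr tree where the flash map API resolves fixed-partition labels at build time:

#include <zephyr/storage/flash_map.h>

int open_storage(const struct flash_area **fa)
{
    /* FIXED_PARTITION_ID() maps the storage_partition node label from
     * the devicetree fragment above to a flash area ID.
     */
    return flash_area_open(FIXED_PARTITION_ID(storage_partition), fa);
}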

View file

@@ -14,6 +14,7 @@ testing:
- cmsis_rtos_v2
- net
- mpu
- tinycrypt
- crypto
- aes
- cmm

View file

@@ -12,4 +12,7 @@ CONFIG_UART_INTERRUPT_DRIVEN=y
CONFIG_CONSOLE=y
CONFIG_UART_CONSOLE=y
# pinctrl
CONFIG_PINCTRL=y
CONFIG_XIP=y

View file

@@ -46,7 +46,3 @@
status = "okay";
current-speed = <115200>;
};
&cpu {
compatible = "intel,x86_64";
};

View file

@@ -1,5 +0,0 @@
# Copyright (c) 2024 Daikin Comfort Technologies North America, Inc.
# SPDX-License-Identifier: Apache-2.0
config BOARD_ADAFRUIT_FEATHER_M4_EXPRESS
select SOC_SAMD51J19A

View file

@@ -1,51 +0,0 @@
/*
* Copyright (c) 2022, Gerson Fernando Budke <nandojve@gmail.com>
* Copyright (c) 2024 Daikin Comfort Technologies North America, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <dt-bindings/pinctrl/samd51j-pinctrl.h>
&pinctrl {
sercom1_spi_default: sercom1_spi_default {
group1 {
pinmux = <PB23C_SERCOM1_PAD3>,
<PA17C_SERCOM1_PAD1>,
<PB22C_SERCOM1_PAD2>;
};
};
sercom2_i2c_default: sercom3_i2c_default {
group1 {
pinmux = <PA12C_SERCOM2_PAD0>,
<PA13C_SERCOM2_PAD1>;
};
};
sercom5_uart_default: sercom5_uart_default {
group1 {
pinmux = <PB17C_SERCOM5_PAD1>,
<PB16C_SERCOM5_PAD0>;
};
};
pwm0_default: pwm0_default {
group1 {
pinmux = <PA22G_TCC0_WO2>;
};
};
pwm1_default: pwm1_default {
group1 {
pinmux = <PA18F_TCC1_WO2>,
<PA19F_TCC1_WO3>;
};
};
usb_dc_default: usb_dc_default {
group1 {
pinmux = <PA25H_USB_DP>,
<PA24H_USB_DM>;
};
};
};

View file

@@ -1,111 +0,0 @@
/*
* Copyright (c) 2020 Google LLC.
* Copyright (c) 2024 Daikin Comfort Technologies North America, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/dts-v1/;
#include <atmel/samd5xx19.dtsi>
#include "adafruit_feather_m4_express-pinctrl.dtsi"
/ {
model = "Adafruit Feather M4 Express";
compatible = "adafruit,feather-m4-express";
chosen {
zephyr,console = &sercom5;
zephyr,shell-uart = &sercom5;
zephyr,sram = &sram0;
zephyr,flash = &flash0;
zephyr,code-partition = &code_partition;
};
/* These aliases are provided for compatibility with samples */
aliases {
led0 = &led0;
pwm-0 = &tcc0;
};
leds {
compatible = "gpio-leds";
led0: led_0 {
gpios = <&porta 23 0>;
label = "LED";
};
};
};
&cpu0 {
clock-frequency = <120000000>;
};
&sercom5 {
status = "okay";
compatible = "atmel,sam0-uart";
current-speed = <115200>;
rxpo = <1>;
txpo = <0>;
pinctrl-0 = <&sercom5_uart_default>;
pinctrl-names = "default";
};
&sercom1 {
status = "okay";
compatible = "atmel,sam0-spi";
dipo = <3>;
dopo = <0>;
#address-cells = <1>;
#size-cells = <0>;
pinctrl-0 = <&sercom1_spi_default>;
pinctrl-names = "default";
};
&tcc0 {
status = "okay";
compatible = "atmel,sam0-tcc-pwm";
prescaler = <8>;
#pwm-cells = <2>;
pinctrl-0 = <&pwm0_default>;
pinctrl-names = "default";
};
zephyr_udc0: &usb0 {
status = "okay";
pinctrl-0 = <&usb_dc_default>;
pinctrl-names = "default";
};
&dmac {
status = "okay";
};
&flash0 {
partitions {
compatible = "fixed-partitions";
#address-cells = <1>;
#size-cells = <1>;
boot_partition: partition@0 {
label = "uf2";
reg = <0x00000000 DT_SIZE_K(16)>;
read-only;
};
code_partition: partition@4000 {
label = "code";
reg = <0x4000 DT_SIZE_K(512-16-16)>;
read-only;
};
/*
* The final 16 KiB is reserved for the application.
* Storage partition will be used by FCB/LittleFS/NVS
* if enabled.
*/
storage_partition: partition@7c000 {
label = "storage";
reg = <0x7c000 DT_SIZE_K(16)>;
};
};
};
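For reference, the removed layout accounts exactly for the SAMD51J19A's 512 KiB of flash:

uf2 bootloader: 0x00000 - 0x04000 (16 KiB)
code:           0x04000 - 0x7c000 (512 - 16 - 16 = 480 KiB)
storage:        0x7c000 - 0x80000 (16 KiB, for FCB/LittleFS/NVS)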

Some files were not shown because too many files have changed in this diff