everywhere: fix typos

Fix a lot of typos

Signed-off-by: Nazar Kazakov <nazar.kazakov.work@gmail.com>
Nazar Kazakov 2022-03-16 21:07:43 +00:00 committed by Anas Nashif
commit f483b1bc4c
757 changed files with 1284 additions and 1284 deletions

View file

@ -2,7 +2,7 @@ Title: Test to verify code fault handling in ISR execution context
and the behavior of irq_lock() and irq_unlock() when invoked
from User Mode. An additional test case verifies that null
pointer dereferencing attempts are detected and interpreted
as CPU fauls. Tests supported only on Cortex-M architecture.
as CPU faults. Tests supported only on Cortex-M architecture.
Description:

View file

@ -10,7 +10,7 @@ ARM Cortex-M targets. In detail the test verifies that
- PSPLIM is set to the main stack base (if applicable)
- FPU state is reset (if applicable)
- Interrupts are enabled when switching to main()
- Interrupts may be registerd and serviced
- Interrupts may be registered and serviced
- Activating PendSV triggers a Reserved Exception error
---------------------------------------------------------------------------

View file

@ -3,7 +3,7 @@ Title: Test to verify code execution from SRAM for XIP images (ARM Only)
Description:
This test verifies that we can define functions in SRAM (and
sucessfully execute them from SRAM) in ARM XIP images. It
successfully execute them from SRAM) in ARM XIP images. It
also verifies that the .ramfunc section is accessible by
nPRIV code when building with support for user mode
(CONFIG_USERSPACE=y). Only for ARM Cortex-M targets.

View file

@ -1,5 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Nordic Semicondutor ASA
# Copyright (c) 2021 Nordic Semiconductor ASA
config ARM_STORE_EXC_RETURN
bool

View file

@ -8,7 +8,7 @@
#
# Note, will need 64K HEAP_MEM per CPUs added.
#
# This doesn't necessarely include the Interrupt Translation Table, which are
# This doesn't necessarily include the Interrupt Translation Table, which are
# 256bytes aligned tables, for reference a 32 ITEs table needs 256bytes.
#
# To permit allocating 256 ITT tables of 32 ITEs, 13x64K HEAP_MEM is needed

View file

@ -17,7 +17,7 @@ void test_psci_func(void)
ver = psci_version();
zassert_false((PSCI_VERSION_MAJOR(ver) == 0 &&
PSCI_VERSION_MINOR(ver) < 2),
"Wrong PSCI firware version");
"Wrong PSCI firmware version");
/* This should return -PSCI_RET_ALREADY_ON that is mapped to -EINVAL */
ret = pm_cpu_on(0, 0);

View file

@ -486,7 +486,7 @@ int interrupt_test(void)
* main thread calling functions that don't involve
* exceptions. By experiment, calling spill_fn with a
* first (depth) argument of 6 or 7 results in a
* shallow call tree that won't throw exepctions. At
* shallow call tree that won't throw exceptions. At
* least we're executing real code which depends on
* its register state and validating that interrupts
* don't hurt.

View file

@ -13,7 +13,7 @@
*/
const static uint32_t dlog_N = 18;
/* rbnode structure is embeddedable in user structure */
/* rbnode structure is embeddable in user structure */
struct container_node {
struct rbnode node;
int value;
@ -34,7 +34,7 @@ bool node_lessthan(struct rbnode *a, struct rbnode *b)
* @details
* Test Objective:
* - Define and initialize a rbtree, and test two features:
* first, rbtree node struct can be embeded in any user struct.
* first, rbtree node struct can be embedded in any user struct.
* last, rbtree can be walked though by some macro APIs.
*
* Testing techniques:

View file

@ -45,7 +45,7 @@ K_SEM_DEFINE(sync_sema, 0, 1);
/**
*
* thread_onne
* thread_one
*
* Fiber makes all the test preparations: registers the interrupt handler,
* gets the first timestamp and invokes the software interrupt.

View file

@ -205,7 +205,7 @@ int lifo_test(void)
k_fifo_put(&sync_fifo, element);
}
/* test get wait & put functions between co-op and premptive threads */
/* test get wait & put functions between co-op and preemptive threads */
fprintf(output_file, sz_test_case_fmt,
"LIFO #3");
fprintf(output_file, sz_description,

View file

@ -203,7 +203,7 @@ int fifo_test(void)
k_fifo_put(&sync_fifo, element);
}
/* test get wait & put functions between co-op and premptive threads */
/* test get wait & put functions between co-op and preemptive threads */
fprintf(output_file, sz_test_case_fmt,
"FIFO #3");
fprintf(output_file, sz_description,

View file

@ -190,7 +190,7 @@ int stack_test(void)
return_value += check_result(i, t);
/* test get wait & put stack functions across co-op and premptive
/* test get wait & put stack functions across co-op and preemptive
* threads
*/
fprintf(output_file, sz_test_case_fmt,

View file

@ -127,7 +127,7 @@ void main(void)
number_of_loops = NUMBER_OF_LOOPS;
/* The following code is needed to make the benchmakring run on
/* The following code is needed to make the benchmarking run on
* slower platforms.
*/
uint64_t time_stamp = sys_clock_tick_get();

View file

@ -84,7 +84,7 @@ static int chan_recv_cb(struct bt_l2cap_chan *l2cap_chan, struct net_buf *buf)
struct channel *chan = CONTAINER_OF(l2cap_chan, struct channel, le);
const uint32_t received_iterration = net_buf_pull_le32(buf);
LOG_DBG("received_iterration %i sdus_receied %i, chan_id: %d, data_length: %d",
LOG_DBG("received_iterration %i sdus_received %i, chan_id: %d, data_length: %d",
received_iterration, chan->sdus_received, chan->chan_id, buf->len);
if (!TEST_FLAG(unsequenced_data) && received_iterration != chan->sdus_received) {
FAIL("Received out of sequence data.");
@ -97,7 +97,7 @@ static int chan_recv_cb(struct bt_l2cap_chan *l2cap_chan, struct net_buf *buf)
FAIL("Payload received didn't match expected value memcmp returned %i", retval);
}
/*By the time we rx on long msg channel we should have alrady rx on short msg channel*/
/*By the time we rx on long msg channel we should have already rx on short msg channel*/
if (chan->chan_id == 0) {
if (channels[SHORT_MSG_CHAN_IDX].sdus_received !=
(channels[LONG_MSG_CHAN_IDX].sdus_received + 1)) {
@ -239,7 +239,7 @@ static void disconnect_all_channels(void)
const int err = bt_l2cap_chan_disconnect(&channels[i].le.chan);
if (err) {
LOG_DBG("can't disconnnect channel (err: %d)", err);
LOG_DBG("can't disconnect channel (err: %d)", err);
}
channels[i].in_use = false;
@ -346,7 +346,7 @@ static void send_sdu(int iteration, int chan_idx, int bytes)
sys_put_le32(iteration, channels[chan_idx].payload);
if (channels[chan_idx].buf != 0) {
FAIL("Buf should have been dealocated by now");
FAIL("Buf should have been deallocated by now");
return;
}
@ -395,7 +395,7 @@ static void send_sdu_concurrently(void)
&channels[k].work);
if (err < 0) {
FAIL("Failed to submit work to the queque, error: %d", err);
FAIL("Failed to submit work to the queue, error: %d", err);
}
}

View file

@ -120,7 +120,7 @@ static int settings_custom_load(struct settings_store *cs, const struct settings
return fclose(fp);
}
/* Entries are saved to optimize redability of the settings file for test development and
/* Entries are saved to optimize readability of the settings file for test development and
* debugging purposes. Format:
* <entry-key>=<entry-value-hex-str>\n
*/
@ -171,7 +171,7 @@ static int settings_custom_save(struct settings_store *cs, const char *name,
(uint8_t)value[valcnt / 2]);
};
/* helps in making settings file redable */
/* helps in making settings file readable */
bufval[valcnt++] = '\n';
bufval[valcnt] = 0;

View file

@ -560,7 +560,7 @@ static void recv_jitter_check(int32_t interval, uint8_t count)
uint32_t jitter = 0;
int err;
/* The measurment starts by the first received message. */
/* The measurement starts by the first received message. */
err = k_sem_take(&publish_sem, K_SECONDS(20));
if (err) {
FAIL("Recv timed out");

View file

@ -843,7 +843,7 @@ static void test_cfg_save(void)
current_stack_cfg->net_transmit,
&transmit);
if (err || transmit != current_stack_cfg->net_transmit) {
FAIL("Net transmit set failed (err %d, trasmit %x)", err, transmit);
FAIL("Net transmit set failed (err %d, transmit %x)", err, transmit);
}
err = bt_mesh_cfg_relay_set(test_netkey_idx, TEST_ADDR,

View file

@ -293,7 +293,7 @@ static void test_tx_seg_concurrent(void)
* Send another message, then end the IV update state before it's finished.
* The IV index should change when this message finishes.
*
* The IV update should not interfer with the segmented message, and the
* The IV update should not interfere with the segmented message, and the
*/
static void test_tx_seg_ivu(void)
{

View file

@ -6,7 +6,7 @@ source $(dirname "${BASH_SOURCE[0]}")/../../_mesh_test.sh
# Check that disabling LPN gives correct behaviour.
#
# In this test the lpn node will enable the lpn feature, and then immediatley
# In this test the lpn node will enable the lpn feature, and then immediately
# disables it again. Then we check that that the lpn node is actually in a
# disabled state. This test ensures that a lpn disable call is not overwritten
# by a subsequent lpn enable call, since the enable call is associated with

View file

@ -4,7 +4,7 @@
source $(dirname "${BASH_SOURCE[0]}")/../../_mesh_test.sh
# Test receieves on group and virtual addresses in the LPN
# Test receives on group and virtual addresses in the LPN
RunTest mesh_friendship_msg_group \
friendship_lpn_group \
friendship_other_group \

View file

@ -13,7 +13,7 @@ conf=prj_pst_conf
RunTest mesh_persistence_reprovisioning persistence_reprovisioning_device \
persistence_reprovisioning_provisioner -- -argstest clear-settings=1
# Repeate the test
# Repeat the test
conf=prj_pst_conf
RunTest mesh_persistence_reprovisioning persistence_reprovisioning_device \
persistence_reprovisioning_provisioner

View file

@ -31,7 +31,7 @@ How to use it
Assuming you have already
`installed BabbleSim <https://babblesim.github.io/fetching.html>`_.
Add to your environment the variable EDTT_PATH poitining to the
Add to your environment the variable EDTT_PATH pointing to the
EDTT folder. You can do this by adding it to your `~/.bashrc`, `~/.zephyrrc`,
or similar something like:
```

View file

@ -179,7 +179,7 @@ static void edptd_create_fifo_if(void)
if (pb_com_path == NULL) {
bs_trace_error_line("Not connected to Phy."
"EDTT IF cannot be brough up\n");
"EDTT IF cannot be brought up\n");
}
/* At this point we have connected to the Phy so the COM folder does
@ -198,7 +198,7 @@ static void edptd_create_fifo_if(void)
if ((pb_create_fifo_if_not_there(fifo_path[TO_DEVICE]) != 0)
|| (pb_create_fifo_if_not_there(fifo_path[TO_BRIDGE]) != 0)) {
bs_trace_error_line("Couldnt create FIFOs for EDTT IF\n");
bs_trace_error_line("Couldn't create FIFOs for EDTT IF\n");
}
/* we block here until the bridge opens its end */

View file

@ -151,7 +151,7 @@ static void value_v6_indicate_cb(struct bt_conn *conn,
uint8_t err)
{
printk("Indication for attribute 'Value V6' %s\n",
(err) ? "failed" : "succeded");
(err) ? "failed" : "succeeded");
}
/**

View file

@ -12,7 +12,7 @@ function display_help(){
echo "run_parallel.sh [-help] [options]"
echo " Execute all cases which do not start with an _ (underscore)"
echo " [options] will be passed directly to the scripts"
echo " The results will be saved to \${RESULTS_FILE}, by deault"
echo " The results will be saved to \${RESULTS_FILE}, by default"
echo " ../RunResults.xml"
echo " Testcases are searched for in \${SEARCH_PATH}, by default this folder"
}

View file

@ -50,7 +50,7 @@ static void setup(void)
* +-----+ +-------+ +-----+
* | | |
* | Start initiation | |
* | CTE Reqest Proc. | |
* | CTE Request Proc. | |
* |--------------------------->| |
* | | |
* | | LL_LE_CTE_REQ |
@ -119,7 +119,7 @@ void test_cte_req_central_local(void)
* +-----+ +-------+ +-----+
* | | |
* | Start initiator | |
* | CTE Reqest Proc. | |
* | CTE Request Proc. | |
* |--------------------------->| |
* | | |
* | | LL_LE_CTE_REQ |
@ -189,7 +189,7 @@ void test_cte_req_peripheral_local(void)
* +-----+ +-------+ +-----+
* | | |
* | Start responder | |
* | CTE Reqest Proc. | |
* | CTE Request Proc. | |
* |--------------------------->| |
* | | |
* | | LL_LE_CTE_REQ |
@ -258,7 +258,7 @@ void test_cte_req_central_remote(void)
* +-----+ +-------+ +-----+
* | | |
* | Start responder | |
* | CTE Reqest Proc . | |
* | CTE Request Proc. | |
* |--------------------------->| |
* | | |
* | | LL_LE_CTE_REQ |
@ -329,7 +329,7 @@ void test_cte_req_peripheral_remote(void)
* +-----+ +-------+ +-----+
* | | |
* | Start initiation | |
* | CTE Reqest Proc. | |
* | CTE Request Proc. | |
* |--------------------------->| |
* | | |
* | | LL_LE_CTE_REQ |
@ -403,7 +403,7 @@ void test_cte_req_rejected_inv_ll_param_central_local(void)
* +-----+ +-------+ +-----+
* | | |
* | Start initiation | |
* | CTE Reqest Proc. | |
* | CTE Request Proc. | |
* |--------------------------->| |
* | | |
* | | LL_LE_CTE_REQ |
@ -653,7 +653,7 @@ static bool is_instant_reached(struct ll_conn *conn, uint16_t instant)
{
/* Check if instant is in the past.
*
* NOTE: If conn_event > instant then subtract operation will result in value greather than
* NOTE: If conn_event > instant then subtract operation will result in value greater than
* 0x7FFF for uint16_t type. This is based on modulo 65536 math. The 0x7FFF is
* maximum positive difference between actual value of connection event counter and
* instant.
@ -768,7 +768,7 @@ void check_phy_update_and_cte_req_complete(bool is_local, struct pdu_data_llctrl
event_prepare(&conn);
if (!is_local && cte_req != NULL) {
/* Handle remote PHY update request completion and local CTE reques in the same
/* Handle remote PHY update request completion and local CTE request in the same
* event.
*/
@ -830,7 +830,7 @@ void check_phy_update_and_cte_req_complete(bool is_local, struct pdu_data_llctrl
* @param is_local Flag informing if PHY request is local or remote.
* @param cte_req Parameters of CTE request procedure. If it is NULL there were no CTE
* request.
* @param phy_req Parameters of PHY update reques.
* @param phy_req Parameters of PHY update request.
* @param events_at_start Number of connection events at function start.
* @param ctx_num_at_end Expected number of free procedure contexts at function end.
*/
@ -885,7 +885,7 @@ static void run_phy_update_central(bool is_local, struct pdu_data_llctrl_cte_req
/* TX Ack */
event_tx_ack(&conn, tx);
/* Check that data tx is no lonnger paused */
/* Check that data tx is no longer paused */
zassert_equal(conn.tx_q.pause_data, 0U, "Data tx is paused");
/* Done */
@ -914,7 +914,7 @@ static void run_phy_update_central(bool is_local, struct pdu_data_llctrl_cte_req
* @param is_local Flag informing if PHY request is local or remote.
* @param cte_req Parameters of CTE request procedure. If it is NULL there were no CTE
* request.
* @param phy_req Parameters of PHY update reques.
* @param phy_req Parameters of PHY update request.
* @param events_at_start Number of connection events at function start.
* @param ctx_num_at_end Expected number of free procedure contexts at function end.
*/

View file

@ -390,7 +390,7 @@ void test_hci_conn_update(void)
zassert_equal(err, BT_HCI_ERR_UNKNOWN_CMD, "Errorcode %d", err);
}
/* Connection Update or Connecton Parameter Req. */
/* Connection Update or Connection Parameter Req. */
conn_from_pool->llcp.fex.features_used |= BIT64(BT_LE_FEAT_BIT_CONN_PARAM_REQ);
err = ll_conn_update(conn_handle, cmd, status, interval_min, interval_max, latency,
timeout);
@ -401,14 +401,14 @@ void test_hci_conn_update(void)
timeout);
zassert_equal(err, BT_HCI_ERR_SUCCESS, "Errorcode %d", err);
/* Connecton Parameter Req. Reply */
/* Connection Parameter Req. Reply */
cmd = 2U;
conn_from_pool->llcp.fex.features_used |= BIT64(BT_LE_FEAT_BIT_CONN_PARAM_REQ);
err = ll_conn_update(conn_handle, cmd, status, interval_min, interval_max, latency,
timeout);
zassert_equal(err, BT_HCI_ERR_SUCCESS, "Errorcode %d", err);
/* Connecton Parameter Req. Neg. Reply */
/* Connection Parameter Req. Neg. Reply */
status = 0x01;
conn_from_pool->llcp.fex.features_used |= BIT64(BT_LE_FEAT_BIT_CONN_PARAM_REQ);
err = ll_conn_update(conn_handle, cmd, status, 0U, 0U, 0U, 0U);

View file

@ -165,7 +165,7 @@ void test_phy_update_central_loc(void)
/* TX Ack */
event_tx_ack(&conn, tx);
/* Check that data tx is no lonnger paused */
/* Check that data tx is no longer paused */
zassert_equal(conn.tx_q.pause_data, 0U, "Data tx is paused");
/* Done */

View file

@ -134,7 +134,7 @@ void test_tx_buffer_alloc(void)
tx_alloc_idx++;
#else /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
/* Test that there are excactly LLCP_CONN * LLCP_TX_CTRL_BUF_NUM_MAX
/* Test that there are exactly LLCP_CONN * LLCP_TX_CTRL_BUF_NUM_MAX
* buffers available
*/
for (i = 0;

View file

@ -269,7 +269,7 @@ isoal_status_t sink_sdu_emit_test(const struct isoal_sink *sink_ctx,
zassert_equal(res, 0, "len=%u buf[0]=0x%x ref[0]=0x%0x",
sink_ctx->sdu_production.sdu_written, buf[0], pdu_ref[0]);
/* Advnace reference pointer, this will be needed when a PDU is split over multiple SDUs */
/* Advance reference pointer, this will be needed when a PDU is split over multiple SDUs */
pdu_ref += sink_ctx->sdu_production.sdu_written;
zassert_true(sdu_emit_expected, "");
clear_sdu_buf();
@ -629,7 +629,7 @@ void test_unframed_single_pdu_err(void)
/* Test recombine */
err = isoal_rx_pdu_recombine(sink_hdl, &pdu_meta);
zassert_equal(err, ISOAL_STATUS_OK, "err=0x%02x", err);
/* Expectig 0 data to be written but with error status */
/* Expecting 0 data to be written but with error status */
zassert_equal(sink->sdu_production.sdu_written, 0, "written=%u",
sink->sdu_production.sdu_written);
zassert_equal(sink->sdu_production.sdu_status, ISOAL_SDU_STATUS_ERRORS, "sdu_status=0x%x",
@ -655,7 +655,7 @@ void test_unframed_seq_err(void)
/* Test recombine, should now trigger emit since this is last PDU in SDU */
pdu_ref = &testdata[0];
err = isoal_rx_pdu_recombine(sink_hdl, &pdu_meta);
/* Expectig data to be written but with error status */
/* Expecting data to be written but with error status */
zassert_equal(err, ISOAL_STATUS_OK, "err=0x%02x", err);
zassert_equal(sink->sdu_production.sdu_written, 3+7, "written=%u",
sink->sdu_production.sdu_written);
@ -684,7 +684,7 @@ void test_unframed_seq_pdu_err(void)
/* Test recombine, should now trigger emit since this is last PDU in SDU */
pdu_ref = &testdata[0];
err = isoal_rx_pdu_recombine(sink_hdl, &pdu_meta);
/* Expectig no new data to be written but with error status */
/* Expecting no new data to be written but with error status */
zassert_equal(err, ISOAL_STATUS_OK, "err=0x%02x", err);
zassert_equal(sink->sdu_production.sdu_written, 3, "written=%u",
sink->sdu_production.sdu_written);
@ -694,7 +694,7 @@ void test_unframed_seq_pdu_err(void)
/**
* @brief Excercise padding pdu path
* @brief Exercise padding pdu path
*/
void test_unframed_padding(void)
{
@ -727,7 +727,7 @@ void test_unframed_padding(void)
payload_number++, 923749, 0);
err = isoal_rx_pdu_recombine(sink_hdl, &pdu_meta);
zassert_equal(err, ISOAL_STATUS_OK, "err=0x%02x", err);
/* Expectig 0 data to be written, with no error status */
/* Expecting 0 data to be written, with no error status */
zassert_equal(sink->sdu_production.sdu_written, 5+7+0, "written=%u",
sink->sdu_production.sdu_written);
zassert_equal(sink->sdu_production.sdu_status, ISOAL_SDU_STATUS_VALID, "sdu_status=0x%x",
@ -739,7 +739,7 @@ void test_unframed_padding(void)
payload_number++, 923750, 0);
err = isoal_rx_pdu_recombine(sink_hdl, &pdu_meta);
zassert_equal(err, ISOAL_STATUS_OK, "err=0x%02x", err);
/* Expectig 0 data to be written but with error status */
/* Expecting 0 data to be written but with error status */
zassert_equal(sink->sdu_production.sdu_written, 5+7+0, "written=%u",
sink->sdu_production.sdu_written);
zassert_equal(sink->sdu_production.sdu_status, ISOAL_SDU_STATUS_VALID, "sdu_status=0x%x",
@ -761,7 +761,7 @@ void test_unframed_padding_no_end(void)
payload_number++, 923749, 0);
err = isoal_rx_pdu_recombine(sink_hdl, &pdu_meta);
zassert_equal(err, ISOAL_STATUS_OK, "err=0x%02x", err);
/* Expectig 0 data to be written, with no error status */
/* Expecting 0 data to be written, with no error status */
zassert_equal(sink->sdu_production.sdu_written, 0, "written=%u",
sink->sdu_production.sdu_written);
zassert_equal(sink->sdu_production.sdu_status, ISOAL_SDU_STATUS_VALID, "sdu_status=0x%x",
@ -773,7 +773,7 @@ void test_unframed_padding_no_end(void)
payload_number++, 923750, 0);
err = isoal_rx_pdu_recombine(sink_hdl, &pdu_meta);
zassert_equal(err, ISOAL_STATUS_OK, "err=0x%02x", err);
/* Expectig 0 data to be written, with no error status */
/* Expecting 0 data to be written, with no error status */
zassert_equal(sink->sdu_production.sdu_written, 0, "written=%u",
sink->sdu_production.sdu_written);
zassert_equal(sink->sdu_production.sdu_status, ISOAL_SDU_STATUS_VALID, "sdu_status=0x%x",
@ -785,7 +785,7 @@ void test_unframed_padding_no_end(void)
payload_number++, 923751, 0);
err = isoal_rx_pdu_recombine(sink_hdl, &pdu_meta);
zassert_equal(err, ISOAL_STATUS_OK, "err=0x%02x", err);
/* Expectig 0 data to be written but with error status */
/* Expecting 0 data to be written but with error status */
zassert_equal(sink->sdu_production.sdu_written, 0, "written=%u",
sink->sdu_production.sdu_written);
zassert_equal(sink->sdu_production.sdu_status, ISOAL_SDU_STATUS_ERRORS, "sdu_status=0x%x",
@ -822,7 +822,7 @@ void test_unframed_padding_error1(void)
payload_number++, 923750, 0);
err = isoal_rx_pdu_recombine(sink_hdl, &pdu_meta);
zassert_equal(err, ISOAL_STATUS_OK, "err=0x%02x", err);
/* Expectig 0 data to be written, with no error status */
/* Expecting 0 data to be written, with no error status */
zassert_equal(sink->sdu_production.sdu_written, 0, "written=%u",
sink->sdu_production.sdu_written);
zassert_equal(sink->sdu_production.sdu_status, ISOAL_SDU_STATUS_ERRORS, "sdu_status=0x%x",
@ -834,7 +834,7 @@ void test_unframed_padding_error1(void)
payload_number++, 923751, 0);
err = isoal_rx_pdu_recombine(sink_hdl, &pdu_meta);
zassert_equal(err, ISOAL_STATUS_OK, "err=0x%02x", err);
/* Expectig 0 data to be written but with error status */
/* Expecting 0 data to be written but with error status */
zassert_equal(sink->sdu_production.sdu_written, 0, "written=%u",
sink->sdu_production.sdu_written);
zassert_equal(sink->sdu_production.sdu_status, ISOAL_SDU_STATUS_ERRORS, "sdu_status=0x%x",
@ -883,7 +883,7 @@ void test_unframed_padding_error2(void)
payload_number++, 923751, 0);
err = isoal_rx_pdu_recombine(sink_hdl, &pdu_meta);
zassert_equal(err, ISOAL_STATUS_OK, "err=0x%02x", err);
/* Expectig 0 data to be written but with error status */
/* Expecting 0 data to be written but with error status */
zassert_equal(sink->sdu_production.sdu_written, 5, "written=%u",
sink->sdu_production.sdu_written);
zassert_equal(sink->sdu_production.sdu_status, ISOAL_SDU_STATUS_ERRORS, "sdu_status=0x%x",
@ -952,7 +952,7 @@ void test_unframed_zero_len_packet(void)
payload_number++, 923751, 0);
err = isoal_rx_pdu_recombine(sink_hdl, &pdu_meta);
zassert_equal(err, ISOAL_STATUS_OK, "err=0x%02x", err);
/* Expectig 0 data to be written but with error status */
/* Expecting 0 data to be written but with error status */
zassert_equal(sink->sdu_production.sdu_written, 0, "written=%u",
sink->sdu_production.sdu_written);
zassert_equal(sink->sdu_production.sdu_status, ISOAL_SDU_STATUS_VALID, "sdu_status=0x%x",
@ -964,7 +964,7 @@ void test_unframed_zero_len_packet(void)
*/
void test_unframed_dbl_packet_no_end(void)
{
/* Test assumes two PDUs per SDU intyerval */
/* Test assumes two PDUs per SDU interval */
clear_sdu_buf();
construct_pdu_unframed(PDU_BIS_LLID_START_CONTINUE, &testdata[2], 5,
@ -1178,7 +1178,7 @@ void test_framed_single_pdu_err(void)
construct_pdu_framed_start(0, 1, pdu_ref, 10, payload_number++, 1000, 1 /* error */);
err = isoal_rx_pdu_recombine(sink_hdl, &pdu_meta);
zassert_equal(err, ISOAL_STATUS_OK, "err=0x%02x", err);
/* Expectig 0 data to be written but with error status */
/* Expecting 0 data to be written but with error status */
zassert_equal(sink->sdu_production.sdu_written, 0, "written=%u",
sink->sdu_production.sdu_written);
zassert_equal(sink->sdu_production.sdu_status, ISOAL_SDU_STATUS_ERRORS, "sdu_status=0x%x",

View file

@ -74,7 +74,7 @@ void helper_privacy_add(int skew)
ex_pos = (1 + skew) % CONFIG_BT_CTLR_RPA_CACHE_SIZE;
zassert_equal(pos, ex_pos, "");
/* check that a1 can no loger be found */
/* check that a1 can no longer be found */
pos = prpa_cache_find(&a1);
zassert_equal(pos, FILTER_IDX_NONE, "");
}

View file

@ -10,7 +10,7 @@
/**
* @brief This is a stub of a function that provides static address.
*
* This function is defined to silent wanring printed by Host during BT stack initialization due
* This function is defined to silent warning printed by Host during BT stack initialization due
* to lack of static address stored in controller.
*/
uint8_t hci_vendor_read_static_addr(struct bt_hci_vs_static_addr addrs[], uint8_t size)

View file

@ -335,7 +335,7 @@ void common_validate_per_adv_pdu(struct pdu_adv *pdu, enum test_pdu_ext_adv_type
}
/*
* @brief Helper function to prepre CTE configuration for a given advertising set.
* @brief Helper function to prepare CTE configuration for a given advertising set.
*
* Note: There is a single instance of CTE configuration. In case there is a need
* to use multiple advertising sets at once, all will use the same CTE configuration.

View file

@ -58,7 +58,7 @@ void test_add_number_of_cte_to_sigle_pdu_chain(void)
err = ll_df_set_cl_cte_tx_enable(handle, true);
zassert_equal(err, 0,
"Unexpected error while enabling CTE for periodic avertising chain, err: %d",
"Unexpected error while enabling CTE for periodic advertising chain, err: %d",
err);
/* Validate result */
@ -84,7 +84,7 @@ void test_add_cte_for_each_pdu_in_chain(void)
err = ll_df_set_cl_cte_tx_enable(handle, true);
zassert_equal(err, 0,
"Unexpected error while enabling CTE for periodic avertising chain, err: %d",
"Unexpected error while enabling CTE for periodic advertising chain, err: %d",
err);
/* Validate result */
@ -110,7 +110,7 @@ void test_add_cte_for_not_all_pdu_in_chain(void)
err = ll_df_set_cl_cte_tx_enable(handle, true);
zassert_equal(err, 0,
"Unexpected error while enabling CTE for periodic avertising chain, err: %d",
"Unexpected error while enabling CTE for periodic advertising chain, err: %d",
err);
/* Validate result */
@ -147,7 +147,7 @@ void test_add_cte_to_not_all_pdus_in_chain_enqueued_to_lll(void)
err = ll_df_set_cl_cte_tx_enable(handle, true);
zassert_equal(err, 0,
"Unexpected error while enabling CTE for periodic avertising chain, err: %d",
"Unexpected error while enabling CTE for periodic advertising chain, err: %d",
err);
/* Validate result */
@ -173,7 +173,7 @@ void test_add_cte_for_single_pdu_chain(void)
err = ll_df_set_cl_cte_tx_enable(handle, true);
zassert_equal(err, 0,
"Unexpected error while enabling CTE for periodic avertising chain, err: %d",
"Unexpected error while enabling CTE for periodic advertising chain, err: %d",
err);
/* Validate result */

View file

@ -61,7 +61,7 @@ void test_remove_cte_from_chain_extended_to_tx_all_cte(void)
err = ll_df_set_cl_cte_tx_enable(handle, false);
zassert_equal(err, 0,
"Unexpected error while disabling CTE for periodic avertising chain, err: %d",
"Unexpected error while disabling CTE for periodic advertising chain, err: %d",
err);
/* Validate result */
common_validate_per_adv_chain(adv, TEST_PER_ADV_SINGLE_PDU);
@ -85,12 +85,12 @@ void test_remove_cte_from_chain_where_each_pdu_includes_cte(void)
err = ll_df_set_cl_cte_tx_enable(handle, true);
zassert_equal(err, 0,
"Unexpected error while enabling CTE for periodic avertising chain, err: %d",
"Unexpected error while enabling CTE for periodic advertising chain, err: %d",
err);
err = ll_df_set_cl_cte_tx_enable(handle, false);
zassert_equal(err, 0,
"Unexpected error while disabling CTE for periodic avertising chain, err: %d",
"Unexpected error while disabling CTE for periodic advertising chain, err: %d",
err);
/* Validate result */
common_validate_per_adv_chain(adv, TEST_CTE_COUNT);
@ -116,7 +116,7 @@ void test_remove_cte_from_chain_with_more_pdu_than_cte(void)
err = ll_df_set_cl_cte_tx_enable(handle, false);
zassert_equal(err, 0,
"Unexpected error while disabling CTE for periodic avertising chain, err: %d",
"Unexpected error while disabling CTE for periodic advertising chain, err: %d",
err);
/* Validate result */
common_validate_per_adv_chain(adv, TEST_PER_ADV_CHAIN_LENGTH);
@ -142,7 +142,7 @@ void test_remove_cte_from_single_pdu_chain(void)
err = ll_df_set_cl_cte_tx_enable(handle, false);
zassert_equal(err, 0,
"Unexpected error while disabling CTE for periodic avertising chain, err: %d",
"Unexpected error while disabling CTE for periodic advertising chain, err: %d",
err);
/* Validate result */
common_validate_per_adv_chain(adv, TEST_PER_ADV_SINGLE_PDU);
@ -180,7 +180,7 @@ void remove_cte_from_chain_after_enqueue_to_lll(uint8_t cte_count, uint8_t init_
err = ll_df_set_cl_cte_tx_enable(handle, true);
zassert_equal(err, 0,
"Unexpected error while enabling CTE for periodic avertising chain, err: %d",
"Unexpected error while enabling CTE for periodic advertising chain, err: %d",
err);
/* Swap PDU double buffer and get new latest PDU data */
@ -215,7 +215,7 @@ void remove_cte_from_chain_after_enqueue_to_lll(uint8_t cte_count, uint8_t init_
err = ll_df_set_cl_cte_tx_enable(handle, false);
zassert_equal(err, 0,
"Unexpected error while disabling CTE for periodic avertising chain, err: %d",
"Unexpected error while disabling CTE for periodic advertising chain, err: %d",
err);
/* Validate result */
common_validate_per_adv_chain(adv, expected_pdu_in_chain_after_cte_disable);

View file

@ -1,4 +1,4 @@
# Incresed stack due to settings API usage
# Increased stack due to settings API usage
CONFIG_SYSTEM_WORKQUEUE_STACK_SIZE=2048
CONFIG_TEST=y

View file

@ -1,4 +1,4 @@
# Incresed stack due to settings API usage
# Increased stack due to settings API usage
CONFIG_SYSTEM_WORKQUEUE_STACK_SIZE=2048
CONFIG_TEST=y

View file

@ -247,7 +247,7 @@ void flash_init(void)
{
flash_dev = device_get_binding(DT_CHOSEN_ZEPHYR_FLASH_CONTROLLER_LABEL);
if (!flash_dev) {
TC_ERROR("flash controller initialization failedi\n");
TC_ERROR("flash controller initialization failed\n");
}
flash_test();
}

View file

@ -12,7 +12,7 @@
* The i2s_cavs driver is being used.
*
* In this test app, I2S transmission and reception are tested as follows:
* I2S port #3 of Intel S1000 is configured for birectional mode
* I2S port #3 of Intel S1000 is configured for bidirectional mode
* i.e., I2S_DIR_TX and I2S_DIR_RX
* After each frame is received, it is sent/looped back on the same I2S
* The transmit direction is started after 2 frames are queued. This is done to

View file

@ -48,7 +48,7 @@ struct spi_buf txb[MAX_TX_BUF], rxb;
struct spi_config spi_cfg_single, spi_cfg_dual, spi_cfg_quad;
/**
* @brief Test spi devcie
* @brief Test spi device
* @details
* - Find spi device
* - Read flash jedec id
@ -237,7 +237,7 @@ void test_spi_single_read(void)
spi_opcode = SPI_FAST_READ_DATA;
/* read data using spi single mode */
/* set the spi opreation code and address */
/* set the spi operation code and address */
memset(safbuf, 0, TEST_BUF_SIZE);
safbuf[0] = spi_opcode & 0xFFU;
safbuf[1] = SPI_TEST_ADDRESS & 0xFFFFFFU;
@ -297,7 +297,7 @@ void test_spi_dual_read(void)
spi_opcode = SPI_DUAL_FAST_READ_DATA;
/* read data using spi dual mode */
/* set the spi opreation code and address */
/* set the spi operation code and address */
memset(safbuf, 0, TEST_BUF_SIZE);
safbuf[0] = spi_opcode & 0xFFU;
safbuf[1] = SPI_TEST_ADDRESS & 0xFFFFFFU;
@ -476,7 +476,7 @@ void test_spi_quad_write(void)
spi_cfg_quad.cs = NULL;
/* write data using spi quad mode */
/* send quad wirte opcode and address using single mode */
/* send quad write opcode and address using single mode */
memset(safbuf, 0, TEST_BUF_SIZE);
safbuf[0] = SPI_QUAD_WRITE_DATA;
safbuf[1] = SPI_TEST_ADDRESS_2 & 0xFFFFFFU;
@ -539,7 +539,7 @@ void test_spi_quad_read(void)
spi_opcode = SPI_QUAD_FAST_READ_DATA;
/* read data using spi quad mode */
/* set the spi opreation code and address */
/* set the spi operation code and address */
memset(safbuf, 0, TEST_BUF_SIZE);
safbuf[0] = spi_opcode & 0xFFU;
safbuf[1] = SPI_TEST_ADDRESS_2 & 0xFFFFFFU;

View file

@ -161,7 +161,7 @@ static void test_rtc_offset(void)
diff = native_rtc_gettime_us(RTC_CLOCK_REALTIME) - start_rtc_time[0];
zassert_true(diff == offset, "Offseting RTC failed\n");
zassert_true(diff == offset, "Offsetting RTC failed\n");
}
void test_main(void)

View file

@ -1,4 +1,4 @@
# Test of the native_posix real timenes and RTC model
# Test of the native_posix real timeness and RTC model
tests:
boards.native_posix.rtc:
platform_allow: native_posix native_posix_64

View file

@ -42,7 +42,7 @@
* - AES128 CCM mode encryption RFC 3610 test vector #8
* - AES128 CCM mode encryption RFC 3610 test vector #9
* - AES128 CCM mode encryption No associated data
* - AES128 CCM mode encryption No payhoad data
* - AES128 CCM mode encryption No payload data
*/
#include <tinycrypt/ccm_mode.h>

View file

@ -335,7 +335,7 @@
#endif
/* Invalid value that is not supposed to be written by the driver. It is used
* to mark the sample buffer entries as empty. If needed, it can be overriden
* to mark the sample buffer entries as empty. If needed, it can be overridden
* for a particular board by providing a specific definition above.
*/
#if !defined(INVALID_ADC_VALUE)

View file

@ -18,7 +18,7 @@
#define ADC_2ND_CHANNEL_ID 1
#define INVALID_ADC_VALUE SHRT_MIN
/* Raw to milivolt conversion doesn't handle rounding */
/* Raw to millivolt conversion doesn't handle rounding */
#define MV_OUTPUT_EPS 2
#define SEQUENCE_STEP 100
@ -494,7 +494,7 @@ static void test_adc_emul_input_higher_than_ref(void)
/*
* Check samples - returned value should max out on reference value.
* Raw value shoudn't exceed resolution.
* Raw value shouldn't exceed resolution.
*/
check_samples(samples, ADC_REF_INTERNAL_MV, 0 /* step */,
1 /* channels */, 0 /* first channel data */,
@ -538,7 +538,7 @@ static void test_adc_emul_reference(void)
ret = adc_channel_setup(adc_dev, &channel_cfg);
zassert_not_equal(ret, 0,
"Setting up of the %d channel shuldn't succeeded",
"Setting up of the %d channel shouldn't succeeded",
ADC_2ND_CHANNEL_ID);
channel_setup(adc_dev, ADC_REF_INTERNAL, ADC_GAIN_1,

View file

@ -409,7 +409,7 @@ static void test_set_loopback(void)
/*
* Sending a message to the wild should work because we are in loopback mode
* and therfor ACK the frame ourselves
* and therefore ACK the frame ourselves
*/
static void test_send_and_forget(void)
{

View file

@ -23,7 +23,7 @@
* of the first filter by 3 and tests the corner case of the last filter
* is used.
* -# Test message sending and receiving
* -# Remove first fillter (gets free) and add an extended filter.
* -# Remove first filter (gets free) and add an extended filter.
* This shrinks bank 0 to 2 filters/bank which leads to a right shift
* of the first buffer by two.
* -# Test message sending and receiving.
@ -115,7 +115,7 @@ static void send_test_msg(const struct device *can_dev,
}
/*
* Test a more adcvanced filter handling. Add more than one filter at
* Test a more advanced filter handling. Add more than one filter at
* the same time, remove and change the filters before the message.
* This tests the internals filter handling of the driver itself.
*/

View file

@ -6,7 +6,7 @@
#include <device.h>
#include <drivers/sensor.h>
/* Mock of internal temperature sensore. */
/* Mock of internal temperature sensor. */
#ifdef CONFIG_TEMP_NRF5
#error "Cannot be enabled because it is being mocked"
#endif

View file

@ -88,7 +88,7 @@ static void request_cb(struct onoff_manager *mgr, struct onoff_client *cli,
zassert_true(err >= 0, "err: %d", err);
}
/* Test checks if premature clock release works ok. If clock is released befure
/* Test checks if premature clock release works ok. If clock is released before
* it is started it is the best to do that release from the callback to avoid
* waiting until clock is started in the release context.
*/

View file

@ -10,7 +10,7 @@
*/
/*
* Warning: HSE is not implmeneted on available boards, hence:
* Warning: HSE is not implemented on available boards, hence:
* This configuration is only available for build
*/

View file

@ -215,7 +215,7 @@ void test_set_top_value_with_alarm_instance(const char *dev_name)
counter_period_us = COUNTER_PERIOD_US_VAL;
} else {
/* if more counter drivers exist other than RTC,
the test vaule set to 20000 by default */
the test value set to 20000 by default */
counter_period_us = 20000;
}
top_cfg.ticks = counter_us_to_ticks(dev, counter_period_us);
@ -271,7 +271,7 @@ void test_set_top_value_without_alarm_instance(const char *dev_name)
counter_period_us = COUNTER_PERIOD_US_VAL;
} else {
/* if more counter drivers exist other than RTC,
the test vaule set to 20000 by default */
the test value set to 20000 by default */
counter_period_us = 20000;
}
dev = device_get_binding(dev_name);
@ -370,7 +370,7 @@ void test_single_shot_alarm_instance(const char *dev_name, bool set_top)
counter_period_us = COUNTER_PERIOD_US_VAL;
} else {
/* if more counter drivers exist other than RTC,
the test vaule set to 20000 by default */
the test value set to 20000 by default */
counter_period_us = 20000;
}
dev = device_get_binding(dev_name);
@ -517,7 +517,7 @@ void test_multiple_alarms_instance(const char *dev_name)
counter_period_us = COUNTER_PERIOD_US_VAL;
} else {
/* if more counter drivers exist other than RTC,
the test vaule set to 20000 by default */
the test value set to 20000 by default */
counter_period_us = 20000;
}
dev = device_get_binding(dev_name);
@ -683,10 +683,10 @@ void test_late_alarm_instance(const char *dev_name)
err = counter_set_guard_period(dev, guard,
COUNTER_GUARD_PERIOD_LATE_TO_SET);
zassert_equal(0, err, "%s: Unexcepted error", dev_name);
zassert_equal(0, err, "%s: Unexpected error", dev_name);
err = counter_start(dev);
zassert_equal(0, err, "%s: Unexcepted error", dev_name);
zassert_equal(0, err, "%s: Unexpected error", dev_name);
k_busy_wait(2*tick_us);
@ -735,10 +735,10 @@ void test_late_alarm_error_instance(const char *dev_name)
err = counter_set_guard_period(dev, guard,
COUNTER_GUARD_PERIOD_LATE_TO_SET);
zassert_equal(0, err, "%s: Unexcepted error", dev_name);
zassert_equal(0, err, "%s: Unexpected error", dev_name);
err = counter_start(dev);
zassert_equal(0, err, "%s: Unexcepted error", dev_name);
zassert_equal(0, err, "%s: Unexpected error", dev_name);
k_busy_wait(2*tick_us);
@ -801,7 +801,7 @@ static void test_short_relative_alarm_instance(const char *dev_name)
};
err = counter_start(dev);
zassert_equal(0, err, "%s: Unexcepted error", dev_name);
zassert_equal(0, err, "%s: Unexpected error", dev_name);
alarm_cfg.ticks = 1;
@ -903,7 +903,7 @@ static void test_cancelled_alarm_does_not_expire_instance(const char *dev_name)
};
err = counter_start(dev);
zassert_equal(0, err, "%s: Unexcepted error", dev_name);
zassert_equal(0, err, "%s: Unexpected error", dev_name);
for (int i = 0; i < us/2; ++i) {

View file

@ -554,10 +554,10 @@ void test_late_alarm_instance(const char *dev_name)
err = counter_set_guard_period(dev, guard,
COUNTER_GUARD_PERIOD_LATE_TO_SET);
zassert_equal(0, err, "%s: Unexcepted error", dev_name);
zassert_equal(0, err, "%s: Unexpected error", dev_name);
err = counter_start(dev);
zassert_equal(0, err, "%s: Unexcepted error", dev_name);
zassert_equal(0, err, "%s: Unexpected error", dev_name);
k_sleep(K_USEC(2 * tick_us));
@ -604,10 +604,10 @@ void test_late_alarm_error_instance(const char *dev_name)
err = counter_set_guard_period(dev, guard,
COUNTER_GUARD_PERIOD_LATE_TO_SET);
zassert_equal(0, err, "%s: Unexcepted error", dev_name);
zassert_equal(0, err, "%s: Unexpected error", dev_name);
err = counter_start(dev);
zassert_equal(0, err, "%s: Unexcepted error", dev_name);
zassert_equal(0, err, "%s: Unexpected error", dev_name);
k_sleep(K_USEC(2 * tick_us));
@ -666,7 +666,7 @@ static void test_short_relative_alarm_instance(const char *dev_name)
};
err = counter_start(dev);
zassert_equal(0, err, "%s: Unexcepted error", dev_name);
zassert_equal(0, err, "%s: Unexpected error", dev_name);
alarm_cfg.ticks = 1;
@ -761,7 +761,7 @@ static void test_cancelled_alarm_does_not_expire_instance(const char *dev_name)
};
err = counter_start(dev);
zassert_equal(0, err, "%s: Unexcepted error", dev_name);
zassert_equal(0, err, "%s: Unexpected error", dev_name);
for (int i = 0; i < us / 2; ++i) {

View file

@ -27,7 +27,7 @@
#define TEST_AREA_MAX ((FLASH_AREA_SIZE(storage)) + (FLASH_TEST_REGION_OFFSET))
#else
/* SoC emebded NVM */
/* SoC embedded NVM */
#define FLASH_DEVICE DT_CHOSEN_ZEPHYR_FLASH_CONTROLLER_LABEL
#ifdef CONFIG_TRUSTED_EXECUTION_NONSECURE

View file

@ -263,7 +263,7 @@ static void test_double_write(void)
rc = flash_write(flash_dev, FLASH_SIMULATOR_BASE_OFFSET,
&data, sizeof(data));
zassert_equal(0, rc, "flash_write should succedd");
zassert_equal(0, rc, "flash_write should succeed");
rc = flash_write(flash_dev, FLASH_SIMULATOR_BASE_OFFSET,
&data, sizeof(data));

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020 Nordic Smiconductor ASA
* Copyright (c) 2020 Nordic Semiconductor ASA
* SPDX-License-Identifier: Apache-2.0
*/

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021 Denx Software Enginerring GmbH
* Copyright (c) 2021 Denx Software Engineering GmbH
*
* SPDX-License-Identifier: Apache-2.0
*/

View file

@ -236,9 +236,9 @@ void test_sensor_handle_triggers(void)
/* check the result of the trigger channel */
zassert_equal(data.val1, trigger_elements[i].data.val1,
"retrived data is not match.");
"retrieved data is not match.");
zassert_equal(data.val2, trigger_elements[i].data.val2,
"retrived data is not match.");
"retrieved data is not match.");
/* set attributes for no trig dev */
zassert_equal(sensor_attr_set(dev_no_trig,
@ -263,7 +263,7 @@ void test_sensor_handle_triggers(void)
}
/**
* @brief Test unit coversion of sensor module
* @brief Test unit conversion of sensor module
* @details Verify helper function to convert acceleration from
* Gs to m/s^2 and from m/s^2 to Gs. Verify helper function
* to convert radians to degrees and degrees to radians. Verify

View file

@ -6,7 +6,7 @@ Some architectures support different ISA variants, each backed a different
multilib in a same compiler. Selecting wrong multilib may have adverse
effects on performance, or sometime produce broken executables altogether
(for example, ARM Cortex-M requires thumb2 multilib and will be broken with
default ("arm") multilib or "thumb" multlib). This app is a smoke-test
default ("arm") multilib or "thumb" multilib). This app is a smoke-test
for selecting non-wrong multilib - it uses operation(s) which guaranteedly
will call support routine(s) in libgcc and checks for expected result.
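
As a side note, here is a minimal sketch of the kind of check such a smoke test relies on. This is an illustration only, not the app's actual code: 64-bit division of volatile operands cannot be constant-folded, so on a 32-bit target the compiler must call a libgcc support routine (e.g. __aeabi_uldivmod on ARM), and a wrong multilib then shows up as a link error or a wrong result.

```c
/* Hypothetical illustration of a libgcc-dependent operation; values and the
 * check are chosen for this sketch and are not taken from the Zephyr app.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* volatile prevents the compiler from folding the division at build time */
	volatile uint64_t num = 1000000000000ULL;   /* 10^12 */
	volatile uint64_t den = 1000003ULL;

	/* 1000003 * 999997 = 10^12 - 9, so the expected quotient is 999997 */
	if (num / den == 999997ULL) {
		printf("64-bit division via libgcc helper looks OK\n");
		return 0;
	}

	printf("unexpected quotient: possibly a wrong multilib\n");
	return 1;
}
```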

View file

@ -306,7 +306,7 @@ void atomic_handler(void *p1, void *p2, void *p3)
* @brief Verify atomic operation with threads
*
* @details Creat two preempt threads with equal priority to
* atomiclly access the same atomic value. Because these preempt
* atomically access the same atomic value. Because these preempt
* threads are of equal priority, so enable time slice to make
* them scheduled. The thread will execute for some time.
* In this time, the two sub threads will be scheduled separately

View file

@ -87,7 +87,7 @@ void test_bitarray_declare(void)
SYS_BITARRAY_DEFINE(ba_128_bit, 128);
SYS_BITARRAY_DEFINE(ba_129_bit, 129);
/* Test SYS_BITFIELD_DECLARE by asserting that a sufficent number of uint32_t
/* Test SYS_BITFIELD_DECLARE by asserting that a sufficient number of uint32_t
* in the declared array are set as free to represent the number of bits
*/
@ -381,7 +381,7 @@ void alloc_and_free_loop(int divisor)
for (bit = 0U; bit < ba.num_bits; ++bit) {
cur_popcnt = get_bitarray_popcnt(&ba);
zassert_equal(cur_popcnt, expected_popcnt,
"bit count expeceted %u, got %u (at bit %u)",
"bit count expected %u, got %u (at bit %u)",
expected_popcnt, cur_popcnt, bit);
/* Allocate half of remaining bits */
@ -405,7 +405,7 @@ void alloc_and_free_loop(int divisor)
cur_popcnt = get_bitarray_popcnt(&ba);
zassert_equal(cur_popcnt, expected_popcnt,
"bit count expeceted %u, got %u (at bit %u)",
"bit count expected %u, got %u (at bit %u)",
expected_popcnt, cur_popcnt, bit);
/* Free all but the first bit of allocated region */
@ -459,14 +459,14 @@ void alloc_and_free_interval(void)
ret, cnt);
zassert_equal(offset, expected_offset,
"offset expeceted %u, got %u (cnt %u)",
"offset expected %u, got %u (cnt %u)",
expected_offset, offset, cnt);
expected_popcnt += 4;
cur_popcnt = get_bitarray_popcnt(&ba);
zassert_equal(cur_popcnt, expected_popcnt,
"bit count expeceted %u, got %u (cnt %u)",
"bit count expected %u, got %u (cnt %u)",
expected_popcnt, cur_popcnt, cnt);
@ -618,8 +618,8 @@ void test_bitarray_region_set_clear(void)
/**
* @brief Test find MSB and LSB operations
*
* @details Verify the functions that find out the most significiant
* bit and least significiant bit work as expected.
* @details Verify the functions that find out the most significant
* bit and least significant bit work as expected.
*
* @see find_msb_set(), find_lsb_set()
*/

View file

@ -86,7 +86,7 @@ void test_clock_uptime(void)
* @brief Test 32-bit clock cycle functionality
*
* @details
* Test Objectve:
* Test Objective:
* - The kernel architecture provide a 32bit monotonically increasing
* cycle counter
* - This routine tests the k_cycle_get_32() and k_uptime_get_32()
@ -109,9 +109,9 @@ void test_clock_uptime(void)
* - Success if cycles increase monotonically, failure otherwise.
*
* Test Procedure:
* -# At mili-second boundary, get cycles repeatedly by k_cycle_get_32()
* -# At milli-second boundary, get cycles repeatedly by k_cycle_get_32()
* till cycles increased
* -# At mili-second boundary, get cycles repeatedly by k_uptime_get_32()
* -# At milli-second boundary, get cycles repeatedly by k_uptime_get_32()
* till cycles increased
* -# Cross check cycles gotten by k_cycle_get_32() and k_uptime_get_32(),
* the delta cycle should be greater than 1 milli-second.
@ -246,7 +246,7 @@ void test_ms_time_duration(void)
zassert_true(tdata.stop_count == 0,
"stop %u not 0", tdata.stop_count);
/** cleanup environemtn */
/** cleanup environment */
k_timer_stop(&ktimer);
}
/**

View file

@ -86,7 +86,7 @@ Testing irq_disable() and irq_enable()
Testing some kernel context routines
Testing k_current_get() from an ISR and task
Testing k_is_in_isr() from an ISR
Testing k_is_in_isr() from a preemtible thread
Testing k_is_in_isr() from a preemptible thread
Spawning a thread from a task
Thread to test k_current_get() and k_is_in_isr()
Thread to test k_yield()

View file

@ -143,7 +143,7 @@ static ISR_INFO isr_info;
* @brief Test cpu idle function
*
* @details
* Test Objectve:
* Test Objective:
* - The kernel architecture provide an idle function to be run when the system
* has no work for the current CPU
* - This routine tests the k_cpu_idle() routine
@ -182,7 +182,7 @@ static void test_kernel_cpu_idle(void);
* @brief Test cpu idle function
*
* @details
* Test Objectve:
* Test Objective:
* - The kernel architecture provide an idle function to be run when the system
* has no work for the current CPU
* - This routine tests the k_cpu_atomic_idle() routine
@ -198,7 +198,7 @@ static void test_kernel_cpu_idle(void);
* - N/A
*
* Test Procedure:
* -# Record system time befor cpu enters idle state
* -# Record system time before cpu enters idle state
* -# Enter cpu idle state by k_cpu_atomic_idle()
* -# Record system time after cpu idle state is interrupted
* -# Compare the two system time values.
@ -573,7 +573,7 @@ static void test_kernel_interrupts(void)
* @details
* Test Objective:
* - To verify the kernel architecture layer shall provide a mechanism to
* simultenously mask all local CPU interrupts and return the previous mask
* simultaneously mask all local CPU interrupts and return the previous mask
* state for restoration.
* - This routine tests the routines for disabling and enabling interrupts.
* These include irq_disable() and irq_enable().
@ -634,7 +634,7 @@ static void test_kernel_timer_interrupts(void)
* @brief Test some context routines
*
* @details
* Test Objectve:
* Test Objective:
* - Thread context handles derived from context switches must be able to be
* restored upon interrupt exit
*
@ -967,7 +967,7 @@ static void delayed_thread(void *num, void *arg2, void *arg3)
}
/**
* @brief Test timouts
* @brief Test timeouts
*
* @ingroup kernel_context_tests
*
@ -991,7 +991,7 @@ static void test_busy_wait(void)
}
/**
* @brief Test timouts
* @brief Test timeouts
*
* @ingroup kernel_context_tests
*

View file

@ -11,7 +11,7 @@
#define MY_DRIVER_A "my_driver_A"
#define MY_DRIVER_B "my_driver_B"
/* define indivial driver A */
/* define individual driver A */
static int my_driver_A_do_this(const struct device *device, int foo, int bar)
{
return foo + bar;
@ -33,7 +33,7 @@ int common_driver_init(const struct device *dev)
return 0;
}
/* define indivial driver B */
/* define individual driver B */
static int my_driver_B_do_this(const struct device *device, int foo, int bar)
{
return foo - bar;

View file

@ -235,7 +235,7 @@ static void test_device_list(void)
zassert_false((devcount == 0), NULL);
}
/* this is for storing sequence during initializtion */
/* this is for storing sequence during initialization */
extern int init_level_sequence[4];
extern int init_priority_sequence[4];
extern unsigned int seq_level_cnt;

View file

@ -36,13 +36,13 @@
#define PRIORITY_4 4
/* this is for storing sequence during initializtion */
/* this is for storing sequence during initialization */
__pinned_bss int init_level_sequence[4] = {0};
__pinned_bss int init_priority_sequence[4] = {0};
__pinned_bss unsigned int seq_level_cnt;
__pinned_bss unsigned int seq_priority_cnt;
/* define driver type 1: for testing initialize levels and priorites */
/* define driver type 1: for testing initialize levels and priorities */
typedef int (*my_api_configure_t)(const struct device *dev, int dev_config);
struct my_driver_api {
@ -130,7 +130,7 @@ static int my_driver_pri_4_init(const struct device *dev)
* @brief Test providing control device driver initialization order
*
* @details Test that kernel shall provide control over device driver
* initalization order, using initialization level and priority for each
* initialization order, using initialization level and priority for each
* instance. We use DEVICE_DEFINE to define device instances and set
* it's level and priority here, then we run check function later after
* all of this instance finish their initialization.
@ -154,7 +154,7 @@ DEVICE_DEFINE(my_driver_level_4, MY_DRIVER_LV_4, &my_driver_lv_4_init,
CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &funcs_my_drivers);
/* We use priority value of 20 to create a possible sorting conflict with
* priority value of 2. So if the linker sorting isn't woring correctly
* priority value of 2. So if the linker sorting isn't working correctly
* we'll find out.
*/
DEVICE_DEFINE(my_driver_priority_4, MY_DRIVER_PRI_4,

View file

@ -71,10 +71,10 @@ void isr_handler(const void *param)
orig_t_keep_run = 0;
/* If the work is busy, we don't sumbit it. */
/* If the work is busy, we don't submit it. */
if (!k_work_busy_get(work)) {
zassert_equal(k_work_submit_to_queue(&wq_queue, work),
1, "kwork not sumbmitted or queued");
1, "kwork not submitted or queued");
atomic_inc(&submit_success);
}
@ -104,7 +104,7 @@ static void init_dyn_interrupt(void)
ztest_test_skip();
}
/* We just initialize dynamic interrput once, then reuse them */
/* We just initialize dynamic interrupt once, then reuse them */
if (!vector_num) {
vector_num = irq_connect_dynamic(TEST_IRQ_DYN_LINE, 1,
isr_handler, (void *)&irq_param, 0);

View file

@ -555,7 +555,7 @@ void test_mbox_get_put_block_data(void)
* @brief Test mailbox enhance capabilities
*
* @details
* - Define and initilized a message queue and a mailbox
* - Define and initialized a message queue and a mailbox
* - Verify the capability of message queue and mailbox
* - with same data.
*

View file

@ -65,7 +65,7 @@ void test_shared_multi_heap(void)
reg = get_reg_addr(block);
zassert_equal(reg->addr, 0x42000000, "block in the wrong memory region");
zassert_equal(reg->attr, SMH_REG_ATTR_CACHEABLE, "wrong memery attribute");
zassert_equal(reg->attr, SMH_REG_ATTR_CACHEABLE, "wrong memory attribute");
/*
* Request another small cacheable chunk. It should be allocated in the

View file

@ -481,7 +481,7 @@ void futex_wake(void *p1, void *p2, void *p3)
zassert_equal(ret_value, 0, NULL);
/* Test user can write to the futex value
* Use assertion to verify substraction correctness
* Use assertion to verify subtraction correctness
* Initial value was 13, after atomic_sub() must be 12
*/
atomic_sub(&simple_futex.val, 1);

View file

@ -92,7 +92,7 @@ static void transplanted_function(bool *executed)
}
/**
* Show that mapping with/withour K_MEM_PERM_EXEC works as expected
* Show that mapping with/without K_MEM_PERM_EXEC works as expected
*
* @ingroup kernel_memprotect_tests
*/

View file

@ -90,7 +90,7 @@ static void test_thread_1_for_SU(void *p1, void *p2, void *p3)
* - Then check child thread can't access to the parent thread object using API
* command k_thread_priority_get()
* - At the same moment that test verifies that child thread was granted
* permission on a kernel objects. That meanis child user thread caller
* permission on a kernel objects. That means child user thread caller
* already has permission on the thread objects being granted.
* @ingroup kernel_memprotect_tests

View file

@ -138,7 +138,7 @@ static void syscall_invalid_kobject_user_part(void *p1, void *p2, void *p3)
{
k_sem_give(&kobject_sem);
/* should causdde a fault */
/* should cause a fault */
set_fault_valid(true);
/* should cause fault. typecasting to override compiler warning */
@ -362,7 +362,7 @@ void test_kobject_release_from_user(void)
}
/**
* @brief Test release and access grant an invaild kobject
* @brief Test release and access grant an invalid kobject
*
* @details Validate release and access grant an invalid kernel object.
*
@ -631,7 +631,7 @@ static void new_thread_from_user_child(void *p1, void *p2, void *p3)
* - Test user thread can create new thread.
* - Verify that given thread and thread stack permissions to the user thread,
* allow to create new user thread.
* - Veify that new created user thread have access to its own thread object
* - Verify that new created user thread have access to its own thread object
* by aborting itself.
*
* @ingroup kernel_memprotect_tests
@ -679,7 +679,7 @@ static void new_user_thrd_child_with_in_use_stack(void *p1, void *p2, void *p3)
/**
* @brief Test create new user thread from a user thread with in-use stack obj
*
* @details The kernel must prevent new user threads to use initiliazed (in-use)
* @details The kernel must prevent new user threads to use initialized (in-use)
* stack objects. In that case extra_thread is going to be create with in-use
* stack object child_stack. That will generate error, showing that kernel
* memory protection is working correctly.
@ -965,7 +965,7 @@ static void higher_prio_from_user_child(void *p1, void *p2, void *p3)
zassert_unreachable("k_object validation failure in k thread create");
}
/**
* @brief Thread creation with prority is higher than current thread
* @brief Thread creation with priority is higher than current thread
*
* @details _handler_k_thread_create validation.
*
@ -1012,7 +1012,7 @@ static void invalid_prio_from_user_child(void *p1, void *p2, void *p3)
zassert_unreachable("k_object validation failure in k thread create");
}
/**
* @brief Create a new thread whose prority is invalid.
* @brief Create a new thread whose priority is invalid.
*
* @details _handler_k_thread_create validation.
*
@ -1093,7 +1093,7 @@ void test_mark_thread_exit_uninitialized(void)
}
/****************************************************************************/
/* object validatoin checks */
/* object validation checks */
static void tThread_object_free_error(void *p1, void *p2, void *p3)
{
@ -1131,7 +1131,7 @@ void test_kobject_free_error(void)
/**
* @brief Test alloc an invalid kernel object
*
* @details Allocate invalid kernel objects, then no alloction
* @details Allocate invalid kernel objects, then no allocation
* will be returned.
*
* @ingroup kernel_memprotect_tests
@ -1140,7 +1140,7 @@ void test_kobject_free_error(void)
*/
void test_kobject_init_error(void)
{
/* invalid kernel object alloction */
/* invalid kernel object allocation */
zassert_is_null(k_object_alloc(K_OBJ_ANY-1),
"expected got NULL kobject");
zassert_is_null(k_object_alloc(K_OBJ_LAST),

View file

@ -551,7 +551,7 @@ void test_mem_part_add_error_zerosize(void)
/**
* @brief Test error case of memory partition address wraparound
*
* @details Try to add a partition whose adddress is wraparound.
* @details Try to add a partition whose address is wraparound.
* k_mem_domain_add_partition() should return error.
*
* @ingroup kernel_memprotect_tests

View file

@ -166,7 +166,7 @@ void test_simple_sem_from_isr(void)
signal_count = sys_sem_count_get(&simple_sem);
zassert_true(signal_count == (i + 1),
"signal count missmatch Expected %d, got %d",
"signal count mismatch Expected %d, got %d",
(i + 1), signal_count);
}
@ -186,7 +186,7 @@ void test_simple_sem_from_task(void)
signal_count = sys_sem_count_get(&simple_sem);
zassert_true(signal_count == (i + 1),
"signal count missmatch Expected %d, got %d",
"signal count mismatch Expected %d, got %d",
(i + 1), signal_count);
}
@ -208,7 +208,7 @@ void test_sem_take_no_wait(void)
signal_count = sys_sem_count_get(&simple_sem);
zassert_true(signal_count == i,
"signal count missmatch Expected %d, got %d",
"signal count mismatch Expected %d, got %d",
i, signal_count);
}
@ -231,7 +231,7 @@ void test_sem_take_no_wait_fails(void)
signal_count = sys_sem_count_get(&simple_sem);
zassert_true(signal_count == 0U,
"signal count missmatch Expected 0, got %d",
"signal count mismatch Expected 0, got %d",
signal_count);
}
@ -444,7 +444,7 @@ void test_sem_give_take_from_isr(void)
signal_count = sys_sem_count_get(&simple_sem);
zassert_true(signal_count == i + 1,
"signal count missmatch Expected %d, got %d",
"signal count mismatch Expected %d, got %d",
i + 1, signal_count);
}
@ -454,7 +454,7 @@ void test_sem_give_take_from_isr(void)
signal_count = sys_sem_count_get(&simple_sem);
zassert_true(signal_count == (i - 1),
"signal count missmatch Expected %d, got %d",
"signal count mismatch Expected %d, got %d",
(i - 1), signal_count);
}
}
@ -477,7 +477,7 @@ void test_sem_give_limit(void)
signal_count = sys_sem_count_get(&simple_sem);
zassert_true(signal_count == i + 1,
"signal count missmatch Expected %d, got %d",
"signal count mismatch Expected %d, got %d",
i + 1, signal_count);
}
@ -486,14 +486,14 @@ void test_sem_give_limit(void)
if (ret_value == -EAGAIN) {
signal_count = sys_sem_count_get(&simple_sem);
zassert_true(signal_count == SEM_MAX_VAL,
"signal count missmatch Expected %d, got %d",
"signal count mismatch Expected %d, got %d",
SEM_MAX_VAL, signal_count);
sys_sem_take(&simple_sem, K_FOREVER);
} else if (ret_value == 0) {
signal_count = sys_sem_count_get(&simple_sem);
zassert_true(signal_count == SEM_MAX_VAL,
"signal count missmatch Expected %d, got %d",
"signal count mismatch Expected %d, got %d",
SEM_MAX_VAL, signal_count);
}
} while (ret_value == -EAGAIN);
@ -546,12 +546,12 @@ void test_sem_multiple_threads_wait(void)
signal_count = sys_sem_count_get(&simple_sem);
zassert_true(signal_count == 0U,
"signal count missmatch Expected 0, got %d",
"signal count mismatch Expected 0, got %d",
signal_count);
signal_count = sys_sem_count_get(&multiple_thread_sem);
zassert_true(signal_count == 0U,
"signal count missmatch Expected 0, got %d",
"signal count mismatch Expected 0, got %d",
signal_count);
repeat_count++;

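For reference, a minimal sketch of the sys_sem calls the tests above exercise (give until the limit is reached, then check the count); the limit value, include style, and function name are illustrative assumptions, not code from this commit.

#include <zephyr.h>
#include <sys/sem.h>

#define DEMO_SEM_LIMIT 5

static struct sys_sem demo_sem;

void sys_sem_sketch(void)
{
	sys_sem_init(&demo_sem, 0, DEMO_SEM_LIMIT);

	/* Each give bumps the count until the limit is reached... */
	for (int i = 0; i < DEMO_SEM_LIMIT; i++) {
		(void)sys_sem_give(&demo_sem);
	}

	/* ...after which sys_sem_give() reports -EAGAIN, as checked
	 * in test_sem_give_limit() above.
	 */
	int ret = sys_sem_give(&demo_sem);

	/* The count queried here is what the zassert_true() calls above
	 * compare against the expected number of gives and takes.
	 */
	unsigned int count = sys_sem_count_get(&demo_sem);

	/* Take with no wait; returns 0 on success, negative on timeout. */
	(void)sys_sem_take(&demo_sem, K_NO_WAIT);

	(void)ret;
	(void)count;
}
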
View file

@ -364,7 +364,7 @@ K_APP_DMEM(default_part) int32_t size = (0 - CONFIG_PRIVILEGED_STACK_SIZE -
#endif
/**
* @brief Test to read provileged stack
* @brief Test to read privileged stack
*
* @ingroup kernel_memprotect_tests
*/

View file

@ -76,7 +76,7 @@ FUNC_NORETURN void cpu1_fn(void *arg)
* -# Enter a while loop and wait for cpu_running equals to 1.
* -# In target function, check if the address is &cpu_arg and its content
* equal to 12345.
* -# Set the global flag varible cpu_running to 1.
* -# Set the global flag variable cpu_running to 1.
* -# In main thread, check if the cpu_running equals to 1.
*
* Expected Test Result:

View file

@ -267,7 +267,7 @@ void test_msgq_usage(void)
/* waiting to continue */
k_sem_take(&test_continue, K_FOREVER);
/* rather than schedule this thread by k_msleep(), use semaphor with
/* rather than schedule this thread by k_msleep(), use semaphore with
* a timeout value, so there is no give operation over service_sema
*/
TC_PRINT("try to kill service1\n");

View file

@ -111,7 +111,7 @@ static void tThread_T1_priority_inheritance(void *p1, void *p2, void *p3)
/* record its original priority */
int priority_origin = k_thread_priority_get((k_tid_t)p2);
/* wait for a time period to see if priority inheritance happended */
/* wait for a time period to see if priority inheritance happened */
k_sleep(K_MSEC(500));
int priority = k_thread_priority_get((k_tid_t)p2);
@ -182,7 +182,7 @@ static void tThread_waiter(void *p1, void *p2, void *p3)
zassert_true(k_mutex_lock((struct k_mutex *)p1, K_FOREVER) == 0,
"Failed to get the test_mutex");
/* keep the next waiter waitting for a while */
/* keep the next waiter waiting for a while */
thread_ret = TC_PASS;
k_mutex_unlock((struct k_mutex *)p1);
}
@ -285,12 +285,12 @@ void test_mutex_recursive(void)
/**
* @brief Test mutex's priority inheritance mechanism
* @details To verify mutex provide priority inheritance to prevent prority
* @details To verify mutex provide priority inheritance to prevent priority
* inversion, and there are 3 cases need to run.
* The thread T1 hold the mutex first and cases list as below:
* - case 1. When prority T2 > T1, priority inheritance happened.
* - case 2. When prority T1 > T2, priority inheritance won't happened.
* - case 3. When prority T2 > T3 > T1, priority inheritance happened but T2
* - case 1. When priority T2 > T1, priority inheritance happened.
* - case 2. When priority T1 > T2, priority inheritance won't happened.
* - case 3. When priority T2 > T3 > T1, priority inheritance happened but T2
* wait for timeout and T3 got the mutex.
* @ingroup kernel_mutex_tests
*/

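For reference, a minimal sketch of the priority-inheritance scenario described above: a low-priority holder is temporarily boosted while a higher-priority thread blocks on the same mutex. Thread entry names and the sleep interval are illustrative assumptions, not code from this commit.

#include <zephyr.h>

K_MUTEX_DEFINE(demo_mutex);

/* Low-priority holder (plays the role of T1 above). */
void low_prio_holder(void *p1, void *p2, void *p3)
{
	k_mutex_lock(&demo_mutex, K_FOREVER);

	/* While a higher-priority waiter is blocked on demo_mutex, the
	 * kernel raises this thread's effective priority, which is what
	 * k_thread_priority_get() observes in the test above.
	 */
	k_sleep(K_MSEC(500));

	/* Unlocking drops the priority back to its original value. */
	k_mutex_unlock(&demo_mutex);
}

/* Higher-priority waiter (plays the role of T2 above). */
void high_prio_waiter(void *p1, void *p2, void *p3)
{
	k_mutex_lock(&demo_mutex, K_FOREVER);
	k_mutex_unlock(&demo_mutex);
}
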
View file

@ -52,7 +52,7 @@ static void tThread_entry_negative(void *p1, void *p2, void *p3)
TC_PRINT("current case is %d\n", choice);
/* Set up the fault or assert are expected before we call
* the target tested funciton.
* the target tested function.
*/
switch (choice) {
case MUTEX_INIT_NULL:

View file

@ -310,7 +310,7 @@ void test_pending_fifo(void)
k_work_submit_to_queue(&offload_work_q, &offload1.work_item);
/*
* Verify that preemiptible threads 'task_high' and 'task_low' do not
* Verify that preemptible threads 'task_high' and 'task_low' do not
* busy-wait. If they are not busy-waiting, then they must be pending.
*/

View file

@ -36,7 +36,7 @@ ZTEST_BMEM uint8_t rx_buffer[PIPE_SIZE + 1];
/* the pipe will always pass */
#define NO_CONSTRAINT (0U)
/* Pipe will atleast put one byte */
/* Pipe will at least put one byte */
#define ATLEAST_1 (1U)
/* Pipe must put all data on the buffer */
@ -46,7 +46,7 @@ ZTEST_BMEM uint8_t rx_buffer[PIPE_SIZE + 1];
#define TIMEOUT_VAL (K_MSEC(10))
#define TIMEOUT_200MSEC (K_MSEC(200))
/* encompasing structs */
/* encompassing structs */
struct pipe_sequence {
uint32_t size;
uint32_t min_size;
@ -156,12 +156,12 @@ void pipe_put_single(void)
zassert_true((return_value ==
single_elements[index].return_value),
" Return value of k_pipe_put missmatch at index = %d expected =%d received = %d\n",
" Return value of k_pipe_put mismatch at index = %d expected =%d received = %d\n",
index,
single_elements[index].return_value, return_value);
zassert_true((written == single_elements[index].sent_bytes),
"Bytes written missmatch written is %d but expected is %d index = %d\n",
"Bytes written mismatch written is %d but expected is %d index = %d\n",
written,
single_elements[index].sent_bytes, index);
@ -194,12 +194,12 @@ void pipe_get_single(void *p1, void *p2, void *p3)
zassert_true((return_value ==
single_elements[index].return_value),
"Return value of k_pipe_get missmatch at index = %d expected =%d received = %d\n",
"Return value of k_pipe_get mismatch at index = %d expected =%d received = %d\n",
index, single_elements[index].return_value,
return_value);
zassert_true((read == single_elements[index].sent_bytes),
"Bytes read missmatch read is %d but expected is %d index = %d\n",
"Bytes read mismatch read is %d but expected is %d index = %d\n",
read, single_elements[index].sent_bytes, index);
zassert_true(rx_buffer_check(rx_buffer, read) == read,
@ -231,13 +231,13 @@ void pipe_put_multiple(void)
zassert_true((return_value ==
multiple_elements[index].return_value),
"Return value of k_pipe_put missmatch at index = %d expected =%d received = %d\n",
"Return value of k_pipe_put mismatch at index = %d expected =%d received = %d\n",
index,
multiple_elements[index].return_value,
return_value);
zassert_true((written == multiple_elements[index].sent_bytes),
"Bytes written missmatch written is %d but expected is %d index = %d\n",
"Bytes written mismatch written is %d but expected is %d index = %d\n",
written,
multiple_elements[index].sent_bytes, index);
if (return_value != RETURN_SUCCESS) {
@ -272,12 +272,12 @@ void pipe_get_multiple(void *p1, void *p2, void *p3)
zassert_true((return_value ==
multiple_elements[index].return_value),
"Return value of k_pipe_get missmatch at index = %d expected =%d received = %d\n",
"Return value of k_pipe_get mismatch at index = %d expected =%d received = %d\n",
index, multiple_elements[index].return_value,
return_value);
zassert_true((read == multiple_elements[index].sent_bytes),
"Bytes read missmatch read is %d but expected is %d index = %d\n",
"Bytes read mismatch read is %d but expected is %d index = %d\n",
read, multiple_elements[index].sent_bytes, index);
zassert_true(rx_buffer_check(rx_buffer, read) == read,
@ -591,12 +591,12 @@ void pipe_put_forever_timeout(void)
zassert_true((return_value ==
wait_elements[index].return_value),
"Return value of k_pipe_put missmatch at index = %d expected =%d received = %d\n",
"Return value of k_pipe_put mismatch at index = %d expected =%d received = %d\n",
index, wait_elements[index].return_value,
return_value);
zassert_true((written == wait_elements[index].sent_bytes),
"Bytes written missmatch written is %d but expected is %d index = %d\n",
"Bytes written mismatch written is %d but expected is %d index = %d\n",
written, wait_elements[index].sent_bytes, index);
}
@ -625,12 +625,12 @@ void pipe_get_forever_timeout(void *p1, void *p2, void *p3)
zassert_true((return_value ==
wait_elements[index].return_value),
"Return value of k_pipe_get missmatch at index = %d expected =%d received = %d\n",
"Return value of k_pipe_get mismatch at index = %d expected =%d received = %d\n",
index, wait_elements[index].return_value,
return_value);
zassert_true((read == wait_elements[index].sent_bytes),
"Bytes read missmatch read is %d but expected is %d index = %d\n",
"Bytes read mismatch read is %d but expected is %d index = %d\n",
read, wait_elements[index].sent_bytes, index);
@ -659,12 +659,12 @@ void pipe_put_get_timeout(void)
zassert_true((return_value ==
timeout_elements[index].return_value),
"Return value of k_pipe_get missmatch at index = %d expected =%d received = %d\n",
"Return value of k_pipe_get mismatch at index = %d expected =%d received = %d\n",
index, timeout_elements[index].return_value,
return_value);
zassert_true((read == timeout_elements[index].sent_bytes),
"Bytes read missmatch read is %d but expected is %d index = %d\n",
"Bytes read mismatch read is %d but expected is %d index = %d\n",
read, timeout_elements[index].sent_bytes, index);

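For reference, a minimal sketch of the k_pipe_put()/k_pipe_get() "minimum transfer" semantics the tests above exercise (NO_CONSTRAINT, ATLEAST_1, ALL_BYTES); the buffer sizes and timeouts are illustrative assumptions, not code from this commit.

#include <zephyr.h>

K_PIPE_DEFINE(demo_pipe, 64, 4);

void pipe_sketch(void)
{
	uint8_t tx[16] = { 0 };
	uint8_t rx[16];
	size_t written = 0, read = 0;

	/* min_xfer == sizeof(tx): succeed only if every byte fits. */
	int ret = k_pipe_put(&demo_pipe, tx, sizeof(tx), &written,
			     sizeof(tx), K_NO_WAIT);

	/* min_xfer == 1: accept a partial transfer of at least one byte. */
	ret = k_pipe_put(&demo_pipe, tx, sizeof(tx), &written, 1, K_MSEC(10));

	/* The reader side mirrors the same constraints. */
	ret = k_pipe_get(&demo_pipe, rx, sizeof(rx), &read, 1, K_FOREVER);

	(void)ret;
}
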
View file

@ -143,7 +143,7 @@ void test_pipe_avail_w_lt_r(void)
* @ref k_pipe.bytes_used is zero.
*
* In this case, @ref k_pipe.bytes_used is relevant because the read and
* write indeces are equal.
* write indices are equal.
*
* r
* w
@ -186,7 +186,7 @@ void test_pipe_avail_r_eq_w_empty(void)
* @ref k_pipe.bytes_used is equal to @ref k_pipe.size.
*
* In this case, @ref k_pipe.bytes_used is relevant because the read and
* write indeces are equal.
* write indices are equal.
*
* r
* w

View file

@ -714,7 +714,7 @@ static void threadstate(void *p1, void *p2, void *p3)
* - manipulating thread state to consider case where no polling thread
* is available during event signalling.
* - defined a signal poll as waitable events in poll and
* verify the result after siganl raised
* verify the result after signal raised
*
* @ingroup kernel_poll_tests
*

View file

@ -44,7 +44,7 @@ static void tThread_entry(void *p1, void *p2, void *p3)
/**
* @brief Test k_queue_append_list() failure scenario
*
* @details Accroding to the API k_queue_append_list to
* @details According to the API k_queue_append_list to
* design some error condition to verify error branch of
* the API.
* 1. Verify that the list's head is empty.

View file

@ -169,7 +169,7 @@ void unqueue_worker(void *p1, void *p2, void *p3)
}
/**
* @brief Validate the behavior of dealine_set when the thread is not queued
* @brief Validate the behavior of deadline_set when the thread is not queued
*
* @details Create a bunch of threads with scheduling delay which make the
* thread in unqueued state. The k_thread_deadline_set() call should not make

View file

@ -214,7 +214,7 @@ void validate_wakeup(int src, int target, k_tid_t last_thread)
zassert_false(!preempted && target_wins,
"higher priority thread should have preempted");
/* The scheudler implements a 'first added to
/* The scheduler implements a 'first added to
* queue' policy for threads within a single
* priority, so the last thread woken up (the
* target) must never run before the source
@ -227,7 +227,7 @@ void validate_wakeup(int src, int target, k_tid_t last_thread)
* policy OR the opposite ("run newly woken
* threads first"), and long term we may want
* to revisit this particular check and maybe
* make the poilicy configurable.
* make the policy configurable.
*/
zassert_false(preempted && tie,
"tied priority should not preempt");

View file

@ -37,7 +37,7 @@ static void sem_thread_give_uninit(void *p1, void *p2, void *p3)
{
ztest_set_fault_valid(true);
/* use sem without initialse */
/* use sem without initialise */
k_sem_give(&uninit_sem);
ztest_test_fail();
@ -70,7 +70,7 @@ static void thread_high_prio_sem_take(void *p1, void *p2, void *p3)
*
* @details Using semaphore with some situations
* - Use an uninitialized semaphore
* - Use semphore normally
* - Use semaphore normally
* - Use semaphore with different priority threads
*
* @ingroup kernel_sys_sem_tests

View file

@ -498,8 +498,8 @@ static void thread_get_cpu_entry(void *p1, void *p2, void *p3)
* Test Objective:
* - To verify architecture layer provides a mechanism to return a pointer to the
* current kernel CPU record of the running CPU.
* We call arch_curr_cpu() and get it's member, both in main and spwaned thread
* speratively, and compare them. They shall be different in SMP enviornment.
* We call arch_curr_cpu() and get its member, both in main and spawned thread
* separately, and compare them. They shall be different in SMP environment.
*
* Testing techniques:
* - Interface testing, function and block box testing,
@ -756,7 +756,7 @@ static void t2_mutex_lock(void *p1, void *p2, void *p3)
}
/**
* @brief Test scenairo that a thread release the global lock
* @brief Test scenario that a thread release the global lock
*
* @ingroup kernel_smp_tests
*
@ -927,11 +927,11 @@ static int run_concurrency(int type, void *func)
* @ingroup kernel_smp_tests
*
* @details Validate the global lock and unlock API of SMP are thread-safe.
* We make 3 thread to increase the global count in differenet cpu and
* We make 3 thread to increase the global count in different cpu and
* they both do locking then unlocking for LOOP_COUNT times. It shall be no
* deadlock happened and total global count shall be 3 * LOOP COUNT.
*
* We show the 4 kinds of scenairo:
* We show the 4 kinds of scenario:
* - No any lock used
* - Use global irq lock
* - Use semaphore

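For reference, a minimal sketch of the "global irq lock" scenario listed above: a shared counter guarded by irq_lock()/irq_unlock(), which serializes the increment across CPUs when SMP is enabled. The counter and function names are illustrative assumptions, not code from this commit.

#include <zephyr.h>

static volatile int global_count;

void bump_counter(void)
{
	/* irq_lock() returns a key that must be handed back to
	 * irq_unlock(); on SMP it acts as a single global lock, so
	 * threads on other CPUs serialize here instead of racing.
	 */
	unsigned int key = irq_lock();

	global_count++;

	irq_unlock(key);
}
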
View file

@ -295,7 +295,7 @@ void thread_entry_wait(void *p1, void *p2, void *p3)
/**
* @brief Test that the stack pop can be waited
* if no item availablle
* if no item available
*
* @details Create and initialize a new stack
* Set two timeout parameters to indicate
@ -325,9 +325,9 @@ void test_stack_pop_can_wait(void)
k_stack_pop(&stack3, &rx_data[i], K_FOREVER);
}
zassert_true(rx_data[2] == tx_data[2], "wait foreve and pop failed\n");
zassert_true(rx_data[2] == tx_data[2], "wait forever and pop failed\n");
k_stack_pop(&stack3, &rx_data[3], K_MSEC(50));
zassert_true(rx_data[3] == tx_data[3], "Wait maxmum time pop failed\n");
zassert_true(rx_data[3] == tx_data[3], "Wait maximum time pop failed\n");
/* Clear the spawn thread to avoid side effect */
k_thread_abort(tid);
/*free the buffer allocated*/

View file

@ -85,7 +85,7 @@ void test_stack_push_full(void)
for (int i = 0; i < STACK_LEN; i++) {
zassert_true(k_stack_push(&stack, tx_data[i]) == 0, "push data into stack failed");
}
/* Verify that push a data in the full stack, a nagetive value will be met */
/* Verify that push a data in the full stack, a negative value will be met */
zassert_true(k_stack_push(&stack, data_tmp) == -ENOMEM, "push data successful");
}

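For reference, a minimal sketch of the k_stack behavior the two tests above cover: a pop may block until an item arrives, and a push on a full stack fails with -ENOMEM. The entry count and names are illustrative assumptions, not code from this commit.

#include <zephyr.h>

#define DEMO_STACK_ENTRIES 4

K_STACK_DEFINE(demo_stack, DEMO_STACK_ENTRIES);

void stack_sketch(void)
{
	stack_data_t out;

	/* Fill the stack; each push returns 0 while there is room. */
	for (uintptr_t i = 0; i < DEMO_STACK_ENTRIES; i++) {
		(void)k_stack_push(&demo_stack, i);
	}

	/* One more push on a full stack is expected to fail. */
	int ret = k_stack_push(&demo_stack, (stack_data_t)0xdead);
	/* ret should be -ENOMEM here, matching the assertion above. */

	/* Pop with a bounded wait; with K_FOREVER the caller would block
	 * until another thread pushes an item.
	 */
	ret = k_stack_pop(&demo_stack, &out, K_MSEC(50));

	(void)ret;
}
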
View file

@ -89,7 +89,7 @@ static void customdata_entry(void *p1, void *p2, void *p3)
zassert_is_null(k_thread_custom_data_get(), NULL);
while (1) {
k_thread_custom_data_set((void *)data);
/* relinguish cpu for a while */
/* relinquish cpu for a while */
k_msleep(50);
/** TESTPOINT: custom data comparison */
zassert_equal(data, (long)k_thread_custom_data_get(), NULL);
@ -491,7 +491,7 @@ void test_thread_join_deadlock(void)
/*
* entry for a delayed thread, do nothing. After the thread is created,
* just check how many ticks expires and how many ticks remain before
* the trhead start
* the thread start
*/
static void user_start_thread(void *p1, void *p2, void *p3)
{
@ -546,7 +546,7 @@ static void foreach_callback(const struct k_thread *thread, void *user_data)
((k_thread_runtime_stats_t *)user_data)->execution_cycles +=
stats.execution_cycles;
}
/* This case accumulates every threath's execution_cycles first, then
/* This case accumulates every thread's execution_cycles first, then
* get the total execution_cycles from a global
* k_thread_runtime_stats_t to see that all time is reflected in the
* total.
@ -620,10 +620,10 @@ void test_k_busy_wait_user(void)
#define INT_ARRAY_SIZE 128
int large_stack(size_t *space)
{
/* use "volatile" to protect this varaible from being optimized out */
/* use "volatile" to protect this variable from being optimized out */
volatile int a[INT_ARRAY_SIZE];
/* to avoid unused varaible error */
/* to avoid unused variable error */
a[0] = 1;
return k_thread_stack_space_get(k_current_get(), space);
@ -635,7 +635,7 @@ int small_stack(size_t *space)
}
/* test k_thread_stack_sapce_get(), unused stack space in large_stack_space()
* is samller than that in small_stack() because the former function has a
* is smaller than that in small_stack() because the former function has a
* large local variable
*/
void test_k_thread_stack_space_get_user(void)

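For reference, a minimal sketch of the k_thread_stack_space_get() comparison described above: a function with a large local array reports less unused stack than one without. The array size is an illustrative assumption (CONFIG_THREAD_STACK_INFO is required), not code from this commit.

#include <zephyr.h>

size_t unused_stack_after_big_frame(void)
{
	/* volatile keeps the compiler from optimizing the array away. */
	volatile int scratch[128];
	size_t unused = 0;

	/* Touch the array to avoid an unused-variable warning. */
	scratch[0] = 1;

	/* Reports how much of the current thread's stack has never been
	 * written; a large local frame reduces this value.
	 */
	(void)k_thread_stack_space_get(k_current_get(), &unused);

	return unused;
}
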
View file

@ -180,7 +180,7 @@ void test_k_thread_foreach_unlocked(void)
/**
* @brief Test k_thread_foreach API with null callback
*
* @details Call k_thread_foreach() with null callback will triger __ASSERT()
* @details Call k_thread_foreach() with null callback will trigger __ASSERT()
* and this test thread will be aborted by z_fatal_error()
* @see k_thread_foreach()
* @ingroup kernel_thread_tests
@ -194,7 +194,7 @@ void test_k_thread_foreach_null_cb(void)
/**
* @brief Test k_thread_foreach_unlocked API with null callback
*
* @details Call k_thread_foreach_unlocked() with null callback will triger
* @details Call k_thread_foreach_unlocked() with null callback will trigger
* __ASSERT() and this test thread will be aborted by z_fatal_error()
*
* @see k_thread_foreach_unlocked()
@ -210,7 +210,7 @@ void test_k_thread_foreach_unlocked_null_cb(void)
* @brief Test k_thread_state_str API with null callback
*
* @details It's impossible to sched a thread step by step manually to
* experence each state from _THREAD_PRESTART to _THREAD_DEAD. To cover each
* experience each state from _THREAD_PRESTART to _THREAD_DEAD. To cover each
* line of function k_thread_state_str(), set thread_state of tdata1 and check
* the string this function returns
*

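For reference, a minimal sketch of k_thread_foreach() as exercised above: the callback simply counts threads, and passing a NULL callback would trip the __ASSERT() the tests rely on. Names are illustrative assumptions (CONFIG_THREAD_MONITOR is required), not code from this commit.

#include <zephyr.h>

static void count_threads(const struct k_thread *thread, void *user_data)
{
	ARG_UNUSED(thread);

	/* Accumulate into the caller-supplied counter. */
	(*(int *)user_data)++;
}

void foreach_sketch(void)
{
	int n = 0;

	/* Iterates over every known thread and invokes the callback
	 * for each one, passing the user_data pointer through.
	 */
	k_thread_foreach(count_threads, &n);

	(void)n;
}
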
View file

@ -41,7 +41,7 @@ static void tThread_entry_negative(void *p1, void *p2, void *p3)
TC_PRINT("current case is %d\n", choice);
/* Set up the fault or assert are expected before we call
* the target tested funciton.
* the target tested function.
*/
switch (choice) {
case THREAD_START:

View file

@ -4,7 +4,7 @@
# complete these tests almost instantaneously because of qemu timer
# quirks ("time warp") even though the test reports that it completes
# in e.g. 14 s. We can take advantage of that for fast tests on each PR
# but we also want to excercise this code path during daily builds or
# but we also want to exercise this code path during daily builds or
# otherwise when users specify "twister --enable-slow".
#
# As other platforms are added with varying timer frequencies, increase

View file

@ -197,7 +197,7 @@ void test_timer_duration_period(void)
k_timer_start(&duration_timer, K_FOREVER, K_MSEC(PERIOD));
TIMER_ASSERT(tdata.stop_cnt == 1, &duration_timer);
/* cleanup environemtn */
/* cleanup environment */
k_timer_stop(&duration_timer);
}
@ -267,7 +267,7 @@ void test_timer_period_0(void)
&& (tdata.expire_cnt == 0)), &period0_timer);
TIMER_ASSERT(tdata.stop_cnt == 0, &period0_timer);
/* cleanup environemtn */
/* cleanup environment */
k_timer_stop(&period0_timer);
}
@ -306,7 +306,7 @@ void test_timer_period_k_forever(void)
&period0_timer);
TIMER_ASSERT(tdata.stop_cnt == 0, &period0_timer);
/* cleanup environemtn */
/* cleanup environment */
k_timer_stop(&period0_timer);
}
@ -406,7 +406,7 @@ void test_timer_periodicity(void)
*
* Please note, that expected firing time is not the
* one requested, as the kernel uses the ticks to manage
* time. The actual perioid will be equal to [tick time]
* time. The actual period will be equal to [tick time]
* multiplied by k_ms_to_ticks_ceil32(PERIOD).
*
* In the case of inexact conversion the delta will
@ -614,7 +614,7 @@ static void user_data_timer_handler(struct k_timer *timer)
* Validate user-specific data associated with timer
*
* It creates prototype of K_TIMER_DEFINE and starts the timer using
* k_timer_start() with specific initial duration, alongwith associated
* k_timer_start() with specific initial duration, along with associated
* user data using k_timer_user_data_set and k_timer_user_data_get().
* Stops the timer using k_timer_stop() and checks for correct data
* retrieval after timer completion.
@ -719,7 +719,7 @@ void test_timer_remaining(void)
"tick/busy slew %d larger than test threshold %u",
delta_ticks, slew_ticks);
/* Note +1 tick precision: even though we're calcluating in
/* Note +1 tick precision: even though we're calculating in
* ticks, we're waiting in k_busy_wait(), not for a timer
* interrupt, so it's possible for that to take 1 tick longer
* than expected on systems where the requested microsecond

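For reference, a minimal sketch of the k_timer duration/period and user-data APIs the tests above exercise; the timeout values and payload are illustrative assumptions, not code from this commit.

#include <zephyr.h>

static void demo_expiry(struct k_timer *timer)
{
	/* Runs each time the period elapses; user data attached before
	 * the timer was started is available here.
	 */
	void *data = k_timer_user_data_get(timer);

	ARG_UNUSED(data);
}

K_TIMER_DEFINE(demo_timer, demo_expiry, NULL);

void timer_sketch(void)
{
	static int payload = 42;

	k_timer_user_data_set(&demo_timer, &payload);

	/* First expiry after 100 ms, then every 50 ms. A K_FOREVER
	 * duration, as used in the test above, is expected to keep the
	 * timer from ever expiring until it is restarted.
	 */
	k_timer_start(&demo_timer, K_MSEC(100), K_MSEC(50));

	/* Remaining time until the next expiry, rounded to ticks. */
	uint32_t left_ms = k_timer_remaining_get(&demo_timer);

	ARG_UNUSED(left_ms);

	k_timer_stop(&demo_timer);
}
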
View file

@ -39,7 +39,7 @@ int test_frequency(void)
* Validates monotonic timer's clock calibration.
*
* It reads the System clocks h/w timer frequency value continuously
* using k_cycle_get_32() to verify its working and correctiveness.
* using k_cycle_get_32() to verify its working and correctness.
* It also checks system tick frequency by checking the delta error
* between generated and system clock provided HW cycles per sec values.
*

View file

@ -2,7 +2,7 @@ tests:
kernel.work.api:
min_flash: 34
tags: kernel
# this patform fails to run due to #40376, all
# this platform fails to run due to #40376, all
# the related CI checks got blocked, so exclude it.
platform_exclude: hifive1
timeout: 70

View file

@ -1,4 +1,4 @@
Title: Test workqeue APIs
Title: Test workqueue APIs
Description:

View file

@ -48,7 +48,7 @@ void test_globals(void)
{
int i;
/* Array should be filled with monotomically incrementing values */
/* Array should be filled with monotonically incrementing values */
for (i = 0; i < XIP_TEST_ARRAY_SZ; i++) {
/**TESTPOINT: Check if the array value is correct*/

Some files were not shown because too many files have changed in this diff.