include: misc: util.h: Rename min/max to MIN/MAX
There are problems using the lowercase min and max macros when compiling a C++ application with a third-party toolchain such as GNU ARM Embedded and including certain STL headers, e.g. <chrono>: those headers define actual C++ functions named min and max, and the macros interfere with them. Renaming the macros to uppercase MIN and MAX, which is consistent with almost all other preprocessor macros, avoids the naming conflict. All files that use these macros have been updated.

Signed-off-by: Carlos Stuart <carlosstuart1970@gmail.com>
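To illustrate the conflict the rename avoids, here is a hypothetical stand-alone C++ sketch (not part of this commit; the exact failure mode depends on the toolchain's STL implementation):

    // If a header defining a function-like lowercase min/max macro is included
    // before <algorithm> or <chrono>, the std::min/std::max declarations inside
    // those headers are macro-expanded into invalid code and the build fails.
    // Defining the macros after the includes keeps this example compilable and
    // shows the remaining hazard at call sites.
    #include <algorithm>
    #include <chrono>
    #include <iostream>

    #define min(a, b) (((a) < (b)) ? (a) : (b)) /* old util.h-style macro */
    #define MIN(a, b) (((a) < (b)) ? (a) : (b)) /* renamed macro from this commit */

    int main(void)
    {
        /* "std::min(1, 2)" would be rewritten by the preprocessor into
         * "std::(((1) < (2)) ? (1) : (2))", which does not compile; callers
         * have to write "(std::min)(1, 2)" to suppress the expansion.
         */
        std::cout << (std::min)(1, 2) << "\n";

        /* The uppercase name never collides with the C++ functions. */
        std::cout << MIN(1, 2) << "\n";

        auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::seconds(1));
        std::cout << ms.count() << "\n";
        return 0;
    }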
parent 413ede4b89
commit 75f77db432

106 changed files with 229 additions and 229 deletions
|
@ -36,7 +36,7 @@ void posix_exit(int exit_code)
|
||||||
{
|
{
|
||||||
static int max_exit_code;
|
static int max_exit_code;
|
||||||
|
|
||||||
max_exit_code = max(exit_code, max_exit_code);
|
max_exit_code = MAX(exit_code, max_exit_code);
|
||||||
/*
|
/*
|
||||||
* posix_soc_clean_up may not return if this is called from a SW thread,
|
* posix_soc_clean_up may not return if this is called from a SW thread,
|
||||||
* but instead it would get posix_exit() recalled again
|
* but instead it would get posix_exit() recalled again
|
||||||
|
|
|
@ -126,7 +126,7 @@ void hwtimer_set_real_time_mode(bool new_rt)
|
||||||
|
|
||||||
static void hwtimer_update_timer(void)
|
static void hwtimer_update_timer(void)
|
||||||
{
|
{
|
||||||
hw_timer_timer = min(hw_timer_tick_timer, hw_timer_awake_timer);
|
hw_timer_timer = MIN(hw_timer_tick_timer, hw_timer_awake_timer);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void host_clock_gettime(struct timespec *tv)
|
static inline void host_clock_gettime(struct timespec *tv)
|
||||||
|
|
|
@ -232,7 +232,7 @@ static size_t h4_discard(struct device *uart, size_t len)
|
||||||
{
|
{
|
||||||
u8_t buf[33];
|
u8_t buf[33];
|
||||||
|
|
||||||
return uart_fifo_read(uart, buf, min(len, sizeof(buf)));
|
return uart_fifo_read(uart, buf, MIN(len, sizeof(buf)));
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void read_payload(void)
|
static inline void read_payload(void)
|
||||||
|
|
|
@ -58,7 +58,7 @@ static int entropy_sam_get_entropy(struct device *dev, u8_t *buffer,
|
||||||
}
|
}
|
||||||
|
|
||||||
value = trng->TRNG_ODATA;
|
value = trng->TRNG_ODATA;
|
||||||
to_copy = min(length, sizeof(value));
|
to_copy = MIN(length, sizeof(value));
|
||||||
|
|
||||||
memcpy(buffer, &value, to_copy);
|
memcpy(buffer, &value, to_copy);
|
||||||
buffer += to_copy;
|
buffer += to_copy;
|
||||||
|
|
|
@ -35,7 +35,7 @@ static int entropy_native_posix_get_entropy(struct device *dev, u8_t *buffer,
|
||||||
*/
|
*/
|
||||||
long int value = random();
|
long int value = random();
|
||||||
|
|
||||||
size_t to_copy = min(length, sizeof(long int));
|
size_t to_copy = MIN(length, sizeof(long int));
|
||||||
|
|
||||||
memcpy(buffer, &value, to_copy);
|
memcpy(buffer, &value, to_copy);
|
||||||
buffer += to_copy;
|
buffer += to_copy;
|
||||||
|
|
|
@ -185,7 +185,7 @@ static int flash_sam_write(struct device *dev, off_t offset,
|
||||||
|
|
||||||
/* Maximum size without crossing a page */
|
/* Maximum size without crossing a page */
|
||||||
eop_len = -(offset | ~(IFLASH_PAGE_SIZE - 1));
|
eop_len = -(offset | ~(IFLASH_PAGE_SIZE - 1));
|
||||||
write_len = min(len, eop_len);
|
write_len = MIN(len, eop_len);
|
||||||
|
|
||||||
rc = flash_sam_write_page(dev, offset, data8, write_len);
|
rc = flash_sam_write_page(dev, offset, data8, write_len);
|
||||||
if (rc < 0) {
|
if (rc < 0) {
|
||||||
|
|
|
@ -105,7 +105,7 @@ static int flash_nios2_qspi_erase(struct device *dev, off_t offset, size_t len)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* calculate the byte size of data to be written in a sector */
|
/* calculate the byte size of data to be written in a sector */
|
||||||
length_to_erase = min(qspi_dev->sector_size - offset_in_block,
|
length_to_erase = MIN(qspi_dev->sector_size - offset_in_block,
|
||||||
remaining_length);
|
remaining_length);
|
||||||
|
|
||||||
/* Erase sector */
|
/* Erase sector */
|
||||||
|
@ -297,7 +297,7 @@ static int flash_nios2_qspi_write(struct device *dev, off_t offset,
|
||||||
}
|
}
|
||||||
|
|
||||||
/* calculate the byte size of data to be written in a sector */
|
/* calculate the byte size of data to be written in a sector */
|
||||||
length_to_write = min(qspi_dev->sector_size - offset_in_block,
|
length_to_write = MIN(qspi_dev->sector_size - offset_in_block,
|
||||||
remaining_length);
|
remaining_length);
|
||||||
|
|
||||||
rc = flash_nios2_qspi_write_block(dev,
|
rc = flash_nios2_qspi_write_block(dev,
|
||||||
|
|
|
@ -69,8 +69,8 @@ static inline void _i2c_dw_data_ask(struct device *dev)
|
||||||
tx_empty = I2C_DW_FIFO_DEPTH - regs->ic_txflr;
|
tx_empty = I2C_DW_FIFO_DEPTH - regs->ic_txflr;
|
||||||
|
|
||||||
/* Figure out how many bytes we can request */
|
/* Figure out how many bytes we can request */
|
||||||
cnt = min(I2C_DW_FIFO_DEPTH, dw->request_bytes);
|
cnt = MIN(I2C_DW_FIFO_DEPTH, dw->request_bytes);
|
||||||
cnt = min(min(tx_empty, rx_empty), cnt);
|
cnt = MIN(MIN(tx_empty, rx_empty), cnt);
|
||||||
|
|
||||||
while (cnt > 0) {
|
while (cnt > 0) {
|
||||||
/* Tell controller to get another byte */
|
/* Tell controller to get another byte */
|
||||||
|
|
|
@ -388,7 +388,7 @@ static int i2c_esp32_read_msg(struct device *dev, u16_t addr,
|
||||||
|
|
||||||
for (; msg.len; cmd = (void *)I2C_COMD0_REG(config->index)) {
|
for (; msg.len; cmd = (void *)I2C_COMD0_REG(config->index)) {
|
||||||
volatile struct i2c_esp32_cmd *wait_cmd = NULL;
|
volatile struct i2c_esp32_cmd *wait_cmd = NULL;
|
||||||
u32_t to_read = min(I2C_ESP32_BUFFER_SIZE, msg.len - 1);
|
u32_t to_read = MIN(I2C_ESP32_BUFFER_SIZE, msg.len - 1);
|
||||||
|
|
||||||
/* Might be the last byte, in which case, `to_read` will
|
/* Might be the last byte, in which case, `to_read` will
|
||||||
* be 0 here. See comment below.
|
* be 0 here. See comment below.
|
||||||
|
@ -464,7 +464,7 @@ static int i2c_esp32_write_msg(struct device *dev, u16_t addr,
|
||||||
cmd = i2c_esp32_write_addr(dev, cmd, &msg, addr);
|
cmd = i2c_esp32_write_addr(dev, cmd, &msg, addr);
|
||||||
|
|
||||||
for (; msg.len; cmd = (void *)I2C_COMD0_REG(config->index)) {
|
for (; msg.len; cmd = (void *)I2C_COMD0_REG(config->index)) {
|
||||||
u32_t to_send = min(I2C_ESP32_BUFFER_SIZE, msg.len);
|
u32_t to_send = MIN(I2C_ESP32_BUFFER_SIZE, msg.len);
|
||||||
u32_t i;
|
u32_t i;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
|
|
@ -92,7 +92,7 @@ static int spi_sam_configure(struct device *dev,
|
||||||
|
|
||||||
/* Use the requested or next higest possible frequency */
|
/* Use the requested or next higest possible frequency */
|
||||||
div = SOC_ATMEL_SAM_MCK_FREQ_HZ / config->frequency;
|
div = SOC_ATMEL_SAM_MCK_FREQ_HZ / config->frequency;
|
||||||
div = max(1, min(UINT8_MAX, div));
|
div = MAX(1, MIN(UINT8_MAX, div));
|
||||||
spi_csr |= SPI_CSR_SCBR(div);
|
spi_csr |= SPI_CSR_SCBR(div);
|
||||||
|
|
||||||
regs->SPI_CR = SPI_CR_SPIDIS; /* Disable SPI */
|
regs->SPI_CR = SPI_CR_SPIDIS; /* Disable SPI */
|
||||||
|
|
|
@ -95,7 +95,7 @@ static int spi_sam0_configure(struct device *dev,
|
||||||
|
|
||||||
/* Use the requested or next higest possible frequency */
|
/* Use the requested or next higest possible frequency */
|
||||||
div = (SOC_ATMEL_SAM0_GCLK0_FREQ_HZ / config->frequency) / 2 - 1;
|
div = (SOC_ATMEL_SAM0_GCLK0_FREQ_HZ / config->frequency) / 2 - 1;
|
||||||
div = max(0, min(UINT8_MAX, div));
|
div = MAX(0, MIN(UINT8_MAX, div));
|
||||||
|
|
||||||
/* Update the configuration only if it has changed */
|
/* Update the configuration only if it has changed */
|
||||||
if (regs->CTRLA.reg != ctrla.reg || regs->CTRLB.reg != ctrlb.reg ||
|
if (regs->CTRLA.reg != ctrla.reg || regs->CTRLB.reg != ctrlb.reg ||
|
||||||
|
|
|
@ -194,7 +194,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
|
||||||
#if defined(CONFIG_TICKLESS_KERNEL)
|
#if defined(CONFIG_TICKLESS_KERNEL)
|
||||||
u32_t delay;
|
u32_t delay;
|
||||||
|
|
||||||
ticks = min(MAX_TICKS, max(ticks - 1, 0));
|
ticks = MIN(MAX_TICKS, MAX(ticks - 1, 0));
|
||||||
|
|
||||||
/* Desired delay in the future */
|
/* Desired delay in the future */
|
||||||
delay = (ticks == 0) ? MIN_DELAY : ticks * CYC_PER_TICK;
|
delay = (ticks == 0) ? MIN_DELAY : ticks * CYC_PER_TICK;
|
||||||
|
|
|
@ -93,7 +93,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
|
||||||
#if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_QEMU_TICKLESS_WORKAROUND)
|
#if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_QEMU_TICKLESS_WORKAROUND)
|
||||||
u32_t delay;
|
u32_t delay;
|
||||||
|
|
||||||
ticks = min(MAX_TICKS, max(ticks - 1, 0));
|
ticks = MIN(MAX_TICKS, MAX(ticks - 1, 0));
|
||||||
|
|
||||||
/* Desired delay in the future */
|
/* Desired delay in the future */
|
||||||
delay = (ticks == 0) ? MIN_DELAY : ticks * CYC_PER_TICK;
|
delay = (ticks == 0) ? MIN_DELAY : ticks * CYC_PER_TICK;
|
||||||
|
|
|
@ -109,7 +109,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
|
||||||
}
|
}
|
||||||
|
|
||||||
ticks = ticks == K_FOREVER ? max_ticks : ticks;
|
ticks = ticks == K_FOREVER ? max_ticks : ticks;
|
||||||
ticks = max(min(ticks - 1, (s32_t)max_ticks), 0);
|
ticks = MAX(MIN(ticks - 1, (s32_t)max_ticks), 0);
|
||||||
|
|
||||||
k_spinlock_key_t key = k_spin_lock(&lock);
|
k_spinlock_key_t key = k_spin_lock(&lock);
|
||||||
u32_t now = MAIN_COUNTER_REG, cyc;
|
u32_t now = MAIN_COUNTER_REG, cyc;
|
||||||
|
|
|
@ -128,7 +128,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
|
||||||
|
|
||||||
#ifdef CONFIG_TICKLESS_KERNEL
|
#ifdef CONFIG_TICKLESS_KERNEL
|
||||||
ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
|
ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
|
||||||
ticks = max(min(ticks - 1, (s32_t)MAX_TICKS), 0);
|
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Get the requested delay in tick-aligned cycles. Increase
|
* Get the requested delay in tick-aligned cycles. Increase
|
||||||
|
@ -136,7 +136,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
|
||||||
* cycles elapsed since the last tick. Cap at the maximum
|
* cycles elapsed since the last tick. Cap at the maximum
|
||||||
* tick-aligned delta.
|
* tick-aligned delta.
|
||||||
*/
|
*/
|
||||||
u32_t cyc = min((1 + ticks) * CYC_PER_TICK, MAX_DELAY);
|
u32_t cyc = MIN((1 + ticks) * CYC_PER_TICK, MAX_DELAY);
|
||||||
|
|
||||||
u32_t key = irq_lock();
|
u32_t key = irq_lock();
|
||||||
u32_t d = counter_sub(counter(), last_count);
|
u32_t d = counter_sub(counter(), last_count);
|
||||||
|
|
|
@ -95,7 +95,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
|
||||||
}
|
}
|
||||||
|
|
||||||
ticks = ticks == K_FOREVER ? MAX_TICKS : ticks;
|
ticks = ticks == K_FOREVER ? MAX_TICKS : ticks;
|
||||||
ticks = max(min(ticks - 1, (s32_t)MAX_TICKS), 0);
|
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
|
||||||
|
|
||||||
k_spinlock_key_t key = k_spin_lock(&lock);
|
k_spinlock_key_t key = k_spin_lock(&lock);
|
||||||
u64_t now = mtime();
|
u64_t now = mtime();
|
||||||
|
|
|
@ -205,7 +205,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
|
||||||
#ifdef CONFIG_TICKLESS_KERNEL
|
#ifdef CONFIG_TICKLESS_KERNEL
|
||||||
|
|
||||||
ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
|
ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
|
||||||
ticks = max(min(ticks - 1, (s32_t) MAX_TICKS), 0);
|
ticks = MAX(MIN(ticks - 1, (s32_t) MAX_TICKS), 0);
|
||||||
|
|
||||||
/* Compute number of RTC cycles until the next timeout. */
|
/* Compute number of RTC cycles until the next timeout. */
|
||||||
u32_t count = rtc_count();
|
u32_t count = rtc_count();
|
||||||
|
|
|
@ -82,7 +82,7 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
|
||||||
|
|
||||||
#if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_QEMU_TICKLESS_WORKAROUND)
|
#if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_QEMU_TICKLESS_WORKAROUND)
|
||||||
ticks = ticks == K_FOREVER ? MAX_TICKS : ticks;
|
ticks = ticks == K_FOREVER ? MAX_TICKS : ticks;
|
||||||
ticks = max(min(ticks - 1, (s32_t)MAX_TICKS), 0);
|
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
|
||||||
|
|
||||||
k_spinlock_key_t key = k_spin_lock(&lock);
|
k_spinlock_key_t key = k_spin_lock(&lock);
|
||||||
u32_t curr = ccount(), cyc;
|
u32_t curr = ccount(), cyc;
|
||||||
|
|
|
@ -1674,7 +1674,7 @@ int usb_dc_ep_read_wait(u8_t ep, u8_t *data, u32_t max_data_len,
|
||||||
|
|
||||||
k_mutex_lock(&ctx->drv_lock, K_FOREVER);
|
k_mutex_lock(&ctx->drv_lock, K_FOREVER);
|
||||||
|
|
||||||
bytes_to_copy = min(max_data_len, ep_ctx->buf.len);
|
bytes_to_copy = MIN(max_data_len, ep_ctx->buf.len);
|
||||||
|
|
||||||
if (!data && !max_data_len) {
|
if (!data && !max_data_len) {
|
||||||
if (read_bytes) {
|
if (read_bytes) {
|
||||||
|
|
|
@ -475,7 +475,7 @@ int usb_dc_ep_configure(const struct usb_dc_ep_cfg_data *const cfg)
|
||||||
* Map the endpoint size to the buffer size. Only power of 2 buffer
|
* Map the endpoint size to the buffer size. Only power of 2 buffer
|
||||||
* sizes between 8 and 1024 are possible, get the next power of 2.
|
* sizes between 8 and 1024 are possible, get the next power of 2.
|
||||||
*/
|
*/
|
||||||
log2ceil_mps = 32 - __builtin_clz((max(cfg->ep_mps, 8) << 1) - 1) - 1;
|
log2ceil_mps = 32 - __builtin_clz((MAX(cfg->ep_mps, 8) << 1) - 1) - 1;
|
||||||
regval |= USBHS_DEVEPTCFG_EPSIZE(log2ceil_mps - 3);
|
regval |= USBHS_DEVEPTCFG_EPSIZE(log2ceil_mps - 3);
|
||||||
dev_data.ep_data[ep_idx].mps = cfg->ep_mps;
|
dev_data.ep_data[ep_idx].mps = cfg->ep_mps;
|
||||||
|
|
||||||
|
@ -695,7 +695,7 @@ int usb_dc_ep_write(u8_t ep, const u8_t *data, u32_t data_len, u32_t *ret_bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Write the data to the FIFO */
|
/* Write the data to the FIFO */
|
||||||
packet_len = min(data_len, dev_data.ep_data[ep_idx].mps);
|
packet_len = MIN(data_len, dev_data.ep_data[ep_idx].mps);
|
||||||
for (int i = 0; i < packet_len; i++) {
|
for (int i = 0; i < packet_len; i++) {
|
||||||
usb_dc_ep_fifo_put(ep_idx, data[i]);
|
usb_dc_ep_fifo_put(ep_idx, data[i]);
|
||||||
}
|
}
|
||||||
|
|
|
@ -490,7 +490,7 @@ int usb_dc_ep_read_ex(u8_t ep, u8_t *buf, u32_t max_data_len,
|
||||||
}
|
}
|
||||||
|
|
||||||
remain = bytes - data->out_at;
|
remain = bytes - data->out_at;
|
||||||
take = min(max_data_len, remain);
|
take = MIN(max_data_len, remain);
|
||||||
memcpy(buf, (u8_t *)addr + data->out_at, take);
|
memcpy(buf, (u8_t *)addr + data->out_at, take);
|
||||||
|
|
||||||
if (read_bytes != NULL) {
|
if (read_bytes != NULL) {
|
||||||
|
|
|
@ -797,7 +797,7 @@ int usb_dc_ep_read_wait(u8_t ep, u8_t *data, u32_t max_data_len,
|
||||||
* previously stored in the buffer.
|
* previously stored in the buffer.
|
||||||
*/
|
*/
|
||||||
if (data) {
|
if (data) {
|
||||||
read_count = min(read_count, max_data_len);
|
read_count = MIN(read_count, max_data_len);
|
||||||
memcpy(data, usb_dc_stm32_state.ep_buf[EP_IDX(ep)] +
|
memcpy(data, usb_dc_stm32_state.ep_buf[EP_IDX(ep)] +
|
||||||
ep_state->read_offset, read_count);
|
ep_state->read_offset, read_count);
|
||||||
ep_state->read_count -= read_count;
|
ep_state->read_count -= read_count;
|
||||||
|
|
|
@ -159,7 +159,7 @@ data:
|
||||||
}
|
}
|
||||||
|
|
||||||
while (eswifi_spi_cmddata_ready(spi) && to_read) {
|
while (eswifi_spi_cmddata_ready(spi) && to_read) {
|
||||||
to_read = min(rlen - offset, to_read);
|
to_read = MIN(rlen - offset, to_read);
|
||||||
memset(rsp + offset, 0, to_read);
|
memset(rsp + offset, 0, to_read);
|
||||||
eswifi_spi_read(eswifi, rsp + offset, to_read);
|
eswifi_spi_read(eswifi, rsp + offset, to_read);
|
||||||
offset += to_read;
|
offset += to_read;
|
||||||
|
|
|
@ -121,7 +121,7 @@ zephyr_log_mgmt_walk_cb(struct mdlog *log, struct mdlog_offset *log_offset,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
read_len = min(len - sizeof ueh, LOG_MGMT_BODY_LEN - sizeof ueh);
|
read_len = MIN(len - sizeof ueh, LOG_MGMT_BODY_LEN - sizeof ueh);
|
||||||
rc = mdlog_read(log, desciptor, zephyr_log_mgmt_walk_arg->body, sizeof ueh,
|
rc = mdlog_read(log, desciptor, zephyr_log_mgmt_walk_arg->body, sizeof ueh,
|
||||||
read_len);
|
read_len);
|
||||||
if (rc < 0) {
|
if (rc < 0) {
|
||||||
|
|
|
@ -68,7 +68,7 @@ extern "C" {
|
||||||
#define STACK_GUARD_SIZE 0
|
#define STACK_GUARD_SIZE 0
|
||||||
#endif /* CONFIG_MPU_STACK_GUARD */
|
#endif /* CONFIG_MPU_STACK_GUARD */
|
||||||
|
|
||||||
#define STACK_SIZE_ALIGN(x) max(STACK_ALIGN, x)
|
#define STACK_SIZE_ALIGN(x) MAX(STACK_ALIGN, x)
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -91,7 +91,7 @@ extern "C" {
|
||||||
|
|
||||||
#define _ARCH_THREAD_STACK_LEN(size) \
|
#define _ARCH_THREAD_STACK_LEN(size) \
|
||||||
(POW2_CEIL(STACK_SIZE_ALIGN(size)) + \
|
(POW2_CEIL(STACK_SIZE_ALIGN(size)) + \
|
||||||
max(POW2_CEIL(STACK_SIZE_ALIGN(size)), \
|
MAX(POW2_CEIL(STACK_SIZE_ALIGN(size)), \
|
||||||
POW2_CEIL(STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE)))
|
POW2_CEIL(STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE)))
|
||||||
|
|
||||||
#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
|
#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
|
||||||
|
|
|
@ -109,7 +109,7 @@ extern "C" {
|
||||||
#if defined(CONFIG_USERSPACE)
|
#if defined(CONFIG_USERSPACE)
|
||||||
#define STACK_ALIGN CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
|
#define STACK_ALIGN CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
|
||||||
#else
|
#else
|
||||||
#define STACK_ALIGN max(STACK_ALIGN_SIZE, MPU_GUARD_ALIGN_AND_SIZE)
|
#define STACK_ALIGN MAX(STACK_ALIGN_SIZE, MPU_GUARD_ALIGN_AND_SIZE)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@ -613,7 +613,7 @@ extern struct task_state_segment _main_tss;
|
||||||
|
|
||||||
#define _ARCH_THREAD_STACK_LEN(size) \
|
#define _ARCH_THREAD_STACK_LEN(size) \
|
||||||
(ROUND_UP((size), \
|
(ROUND_UP((size), \
|
||||||
max(_STACK_BASE_ALIGN, _STACK_SIZE_ALIGN)) + \
|
MAX(_STACK_BASE_ALIGN, _STACK_SIZE_ALIGN)) + \
|
||||||
_STACK_GUARD_SIZE)
|
_STACK_GUARD_SIZE)
|
||||||
|
|
||||||
#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
|
#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
|
||||||
|
|
|
@ -86,12 +86,12 @@ constexpr size_t ARRAY_SIZE(T(&)[N]) { return N; }
|
||||||
#define INLINE
|
#define INLINE
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifndef max
|
#ifndef MAX
|
||||||
#define max(a, b) (((a) > (b)) ? (a) : (b))
|
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifndef min
|
#ifndef MIN
|
||||||
#define min(a, b) (((a) < (b)) ? (a) : (b))
|
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
static inline int is_power_of_two(unsigned int x)
|
static inline int is_power_of_two(unsigned int x)
|
||||||
|
|
|
@ -95,7 +95,7 @@ extern "C" {
|
||||||
* to zero so LLDP Rx agents can invalidate the entry related to this node.
|
* to zero so LLDP Rx agents can invalidate the entry related to this node.
|
||||||
*/
|
*/
|
||||||
#define NET_LLDP_TTL \
|
#define NET_LLDP_TTL \
|
||||||
min((CONFIG_NET_LLDP_TX_INTERVAL * CONFIG_NET_LLDP_TX_HOLD) + 1, 65535)
|
MIN((CONFIG_NET_LLDP_TX_INTERVAL * CONFIG_NET_LLDP_TX_HOLD) + 1, 65535)
|
||||||
|
|
||||||
|
|
||||||
struct net_if;
|
struct net_if;
|
||||||
|
|
|
@ -197,7 +197,7 @@ void k_pipe_cleanup(struct k_pipe *pipe)
|
||||||
static size_t pipe_xfer(unsigned char *dest, size_t dest_size,
|
static size_t pipe_xfer(unsigned char *dest, size_t dest_size,
|
||||||
const unsigned char *src, size_t src_size)
|
const unsigned char *src, size_t src_size)
|
||||||
{
|
{
|
||||||
size_t num_bytes = min(dest_size, src_size);
|
size_t num_bytes = MIN(dest_size, src_size);
|
||||||
const unsigned char *end = src + num_bytes;
|
const unsigned char *end = src + num_bytes;
|
||||||
|
|
||||||
while (src != end) {
|
while (src != end) {
|
||||||
|
@ -227,7 +227,7 @@ static size_t pipe_buffer_put(struct k_pipe *pipe,
|
||||||
|
|
||||||
|
|
||||||
for (i = 0; i < 2; i++) {
|
for (i = 0; i < 2; i++) {
|
||||||
run_length = min(pipe->size - pipe->bytes_used,
|
run_length = MIN(pipe->size - pipe->bytes_used,
|
||||||
pipe->size - pipe->write_index);
|
pipe->size - pipe->write_index);
|
||||||
|
|
||||||
bytes_copied = pipe_xfer(pipe->buffer + pipe->write_index,
|
bytes_copied = pipe_xfer(pipe->buffer + pipe->write_index,
|
||||||
|
@ -263,7 +263,7 @@ static size_t pipe_buffer_get(struct k_pipe *pipe,
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < 2; i++) {
|
for (i = 0; i < 2; i++) {
|
||||||
run_length = min(pipe->bytes_used,
|
run_length = MIN(pipe->bytes_used,
|
||||||
pipe->size - pipe->read_index);
|
pipe->size - pipe->read_index);
|
||||||
|
|
||||||
bytes_copied = pipe_xfer(dest + num_bytes_read,
|
bytes_copied = pipe_xfer(dest + num_bytes_read,
|
||||||
|
|
|
@ -62,7 +62,7 @@ static s32_t next_timeout(void)
|
||||||
{
|
{
|
||||||
int maxw = can_wait_forever ? K_FOREVER : INT_MAX;
|
int maxw = can_wait_forever ? K_FOREVER : INT_MAX;
|
||||||
struct _timeout *to = first();
|
struct _timeout *to = first();
|
||||||
s32_t ret = to == NULL ? maxw : max(0, to->dticks - elapsed());
|
s32_t ret = to == NULL ? maxw : MAX(0, to->dticks - elapsed());
|
||||||
|
|
||||||
#ifdef CONFIG_TIMESLICING
|
#ifdef CONFIG_TIMESLICING
|
||||||
if (_current_cpu->slice_ticks && _current_cpu->slice_ticks < ret) {
|
if (_current_cpu->slice_ticks && _current_cpu->slice_ticks < ret) {
|
||||||
|
@ -76,7 +76,7 @@ void _add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks)
|
||||||
{
|
{
|
||||||
__ASSERT(!sys_dnode_is_linked(&to->node), "");
|
__ASSERT(!sys_dnode_is_linked(&to->node), "");
|
||||||
to->fn = fn;
|
to->fn = fn;
|
||||||
ticks = max(1, ticks);
|
ticks = MAX(1, ticks);
|
||||||
|
|
||||||
LOCKED(&timeout_lock) {
|
LOCKED(&timeout_lock) {
|
||||||
struct _timeout *t;
|
struct _timeout *t;
|
||||||
|
|
|
@ -114,11 +114,11 @@ u32_t ring_buf_put_claim(struct ring_buf *buf, u8_t **data, u32_t size)
|
||||||
buf->misc.byte_mode.tmp_tail);
|
buf->misc.byte_mode.tmp_tail);
|
||||||
|
|
||||||
/* Limit requested size to available size. */
|
/* Limit requested size to available size. */
|
||||||
size = min(size, space);
|
size = MIN(size, space);
|
||||||
trail_size = buf->size - buf->misc.byte_mode.tmp_tail;
|
trail_size = buf->size - buf->misc.byte_mode.tmp_tail;
|
||||||
|
|
||||||
/* Limit allocated size to trail size. */
|
/* Limit allocated size to trail size. */
|
||||||
allocated = min(trail_size, size);
|
allocated = MIN(trail_size, size);
|
||||||
|
|
||||||
*data = &buf->buf.buf8[buf->misc.byte_mode.tmp_tail];
|
*data = &buf->buf.buf8[buf->misc.byte_mode.tmp_tail];
|
||||||
buf->misc.byte_mode.tmp_tail =
|
buf->misc.byte_mode.tmp_tail =
|
||||||
|
@ -169,10 +169,10 @@ u32_t ring_buf_get_claim(struct ring_buf *buf, u8_t **data, u32_t size)
|
||||||
trail_size = buf->size - buf->misc.byte_mode.tmp_head;
|
trail_size = buf->size - buf->misc.byte_mode.tmp_head;
|
||||||
|
|
||||||
/* Limit requested size to available size. */
|
/* Limit requested size to available size. */
|
||||||
granted_size = min(size, space);
|
granted_size = MIN(size, space);
|
||||||
|
|
||||||
/* Limit allocated size to trail size. */
|
/* Limit allocated size to trail size. */
|
||||||
granted_size = min(trail_size, granted_size);
|
granted_size = MIN(trail_size, granted_size);
|
||||||
|
|
||||||
*data = &buf->buf.buf8[buf->misc.byte_mode.tmp_head];
|
*data = &buf->buf.buf8[buf->misc.byte_mode.tmp_head];
|
||||||
buf->misc.byte_mode.tmp_head =
|
buf->misc.byte_mode.tmp_head =
|
||||||
|
|
|
@ -487,7 +487,7 @@ static ssize_t write_adv_data(struct bt_conn *conn,
|
||||||
* controlled by characteristics 4 (Radio Tx Power) and
|
* controlled by characteristics 4 (Radio Tx Power) and
|
||||||
* 5 (Advertised Tx Power).
|
* 5 (Advertised Tx Power).
|
||||||
*/
|
*/
|
||||||
slot->ad[2].data_len = min(slot->ad[2].data_len,
|
slot->ad[2].data_len = MIN(slot->ad[2].data_len,
|
||||||
len + EDS_URL_WRITE_OFFSET);
|
len + EDS_URL_WRITE_OFFSET);
|
||||||
memcpy(&slot->ad[2].data + EDS_URL_WRITE_OFFSET, buf,
|
memcpy(&slot->ad[2].data + EDS_URL_WRITE_OFFSET, buf,
|
||||||
slot->ad[2].data_len - EDS_URL_WRITE_OFFSET);
|
slot->ad[2].data_len - EDS_URL_WRITE_OFFSET);
|
||||||
|
|
|
@ -103,7 +103,7 @@ static size_t h4_discard(struct device *uart, size_t len)
|
||||||
{
|
{
|
||||||
u8_t buf[H4_DISCARD_LEN];
|
u8_t buf[H4_DISCARD_LEN];
|
||||||
|
|
||||||
return uart_fifo_read(uart, buf, min(len, sizeof(buf)));
|
return uart_fifo_read(uart, buf, MIN(len, sizeof(buf)));
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct net_buf *h4_cmd_recv(int *remaining)
|
static struct net_buf *h4_cmd_recv(int *remaining)
|
||||||
|
|
|
@ -170,7 +170,7 @@ void board_heartbeat(u8_t hops, u16_t feat)
|
||||||
printk("%u hops\n", hops);
|
printk("%u hops\n", hops);
|
||||||
|
|
||||||
if (hops) {
|
if (hops) {
|
||||||
hops = min(hops, ARRAY_SIZE(hops_img));
|
hops = MIN(hops, ARRAY_SIZE(hops_img));
|
||||||
mb_display_image(disp, MB_DISPLAY_MODE_SINGLE, K_SECONDS(2),
|
mb_display_image(disp, MB_DISPLAY_MODE_SINGLE, K_SECONDS(2),
|
||||||
&hops_img[hops - 1], 1);
|
&hops_img[hops - 1], 1);
|
||||||
}
|
}
|
||||||
|
|
|
@ -322,7 +322,7 @@ static void vnd_hello(struct bt_mesh_model *model,
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
len = min(buf->len, HELLO_MAX);
|
len = MIN(buf->len, HELLO_MAX);
|
||||||
memcpy(str, buf->data, len);
|
memcpy(str, buf->data, len);
|
||||||
str[len] = '\0';
|
str[len] = '\0';
|
||||||
|
|
||||||
|
@ -417,7 +417,7 @@ static void send_hello(struct k_work *work)
|
||||||
|
|
||||||
bt_mesh_model_msg_init(&msg, OP_VND_HELLO);
|
bt_mesh_model_msg_init(&msg, OP_VND_HELLO);
|
||||||
net_buf_simple_add_mem(&msg, name,
|
net_buf_simple_add_mem(&msg, name,
|
||||||
min(HELLO_MAX, first_name_len(name)));
|
MIN(HELLO_MAX, first_name_len(name)));
|
||||||
|
|
||||||
if (bt_mesh_model_send(&vnd_models[0], &ctx, &msg, NULL, NULL) == 0) {
|
if (bt_mesh_model_send(&vnd_models[0], &ctx, &msg, NULL, NULL) == 0) {
|
||||||
board_show_text("Saying \"hi!\" to everyone", false,
|
board_show_text("Saying \"hi!\" to everyone", false,
|
||||||
|
|
|
@ -85,7 +85,7 @@ static size_t print_line(enum font_size font_size, int row, const char *text,
|
||||||
|
|
||||||
cfb_framebuffer_set_font(epd_dev, font_size);
|
cfb_framebuffer_set_font(epd_dev, font_size);
|
||||||
|
|
||||||
len = min(len, fonts[font_size].columns);
|
len = MIN(len, fonts[font_size].columns);
|
||||||
memcpy(line, text, len);
|
memcpy(line, text, len);
|
||||||
line[len] = '\0';
|
line[len] = '\0';
|
||||||
|
|
||||||
|
|
|
@ -251,7 +251,7 @@ static int copy_pkt_to_buf(struct net_buf *src, u16_t offset,
|
||||||
}
|
}
|
||||||
|
|
||||||
for (copied = 0U; src && n_bytes > 0; offset = 0U) {
|
for (copied = 0U; src && n_bytes > 0; offset = 0U) {
|
||||||
to_copy = min(n_bytes, src->len - offset);
|
to_copy = MIN(n_bytes, src->len - offset);
|
||||||
|
|
||||||
memcpy(dst + copied, (char *)src->data + offset, to_copy);
|
memcpy(dst + copied, (char *)src->data + offset, to_copy);
|
||||||
copied += to_copy;
|
copied += to_copy;
|
||||||
|
|
|
@ -756,10 +756,10 @@ static int large_get(struct coap_resource *resource,
|
||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
|
|
||||||
size = min(coap_block_size_to_bytes(ctx.block_size),
|
size = MIN(coap_block_size_to_bytes(ctx.block_size),
|
||||||
ctx.total_size - ctx.current);
|
ctx.total_size - ctx.current);
|
||||||
|
|
||||||
memset(payload, 'A', min(size, sizeof(payload)));
|
memset(payload, 'A', MIN(size, sizeof(payload)));
|
||||||
|
|
||||||
r = coap_packet_append_payload(&response, (u8_t *)payload, size);
|
r = coap_packet_append_payload(&response, (u8_t *)payload, size);
|
||||||
if (r < 0) {
|
if (r < 0) {
|
||||||
|
|
|
@ -26,7 +26,7 @@ const char *bt_hex_real(const void *buf, size_t len)
|
||||||
const u8_t *b = buf;
|
const u8_t *b = buf;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
len = min(len, (sizeof(str) - 1) / 2);
|
len = MIN(len, (sizeof(str) - 1) / 2);
|
||||||
|
|
||||||
for (i = 0; i < len; i++) {
|
for (i = 0; i < len; i++) {
|
||||||
str[i * 2] = hex[b[i] >> 4];
|
str[i * 2] = hex[b[i] >> 4];
|
||||||
|
|
|
@ -1168,7 +1168,7 @@ static inline u32_t isr_rx_adv(u8_t devmatch_ok, u8_t devmatch_id,
|
||||||
HAL_TICKER_US_TO_TICKS(RADIO_TICKER_XTAL_OFFSET_US);
|
HAL_TICKER_US_TO_TICKS(RADIO_TICKER_XTAL_OFFSET_US);
|
||||||
conn->hdr.ticks_preempt_to_start =
|
conn->hdr.ticks_preempt_to_start =
|
||||||
HAL_TICKER_US_TO_TICKS(RADIO_TICKER_PREEMPT_PART_MIN_US);
|
HAL_TICKER_US_TO_TICKS(RADIO_TICKER_PREEMPT_PART_MIN_US);
|
||||||
ticks_slot_offset = max(conn->hdr.ticks_active_to_start,
|
ticks_slot_offset = MAX(conn->hdr.ticks_active_to_start,
|
||||||
conn->hdr.ticks_xtal_to_start);
|
conn->hdr.ticks_xtal_to_start);
|
||||||
conn_interval_us -=
|
conn_interval_us -=
|
||||||
conn->slave.window_widening_periodic_us;
|
conn->slave.window_widening_periodic_us;
|
||||||
|
@ -1662,7 +1662,7 @@ static inline u32_t isr_rx_scan(u8_t devmatch_ok, u8_t devmatch_id,
|
||||||
conn->hdr.ticks_preempt_to_start = HAL_TICKER_US_TO_TICKS(
|
conn->hdr.ticks_preempt_to_start = HAL_TICKER_US_TO_TICKS(
|
||||||
RADIO_TICKER_PREEMPT_PART_MIN_US);
|
RADIO_TICKER_PREEMPT_PART_MIN_US);
|
||||||
conn->hdr.ticks_slot = _radio.scanner.ticks_conn_slot;
|
conn->hdr.ticks_slot = _radio.scanner.ticks_conn_slot;
|
||||||
ticks_slot_offset = max(conn->hdr.ticks_active_to_start,
|
ticks_slot_offset = MAX(conn->hdr.ticks_active_to_start,
|
||||||
conn->hdr.ticks_xtal_to_start);
|
conn->hdr.ticks_xtal_to_start);
|
||||||
|
|
||||||
/* Stop Scanner */
|
/* Stop Scanner */
|
||||||
|
@ -2296,7 +2296,7 @@ static inline u8_t isr_rx_conn_pkt_ctrl_dle(struct pdu_data *pdu_data_rx,
|
||||||
* peer max_rx_octets
|
* peer max_rx_octets
|
||||||
*/
|
*/
|
||||||
if (lr->max_rx_octets >= PDU_DC_PAYLOAD_SIZE_MIN) {
|
if (lr->max_rx_octets >= PDU_DC_PAYLOAD_SIZE_MIN) {
|
||||||
eff_tx_octets = min(lr->max_rx_octets,
|
eff_tx_octets = MIN(lr->max_rx_octets,
|
||||||
_radio.conn_curr->default_tx_octets);
|
_radio.conn_curr->default_tx_octets);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2304,7 +2304,7 @@ static inline u8_t isr_rx_conn_pkt_ctrl_dle(struct pdu_data *pdu_data_rx,
|
||||||
* peer max_tx_octets
|
* peer max_tx_octets
|
||||||
*/
|
*/
|
||||||
if (lr->max_tx_octets >= PDU_DC_PAYLOAD_SIZE_MIN) {
|
if (lr->max_tx_octets >= PDU_DC_PAYLOAD_SIZE_MIN) {
|
||||||
eff_rx_octets = min(lr->max_tx_octets,
|
eff_rx_octets = MIN(lr->max_tx_octets,
|
||||||
LL_LENGTH_OCTETS_RX_MAX);
|
LL_LENGTH_OCTETS_RX_MAX);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2315,11 +2315,11 @@ static inline u8_t isr_rx_conn_pkt_ctrl_dle(struct pdu_data *pdu_data_rx,
|
||||||
if (lr->max_rx_time >=
|
if (lr->max_rx_time >=
|
||||||
RADIO_PKT_TIME(PDU_DC_PAYLOAD_SIZE_MIN, 0)) {
|
RADIO_PKT_TIME(PDU_DC_PAYLOAD_SIZE_MIN, 0)) {
|
||||||
eff_tx_time =
|
eff_tx_time =
|
||||||
min(lr->max_rx_time,
|
MIN(lr->max_rx_time,
|
||||||
_radio.conn_curr->default_tx_time);
|
_radio.conn_curr->default_tx_time);
|
||||||
#if defined(CONFIG_BT_CTLR_PHY_CODED)
|
#if defined(CONFIG_BT_CTLR_PHY_CODED)
|
||||||
eff_tx_time =
|
eff_tx_time =
|
||||||
max(eff_tx_time,
|
MAX(eff_tx_time,
|
||||||
RADIO_PKT_TIME(PDU_DC_PAYLOAD_SIZE_MIN,
|
RADIO_PKT_TIME(PDU_DC_PAYLOAD_SIZE_MIN,
|
||||||
_radio.conn_curr->phy_tx));
|
_radio.conn_curr->phy_tx));
|
||||||
#endif /* CONFIG_BT_CTLR_PHY_CODED */
|
#endif /* CONFIG_BT_CTLR_PHY_CODED */
|
||||||
|
@ -2331,12 +2331,12 @@ static inline u8_t isr_rx_conn_pkt_ctrl_dle(struct pdu_data *pdu_data_rx,
|
||||||
if (lr->max_tx_time >=
|
if (lr->max_tx_time >=
|
||||||
RADIO_PKT_TIME(PDU_DC_PAYLOAD_SIZE_MIN, 0)) {
|
RADIO_PKT_TIME(PDU_DC_PAYLOAD_SIZE_MIN, 0)) {
|
||||||
eff_rx_time =
|
eff_rx_time =
|
||||||
min(lr->max_tx_time,
|
MIN(lr->max_tx_time,
|
||||||
RADIO_PKT_TIME(LL_LENGTH_OCTETS_RX_MAX,
|
RADIO_PKT_TIME(LL_LENGTH_OCTETS_RX_MAX,
|
||||||
BIT(2)));
|
BIT(2)));
|
||||||
#if defined(CONFIG_BT_CTLR_PHY_CODED)
|
#if defined(CONFIG_BT_CTLR_PHY_CODED)
|
||||||
eff_rx_time =
|
eff_rx_time =
|
||||||
max(eff_rx_time,
|
MAX(eff_rx_time,
|
||||||
RADIO_PKT_TIME(PDU_DC_PAYLOAD_SIZE_MIN,
|
RADIO_PKT_TIME(PDU_DC_PAYLOAD_SIZE_MIN,
|
||||||
_radio.conn_curr->phy_rx));
|
_radio.conn_curr->phy_rx));
|
||||||
#endif /* CONFIG_BT_CTLR_PHY_CODED */
|
#endif /* CONFIG_BT_CTLR_PHY_CODED */
|
||||||
|
@ -5031,7 +5031,7 @@ static void prepare_normal_set(struct shdr *hdr, u8_t ticker_user_id,
|
||||||
if (hdr->ticks_xtal_to_start & XON_BITMASK) {
|
if (hdr->ticks_xtal_to_start & XON_BITMASK) {
|
||||||
u32_t ticker_status;
|
u32_t ticker_status;
|
||||||
u32_t ticks_prepare_to_start =
|
u32_t ticks_prepare_to_start =
|
||||||
max(hdr->ticks_active_to_start,
|
MAX(hdr->ticks_active_to_start,
|
||||||
hdr->ticks_preempt_to_start);
|
hdr->ticks_preempt_to_start);
|
||||||
u32_t ticks_drift_minus = (hdr->ticks_xtal_to_start &
|
u32_t ticks_drift_minus = (hdr->ticks_xtal_to_start &
|
||||||
~XON_BITMASK) -
|
~XON_BITMASK) -
|
||||||
|
@ -5154,10 +5154,10 @@ static void mayfly_xtal_stop_calc(void *params)
|
||||||
|
|
||||||
/* Compensate for current ticker in reduced prepare */
|
/* Compensate for current ticker in reduced prepare */
|
||||||
if (hdr_curr->ticks_xtal_to_start & XON_BITMASK) {
|
if (hdr_curr->ticks_xtal_to_start & XON_BITMASK) {
|
||||||
ticks_slot_abs = max(hdr_curr->ticks_active_to_start,
|
ticks_slot_abs = MAX(hdr_curr->ticks_active_to_start,
|
||||||
hdr_curr->ticks_preempt_to_start);
|
hdr_curr->ticks_preempt_to_start);
|
||||||
} else {
|
} else {
|
||||||
ticks_slot_abs = max(hdr_curr->ticks_active_to_start,
|
ticks_slot_abs = MAX(hdr_curr->ticks_active_to_start,
|
||||||
hdr_curr->ticks_xtal_to_start);
|
hdr_curr->ticks_xtal_to_start);
|
||||||
}
|
}
|
||||||
ticks_slot_abs += hdr_curr->ticks_slot;
|
ticks_slot_abs += hdr_curr->ticks_slot;
|
||||||
|
@ -5167,7 +5167,7 @@ static void mayfly_xtal_stop_calc(void *params)
|
||||||
LL_ASSERT(hdr_next);
|
LL_ASSERT(hdr_next);
|
||||||
|
|
||||||
ticks_prepare_to_start_next =
|
ticks_prepare_to_start_next =
|
||||||
max(hdr_next->ticks_active_to_start,
|
MAX(hdr_next->ticks_active_to_start,
|
||||||
hdr_next->ticks_preempt_to_start);
|
hdr_next->ticks_preempt_to_start);
|
||||||
|
|
||||||
/* Compensate for next ticker in reduced prepare */
|
/* Compensate for next ticker in reduced prepare */
|
||||||
|
@ -5322,7 +5322,7 @@ static void sched_after_mstr_free_slot_get(u8_t user_id,
|
||||||
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
||||||
if (conn->hdr.ticks_xtal_to_start & XON_BITMASK) {
|
if (conn->hdr.ticks_xtal_to_start & XON_BITMASK) {
|
||||||
u32_t ticks_prepare_to_start =
|
u32_t ticks_prepare_to_start =
|
||||||
max(conn->hdr.ticks_active_to_start,
|
MAX(conn->hdr.ticks_active_to_start,
|
||||||
conn->hdr.ticks_preempt_to_start);
|
conn->hdr.ticks_preempt_to_start);
|
||||||
|
|
||||||
ticks_slot_abs_curr =
|
ticks_slot_abs_curr =
|
||||||
|
@ -5335,7 +5335,7 @@ static void sched_after_mstr_free_slot_get(u8_t user_id,
|
||||||
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
|
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
|
||||||
{
|
{
|
||||||
u32_t ticks_prepare_to_start =
|
u32_t ticks_prepare_to_start =
|
||||||
max(conn->hdr.ticks_active_to_start,
|
MAX(conn->hdr.ticks_active_to_start,
|
||||||
conn->hdr.ticks_xtal_to_start);
|
conn->hdr.ticks_xtal_to_start);
|
||||||
|
|
||||||
ticks_slot_abs_curr = ticks_prepare_to_start;
|
ticks_slot_abs_curr = ticks_prepare_to_start;
|
||||||
|
@ -5448,7 +5448,7 @@ static void sched_free_win_offset_calc(struct connection *conn_curr,
|
||||||
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
||||||
if (conn_curr->hdr.ticks_xtal_to_start & XON_BITMASK) {
|
if (conn_curr->hdr.ticks_xtal_to_start & XON_BITMASK) {
|
||||||
u32_t ticks_prepare_to_start =
|
u32_t ticks_prepare_to_start =
|
||||||
max(conn_curr->hdr.ticks_active_to_start,
|
MAX(conn_curr->hdr.ticks_active_to_start,
|
||||||
conn_curr->hdr.ticks_preempt_to_start);
|
conn_curr->hdr.ticks_preempt_to_start);
|
||||||
|
|
||||||
ticks_slot_abs = conn_curr->hdr.ticks_xtal_to_start &
|
ticks_slot_abs = conn_curr->hdr.ticks_xtal_to_start &
|
||||||
|
@ -5458,7 +5458,7 @@ static void sched_free_win_offset_calc(struct connection *conn_curr,
|
||||||
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
|
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
|
||||||
{
|
{
|
||||||
u32_t ticks_prepare_to_start =
|
u32_t ticks_prepare_to_start =
|
||||||
max(conn_curr->hdr.ticks_active_to_start,
|
MAX(conn_curr->hdr.ticks_active_to_start,
|
||||||
conn_curr->hdr.ticks_xtal_to_start);
|
conn_curr->hdr.ticks_xtal_to_start);
|
||||||
|
|
||||||
ticks_slot_abs = ticks_prepare_to_start;
|
ticks_slot_abs = ticks_prepare_to_start;
|
||||||
|
@ -5537,7 +5537,7 @@ static void sched_free_win_offset_calc(struct connection *conn_curr,
|
||||||
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
||||||
if (conn->hdr.ticks_xtal_to_start & XON_BITMASK) {
|
if (conn->hdr.ticks_xtal_to_start & XON_BITMASK) {
|
||||||
u32_t ticks_prepare_to_start =
|
u32_t ticks_prepare_to_start =
|
||||||
max(conn->hdr.ticks_active_to_start,
|
MAX(conn->hdr.ticks_active_to_start,
|
||||||
conn->hdr.ticks_preempt_to_start);
|
conn->hdr.ticks_preempt_to_start);
|
||||||
|
|
||||||
ticks_slot_abs_curr =
|
ticks_slot_abs_curr =
|
||||||
|
@ -5550,7 +5550,7 @@ static void sched_free_win_offset_calc(struct connection *conn_curr,
|
||||||
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
|
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
|
||||||
{
|
{
|
||||||
u32_t ticks_prepare_to_start =
|
u32_t ticks_prepare_to_start =
|
||||||
max(conn->hdr.ticks_active_to_start,
|
MAX(conn->hdr.ticks_active_to_start,
|
||||||
conn->hdr.ticks_xtal_to_start);
|
conn->hdr.ticks_xtal_to_start);
|
||||||
|
|
||||||
ticks_slot_abs_curr = ticks_prepare_to_start;
|
ticks_slot_abs_curr = ticks_prepare_to_start;
|
||||||
|
@ -5807,7 +5807,7 @@ static void event_common_prepare(u32_t ticks_at_expire,
|
||||||
* active to start duration.
|
* active to start duration.
|
||||||
*/
|
*/
|
||||||
if (_ticks_xtal_to_start & XON_BITMASK) {
|
if (_ticks_xtal_to_start & XON_BITMASK) {
|
||||||
_ticks_xtal_to_start = max(_ticks_active_to_start,
|
_ticks_xtal_to_start = MAX(_ticks_active_to_start,
|
||||||
ticks_preempt_to_start);
|
ticks_preempt_to_start);
|
||||||
}
|
}
|
||||||
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
|
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
|
||||||
|
@ -5912,7 +5912,7 @@ static void event_common_prepare(u32_t ticks_at_expire,
|
||||||
}
|
}
|
||||||
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
|
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
|
||||||
|
|
||||||
ticks_to_start_new = max(_radio.ticks_active_to_start,
|
ticks_to_start_new = MAX(_radio.ticks_active_to_start,
|
||||||
*ticks_xtal_to_start);
|
*ticks_xtal_to_start);
|
||||||
|
|
||||||
/* drift the primary as required due to active line change */
|
/* drift the primary as required due to active line change */
|
||||||
|
@ -6754,7 +6754,7 @@ static void event_scan_prepare(u32_t ticks_at_expire, u32_t remainder,
|
||||||
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
||||||
if (_radio.scanner.hdr.ticks_xtal_to_start & XON_BITMASK) {
|
if (_radio.scanner.hdr.ticks_xtal_to_start & XON_BITMASK) {
|
||||||
u32_t ticks_prepare_to_start =
|
u32_t ticks_prepare_to_start =
|
||||||
max(_radio.scanner.hdr.ticks_active_to_start,
|
MAX(_radio.scanner.hdr.ticks_active_to_start,
|
||||||
_radio.scanner.hdr.ticks_preempt_to_start);
|
_radio.scanner.hdr.ticks_preempt_to_start);
|
||||||
|
|
||||||
ticks_at_expire_normal -=
|
ticks_at_expire_normal -=
|
||||||
|
@ -6944,7 +6944,7 @@ static inline void event_conn_upd_init(struct connection *conn,
|
||||||
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
||||||
if (conn->hdr.ticks_xtal_to_start & XON_BITMASK) {
|
if (conn->hdr.ticks_xtal_to_start & XON_BITMASK) {
|
||||||
u32_t ticks_prepare_to_start =
|
u32_t ticks_prepare_to_start =
|
||||||
max(conn->hdr.ticks_active_to_start,
|
MAX(conn->hdr.ticks_active_to_start,
|
||||||
conn->hdr.ticks_preempt_to_start);
|
conn->hdr.ticks_preempt_to_start);
|
||||||
|
|
||||||
conn->llcp.conn_upd.ticks_anchor -=
|
conn->llcp.conn_upd.ticks_anchor -=
|
||||||
|
@ -7103,7 +7103,7 @@ static inline u32_t event_conn_upd_prep(struct connection *conn,
|
||||||
/* restore to normal prepare */
|
/* restore to normal prepare */
|
||||||
if (conn->hdr.ticks_xtal_to_start & XON_BITMASK) {
|
if (conn->hdr.ticks_xtal_to_start & XON_BITMASK) {
|
||||||
u32_t ticks_prepare_to_start =
|
u32_t ticks_prepare_to_start =
|
||||||
max(conn->hdr.ticks_active_to_start,
|
MAX(conn->hdr.ticks_active_to_start,
|
||||||
conn->hdr.ticks_preempt_to_start);
|
conn->hdr.ticks_preempt_to_start);
|
||||||
|
|
||||||
conn->hdr.ticks_xtal_to_start &= ~XON_BITMASK;
|
conn->hdr.ticks_xtal_to_start &= ~XON_BITMASK;
|
||||||
|
@ -7128,7 +7128,7 @@ static inline u32_t event_conn_upd_prep(struct connection *conn,
|
||||||
conn->latency_prepare -= (instant_latency - latency);
|
conn->latency_prepare -= (instant_latency - latency);
|
||||||
|
|
||||||
/* calculate the offset, window widening and interval */
|
/* calculate the offset, window widening and interval */
|
||||||
ticks_slot_offset = max(conn->hdr.ticks_active_to_start,
|
ticks_slot_offset = MAX(conn->hdr.ticks_active_to_start,
|
||||||
conn->hdr.ticks_xtal_to_start);
|
conn->hdr.ticks_xtal_to_start);
|
||||||
conn_interval_us = conn->llcp.conn_upd.interval * 1250;
|
conn_interval_us = conn->llcp.conn_upd.interval * 1250;
|
||||||
periodic_us = conn_interval_us;
|
periodic_us = conn_interval_us;
|
||||||
|
@ -7683,7 +7683,7 @@ static inline void event_conn_param_req(struct connection *conn,
|
||||||
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
||||||
if (conn->hdr.ticks_xtal_to_start & XON_BITMASK) {
|
if (conn->hdr.ticks_xtal_to_start & XON_BITMASK) {
|
||||||
u32_t ticks_prepare_to_start =
|
u32_t ticks_prepare_to_start =
|
||||||
max(conn->hdr.ticks_active_to_start,
|
MAX(conn->hdr.ticks_active_to_start,
|
||||||
conn->hdr.ticks_preempt_to_start);
|
conn->hdr.ticks_preempt_to_start);
|
||||||
|
|
||||||
conn->llcp_conn_param.ticks_ref -=
|
conn->llcp_conn_param.ticks_ref -=
|
||||||
|
@ -10685,7 +10685,7 @@ u32_t radio_adv_enable(u16_t interval, u8_t chan_map, u8_t filter_policy,
|
||||||
_radio.advertiser.hdr.ticks_slot = HAL_TICKER_US_TO_TICKS(slot_us);
|
_radio.advertiser.hdr.ticks_slot = HAL_TICKER_US_TO_TICKS(slot_us);
|
||||||
|
|
||||||
ticks_slot_offset =
|
ticks_slot_offset =
|
||||||
max(_radio.advertiser.hdr.ticks_active_to_start,
|
MAX(_radio.advertiser.hdr.ticks_active_to_start,
|
||||||
_radio.advertiser.hdr.ticks_xtal_to_start);
|
_radio.advertiser.hdr.ticks_xtal_to_start);
|
||||||
|
|
||||||
#if !defined(CONFIG_BT_HCI_MESH_EXT)
|
#if !defined(CONFIG_BT_HCI_MESH_EXT)
|
||||||
|
@ -10888,7 +10888,7 @@ u32_t radio_scan_enable(u8_t type, u8_t init_addr_type, u8_t *init_addr,
|
||||||
HAL_TICKER_US_TO_TICKS(RADIO_TICKER_XTAL_OFFSET_US));
|
HAL_TICKER_US_TO_TICKS(RADIO_TICKER_XTAL_OFFSET_US));
|
||||||
}
|
}
|
||||||
|
|
||||||
ticks_slot_offset = max(_radio.scanner.hdr.ticks_active_to_start,
|
ticks_slot_offset = MAX(_radio.scanner.hdr.ticks_active_to_start,
|
||||||
_radio.scanner.hdr.ticks_xtal_to_start);
|
_radio.scanner.hdr.ticks_xtal_to_start);
|
||||||
|
|
||||||
ticks_anchor = ticker_ticks_now_get();
|
ticks_anchor = ticker_ticks_now_get();
|
||||||
|
|
|
@ -319,7 +319,7 @@ struct pdu_data_q_tx {
|
||||||
|
|
||||||
#define LL_MEM_RX_POOL_SZ (MROUND(offsetof(struct radio_pdu_node_rx, \
|
#define LL_MEM_RX_POOL_SZ (MROUND(offsetof(struct radio_pdu_node_rx, \
|
||||||
pdu_data) + \
|
pdu_data) + \
|
||||||
max((PDU_AC_SIZE_MAX + PDU_AC_SIZE_EXTRA), \
|
MAX((PDU_AC_SIZE_MAX + PDU_AC_SIZE_EXTRA), \
|
||||||
(offsetof(struct pdu_data, lldata) + \
|
(offsetof(struct pdu_data, lldata) + \
|
||||||
LL_LENGTH_OCTETS_RX_MAX))) * \
|
LL_LENGTH_OCTETS_RX_MAX))) * \
|
||||||
(RADIO_PACKET_COUNT_RX_MAX + 3))
|
(RADIO_PACKET_COUNT_RX_MAX + 3))
|
||||||
|
|
|
@ -326,11 +326,11 @@ u32_t lll_evt_offset_get(struct evt_hdr *evt)
|
||||||
if (0) {
|
if (0) {
|
||||||
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
||||||
} else if (evt->ticks_xtal_to_start & XON_BITMASK) {
|
} else if (evt->ticks_xtal_to_start & XON_BITMASK) {
|
||||||
return max(evt->ticks_active_to_start,
|
return MAX(evt->ticks_active_to_start,
|
||||||
evt->ticks_preempt_to_start);
|
evt->ticks_preempt_to_start);
|
||||||
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
|
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
|
||||||
} else {
|
} else {
|
||||||
return max(evt->ticks_active_to_start,
|
return MAX(evt->ticks_active_to_start,
|
||||||
evt->ticks_xtal_to_start);
|
evt->ticks_xtal_to_start);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -391,7 +391,7 @@ static int prepare(lll_is_abort_cb_t is_abort_cb, lll_abort_cb_t abort_cb,
|
||||||
/* Calc the preempt timeout */
|
/* Calc the preempt timeout */
|
||||||
evt = HDR_LLL2EVT(prepare_param->param);
|
evt = HDR_LLL2EVT(prepare_param->param);
|
||||||
preempt_anchor = prepare_param->ticks_at_expire;
|
preempt_anchor = prepare_param->ticks_at_expire;
|
||||||
preempt_to = max(evt->ticks_active_to_start,
|
preempt_to = MAX(evt->ticks_active_to_start,
|
||||||
evt->ticks_xtal_to_start) -
|
evt->ticks_xtal_to_start) -
|
||||||
evt->ticks_preempt_to_start;
|
evt->ticks_preempt_to_start;
|
||||||
|
|
||||||
|
|
|
@ -149,7 +149,7 @@ static MFIFO_DEFINE(ll_pdu_rx_free, sizeof(void *), LL_PDU_RX_CNT);
|
||||||
|
|
||||||
#define PDU_RX_POOL_SIZE (MROUND(offsetof(struct node_rx_pdu, pdu) + \
|
#define PDU_RX_POOL_SIZE (MROUND(offsetof(struct node_rx_pdu, pdu) + \
|
||||||
sizeof(struct node_rx_ftr) + \
|
sizeof(struct node_rx_ftr) + \
|
||||||
max((PDU_AC_SIZE_MAX + PDU_AC_SIZE_EXTRA), \
|
MAX((PDU_AC_SIZE_MAX + PDU_AC_SIZE_EXTRA), \
|
||||||
(offsetof(struct pdu_data, lldata) + \
|
(offsetof(struct pdu_data, lldata) + \
|
||||||
PDU_RX_OCTETS_MAX))) * RX_CNT)
|
PDU_RX_OCTETS_MAX))) * RX_CNT)
|
||||||
|
|
||||||
|
|
|
@ -715,7 +715,7 @@ u8_t ll_adv_enable(u8_t enable)
|
||||||
HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
|
HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
|
||||||
adv->evt.ticks_slot = HAL_TICKER_US_TO_TICKS(slot_us);
|
adv->evt.ticks_slot = HAL_TICKER_US_TO_TICKS(slot_us);
|
||||||
|
|
||||||
ticks_slot_offset = max(adv->evt.ticks_active_to_start,
|
ticks_slot_offset = MAX(adv->evt.ticks_active_to_start,
|
||||||
adv->evt.ticks_xtal_to_start);
|
adv->evt.ticks_xtal_to_start);
|
||||||
|
|
||||||
if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
|
if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
|
||||||
|
|
|
@ -1452,7 +1452,7 @@ static inline void event_conn_upd_init(struct ll_conn *conn,
|
||||||
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
||||||
if (conn->evt.ticks_xtal_to_start & XON_BITMASK) {
|
if (conn->evt.ticks_xtal_to_start & XON_BITMASK) {
|
||||||
u32_t ticks_prepare_to_start =
|
u32_t ticks_prepare_to_start =
|
||||||
max(conn->evt.ticks_active_to_start,
|
MAX(conn->evt.ticks_active_to_start,
|
||||||
conn->evt.ticks_preempt_to_start);
|
conn->evt.ticks_preempt_to_start);
|
||||||
|
|
||||||
conn->llcp.conn_upd.ticks_anchor -=
|
conn->llcp.conn_upd.ticks_anchor -=
|
||||||
|
@ -1620,7 +1620,7 @@ static inline int event_conn_upd_prep(struct ll_conn *conn,
|
||||||
/* restore to normal prepare */
|
/* restore to normal prepare */
|
||||||
if (conn->evt.ticks_xtal_to_start & XON_BITMASK) {
|
if (conn->evt.ticks_xtal_to_start & XON_BITMASK) {
|
||||||
u32_t ticks_prepare_to_start =
|
u32_t ticks_prepare_to_start =
|
||||||
max(conn->evt.ticks_active_to_start,
|
MAX(conn->evt.ticks_active_to_start,
|
||||||
conn->evt.ticks_preempt_to_start);
|
conn->evt.ticks_preempt_to_start);
|
||||||
|
|
||||||
conn->evt.ticks_xtal_to_start &= ~XON_BITMASK;
|
conn->evt.ticks_xtal_to_start &= ~XON_BITMASK;
|
||||||
|
@ -1645,7 +1645,7 @@ static inline int event_conn_upd_prep(struct ll_conn *conn,
|
||||||
lll->latency_prepare -= (instant_latency - latency);
|
lll->latency_prepare -= (instant_latency - latency);
|
||||||
|
|
||||||
/* calculate the offset, window widening and interval */
|
/* calculate the offset, window widening and interval */
|
||||||
ticks_slot_offset = max(conn->evt.ticks_active_to_start,
|
ticks_slot_offset = MAX(conn->evt.ticks_active_to_start,
|
||||||
conn->evt.ticks_xtal_to_start);
|
conn->evt.ticks_xtal_to_start);
|
||||||
conn_interval_us = conn->llcp.conn_upd.interval * 1250;
|
conn_interval_us = conn->llcp.conn_upd.interval * 1250;
|
||||||
periodic_us = conn_interval_us;
|
periodic_us = conn_interval_us;
|
||||||
|
@ -2162,7 +2162,7 @@ static inline void event_conn_param_req(struct ll_conn *conn,
|
||||||
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
|
||||||
if (conn->evt.ticks_xtal_to_start & XON_BITMASK) {
|
if (conn->evt.ticks_xtal_to_start & XON_BITMASK) {
|
||||||
u32_t ticks_prepare_to_start =
|
u32_t ticks_prepare_to_start =
|
||||||
max(conn->evt.ticks_active_to_start,
|
MAX(conn->evt.ticks_active_to_start,
|
||||||
conn->evt.ticks_preempt_to_start);
|
conn->evt.ticks_preempt_to_start);
|
||||||
|
|
||||||
conn->llcp_conn_param.ticks_ref -=
|
conn->llcp_conn_param.ticks_ref -=
|
||||||
|
|
|
@ -473,7 +473,7 @@ void ull_master_setup(memq_link_t *link, struct node_rx_hdr *rx,
|
||||||
ftr->us_radio_rdy + 328 + TIFS_US +
|
ftr->us_radio_rdy + 328 + TIFS_US +
|
||||||
328);
|
328);
|
||||||
|
|
||||||
ticks_slot_offset = max(conn->evt.ticks_active_to_start,
|
ticks_slot_offset = MAX(conn->evt.ticks_active_to_start,
|
||||||
conn->evt.ticks_xtal_to_start);
|
conn->evt.ticks_xtal_to_start);
|
||||||
|
|
||||||
if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
|
if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
|
||||||
|
@ -630,7 +630,7 @@ void ull_master_setup(memq_link_t *link, struct node_rx_hdr *rx,
|
||||||
conn->hdr.ticks_preempt_to_start = HAL_TICKER_US_TO_TICKS(
|
conn->hdr.ticks_preempt_to_start = HAL_TICKER_US_TO_TICKS(
|
||||||
EVENT_OVERHEAD_PREEMPT_MIN_US);
|
EVENT_OVERHEAD_PREEMPT_MIN_US);
|
||||||
conn->hdr.ticks_slot = _radio.scanner.ticks_conn_slot;
|
conn->hdr.ticks_slot = _radio.scanner.ticks_conn_slot;
|
||||||
ticks_slot_offset = max(conn->hdr.ticks_active_to_start,
|
ticks_slot_offset = MAX(conn->hdr.ticks_active_to_start,
|
||||||
conn->hdr.ticks_xtal_to_start);
|
conn->hdr.ticks_xtal_to_start);
|
||||||
|
|
||||||
/* Stop Scanner */
|
/* Stop Scanner */
|
||||||
|
|
|
@ -191,7 +191,7 @@ u8_t ull_scan_enable(struct ll_scan_set *scan)
|
||||||
lll->ticks_window = 0;
|
lll->ticks_window = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
ticks_slot_offset = max(scan->evt.ticks_active_to_start,
|
ticks_slot_offset = MAX(scan->evt.ticks_active_to_start,
|
||||||
scan->evt.ticks_xtal_to_start);
|
scan->evt.ticks_xtal_to_start);
|
||||||
|
|
||||||
if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
|
if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
|
||||||
|
|
|
@ -271,7 +271,7 @@ void ull_slave_setup(memq_link_t *link, struct node_rx_hdr *rx,
|
||||||
ftr->us_radio_rdy + 328 + TIFS_US +
|
ftr->us_radio_rdy + 328 + TIFS_US +
|
||||||
328);
|
328);
|
||||||
|
|
||||||
ticks_slot_offset = max(conn->evt.ticks_active_to_start,
|
ticks_slot_offset = MAX(conn->evt.ticks_active_to_start,
|
||||||
conn->evt.ticks_xtal_to_start);
|
conn->evt.ticks_xtal_to_start);
|
||||||
|
|
||||||
if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
|
if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
|
||||||
|
|
|
@ -1040,7 +1040,7 @@ static inline void ticker_job_compare_update(struct ticker_instance *instance,
|
||||||
ticks_elapsed = ticker_ticks_diff_get(ctr, cc) +
|
ticks_elapsed = ticker_ticks_diff_get(ctr, cc) +
|
||||||
HAL_TICKER_CNTR_CMP_OFFSET_MIN +
|
HAL_TICKER_CNTR_CMP_OFFSET_MIN +
|
||||||
HAL_TICKER_CNTR_SET_LATENCY;
|
HAL_TICKER_CNTR_SET_LATENCY;
|
||||||
cc += max(ticks_elapsed, ticks_to_expire);
|
cc += MAX(ticks_elapsed, ticks_to_expire);
|
||||||
cc &= HAL_TICKER_CNTR_MASK;
|
cc &= HAL_TICKER_CNTR_MASK;
|
||||||
|
|
||||||
instance->trigger_set_cb(cc);
|
instance->trigger_set_cb(cc);
|
||||||
|
|
|
@ -247,7 +247,7 @@ static u8_t att_mtu_req(struct bt_att *att, struct net_buf *buf)
|
||||||
* A device's Exchange MTU Request shall contain the same MTU as the
|
* A device's Exchange MTU Request shall contain the same MTU as the
|
||||||
* device's Exchange MTU Response (i.e. the MTU shall be symmetric).
|
* device's Exchange MTU Response (i.e. the MTU shall be symmetric).
|
||||||
*/
|
*/
|
||||||
att->chan.rx.mtu = min(mtu_client, mtu_server);
|
att->chan.rx.mtu = MIN(mtu_client, mtu_server);
|
||||||
att->chan.tx.mtu = att->chan.rx.mtu;
|
att->chan.tx.mtu = att->chan.rx.mtu;
|
||||||
|
|
||||||
BT_DBG("Negotiated MTU %u", att->chan.rx.mtu);
|
BT_DBG("Negotiated MTU %u", att->chan.rx.mtu);
|
||||||
|
@ -363,7 +363,7 @@ static u8_t att_mtu_rsp(struct bt_att *att, struct net_buf *buf)
|
||||||
return att_handle_rsp(att, NULL, 0, BT_ATT_ERR_INVALID_PDU);
|
return att_handle_rsp(att, NULL, 0, BT_ATT_ERR_INVALID_PDU);
|
||||||
}
|
}
|
||||||
|
|
||||||
att->chan.rx.mtu = min(mtu, BT_ATT_MTU);
|
att->chan.rx.mtu = MIN(mtu, BT_ATT_MTU);
|
||||||
|
|
||||||
/* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part F] page 484:
|
/* BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part F] page 484:
|
||||||
*
|
*
|
||||||
|
|
|
@ -1308,7 +1308,7 @@ static struct net_buf *create_frag(struct bt_conn *conn, struct net_buf *buf)
|
||||||
/* Fragments never have a TX completion callback */
|
/* Fragments never have a TX completion callback */
|
||||||
conn_tx(frag)->cb = NULL;
|
conn_tx(frag)->cb = NULL;
|
||||||
|
|
||||||
frag_len = min(conn_mtu(conn), net_buf_tailroom(frag));
|
frag_len = MIN(conn_mtu(conn), net_buf_tailroom(frag));
|
||||||
|
|
||||||
net_buf_add_mem(frag, buf->data, frag_len);
|
net_buf_add_mem(frag, buf->data, frag_len);
|
||||||
net_buf_pull(buf, frag_len);
|
net_buf_pull(buf, frag_len);
|
||||||
|
|
|
@ -796,7 +796,7 @@ ssize_t bt_gatt_attr_read(struct bt_conn *conn, const struct bt_gatt_attr *attr,
|
||||||
return BT_GATT_ERR(BT_ATT_ERR_INVALID_OFFSET);
|
return BT_GATT_ERR(BT_ATT_ERR_INVALID_OFFSET);
|
||||||
}
|
}
|
||||||
|
|
||||||
len = min(buf_len, value_len - offset);
|
len = MIN(buf_len, value_len - offset);
|
||||||
|
|
||||||
BT_DBG("handle 0x%04x offset %u length %u", attr->handle, offset,
|
BT_DBG("handle 0x%04x offset %u length %u", attr->handle, offset,
|
||||||
len);
|
len);
|
||||||
|
@@ -2552,7 +2552,7 @@ static int gatt_prepare_write(struct bt_conn *conn,
 struct bt_att_prepare_write_req *req;
 u16_t len;

-len = min(params->length, bt_att_get_mtu(conn) - sizeof(*req) - 1);
+len = MIN(params->length, bt_att_get_mtu(conn) - sizeof(*req) - 1);

 buf = bt_att_create_pdu(conn, BT_ATT_OP_PREPARE_WRITE_REQ,
 sizeof(*req) + len);

@@ -3652,7 +3652,7 @@ static void read_buffer_size_complete(struct net_buf *buf)

 BT_DBG("ACL BR/EDR buffers: pkts %u mtu %u", pkts, bt_dev.le.mtu);

-pkts = min(pkts, CONFIG_BT_CONN_TX_MAX);
+pkts = MIN(pkts, CONFIG_BT_CONN_TX_MAX);

 k_sem_init(&bt_dev.le.pkts, pkts, pkts);
 }

@@ -3673,7 +3673,7 @@ static void le_read_buffer_size_complete(struct net_buf *buf)

 BT_DBG("ACL LE buffers: pkts %u mtu %u", rp->le_max_num, bt_dev.le.mtu);

-le_max_num = min(rp->le_max_num, CONFIG_BT_CONN_TX_MAX);
+le_max_num = MIN(rp->le_max_num, CONFIG_BT_CONN_TX_MAX);
 k_sem_init(&bt_dev.le.pkts, le_max_num, le_max_num);
 }
 #endif

@@ -4858,7 +4858,7 @@ int bt_set_id_addr(const bt_addr_le_t *addr)

 void bt_id_get(bt_addr_le_t *addrs, size_t *count)
 {
-size_t to_copy = min(*count, bt_dev.id_count);
+size_t to_copy = MIN(*count, bt_dev.id_count);

 memcpy(addrs, bt_dev.id_addr, to_copy * sizeof(bt_addr_le_t));
 *count = to_copy;

@@ -5053,7 +5053,7 @@ static uint8_t bt_read_static_addr(bt_addr_le_t *addr)
 return 0;
 }
 rp = (void *)rsp->data;
-cnt = min(rp->num_addrs, CONFIG_BT_ID_MAX);
+cnt = MIN(rp->num_addrs, CONFIG_BT_ID_MAX);

 for (i = 0; i < cnt; i++) {
 addr[i].type = BT_ADDR_LE_RANDOM;

@@ -662,7 +662,7 @@ static void l2cap_chan_rx_init(struct bt_l2cap_le_chan *chan)
 /* MPS shall not be bigger than MTU + 2 as the remaining bytes cannot
  * be used.
  */
-chan->rx.mps = min(chan->rx.mtu + 2, L2CAP_MAX_LE_MPS);
+chan->rx.mps = MIN(chan->rx.mtu + 2, L2CAP_MAX_LE_MPS);
 k_sem_init(&chan->rx.credits, 0, UINT_MAX);

 if (BT_DBG_ENABLED &&

@@ -1085,9 +1085,9 @@ segment:
 }

 /* Don't send more that TX MPS including SDU length */
-len = min(net_buf_tailroom(seg), ch->tx.mps - sdu_hdr_len);
+len = MIN(net_buf_tailroom(seg), ch->tx.mps - sdu_hdr_len);
 /* Limit if original buffer is smaller than the segment */
-len = min(buf->len, len);
+len = MIN(buf->len, len);
 net_buf_add_mem(seg, buf->data, len);
 net_buf_pull(buf, len);

@@ -740,7 +740,7 @@ static void l2cap_br_conn_req(struct bt_l2cap_br *l2cap, u8_t ident,
 atomic_set_bit(BR_CHAN(chan)->flags, L2CAP_FLAG_CONN_ACCEPTOR);

 /* Disable fragmentation of l2cap rx pdu */
-BR_CHAN(chan)->rx.mtu = min(BR_CHAN(chan)->rx.mtu, L2CAP_BR_MAX_MTU);
+BR_CHAN(chan)->rx.mtu = MIN(BR_CHAN(chan)->rx.mtu, L2CAP_BR_MAX_MTU);

 switch (l2cap_br_conn_security(chan, psm)) {
 case L2CAP_CONN_SECURITY_PENDING:

@@ -103,7 +103,7 @@ static inline void adv_send(struct net_buf *buf)
 struct bt_data ad;
 int err;

-adv_int = max(adv_int_min,
+adv_int = MAX(adv_int_min,
 BT_MESH_TRANSMIT_INT(BT_MESH_ADV(buf)->xmit));
 duration = (MESH_SCAN_WINDOW_MS +
 ((BT_MESH_TRANSMIT_COUNT(BT_MESH_ADV(buf)->xmit) + 1) *

@@ -55,7 +55,7 @@ static void comp_data_status(struct bt_mesh_model *model,
 param = cli->op_param;

 *(param->status) = net_buf_simple_pull_u8(buf);
-to_copy = min(net_buf_simple_tailroom(param->comp), buf->len);
+to_copy = MIN(net_buf_simple_tailroom(param->comp), buf->len);
 net_buf_simple_add_mem(param->comp, buf->data, to_copy);

 k_sem_give(&cli->op_sync);

@@ -3378,8 +3378,8 @@ void bt_mesh_heartbeat(u16_t src, u16_t dst, u8_t hops, u16_t feat)
 return;
 }

-cfg->hb_sub.min_hops = min(cfg->hb_sub.min_hops, hops);
-cfg->hb_sub.max_hops = max(cfg->hb_sub.max_hops, hops);
+cfg->hb_sub.min_hops = MIN(cfg->hb_sub.min_hops, hops);
+cfg->hb_sub.max_hops = MAX(cfg->hb_sub.max_hops, hops);

 if (cfg->hb_sub.count < 0xffff) {
 cfg->hb_sub.count++;

@@ -35,7 +35,7 @@
 #endif

 #define LPN_RECV_DELAY CONFIG_BT_MESH_LPN_RECV_DELAY
-#define SCAN_LATENCY min(CONFIG_BT_MESH_LPN_SCAN_LATENCY, \
+#define SCAN_LATENCY MIN(CONFIG_BT_MESH_LPN_SCAN_LATENCY, \
 LPN_RECV_DELAY)

 #define FRIEND_REQ_RETRY_TIMEOUT K_SECONDS(CONFIG_BT_MESH_LPN_RETRY_TIMEOUT)

@@ -834,12 +834,12 @@ static s32_t poll_timeout(struct bt_mesh_lpn *lpn)
 {
 /* If we're waiting for segment acks keep polling at high freq */
 if (bt_mesh_tx_in_progress()) {
-return min(POLL_TIMEOUT_MAX(lpn), K_SECONDS(1));
+return MIN(POLL_TIMEOUT_MAX(lpn), K_SECONDS(1));
 }

 if (lpn->poll_timeout < POLL_TIMEOUT_MAX(lpn)) {
 lpn->poll_timeout *= 2;
-lpn->poll_timeout = min(lpn->poll_timeout,
+lpn->poll_timeout = MIN(lpn->poll_timeout,
 POLL_TIMEOUT_MAX(lpn));
 }
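
poll_timeout() above is a capped exponential backoff: the poll interval doubles on each quiet cycle and MIN() pins it to the configured ceiling. A standalone sketch of the same shape, with made-up numbers (POLL_MAX_MS and the caller's starting interval are illustrative, not Zephyr's values):

    #include <stdint.h>

    #define MIN(a, b) (((a) < (b)) ? (a) : (b))
    #define POLL_MAX_MS 10000U /* illustrative ceiling */

    /* Double the current interval but never exceed the ceiling. */
    static uint32_t next_poll_interval(uint32_t current_ms)
    {
        return MIN(current_ms * 2U, POLL_MAX_MS);
    }

Starting from 500 ms this yields 1000, 2000, 4000, 8000 and then sticks at 10000, which is the behaviour the hunk preserves while only renaming the macro.
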
@@ -962,7 +962,7 @@ int bt_mesh_lpn_friend_update(struct bt_mesh_net_rx *rx,
 }

 /* Set initial poll timeout */
-lpn->poll_timeout = min(POLL_TIMEOUT_MAX(lpn),
+lpn->poll_timeout = MIN(POLL_TIMEOUT_MAX(lpn),
 POLL_TIMEOUT_INIT);
 }

@@ -408,7 +408,7 @@ static int prov_send_adv(struct net_buf_simple *msg)

 link.tx.buf[0] = start;

-seg_len = min(msg->len, START_PAYLOAD_MAX);
+seg_len = MIN(msg->len, START_PAYLOAD_MAX);
 BT_DBG("seg 0 len %u: %s", seg_len, bt_hex(msg->data, seg_len));
 net_buf_add_mem(start, msg->data, seg_len);
 net_buf_simple_pull(msg, seg_len);

@@ -429,7 +429,7 @@ static int prov_send_adv(struct net_buf_simple *msg)

 link.tx.buf[seg_id] = buf;

-seg_len = min(msg->len, CONT_PAYLOAD_MAX);
+seg_len = MIN(msg->len, CONT_PAYLOAD_MAX);

 BT_DBG("seg_id %u len %u: %s", seg_id, seg_len,
 bt_hex(msg->data, seg_len));

@@ -1170,8 +1170,8 @@ static s32_t gatt_proxy_advertise(struct bt_mesh_subnet *sub)
 * 6 slices, but make sure that a slice is at least one
 * second long (to avoid excessive rotation).
 */
-max_timeout = NODE_ID_TIMEOUT / max(subnet_count, 6);
-max_timeout = max(max_timeout, K_SECONDS(1));
+max_timeout = NODE_ID_TIMEOUT / MAX(subnet_count, 6);
+max_timeout = MAX(max_timeout, K_SECONDS(1));

 if (remaining > max_timeout || remaining < 0) {
 remaining = max_timeout;

@@ -379,7 +379,7 @@ static int send_seg(struct bt_mesh_net_tx *net_tx, struct net_buf_simple *sdu,
 (seg_o >> 3)));
 net_buf_add_u8(seg, ((seg_o & 0x07) << 5) | tx->seg_n);

-len = min(sdu->len, 12);
+len = MIN(sdu->len, 12);
 net_buf_add_mem(seg, sdu->data, len);
 net_buf_simple_pull(sdu, len);

@@ -921,7 +921,7 @@ static inline s32_t ack_timeout(struct seg_rx *rx)
 /* Make sure we don't send more frequently than the duration for
 * each packet (default is 300ms).
 */
-return max(to, K_MSEC(400));
+return MAX(to, K_MSEC(400));
 }

 int bt_mesh_ctl_send(struct bt_mesh_net_tx *tx, u8_t ctl_op, void *data,

@@ -86,7 +86,7 @@ static void encode_drops(struct bt_monitor_hdr *hdr, u8_t type,
 count = atomic_set(val, 0);
 if (count) {
 hdr->ext[hdr->hdr_len++] = type;
-hdr->ext[hdr->hdr_len++] = min(count, 255);
+hdr->ext[hdr->hdr_len++] = MIN(count, 255);
 }
 }

@@ -402,7 +402,7 @@ static void rfcomm_connected(struct bt_l2cap_chan *chan)
 BT_DBG("Session %p", session);

 /* Need to include UIH header and FCS*/
-session->mtu = min(session->br_chan.rx.mtu,
+session->mtu = MIN(session->br_chan.rx.mtu,
 session->br_chan.tx.mtu) -
 BT_RFCOMM_HDR_SIZE + BT_RFCOMM_FCS_SIZE;

@@ -480,7 +480,7 @@ static struct bt_rfcomm_dlc *rfcomm_dlc_accept(struct bt_rfcomm_session *session
 }

 rfcomm_dlc_init(dlc, session, dlci, BT_RFCOMM_ROLE_ACCEPTOR);
-dlc->mtu = min(dlc->mtu, session->mtu);
+dlc->mtu = MIN(dlc->mtu, session->mtu);

 return dlc;
 }

@@ -963,7 +963,7 @@ static int rfcomm_dlc_start(struct bt_rfcomm_dlc *dlc)
 result = rfcomm_dlc_security(dlc);
 switch (result) {
 case RFCOMM_SECURITY_PASSED:
-dlc->mtu = min(dlc->mtu, dlc->session->mtu);
+dlc->mtu = MIN(dlc->mtu, dlc->session->mtu);
 dlc->state = BT_RFCOMM_STATE_CONFIG;
 rfcomm_send_pn(dlc, BT_RFCOMM_MSG_CMD_CR);
 break;

@@ -1182,7 +1182,7 @@ static void rfcomm_handle_pn(struct bt_rfcomm_session *session,

 BT_DBG("Incoming connection accepted dlc %p", dlc);

-dlc->mtu = min(dlc->mtu, sys_le16_to_cpu(pn->mtu));
+dlc->mtu = MIN(dlc->mtu, sys_le16_to_cpu(pn->mtu));

 if (pn->flow_ctrl == BT_RFCOMM_PN_CFC_CMD) {
 if (session->cfc == BT_RFCOMM_CFC_UNKNOWN) {

@@ -1206,14 +1206,14 @@ static void rfcomm_handle_pn(struct bt_rfcomm_session *session,
 rfcomm_dlc_close(dlc);
 return;
 }
-dlc->mtu = min(dlc->mtu, sys_le16_to_cpu(pn->mtu));
+dlc->mtu = MIN(dlc->mtu, sys_le16_to_cpu(pn->mtu));
 rfcomm_send_pn(dlc, BT_RFCOMM_MSG_RESP_CR);
 } else {
 if (dlc->state != BT_RFCOMM_STATE_CONFIG) {
 return;
 }

-dlc->mtu = min(dlc->mtu, sys_le16_to_cpu(pn->mtu));
+dlc->mtu = MIN(dlc->mtu, sys_le16_to_cpu(pn->mtu));
 if (pn->flow_ctrl == BT_RFCOMM_PN_CFC_RESP) {
 if (session->cfc == BT_RFCOMM_CFC_UNKNOWN) {
 session->cfc = BT_RFCOMM_CFC_SUPPORTED;

@@ -1538,7 +1538,7 @@ static void rfcomm_encrypt_change(struct bt_l2cap_chan *chan,
 rfcomm_send_ua(session, dlc->dlci);
 rfcomm_dlc_connected(dlc);
 } else {
-dlc->mtu = min(dlc->mtu, session->mtu);
+dlc->mtu = MIN(dlc->mtu, session->mtu);
 dlc->state = BT_RFCOMM_STATE_CONFIG;
 rfcomm_send_pn(dlc, BT_RFCOMM_MSG_CMD_CR);
 }

@@ -650,7 +650,7 @@ static u16_t sdp_svc_search_req(struct bt_sdp *sdp, struct net_buf *buf,

 /* 4 bytes per Service Record Handle */
 /* 4 bytes for ContinuationState */
-if ((min(SDP_MTU, sdp->chan.tx.mtu) - resp_buf->len) <
+if ((MIN(SDP_MTU, sdp->chan.tx.mtu) - resp_buf->len) <
 (4 + 4 + sizeof(struct bt_sdp_hdr))) {
 pkt_full = true;
 }

@@ -846,7 +846,7 @@ static u8_t select_attrs(struct bt_sdp_attribute *attr, u8_t att_idx,
 }

 if (sad->rsp_buf) {
-space = min(SDP_MTU, sad->sdp->chan.tx.mtu) -
+space = MIN(SDP_MTU, sad->sdp->chan.tx.mtu) -
 sad->rsp_buf->len - sizeof(struct bt_sdp_hdr);

 if ((!sad->state->pkt_full) &&

@@ -539,7 +539,7 @@ static u8_t get_encryption_key_size(struct bt_smp *smp)
 * encryption key length parameters shall be used as the encryption key
 * size.
 */
-return min(req->max_key_size, rsp->max_key_size);
+return MIN(req->max_key_size, rsp->max_key_size);
 }

 #if defined(CONFIG_BT_PRIVACY) || defined(CONFIG_BT_SIGNING) || \

@@ -57,7 +57,7 @@ static bool data_cb(struct bt_data *data, void *user_data)
 switch (data->type) {
 case BT_DATA_NAME_SHORTENED:
 case BT_DATA_NAME_COMPLETE:
-memcpy(name, data->data, min(data->data_len, NAME_LEN - 1));
+memcpy(name, data->data, MIN(data->data_len, NAME_LEN - 1));
 return false;
 default:
 return true;

@@ -318,7 +318,7 @@ static int cmd_write(const struct shell *shell, size_t argc, char *argv[])
 size_t len;
 int i;

-len = min(strtoul(argv[4], NULL, 16), sizeof(gatt_write_buf));
+len = MIN(strtoul(argv[4], NULL, 16), sizeof(gatt_write_buf));

 for (i = 1; i < len; i++) {
 gatt_write_buf[i] = gatt_write_buf[0];

@@ -371,7 +371,7 @@ static int cmd_write_without_rsp(const struct shell *shell,
 if (argc > 3) {
 int i;

-len = min(strtoul(argv[3], NULL, 16), sizeof(gatt_write_buf));
+len = MIN(strtoul(argv[3], NULL, 16), sizeof(gatt_write_buf));

 for (i = 1; i < len; i++) {
 gatt_write_buf[i] = gatt_write_buf[0];

@@ -673,7 +673,7 @@ static ssize_t read_met(struct bt_conn *conn, const struct bt_gatt_attr *attr,
 const char *value = attr->user_data;
 u16_t value_len;

-value_len = min(strlen(value), CHAR_SIZE_MAX);
+value_len = MIN(strlen(value), CHAR_SIZE_MAX);

 return bt_gatt_attr_read(conn, attr, buf, len, offset, value,
 value_len);

@@ -307,7 +307,7 @@ static int cmd_send(const struct shell *shell, size_t argc, char *argv[])
 count = strtoul(argv[1], NULL, 10);
 }

-len = min(l2ch_chan.ch.tx.mtu, DATA_MTU - BT_L2CAP_CHAN_SEND_RESERVE);
+len = MIN(l2ch_chan.ch.tx.mtu, DATA_MTU - BT_L2CAP_CHAN_SEND_RESERVE);

 while (count--) {
 buf = net_buf_alloc(&data_tx_pool, K_FOREVER);

@@ -204,7 +204,7 @@ static int cmd_send(const struct shell *shell, size_t argc, char *argv[])
 while (count--) {
 buf = bt_rfcomm_create_pdu(&pool);
 /* Should reserve one byte in tail for FCS */
-len = min(rfcomm_dlc.mtu, net_buf_tailroom(buf) - 1);
+len = MIN(rfcomm_dlc.mtu, net_buf_tailroom(buf) - 1);

 net_buf_add_mem(buf, buf_data, len);
 ret = bt_rfcomm_dlc_send(&rfcomm_dlc, buf);

@@ -567,7 +567,7 @@ static int sdhc_rx_block(struct sdhc_data *data, u8_t *buf, int len)

 /* Read the data in batches */
 for (i = 0; i < len; i += sizeof(sdhc_ones)) {
-int remain = min(sizeof(sdhc_ones), len - i);
+int remain = MIN(sizeof(sdhc_ones), len - i);

 struct spi_buf tx_bufs[] = {
 {

@@ -145,7 +145,7 @@ static int _nvs_flash_block_cmp(struct nvs_fs *fs, u32_t addr, const void *data,

 block_size = NVS_BLOCK_SIZE & ~(fs->write_block_size - 1);
 while (len) {
-bytes_to_cmp = min(block_size, len);
+bytes_to_cmp = MIN(block_size, len);
 rc = _nvs_flash_rd(fs, addr, buf, bytes_to_cmp);
 if (rc) {
 return rc;

@@ -175,7 +175,7 @@ static int _nvs_flash_cmp_const(struct nvs_fs *fs, u32_t addr, u8_t value,
 block_size = NVS_BLOCK_SIZE & ~(fs->write_block_size - 1);
 (void)memset(cmp, value, block_size);
 while (len) {
-bytes_to_cmp = min(block_size, len);
+bytes_to_cmp = MIN(block_size, len);
 rc = _nvs_flash_block_cmp(fs, addr, cmp, bytes_to_cmp);
 if (rc) {
 return rc;

@@ -198,7 +198,7 @@ static int _nvs_flash_block_move(struct nvs_fs *fs, u32_t addr, size_t len)
 block_size = NVS_BLOCK_SIZE & ~(fs->write_block_size - 1);

 while (len) {
-bytes_to_copy = min(block_size, len);
+bytes_to_copy = MIN(block_size, len);
 rc = _nvs_flash_rd(fs, addr, buf, bytes_to_copy);
 if (rc) {
 return rc;

@@ -910,7 +910,7 @@ ssize_t nvs_read_hist(struct nvs_fs *fs, u16_t id, void *data, size_t len,

 rd_addr &= ADDR_SECT_MASK;
 rd_addr += wlk_ate.offset;
-rc = _nvs_flash_rd(fs, rd_addr, data, min(len, wlk_ate.len));
+rc = _nvs_flash_rd(fs, rd_addr, data, MIN(len, wlk_ate.len));
 if (rc) {
 goto err;
 }

@@ -281,7 +281,7 @@ static int cmd_read(const struct shell *shell, size_t argc, char **argv)
 u8_t buf[16];
 int i;

-read = fs_read(&file, buf, min(count, sizeof(buf)));
+read = fs_read(&file, buf, MIN(count, sizeof(buf)));
 if (read <= 0) {
 break;
 }

@@ -110,7 +110,7 @@ static int line_out_drop_mode(void)
 }

 if (drop_warn) {
-int cnt = min(drop_cnt, DROP_MAX);
+int cnt = MIN(drop_cnt, DROP_MAX);

 if (cnt < 10) {
 line_buf[DROP_MSG_LEN - 2] = ' ';

@@ -189,7 +189,7 @@ int log_printk(const char *fmt, va_list ap)

 length = vsnprintk(formatted_str,
 sizeof(formatted_str), fmt, ap);
-length = min(length, sizeof(formatted_str));
+length = MIN(length, sizeof(formatted_str));

 msg = log_msg_hexdump_create(NULL, formatted_str,
 length);

@@ -536,7 +536,7 @@ u32_t log_filter_set(struct log_backend const *const backend,
 backend = log_backend_get(i);
 current = log_filter_set(backend, domain_id,
 src_id, level);
-max = max(current, max);
+max = MAX(current, max);
 }

 level = max;

@@ -544,7 +544,7 @@ u32_t log_filter_set(struct log_backend const *const backend,
 u32_t max = log_filter_get(backend, domain_id,
 src_id, false);

-level = min(level, max);
+level = MIN(level, max);

 LOG_FILTER_SLOT_SET(filters,
 log_backend_id_get(backend),

@@ -199,7 +199,7 @@ static void copy_args_to_msg(struct log_msg *msg, u32_t *args, u32_t nargs)
 }

 while (nargs != 0) {
-u32_t cpy_args = min(nargs, ARGS_CONT_MSG);
+u32_t cpy_args = MIN(nargs, ARGS_CONT_MSG);

 (void)memcpy(cont->payload.args, args,
 cpy_args * sizeof(u32_t));

@@ -614,7 +614,7 @@ void log_output_dropped_process(const struct log_output *log_output, u32_t cnt)
 log_output_func_t outf = log_output->func;
 struct device *dev = (struct device *)log_output->control_block->ctx;

-cnt = min(cnt, 9999);
+cnt = MIN(cnt, 9999);
 len = snprintf(buf, sizeof(buf), "%d", cnt);

 buffer_write(outf, (u8_t *)prefix, sizeof(prefix) - 1, dev);

@@ -145,7 +145,7 @@ static u8_t *fixed_data_alloc(struct net_buf *buf, size_t *size, s32_t timeout)
 struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
 const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

-*size = min(fixed->data_size, *size);
+*size = MIN(fixed->data_size, *size);

 return fixed->data_pool + fixed->data_size * net_buf_id(buf);
 }

@@ -316,7 +316,7 @@ success:
 if (timeout != K_NO_WAIT && timeout != K_FOREVER) {
 u32_t diff = k_uptime_get_32() - alloc_start;

-timeout -= min(timeout, diff);
+timeout -= MIN(timeout, diff);
 }

 buf->__buf = data_alloc(buf, &size, timeout);
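
The net_buf hunk directly above (and several identical ones later in this diff) subtracts the time already spent waiting from the remaining allocation timeout. Clamping the elapsed time with MIN() keeps the remaining budget from underflowing when the wait ran longer than the budget. A small standalone sketch of that bookkeeping, assuming an unsigned millisecond budget; the real code works on Zephyr's s32_t timeouts and only reaches this line after excluding K_NO_WAIT and K_FOREVER:

    #include <stdint.h>

    #define MIN(a, b) (((a) < (b)) ? (a) : (b))

    /* Return how much of the wait budget is left at 'now_ms'. */
    static uint32_t remaining_budget(uint32_t budget_ms, uint32_t start_ms, uint32_t now_ms)
    {
        uint32_t elapsed = now_ms - start_ms; /* unsigned wrap keeps this correct */

        /* Without the clamp, elapsed > budget_ms would underflow to a huge value
         * and the caller would wait far longer than requested.
         */
        budget_ms -= MIN(budget_ms, elapsed);
        return budget_ms;
    }
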
@@ -590,7 +590,7 @@ struct net_buf *net_buf_clone(struct net_buf *buf, s32_t timeout)
 if (timeout != K_NO_WAIT && timeout != K_FOREVER) {
 u32_t diff = k_uptime_get_32() - alloc_start;

-timeout -= min(timeout, diff);
+timeout -= MIN(timeout, diff);
 }

 clone->__buf = data_alloc(clone, &size, timeout);

@@ -681,7 +681,7 @@ size_t net_buf_linearize(void *dst, size_t dst_len, struct net_buf *src,
 size_t to_copy;
 size_t copied;

-len = min(len, dst_len);
+len = MIN(len, dst_len);

 frag = src;

@@ -694,7 +694,7 @@ size_t net_buf_linearize(void *dst, size_t dst_len, struct net_buf *src,
 /* traverse the fragment chain until len bytes are copied */
 copied = 0;
 while (frag && len > 0) {
-to_copy = min(len, frag->len - offset);
+to_copy = MIN(len, frag->len - offset);
 memcpy((u8_t *)dst + copied, frag->data + offset, to_copy);

 copied += to_copy;

@@ -723,7 +723,7 @@ size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
 const u8_t *value8 = value;

 do {
-u16_t count = min(len, net_buf_tailroom(frag));
+u16_t count = MIN(len, net_buf_tailroom(frag));

 net_buf_add_mem(frag, value8, count);
 len -= count;
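
net_buf_append_bytes() above is the chunking variant of the same clamp: each pass writes as much of the payload as the current fragment can still hold and then moves on. A generic, self-contained sketch of that loop (append_in_chunks, CHUNK_SIZE and the emit callback are illustrative stand-ins, not the net_buf API):

    #include <stddef.h>
    #include <stdint.h>

    #define MIN(a, b) (((a) < (b)) ? (a) : (b))
    #define CHUNK_SIZE 64U /* stand-in for the free space reported per fragment */

    /* Feed 'len' bytes of 'src' to 'emit' in pieces of at most CHUNK_SIZE. */
    static size_t append_in_chunks(const uint8_t *src, size_t len,
                                   void (*emit)(const uint8_t *chunk, size_t n))
    {
        size_t total = 0;

        while (len > 0) {
            size_t count = MIN(len, (size_t)CHUNK_SIZE);

            emit(src, count);
            src += count;
            len -= count;
            total += count;
        }

        return total;
    }

The real function additionally has to grow the fragment chain when the current fragment fills up, which this sketch leaves out.
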
@@ -476,7 +476,7 @@ static void dhcpv4_enter_bound(struct net_if *iface)
 renewal_time, rebinding_time);

 iface->config.dhcpv4.timer_start = k_uptime_get();
-iface->config.dhcpv4.request_time = min(renewal_time, rebinding_time);
+iface->config.dhcpv4.request_time = MIN(renewal_time, rebinding_time);

 dhcpv4_update_timeout_work(iface->config.dhcpv4.request_time);
 }

@@ -517,7 +517,7 @@ static u32_t dhcph4_manage_timers(struct net_if *iface, s64_t timeout)
 return dhcpv4_send_request(iface);
 }

-return min(iface->config.dhcpv4.renewal_time,
+return MIN(iface->config.dhcpv4.renewal_time,
 iface->config.dhcpv4.rebinding_time);
 case NET_DHCPV4_RENEWING:
 case NET_DHCPV4_REBINDING:

@@ -600,12 +600,12 @@ static struct net_pkt *net_pkt_get(struct k_mem_slab *slab,
 iface_len = data_len = net_if_get_mtu(iface);

 if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
-data_len = max(iface_len, NET_IPV6_MTU);
+data_len = MAX(iface_len, NET_IPV6_MTU);
 data_len -= NET_IPV6H_LEN;
 }

 if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
-data_len = max(iface_len, NET_IPV4_MTU);
+data_len = MAX(iface_len, NET_IPV4_MTU);
 data_len -= NET_IPV4H_LEN;
 }

@@ -1034,7 +1034,7 @@ int net_frag_linear_copy(struct net_buf *dst, struct net_buf *src,
 /* traverse the fragment chain until len bytes are copied */
 copied = 0U;
 while (src && len > 0) {
-to_copy = min(len, src->len - offset);
+to_copy = MIN(len, src->len - offset);
 memcpy(dst->data + copied, src->data + offset, to_copy);

 copied += to_copy;

@@ -1404,7 +1404,7 @@ struct net_buf *net_pkt_write(struct net_pkt *pkt, struct net_buf *frag,

 do {
 u16_t space = frag->size - net_buf_headroom(frag) - offset;
-u16_t count = min(len, space);
+u16_t count = MIN(len, space);
 int size_to_add;

 memcpy(frag->data + offset, data, count);

@@ -1452,7 +1452,7 @@ static inline bool insert_data(struct net_pkt *pkt, struct net_buf *frag,
 struct net_buf *insert;

 do {
-u16_t count = min(len, net_buf_tailroom(frag));
+u16_t count = MIN(len, net_buf_tailroom(frag));

 if (data) {
 /* Copy insert data */

@@ -1660,7 +1660,7 @@ static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
 if (timeout != K_NO_WAIT && timeout != K_FOREVER) {
 u32_t diff = k_uptime_get_32() - alloc_start;

-timeout -= min(timeout, diff);
+timeout -= MIN(timeout, diff);
 }

 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG

@@ -1723,9 +1723,9 @@ static size_t pkt_buffer_length(struct net_pkt *pkt,

 /* Family vs iface MTU */
 if (IS_ENABLED(CONFIG_NET_IPV6) && family == AF_INET6) {
-max_len = max(max_len, NET_IPV6_MTU);
+max_len = MAX(max_len, NET_IPV6_MTU);
 } else if (IS_ENABLED(CONFIG_NET_IPV4) && family == AF_INET) {
-max_len = max(max_len, NET_IPV4_MTU);
+max_len = MAX(max_len, NET_IPV4_MTU);
 } else { /* family == AF_UNSPEC */
 #if defined (CONFIG_NET_L2_ETHERNET)
 if (net_if_l2(net_pkt_iface(pkt)) ==

@@ -1743,7 +1743,7 @@ static size_t pkt_buffer_length(struct net_pkt *pkt,

 max_len -= existing;

-return min(size, max_len);
+return MIN(size, max_len);
 }

 static size_t pkt_estimate_headers_length(struct net_pkt *pkt,

@@ -1893,7 +1893,7 @@ int net_pkt_alloc_buffer(struct net_pkt *pkt,
 if (timeout != K_NO_WAIT && timeout != K_FOREVER) {
 u32_t diff = k_uptime_get_32() - alloc_start;

-timeout -= min(timeout, diff);
+timeout -= MIN(timeout, diff);
 }

 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG

@@ -2084,7 +2084,7 @@ pkt_alloc_with_buffer(struct k_mem_slab *slab,
 if (timeout != K_NO_WAIT && timeout != K_FOREVER) {
 u32_t diff = k_uptime_get_32() - alloc_start;

-timeout -= min(timeout, diff);
+timeout -= MIN(timeout, diff);
 }

 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG

@@ -282,7 +282,7 @@ struct net_tcp *net_tcp_alloc(struct net_context *context)
 tcp_context[i].context = context;

 tcp_context[i].send_seq = tcp_init_isn();
-tcp_context[i].recv_wnd = min(NET_TCP_MAX_WIN, NET_TCP_BUF_MAX_LEN);
+tcp_context[i].recv_wnd = MIN(NET_TCP_MAX_WIN, NET_TCP_BUF_MAX_LEN);
 tcp_context[i].send_mss = NET_TCP_DEFAULT_MSS;

 tcp_context[i].accept_cb = NULL;

@@ -555,7 +555,7 @@ static bool parse_ipv6(const char *str, size_t str_len,
 int end, len, ret, i;
 u16_t port;

-len = min(INET6_ADDRSTRLEN, str_len);
+len = MIN(INET6_ADDRSTRLEN, str_len);

 for (i = 0; i < len; i++) {
 if (!str[i]) {

@@ -571,7 +571,7 @@ static bool parse_ipv6(const char *str, size_t str_len,
 return false;
 }

-end = min(len, ptr - (str + 1));
+end = MIN(len, ptr - (str + 1));
 memcpy(ipaddr, str + 1, end);
 } else {
 end = len;

@@ -637,7 +637,7 @@ static bool parse_ipv4(const char *str, size_t str_len,
 int end, len, ret, i;
 u16_t port;

-len = min(NET_IPV4_ADDR_LEN, str_len);
+len = MIN(NET_IPV4_ADDR_LEN, str_len);

 for (i = 0; i < len; i++) {
 if (!str[i]) {

@@ -653,7 +653,7 @@ static bool parse_ipv4(const char *str, size_t str_len,
 return false;
 }

-end = min(len, ptr - str);
+end = MIN(len, ptr - str);
 } else {
 end = len;
 }

@@ -128,7 +128,7 @@ static inline u8_t copy_data(struct ieee802154_fragment_ctx *ctx,
 {
 u8_t move = ctx->frag->len - (ctx->pos - ctx->frag->data);

-move = min(move, max);
+move = MIN(move, max);

 memcpy(frame_buf->data + frame_buf->len, ctx->pos, move);

@@ -49,7 +49,7 @@ loop:
 break;
 }

-be = min(be + 1, max_be);
+be = MIN(be + 1, max_be);
 nb++;

 if (nb > max_bo) {

@@ -947,7 +947,7 @@ static int update_descriptive_block(struct coap_block_context *ctx,
 ctx->total_size = size;
 }
 ctx->current = new_current;
-ctx->block_size = min(GET_BLOCK_SIZE(block), ctx->block_size);
+ctx->block_size = MIN(GET_BLOCK_SIZE(block), ctx->block_size);

 return 0;
 }

@@ -993,7 +993,7 @@ static int update_control_block2(struct coap_block_context *ctx,
 }

 ctx->current = new_current;
-ctx->block_size = min(GET_BLOCK_SIZE(block), ctx->block_size);
+ctx->block_size = MIN(GET_BLOCK_SIZE(block), ctx->block_size);

 return 0;
 }

@@ -473,7 +473,7 @@ static int dns_read(struct net_context *ctx,
 int offset;
 int ret;

-data_len = min(net_pkt_appdatalen(pkt), DNS_RESOLVER_MAX_BUF_SIZE);
+data_len = MIN(net_pkt_appdatalen(pkt), DNS_RESOLVER_MAX_BUF_SIZE);
 offset = net_pkt_get_len(pkt) - data_len;

 /* Store the DNS query name into a temporary net_buf. This means

@@ -296,7 +296,7 @@ static int dns_read(struct net_context *ctx,
 int offset;
 int ret;

-data_len = min(net_pkt_appdatalen(pkt), DNS_RESOLVER_MAX_BUF_SIZE);
+data_len = MIN(net_pkt_appdatalen(pkt), DNS_RESOLVER_MAX_BUF_SIZE);
 offset = net_pkt_get_len(pkt) - data_len;

 /* Store the DNS query name into a temporary net_buf. This means

@@ -340,7 +340,7 @@ static int dns_read(struct dns_resolve_context *ctx,
 int ret;
 int server_idx, query_idx;

-data_len = min(net_pkt_appdatalen(pkt), DNS_RESOLVER_MAX_BUF_SIZE);
+data_len = MIN(net_pkt_appdatalen(pkt), DNS_RESOLVER_MAX_BUF_SIZE);
 offset = net_pkt_get_len(pkt) - data_len;

 /* TODO: Instead of this temporary copy, just use the net_pkt directly.

@@ -214,7 +214,7 @@ static char *sprint_token(const u8_t *token, u8_t tkl)
 if (token && tkl != LWM2M_MSG_TOKEN_LEN_SKIP) {
 int i;

-tkl = min(tkl, sizeof(buf) / 2 - 1);
+tkl = MIN(tkl, sizeof(buf) / 2 - 1);

 for (i = 0; i < tkl; i++) {
 *ptr++ = to_hex_digit(token[i] >> 4);

@@ -541,7 +541,7 @@ static int engine_add_observer(struct lwm2m_message *msg,
 observe_node_data[i].event_timestamp =
 observe_node_data[i].last_timestamp;
 observe_node_data[i].min_period_sec = attrs.pmin;
-observe_node_data[i].max_period_sec = max(attrs.pmax, attrs.pmin);
+observe_node_data[i].max_period_sec = MAX(attrs.pmax, attrs.pmin);
 observe_node_data[i].format = format;
 observe_node_data[i].counter = 1U;
 sys_slist_append(&engine_observer_list,

@@ -2337,7 +2337,7 @@ static int lwm2m_write_attr_handler(struct lwm2m_engine_obj *obj,

 /* loop through options to parse attribute */
 for (i = 0; i < nr_opt; i++) {
-int limit = min(options[i].len, 5), plen = 0, vlen;
+int limit = MIN(options[i].len, 5), plen = 0, vlen;
 float32_value_t val = { 0 };
 type = 0U;

@@ -2595,9 +2595,9 @@ static int lwm2m_write_attr_handler(struct lwm2m_engine_obj *obj,
 obs->path.obj_id, obs->path.obj_inst_id,
 obs->path.res_id, obs->path.level,
 obs->min_period_sec, obs->max_period_sec,
-nattrs.pmin, max(nattrs.pmin, nattrs.pmax));
+nattrs.pmin, MAX(nattrs.pmin, nattrs.pmax));
 obs->min_period_sec = (u32_t)nattrs.pmin;
-obs->max_period_sec = (u32_t)max(nattrs.pmin, nattrs.pmax);
+obs->max_period_sec = (u32_t)MAX(nattrs.pmin, nattrs.pmax);
 (void)memset(&nattrs, 0, sizeof(nattrs));
 }

@@ -370,7 +370,7 @@ int zsock_accept_ctx(struct net_context *parent, struct sockaddr *addr,
 #endif

 if (addr != NULL && addrlen != NULL) {
-int len = min(*addrlen, sizeof(ctx->remote));
+int len = MIN(*addrlen, sizeof(ctx->remote));

 memcpy(addr, &ctx->remote, len);
 /* addrlen is a value-result argument, set to actual

@@ -443,7 +443,7 @@ static void dtls_peer_address_get(struct net_context *context,
 struct sockaddr *peer_addr,
 socklen_t *addrlen)
 {
-socklen_t len = min(context->tls->dtls_peer_addrlen, *addrlen);
+socklen_t len = MIN(context->tls->dtls_peer_addrlen, *addrlen);

 memcpy(peer_addr, &context->tls->dtls_peer_addr, len);
 *addrlen = len;

@@ -938,7 +938,7 @@ static int tls_opt_sec_tag_list_get(struct net_context *context,
 return -EINVAL;
 }

-len = min(context->tls->options.sec_tag_list.sec_tag_count *
+len = MIN(context->tls->options.sec_tag_list.sec_tag_count *
 sizeof(sec_tag_t), *optlen);

 memcpy(optval, context->tls->options.sec_tag_list.sec_tags, len);

@@ -1266,7 +1266,7 @@ int ztls_accept_ctx(struct net_context *parent, struct sockaddr *addr,
 #endif

 if (addr != NULL && addrlen != NULL) {
-int len = min(*addrlen, sizeof(child->remote));
+int len = MIN(*addrlen, sizeof(child->remote));

 memcpy(addr, &child->remote, len);
 /* addrlen is a value-result argument, set to actual

@@ -140,7 +140,7 @@ int settings_val_read_cb(void *value_ctx, void *buf, size_t len)

 if (value_context->runtime) {
 rt_ctx = value_context->read_cb_ctx;
-len_read = min(len, rt_ctx->size);
+len_read = MIN(len, rt_ctx->size);
 memcpy(buf, rt_ctx->p_value, len_read);
 return len_read;
 } else {

@@ -178,7 +178,7 @@ int settings_line_write(const char *name, const char *value, size_t val_len,
 while (w_size < sizeof(w_buf)) {
 #ifdef CONFIG_SETTINGS_USE_BASE64
 if (enc_len) {
-add = min(enc_len, sizeof(w_buf) - w_size);
+add = MIN(enc_len, sizeof(w_buf) - w_size);
 memcpy(&w_buf[w_size], p_enc, add);
 enc_len -= add;
 w_size += add;

@@ -187,7 +187,7 @@ int settings_line_write(const char *name, const char *value, size_t val_len,
 #endif
 if (rem) {
 #ifdef CONFIG_SETTINGS_USE_BASE64
-add = min(rem, MAX_ENC_BLOCK_SIZE/4*3);
+add = MIN(rem, MAX_ENC_BLOCK_SIZE/4*3);
 rc = base64_encode(enc_buf, sizeof(enc_buf), &enc_len, value, add);
 if (rc) {
 return -EINVAL;

@@ -196,7 +196,7 @@ int settings_line_write(const char *name, const char *value, size_t val_len,
 rem -= add;
 p_enc = enc_buf;
 #else
-add = min(rem, sizeof(w_buf) - w_size);
+add = MIN(rem, sizeof(w_buf) - w_size);
 memcpy(&w_buf[w_size], value, add);
 value += add;
 rem -= add;

@@ -324,7 +324,7 @@ static int settings_line_raw_read_until(off_t seek, char *out, size_t len_req,

 off = seek - off;
 len = read_size - off;
-len = min(rem_size, len);
+len = MIN(rem_size, len);

 if (until_char != NULL) {
 char *pend;

@@ -384,7 +384,7 @@ int settings_line_val_read(off_t val_off, off_t off, char *out, size_t len_req,
 read_size = rem_size / 3 * 4;
 read_size += (rem_size % 3 != 0 || off_begin != off) ? 4 : 0;

-read_size = min(read_size, sizeof(enc_buf) - 1);
+read_size = MIN(read_size, sizeof(enc_buf) - 1);
 exp_size = read_size;

 rc = settings_line_raw_read(val_off + seek_begin, enc_buf,

@@ -406,7 +406,7 @@ int settings_line_val_read(off_t val_off, off_t off, char *out, size_t len_req,
 read_size);
 dec_buf[olen] = 0;

-clen = min(olen + off_begin - off, rem_size);
+clen = MIN(olen + off_begin - off, rem_size);

 memcpy(out, &dec_buf[off - off_begin], clen);
 rem_size -= clen;

@@ -498,7 +498,7 @@ int settings_entry_copy(void *dst_ctx, off_t dst_off, void *src_ctx,
 size_t chunk_size;

 while (len) {
-chunk_size = min(len, sizeof(buf));
+chunk_size = MIN(len, sizeof(buf));

 rc = settings_io_cb.read_cb(src_ctx, src_off, buf, &chunk_size);
 if (rc) {

@@ -81,7 +81,7 @@ static int settings_cmp(char const *val, size_t val_len, void *val_read_cb_ctx,
 off_t off = 0;

 for (rem = val_len; rem > 0; rem -= len_read) {
-len_read = exp_len = min(sizeof(buf), rem);
+len_read = exp_len = MIN(sizeof(buf), rem);
 rc = settings_line_val_read(val_off, off, buf, len_read,
 &len_read, val_read_cb_ctx);
 if (rc) {

@@ -547,7 +547,7 @@ static int hid_custom_handle_req(struct usb_setup_packet *setup,

 LOG_DBG("Return HID Descriptor");

-*len = min(*len, hid_desc->if0_hid.bLength);
+*len = MIN(*len, hid_desc->if0_hid.bLength);
 *data = (u8_t *)&hid_desc->if0_hid;
 break;
 case HID_CLASS_DESCRIPTOR_REPORT:

@@ -560,7 +560,7 @@ static int hid_custom_handle_req(struct usb_setup_packet *setup,
 if (*len != dev_data->report_size) {
 LOG_WRN("len %d doesn't match "
 "Report Descriptor size", *len);
-*len = min(*len, dev_data->report_size);
+*len = MIN(*len, dev_data->report_size);
 }
 *data = (u8_t *)dev_data->report_desc;
 break;

@@ -1081,7 +1081,7 @@ static int append_bytes(u8_t *out_buf, u16_t buf_len, u8_t *data,
 int ret;

 do {
-u16_t count = min(len, remaining);
+u16_t count = MIN(len, remaining);
 #if VERBOSE_DEBUG
 LOG_DBG("len %u remaining %u count %u", len, remaining, count);
 #endif

@@ -59,7 +59,7 @@ LOG_MODULE_REGISTER(usb_dfu);
 #define NUMOF_ALTERNATE_SETTINGS 2

 #ifdef CONFIG_USB_COMPOSITE_DEVICE
-#define USB_DFU_MAX_XFER_SIZE (min(CONFIG_USB_COMPOSITE_BUFFER_SIZE, \
+#define USB_DFU_MAX_XFER_SIZE (MIN(CONFIG_USB_COMPOSITE_BUFFER_SIZE, \
 CONFIG_USB_DFU_MAX_XFER_SIZE))
 #else
 #define USB_DFU_MAX_XFER_SIZE CONFIG_USB_DFU_MAX_XFER_SIZE

@@ -303,7 +303,7 @@ static void usb_handle_control_transfer(u8_t ep,
 }

 /* Send smallest of requested and offered length */
-usb_dev.data_buf_residue = min(usb_dev.data_buf_len, length);
+usb_dev.data_buf_residue = MIN(usb_dev.data_buf_len, length);
 /* Send first part (possibly a zero-length status message) */
 usb_data_to_host();
 } else if (ep == USB_CONTROL_OUT_EP0) {

@@ -209,7 +209,7 @@ int pipeput(struct k_pipe *pipe,
 t = BENCH_START();
 for (i = 0; option == _1_TO_N || (i < count); i++) {
 size_t sizexferd = 0;
-size_t size2xfer = min(size, size2xfer_total - sizexferd_total);
+size_t size2xfer = MIN(size, size2xfer_total - sizexferd_total);
 int ret;
 size_t mim_num_of_bytes = 0;
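
Every hunk in this diff is the same mechanical substitution, so the renamed macros themselves are worth stating once. A sketch of the shape MIN and MAX presumably have in util.h after this change (the exact formatting in the tree may differ):

    /* Classic ternary helpers; the outer parentheses let them nest safely
     * inside larger expressions, e.g. MIN(a, b) + 1.
     */
    #define MIN(a, b) (((a) < (b)) ? (a) : (b))
    #define MAX(a, b) (((a) > (b)) ? (a) : (b))

Being plain macros, both still evaluate each argument twice, so arguments with side effects remain unsafe after the rename. The call sites touched here pass expressions without meaningful side effects, although a few pay for the double evaluation (the strtoul() and strlen() calls in the shell commands, for instance), so the uppercase rename changes the spelling, not the behaviour, of any of these lines.
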
Some files were not shown because too many files have changed in this diff.