net: sockets: move poll implementation to zvfs
Move the implementation of zsock_poll to zvfs_poll. This allows other types of file descriptors to also make use of poll() functionality even when the network subsystem is not enabled. Additionally, it partially removes a dependency cycle between posix and networking by moving functionality into a mutual dependency.

Signed-off-by: Chris Friedt <cfriedt@tenstorrent.com>
This commit is contained in: parent 5ccbaeff39, commit 881dc1fa7a
15 changed files with 275 additions and 224 deletions
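The practical effect of this change is that poll() no longer requires CONFIG_NET_SOCKETS: any descriptor registered in the zvfs fdtable can be waited on through the shared zvfs_poll() implementation. Below is a minimal sketch of what that enables, assuming CONFIG_POSIX_DEVICE_IO, CONFIG_ZVFS_EVENTFD, and CONFIG_ZVFS_POLL are enabled; the wait_for_event() helper and the exact header paths are illustrative, not part of this diff.

#include <zephyr/kernel.h>
#include <zephyr/posix/poll.h>
#include <zephyr/posix/sys/eventfd.h>

/* Sketch: wait for an eventfd to become readable without CONFIG_NET_SOCKETS.
 * After this change, poll() dispatches to zvfs_poll() rather than zsock_poll().
 */
static int wait_for_event(int efd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = efd,
		.events = POLLIN,
	};
	int ret = poll(&pfd, 1, timeout_ms);

	if (ret > 0 && (pfd.revents & POLLIN)) {
		eventfd_t value;

		/* Consume the pending event counter */
		eventfd_read(efd, &value);
	}

	return ret;
}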
@@ -629,7 +629,10 @@ static inline int zsock_ioctl_wrapper(int sock, unsigned long request, ...)
  * it may conflict with generic POSIX ``poll()`` function).
  * @endrst
  */
-__syscall int zsock_poll(struct zsock_pollfd *fds, int nfds, int timeout);
+static inline int zsock_poll(struct zsock_pollfd *fds, int nfds, int timeout)
+{
+	return zvfs_poll(fds, nfds, timeout);
+}
 
 /**
  * @brief Get various socket options
@@ -7,6 +7,8 @@
 #ifndef ZEPHYR_INCLUDE_NET_SOCKET_POLL_H_
 #define ZEPHYR_INCLUDE_NET_SOCKET_POLL_H_
 
+#include <zephyr/sys/fdtable.h>
+
 /* Setting for pollfd to avoid circular inclusion */
 
 /**
@@ -20,6 +22,7 @@
 extern "C" {
 #endif
 
+#ifdef __DOXYGEN__
 /**
  * @brief Definition of the monitored socket/file descriptor.
  *
@@ -30,6 +33,9 @@ struct zsock_pollfd {
 	short events;  /**< Requested events */
 	short revents; /**< Returned events */
 };
+#else
+#define zsock_pollfd zvfs_pollfd
+#endif
 
 #ifdef __cplusplus
 }
@@ -27,6 +27,13 @@
 #define ZVFS_MODE_IFLNK  0120000
 #define ZVFS_MODE_IFSOCK 0140000
 
+#define ZVFS_POLLIN   BIT(0)
+#define ZVFS_POLLPRI  BIT(1)
+#define ZVFS_POLLOUT  BIT(2)
+#define ZVFS_POLLERR  BIT(3)
+#define ZVFS_POLLHUP  BIT(4)
+#define ZVFS_POLLNVAL BIT(5)
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -192,6 +199,14 @@ static inline int zvfs_fdtable_call_ioctl(const struct fd_op_vtable *vtable, voi
 	return res;
 }
 
+struct zvfs_pollfd {
+	int fd;
+	short events;
+	short revents;
+};
+
+__syscall int zvfs_poll(struct zvfs_pollfd *fds, int nfds, int poll_timeout);
+
 /**
  * Request codes for fd_op_vtable.ioctl().
  *
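For callers outside the socket layer, the zvfs_pollfd structure and zvfs_poll() syscall declared above can be used directly. A rough usage sketch, assuming CONFIG_ZVFS_POLL=y and a descriptor fd already registered in the fdtable; the fd_readable_within_100ms() helper is an illustrative name, not part of this diff.

#include <stdbool.h>
#include <zephyr/sys/fdtable.h>

/* Sketch: wait up to 100 ms for `fd` (any descriptor registered in the zvfs
 * fdtable) to become readable, using the zvfs_poll() syscall declared above.
 */
static bool fd_readable_within_100ms(int fd)
{
	struct zvfs_pollfd pfd = {
		.fd = fd,
		.events = ZVFS_POLLIN,
	};

	/* The third argument is a timeout in milliseconds; negative means wait forever */
	if (zvfs_poll(&pfd, 1, 100) <= 0) {
		return false;
	}

	return (pfd.revents & ZVFS_POLLIN) != 0;
}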
@@ -12,6 +12,9 @@ zephyr_sources(
 )
 
 zephyr_sources_ifdef(CONFIG_FDTABLE fdtable.c)
+zephyr_syscall_header_ifdef(CONFIG_FDTABLE
+  ${ZEPHYR_BASE}/include/zephyr/sys/fdtable.h
+)
 
 zephyr_sources_ifdef(CONFIG_CBPRINTF_COMPLETE cbprintf_complete.c)
 zephyr_sources_ifdef(CONFIG_CBPRINTF_NANO cbprintf_nano.c)
@@ -2,3 +2,4 @@
 
 zephyr_library()
 zephyr_library_sources_ifdef(CONFIG_ZVFS_EVENTFD zvfs_eventfd.c)
+zephyr_library_sources_ifdef(CONFIG_ZVFS_POLL zvfs_poll.c)
@@ -16,7 +16,7 @@ if ZVFS
 
 config ZVFS_EVENTFD
 	bool "ZVFS event file descriptor support"
-	select POLL
+	imply ZVFS_POLL
 	help
 	  Enable support for ZVFS event file descriptors. An eventfd can
 	  be used as an event wait/notify mechanism together with POSIX calls
@@ -33,4 +33,22 @@ config ZVFS_EVENTFD_MAX
 
 endif # ZVFS_EVENTFD
 
+config ZVFS_POLL
+	bool "ZVFS poll"
+	select POLL
+	help
+	  Enable support for zvfs_poll().
+
+if ZVFS_POLL
+
+config ZVFS_POLL_MAX
+	int "Max number of supported zvfs_poll() entries"
+	default 6 if WIFI_NM_WPA_SUPPLICANT
+	default 4 if SHELL_BACKEND_TELNET
+	default 3
+	help
+	  Maximum number of entries supported for poll() call.
+
+endif # ZVFS_POLL
+
 endif # ZVFS
lib/os/zvfs/zvfs_poll.c (new file, 213 lines)
@@ -0,0 +1,213 @@
/*
 * Copyright (c) 2017-2018 Linaro Limited
 * Copyright (c) 2021 Nordic Semiconductor
 * Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
 * Copyright (c) 2024 Tenstorrent AI ULC
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/sys/fdtable.h>

#if defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS)
bool net_socket_is_tls(void *obj);
#else
#define net_socket_is_tls(obj) false
#endif

int zvfs_poll_internal(struct zvfs_pollfd *fds, int nfds, k_timeout_t timeout)
{
	bool retry;
	int ret = 0;
	int i;
	struct zvfs_pollfd *pfd;
	struct k_poll_event poll_events[CONFIG_ZVFS_POLL_MAX];
	struct k_poll_event *pev;
	struct k_poll_event *pev_end = poll_events + ARRAY_SIZE(poll_events);
	const struct fd_op_vtable *vtable;
	struct k_mutex *lock;
	k_timepoint_t end;
	bool offload = false;
	const struct fd_op_vtable *offl_vtable = NULL;
	void *offl_ctx = NULL;

	end = sys_timepoint_calc(timeout);

	pev = poll_events;
	for (pfd = fds, i = nfds; i--; pfd++) {
		void *ctx;
		int result;

		/* Per POSIX, negative fd's are just ignored */
		if (pfd->fd < 0) {
			continue;
		}

		ctx = zvfs_get_fd_obj_and_vtable(pfd->fd, &vtable, &lock);
		if (ctx == NULL) {
			/* Will set POLLNVAL in return loop */
			continue;
		}

		(void)k_mutex_lock(lock, K_FOREVER);

		result = zvfs_fdtable_call_ioctl(vtable, ctx, ZFD_IOCTL_POLL_PREPARE, pfd, &pev,
						 pev_end);
		if (result == -EALREADY) {
			/* If POLL_PREPARE returned with EALREADY, it means
			 * it already detected that some socket is ready. In
			 * this case, we still perform a k_poll to pick up
			 * as many events as possible, but without any wait.
			 */
			timeout = K_NO_WAIT;
			end = sys_timepoint_calc(timeout);
			result = 0;
		} else if (result == -EXDEV) {
			/* If POLL_PREPARE returned EXDEV, it means
			 * it detected an offloaded socket.
			 * If offloaded socket is used with native TLS, the TLS
			 * wrapper for the offloaded poll will be used.
			 * In case the fds array contains a mixup of offloaded
			 * and non-offloaded sockets, the offloaded poll handler
			 * shall return an error.
			 */
			offload = true;
			if (offl_vtable == NULL || net_socket_is_tls(ctx)) {
				offl_vtable = vtable;
				offl_ctx = ctx;
			}

			result = 0;
		}

		k_mutex_unlock(lock);

		if (result < 0) {
			errno = -result;
			return -1;
		}
	}

	if (offload) {
		int poll_timeout;

		if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
			poll_timeout = SYS_FOREVER_MS;
		} else {
			poll_timeout = k_ticks_to_ms_floor32(timeout.ticks);
		}

		return zvfs_fdtable_call_ioctl(offl_vtable, offl_ctx, ZFD_IOCTL_POLL_OFFLOAD, fds,
					       nfds, poll_timeout);
	}

	timeout = sys_timepoint_timeout(end);

	do {
		ret = k_poll(poll_events, pev - poll_events, timeout);
		/* EAGAIN when timeout expired, EINTR when cancelled (i.e. EOF) */
		if (ret != 0 && ret != -EAGAIN && ret != -EINTR) {
			errno = -ret;
			return -1;
		}

		retry = false;
		ret = 0;

		pev = poll_events;
		for (pfd = fds, i = nfds; i--; pfd++) {
			void *ctx;
			int result;

			pfd->revents = 0;

			if (pfd->fd < 0) {
				continue;
			}

			ctx = zvfs_get_fd_obj_and_vtable(pfd->fd, &vtable, &lock);
			if (ctx == NULL) {
				pfd->revents = ZVFS_POLLNVAL;
				ret++;
				continue;
			}

			(void)k_mutex_lock(lock, K_FOREVER);

			result = zvfs_fdtable_call_ioctl(vtable, ctx, ZFD_IOCTL_POLL_UPDATE, pfd,
							 &pev);
			k_mutex_unlock(lock);

			if (result == -EAGAIN) {
				retry = true;
				continue;
			} else if (result != 0) {
				errno = -result;
				return -1;
			}

			if (pfd->revents != 0) {
				ret++;
			}
		}

		if (retry) {
			if (ret > 0) {
				break;
			}

			timeout = sys_timepoint_timeout(end);

			if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
				break;
			}
		}
	} while (retry);

	return ret;
}

int z_impl_zvfs_poll(struct zvfs_pollfd *fds, int nfds, int poll_timeout)
{
	k_timeout_t timeout;

	if (poll_timeout < 0) {
		timeout = K_FOREVER;
	} else {
		timeout = K_MSEC(poll_timeout);
	}

	return zvfs_poll_internal(fds, nfds, timeout);
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_zvfs_poll(struct zvfs_pollfd *fds, int nfds, int timeout)
{
	struct zvfs_pollfd *fds_copy;
	size_t fds_size;
	int ret;

	/* Copy fds array from user mode */
	if (size_mul_overflow(nfds, sizeof(struct zvfs_pollfd), &fds_size)) {
		errno = EFAULT;
		return -1;
	}
	fds_copy = k_usermode_alloc_from_copy((void *)fds, fds_size);
	if (!fds_copy) {
		errno = ENOMEM;
		return -1;
	}

	ret = z_impl_zvfs_poll(fds_copy, nfds, timeout);

	if (ret >= 0) {
		k_usermode_to_copy((void *)fds, fds_copy, fds_size);
	}
	k_free(fds_copy);

	return ret;
}
#include <zephyr/syscalls/zvfs_poll_mrsh.c>
#endif
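zvfs_poll_internal() delegates the per-descriptor work to each vtable's ioctl handler through ZFD_IOCTL_POLL_PREPARE (register k_poll events) and ZFD_IOCTL_POLL_UPDATE (translate k_poll results into revents). The sketch below outlines that contract for a hypothetical descriptor type; my_obj, my_poll_prepare(), my_poll_update(), and my_ioctl() are illustrative names only, and the k_poll wiring follows the pattern used by existing zvfs descriptors such as eventfd. It is a sketch under those assumptions, not code from this commit.

#include <errno.h>
#include <stdarg.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/fdtable.h>

/* Hypothetical object backing a custom zvfs descriptor. */
struct my_obj {
	struct k_poll_signal read_sig;
};

static int my_poll_prepare(struct my_obj *obj, struct zvfs_pollfd *pfd,
			   struct k_poll_event **pev, struct k_poll_event *pev_end)
{
	if (pfd->events & ZVFS_POLLIN) {
		if (*pev == pev_end) {
			/* No room left in the caller's event array */
			return -ENOMEM;
		}
		k_poll_event_init(*pev, K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY,
				  &obj->read_sig);
		(*pev)++;
	}

	return 0;
}

static int my_poll_update(struct my_obj *obj, struct zvfs_pollfd *pfd,
			  struct k_poll_event **pev)
{
	if (pfd->events & ZVFS_POLLIN) {
		if ((*pev)->state == K_POLL_STATE_SIGNALED) {
			pfd->revents |= ZVFS_POLLIN;
		}
		(*pev)++;
	}

	return 0;
}

/* fd_op_vtable.ioctl handler servicing the two poll-related requests */
static int my_ioctl(void *obj, unsigned int request, va_list args)
{
	switch (request) {
	case ZFD_IOCTL_POLL_PREPARE: {
		struct zvfs_pollfd *pfd = va_arg(args, struct zvfs_pollfd *);
		struct k_poll_event **pev = va_arg(args, struct k_poll_event **);
		struct k_poll_event *pev_end = va_arg(args, struct k_poll_event *);

		return my_poll_prepare(obj, pfd, pev, pev_end);
	}
	case ZFD_IOCTL_POLL_UPDATE: {
		struct zvfs_pollfd *pfd = va_arg(args, struct zvfs_pollfd *);
		struct k_poll_event **pev = va_arg(args, struct k_poll_event **);

		return my_poll_update(obj, pfd, pev);
	}
	default:
		return -EOPNOTSUPP;
	}
}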
@@ -6,9 +6,10 @@ menu "POSIX device I/O"
 
 config POSIX_DEVICE_IO
 	bool "POSIX device I/O [EXPERIMENTAL]"
-	select FDTABLE
 	select EXPERIMENTAL
 	select REQUIRES_FULL_LIBC
+	select ZVFS
+	select ZVFS_POLL
 	help
 	  Select 'y' here and Zephyr will provide an implementation of the POSIX_DEVICE_IO Option
 	  Group such as FD_CLR(), FD_ISSET(), FD_SET(), FD_ZERO(), close(), fdopen(), fileno(), open(),
@@ -37,8 +37,7 @@ FUNC_ALIAS(open, _open, int);
 
 int poll(struct pollfd *fds, int nfds, int timeout)
 {
-	/* TODO: create zvfs_poll() and dispatch to subsystems based on file type */
-	return zsock_poll(fds, nfds, timeout);
+	return zvfs_poll(fds, nfds, timeout);
 }
 
 ssize_t pread(int fd, void *buf, size_t count, off_t offset)
@@ -5,7 +5,8 @@
 
 menuconfig NET_SOCKETS
 	bool "BSD Sockets compatible API"
-	select FDTABLE
+	select ZVFS
+	select ZVFS_POLL
 	help
 	  Provide BSD Sockets like API on top of native Zephyr networking API.
 
@@ -821,217 +821,6 @@ static inline int z_vrfy_zsock_ioctl_impl(int sock, unsigned long request, va_li
 #include <zephyr/syscalls/zsock_ioctl_impl_mrsh.c>
 #endif
 
-int zsock_poll_internal(struct zsock_pollfd *fds, int nfds, k_timeout_t timeout)
-{
-	bool retry;
-	int ret = 0;
-	int i;
-	struct zsock_pollfd *pfd;
-	struct k_poll_event poll_events[CONFIG_NET_SOCKETS_POLL_MAX];
-	struct k_poll_event *pev;
-	struct k_poll_event *pev_end = poll_events + ARRAY_SIZE(poll_events);
-	const struct fd_op_vtable *vtable;
-	struct k_mutex *lock;
-	k_timepoint_t end;
-	bool offload = false;
-	const struct fd_op_vtable *offl_vtable = NULL;
-	void *offl_ctx = NULL;
-
-	end = sys_timepoint_calc(timeout);
-
-	pev = poll_events;
-	for (pfd = fds, i = nfds; i--; pfd++) {
-		void *ctx;
-		int result;
-
-		/* Per POSIX, negative fd's are just ignored */
-		if (pfd->fd < 0) {
-			continue;
-		}
-
-		ctx = get_sock_vtable(pfd->fd,
-				      (const struct socket_op_vtable **)&vtable,
-				      &lock);
-		if (ctx == NULL) {
-			/* Will set POLLNVAL in return loop */
-			continue;
-		}
-
-		(void)k_mutex_lock(lock, K_FOREVER);
-
-		result = zvfs_fdtable_call_ioctl(vtable, ctx,
-						 ZFD_IOCTL_POLL_PREPARE,
-						 pfd, &pev, pev_end);
-		if (result == -EALREADY) {
-			/* If POLL_PREPARE returned with EALREADY, it means
-			 * it already detected that some socket is ready. In
-			 * this case, we still perform a k_poll to pick up
-			 * as many events as possible, but without any wait.
-			 */
-			timeout = K_NO_WAIT;
-			end = sys_timepoint_calc(timeout);
-			result = 0;
-		} else if (result == -EXDEV) {
-			/* If POLL_PREPARE returned EXDEV, it means
-			 * it detected an offloaded socket.
-			 * If offloaded socket is used with native TLS, the TLS
-			 * wrapper for the offloaded poll will be used.
-			 * In case the fds array contains a mixup of offloaded
-			 * and non-offloaded sockets, the offloaded poll handler
-			 * shall return an error.
-			 */
-			offload = true;
-			if (offl_vtable == NULL || net_socket_is_tls(ctx)) {
-				offl_vtable = vtable;
-				offl_ctx = ctx;
-			}
-
-			result = 0;
-		}
-
-		k_mutex_unlock(lock);
-
-		if (result < 0) {
-			errno = -result;
-			return -1;
-		}
-	}
-
-	if (offload) {
-		int poll_timeout;
-
-		if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
-			poll_timeout = SYS_FOREVER_MS;
-		} else {
-			poll_timeout = k_ticks_to_ms_floor32(timeout.ticks);
-		}
-
-		return zvfs_fdtable_call_ioctl(offl_vtable, offl_ctx,
-					       ZFD_IOCTL_POLL_OFFLOAD,
-					       fds, nfds, poll_timeout);
-	}
-
-	timeout = sys_timepoint_timeout(end);
-
-	do {
-		ret = k_poll(poll_events, pev - poll_events, timeout);
-		/* EAGAIN when timeout expired, EINTR when cancelled (i.e. EOF) */
-		if (ret != 0 && ret != -EAGAIN && ret != -EINTR) {
-			errno = -ret;
-			return -1;
-		}
-
-		retry = false;
-		ret = 0;
-
-		pev = poll_events;
-		for (pfd = fds, i = nfds; i--; pfd++) {
-			void *ctx;
-			int result;
-
-			pfd->revents = 0;
-
-			if (pfd->fd < 0) {
-				continue;
-			}
-
-			ctx = get_sock_vtable(
-				pfd->fd,
-				(const struct socket_op_vtable **)&vtable,
-				&lock);
-			if (ctx == NULL) {
-				pfd->revents = ZSOCK_POLLNVAL;
-				ret++;
-				continue;
-			}
-
-			(void)k_mutex_lock(lock, K_FOREVER);
-
-			result = zvfs_fdtable_call_ioctl(vtable, ctx,
-							 ZFD_IOCTL_POLL_UPDATE,
-							 pfd, &pev);
-			k_mutex_unlock(lock);
-
-			if (result == -EAGAIN) {
-				retry = true;
-				continue;
-			} else if (result != 0) {
-				errno = -result;
-				return -1;
-			}
-
-			if (pfd->revents != 0) {
-				ret++;
-			}
-		}
-
-		if (retry) {
-			if (ret > 0) {
-				break;
-			}
-
-			timeout = sys_timepoint_timeout(end);
-
-			if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
-				break;
-			}
-		}
-	} while (retry);
-
-	return ret;
-}
-
-int z_impl_zsock_poll(struct zsock_pollfd *fds, int nfds, int poll_timeout)
-{
-	k_timeout_t timeout;
-	int ret;
-
-	SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, poll, fds, nfds, poll_timeout);
-
-	if (poll_timeout < 0) {
-		timeout = K_FOREVER;
-	} else {
-		timeout = K_MSEC(poll_timeout);
-	}
-
-	ret = zsock_poll_internal(fds, nfds, timeout);
-
-	SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, poll, fds, nfds,
-				       ret < 0 ? -errno : ret);
-	return ret;
-}
-
-#ifdef CONFIG_USERSPACE
-static inline int z_vrfy_zsock_poll(struct zsock_pollfd *fds,
-				    int nfds, int timeout)
-{
-	struct zsock_pollfd *fds_copy;
-	size_t fds_size;
-	int ret;
-
-	/* Copy fds array from user mode */
-	if (size_mul_overflow(nfds, sizeof(struct zsock_pollfd), &fds_size)) {
-		errno = EFAULT;
-		return -1;
-	}
-	fds_copy = k_usermode_alloc_from_copy((void *)fds, fds_size);
-	if (!fds_copy) {
-		errno = ENOMEM;
-		return -1;
-	}
-
-	ret = z_impl_zsock_poll(fds_copy, nfds, timeout);
-
-	if (ret >= 0) {
-		k_usermode_to_copy((void *)fds, fds_copy, fds_size);
-	}
-	k_free(fds_copy);
-
-	return ret;
-}
-#include <zephyr/syscalls/zsock_poll_mrsh.c>
-#endif
-
 int z_impl_zsock_inet_pton(sa_family_t family, const char *src, void *dst)
 {
 	if (net_addr_pton(family, src, dst) == 0) {
@@ -521,7 +521,7 @@ void sys_trace_k_event_init(struct k_event *event);
 */
 struct sockaddr;
 struct msghdr;
-struct zsock_pollfd;
+struct zvfs_pollfd;
 
 void sys_trace_socket_init(int sock, int family, int type, int proto);
 void sys_trace_socket_close_enter(int sock);
@@ -552,8 +552,8 @@ void sys_trace_socket_fcntl_enter(int sock, int cmd, int flags);
 void sys_trace_socket_fcntl_exit(int sock, int ret);
 void sys_trace_socket_ioctl_enter(int sock, int req);
 void sys_trace_socket_ioctl_exit(int sock, int ret);
-void sys_trace_socket_poll_enter(const struct zsock_pollfd *fds, int nfds, int timeout);
-void sys_trace_socket_poll_exit(const struct zsock_pollfd *fds, int nfds, int ret);
+void sys_trace_socket_poll_enter(const struct zvfs_pollfd *fds, int nfds, int timeout);
+void sys_trace_socket_poll_exit(const struct zvfs_pollfd *fds, int nfds, int ret);
 void sys_trace_socket_getsockopt_enter(int sock, int level, int optname);
 void sys_trace_socket_getsockopt_exit(int sock, int level, int optname, void *optval,
 				       size_t optlen, int ret);
@@ -16,7 +16,7 @@ DEFINE_FAKE_VALUE_FUNC(ssize_t, z_impl_zsock_recvfrom, int, void *, size_t, int,
 DEFINE_FAKE_VALUE_FUNC(ssize_t, z_impl_zsock_sendto, int, void *, size_t, int,
 		       const struct sockaddr *, socklen_t);
 
-struct zsock_pollfd {
+struct zvfs_pollfd {
 	int fd;
 	short events;
 	short revents;
@@ -39,7 +39,7 @@ int z_impl_zsock_socket(int family, int type, int proto)
 	return 0;
 }
 
-int z_impl_zsock_poll(struct zsock_pollfd *fds, int nfds, int poll_timeout)
+int z_impl_zvfs_poll(struct zvfs_pollfd *fds, int nfds, int poll_timeout)
 {
 	LOG_INF("Polling, events %d", my_events);
 	k_sleep(K_MSEC(1));
@@ -1,3 +1,5 @@
 CONFIG_ZTEST=y
 CONFIG_ZTEST_STACK_SIZE=5120
 CONFIG_MP_MAX_NUM_CPUS=1
+
+CONFIG_ZVFS=y
@@ -54,7 +54,7 @@ sys_slist_t *lwm2m_obs_obj_path_list(void)
 static sys_slist_t engine_obj_inst_list = SYS_SLIST_STATIC_INIT(&engine_obj_inst_list);
 sys_slist_t *lwm2m_engine_obj_inst_list(void) { return &engine_obj_inst_list; }
 
-struct zsock_pollfd {
+struct zvfs_pollfd {
 	int fd;
 	short events;
 	short revents;
@@ -123,7 +123,7 @@ ssize_t z_impl_zsock_recvfrom(int sock, void *buf, size_t max_len, int flags,
 	return -1;
 }
 
-int z_impl_zsock_poll(struct zsock_pollfd *fds, int nfds, int poll_timeout)
+int z_impl_zvfs_poll(struct zvfs_pollfd *fds, int nfds, int poll_timeout)
 {
 	k_sleep(K_MSEC(1));
 	fds->revents = my_events;