userspace: add thread-level permission tracking

Now creating a thread will assign it a unique, monotonically increasing
id which is used to reference the permission bitfield in the kernel
object metadata.

The stub functions in userspace.c are now implemented.

_new_thread is now wrapped in a common function with pre- and post-
architecture thread initialization tasks.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Commit 2acfcd6b05 by Andrew Boie, 2017-08-30 14:31:03 -07:00
(committed by Andrew Boie); 5 changed files with 67 additions and 23 deletions.

View file

@ -310,6 +310,11 @@ struct _thread_base {
/* this thread's entry in a timeout queue */
struct _timeout timeout;
#endif
#ifdef CONFIG_USERSPACE
/* Bit position in kernel object permissions bitfield for this thread */
unsigned int perm_index;
#endif
};
typedef struct _thread_base _thread_base_t;

View file

@ -43,11 +43,18 @@ FUNC_NORETURN void _Cstart(void);
extern FUNC_NORETURN void _thread_entry(k_thread_entry_t entry,
void *p1, void *p2, void *p3);
/* Implemented by architectures. Only called from _setup_new_thread. */
extern void _new_thread(struct k_thread *thread, k_thread_stack_t pStack,
size_t stackSize, k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, unsigned int options);
extern void _setup_new_thread(struct k_thread *new_thread,
k_thread_stack_t stack, size_t stack_size,
k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, u32_t options);
/* context switching and scheduling-related routines */
extern unsigned int __swap(unsigned int key);

View file

@ -267,6 +267,9 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
dummy_thread->stack_info.start = 0;
dummy_thread->stack_info.size = 0;
#endif
#ifdef CONFIG_USERSPACE
dummy_thread->base.perm_index = 0;
#endif
#endif
/* _kernel.ready_q is all zeroes */
@ -299,20 +302,18 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
*/
_ready_q.cache = _main_thread;
_new_thread(_main_thread, _main_stack,
_setup_new_thread(_main_thread, _main_stack,
MAIN_STACK_SIZE, _main, NULL, NULL, NULL,
CONFIG_MAIN_THREAD_PRIORITY, K_ESSENTIAL);
_mark_thread_as_started(_main_thread);
_add_thread_to_ready_q(_main_thread);
_k_object_init(_main_thread);
#ifdef CONFIG_MULTITHREADING
_new_thread(_idle_thread, _idle_stack,
_setup_new_thread(_idle_thread, _idle_stack,
IDLE_STACK_SIZE, idle, NULL, NULL, NULL,
K_LOWEST_THREAD_PRIO, K_ESSENTIAL);
_mark_thread_as_started(_idle_thread);
_add_thread_to_ready_q(_idle_thread);
_k_object_init(_idle_thread);
#endif
initialize_timeouts();

View file

@ -22,10 +22,26 @@
#include <drivers/system_timer.h>
#include <ksched.h>
#include <wait_q.h>
#include <atomic.h>
extern struct _static_thread_data _static_thread_data_list_start[];
extern struct _static_thread_data _static_thread_data_list_end[];
#ifdef CONFIG_USERSPACE
/* Each thread gets assigned an index into a permission bitfield */
static atomic_t thread_index;
/*
 * Hand out the next thread permission-bitfield index.
 *
 * Atomically post-increments the global counter, so each caller gets a
 * unique, monotonically increasing index. Asserts (debug builds only) that
 * the index still fits in the CONFIG_MAX_THREAD_BYTES-sized bitfield.
 */
static unsigned int thread_index_get(void)
{
	unsigned int index = atomic_inc(&thread_index);

	__ASSERT(index < 8 * CONFIG_MAX_THREAD_BYTES,
		 "too many threads created, increase CONFIG_MAX_THREAD_BYTES");

	return index;
}
#endif
#define _FOREACH_STATIC_THREAD(thread_data) \
for (struct _static_thread_data *thread_data = \
_static_thread_data_list_start; \
@ -241,8 +257,24 @@ static void schedule_new_thread(struct k_thread *thread, s32_t delay)
}
#endif
#ifdef CONFIG_MULTITHREADING
/*
 * Common wrapper around the architecture-specific _new_thread().
 *
 * Performs arch-independent post-initialization: when CONFIG_USERSPACE is
 * enabled, assigns the thread its unique permission-bitfield index,
 * registers the thread object with the kernel object tracking, and grants
 * the thread access to itself.
 *
 * Parameters mirror _new_thread(): the thread struct to initialize, its
 * stack and stack size, the entry point with its three arguments, the
 * priority, and the option flags.
 */
void _setup_new_thread(struct k_thread *new_thread,
k_thread_stack_t stack, size_t stack_size,
k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, u32_t options)
{
/* Architecture-specific initialization must run first; it sets up the
 * stack frame and thread context the steps below depend on.
 */
_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
prio, options);
#ifdef CONFIG_USERSPACE
/* Unique index into per-object permission bitfields for this thread */
new_thread->base.perm_index = thread_index_get();
_k_object_init(new_thread);

/* Any given thread has access to itself */
k_object_grant_access(new_thread, new_thread);
#endif
}
#ifdef CONFIG_MULTITHREADING
k_tid_t k_thread_create(struct k_thread *new_thread,
k_thread_stack_t stack,
size_t stack_size, k_thread_entry_t entry,
@ -250,9 +282,8 @@ k_tid_t k_thread_create(struct k_thread *new_thread,
int prio, u32_t options, s32_t delay)
{
__ASSERT(!_is_in_isr(), "Threads may not be created in ISRs");
_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
prio, options);
_k_object_init(new_thread);
if (delay != K_FOREVER) {
schedule_new_thread(new_thread, delay);
@ -394,7 +425,7 @@ void _init_static_threads(void)
unsigned int key;
_FOREACH_STATIC_THREAD(thread_data) {
_new_thread(
_setup_new_thread(
thread_data->init_thread,
thread_data->init_stack,
thread_data->init_stack_size,
@ -406,7 +437,6 @@ void _init_static_threads(void)
thread_data->init_options);
thread_data->init_thread->init_data = thread_data;
_k_object_init(thread_data->init_thread);
}
_sched_lock();

View file

@ -69,21 +69,22 @@ const char *otype_to_str(enum k_objects otype)
/*
 * Grant 'thread' permission on kernel object 'ko'.
 *
 * Sets the bit corresponding to the thread's perm_index in the object's
 * permission bitfield. Out-of-range indices (beyond the configured
 * CONFIG_MAX_THREAD_BYTES bitfield) are silently ignored; thread_index_get()
 * asserts on this condition at thread creation time.
 *
 * Leftover stub lines (ARG_UNUSED()/STUB comment) from the previous
 * unimplemented version are removed here — they contradicted the code
 * below, which uses both parameters.
 */
static void set_thread_perms(struct _k_object *ko, struct k_thread *thread)
{
	if (thread->base.perm_index < 8 * CONFIG_MAX_THREAD_BYTES) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms,
				     thread->base.perm_index);
	}
}
/*
 * Test whether the current thread has permission on kernel object 'ko'.
 *
 * Returns nonzero if the bit for _current's perm_index is set in the
 * object's permission bitfield, 0 otherwise. An out-of-range perm_index
 * (beyond the CONFIG_MAX_THREAD_BYTES bitfield) denies access rather than
 * reading past the bitfield.
 *
 * The residual stub ("return 1;", which granted access unconditionally and
 * made the real check unreachable) is removed; the permission bit is now
 * actually consulted.
 */
static int test_thread_perms(struct _k_object *ko)
{
	if (_current->base.perm_index < 8 * CONFIG_MAX_THREAD_BYTES) {
		return sys_bitfield_test_bit((mem_addr_t)&ko->perms,
					     _current->base.perm_index);
	}
	return 0;
}
void k_object_grant_access(void *object, struct k_thread *thread)
{