userspace: allow thread IDs to be re-used

It's currently too easy to run out of thread IDs as they
are never re-used on thread exit.

Now the kernel maintains a bitfield of in-use thread IDs,
updated on thread creation and termination. When a thread
exits, the permission bitfield for all kernel objects is
updated to revoke access for that retired thread ID, so that
a new thread re-using that ID will not gain access to objects
that it should not have.

Because of these runtime updates, marking an object as "public"
by setting its permission bitmap to all ones no longer works
properly; a per-object flag is now set for this instead.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
Andrew Boie 2017-10-13 13:57:07 -07:00 committed by Andrew Boie
commit 04caa679c9
7 changed files with 97 additions and 20 deletions

View file

@ -180,6 +180,7 @@ struct _k_object {
} __packed; } __packed;
#define K_OBJ_FLAG_INITIALIZED BIT(0) #define K_OBJ_FLAG_INITIALIZED BIT(0)
#define K_OBJ_FLAG_PUBLIC BIT(1)
/** /**
* Lookup a kernel object and init its metadata if it exists * Lookup a kernel object and init its metadata if it exists
@ -253,6 +254,9 @@ __syscall void k_object_access_revoke(void *object, struct k_thread *thread);
* as it is possible for such code to derive the addresses of kernel objects * as it is possible for such code to derive the addresses of kernel objects
* and perform unwanted operations on them. * and perform unwanted operations on them.
* *
* It is not possible to revoke permissions on public objects; once public,
* any thread may use it.
*
* @param object Address of kernel object * @param object Address of kernel object
*/ */
__syscall void k_object_access_all_grant(void *object); __syscall void k_object_access_all_grant(void *object);

View file

@ -118,6 +118,11 @@ struct _kernel {
struct k_thread *threads; /* singly linked list of ALL fiber+tasks */ struct k_thread *threads; /* singly linked list of ALL fiber+tasks */
#endif #endif
#if defined(CONFIG_USERSPACE)
/* Bitfield of thread IDs: a 0 bit means the ID is in use, a 1 bit means it is free */
u8_t free_thread_ids[CONFIG_MAX_THREAD_BYTES];
#endif
/* arch-specific part of _kernel */ /* arch-specific part of _kernel */
struct _kernel_arch arch; struct _kernel_arch arch;
}; };

View file

@ -98,12 +98,12 @@ extern void _thread_perms_set(struct _k_object *ko, struct k_thread *thread);
*/ */
extern void _thread_perms_clear(struct _k_object *ko, struct k_thread *thread); extern void _thread_perms_clear(struct _k_object *ko, struct k_thread *thread);
/** /**
* Grant all current and future threads access to a kernel object * Revoke access to all objects for the provided thread
* *
* @param ko Kernel object metadata to update * @param thread Thread object whose access will be revoked
*/ */
extern void _thread_perms_all_set(struct _k_object *ko); extern void _thread_perms_all_clear(struct k_thread *thread);
/** /**
* Clear initialization state of a kernel object * Clear initialization state of a kernel object

View file

@ -274,6 +274,10 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
/* _kernel.ready_q is all zeroes */ /* _kernel.ready_q is all zeroes */
#ifdef CONFIG_USERSPACE
/* Mark all potential IDs as available */
memset(_kernel.free_thread_ids, 0xFF, CONFIG_MAX_THREAD_BYTES);
#endif
/* /*
* The interrupt library needs to be initialized early since a series * The interrupt library needs to be initialized early since a series

View file

@ -29,17 +29,60 @@ extern struct _static_thread_data _static_thread_data_list_start[];
extern struct _static_thread_data _static_thread_data_list_end[]; extern struct _static_thread_data _static_thread_data_list_end[];
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
/* Each thread gets assigned an index into a permission bitfield */ static int thread_count;
static atomic_t thread_index;
static unsigned int thread_index_get(void) /*
* Fetch an unused thread ID. Returns -1 if all thread IDs are in use
*/
static int get_next_thread_index(void)
{ {
unsigned int retval; int key, pos = -1;
retval = (int)atomic_inc(&thread_index); key = irq_lock();
__ASSERT(retval < 8 * CONFIG_MAX_THREAD_BYTES,
"too many threads created, increase CONFIG_MAX_THREAD_BYTES"); if (thread_count == CONFIG_MAX_THREAD_BYTES * 8) {
return retval; /* We have run out of thread IDs! */
goto out;
}
/* find an unused bit in the kernel's bitfield of in-use thread IDs */
for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
int fs;
fs = find_lsb_set(_kernel.free_thread_ids[i]);
if (fs) {
/* find_lsb_set counts bit positions starting at 1 */
--fs;
_kernel.free_thread_ids[i] &= ~(1 << fs);
pos = fs + (i * 8);
break;
}
}
thread_count++;
out:
irq_unlock(key);
return pos;
}
/*
 * Return a thread ID to the free pool so it may be re-used.
 *
 * @param id Thread ID to release; -1 (the "no ID assigned" sentinel
 *           returned by get_next_thread_index) is a no-op.
 */
static void free_thread_index(int id)
{
	int index, key;
	u8_t bit;

	if (id == -1) {
		return;
	}

	key = irq_lock();

	thread_count--;
	/* Set the ID's bit back to 1 (free) in the kernel bitfield */
	index = id / 8;
	bit = 1 << (id % 8);
	_kernel.free_thread_ids[index] |= bit;

	irq_unlock(key);
}
#endif #endif
@ -281,7 +324,7 @@ void _setup_new_thread(struct k_thread *new_thread,
_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3, _new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
prio, options); prio, options);
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
new_thread->base.perm_index = thread_index_get(); new_thread->base.perm_index = get_next_thread_index();
_k_object_init(new_thread); _k_object_init(new_thread);
/* Any given thread has access to itself */ /* Any given thread has access to itself */
@ -458,6 +501,13 @@ void _k_thread_single_abort(struct k_thread *thread)
* and triggers errors if API calls are made on it from user threads * and triggers errors if API calls are made on it from user threads
*/ */
_k_object_uninit(thread); _k_object_uninit(thread);
if (thread->base.perm_index != -1) {
free_thread_index(thread->base.perm_index);
/* Revoke permissions on thread's ID so that it may be recycled */
_thread_perms_all_clear(thread);
}
#endif #endif
} }

View file

@ -138,8 +138,27 @@ void _thread_perms_clear(struct _k_object *ko, struct k_thread *thread)
} }
} }
/* Callback for _k_object_wordlist_foreach: clear one thread ID's
 * permission bit on a single kernel object. The thread ID is smuggled
 * through the context pointer as an integer.
 */
static void clear_perms_cb(struct _k_object *ko, void *ctx_ptr)
{
	int id = (int)ctx_ptr;

	sys_bitfield_clear_bit((mem_addr_t)&ko->perms, id);
}

/*
 * Revoke access to all kernel objects for the provided thread, so that
 * a future thread re-using this thread's ID does not inherit its
 * permissions. Threads with an out-of-range (unassigned) perm_index
 * have no permission bits to clear.
 */
void _thread_perms_all_clear(struct k_thread *thread)
{
	if (thread->base.perm_index < 8 * CONFIG_MAX_THREAD_BYTES) {
		_k_object_wordlist_foreach(clear_perms_cb,
					   (void *)thread->base.perm_index);
	}
}
static int thread_perms_test(struct _k_object *ko) static int thread_perms_test(struct _k_object *ko)
{ {
if (ko->flags & K_OBJ_FLAG_PUBLIC) {
return 1;
}
if (_current->base.perm_index < 8 * CONFIG_MAX_THREAD_BYTES) { if (_current->base.perm_index < 8 * CONFIG_MAX_THREAD_BYTES) {
return sys_bitfield_test_bit((mem_addr_t)&ko->perms, return sys_bitfield_test_bit((mem_addr_t)&ko->perms,
_current->base.perm_index); _current->base.perm_index);
@ -147,11 +166,6 @@ static int thread_perms_test(struct _k_object *ko)
return 0; return 0;
} }
void _thread_perms_all_set(struct _k_object *ko)
{
memset(ko->perms, 0xFF, CONFIG_MAX_THREAD_BYTES);
}
static void dump_permission_error(struct _k_object *ko) static void dump_permission_error(struct _k_object *ko)
{ {
printk("thread %p (%d) does not have permission on %s %p [", printk("thread %p (%d) does not have permission on %s %p [",
@ -202,7 +216,7 @@ void _impl_k_object_access_all_grant(void *object)
struct _k_object *ko = _k_object_find(object); struct _k_object *ko = _k_object_find(object);
if (ko) { if (ko) {
_thread_perms_all_set(ko); ko->flags |= K_OBJ_FLAG_PUBLIC;
} }
} }

View file

@ -65,7 +65,7 @@ _SYSCALL_HANDLER(k_object_access_all_grant, object)
ko = validate_any_object((void *)object); ko = validate_any_object((void *)object);
_SYSCALL_VERIFY_MSG(ko, "object %p access denied", (void *)object); _SYSCALL_VERIFY_MSG(ko, "object %p access denied", (void *)object);
_thread_perms_all_set(ko); ko->flags |= K_OBJ_FLAG_PUBLIC;
return 0; return 0;
} }