kernel: logging: convert K_DEBUG to LOG_DBG

Move K_DEBUG call sites to LOG_DBG instead of plain printk, and remove the
now-unused CONFIG_KERNEL_DEBUG option.
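For context, each conversion is mechanical: the format string and arguments
stay the same, the trailing \n is dropped (the logging subsystem terminates
each message itself), and every file declares which log module it belongs to.
A minimal sketch of the resulting pattern, with a hypothetical function for
illustration (not part of this commit):

#include <kernel.h>
#include <logging/log.h>

/* Attach this file to the kernel's existing "os" log module. */
LOG_MODULE_DECLARE(os);

/* Hypothetical call site. Before this commit it would have read
 * K_DEBUG("thread %p for %d ticks\n", thread, ticks); and compiled to a
 * printk() only under CONFIG_KERNEL_DEBUG. Now it is compiled in and
 * filtered according to the logging subsystem's configured level instead.
 */
static void demo_trace(struct k_thread *thread, int ticks)
{
	LOG_DBG("thread %p for %d ticks", thread, ticks);
}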

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Anas Nashif authored on 2019-12-02 10:24:08 -05:00, committed by Maureen Helm
commit 2c5d40437b
6 changed files with 14 additions and 28 deletions

View file

@@ -30,12 +30,6 @@ extern "C" {
  * @}
  */
 
-#ifdef CONFIG_KERNEL_DEBUG
-#define K_DEBUG(fmt, ...) printk("[%s] " fmt, __func__, ##__VA_ARGS__)
-#else
-#define K_DEBUG(fmt, ...)
-#endif
-
 #if defined(CONFIG_COOP_ENABLED) && defined(CONFIG_PREEMPT_ENABLED)
 #define _NUM_COOP_PRIO (CONFIG_NUM_COOP_PRIORITIES)
 #define _NUM_PREEMPT_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + 1)

View file

@@ -297,14 +297,6 @@ config INIT_STACKS
	  water mark can be easily determined. This applies to the stack areas
	  for threads, as well as to the interrupt stack.
 
-config KERNEL_DEBUG
-	bool "Kernel debugging"
-	select INIT_STACKS
-	help
-	  Enable kernel debugging.
-
-	  Note that debugging the kernel internals can be very verbose.
-
 config BOOT_BANNER
	bool "Boot banner"
	default y

View file

@@ -271,8 +271,6 @@ static inline void z_sched_lock(void)
	compiler_barrier();
 
-	K_DEBUG("scheduler locked (%p:%d)\n",
-		_current, _current->base.sched_locked);
 #endif
 }

View file

@@ -39,6 +39,7 @@
 #include <syscall_handler.h>
 #include <tracing/tracing.h>
 #include <sys/check.h>
+LOG_MODULE_DECLARE(os);
 
 /* We use a global spinlock here because some of the synchronization
  * is protecting things like owner thread priorities which aren't
@@ -106,7 +107,7 @@ static bool adjust_owner_prio(struct k_mutex *mutex, int32_t new_prio)
 {
	if (mutex->owner->base.prio != new_prio) {
-		K_DEBUG("%p (ready (y/n): %c) prio changed to %d (was %d)\n",
+		LOG_DBG("%p (ready (y/n): %c) prio changed to %d (was %d)",
			mutex->owner, z_is_thread_ready(mutex->owner) ?
			'y' : 'n',
			new_prio, mutex->owner->base.prio);
@@ -136,7 +137,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
		mutex->lock_count++;
		mutex->owner = _current;
 
-		K_DEBUG("%p took mutex %p, count: %d, orig prio: %d\n",
+		LOG_DBG("%p took mutex %p, count: %d, orig prio: %d",
			_current, mutex, mutex->lock_count,
			mutex->owner_orig_prio);
@@ -155,7 +156,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
	new_prio = new_prio_for_inheritance(_current->base.prio,
					    mutex->owner->base.prio);
 
-	K_DEBUG("adjusting prio up on mutex %p\n", mutex);
+	LOG_DBG("adjusting prio up on mutex %p", mutex);
 
	if (z_is_prio_higher(new_prio, mutex->owner->base.prio)) {
		resched = adjust_owner_prio(mutex, new_prio);
@@ -163,9 +164,9 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
	int got_mutex = z_pend_curr(&lock, key, &mutex->wait_q, timeout);
 
-	K_DEBUG("on mutex %p got_mutex value: %d\n", mutex, got_mutex);
+	LOG_DBG("on mutex %p got_mutex value: %d", mutex, got_mutex);
 
-	K_DEBUG("%p got mutex %p (y/n): %c\n", _current, mutex,
+	LOG_DBG("%p got mutex %p (y/n): %c", _current, mutex,
		got_mutex ? 'y' : 'n');
 
	if (got_mutex == 0) {
@@ -175,7 +176,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
 
	/* timed out */
 
-	K_DEBUG("%p timeout on mutex %p\n", _current, mutex);
+	LOG_DBG("%p timeout on mutex %p", _current, mutex);
 
	key = k_spin_lock(&lock);
@@ -185,7 +186,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
		new_prio_for_inheritance(waiter->base.prio, mutex->owner_orig_prio) :
		mutex->owner_orig_prio;
 
-	K_DEBUG("adjusting prio down on mutex %p\n", mutex);
+	LOG_DBG("adjusting prio down on mutex %p", mutex);
 
	resched = adjust_owner_prio(mutex, new_prio) || resched;
@@ -236,7 +237,7 @@ int z_impl_k_mutex_unlock(struct k_mutex *mutex)
	sys_trace_void(SYS_TRACE_ID_MUTEX_UNLOCK);
	z_sched_lock();
 
-	K_DEBUG("mutex %p lock_count: %d\n", mutex, mutex->lock_count);
+	LOG_DBG("mutex %p lock_count: %d", mutex, mutex->lock_count);
 
	/*
	 * If we are the owner and count is greater than 1, then decrement
@@ -256,7 +257,7 @@ int z_impl_k_mutex_unlock(struct k_mutex *mutex)
	mutex->owner = new_owner;
 
-	K_DEBUG("new owner of mutex %p: %p (prio: %d)\n",
+	LOG_DBG("new owner of mutex %p: %p (prio: %d)",
		mutex, new_owner, new_owner ? new_owner->base.prio : -1000);
 
	if (new_owner != NULL) {

View file

@@ -14,6 +14,8 @@
 #include <drivers/timer/system_timer.h>
 #include <stdbool.h>
 #include <kernel_internal.h>
+#include <logging/log.h>
+LOG_MODULE_DECLARE(os);
 
 /* Maximum time between the time a self-aborting thread flags itself
  * DEAD and the last read or write to its stack memory (i.e. the time
@@ -782,7 +784,7 @@ void k_sched_unlock(void)
		update_cache(0);
	}
 
-	K_DEBUG("scheduler unlocked (%p:%d)\n",
+	LOG_DBG("scheduler unlocked (%p:%d)",
		_current, _current->base.sched_locked);
 
	z_reschedule_unlocked();
@@ -1149,7 +1151,7 @@ static int32_t z_tick_sleep(int32_t ticks)
	__ASSERT(!arch_is_in_isr(), "");
 
-	K_DEBUG("thread %p for %d ticks\n", _current, ticks);
+	LOG_DBG("thread %p for %d ticks", _current, ticks);
 
	/* wait of 0 ms is treated as a 'yield' */
	if (ticks == 0) {

View file

@@ -1,5 +1,4 @@
 CONFIG_TEST=y
 CONFIG_DEBUG=y
 CONFIG_STDOUT_CONSOLE=y
-CONFIG_KERNEL_DEBUG=y
 CONFIG_ASSERT=y
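
With CONFIG_KERNEL_DEBUG gone, a test that wants the equivalent output would
enable it through the logging subsystem instead; a plausible prj.conf
replacement using the standard logging options (not part of this commit):

CONFIG_LOG=y
# Level 4 is debug severity, so the converted LOG_DBG messages are emitted
CONFIG_LOG_DEFAULT_LEVEL=4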