logging: Use spin lock

Updated log_core to use a spin lock instead of irq_lock().
Refactored the z_log_msg_post_finalize() function accordingly.

Signed-off-by: Krzysztof Chruscinski <krzysztof.chruscinski@nordicsemi.no>
This commit is contained in:
Krzysztof Chruscinski 2022-01-10 14:51:10 +01:00 committed by Anas Nashif
commit dfe9561d9b

View file

@ -89,6 +89,7 @@ static atomic_t log_strdup_in_use;
static uint32_t log_strdup_max; static uint32_t log_strdup_max;
static uint32_t log_strdup_longest; static uint32_t log_strdup_longest;
static struct k_timer log_process_thread_timer; static struct k_timer log_process_thread_timer;
static struct k_spinlock list_lock;
static log_timestamp_t dummy_timestamp(void); static log_timestamp_t dummy_timestamp(void);
static log_timestamp_get_t timestamp_func = dummy_timestamp; static log_timestamp_get_t timestamp_func = dummy_timestamp;
@ -228,43 +229,50 @@ static void detect_missed_strdup(struct log_msg *msg)
static void z_log_msg_post_finalize(void) static void z_log_msg_post_finalize(void)
{ {
atomic_val_t cnt = atomic_inc(&buffered_cnt);
if (panic_mode) { if (panic_mode) {
unsigned int key = irq_lock(); static struct k_spinlock process_lock;
k_spinlock_key_t key = k_spin_lock(&process_lock);
(void)log_process(false); (void)log_process(false);
irq_unlock(key); k_spin_unlock(&process_lock, key);
} else if (proc_tid != NULL && cnt == 0) { } else if (proc_tid != NULL) {
k_timer_start(&log_process_thread_timer, static atomic_t reentry_flag;
K_MSEC(CONFIG_LOG_PROCESS_THREAD_SLEEP_MS), K_NO_WAIT); atomic_val_t cnt = atomic_inc(&buffered_cnt);
} else if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
if ((cnt == CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) && if (cnt == 0) {
(proc_tid != NULL)) { if (atomic_inc(&reentry_flag) == 0) {
k_timer_start(&log_process_thread_timer,
K_MSEC(CONFIG_LOG_PROCESS_THREAD_SLEEP_MS),
K_NO_WAIT);
}
atomic_dec(&reentry_flag);
} else if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
cnt == CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
k_timer_stop(&log_process_thread_timer); k_timer_stop(&log_process_thread_timer);
k_sem_give(&log_process_thread_sem); k_sem_give(&log_process_thread_sem);
} else {
/* No action needed. Message processing will be triggered by the
* timeout or when number of upcoming messages exceeds the
* threshold.
*/
} }
} else {
/* No action needed. Message processing will be triggered by the
* timeout or when number of upcoming messages exceeds the
* threshold.
*/
;
} }
} }
static inline void msg_finalize(struct log_msg *msg, static inline void msg_finalize(struct log_msg *msg,
struct log_msg_ids src_level) struct log_msg_ids src_level)
{ {
unsigned int key; k_spinlock_key_t key;
msg->hdr.ids = src_level; msg->hdr.ids = src_level;
msg->hdr.timestamp = timestamp_func(); msg->hdr.timestamp = timestamp_func();
key = irq_lock(); key = k_spin_lock(&list_lock);
log_list_add_tail(&list, msg); log_list_add_tail(&list, msg);
irq_unlock(key); k_spin_unlock(&list_lock, key);
z_log_msg_post_finalize(); z_log_msg_post_finalize();
} }
@ -785,10 +793,10 @@ union log_msgs get_msg(void)
return msg; return msg;
} }
int key = irq_lock(); k_spinlock_key_t key = k_spin_lock(&list_lock);
msg.msg = log_list_head_get(&list); msg.msg = log_list_head_get(&list);
irq_unlock(key); k_spin_unlock(&list_lock, key);
return msg; return msg;
} }
@ -904,14 +912,14 @@ char *z_log_strdup(const char *str)
if (IS_ENABLED(CONFIG_LOG_STRDUP_POOL_PROFILING)) { if (IS_ENABLED(CONFIG_LOG_STRDUP_POOL_PROFILING)) {
size_t slen = strlen(str); size_t slen = strlen(str);
static struct k_spinlock lock; static struct k_spinlock strdup_lock;
k_spinlock_key_t key; k_spinlock_key_t key;
key = k_spin_lock(&lock); key = k_spin_lock(&strdup_lock);
log_strdup_in_use++; log_strdup_in_use++;
log_strdup_max = MAX(log_strdup_in_use, log_strdup_max); log_strdup_max = MAX(log_strdup_in_use, log_strdup_max);
log_strdup_longest = MAX(slen, log_strdup_longest); log_strdup_longest = MAX(slen, log_strdup_longest);
k_spin_unlock(&lock, key); k_spin_unlock(&strdup_lock, key);
} }
/* Set 'allocated' flag. */ /* Set 'allocated' flag. */