From 37929b3428ca5f3927c1d5622dd532ad8ce6731f Mon Sep 17 00:00:00 2001
From: "Charles E. Youse"
Date: Thu, 15 Aug 2019 08:56:43 -0700
Subject: [PATCH] arch/x86_64: do not modify CR8 in interrupt path

Currently, the interrupt service code manually raises the CPU task
priority to the priority level of the vector being serviced to defer
any lower-priority interrupts. This is unnecessary; the local APIC is
aware that an interrupt is in-service and accounts for its priority
when deciding whether to issue an overriding interrupt to the CPU.

Signed-off-by: Charles E. Youse
---
 arch/x86_64/core/xuk.c | 15 ++------------
 1 file changed, 2 insertions(+), 13 deletions(-)

diff --git a/arch/x86_64/core/xuk.c b/arch/x86_64/core/xuk.c
index 8ca90736e7a..a833c202662 100644
--- a/arch/x86_64/core/xuk.c
+++ b/arch/x86_64/core/xuk.c
@@ -176,15 +176,7 @@ long _isr_c_top(unsigned long vecret, unsigned long rsp,
 
 	z_isr_entry();
 
-	/* Set current priority in CR8 to the currently-serviced IRQ
-	 * and re-enable interrupts
-	 */
-	unsigned long long cr8, cr8new = vector >> 4;
-
-	__asm__ volatile("movq %%cr8, %0;"
-			 "movq %1, %%cr8;"
-			 "sti"
-			 : "=r"(cr8) : "r"(cr8new));
+	__asm__ volatile("sti"); /* re-enable interrupts */
 
 	if (h->fn) {
 		h->fn(h->arg, err);
@@ -192,10 +184,7 @@ long _isr_c_top(unsigned long vecret, unsigned long rsp,
 		z_unhandled_vector(vector, err, frame);
 	}
 
-	/* Mask interrupts to finish processing (they'll get restored
-	 * in the upcoming IRET) and restore CR8
-	 */
-	__asm__ volatile("cli; movq %0, %%cr8" : : "r"(cr8));
+	__asm__ volatile("cli"); /* mask [at least] until upcoming IRET */
 
 	/* Signal EOI if it's an APIC-managed interrupt */
 	if (vector > 0x1f) {