diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index bba3cf8..361d506 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -1,15 +1,33 @@
 #ifndef _X86_IRQFLAGS_H_
 #define _X86_IRQFLAGS_H_
 
+#include <asm/percpu.h>
 #include <asm/processor-flags.h>
 
 #ifndef __ASSEMBLY__
+#include <linux/bug.h>
 /*
  * Interrupt control:
  */
 
+DECLARE_PER_CPU(u32, irq_flags);	/* X86_EFLAGS_IF when enabled, 0 when disabled */
+
+static inline void native_irq_disable(void)
+{
+	asm volatile("cli": : :"memory");
+	this_cpu_write_4(irq_flags, 0);
+}
+
+static inline void native_irq_enable(void)
+{
+	this_cpu_write_4(irq_flags, X86_EFLAGS_IF);
+	asm volatile("sti": : :"memory");
+}
+
 static inline unsigned long native_save_fl(void)
 {
+	unsigned long disabled;
+#ifdef DEBUG
 	unsigned long flags;
 
 	/*
@@ -22,30 +40,37 @@ static inline unsigned long native_save_fl(void)
 		     : "=rm" (flags)
 		     : /* no input */
 		     : "memory");
+#endif
+	disabled = this_cpu_read_4(irq_flags);
+#ifdef DEBUG
+	/* Cross-check the shadow against the real EFLAGS.IF bit. */
+	if ((flags ^ disabled) & X86_EFLAGS_IF) {
+		disabled = flags & X86_EFLAGS_IF;
+		this_cpu_write_4(irq_flags, disabled);
+		BUG();
+	}
+#endif
 
-	return flags;
+	return disabled;
 }
 
 static inline void native_restore_fl(unsigned long flags)
 {
+	BUG_ON(flags & ~X86_EFLAGS_IF);
+	if (flags & X86_EFLAGS_IF)
+		native_irq_enable();
+	else
+		native_irq_disable();
+#if 0
 	asm volatile("push %0 ; popf"
 		     : /* no output */
 		     :"g" (flags)
 		     :"memory", "cc");
-}
-
-static inline void native_irq_disable(void)
-{
-	asm volatile("cli": : :"memory");
-}
-
-static inline void native_irq_enable(void)
-{
-	asm volatile("sti": : :"memory");
+#endif
 }
 
 static inline void native_safe_halt(void)
 {
+	this_cpu_write_4(irq_flags, X86_EFLAGS_IF);
 	asm volatile("sti; hlt": : :"memory");
 }
 
@@ -111,8 +136,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
 }
 #else
 
-#define ENABLE_INTERRUPTS(x)	sti
-#define DISABLE_INTERRUPTS(x)	cli
+#define ENABLE_INTERRUPTS(x)	movl $X86_EFLAGS_IF,PER_CPU_VAR(irq_flags);	\
+				sti
+
+#define DISABLE_INTERRUPTS(x)	cli;						\
+				movl $0,PER_CPU_VAR(irq_flags)
 
 #ifdef CONFIG_X86_64
 #define SWAPGS	swapgs
@@ -137,13 +165,14 @@ static inline notrace unsigned long arch_local_irq_save(void)
 	swapgs;					\
 	sysretl
 #define ENABLE_INTERRUPTS_SYSEXIT32			\
+	movl $X86_EFLAGS_IF,PER_CPU_VAR(irq_flags);	\
 	swapgs;					\
 	sti;					\
 	sysexit
 
 #else
 #define INTERRUPT_RETURN		iret
-#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
+#define ENABLE_INTERRUPTS_SYSEXIT	movl $X86_EFLAGS_IF,PER_CPU_VAR(irq_flags); sti; sysexit
 #define GET_CR0_INTO_EAX		movl %cr0, %eax
 #endif
 
@@ -170,11 +199,11 @@ static inline int arch_irqs_disabled(void)
 #define ARCH_LOCKDEP_SYS_EXIT		call lockdep_sys_exit_thunk
 #define ARCH_LOCKDEP_SYS_EXIT_IRQ	\
 	TRACE_IRQS_ON; \
-	sti; \
+	ENABLE_INTERRUPTS(); \
 	SAVE_REST; \
 	LOCKDEP_SYS_EXIT; \
 	RESTORE_REST; \
-	cli; \
+	DISABLE_INTERRUPTS(); \
 	TRACE_IRQS_OFF;
 
 #else
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d814772..b189708 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1079,6 +1079,7 @@ DEFINE_PER_CPU_FIRST(union irq_stack_union,
  */
 DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
 	&init_task;
+DEFINE_PER_CPU(u32, irq_flags) = 0;	/* boot CPUs start with IRQs disabled */
 EXPORT_PER_CPU_SYMBOL(current_task);
 
 DEFINE_PER_CPU(unsigned long, kernel_stack) =
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index c1d01e6..2872b9a 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -286,12 +286,13 @@ ENDPROC(native_usergs_sysret64)
 
 	.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
-#ifdef CONFIG_TRACE_IRQFLAGS
 	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
 	jnc  1f
+	movl $X86_EFLAGS_IF,PER_CPU_VAR(irq_flags)
+#ifdef CONFIG_TRACE_IRQFLAGS
 	TRACE_IRQS_ON
-1:
 #endif
+1:
 	.endm
 
 /*
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index e4595f1..fc2347d 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -186,6 +186,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 	unsigned vector = ~regs->orig_ax;
 	unsigned irq;
 
+	this_cpu_write_4(irq_flags, 0);	/* interrupt gate entered with IF clear */
 	irq_enter();
 	exit_idle();