b227e23399
Previous patches allow the NMI subsystem to process multiple NMI events in one NMI. As previously discussed, this can cause issues when an event triggers another NMI but is processed in the current NMI; the next NMI then goes unprocessed and becomes an 'unknown' NMI.

To handle this, we first flag whether the NMI handler handled more than one event. If it did, there is a chance that the next NMI has already been processed. Once the NMI is flagged as a candidate to be swallowed, we next look for a back-to-back NMI condition.

This is determined by looking at the %rip from pt_regs. If it is the same as in the previous NMI, it is assumed the CPU did not have a chance to jump back into a non-NMI context and execute code, and instead handled another NMI.

If both of those conditions are true, we swallow any unknown NMI. There is still a chance that we accidentally swallow a real unknown NMI, but for now things seem better.

An optimization has also been added to the NMI notifier routine. Because x86 can latch at most one NMI while currently processing an NMI, we don't have to worry about executing _all_ the handlers in a standalone NMI; if multiple NMIs come in, the second NMI will represent them. For back-to-back NMI cases, however, we have the potential to drop NMIs, so only in the second half of a detected back-to-back NMI do we execute all the handlers.

Signed-off-by: Don Zickus <dzickus@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1317409584-23662-5-git-send-email-dzickus@redhat.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
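For reference, the heart of that logic lives in arch/x86/kernel/nmi.c rather than in the header below. The following is a minimal sketch of the mechanism the message describes, not the patch itself; nmi_handle() and unknown_nmi_error() are file-internal helpers of nmi.c, and their exact signatures here should be treated as illustrative.

#include <linux/types.h>                /* bool */
#include <asm/ptrace.h>                 /* struct pt_regs */

int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b);
void unknown_nmi_error(struct pt_regs *regs);

static bool swallow_nmi;                /* last NMI handled multiple events */
static unsigned long last_nmi_rip;      /* %rip interrupted by the last NMI */

static void default_do_nmi_sketch(struct pt_regs *regs)
{
        int handled;
        bool b2b = false;

        /*
         * Same interrupted %rip as last time means the CPU never ran
         * non-NMI code in between: a back-to-back NMI.
         */
        if (regs->ip == last_nmi_rip)
                b2b = true;
        else
                swallow_nmi = false;    /* normal code ran; reset the flag */

        last_nmi_rip = regs->ip;

        /*
         * In the back-to-back case run _all_ the handlers (no latched
         * NMI is guaranteed to come along and represent the rest);
         * otherwise stop at the first handler that claims the event.
         */
        handled = nmi_handle(NMI_LOCAL, regs, b2b);
        if (handled) {
                /*
                 * More than one event in this NMI means the next
                 * (latched) NMI may already have been processed.
                 */
                if (handled > 1)
                        swallow_nmi = true;
                return;
        }

        /* Unknown NMI: swallow it only if flagged AND back-to-back. */
        if (b2b && swallow_nmi)
                return;

        unknown_nmi_error(regs);
}

Note how the %rip heuristic errs on the side of not swallowing: any return to normal code changes %rip and clears the flag, so an unknown NMI is only dropped in the narrow window where it could plausibly have been pre-processed.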
48 lines
1.1 KiB
C
#ifndef _ASM_X86_NMI_H
#define _ASM_X86_NMI_H

#include <linux/pm.h>
#include <asm/irq.h>
#include <asm/io.h>

#ifdef CONFIG_X86_LOCAL_APIC

extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
extern int reserve_perfctr_nmi(unsigned int);
extern void release_perfctr_nmi(unsigned int);
extern int reserve_evntsel_nmi(unsigned int);
extern void release_evntsel_nmi(unsigned int);

struct ctl_table;
extern int proc_nmi_enabled(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
extern int unknown_nmi_panic;

void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
#endif

/* Run this handler before others of the same type. */
#define NMI_FLAG_FIRST	1

/* Handler types. */
enum {
	NMI_LOCAL = 0,
	NMI_UNKNOWN,
	NMI_MAX
};

/* Handler return values. */
#define NMI_DONE	0	/* not our NMI; keep looking */
#define NMI_HANDLED	1	/* we handled (at least) one event */

typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);

int register_nmi_handler(unsigned int, nmi_handler_t, unsigned long,
			 const char *);

void unregister_nmi_handler(unsigned int, const char *);

void stop_nmi(void);
void restart_nmi(void);
void local_touch_nmi(void);

#endif /* _ASM_X86_NMI_H */
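A hypothetical consumer of the interface declared above might look like the following sketch. The "my_nmi" name, the my_device_* helpers, and the module boilerplate are made up for illustration; only register_nmi_handler(), unregister_nmi_handler(), and the NMI_* constants come from the header itself.

#include <linux/module.h>
#include <asm/nmi.h>

extern bool my_device_raised_nmi(void);	/* hypothetical device check */
extern void my_device_ack_nmi(void);	/* hypothetical acknowledge */

static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
{
	if (!my_device_raised_nmi())
		return NMI_DONE;	/* not ours; let other handlers look */

	my_device_ack_nmi();
	return NMI_HANDLED;
}

static int __init my_driver_init(void)
{
	/* NMI_FLAG_FIRST: consult this handler before other NMI_LOCAL ones. */
	return register_nmi_handler(NMI_LOCAL, my_nmi_handler,
				    NMI_FLAG_FIRST, "my_nmi");
}

static void __exit my_driver_exit(void)
{
	unregister_nmi_handler(NMI_LOCAL, "my_nmi");
}

module_init(my_driver_init);
module_exit(my_driver_exit);

The core sums the values returned by the registered handlers, which is how "handled more than one event" from the commit message is detected: a total greater than 1 across one NMI sets the swallow flag.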