cec5f268cd
TPAUSE instructs the processor to enter an implementation-dependent
optimized state. The instruction execution wakes up when the time-stamp
counter reaches or exceeds the implicit EDX:EAX 64-bit input value. The
instruction also wakes up on expiration of the operating system
time-limit, on an external interrupt, or on exceptions such as a debug
exception or a machine check exception.

TPAUSE offers a choice of two lower power states:

 1. Light-weight power/performance optimized state C0.1
 2. Improved power/performance optimized state C0.2

This way it can save power with low wake-up latency compared to a
spinloop based delay. The selection between the two states is governed
by the input register.

TPAUSE is available on processors with X86_FEATURE_WAITPKG.

Co-developed-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Kyung Min Park <kyung.min.park@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Link: https://lkml.kernel.org/r/1587757076-30337-4-git-send-email-kyung.min.park@intel.com
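For context on the use_tpause_delay() switch in x86_late_time_init() below, here is a minimal sketch of what a TPAUSE based delay routine can look like. It assumes the __tpause() helper and the TPAUSE_C01_STATE/TPAUSE_C02_STATE selectors from <asm/mwait.h>, plus lower_32_bits()/upper_32_bits() from <linux/kernel.h>; the real implementation lives in arch/x86/lib/delay.c and may differ in detail.

#include <linux/kernel.h>
#include <asm/mwait.h>

/*
 * Illustrative sketch only: wait until the TSC reaches start + cycles
 * using TPAUSE instead of a PAUSE based spinloop.  EDX:EAX carry the
 * 64-bit TSC deadline; bit 0 of the ECX input selects the power state
 * (clear = deeper C0.2, set = lighter C0.1).
 */
static void tpause_delay_sketch(u64 start, u64 cycles)
{
	u64 until = start + cycles;

	/*
	 * Request C0.2: its wake-up latency is small compared to the
	 * microseconds typical udelay()/usleep_range() callers ask for,
	 * so prefer the larger power saving.
	 */
	__tpause(TPAUSE_C02_STATE, upper_32_bits(until), lower_32_bits(until));
}

Note that TPAUSE may wake up early (external interrupt, exception, or expiry of the OS time-limit in IA32_UMWAIT_CONTROL), so a real delay loop re-reads the TSC and re-issues the wait until the requested number of cycles has actually elapsed.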
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 1991,1992,1995 Linus Torvalds
 * Copyright (c) 1994 Alan Modra
 * Copyright (c) 1995 Markus Kuhn
 * Copyright (c) 1996 Ingo Molnar
 * Copyright (c) 1998 Andrea Arcangeli
 * Copyright (c) 2002,2006 Vojtech Pavlik
 * Copyright (c) 2003 Andi Kleen
 *
 */

#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/i8253.h>
#include <linux/time.h>
#include <linux/export.h>

#include <asm/vsyscall.h>
#include <asm/x86_init.h>
#include <asm/i8259.h>
#include <asm/timer.h>
#include <asm/hpet.h>
#include <asm/time.h>

#ifdef CONFIG_X86_64
__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
#endif

unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (!user_mode(regs) && in_lock_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
		/* With frame pointers, the caller's return address sits just above the saved frame pointer. */
		return *(unsigned long *)(regs->bp + sizeof(long));
#else
		unsigned long *sp = (unsigned long *)regs->sp;
		/*
		 * The return address is either directly at the stack pointer
		 * or above a saved flags word. EFLAGS has bits 22-31 zero,
		 * kernel addresses don't.
		 */
		if (sp[0] >> 22)
			return sp[0];
		if (sp[1] >> 22)
			return sp[1];
#endif
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);

/*
 * Default timer interrupt handler for PIT/HPET
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	global_clock_event->event_handler(global_clock_event);
	return IRQ_HANDLED;
}

static void __init setup_default_timer_irq(void)
{
	unsigned long flags = IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER;

	/*
	 * Unconditionally register the legacy timer interrupt; even
	 * without legacy PIC/PIT we need this for the HPET0 in legacy
	 * replacement mode.
	 */
	if (request_irq(0, timer_interrupt, flags, "timer", NULL))
		pr_info("Failed to register legacy timer interrupt\n");
}

/* Default timer init function */
void __init hpet_time_init(void)
{
	if (!hpet_enable()) {
		/* No usable HPET: fall back to the PIT. */
		if (!pit_timer_init())
			return;
	}

	setup_default_timer_irq();
}

static __init void x86_late_time_init(void)
{
	/*
	 * Before PIT/HPET init, select the interrupt mode. This is required
	 * to correctly decide whether the PIT should be initialized at all.
	 */
	x86_init.irqs.intr_mode_select();

	/* Setup the legacy timers */
	x86_init.timers.timer_init();

	/*
	 * After PIT/HPET timers init, set up the final interrupt mode for
	 * delivering IRQs.
	 */
	x86_init.irqs.intr_mode_init();
	tsc_init();

	/* Prefer a TPAUSE based delay loop when the CPU supports WAITPKG. */
	if (static_cpu_has(X86_FEATURE_WAITPKG))
		use_tpause_delay();
}

/*
 * Initialize the TSC and set up the periodic timers late, from
 * x86_late_time_init(), so that ioremap() works by then.
 */
void __init time_init(void)
{
	late_time_init = x86_late_time_init;
}

/*
 * Sanity check the vdso related archdata content.
 */
void clocksource_arch_init(struct clocksource *cs)
{
	if (cs->vdso_clock_mode == VDSO_CLOCKMODE_NONE)
		return;

	if (cs->mask != CLOCKSOURCE_MASK(64)) {
		pr_warn("clocksource %s registered with invalid mask %016llx for VDSO. Disabling VDSO support.\n",
			cs->name, cs->mask);
		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
	}
}
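As a usage illustration for the sanity check above, here is a hedged sketch of a clocksource that would keep its VDSO support: the mask covers all 64 bits and a VDSO clock mode is set. The read callback, rating, name and registration frequency are made-up placeholders; the field and helper names (vdso_clock_mode, CLOCKSOURCE_MASK(), clocksource_register_khz()) are assumed to match the clocksource API of the kernel this file belongs to, and registration is expected to invoke clocksource_arch_init() on the new clocksource.

#include <linux/clocksource.h>
#include <linux/init.h>
#include <asm/msr.h>
#include <asm/tsc.h>

/* Hypothetical TSC-backed read callback, for illustration only. */
static u64 example_cs_read(struct clocksource *cs)
{
	return rdtsc_ordered();
}

static struct clocksource example_cs = {
	.name			= "example-tsc",
	.rating			= 250,
	.read			= example_cs_read,
	.mask			= CLOCKSOURCE_MASK(64),	/* full 64-bit mask, as required above */
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,	/* request VDSO access */
};

static int __init example_cs_init(void)
{
	/* Registration is expected to run clocksource_arch_init() on example_cs. */
	return clocksource_register_khz(&example_cs, tsc_khz);
}
device_initcall(example_cs_init);

Had example_cs used a narrower mask, the check above would have printed the warning and forced vdso_clock_mode back to VDSO_CLOCKMODE_NONE, leaving the clocksource usable but without VDSO acceleration.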