1e02ce4ccc
Context switches and TLB flushes can change individual bits of CR4. CR4 reads take several cycles, so store a shadow copy of CR4 in a per-cpu variable. To avoid wasting a cache line, I added the CR4 shadow to cpu_tlbstate, which is already touched in switch_mm. The heaviest users of the CR4 shadow will be switch_mm and __switch_to_xtra, and __switch_to_xtra is called shortly after switch_mm during context switch, so the cacheline is likely to be hot.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Vince Weaver <vince@deater.net>
Cc: "hillf.zj" <hillf.zj@alibaba-inc.com>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/3a54dd3353fffbf84804398e00dfdc5b7c1afd7d.1414190806.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
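For reference, here is a minimal sketch of the shadowing scheme the message describes, assuming the field and helper names used by the patch series (cpu_tlbstate.cr4, cr4_read_shadow(), cr4_set_bits(), __write_cr4()); it is a simplified reconstruction, not the diff to the file below. Readers take the shadow via this_cpu_read() instead of paying for a CR4 register read, and writers update the shadow before touching the register so the two never diverge:

#include <linux/percpu.h>
#include <asm/special_insns.h>		/* __write_cr4() */

struct tlb_state {
	struct mm_struct *active_mm;
	int state;
	unsigned long cr4;		/* per-cpu shadow of the hardware CR4 */
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Read the shadow instead of issuing a multi-cycle CR4 register read. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

/* Set bits in CR4, keeping the shadow coherent with the hardware. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4 = this_cpu_read(cpu_tlbstate.cr4);

	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

The file below touches this only peripherally: setup_real_mode() latches the boot CPU's CR4 into the trampoline header using the raw __read_cr4() accessor, so APs come up with the same value.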
#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/realmode.h>

struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;

void __init reserve_real_mode(void)
{
	phys_addr_t mem;
	unsigned char *base;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

	/* Has to be under 1M so we can execute real-mode AP code. */
	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
	if (!mem)
		panic("Cannot allocate trampoline\n");

	base = __va(mem);
	memblock_reserve(mem, size);
	real_mode_header = (struct real_mode_header *) base;
	printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
	       base, (unsigned long long)mem, size);
}

void __init setup_real_mode(void)
{
	u16 real_mode_seg;
	const u32 *rel;
	u32 count;
	unsigned char *base;
	unsigned long phys_base;
	struct trampoline_header *trampoline_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u64 efer;
#endif

	base = (unsigned char *)real_mode_header;

	memcpy(base, real_mode_blob, size);

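	/* A real-mode segment addresses 16-byte paragraphs: segment = phys >> 4. */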
	phys_base = __pa(base);
	real_mode_seg = phys_base >> 4;

	rel = (u32 *) real_mode_relocs;

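	/*
	 * Layout of the relocs blob, as consumed below: two tables back
	 * to back, each a u32 entry count followed by that many u32
	 * offsets into the blob.
	 */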
	/* 16-bit segment relocations. */
	count = *rel++;
	while (count--) {
		u16 *seg = (u16 *) (base + *rel++);
		*seg = real_mode_seg;
	}

	/* 32-bit linear relocations. */
	count = *rel++;
	while (count--) {
		u32 *ptr = (u32 *) (base + *rel++);
		*ptr += phys_base;
	}

	/* Must be performed *after* relocation. */
	trampoline_header = (struct trampoline_header *)
		__va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
	trampoline_header->start = __pa_symbol(startup_32_smp);
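	/*
	 * The GDT limit is the offset of its last byte: __BOOT_DS is the
	 * last selector here, and its 8-byte descriptor ends 7 bytes in.
	 */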
	trampoline_header->gdt_limit = __BOOT_DS + 7;
	trampoline_header->gdt_base = __pa_symbol(boot_gdt);
#else
	/*
	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
	 * so we need to mask it out.
	 */
	rdmsrl(MSR_EFER, efer);
	trampoline_header->efer = efer & ~EFER_LMA;

	trampoline_header->start = (u64) secondary_startup_64;
	trampoline_cr4_features = &trampoline_header->cr4;
	*trampoline_cr4_features = __read_cr4();

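	/*
	 * Entry 0 reuses the direct-map PGD entry so low memory is
	 * mapped 1:1 while the AP enables paging; entry 511 carries the
	 * kernel mapping it jumps to afterwards.
	 */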
	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
	trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
	trampoline_pgd[511] = init_level4_pgt[511].pgd;
#endif
}

/*
 * reserve_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. The trampoline code will also be executed by APs, so it
 * must be marked executable by the time do_pre_smp_initcalls() runs;
 * hence we run this as an early_initcall().
 */
static int __init set_real_mode_permissions(void)
{
	unsigned char *base = (unsigned char *) real_mode_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

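	/*
	 * Mark the whole blob non-executable first, make [base, ro_end)
	 * read-only (this includes the text), then re-enable execute on
	 * the [text_start, ro_end) range alone.
	 */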
	size_t ro_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		__pa(base);

	size_t text_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		real_mode_header->text_start;

	unsigned long text_start =
		(unsigned long) __va(real_mode_header->text_start);

	set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
	set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
	set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);

	return 0;
}
early_initcall(set_real_mode_permissions);