20273941f2
Christoph reported a nice splat which illustrated a race in the new stack based kmap_atomic implementation. The problem is that we pop our stack slot before we're completely done resetting its state -- in particular before clearing the PTE (which is sometimes only done under CONFIG_DEBUG_HIGHMEM). If an interrupt happens before we actually clear the PTE used for the last slot, that interrupt can reuse the slot in a dirty state, which triggers a BUG in kmap_atomic(). Fix this by introducing kmap_atomic_idx(), which reports the current slot index without actually releasing it; use that to find the PTE and delay the _pop() until after we're completely done. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Reported-by: Christoph Hellwig <hch@infradead.org> Acked-by: Rik van Riel <riel@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
90 lines
2.0 KiB
C
90 lines
2.0 KiB
C
/* highmem.c: arch-specific highmem stuff
|
|
*
|
|
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
|
* Written by David Howells (dhowells@redhat.com)
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation; either version
|
|
* 2 of the License, or (at your option) any later version.
|
|
*/
|
|
#include <linux/highmem.h>
|
|
#include <linux/module.h>
|
|
|
|
/*
 * kmap - establish a (possibly sleeping) kernel mapping for a page
 * @page: the page to map
 *
 * Lowmem pages already carry a permanent kernel address, which is
 * returned directly; only genuine highmem pages go through kmap_high().
 * May sleep, so must not be called from atomic context.
 */
void *kmap(struct page *page)
{
	might_sleep();

	return PageHighMem(page) ? kmap_high(page) : page_address(page);
}

EXPORT_SYMBOL(kmap);
|
|
|
|
/*
 * kunmap - release a mapping established by kmap()
 * @page: the page to unmap
 *
 * Must not be called from interrupt context (kmap/kunmap may sleep).
 * Lowmem pages are permanently mapped, so only highmem pages need
 * their mapping torn down via kunmap_high().
 */
void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();

	if (PageHighMem(page))
		kunmap_high(page);
}

EXPORT_SYMBOL(kunmap);
|
|
|
|
/*
 * kmap_atomic_to_page - convert a kmap_atomic() address back to its page
 * @ptr: kernel virtual address previously returned by kmap_atomic()
 */
struct page *kmap_atomic_to_page(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return page;
}
|
|
|
|
/*
 * __kmap_atomic - atomically map a page into a fixed per-CPU kmap slot
 * @page: the page to map
 *
 * Disables pagefaults, pushes a new atomic-kmap slot index and maps the
 * page's physical address through the slot selected by that index.
 * Slots 0-4 use the arch's primary mapping helpers; the remaining
 * NR_TLB_LINES slots use the secondary (TLB line) helper.  The mapping
 * must be released, in LIFO order, with __kunmap_atomic().
 */
void *__kmap_atomic(struct page *page)
{
	unsigned long paddr;
	int type;

	pagefault_disable();
	type = kmap_atomic_idx_push();
	paddr = page_to_phys(page);

	switch (type) {
	/*
	 * The first 4 primary maps are reserved for architecture code
	 */
	/* NOTE(review): the slot numbers are spelled out as literal
	 * constants per case rather than computed from 'type' —
	 * presumably because the primary/secondary helpers are macros
	 * that paste these arguments into asm register names; confirm
	 * against the arch's highmem.h before attempting to fold this
	 * switch into arithmetic.
	 */
	case 0: return __kmap_atomic_primary(4, paddr, 6);
	case 1: return __kmap_atomic_primary(5, paddr, 7);
	case 2: return __kmap_atomic_primary(6, paddr, 8);
	case 3: return __kmap_atomic_primary(7, paddr, 9);
	case 4: return __kmap_atomic_primary(8, paddr, 10);

	case 5 ... 5 + NR_TLB_LINES - 1:
		return __kmap_atomic_secondary(type - 5, paddr);

	default:
		BUG();
		return NULL;
	}
}
EXPORT_SYMBOL(__kmap_atomic);
|
|
|
|
/*
 * __kunmap_atomic - undo the most recent __kmap_atomic() mapping
 * @kvaddr: the kernel virtual address being unmapped (consumed only by
 *	    the secondary/TLB-line teardown path)
 *
 * Tears down the mapping held by the current (topmost) atomic-kmap
 * slot.  The ordering here is deliberate and race-sensitive: the slot
 * index is read with kmap_atomic_idx() — which does NOT release the
 * slot — and kmap_atomic_idx_pop() is called only *after* the mapping
 * is completely torn down.  Popping first would let an interrupt grab
 * the slot while its translation is still live, handing it out in a
 * dirty state and tripping the BUG in kmap_atomic().
 */
void __kunmap_atomic(void *kvaddr)
{
	int type = kmap_atomic_idx();
	switch (type) {
	case 0: __kunmap_atomic_primary(4, 6); break;
	case 1: __kunmap_atomic_primary(5, 7); break;
	case 2: __kunmap_atomic_primary(6, 8); break;
	case 3: __kunmap_atomic_primary(7, 9); break;
	case 4: __kunmap_atomic_primary(8, 10); break;

	case 5 ... 5 + NR_TLB_LINES - 1:
		__kunmap_atomic_secondary(type - 5, kvaddr);
		break;

	default:
		BUG();
	}
	/* release the slot only now that the mapping is fully dead */
	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
|