kernel-ark/include/linux/highmem.h
Cesar Eduardo Barros 597781f3e5 kmap_atomic: make kunmap_atomic() harder to misuse
kunmap_atomic() is currently at level -4 on Rusty's "Hard To Misuse"
list[1] ("Follow common convention and you'll get it wrong"), except in
some architectures when CONFIG_DEBUG_HIGHMEM is set[2][3].

kunmap() takes a pointer to a struct page; kunmap_atomic(), however, takes
a pointer to within the page itself.  This seems to trip people up once in
a while (the convention they are following is the one from kunmap()).

Make it much harder to misuse, by moving it to level 9 on Rusty's list[4]
("The compiler/linker won't let you get it wrong").  This is done by
refusing to build if the type of its first argument is a pointer to a
struct page.

The real kunmap_atomic() is renamed to kunmap_atomic_notypecheck()
(which is what you would call if, for some strange reason, calling it
with a pointer to a struct page is not incorrect in your code).
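
As an illustration (not part of the patch), a minimal userspace sketch of the
same trick; struct page, the printf() body and the slot number are stand-ins
made up for the example, and __same_type() is spelled out as the GCC builtin
it wraps in the kernel:

	#include <stdio.h>

	struct page { int dummy; };	/* stand-in for the kernel's struct page */

	/* a true condition yields a negative array size and breaks the build */
	#define BUILD_BUG_ON(cond)	((void)sizeof(char[1 - 2 * !!(cond)]))
	#define __same_type(a, b)	__builtin_types_compatible_p(typeof(a), typeof(b))

	#define kunmap_atomic(addr, idx) do { \
			BUILD_BUG_ON(__same_type((addr), struct page *)); \
			printf("unmapped %p (slot %d)\n", (void *)(addr), (idx)); \
		} while (0)

	int main(void)
	{
		struct page pg;
		void *vaddr = &pg;		/* what kmap_atomic() would return */

		kunmap_atomic(vaddr, 0);	/* builds: a pointer into the mapping */
		/* kunmap_atomic(&pg, 0); */	/* refused: first argument is a struct page * */
		return 0;
	}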

The previous version of this patch was compile tested on x86-64.

[1] http://ozlabs.org/~rusty/index.cgi/tech/2008-04-01.html
[2] In these cases, it is at level 5, "Do it right or it will always
    break at runtime."
[3] At least mips and powerpc look very similar, and sparc also seems to
    share a common ancestor with both; there seems to be quite some
    degree of copy-and-paste coding here. The include/asm/highmem.h files
    for these three archs mention x86 CPUs at the top.
[4] http://ozlabs.org/~rusty/index.cgi/tech/2008-03-30.html
[5] As an aside, could someone tell me why mn10300 uses unsigned long as
    the first parameter of kunmap_atomic() instead of void *?

Signed-off-by: Cesar Eduardo Barros <cesarb@cesarb.net>
Cc: Russell King <linux@arm.linux.org.uk> (arch/arm)
Cc: Ralf Baechle <ralf@linux-mips.org> (arch/mips)
Cc: David Howells <dhowells@redhat.com> (arch/frv, arch/mn10300)
Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com> (arch/mn10300)
Cc: Kyle McMartin <kyle@mcmartin.ca> (arch/parisc)
Cc: Helge Deller <deller@gmx.de> (arch/parisc)
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org> (arch/parisc)
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> (arch/powerpc)
Cc: Paul Mackerras <paulus@samba.org> (arch/powerpc)
Cc: "David S. Miller" <davem@davemloft.net> (arch/sparc)
Cc: Thomas Gleixner <tglx@linutronix.de> (arch/x86)
Cc: Ingo Molnar <mingo@redhat.com> (arch/x86)
Cc: "H. Peter Anvin" <hpa@zytor.com> (arch/x86)
Cc: Arnd Bergmann <arnd@arndb.de> (include/asm-generic)
Cc: Rusty Russell <rusty@rustcorp.com.au> ("Hard To Misuse" list)
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2010-08-09 20:44:54 -07:00


#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif
#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif
#include <asm/kmap_types.h>
#ifdef CONFIG_DEBUG_HIGHMEM
void debug_kmap_atomic(enum km_type type);
#else
static inline void debug_kmap_atomic(enum km_type type)
{
}
#endif
#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;
void kmap_flush_unused(void);
#else /* CONFIG_HIGHMEM */
static inline unsigned int nr_free_highpages(void) { return 0; }
#define totalhigh_pages 0UL
#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}
static inline void kunmap(struct page *page)
{
}
static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx)
#define kunmap_atomic_notypecheck(addr, idx) do { pagefault_enable(); } while (0)
#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#define kmap_flush_unused() do {} while(0)
#endif
#endif /* CONFIG_HIGHMEM */
/* Prevent people trying to call kunmap_atomic() as if it were kunmap() */
/* kunmap_atomic() should get the return value of kmap_atomic, not the page. */
#define kunmap_atomic(addr, idx) do { \
		BUILD_BUG_ON(__same_type((addr), struct page *)); \
		kunmap_atomic_notypecheck((addr), (idx)); \
	} while (0)
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}
#endif
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
* __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
* @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
* @vma: The VMA the page is to be allocated for
* @vaddr: The virtual address the page will be inserted into
*
* This function will allocate a page for a VMA but the caller is expected
* to specify via movableflags whether the page will be movable in the
* future or not
*
* An architecture may override this function by defining
* __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
* implementation.
*/
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif
/**
* alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
* @vma: The VMA the page is to be allocated for
* @vaddr: The virtual address the page will be inserted into
*
* This function will allocate a page for a VMA that the caller knows will
* be able to migrate in the future using move_pages() or reclaimed
*/
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}
static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}
static inline void zero_user_segment(struct page *page,
		unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}
static inline void zero_user(struct page *page,
		unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}
#endif
static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}
#endif /* _LINUX_HIGHMEM_H */