/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include <linux/swapops.h>
#include <linux/elf.h>

[PATCH] sparsemem memory model

Sparsemem abstracts the use of discontiguous mem_maps[]. This kind of
mem_map[] is needed by discontiguous memory machines (like in the old
CONFIG_DISCONTIGMEM case) as well as memory hotplug systems. Sparsemem
replaces DISCONTIGMEM when enabled, and it is hoped that it can eventually
become a complete replacement.

A significant advantage over DISCONTIGMEM is that it's completely separated
from CONFIG_NUMA. When producing this patch, it became apparent that NUMA
and DISCONTIG are often confused.

Another advantage is that sparsemem doesn't require each NUMA node's ranges
to be contiguous. It can handle overlapping ranges between nodes with no
problems, where DISCONTIGMEM currently throws away that memory.

Sparsemem uses an array to provide different pfn_to_page() translations for
each SECTION_SIZE area of physical memory. This is what allows the mem_map[]
to be chopped up.

In order to do quick pfn_to_page() operations, the section number of the page
is encoded in page->flags. Part of the sparsemem infrastructure enables
sharing of these bits more dynamically (at compile-time) between the
page_zone() and sparsemem operations. However, on 32-bit architectures, the
number of bits is quite limited, and may require growing the size of the
page->flags type in certain conditions. Several things might force this to
occur: a decrease in the SECTION_SIZE (if you want to hotplug smaller areas
of memory), an increase in the physical address space, or an increase in the
number of used page->flags.

One thing to note is that, once sparsemem is present, the NUMA node
information no longer needs to be stored in the page->flags. It might provide
speed increases on certain platforms and will be stored there if there is
room. But, if out of room, an alternate (theoretically slower) mechanism is
used.

This patch introduces CONFIG_FLATMEM. It is used in almost all cases where
there used to be an #ifndef DISCONTIG, because SPARSEMEM and DISCONTIGMEM
often have to compile out the same areas of code.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Martin Bligh <mbligh@aracnet.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Bob Picco <bob.picco@hp.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 07:07:54 +00:00
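
As a rough illustration of the lookup the description above implies - not code
from this file, and using hypothetical example_* names rather than the real
sparsemem API - a per-section array can supply the mem_map base for each
SECTION_SIZE chunk of physical memory:

#define EXAMPLE_SECTION_SHIFT		27	/* assume 128MB sections */
#define EXAMPLE_PFN_SECTION_SHIFT	(EXAMPLE_SECTION_SHIFT - PAGE_SHIFT)
#define EXAMPLE_PAGES_PER_SECTION	(1UL << EXAMPLE_PFN_SECTION_SHIFT)

struct example_mem_section {
	struct page *section_mem_map;	/* mem_map chunk covering this section */
};
extern struct example_mem_section example_sections[];

static inline struct page *example_pfn_to_page(unsigned long pfn)
{
	struct example_mem_section *sec =
			&example_sections[pfn >> EXAMPLE_PFN_SECTION_SHIFT];

	/* offset of this pfn within its section's mem_map chunk */
	return sec->section_mem_map + (pfn & (EXAMPLE_PAGES_PER_SECTION - 1));
}
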
#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;
unsigned long vmalloc_earlyreserve;

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);
EXPORT_SYMBOL(vmalloc_earlyreserve);

int randomize_va_space __read_mostly = 1;

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);


/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */

[PATCH] freepgt: free_pgtables use vma list

Recent woes with some arches needing their own pgd_addr_end macro; and 4-level
clear_page_range regression since 2.6.10's clear_page_tables; and its
long-standing well-known inefficiency in searching throughout the higher-level
page tables for those few entries to clear and free: all can be blamed on
ignoring the list of vmas when we free page tables.

Replace exit_mmap's clear_page_range of the total user address space by
free_pgtables operating on the mm's vma list; unmap_region use it in the same
way, giving floor and ceiling beyond which it may not free tables. This
brings lmbench fork/exec/sh numbers back to 2.6.10 (unless preempt is enabled,
in which case latency fixes spoil unmap_vmas throughput).

Beware: the do_mmap_pgoff driver failure case must now use unmap_region
instead of zap_page_range, since a page table might have been allocated, and
can only be freed while it is touched by some vma.

Move free_pgtables from mmap.c to memory.c, where its lower levels are adapted
from the clear_page_range levels. (Most of free_pgtables' old code was
actually for a non-existent case, prev not properly set up, dating from before
hch gave us split_vma.) Pass mmu_gather** in the public interfaces, since we
might want to add latency lockdrops later; but no attempt to do so yet, going
by vma should itself reduce latency.

But what if is_hugepage_only_range? Those ia64 and ppc64 cases need careful
examination: put that off until a later patch of the series.

What of x86_64's 32bit vdso page __map_syscall32 maps outside any vma?
And the range to sparc64's flush_tlb_pgtables? It's less clear to me now that
we need to do more than is done here - every PMD_SIZE ever occupied will be
flushed, do we really have to flush every PGDIR_SIZE ever partially occupied?
A shame to complicate it unnecessarily.

Special thanks to David Miller for time spent repairing my ceilings.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-04-19 20:29:15 +00:00
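
A rough sketch of the calling pattern described above (a hypothetical
simplification for illustration, not the actual mm/mmap.c code): the vmas
neighbouring the region being unmapped supply the floor and ceiling beyond
which no page tables may be freed, with a ceiling of 0 standing for the top
of the address space.

/* illustrative wrapper only; example_free_range_pgtables is not a real helper */
static void example_free_range_pgtables(struct mmu_gather **tlb,
		struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next)
{
	/* don't free below the previous vma, nor above the next one */
	unsigned long floor = prev ? prev->vm_end : FIRST_USER_ADDRESS;
	unsigned long ceiling = next ? next->vm_start : 0;	/* 0 == top */

	free_pgtables(tlb, vma, floor, ceiling);
}
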
[PATCH] mm: split page table lock

Christoph Lameter demonstrated very poor scalability on the SGI 512-way, with
a many-threaded application which concurrently initializes different parts of
a large anonymous area.

This patch corrects that, by using a separate spinlock per page table page, to
guard the page table entries in that page, instead of using the mm's single
page_table_lock. (But even then, page_table_lock is still used to guard page
table allocation, and anon_vma allocation.)

In this implementation, the spinlock is tucked inside the struct page of the
page table page: with a BUILD_BUG_ON in case it overflows - which it would in
the case of 32-bit PA-RISC with spinlock debugging enabled.

Splitting the lock is not quite for free: another cacheline access. Ideally,
I suppose we would use split ptlock only for multi-threaded processes on
multi-cpu machines; but deciding that dynamically would have its own costs.
So for now enable it by config, at some number of cpus - since the Kconfig
language doesn't support inequalities, let preprocessor compare that with
NR_CPUS. But I don't think it's worth being user-configurable: for good
testing of both split and unsplit configs, split now at 4 cpus, and perhaps
change that to 8 later.

There is a benefit even for singly threaded processes: kswapd can be attacking
one part of the mm while another part is busy faulting.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:40 +00:00
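
A minimal sketch of the usage pattern this per-page-table lock implies for
code that walks ptes, using the pte_offset_map_lock/pte_unmap_unlock helpers;
the example_* wrapper itself is hypothetical and only for illustration.

static int example_check_pte_present(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;
	int present;

	/* map the pte page and take the lock guarding its entries */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	present = pte_present(*pte);
	pte_unmap_unlock(pte, ptl);

	return present;
}
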
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = pmd_page(*pmd);
	pmd_clear(pmd);
	pte_lock_deinit(page);
	pte_free_tlb(tlb, page);
	dec_zone_page_state(page, NR_PAGETABLE);
	tlb->mm->nr_ptes--;
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather **tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below?  No, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
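	/*
	 * Worked example (editorial illustration, not in the original file;
	 * assumes 2MB PMD_SIZE as on x86-64 with 4kB pages): freeing from
	 * addr = 0x12301000 rounds addr down to 0x12200000, so the pte table
	 * covering that partial 2MB block can be freed as well.  But if
	 * floor = 0x12300000 (a neighbouring vma below addr still lives in
	 * the same 2MB block), then addr < floor, and addr steps up by
	 * PMD_SIZE to 0x12400000, leaving the shared table alone.  A ceiling
	 * of 0 means "no upper limit", which is why ceiling is only masked
	 * inside the if (ceiling) test below.
	 */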
addr &= PMD_MASK;
|
|
|
|
if (addr < floor) {
|
|
|
|
addr += PMD_SIZE;
|
|
|
|
if (!addr)
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (ceiling) {
|
|
|
|
ceiling &= PMD_MASK;
|
|
|
|
if (!ceiling)
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (end - 1 > ceiling - 1)
|
|
|
|
end -= PMD_SIZE;
|
|
|
|
if (addr > end - 1)
|
|
|
|
return;
|
|
|
|
|
|
|
|
start = addr;
|
2005-04-19 20:29:16 +00:00
|
|
|
pgd = pgd_offset((*tlb)->mm, addr);
|
2005-04-16 22:20:36 +00:00
|
|
|
do {
|
|
|
|
next = pgd_addr_end(addr, end);
|
|
|
|
if (pgd_none_or_clear_bad(pgd))
|
|
|
|
continue;
|
2005-04-19 20:29:16 +00:00
|
|
|
free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
|
2005-04-16 22:20:36 +00:00
|
|
|
} while (pgd++, addr = next, addr != end);
|
2005-04-19 20:29:15 +00:00
|
|
|
|
2005-10-30 01:16:02 +00:00
|
|
|
if (!(*tlb)->fullmm)
|
2005-04-19 20:29:16 +00:00
|
|
|
flush_tlb_pgtables((*tlb)->mm, start, end);
|
2005-04-19 20:29:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
|
2005-04-19 20:29:16 +00:00
|
|
|
unsigned long floor, unsigned long ceiling)
|
2005-04-19 20:29:15 +00:00
|
|
|
{
|
|
|
|
while (vma) {
|
|
|
|
struct vm_area_struct *next = vma->vm_next;
|
|
|
|
unsigned long addr = vma->vm_start;
|
|
|
|
|
[PATCH] mm: unlink vma before pagetables
In most places the descent from pgd to pud to pmd to pte holds mmap_sem
(exclusively or not), which ensures that free_pgtables cannot be freeing page
tables from any level at the same time. But truncation and reverse mapping
descend without mmap_sem.
No problem: just make sure that a vma is unlinked from its prio_tree (or
nonlinear list) and from its anon_vma list, after zapping the vma, but before
freeing its page tables. Then neither vmtruncate nor rmap can reach that vma
whose page tables are now volatile (nor do they need to reach it, since all
its page entries have been zapped by this stage).
The i_mmap_lock and anon_vma->lock already serialize this correctly; but the
locking hierarchy is such that we cannot take them while holding
page_table_lock. Well, we're trying to push that down anyway. So in this
patch, move anon_vma_unlink and unlink_file_vma into free_pgtables, at the
same time as moving page_table_lock around calls to unmap_vmas.
tlb_gather_mmu and tlb_finish_mmu then fall outside the page_table_lock, but
we made them preempt_disable and preempt_enable earlier; and a long source
audit of all the architectures has shown no problem with removing
page_table_lock from them. free_pgtables doesn't need page_table_lock for
itself, nor for what it calls; tlb->mm->nr_ptes is usually protected by
page_table_lock, but partly by non-exclusive mmap_sem - here it's decremented
with exclusive mmap_sem, or mm_users 0. update_hiwater_rss and
vm_unacct_memory don't need page_table_lock either.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:29 +00:00
|
|
|
/*
|
|
|
|
* Hide vma from rmap and vmtruncate before freeing pgtables
|
|
|
|
*/
|
|
|
|
anon_vma_unlink(vma);
|
|
|
|
unlink_file_vma(vma);
|
|
|
|
|
[PATCH] hugepage: Fix hugepage logic in free_pgtables()
free_pgtables() has special logic to call hugetlb_free_pgd_range() instead
of the normal free_pgd_range() on hugepage VMAs. However, the test it uses
to do so is incorrect: it calls is_hugepage_only_range on a hugepage sized
range at the start of the vma. is_hugepage_only_range() will return true
if the given range has any intersection with a hugepage address region, and
in this case the given region need not be hugepage aligned. So, for
example, this test can return true if called on, say, a 4k VMA immediately
preceding a (nicely aligned) hugepage VMA.
At present we get away with this because the powerpc version of
hugetlb_free_pgd_range() is just a call to free_pgd_range(). On ia64 (the
only other arch with a non-trivial is_hugepage_only_range()) we get away
with it for a different reason; the hugepage area is not contiguous with
the rest of the user address space, and VMAs are not permitted in between,
so the test can't return a false positive there.
Nonetheless this should be fixed. We do that in the patch below by
replacing the is_hugepage_only_range() test with an explicit test of the
VMA using is_vm_hugetlb_page().
This in turn changes behaviour for platforms where is_hugepage_only_range()
returns false always (everything except powerpc and ia64). We address this
by ensuring that hugetlb_free_pgd_range() is defined to be identical to
free_pgd_range() (instead of a no-op) on everything except ia64. Even so,
it will prevent some otherwise possible coalescing of calls down to
free_pgd_range(). Since this only happens for hugepage VMAs, removing this
small optimization seems unlikely to cause any trouble.
This patch causes no regressions on the libhugetlbfs testsuite - ppc64
POWER5 (8-way), ppc64 G5 (2-way) and i386 Pentium M (UP).
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Acked-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-22 08:08:57 +00:00
|
|
|
if (is_vm_hugetlb_page(vma)) {
|
2005-04-19 20:29:16 +00:00
|
|
|
hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
|
2005-04-19 20:29:15 +00:00
|
|
|
floor, next? next->vm_start: ceiling);
|
2005-04-19 20:29:16 +00:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Optimization: gather nearby vmas into one call down
|
|
|
|
*/
|
|
|
|
while (next && next->vm_start <= vma->vm_end + PMD_SIZE
|
2006-03-22 08:08:58 +00:00
|
|
|
&& !is_vm_hugetlb_page(next)) {
|
2005-04-19 20:29:16 +00:00
|
|
|
vma = next;
|
|
|
|
next = vma->vm_next;
|
2005-10-30 01:16:29 +00:00
|
|
|
anon_vma_unlink(vma);
|
|
|
|
unlink_file_vma(vma);
|
2005-04-19 20:29:16 +00:00
|
|
|
}
|
|
|
|
free_pgd_range(tlb, addr, vma->vm_end,
|
|
|
|
floor, next? next->vm_start: ceiling);
|
|
|
|
}
|
2005-04-19 20:29:15 +00:00
|
|
|
vma = next;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2005-10-30 01:16:22 +00:00
|
|
|
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2005-10-30 01:16:23 +00:00
|
|
|
struct page *new = pte_alloc_one(mm, address);
|
2005-10-30 01:16:22 +00:00
|
|
|
if (!new)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
[PATCH] mm: split page table lock
Christoph Lameter demonstrated very poor scalability on the SGI 512-way, with
a many-threaded application which concurrently initializes different parts of
a large anonymous area.
This patch corrects that, by using a separate spinlock per page table page, to
guard the page table entries in that page, instead of using the mm's single
page_table_lock. (But even then, page_table_lock is still used to guard page
table allocation, and anon_vma allocation.)
In this implementation, the spinlock is tucked inside the struct page of the
page table page: with a BUILD_BUG_ON in case it overflows - which it would in
the case of 32-bit PA-RISC with spinlock debugging enabled.
Splitting the lock is not quite for free: another cacheline access. Ideally,
I suppose we would use split ptlock only for multi-threaded processes on
multi-cpu machines; but deciding that dynamically would have its own costs.
So for now enable it by config, at some number of cpus - since the Kconfig
language doesn't support inequalities, let preprocessor compare that with
NR_CPUS. But I don't think it's worth being user-configurable: for good
testing of both split and unsplit configs, split now at 4 cpus, and perhaps
change that to 8 later.
There is a benefit even for singly threaded processes: kswapd can be attacking
one part of the mm while another part is busy faulting.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:40 +00:00
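For reference, the pattern a pte walker follows once the lock is split is the pairing used later in this file by copy_pte_range() and zap_pte_range(); the fragment below is only a usage sketch of that pairing, not additional kernel code, and walk_one_pte_page is a made-up name.

/* usage sketch only - mirrors the pte_offset_map_lock() callers later in this file */
static void walk_one_pte_page(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	/* map the pte page and take its lock: the per-page-table-page lock
	 * when split ptlock is configured, otherwise mm->page_table_lock */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	/* ... examine or modify pte entries in this page ... */
	pte_unmap_unlock(pte, ptl);
}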
|
|
|
pte_lock_init(new);
|
2005-10-30 01:16:23 +00:00
|
|
|
spin_lock(&mm->page_table_lock);
|
2005-10-30 01:16:40 +00:00
|
|
|
if (pmd_present(*pmd)) { /* Another has populated it */
|
|
|
|
pte_lock_deinit(new);
|
2005-10-30 01:16:22 +00:00
|
|
|
pte_free(new);
|
2005-10-30 01:16:40 +00:00
|
|
|
} else {
|
2005-04-16 22:20:36 +00:00
|
|
|
mm->nr_ptes++;
|
2006-06-30 08:55:38 +00:00
|
|
|
inc_zone_page_state(new, NR_PAGETABLE);
|
2005-04-16 22:20:36 +00:00
|
|
|
pmd_populate(mm, pmd, new);
|
|
|
|
}
|
2005-10-30 01:16:23 +00:00
|
|
|
spin_unlock(&mm->page_table_lock);
|
2005-10-30 01:16:22 +00:00
|
|
|
return 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2005-10-30 01:16:22 +00:00
|
|
|
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2005-10-30 01:16:22 +00:00
|
|
|
pte_t *new = pte_alloc_one_kernel(&init_mm, address);
|
|
|
|
if (!new)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
spin_lock(&init_mm.page_table_lock);
|
|
|
|
if (pmd_present(*pmd)) /* Another has populated it */
|
|
|
|
pte_free_kernel(new);
|
|
|
|
else
|
|
|
|
pmd_populate_kernel(&init_mm, pmd, new);
|
|
|
|
spin_unlock(&init_mm.page_table_lock);
|
|
|
|
return 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2005-10-30 01:16:05 +00:00
|
|
|
static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
|
|
|
|
{
|
|
|
|
if (file_rss)
|
|
|
|
add_mm_counter(mm, file_rss, file_rss);
|
|
|
|
if (anon_rss)
|
|
|
|
add_mm_counter(mm, anon_rss, anon_rss);
|
|
|
|
}
|
|
|
|
|
2005-10-30 01:16:12 +00:00
|
|
|
/*
|
2005-11-28 22:34:23 +00:00
|
|
|
* This function is called to print an error when a bad pte
|
|
|
|
* is found. For example, we might have a PFN-mapped pte in
|
|
|
|
* a region that doesn't allow it.
|
2005-10-30 01:16:12 +00:00
|
|
|
*
|
|
|
|
* The calling function must still handle the error.
|
|
|
|
*/
|
|
|
|
void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
|
|
|
|
{
|
|
|
|
printk(KERN_ERR "Bad pte = %08llx, process = %s, "
|
|
|
|
"vm_flags = %lx, vaddr = %lx\n",
|
|
|
|
(long long)pte_val(pte),
|
|
|
|
(vma->vm_mm == current->mm ? current->comm : "???"),
|
|
|
|
vma->vm_flags, vaddr);
|
|
|
|
dump_stack();
|
|
|
|
}
|
|
|
|
|
2005-12-12 04:38:17 +00:00
|
|
|
static inline int is_cow_mapping(unsigned int flags)
|
|
|
|
{
|
|
|
|
return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
|
|
|
|
}
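A quick illustration of what that flag test selects (typical flag combinations, assumed for illustration; not from the source):

/*
 *	mmap(MAP_PRIVATE, PROT_READ|PROT_WRITE)	-> VM_MAYWRITE set, VM_SHARED clear	-> COW mapping
 *	mmap(MAP_PRIVATE, PROT_READ)		-> VM_MAYWRITE still set (mprotect could add write) -> COW mapping
 *	mmap(MAP_SHARED) with write permitted	-> VM_SHARED set			-> not a COW mapping
 */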
|
|
|
|
|
[PATCH] unpaged: anon in VM_UNPAGED
copy_one_pte needs to copy the anonymous COWed pages in a VM_UNPAGED area,
zap_pte_range needs to free them, do_wp_page needs to COW them: just like
ordinary pages, not like the unpaged.
But recognizing them is a little subtle: because PageReserved is no longer a
condition for remap_pfn_range, we can now mmap all of /dev/mem (whether the
distro permits, and whether it's advisable on this or that architecture, is
another matter). So if we can see a PageAnon, it may not be ours to mess with
(or may be ours from elsewhere in the address space). I suspect there's an
entertaining insoluble self-referential problem here, but the page_is_anon
function does a good practical job, and MAP_PRIVATE PROT_WRITE VM_UNPAGED will
always be an odd choice.
In updating the comment on page_address_in_vma, noticed a potential NULL
dereference, in a path we don't actually take, but fixed it.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-11-22 05:32:18 +00:00
|
|
|
/*
|
2005-11-28 22:34:23 +00:00
|
|
|
* This function gets the "struct page" associated with a pte.
|
|
|
|
*
|
|
|
|
* NOTE! Some mappings do not have "struct pages". A raw PFN mapping
|
|
|
|
* will have each page table entry just pointing to a raw page frame
|
|
|
|
* number, and as far as the VM layer is concerned, those do not have
|
|
|
|
* pages associated with them - even if the PFN might point to memory
|
|
|
|
* that otherwise is perfectly fine and has a "struct page".
|
|
|
|
*
|
|
|
|
* The way we recognize those mappings is through the rules set up
|
|
|
|
* by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
|
|
|
|
* and the vm_pgoff will point to the first PFN mapped: thus every
|
|
|
|
* page that is a raw mapping will always honor the rule
|
|
|
|
*
|
|
|
|
* pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
|
|
|
|
*
|
|
|
|
* and if that isn't true, the page has been COW'ed (in which case it
|
|
|
|
* _does_ have a "struct page" associated with it even if it is in a
|
|
|
|
* VM_PFNMAP range).
|
2005-11-22 05:32:18 +00:00
|
|
|
*/
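A worked instance of that rule, with assumed numbers:

/*
 * Suppose remap_pfn_range() set up a VM_PFNMAP vma with
 * vm_start = 0x40000000 and vm_pgoff = 0x100, i.e. pfn 0x100 is mapped
 * at vm_start.  Then for addr = 0x40003000 with 4K pages:
 *
 *	off          = (addr - vm_start) >> PAGE_SHIFT = 3
 *	expected pfn = vm_pgoff + off                  = 0x103
 *
 * A pte whose pfn is 0x103 is a raw frame with no struct page, so
 * vm_normal_page() returns NULL; in a COW mapping, a different pfn means
 * the page was COWed and does have a struct page.
 */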
|
2005-11-28 22:34:23 +00:00
|
|
|
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
|
2005-11-22 05:32:18 +00:00
|
|
|
{
|
2005-11-28 22:34:23 +00:00
|
|
|
unsigned long pfn = pte_pfn(pte);
|
|
|
|
|
2006-03-22 08:08:42 +00:00
|
|
|
if (unlikely(vma->vm_flags & VM_PFNMAP)) {
|
2005-11-28 22:34:23 +00:00
|
|
|
unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
|
|
|
|
if (pfn == vma->vm_pgoff + off)
|
|
|
|
return NULL;
|
2005-12-12 04:38:17 +00:00
|
|
|
if (!is_cow_mapping(vma->vm_flags))
|
2005-12-12 03:46:02 +00:00
|
|
|
return NULL;
|
2005-11-28 22:34:23 +00:00
|
|
|
}
|
|
|
|
|
2006-03-25 15:20:22 +00:00
|
|
|
/*
|
|
|
|
* Add some anal sanity checks for now. Eventually,
|
|
|
|
* we should just do "return pfn_to_page(pfn)", but
|
|
|
|
* in the meantime we check that we get a valid pfn,
|
|
|
|
* and that the resulting page looks ok.
|
|
|
|
*/
|
2005-11-28 22:34:23 +00:00
|
|
|
if (unlikely(!pfn_valid(pfn))) {
|
|
|
|
print_bad_pte(vma, pte, addr);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* NOTE! We still have PageReserved() pages in the page
|
|
|
|
* tables.
|
|
|
|
*
|
|
|
|
* The ZERO_PAGE() pages and various VDSO mappings can
|
|
|
|
* cause them to exist.
|
|
|
|
*/
|
|
|
|
return pfn_to_page(pfn);
|
2005-11-22 05:32:18 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* copy one vm_area from one task to the other. Assumes the page tables
|
|
|
|
* already present in the new task to be cleared in the whole range
|
|
|
|
* covered by this vma.
|
|
|
|
*/
|
|
|
|
|
2005-10-30 01:16:13 +00:00
|
|
|
static inline void
|
2005-04-16 22:20:36 +00:00
|
|
|
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
2005-10-30 01:16:12 +00:00
|
|
|
pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
|
2005-10-30 01:16:13 +00:00
|
|
|
unsigned long addr, int *rss)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2005-10-30 01:16:12 +00:00
|
|
|
unsigned long vm_flags = vma->vm_flags;
|
2005-04-16 22:20:36 +00:00
|
|
|
pte_t pte = *src_pte;
|
|
|
|
struct page *page;
|
|
|
|
|
|
|
|
/* pte contains position in swap or file, so copy. */
|
|
|
|
if (unlikely(!pte_present(pte))) {
|
|
|
|
if (!pte_file(pte)) {
|
[PATCH] Swapless page migration: add R/W migration entries
Implement read/write migration ptes
We take the upper two swapfiles for the two types of migration ptes and define
a series of macros in swapops.h.
The VM is modified to handle the migration entries. migration entries can
only be encountered when the page they are pointing to is locked. This limits
the number of places one has to fix. We also check in copy_pte_range and in
mprotect_pte_range() for migration ptes.
We check for migration ptes in do_swap_page and call a function that will
then wait on the page lock. This allows us to effectively stop all accesses
to the page.
Migration entries are created by try_to_unmap if called for migration and
removed by local functions in migrate.c
From: Hugh Dickins <hugh@veritas.com>
Several times while testing swapless page migration (I've no NUMA, just
hacking it up to migrate recklessly while running load), I've hit the
BUG_ON(!PageLocked(p)) in migration_entry_to_page.
This comes from an orphaned migration entry, unrelated to the current
correctly locked migration, but hit by remove_anon_migration_ptes as it
checks an address in each vma of the anon_vma list.
Such an orphan may be left behind if an earlier migration raced with fork:
copy_one_pte can duplicate a migration entry from parent to child, after
remove_anon_migration_ptes has checked the child vma, but before it has
removed it from the parent vma. (If the process were later to fault on this
orphaned entry, it would hit the same BUG from migration_entry_wait.)
This could be fixed by locking anon_vma in copy_one_pte, but we'd rather
not. There's no such problem with file pages, because vma_prio_tree_add
adds child vma after parent vma, and the page table locking at each end is
enough to serialize. Follow that example with anon_vma: add new vmas to the
tail instead of the head.
(There's no corresponding problem when inserting migration entries,
because a missed pte will leave the page count and mapcount high, which is
allowed for. And there's no corresponding problem when migrating via swap,
because a leftover swap entry will be correctly faulted. But the swapless
method has no refcounting of its entries.)
From: Ingo Molnar <mingo@elte.hu>
pte_unmap_unlock() takes the pte pointer as an argument.
From: Hugh Dickins <hugh@veritas.com>
Several times while testing swapless page migration, gcc has tried to exec
a pointer instead of a string: smells like COW mappings are not being
properly write-protected on fork.
The protection in copy_one_pte looks very convincing, until at last you
realize that the second arg to make_migration_entry is a boolean "write",
and SWP_MIGRATION_READ is 30.
Anyway, it's better done like in change_pte_range, using
is_write_migration_entry and make_migration_entry_read.
From: Hugh Dickins <hugh@veritas.com>
Remove unnecessary obfuscation from sys_swapon's range check on swap type,
which blew up causing memory corruption once swapless migration made
MAX_SWAPFILES no longer 2 ^ MAX_SWAPFILES_SHIFT.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Christoph Lameter <clameter@engr.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
From: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-23 09:03:35 +00:00
|
|
|
swp_entry_t entry = pte_to_swp_entry(pte);
|
|
|
|
|
|
|
|
swap_duplicate(entry);
|
2005-04-16 22:20:36 +00:00
|
|
|
/* make sure dst_mm is on swapoff's mmlist. */
|
|
|
|
if (unlikely(list_empty(&dst_mm->mmlist))) {
|
|
|
|
spin_lock(&mmlist_lock);
|
2005-10-30 01:16:41 +00:00
|
|
|
if (list_empty(&dst_mm->mmlist))
|
|
|
|
list_add(&dst_mm->mmlist,
|
|
|
|
&src_mm->mmlist);
|
2005-04-16 22:20:36 +00:00
|
|
|
spin_unlock(&mmlist_lock);
|
|
|
|
}
|
2006-06-23 09:03:35 +00:00
|
|
|
if (is_write_migration_entry(entry) &&
|
|
|
|
is_cow_mapping(vm_flags)) {
|
|
|
|
/*
|
|
|
|
* COW mappings require pages in both parent
|
|
|
|
* and child to be set to read.
|
|
|
|
*/
|
|
|
|
make_migration_entry_read(&entry);
|
|
|
|
pte = swp_entry_to_pte(entry);
|
|
|
|
set_pte_at(src_mm, addr, src_pte, pte);
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2005-10-30 01:16:05 +00:00
|
|
|
goto out_set_pte;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If it's a COW mapping, write protect it both
|
|
|
|
* in the parent and the child
|
|
|
|
*/
|
2005-12-12 04:38:17 +00:00
|
|
|
if (is_cow_mapping(vm_flags)) {
|
2005-04-16 22:20:36 +00:00
|
|
|
ptep_set_wrprotect(src_mm, addr, src_pte);
|
2006-10-01 06:29:30 +00:00
|
|
|
pte = pte_wrprotect(pte);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If it's a shared mapping, mark it clean in
|
|
|
|
* the child
|
|
|
|
*/
|
|
|
|
if (vm_flags & VM_SHARED)
|
|
|
|
pte = pte_mkclean(pte);
|
|
|
|
pte = pte_mkold(pte);
|
2005-11-28 22:34:23 +00:00
|
|
|
|
|
|
|
page = vm_normal_page(vma, addr, pte);
|
|
|
|
if (page) {
|
|
|
|
get_page(page);
|
|
|
|
page_dup_rmap(page);
|
|
|
|
rss[!!PageAnon(page)]++;
|
|
|
|
}
|
2005-10-30 01:16:05 +00:00
|
|
|
|
|
|
|
out_set_pte:
|
|
|
|
set_pte_at(dst_mm, addr, dst_pte, pte);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
|
|
|
pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
|
|
|
|
unsigned long addr, unsigned long end)
|
|
|
|
{
|
|
|
|
pte_t *src_pte, *dst_pte;
|
2005-10-30 01:16:23 +00:00
|
|
|
spinlock_t *src_ptl, *dst_ptl;
|
2005-10-30 01:15:53 +00:00
|
|
|
int progress = 0;
|
2005-10-30 01:16:13 +00:00
|
|
|
int rss[2];
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
again:
|
2005-10-30 01:16:05 +00:00
|
|
|
rss[1] = rss[0] = 0;
|
2005-10-30 01:16:23 +00:00
|
|
|
dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!dst_pte)
|
|
|
|
return -ENOMEM;
|
|
|
|
src_pte = pte_offset_map_nested(src_pmd, addr);
|
2005-10-30 01:16:40 +00:00
|
|
|
src_ptl = pte_lockptr(src_mm, src_pmd);
|
2006-07-03 07:25:08 +00:00
|
|
|
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
|
2006-10-01 06:29:33 +00:00
|
|
|
arch_enter_lazy_mmu_mode();
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
do {
|
|
|
|
/*
|
|
|
|
* We are holding two locks at this point - either of them
|
|
|
|
* could generate latencies in another task on another CPU.
|
|
|
|
*/
|
2005-10-30 01:15:53 +00:00
|
|
|
if (progress >= 32) {
|
|
|
|
progress = 0;
|
|
|
|
if (need_resched() ||
|
2005-10-30 01:16:23 +00:00
|
|
|
need_lockbreak(src_ptl) ||
|
|
|
|
need_lockbreak(dst_ptl))
|
2005-10-30 01:15:53 +00:00
|
|
|
break;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
if (pte_none(*src_pte)) {
|
|
|
|
progress++;
|
|
|
|
continue;
|
|
|
|
}
|
2005-10-30 01:16:13 +00:00
|
|
|
copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
|
2005-04-16 22:20:36 +00:00
|
|
|
progress += 8;
|
|
|
|
} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
|
|
|
|
|
2006-10-01 06:29:33 +00:00
|
|
|
arch_leave_lazy_mmu_mode();
|
2005-10-30 01:16:23 +00:00
|
|
|
spin_unlock(src_ptl);
|
2005-04-16 22:20:36 +00:00
|
|
|
pte_unmap_nested(src_pte - 1);
|
2005-10-30 01:16:05 +00:00
|
|
|
add_mm_rss(dst_mm, rss[0], rss[1]);
|
2005-10-30 01:16:23 +00:00
|
|
|
pte_unmap_unlock(dst_pte - 1, dst_ptl);
|
|
|
|
cond_resched();
|
2005-04-16 22:20:36 +00:00
|
|
|
if (addr != end)
|
|
|
|
goto again;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
|
|
|
pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
|
|
|
|
unsigned long addr, unsigned long end)
|
|
|
|
{
|
|
|
|
pmd_t *src_pmd, *dst_pmd;
|
|
|
|
unsigned long next;
|
|
|
|
|
|
|
|
dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
|
|
|
|
if (!dst_pmd)
|
|
|
|
return -ENOMEM;
|
|
|
|
src_pmd = pmd_offset(src_pud, addr);
|
|
|
|
do {
|
|
|
|
next = pmd_addr_end(addr, end);
|
|
|
|
if (pmd_none_or_clear_bad(src_pmd))
|
|
|
|
continue;
|
|
|
|
if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
|
|
|
|
vma, addr, next))
|
|
|
|
return -ENOMEM;
|
|
|
|
} while (dst_pmd++, src_pmd++, addr = next, addr != end);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
|
|
|
pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
|
|
|
|
unsigned long addr, unsigned long end)
|
|
|
|
{
|
|
|
|
pud_t *src_pud, *dst_pud;
|
|
|
|
unsigned long next;
|
|
|
|
|
|
|
|
dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
|
|
|
|
if (!dst_pud)
|
|
|
|
return -ENOMEM;
|
|
|
|
src_pud = pud_offset(src_pgd, addr);
|
|
|
|
do {
|
|
|
|
next = pud_addr_end(addr, end);
|
|
|
|
if (pud_none_or_clear_bad(src_pud))
|
|
|
|
continue;
|
|
|
|
if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
|
|
|
|
vma, addr, next))
|
|
|
|
return -ENOMEM;
|
|
|
|
} while (dst_pud++, src_pud++, addr = next, addr != end);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
|
|
|
struct vm_area_struct *vma)
|
|
|
|
{
|
|
|
|
pgd_t *src_pgd, *dst_pgd;
|
|
|
|
unsigned long next;
|
|
|
|
unsigned long addr = vma->vm_start;
|
|
|
|
unsigned long end = vma->vm_end;
|
|
|
|
|
2005-08-28 06:49:11 +00:00
|
|
|
/*
|
|
|
|
* Don't copy ptes where a page fault will fill them correctly.
|
|
|
|
* Fork becomes much lighter when there are big shared or private
|
|
|
|
* readonly mappings. The tradeoff is that copy_page_range is more
|
|
|
|
* efficient than faulting.
|
|
|
|
*/
|
2005-12-16 18:21:23 +00:00
|
|
|
if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
|
2005-08-28 06:49:11 +00:00
|
|
|
if (!vma->anon_vma)
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
if (is_vm_hugetlb_page(vma))
|
|
|
|
return copy_hugetlb_page_range(dst_mm, src_mm, vma);
|
|
|
|
|
|
|
|
dst_pgd = pgd_offset(dst_mm, addr);
|
|
|
|
src_pgd = pgd_offset(src_mm, addr);
|
|
|
|
do {
|
|
|
|
next = pgd_addr_end(addr, end);
|
|
|
|
if (pgd_none_or_clear_bad(src_pgd))
|
|
|
|
continue;
|
|
|
|
if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
|
|
|
|
vma, addr, next))
|
|
|
|
return -ENOMEM;
|
|
|
|
} while (dst_pgd++, src_pgd++, addr = next, addr != end);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-11-14 00:06:42 +00:00
|
|
|
static unsigned long zap_pte_range(struct mmu_gather *tlb,
|
2005-10-30 01:16:12 +00:00
|
|
|
struct vm_area_struct *vma, pmd_t *pmd,
|
2005-04-16 22:20:36 +00:00
|
|
|
unsigned long addr, unsigned long end,
|
2005-11-14 00:06:42 +00:00
|
|
|
long *zap_work, struct zap_details *details)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2005-10-30 01:16:12 +00:00
|
|
|
struct mm_struct *mm = tlb->mm;
|
2005-04-16 22:20:36 +00:00
|
|
|
pte_t *pte;
|
2005-10-30 01:16:30 +00:00
|
|
|
spinlock_t *ptl;
|
2005-10-30 01:16:05 +00:00
|
|
|
int file_rss = 0;
|
|
|
|
int anon_rss = 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-10-30 01:16:30 +00:00
|
|
|
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
|
2006-10-01 06:29:33 +00:00
|
|
|
arch_enter_lazy_mmu_mode();
|
2005-04-16 22:20:36 +00:00
|
|
|
do {
|
|
|
|
pte_t ptent = *pte;
|
2005-11-14 00:06:42 +00:00
|
|
|
if (pte_none(ptent)) {
|
|
|
|
(*zap_work)--;
|
2005-04-16 22:20:36 +00:00
|
|
|
continue;
|
2005-11-14 00:06:42 +00:00
|
|
|
}
|
2006-03-17 07:04:09 +00:00
|
|
|
|
|
|
|
(*zap_work) -= PAGE_SIZE;
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
if (pte_present(ptent)) {
|
2005-11-22 05:32:18 +00:00
|
|
|
struct page *page;
|
2005-11-14 00:06:42 +00:00
|
|
|
|
2005-11-28 22:34:23 +00:00
|
|
|
page = vm_normal_page(vma, addr, ptent);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (unlikely(details) && page) {
|
|
|
|
/*
|
|
|
|
* unmap_shared_mapping_pages() wants to
|
|
|
|
* invalidate cache without truncating:
|
|
|
|
* unmap shared but keep private pages.
|
|
|
|
*/
|
|
|
|
if (details->check_mapping &&
|
|
|
|
details->check_mapping != page->mapping)
|
|
|
|
continue;
|
|
|
|
/*
|
|
|
|
* Each page->index must be checked when
|
|
|
|
* invalidating or truncating nonlinear.
|
|
|
|
*/
|
|
|
|
if (details->nonlinear_vma &&
|
|
|
|
(page->index < details->first_index ||
|
|
|
|
page->index > details->last_index))
|
|
|
|
continue;
|
|
|
|
}
|
2005-10-30 01:16:12 +00:00
|
|
|
ptent = ptep_get_and_clear_full(mm, addr, pte,
|
[PATCH] x86: ptep_clear optimization
Add a new accessor for PTEs, which passes the full hint from the mmu_gather
struct; this allows architectures with hardware pagetables to optimize away
atomic PTE operations when destroying an address space. Removing the
locked operation should allow better pipelining of memory access in this
loop. I measured an average savings of 30-35 cycles per zap_pte_range on
the first 500 destructions on Pentium-M, but I believe the optimization
would win more on older processors which still assert the bus lock on xchg
for an exclusive cacheline.
Update: I made some new measurements, and this saves exactly 26 cycles over
ptep_get_and_clear on Pentium M. On P4, with a PAE kernel, this saves 180
cycles per ptep_get_and_clear, for a whopping 92160 cycles savings for a
full address space destruction.
pte_clear_full is not yet used, but is provided for future optimizations
(in particular, when running inside of a hypervisor that queues page table
updates, the full hint allows us to avoid queueing unnecessary page table
updates for an address space in the process of being destroyed).
This is not a huge win, but it does help a bit, and sets the stage for
further hypervisor optimization of the mm layer on all architectures.
Signed-off-by: Zachary Amsden <zach@vmware.com>
Cc: Christoph Lameter <christoph@lameter.com>
Cc: <linux-mm@kvack.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-09-03 22:55:04 +00:00
|
|
|
tlb->fullmm);
|
2005-04-16 22:20:36 +00:00
|
|
|
tlb_remove_tlb_entry(tlb, pte, addr);
|
|
|
|
if (unlikely(!page))
|
|
|
|
continue;
|
|
|
|
if (unlikely(details) && details->nonlinear_vma
|
|
|
|
&& linear_page_index(details->nonlinear_vma,
|
|
|
|
addr) != page->index)
|
2005-10-30 01:16:12 +00:00
|
|
|
set_pte_at(mm, addr, pte,
|
2005-04-16 22:20:36 +00:00
|
|
|
pgoff_to_pte(page->index));
|
|
|
|
if (PageAnon(page))
|
2005-10-30 01:16:14 +00:00
|
|
|
anon_rss--;
|
2005-10-30 01:15:54 +00:00
|
|
|
else {
|
|
|
|
if (pte_dirty(ptent))
|
|
|
|
set_page_dirty(page);
|
|
|
|
if (pte_young(ptent))
|
2007-02-10 09:43:18 +00:00
|
|
|
SetPageReferenced(page);
|
2005-10-30 01:16:14 +00:00
|
|
|
file_rss--;
|
2005-10-30 01:15:54 +00:00
|
|
|
}
|
2006-12-22 09:09:33 +00:00
|
|
|
page_remove_rmap(page, vma);
|
2005-04-16 22:20:36 +00:00
|
|
|
tlb_remove_page(tlb, page);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* If details->check_mapping, we leave swap entries;
|
|
|
|
* if details->nonlinear_vma, we leave file entries.
|
|
|
|
*/
|
|
|
|
if (unlikely(details))
|
|
|
|
continue;
|
|
|
|
if (!pte_file(ptent))
|
|
|
|
free_swap_and_cache(pte_to_swp_entry(ptent));
|
2006-10-01 06:29:31 +00:00
|
|
|
pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
|
2005-11-14 00:06:42 +00:00
|
|
|
} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
|
2005-10-30 01:16:05 +00:00
|
|
|
|
2005-10-30 01:16:14 +00:00
|
|
|
add_mm_rss(mm, file_rss, anon_rss);
|
2006-10-01 06:29:33 +00:00
|
|
|
arch_leave_lazy_mmu_mode();
|
2005-10-30 01:16:30 +00:00
|
|
|
pte_unmap_unlock(pte - 1, ptl);
|
2005-11-14 00:06:42 +00:00
|
|
|
|
|
|
|
return addr;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2005-11-14 00:06:42 +00:00
|
|
|
static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
|
2005-10-30 01:16:12 +00:00
|
|
|
struct vm_area_struct *vma, pud_t *pud,
|
2005-04-16 22:20:36 +00:00
|
|
|
unsigned long addr, unsigned long end,
|
2005-11-14 00:06:42 +00:00
|
|
|
long *zap_work, struct zap_details *details)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
pmd_t *pmd;
|
|
|
|
unsigned long next;
|
|
|
|
|
|
|
|
pmd = pmd_offset(pud, addr);
|
|
|
|
do {
|
|
|
|
next = pmd_addr_end(addr, end);
|
2005-11-14 00:06:42 +00:00
|
|
|
if (pmd_none_or_clear_bad(pmd)) {
|
|
|
|
(*zap_work)--;
|
2005-04-16 22:20:36 +00:00
|
|
|
continue;
|
2005-11-14 00:06:42 +00:00
|
|
|
}
|
|
|
|
next = zap_pte_range(tlb, vma, pmd, addr, next,
|
|
|
|
zap_work, details);
|
|
|
|
} while (pmd++, addr = next, (addr != end && *zap_work > 0));
|
|
|
|
|
|
|
|
return addr;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2005-11-14 00:06:42 +00:00
|
|
|
static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
|
2005-10-30 01:16:12 +00:00
|
|
|
struct vm_area_struct *vma, pgd_t *pgd,
|
2005-04-16 22:20:36 +00:00
|
|
|
unsigned long addr, unsigned long end,
|
2005-11-14 00:06:42 +00:00
|
|
|
long *zap_work, struct zap_details *details)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
pud_t *pud;
|
|
|
|
unsigned long next;
|
|
|
|
|
|
|
|
pud = pud_offset(pgd, addr);
|
|
|
|
do {
|
|
|
|
next = pud_addr_end(addr, end);
|
2005-11-14 00:06:42 +00:00
|
|
|
if (pud_none_or_clear_bad(pud)) {
|
|
|
|
(*zap_work)--;
|
2005-04-16 22:20:36 +00:00
|
|
|
continue;
|
2005-11-14 00:06:42 +00:00
|
|
|
}
|
|
|
|
next = zap_pmd_range(tlb, vma, pud, addr, next,
|
|
|
|
zap_work, details);
|
|
|
|
} while (pud++, addr = next, (addr != end && *zap_work > 0));
|
|
|
|
|
|
|
|
return addr;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2005-11-14 00:06:42 +00:00
|
|
|
static unsigned long unmap_page_range(struct mmu_gather *tlb,
|
|
|
|
struct vm_area_struct *vma,
|
2005-04-16 22:20:36 +00:00
|
|
|
unsigned long addr, unsigned long end,
|
2005-11-14 00:06:42 +00:00
|
|
|
long *zap_work, struct zap_details *details)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
pgd_t *pgd;
|
|
|
|
unsigned long next;
|
|
|
|
|
|
|
|
if (details && !details->check_mapping && !details->nonlinear_vma)
|
|
|
|
details = NULL;
|
|
|
|
|
|
|
|
BUG_ON(addr >= end);
|
|
|
|
tlb_start_vma(tlb, vma);
|
|
|
|
pgd = pgd_offset(vma->vm_mm, addr);
|
|
|
|
do {
|
|
|
|
next = pgd_addr_end(addr, end);
|
2005-11-14 00:06:42 +00:00
|
|
|
if (pgd_none_or_clear_bad(pgd)) {
|
|
|
|
(*zap_work)--;
|
2005-04-16 22:20:36 +00:00
|
|
|
continue;
|
2005-11-14 00:06:42 +00:00
|
|
|
}
|
|
|
|
next = zap_pud_range(tlb, vma, pgd, addr, next,
|
|
|
|
zap_work, details);
|
|
|
|
} while (pgd++, addr = next, (addr != end && *zap_work > 0));
|
2005-04-16 22:20:36 +00:00
|
|
|
tlb_end_vma(tlb, vma);
|
2005-11-14 00:06:42 +00:00
|
|
|
|
|
|
|
return addr;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_PREEMPT
|
|
|
|
# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
|
|
|
|
#else
|
|
|
|
/* No preempt: go for improved straight-line efficiency */
|
|
|
|
# define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
|
|
|
|
#endif
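For a sense of scale (assuming 4K pages; the page size is an assumption, not fixed by this file):

/*
 * With 4K pages, ZAP_BLOCK_SIZE is 8 * 4K = 32KB under CONFIG_PREEMPT,
 * so the unmap loop below offers a lock-break/reschedule point roughly
 * every 8 present ptes; without preempt it is 1024 * 4K = 4MB, favouring
 * straight-line throughput over latency.
 */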

/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlbp: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Returns the end address of the unmapping (restart addr if interrupted).
 *
 * Unmap all pages in the vma list.
 *
 * We aim to not hold locks for too long (for scheduling latency reasons).
 * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
 * return the ending mmu_gather to the caller.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns.  So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
unsigned long unmap_vmas(struct mmu_gather **tlbp,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details)
{
	long zap_work = ZAP_BLOCK_SIZE;
	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
	int tlb_start_valid = 0;
	unsigned long start = start_addr;
	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
	int fullmm = (*tlbp)->fullmm;

	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
		unsigned long end;

		start = max(vma->vm_start, start_addr);
		if (start >= vma->vm_end)
			continue;
		end = min(vma->vm_end, end_addr);
		if (end <= vma->vm_start)
			continue;

		if (vma->vm_flags & VM_ACCOUNT)
			*nr_accounted += (end - start) >> PAGE_SHIFT;

		while (start != end) {
			if (!tlb_start_valid) {
				tlb_start = start;
				tlb_start_valid = 1;
			}

			if (unlikely(is_vm_hugetlb_page(vma))) {
				unmap_hugepage_range(vma, start, end);
				zap_work -= (end - start) /
						(HPAGE_SIZE / PAGE_SIZE);
				start = end;
			} else
				start = unmap_page_range(*tlbp, vma,
						start, end, &zap_work, details);

			if (zap_work > 0) {
				BUG_ON(start != end);
				break;
			}

			tlb_finish_mmu(*tlbp, tlb_start, start);

			if (need_resched() ||
				(i_mmap_lock && need_lockbreak(i_mmap_lock))) {
				if (i_mmap_lock) {
					*tlbp = NULL;
					goto out;
				}
				cond_resched();
			}

			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
			tlb_start_valid = 0;
			zap_work = ZAP_BLOCK_SIZE;
		}
	}
out:
	return start;	/* which is now the end (or restart) address */
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 */
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather *tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	update_hiwater_rss(mm);
	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
	if (tlb)
		tlb_finish_mmu(tlb, address, end);
	return end;
}
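
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller that wants to tear down every user page of a single vma, along
 * the lines of an madvise(MADV_DONTNEED)-style path, can do so with one
 * call.  The helper name is hypothetical; mmap_sem must be held.
 */
#if 0
static void example_zap_whole_vma(struct vm_area_struct *vma)
{
	/* NULL details means "no truncation/invalidation constraints". */
	zap_page_range(vma, vma->vm_start,
			vma->vm_end - vma->vm_start, NULL);
}
#endif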

/*
 * Do a quick page-table lookup for a single page.
 */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		goto out;
	}

	page = NULL;
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page_table;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto no_page_table;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto no_page_table;

	if (pmd_huge(*pmd)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		goto out;
	}

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		goto out;

	pte = *ptep;
	if (!pte_present(pte))
		goto unlock;
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;
	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page))
		goto unlock;

	if (flags & FOLL_GET)
		get_page(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		mark_page_accessed(page);
	}
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return page;

no_page_table:
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate page tables.
	 */
	if (flags & FOLL_ANON) {
		page = ZERO_PAGE(address);
		if (flags & FOLL_GET)
			get_page(page);
		BUG_ON(flags & FOLL_WRITE);
	}
	return page;
}
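
/*
 * Usage sketch (illustrative only, hypothetical helper): look up the
 * single page backing one user address.  FOLL_GET makes follow_page()
 * take a reference, which the caller must drop with put_page().
 */
#if 0
static void example_touch_one_page(struct vm_area_struct *vma,
					unsigned long address)
{
	struct page *page;

	page = follow_page(vma, address, FOLL_GET | FOLL_TOUCH);
	if (!page)
		return;		/* not mapped, or no struct page behind it */
	/* ... examine the page here ... */
	put_page(page);		/* drop the FOLL_GET reference */
}
#endif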

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int len, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	unsigned int vm_flags;

	/*
	 * Require read or write permissions.
	 * If 'force' is set, we only require the "MAY" flags.
	 */
	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;
		unsigned int foll_flags;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(tsk, start)) {
			unsigned long pg = start & PAGE_MASK;
			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;
			pte_t *pte;
			if (write) /* user gate pages are read-only */
				return i ? : -EFAULT;
			if (pg > TASK_SIZE)
				pgd = pgd_offset_k(pg);
			else
				pgd = pgd_offset_gate(mm, pg);
			BUG_ON(pgd_none(*pgd));
			pud = pud_offset(pgd, pg);
			BUG_ON(pud_none(*pud));
			pmd = pmd_offset(pud, pg);
			if (pmd_none(*pmd))
				return i ? : -EFAULT;
			pte = pte_offset_map(pmd, pg);
			if (pte_none(*pte)) {
				pte_unmap(pte);
				return i ? : -EFAULT;
			}
			if (pages) {
				struct page *page = vm_normal_page(gate_vma, start, *pte);
				pages[i] = page;
				if (page)
					get_page(page);
			}
			pte_unmap(pte);
			if (vmas)
				vmas[i] = gate_vma;
			i++;
			start += PAGE_SIZE;
			len--;
			continue;
		}

		if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
				|| !(vm_flags & vma->vm_flags))
			return i ? : -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &len, i);
			continue;
		}

		foll_flags = FOLL_TOUCH;
		if (pages)
			foll_flags |= FOLL_GET;
		if (!write && !(vma->vm_flags & VM_LOCKED) &&
		    (!vma->vm_ops || !vma->vm_ops->nopage))
			foll_flags |= FOLL_ANON;

		do {
			struct page *page;

			if (write)
				foll_flags |= FOLL_WRITE;

			cond_resched();
			while (!(page = follow_page(vma, start, foll_flags))) {
				int ret;
				ret = __handle_mm_fault(mm, vma, start,
						foll_flags & FOLL_WRITE);
				/*
				 * The VM_FAULT_WRITE bit tells us that do_wp_page has
				 * broken COW when necessary, even if maybe_mkwrite
				 * decided not to set pte_write. We can thus safely do
				 * subsequent page lookups as if they were reads.
				 */
				if (ret & VM_FAULT_WRITE)
					foll_flags &= ~FOLL_WRITE;

				switch (ret & ~VM_FAULT_WRITE) {
				case VM_FAULT_MINOR:
					tsk->min_flt++;
					break;
				case VM_FAULT_MAJOR:
					tsk->maj_flt++;
					break;
				case VM_FAULT_SIGBUS:
					return i ? i : -EFAULT;
				case VM_FAULT_OOM:
					return i ? i : -ENOMEM;
				default:
					BUG();
				}
				cond_resched();
			}
			if (pages) {
				pages[i] = page;
				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			len--;
		} while (len && start < vma->vm_end);
	} while (len);
	return i;
}
EXPORT_SYMBOL(get_user_pages);
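
/*
 * Usage sketch (illustrative only, hypothetical driver code): pin the
 * pages backing a user buffer, e.g. before setting up DMA to it.  The
 * pages must later be released with page_cache_release() (and marked
 * dirty if the device wrote to them).
 */
#if 0
static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
					struct page **pages)
{
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
				nr_pages, 1 /* write */, 0 /* force */,
				pages, NULL);
	up_read(&current->mm->mmap_sem);

	return ret;	/* number of pages actually pinned, or -errno */
}
#endif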

static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;
	int err = 0;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -EAGAIN;
	arch_enter_lazy_mmu_mode();
	do {
		struct page *page = ZERO_PAGE(addr);
		pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));

		if (unlikely(!pte_none(*pte))) {
			err = -EEXIST;
			pte++;
			break;
		}
		page_cache_get(page);
		page_add_file_rmap(page);
		inc_mm_counter(mm, file_rss);
		set_pte_at(mm, addr, pte, zero_pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
	return err;
}

static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;
	int err;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -EAGAIN;
	do {
		next = pmd_addr_end(addr, end);
		err = zeromap_pte_range(mm, pmd, addr, next, prot);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);
	return err;
}

static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;
	int err;

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -EAGAIN;
	do {
		next = pud_addr_end(addr, end);
		err = zeromap_pmd_range(mm, pud, addr, next, prot);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);
	return err;
}

int zeromap_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + size;
	struct mm_struct *mm = vma->vm_mm;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = zeromap_pud_range(mm, pgd, addr, next, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	return err;
}
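
/*
 * Usage sketch (illustrative only): this is broadly how a /dev/zero
 * style mmap path can populate a private mapping with write-protected
 * zero pages; the function name here is hypothetical, and a real caller
 * would also handle the -EAGAIN/-EEXIST return values.
 */
#if 0
static int example_mmap_zero(struct file *file, struct vm_area_struct *vma)
{
	return zeromap_page_range(vma, vma->vm_start,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
#endif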

pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
{
	pgd_t * pgd = pgd_offset(mm, addr);
	pud_t * pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd_t * pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			return pte_alloc_map_lock(mm, pmd, addr, ptl);
	}
	return NULL;
}

/*
 * This is the old fallback for page remapping.
 *
 * For historical reasons, it only allows reserved pages. Only
 * old drivers should use this, and they needed to mark their
 * pages reserved for the old functions anyway.
 */
static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
{
	int retval;
	pte_t *pte;
	spinlock_t *ptl;

	retval = -EINVAL;
	if (PageAnon(page))
		goto out;
	retval = -ENOMEM;
	flush_dcache_page(page);
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	get_page(page);
	inc_mm_counter(mm, file_rss);
	page_add_file_rmap(page);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));

	retval = 0;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

/**
 * vm_insert_page - insert single page into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @page: source kernel page
 *
 * This allows drivers to insert individual pages they've allocated
 * into a user vma.
 *
 * The page has to be a nice clean _individual_ kernel allocation.
 * If you allocate a compound page, you need to have marked it as
 * such (__GFP_COMP), or manually just split the page up yourself
 * (see split_page()).
 *
 * NOTE! Traditionally this was done with "remap_pfn_range()" which
 * took an arbitrary page protection parameter. This doesn't allow
 * that. Your vma protection will have to be set up correctly, which
 * means that if you want a shared writable mapping, you'd better
 * ask for a shared writable mapping!
 *
 * The page does not need to be reserved.
 */
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
{
	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (!page_count(page))
		return -EINVAL;
	vma->vm_flags |= VM_INSERTPAGE;
	return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);
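
/*
 * Usage sketch (illustrative only, hypothetical driver): hand one
 * freshly allocated kernel page to userspace from an mmap method.
 * vm_insert_page() takes its own reference on the page, so the driver
 * keeps this one and frees the page when it is done with it.
 */
#if 0
static int example_mmap_one_page(struct file *file, struct vm_area_struct *vma)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return -ENOMEM;
	return vm_insert_page(vma, vma->vm_start, page);
}
#endif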

/**
 * vm_insert_pfn - insert single pfn into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @pfn: source kernel pfn
 *
 * Similar to vm_insert_page, this allows drivers to insert individual pages
 * they've allocated into a user vma. Same comments apply.
 *
 * This function should only be called from a vm_ops->fault handler, and
 * in that case the handler should return NULL.
 */
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte, entry;
	spinlock_t *ptl;

	BUG_ON(!(vma->vm_flags & VM_PFNMAP));
	BUG_ON(is_cow_mapping(vma->vm_flags));

	retval = -ENOMEM;
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	entry = pfn_pte(pfn, vma->vm_page_prot);
	set_pte_at(mm, addr, pte, entry);
	update_mmu_cache(vma, addr, entry);

	retval = 0;
out_unlock:
	pte_unmap_unlock(pte, ptl);

out:
	return retval;
}
EXPORT_SYMBOL(vm_insert_pfn);
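
/*
 * Usage sketch (illustrative only, hypothetical helper): a driver that
 * has already set up a VM_PFNMAP, non-COW vma can install the pfn for
 * one faulting address like this.  "phys_base" is an assumption of the
 * example, not something defined in this file.
 */
#if 0
static int example_insert_device_pfn(struct vm_area_struct *vma,
					unsigned long addr,
					unsigned long phys_base)
{
	unsigned long pfn = (phys_base >> PAGE_SHIFT) +
				((addr - vma->vm_start) >> PAGE_SHIFT);

	return vm_insert_pfn(vma, addr, pfn);
}
#endif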

/*
 * Maps a range of physical memory into the requested pages.  The old
 * mappings are removed.  Any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access").
 */
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	arch_enter_lazy_mmu_mode();
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (remap_pte_range(mm, pmd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (remap_pmd_range(mm, pud, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/**
 * remap_pfn_range - remap kernel memory to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: page frame number of the kernel physical memory to map
 * @size: size of map area
 * @prot: page protection flags for this mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + PAGE_ALIGN(size);
	struct mm_struct *mm = vma->vm_mm;
	int err;

	/*
	 * Physically remapped pages are special. Tell the
	 * rest of the world about it:
	 *   VM_IO tells people not to look at these pages
	 *	(accesses can have side effects).
	 *   VM_RESERVED is specified all over the place, because
	 *	in 2.4 it kept swapout's vma scan off this vma; but
	 *	in 2.6 the LRU scan won't even find its pages, so this
	 *	flag means no more than count its pages in reserved_vm,
	 *	and omit it from core dump, even when VM_IO turned off.
	 *   VM_PFNMAP tells the core MM that the base pages are just
	 *	raw PFN mappings, and do not have a "struct page"
	 *	associated with them.
	 *
	 * There's a horrible special case to handle copy-on-write
	 * behaviour that some programs depend on. We mark the "original"
	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
	 */
	if (is_cow_mapping(vma->vm_flags)) {
		if (addr != vma->vm_start || end != vma->vm_end)
			return -EINVAL;
		vma->vm_pgoff = pfn;
	}

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

	BUG_ON(addr >= end);
	pfn -= addr >> PAGE_SHIFT;
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_pud_range(mm, pgd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	return err;
}
EXPORT_SYMBOL(remap_pfn_range);
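
/*
 * Usage sketch (illustrative only, hypothetical driver): the classic
 * way a character driver exposes a device memory region to userspace
 * from its mmap method.  EXAMPLE_PHYS_BASE is an assumption of the
 * example, standing in for the device's physical base address.
 */
#if 0
static int example_mmio_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (remap_pfn_range(vma, vma->vm_start,
			    EXAMPLE_PHYS_BASE >> PAGE_SHIFT,
			    size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
#endif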

/*
 * handle_pte_fault chooses page fault handler according to an entry
 * which was read non-atomically.  Before making any commitment, on
 * those architectures or configurations (e.g. i386 with PAE) which
 * might give a mix of unmatched parts, do_swap_page and do_file_page
 * must check under lock before unmapping the pte and proceeding
 * (but do_wp_page is only called after already making such a check;
 * and do_anonymous_page and do_no_page can safely check later on).
 */
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spinlock_t *ptl = pte_lockptr(mm, pmd);
		spin_lock(ptl);
		same = pte_same(*page_table, orig_pte);
		spin_unlock(ptl);
	}
#endif
	pte_unmap(page_table);
	return same;
}
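
/*
 * Caller pattern (illustrative sketch, not a new function in this
 * file): a fault handler that peeked at the pte without the lock bails
 * out if the entry changed underneath it, e.g.:
 *
 *	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
 *		goto out;
 */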

/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case we always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
	/*
	 * If the source page was a PFN mapping, we don't have
	 * a "struct page" for it. We do a best-effort copy by
	 * just copying from the original user address. If that
	 * fails, we just zero-fill it. Live with it.
	 */
	if (unlikely(!src)) {
		void *kaddr = kmap_atomic(dst, KM_USER0);
		void __user *uaddr = (void __user *)(va & PAGE_MASK);

		/*
		 * This really shouldn't fail, because the page is there
		 * in the page tables. But it might just be unreadable,
		 * in which case we just give up and fill the result with
		 * zeroes.
		 */
		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
			memset(kaddr, 0, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(dst);
		return;

	}
	copy_user_highpage(dst, src, va, vma);
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* This routine handles present pages, when users try to write
|
|
|
|
* to a shared page. It is done by copying the page to a new address
|
|
|
|
* and decrementing the shared-page counter for the old page.
|
|
|
|
*
|
|
|
|
* Note that this routine assumes that the protection checks have been
|
|
|
|
* done by the caller (the low-level page fault routine in most cases).
|
|
|
|
* Thus we can safely just mark it writable once we've done any necessary
|
|
|
|
* COW.
|
|
|
|
*
|
|
|
|
* We also mark the page dirty at this point even though the page will
|
|
|
|
* change only once the write actually happens. This avoids a few races,
|
|
|
|
* and potentially makes it more efficient.
|
|
|
|
*
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
* We enter with non-exclusive mmap_sem (to exclude vma changes,
|
|
|
|
* but allow concurrent faults), with pte both mapped and locked.
|
|
|
|
* We return with mmap_sem still held, but pte unmapped and unlocked.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
|
unsigned long address, pte_t *page_table, pmd_t *pmd,
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
spinlock_t *ptl, pte_t orig_pte)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2005-11-29 16:54:51 +00:00
|
|
|
struct page *old_page, *new_page;
|
2005-04-16 22:20:36 +00:00
|
|
|
pte_t entry;
|
2006-09-26 06:30:57 +00:00
|
|
|
int reuse = 0, ret = VM_FAULT_MINOR;
|
|
|
|
struct page *dirty_page = NULL;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-11-28 22:34:23 +00:00
|
|
|
old_page = vm_normal_page(vma, address, orig_pte);
|
|
|
|
if (!old_page)
|
|
|
|
goto gotten;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-09-26 06:30:57 +00:00
|
|
|
/*
|
2006-09-26 06:31:00 +00:00
|
|
|
* Take out anonymous pages first, anonymous shared vmas are
|
|
|
|
* not dirty accountable.
|
2006-09-26 06:30:57 +00:00
|
|
|
*/
|
2006-09-26 06:31:00 +00:00
|
|
|
if (PageAnon(old_page)) {
|
|
|
|
if (!TestSetPageLocked(old_page)) {
|
|
|
|
reuse = can_share_swap_page(old_page);
|
|
|
|
unlock_page(old_page);
|
|
|
|
}
|
|
|
|
} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
|
2006-09-26 06:30:57 +00:00
|
|
|
(VM_WRITE|VM_SHARED))) {
|
2006-09-26 06:31:00 +00:00
|
|
|
/*
|
|
|
|
* Only catch write-faults on shared writable pages,
|
|
|
|
* read-only shared pages can get COWed by
|
|
|
|
* get_user_pages(.write=1, .force=1).
|
|
|
|
*/
|
2006-06-23 09:03:43 +00:00
|
|
|
if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
|
|
|
|
/*
|
|
|
|
* Notify the address space that the page is about to
|
|
|
|
* become writable so that it can prohibit this or wait
|
|
|
|
* for the page to get into an appropriate state.
|
|
|
|
*
|
|
|
|
* We do this without the lock held, so that it can
|
|
|
|
* sleep if it needs to.
|
|
|
|
*/
|
|
|
|
page_cache_get(old_page);
|
|
|
|
pte_unmap_unlock(page_table, ptl);
|
|
|
|
|
|
|
|
if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
|
|
|
|
goto unwritable_page;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Since we dropped the lock we need to revalidate
|
|
|
|
* the PTE as someone else may have changed it. If
|
|
|
|
* they did, we just return, as we can count on the
|
|
|
|
* MMU to tell us if they didn't also make it writable.
|
|
|
|
*/
|
|
|
|
page_table = pte_offset_map_lock(mm, pmd, address,
|
|
|
|
&ptl);
|
2007-02-10 09:43:00 +00:00
|
|
|
page_cache_release(old_page);
|
2006-06-23 09:03:43 +00:00
|
|
|
if (!pte_same(*page_table, orig_pte))
|
|
|
|
goto unlock;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-09-26 06:30:57 +00:00
|
|
|
dirty_page = old_page;
|
|
|
|
get_page(dirty_page);
|
2006-06-23 09:03:43 +00:00
|
|
|
reuse = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (reuse) {
|
|
|
|
flush_cache_page(vma, address, pte_pfn(orig_pte));
|
|
|
|
entry = pte_mkyoung(orig_pte);
|
|
|
|
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
|
|
|
|
ptep_set_access_flags(vma, address, page_table, entry, 1);
|
|
|
|
update_mmu_cache(vma, address, entry);
|
|
|
|
lazy_mmu_prot_update(entry);
|
|
|
|
ret |= VM_FAULT_WRITE;
|
|
|
|
goto unlock;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ok, we need to copy. Oh, well..
|
|
|
|
*/
|
2005-10-30 01:16:12 +00:00
|
|
|
page_cache_get(old_page);
|
2005-11-22 05:32:17 +00:00
|
|
|
gotten:
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
pte_unmap_unlock(page_table, ptl);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
if (unlikely(anon_vma_prepare(vma)))
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
goto oom;
|
2005-11-29 16:54:51 +00:00
|
|
|
if (old_page == ZERO_PAGE(address)) {
|
2005-04-16 22:20:36 +00:00
|
|
|
new_page = alloc_zeroed_user_highpage(vma, address);
|
|
|
|
if (!new_page)
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
goto oom;
|
2005-04-16 22:20:36 +00:00
|
|
|
} else {
|
|
|
|
new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
|
|
|
|
if (!new_page)
|
2005-10-30 01:15:59 +00:00
|
|
|
goto oom;
|
2006-12-12 17:14:55 +00:00
|
|
|
cow_user_page(new_page, old_page, address, vma);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2005-10-30 01:15:59 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Re-check the pte - we dropped the lock
|
|
|
|
*/
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate that orig_pte is still the same, then
drop page_table_lock before unmapping and proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
|
2005-10-30 01:15:59 +00:00
|
|
|
if (likely(pte_same(*page_table, orig_pte))) {
|
2005-11-22 05:32:17 +00:00
|
|
|
if (old_page) {
|
2006-12-22 09:09:33 +00:00
|
|
|
page_remove_rmap(old_page, vma);
|
2005-11-22 05:32:17 +00:00
|
|
|
if (!PageAnon(old_page)) {
|
|
|
|
dec_mm_counter(mm, file_rss);
|
|
|
|
inc_mm_counter(mm, anon_rss);
|
|
|
|
}
|
|
|
|
} else
|
2005-10-30 01:16:05 +00:00
|
|
|
inc_mm_counter(mm, anon_rss);
|
2005-11-29 19:45:26 +00:00
|
|
|
flush_cache_page(vma, address, pte_pfn(orig_pte));
|
2005-10-30 01:15:59 +00:00
|
|
|
entry = mk_pte(new_page, vma->vm_page_prot);
|
|
|
|
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
|
[PATCH] ia64: race flushing icache in COW path
There is a race condition that showed up in a threaded JIT environment.
The situation is that a process with a JIT code page forks, so the page is
marked read-only, then some threads are created in the child. One of the
threads attempts to add a new code block to the JIT page, so a
copy-on-write fault is taken, and the kernel allocates a new page, copies
the data, installs the new pte, and then calls lazy_mmu_prot_update() to
flush caches to make sure that the icache and dcache are in sync.
Unfortunately, the other thread runs right after the new pte is installed,
but before the caches have been flushed. It tries to execute some old JIT
code that was already in this page, but it sees some garbage in the i-cache
from the previous users of the new physical page.
Fix: we must make the caches consistent before installing the pte. This is
an ia64 only fix because lazy_mmu_prot_update() is a no-op on all other
architectures.
Signed-off-by: Anil Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-07-14 07:23:57 +00:00
|
|
|
lazy_mmu_prot_update(entry);
|
[PATCH] mm: fix a race condition under SMC + COW
The failing context is a multi-threaded process, and the failing
sequence is as follows.
One thread T0 doing self modifying code on page X on processor P0 and
another thread T1 doing COW (breaking the COW setup as part of just
happened fork() in another thread T2) on the same page X on processor P1.
T0 doing SMC can end up modifying the new page Y (allocated by T1 doing
COW on P1) but because of different I/D TLB's, P0 ITLB will not see the new
mapping till the flush TLB IPI from P1 is received. During this interval,
if T0 executes the code created by SMC it can result in an app error (as
ITLB still points to the old page X and ends up executing the content in page X
rather than using the content in page Y).
Fix this issue by first clearing the PTE and flushing it, before updating
it with new entry.
Hugh sayeth:
I was a bit sceptical, in the habit of thinking that Self Modifying Code
must look out for such issues itself: but I guess there's nothing it can do to avoid
this one.
Fair enough, what you're changing it to is pretty much what powerpc and
s390 were already doing, and is a more robust way of proceeding, consistent
with how ptes are set everywhere else.
The ptep_clear_flush is a bit heavy-handed (it's anxious to return the pte
that was atomically cleared), but we'd have to wander through lots of arches
to get the right minimal behaviour. It'd also be nice to eliminate
ptep_establish completely, now only used to define other macros/inlines: it
always seemed obfuscation to me, what you've got there now is clearer.
Let's put those cleanups on a TODO list.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: "David S. Miller" <davem@davemloft.net>
Acked-by: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-09-29 08:58:42 +00:00
|
|
|
/*
|
|
|
|
* Clear the pte entry and flush it first, before updating the
|
|
|
|
* pte with the new entry. This will avoid a race condition
|
|
|
|
* seen in the presence of one thread doing SMC and another
|
|
|
|
* thread doing COW.
|
|
|
|
*/
|
|
|
|
ptep_clear_flush(vma, address, page_table);
|
|
|
|
set_pte_at(mm, address, page_table, entry);
|
2005-10-30 01:15:59 +00:00
|
|
|
update_mmu_cache(vma, address, entry);
|
2005-04-16 22:20:36 +00:00
|
|
|
lru_cache_add_active(new_page);
|
2006-01-06 08:11:12 +00:00
|
|
|
page_add_new_anon_rmap(new_page, vma, address);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/* Free the old page.. */
|
|
|
|
new_page = old_page;
|
[PATCH] fix get_user_pages bug
Checking pte_dirty instead of pte_write in __follow_page is problematic
for s390, and for copy_one_pte which leaves dirty when clearing write.
So revert __follow_page to check pte_write as before, and make
do_wp_page pass back a special extra VM_FAULT_WRITE bit to say it has
done its full job: once get_user_pages receives this value, it no longer
requires pte_write in __follow_page.
But most callers of handle_mm_fault, in the various architectures, have
switch statements which do not expect this new case. To avoid changing
them all in a hurry, make an inline wrapper function (using the old
name) that masks off the new bit, and use the extended interface with
double underscores.
Yes, we do have a call to do_wp_page from do_swap_page, but no need to
change that: in the rare case it's needed, another do_wp_page will follow.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
[ Cleanups by Nick Piggin ]
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-08-03 10:24:01 +00:00
|
|
|
ret |= VM_FAULT_WRITE;
|
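The "inline wrapper function (using the old name) that masks off the new bit" mentioned in the commit message above lives outside this file; as a sketch (assumed shape, not quoted from include/linux/mm.h), it amounts to:
/*
 * Sketch only: arch fault handlers keep calling handle_mm_fault() and never
 * see VM_FAULT_WRITE; get_user_pages() calls the double-underscore version
 * directly, so it alone sees whether do_wp_page() really broke COW.
 */
static inline int handle_mm_fault(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long address,
		int write_access)
{
	return __handle_mm_fault(mm, vma, address, write_access) &
				(~VM_FAULT_WRITE);
}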
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2005-11-22 05:32:17 +00:00
|
|
|
if (new_page)
|
|
|
|
page_cache_release(new_page);
|
|
|
|
if (old_page)
|
|
|
|
page_cache_release(old_page);
|
2005-10-30 01:15:59 +00:00
|
|
|
unlock:
|
2005-10-30 01:16:26 +00:00
|
|
|
pte_unmap_unlock(page_table, ptl);
|
2006-09-26 06:30:57 +00:00
|
|
|
if (dirty_page) {
|
2006-09-26 06:30:58 +00:00
|
|
|
set_page_dirty_balance(dirty_page);
|
2006-09-26 06:30:57 +00:00
|
|
|
put_page(dirty_page);
|
|
|
|
}
|
2005-08-03 10:24:01 +00:00
|
|
|
return ret;
|
2005-10-30 01:15:59 +00:00
|
|
|
oom:
|
2005-11-22 05:32:17 +00:00
|
|
|
if (old_page)
|
|
|
|
page_cache_release(old_page);
|
2005-04-16 22:20:36 +00:00
|
|
|
return VM_FAULT_OOM;
|
2006-06-23 09:03:43 +00:00
|
|
|
|
|
|
|
unwritable_page:
|
|
|
|
page_cache_release(old_page);
|
|
|
|
return VM_FAULT_SIGBUS;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Helper functions for unmap_mapping_range().
|
|
|
|
*
|
|
|
|
* __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
|
|
|
|
*
|
|
|
|
* We have to restart searching the prio_tree whenever we drop the lock,
|
|
|
|
* since the iterator is only valid while the lock is held, and anyway
|
|
|
|
* a later vma might be split and reinserted earlier while lock dropped.
|
|
|
|
*
|
|
|
|
* The list of nonlinear vmas could be handled more efficiently, using
|
|
|
|
* a placeholder, but handle it in the same way until a need is shown.
|
|
|
|
* It is important to search the prio_tree before nonlinear list: a vma
|
|
|
|
* may become nonlinear and be shifted from prio_tree to nonlinear list
|
|
|
|
* while the lock is dropped; but never shifted from list to prio_tree.
|
|
|
|
*
|
|
|
|
* In order to make forward progress despite restarting the search,
|
|
|
|
* vm_truncate_count is used to mark a vma as now dealt with, so we can
|
|
|
|
* quickly skip it next time around. Since the prio_tree search only
|
|
|
|
* shows us those vmas affected by unmapping the range in question, we
|
|
|
|
* can't efficiently keep all vmas in step with mapping->truncate_count:
|
|
|
|
* so instead reset them all whenever it wraps back to 0 (then go to 1).
|
|
|
|
* mapping->truncate_count and vma->vm_truncate_count are protected by
|
|
|
|
* i_mmap_lock.
|
|
|
|
*
|
|
|
|
* In order to make forward progress despite repeatedly restarting some
|
2005-04-19 20:29:15 +00:00
|
|
|
* large vma, note the restart_addr from unmap_vmas when it breaks out:
|
2005-04-16 22:20:36 +00:00
|
|
|
* and restart from that address when we reach that vma again. It might
|
|
|
|
* have been split or merged, shrunk or extended, but never shifted: so
|
|
|
|
* restart_addr remains valid so long as it remains in the vma's range.
|
|
|
|
* unmap_mapping_range forces truncate_count to leap over page-aligned
|
|
|
|
* values so we can save vma's restart_addr in its truncate_count field.
|
|
|
|
*/
|
|
|
|
#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
|
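A short illustration of the is_restart_addr() convention described in the comment block above (a descriptive note added here, assuming 4K pages):
/*
 * Example (4K pages): restart addresses returned by zap_page_range() are
 * page aligned (0x1000, 0x2000, ...), so their low PAGE_SHIFT bits are zero
 * and is_restart_addr() is true for them.  unmap_mapping_range() below bumps
 * truncate_count a second time whenever an increment lands on such a value
 * (0x1000 becomes 0x1001; a wrap to 0 becomes 1 after resetting every vma),
 * so a vm_truncate_count holding a truncate_count is never mistaken for a
 * saved restart_addr.
 */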
|
|
|
|
|
|
|
static void reset_vma_truncate_counts(struct address_space *mapping)
|
|
|
|
{
|
|
|
|
struct vm_area_struct *vma;
|
|
|
|
struct prio_tree_iter iter;
|
|
|
|
|
|
|
|
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
|
|
|
|
vma->vm_truncate_count = 0;
|
|
|
|
list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
|
|
|
|
vma->vm_truncate_count = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int unmap_mapping_range_vma(struct vm_area_struct *vma,
|
|
|
|
unsigned long start_addr, unsigned long end_addr,
|
|
|
|
struct zap_details *details)
|
|
|
|
{
|
|
|
|
unsigned long restart_addr;
|
|
|
|
int need_break;
|
|
|
|
|
|
|
|
again:
|
|
|
|
restart_addr = vma->vm_truncate_count;
|
|
|
|
if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
|
|
|
|
start_addr = restart_addr;
|
|
|
|
if (start_addr >= end_addr) {
|
|
|
|
/* Top of vma has been split off since last time */
|
|
|
|
vma->vm_truncate_count = details->truncate_count;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-04-19 20:29:15 +00:00
|
|
|
restart_addr = zap_page_range(vma, start_addr,
|
|
|
|
end_addr - start_addr, details);
|
2005-04-16 22:20:36 +00:00
|
|
|
need_break = need_resched() ||
|
|
|
|
need_lockbreak(details->i_mmap_lock);
|
|
|
|
|
2005-04-19 20:29:15 +00:00
|
|
|
if (restart_addr >= end_addr) {
|
2005-04-16 22:20:36 +00:00
|
|
|
/* We have now completed this vma: mark it so */
|
|
|
|
vma->vm_truncate_count = details->truncate_count;
|
|
|
|
if (!need_break)
|
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
/* Note restart_addr in vma's truncate_count field */
|
2005-04-19 20:29:15 +00:00
|
|
|
vma->vm_truncate_count = restart_addr;
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!need_break)
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock(details->i_mmap_lock);
|
|
|
|
cond_resched();
|
|
|
|
spin_lock(details->i_mmap_lock);
|
|
|
|
return -EINTR;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
|
|
|
|
struct zap_details *details)
|
|
|
|
{
|
|
|
|
struct vm_area_struct *vma;
|
|
|
|
struct prio_tree_iter iter;
|
|
|
|
pgoff_t vba, vea, zba, zea;
|
|
|
|
|
|
|
|
restart:
|
|
|
|
vma_prio_tree_foreach(vma, &iter, root,
|
|
|
|
details->first_index, details->last_index) {
|
|
|
|
/* Skip quickly over those we have already dealt with */
|
|
|
|
if (vma->vm_truncate_count == details->truncate_count)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
vba = vma->vm_pgoff;
|
|
|
|
vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
|
|
|
|
/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
|
|
|
|
zba = details->first_index;
|
|
|
|
if (zba < vba)
|
|
|
|
zba = vba;
|
|
|
|
zea = details->last_index;
|
|
|
|
if (zea > vea)
|
|
|
|
zea = vea;
|
|
|
|
|
|
|
|
if (unmap_mapping_range_vma(vma,
|
|
|
|
((zba - vba) << PAGE_SHIFT) + vma->vm_start,
|
|
|
|
((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
|
|
|
|
details) < 0)
|
|
|
|
goto restart;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void unmap_mapping_range_list(struct list_head *head,
|
|
|
|
struct zap_details *details)
|
|
|
|
{
|
|
|
|
struct vm_area_struct *vma;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* In nonlinear VMAs there is no correspondence between virtual address
|
|
|
|
* offset and file offset. So we must perform an exhaustive search
|
|
|
|
* across *all* the pages in each nonlinear VMA, not just the pages
|
|
|
|
* whose virtual address lies outside the file truncation point.
|
|
|
|
*/
|
|
|
|
restart:
|
|
|
|
list_for_each_entry(vma, head, shared.vm_set.list) {
|
|
|
|
/* Skip quickly over those we have already dealt with */
|
|
|
|
if (vma->vm_truncate_count == details->truncate_count)
|
|
|
|
continue;
|
|
|
|
details->nonlinear_vma = vma;
|
|
|
|
if (unmap_mapping_range_vma(vma, vma->vm_start,
|
|
|
|
vma->vm_end, details) < 0)
|
|
|
|
goto restart;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2007-02-10 09:45:59 +00:00
|
|
|
* unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
|
2005-06-24 05:05:21 +00:00
|
|
|
* @mapping: the address space containing mmaps to be unmapped.
|
2005-04-16 22:20:36 +00:00
|
|
|
* @holebegin: byte in first page to unmap, relative to the start of
|
|
|
|
* the underlying file. This will be rounded down to a PAGE_SIZE
|
|
|
|
* boundary. Note that this is different from vmtruncate(), which
|
|
|
|
* must keep the partial page. In contrast, we must get rid of
|
|
|
|
* partial pages.
|
|
|
|
* @holelen: size of prospective hole in bytes. This will be rounded
|
|
|
|
* up to a PAGE_SIZE boundary. A holelen of zero truncates to the
|
|
|
|
* end of the file.
|
|
|
|
* @even_cows: 1 when truncating a file, unmap even private COWed pages;
|
|
|
|
* but 0 when invalidating pagecache, don't throw away private data.
|
|
|
|
*/
|
|
|
|
void unmap_mapping_range(struct address_space *mapping,
|
|
|
|
loff_t const holebegin, loff_t const holelen, int even_cows)
|
|
|
|
{
|
|
|
|
struct zap_details details;
|
|
|
|
pgoff_t hba = holebegin >> PAGE_SHIFT;
|
|
|
|
pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
|
|
|
|
|
|
|
/* Check for overflow. */
|
|
|
|
if (sizeof(holelen) > sizeof(hlen)) {
|
|
|
|
long long holeend =
|
|
|
|
(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
|
|
|
if (holeend & ~(long long)ULONG_MAX)
|
|
|
|
hlen = ULONG_MAX - hba + 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
details.check_mapping = even_cows? NULL: mapping;
|
|
|
|
details.nonlinear_vma = NULL;
|
|
|
|
details.first_index = hba;
|
|
|
|
details.last_index = hba + hlen - 1;
|
|
|
|
if (details.last_index < details.first_index)
|
|
|
|
details.last_index = ULONG_MAX;
|
|
|
|
details.i_mmap_lock = &mapping->i_mmap_lock;
|
|
|
|
|
|
|
|
spin_lock(&mapping->i_mmap_lock);
|
|
|
|
|
|
|
|
/* serialize i_size write against truncate_count write */
|
|
|
|
smp_wmb();
|
|
|
|
/* Protect against page faults, and endless unmapping loops */
|
|
|
|
mapping->truncate_count++;
|
|
|
|
/*
|
|
|
|
* For archs where spin_lock has inclusive semantics like ia64
|
|
|
|
* this smp_mb() will prevent pagetable contents from being read
|
|
|
|
* before the truncate_count increment is visible to
|
|
|
|
* other cpus.
|
|
|
|
*/
|
|
|
|
smp_mb();
|
|
|
|
if (unlikely(is_restart_addr(mapping->truncate_count))) {
|
|
|
|
if (mapping->truncate_count == 0)
|
|
|
|
reset_vma_truncate_counts(mapping);
|
|
|
|
mapping->truncate_count++;
|
|
|
|
}
|
|
|
|
details.truncate_count = mapping->truncate_count;
|
|
|
|
|
|
|
|
if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
|
|
|
|
unmap_mapping_range_tree(&mapping->i_mmap, &details);
|
|
|
|
if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
|
|
|
|
unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
|
|
|
|
spin_unlock(&mapping->i_mmap_lock);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(unmap_mapping_range);
|
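As a usage illustration (hypothetical caller, not taken from this file), the even_cows convention documented above looks like this from the calling side; note that vmtruncate() below passes offset + PAGE_SIZE - 1 so the partial last page stays mapped, and a holelen of 0 to mean "to the end of the file":
	/* invalidate pagecache mappings of [start, start + len): keep private COWs */
	unmap_mapping_range(inode->i_mapping, start, len, 0);

	/* truncate at "offset": unmap through EOF, private COWed pages included */
	unmap_mapping_range(inode->i_mapping, offset + PAGE_SIZE - 1, 0, 1);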
|
|
|
|
2006-09-26 06:31:22 +00:00
|
|
|
/**
|
|
|
|
* vmtruncate - unmap mappings "freed" by truncate() syscall
|
|
|
|
* @inode: inode of the file used
|
|
|
|
* @offset: file offset to start truncating
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
* NOTE! We have to be ready to update the memory sharing
|
|
|
|
* between the file and the memory map for a potential last
|
|
|
|
* incomplete page. Ugly, but necessary.
|
|
|
|
*/
|
|
|
|
int vmtruncate(struct inode * inode, loff_t offset)
|
|
|
|
{
|
|
|
|
struct address_space *mapping = inode->i_mapping;
|
|
|
|
unsigned long limit;
|
|
|
|
|
|
|
|
if (inode->i_size < offset)
|
|
|
|
goto do_expand;
|
|
|
|
/*
|
|
|
|
* truncation of in-use swapfiles is disallowed - it would cause
|
|
|
|
* subsequent swapout to scribble on the now-freed blocks.
|
|
|
|
*/
|
|
|
|
if (IS_SWAPFILE(inode))
|
|
|
|
goto out_busy;
|
|
|
|
i_size_write(inode, offset);
|
|
|
|
unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
|
|
|
|
truncate_inode_pages(mapping, offset);
|
|
|
|
goto out_truncate;
|
|
|
|
|
|
|
|
do_expand:
|
|
|
|
limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
|
|
|
|
if (limit != RLIM_INFINITY && offset > limit)
|
|
|
|
goto out_sig;
|
|
|
|
if (offset > inode->i_sb->s_maxbytes)
|
|
|
|
goto out_big;
|
|
|
|
i_size_write(inode, offset);
|
|
|
|
|
|
|
|
out_truncate:
|
|
|
|
if (inode->i_op && inode->i_op->truncate)
|
|
|
|
inode->i_op->truncate(inode);
|
|
|
|
return 0;
|
|
|
|
out_sig:
|
|
|
|
send_sig(SIGXFSZ, current, 0);
|
|
|
|
out_big:
|
|
|
|
return -EFBIG;
|
|
|
|
out_busy:
|
|
|
|
return -ETXTBSY;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(vmtruncate);
|
|
|
|
|
[PATCH] madvise(MADV_REMOVE): remove pages from tmpfs shm backing store
Here is the patch to implement madvise(MADV_REMOVE) - which frees up a
given range of pages and its associated backing store. The current
implementation supports only shmfs/tmpfs; other filesystems return
-ENOSYS.
"Some app allocates large tmpfs files, then when some task quits and some
client disconnect, some memory can be released. However the only way to
release tmpfs-swap is to MADV_REMOVE". - Andrea Arcangeli
Databases want to use this feature to drop a section of their bufferpool
(shared memory segments) - without writing back to disk/swap space.
This feature is also useful for supporting hot-plug memory on UML.
Concerns raised by Andrew Morton:
- "We have no plan for holepunching! If we _do_ have such a plan (or
might in the future) then what would the API look like? I think
sys_holepunch(fd, start, len), so we should start out with that."
- Using madvise is very weird, because people will ask "why do I need to
mmap my file before I can stick a hole in it?"
- None of the other madvise operations call into the filesystem in this
manner. A broad question is: is this capability an MM operation or a
filesystem operation? truncate, for example, is a filesystem operation
which sometimes has MM side-effects. madvise is an mm operation and with
this patch, it gains FS side-effects, only they're really, really
significant ones."
Comments:
- Andrea suggested the fs operation too but then it's more efficient to
have it as a mm operation with fs side effects, because they don't
immediately know the fd and physical offset of the range. It's possible to
fixup in userland and to use the fs operation but it's more expensive,
the vmas are already in the kernel and we can use them.
Short term plan & Future Direction:
- We seem to need this interface only for shmfs/tmpfs files in the short
term. We have to add hooks into the filesystem for correctness and
completeness. This is what this patch does.
- In the future, plan is to support both fs and mmap apis also. This
also involves (other) filesystem specific functions to be implemented.
- Current patch doesn't support VM_NONLINEAR - which can be addressed in
the future.
Signed-off-by: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Andrea Arcangeli <andrea@suse.de>
Cc: Michael Kerrisk <mtk-manpages@gmx.net>
Cc: Ulrich Drepper <drepper@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-01-06 08:10:38 +00:00
|
|
|
int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
|
|
|
|
{
|
|
|
|
struct address_space *mapping = inode->i_mapping;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the underlying filesystem is not going to provide
|
|
|
|
* a way to truncate a range of blocks (punch a hole) -
|
|
|
|
* we should return failure right now.
|
|
|
|
*/
|
|
|
|
if (!inode->i_op || !inode->i_op->truncate_range)
|
|
|
|
return -ENOSYS;
|
|
|
|
|
2006-01-09 23:59:24 +00:00
|
|
|
mutex_lock(&inode->i_mutex);
|
2006-01-06 08:10:38 +00:00
|
|
|
down_write(&inode->i_alloc_sem);
|
|
|
|
unmap_mapping_range(mapping, offset, (end - offset), 1);
|
|
|
|
truncate_inode_pages_range(mapping, offset, end);
|
|
|
|
inode->i_op->truncate_range(inode, offset, end);
|
|
|
|
up_write(&inode->i_alloc_sem);
|
2006-01-09 23:59:24 +00:00
|
|
|
mutex_unlock(&inode->i_mutex);
|
2006-01-06 08:10:38 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
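To see the MADV_REMOVE interface described above from the caller's side ("why do I need to mmap my file before I can stick a hole in it?"), here is a hedged userspace sketch; punch_hole() is a hypothetical helper, the mapping is assumed to be a shared mapping of a tmpfs/shmem file, and unsupported filesystems surface the -ENOSYS from the path above:
#include <sys/mman.h>
#include <errno.h>

#ifndef MADV_REMOVE
#define MADV_REMOVE 9	/* value on Linux, in case the libc headers lack it */
#endif

/* Free the pages and their tmpfs backing store for [off, off + len)
 * inside an existing shared mapping of the file at "base". */
static int punch_hole(void *base, size_t off, size_t len)
{
	if (madvise((char *)base + off, len, MADV_REMOVE) == -1)
		return -errno;	/* e.g. -ENOSYS on an unsupported filesystem */
	return 0;
}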
|
|
|
|
2006-09-26 06:31:22 +00:00
|
|
|
/**
|
|
|
|
* swapin_readahead - swap in pages in hope we need them soon
|
|
|
|
* @entry: swap entry of this memory
|
|
|
|
* @addr: address to start
|
|
|
|
* @vma: user vma this address belongs to
|
|
|
|
*
|
2005-04-16 22:20:36 +00:00
|
|
|
* Primitive swap readahead code. We simply read an aligned block of
|
|
|
|
* (1 << page_cluster) entries in the swap area. This method is chosen
|
|
|
|
* because it doesn't cost us any seek time. We also make sure to queue
|
2006-09-26 06:31:22 +00:00
|
|
|
* the 'original' request together with the readahead ones...
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
* This has been extended to use the NUMA policies from the mm triggering
|
|
|
|
* the readahead.
|
|
|
|
*
|
|
|
|
* Caller must hold down_read on vma->vm_mm->mmap_sem if vma is not NULL.
|
|
|
|
*/
|
|
|
|
void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_NUMA
|
|
|
|
struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
|
|
|
|
#endif
|
|
|
|
int i, num;
|
|
|
|
struct page *new_page;
|
|
|
|
unsigned long offset;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get the number of swap handles we should do readahead io for.
|
|
|
|
*/
|
|
|
|
num = valid_swaphandles(entry, &offset);
|
|
|
|
for (i = 0; i < num; offset++, i++) {
|
|
|
|
/* Ok, do the async read-ahead now */
|
|
|
|
new_page = read_swap_cache_async(swp_entry(swp_type(entry),
|
|
|
|
offset), vma, addr);
|
|
|
|
if (!new_page)
|
|
|
|
break;
|
|
|
|
page_cache_release(new_page);
|
|
|
|
#ifdef CONFIG_NUMA
|
|
|
|
/*
|
|
|
|
* Find the next applicable VMA for the NUMA policy.
|
|
|
|
*/
|
|
|
|
addr += PAGE_SIZE;
|
|
|
|
if (addr == 0)
|
|
|
|
vma = NULL;
|
|
|
|
if (vma) {
|
|
|
|
if (addr >= vma->vm_end) {
|
|
|
|
vma = next_vma;
|
|
|
|
next_vma = vma ? vma->vm_next : NULL;
|
|
|
|
}
|
|
|
|
if (vma && addr < vma->vm_start)
|
|
|
|
vma = NULL;
|
|
|
|
} else {
|
|
|
|
if (next_vma && addr >= next_vma->vm_start) {
|
|
|
|
vma = next_vma;
|
|
|
|
next_vma = vma->vm_next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
lru_add_drain(); /* Push any new pages onto the LRU now */
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2005-10-30 01:16:26 +00:00
|
|
|
* We enter with non-exclusive mmap_sem (to exclude vma changes,
|
|
|
|
* but allow concurrent faults), and pte mapped but not yet locked.
|
|
|
|
* We return with mmap_sem still held, but pte unmapped and unlocked.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2005-10-30 01:15:59 +00:00
|
|
|
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
|
unsigned long address, pte_t *page_table, pmd_t *pmd,
|
|
|
|
int write_access, pte_t orig_pte)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2005-10-30 01:16:26 +00:00
|
|
|
spinlock_t *ptl;
|
2005-04-16 22:20:36 +00:00
|
|
|
struct page *page;
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
swp_entry_t entry;
|
2005-04-16 22:20:36 +00:00
|
|
|
pte_t pte;
|
|
|
|
int ret = VM_FAULT_MINOR;
|
|
|
|
|
[PATCH] mm: split page table lock
Christoph Lameter demonstrated very poor scalability on the SGI 512-way, with
a many-threaded application which concurrently initializes different parts of
a large anonymous area.
This patch corrects that, by using a separate spinlock per page table page, to
guard the page table entries in that page, instead of using the mm's single
page_table_lock. (But even then, page_table_lock is still used to guard page
table allocation, and anon_vma allocation.)
In this implementation, the spinlock is tucked inside the struct page of the
page table page: with a BUILD_BUG_ON in case it overflows - which it would in
the case of 32-bit PA-RISC with spinlock debugging enabled.
Splitting the lock is not quite for free: another cacheline access. Ideally,
I suppose we would use split ptlock only for multi-threaded processes on
multi-cpu machines; but deciding that dynamically would have its own costs.
So for now enable it by config, at some number of cpus - since the Kconfig
language doesn't support inequalities, let preprocessor compare that with
NR_CPUS. But I don't think it's worth being user-configurable: for good
testing of both split and unsplit configs, split now at 4 cpus, and perhaps
change that to 8 later.
There is a benefit even for singly threaded processes: kswapd can be attacking
one part of the mm while another part is busy faulting.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:40 +00:00
|
|
|
if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
|
2005-10-30 01:16:26 +00:00
|
|
|
goto out;
|
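For reference, the pte_offset_map_lock()/pte_unmap_unlock() pair used throughout these handlers boils down to roughly the following simplified sketch of the include/linux/mm.h macros (shown because the split-ptlock commit message above describes where the lock actually lives; not a verbatim copy):
/*
 * With split page table locks the spinlock comes from the page table page's
 * struct page (via pte_lockptr); otherwise it is mm->page_table_lock.
 */
#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)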
2005-10-30 01:15:59 +00:00
|
|
|
|
|
|
|
entry = pte_to_swp_entry(orig_pte);
|
[PATCH] Swapless page migration: add R/W migration entries
Implement read/write migration ptes
We take the upper two swapfiles for the two types of migration ptes and define
a series of macros in swapops.h.
The VM is modified to handle the migration entries. migration entries can
only be encountered when the page they are pointing to is locked. This limits
the number of places one has to fix. We also check in copy_pte_range and in
mprotect_pte_range() for migration ptes.
We check for migration ptes in do_swap_page and call a function that will
then wait on the page lock. This allows us to effectively stop all accesses
to the page.
Migration entries are created by try_to_unmap if called for migration and
removed by local functions in migrate.c
From: Hugh Dickins <hugh@veritas.com>
Several times while testing swapless page migration (I've no NUMA, just
hacking it up to migrate recklessly while running load), I've hit the
BUG_ON(!PageLocked(p)) in migration_entry_to_page.
This comes from an orphaned migration entry, unrelated to the current
correctly locked migration, but hit by remove_anon_migration_ptes as it
checks an address in each vma of the anon_vma list.
Such an orphan may be left behind if an earlier migration raced with fork:
copy_one_pte can duplicate a migration entry from parent to child, after
remove_anon_migration_ptes has checked the child vma, but before it has
removed it from the parent vma. (If the process were later to fault on this
orphaned entry, it would hit the same BUG from migration_entry_wait.)
This could be fixed by locking anon_vma in copy_one_pte, but we'd rather
not. There's no such problem with file pages, because vma_prio_tree_add
adds child vma after parent vma, and the page table locking at each end is
enough to serialize. Follow that example with anon_vma: add new vmas to the
tail instead of the head.
(There's no corresponding problem when inserting migration entries,
because a missed pte will leave the page count and mapcount high, which is
allowed for. And there's no corresponding problem when migrating via swap,
because a leftover swap entry will be correctly faulted. But the swapless
method has no refcounting of its entries.)
From: Ingo Molnar <mingo@elte.hu>
pte_unmap_unlock() takes the pte pointer as an argument.
From: Hugh Dickins <hugh@veritas.com>
Several times while testing swapless page migration, gcc has tried to exec
a pointer instead of a string: smells like COW mappings are not being
properly write-protected on fork.
The protection in copy_one_pte looks very convincing, until at last you
realize that the second arg to make_migration_entry is a boolean "write",
and SWP_MIGRATION_READ is 30.
Anyway, it's better done like in change_pte_range, using
is_write_migration_entry and make_migration_entry_read.
From: Hugh Dickins <hugh@veritas.com>
Remove unnecessary obfuscation from sys_swapon's range check on swap type,
which blew up causing memory corruption once swapless migration made
MAX_SWAPFILES no longer 2 ^ MAX_SWAPFILES_SHIFT.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Christoph Lameter <clameter@engr.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
From: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-23 09:03:35 +00:00
|
|
|
if (is_migration_entry(entry)) {
|
|
|
|
migration_entry_wait(mm, pmd, address);
|
|
|
|
goto out;
|
|
|
|
}
|
2006-07-14 07:24:37 +00:00
|
|
|
delayacct_set_flag(DELAYACCT_PF_SWAPIN);
|
2005-04-16 22:20:36 +00:00
|
|
|
page = lookup_swap_cache(entry);
|
|
|
|
if (!page) {
|
2006-12-07 04:31:54 +00:00
|
|
|
grab_swap_token(); /* Contend for token _before_ read-in */
|
2005-04-16 22:20:36 +00:00
|
|
|
swapin_readahead(entry, address, vma);
|
|
|
|
page = read_swap_cache_async(entry, vma, address);
|
|
|
|
if (!page) {
|
|
|
|
/*
|
2005-10-30 01:16:26 +00:00
|
|
|
* Back out if somebody else faulted in this pte
|
|
|
|
* while we released the pte lock.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2005-10-30 01:16:26 +00:00
|
|
|
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (likely(pte_same(*page_table, orig_pte)))
|
|
|
|
ret = VM_FAULT_OOM;
|
2006-07-14 07:24:37 +00:00
|
|
|
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
|
2005-10-30 01:15:59 +00:00
|
|
|
goto unlock;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Had to read the page from swap area: Major fault */
|
|
|
|
ret = VM_FAULT_MAJOR;
|
2006-06-30 08:55:45 +00:00
|
|
|
count_vm_event(PGMAJFAULT);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2006-07-14 07:24:37 +00:00
|
|
|
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
|
2005-04-16 22:20:36 +00:00
|
|
|
mark_page_accessed(page);
|
|
|
|
lock_page(page);
|
|
|
|
|
|
|
|
/*
|
2005-10-30 01:16:26 +00:00
|
|
|
* Back out if somebody else already faulted in this pte.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
|
2005-10-30 01:16:15 +00:00
|
|
|
if (unlikely(!pte_same(*page_table, orig_pte)))
|
2005-05-17 04:53:50 +00:00
|
|
|
goto out_nomap;
|
|
|
|
|
|
|
|
if (unlikely(!PageUptodate(page))) {
|
|
|
|
ret = VM_FAULT_SIGBUS;
|
|
|
|
goto out_nomap;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* The page isn't present yet, go ahead with the fault. */
|
|
|
|
|
2005-10-30 01:16:05 +00:00
|
|
|
inc_mm_counter(mm, anon_rss);
|
2005-04-16 22:20:36 +00:00
|
|
|
pte = mk_pte(page, vma->vm_page_prot);
|
|
|
|
if (write_access && can_share_swap_page(page)) {
|
|
|
|
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
|
|
|
|
write_access = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
flush_icache_page(vma, page);
|
|
|
|
set_pte_at(mm, address, page_table, pte);
|
|
|
|
page_add_anon_rmap(page, vma, address);
|
|
|
|
|
[PATCH] can_share_swap_page: use page_mapcount
Remember that ironic get_user_pages race? when the raised page_count on a
page swapped out led do_wp_page to decide that it had to copy on write, so
substituted a different page into userspace. 2.6.7 onwards have Andrea's
solution, where try_to_unmap_one backs out if it finds page_count raised.
Which works, but is unsatisfying (rmap.c has no other page_count heuristics),
and was found a few months ago to hang an intensive page migration test. A
year ago I was hesitant to engage page_mapcount, now it seems the right fix.
So remove the page_count hack from try_to_unmap_one; and use activate_page in
unuse_mm when dropping lock, to replace its secondary effect of helping
swapoff to make progress in that case.
Simplify can_share_swap_page (now called only on anonymous pages) to check
page_mapcount + page_swapcount == 1: still needs the page lock to stabilize
their (pessimistic) sum, but does not need swapper_space.tree_lock for that.
In do_swap_page, move swap_free and unlock_page below page_add_anon_rmap, to
keep sum on the high side, and correct when can_share_swap_page called.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-22 00:15:12 +00:00
|
|
|
swap_free(entry);
|
|
|
|
if (vm_swap_full())
|
|
|
|
remove_exclusive_swap_page(page);
|
|
|
|
unlock_page(page);
|
|
|
|
|
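The simplified check described in the commit message above has roughly this shape (a sketch of can_share_swap_page(), with page_swapcount() counting the references still held by swap slots; both counts are only stable under the page lock, which is why unlock_page is moved below page_add_anon_rmap here):

int can_share_swap_page(struct page *page)
{
	int count;

	BUG_ON(!PageLocked(page));
	count = page_mapcount(page);			/* users mapping the page */
	if (count <= 1 && PageSwapCache(page))
		count += page_swapcount(page);		/* plus swap references */
	return count == 1;				/* exclusive: safe to write */
}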
2005-04-16 22:20:36 +00:00
|
|
|
if (write_access) {
|
|
|
|
if (do_wp_page(mm, vma, address,
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
page_table, pmd, ptl, pte) == VM_FAULT_OOM)
|
2005-04-16 22:20:36 +00:00
|
|
|
ret = VM_FAULT_OOM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* No need to invalidate - it was non-present before */
|
|
|
|
update_mmu_cache(vma, address, pte);
|
|
|
|
lazy_mmu_prot_update(pte);
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
unlock:
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
pte_unmap_unlock(page_table, ptl);
|
2005-04-16 22:20:36 +00:00
|
|
|
out:
|
|
|
|
return ret;
|
2005-05-17 04:53:50 +00:00
|
|
|
out_nomap:
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
pte_unmap_unlock(page_table, ptl);
|
2005-05-17 04:53:50 +00:00
|
|
|
unlock_page(page);
|
|
|
|
page_cache_release(page);
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
return ret;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
* We enter with non-exclusive mmap_sem (to exclude vma changes,
|
|
|
|
* but allow concurrent faults), and pte mapped but not yet locked.
|
|
|
|
* We return with mmap_sem still held, but pte unmapped and unlocked.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
|
unsigned long address, pte_t *page_table, pmd_t *pmd,
|
|
|
|
int write_access)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
struct page *page;
|
|
|
|
spinlock_t *ptl;
|
2005-04-16 22:20:36 +00:00
|
|
|
pte_t entry;
|
|
|
|
|
2005-11-28 22:34:23 +00:00
|
|
|
if (write_access) {
|
2005-04-16 22:20:36 +00:00
|
|
|
/* Allocate our own private page. */
|
|
|
|
pte_unmap(page_table);
|
|
|
|
|
|
|
|
if (unlikely(anon_vma_prepare(vma)))
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
goto oom;
|
|
|
|
page = alloc_zeroed_user_highpage(vma, address);
|
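For architectures without their own zeroing variant, alloc_zeroed_user_highpage() used above falls back to roughly this generic helper (a sketch along the lines of include/linux/highmem.h of that era):

static inline struct page *
alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);	/* zero through a temporary kmap */
	return page;
}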
2005-04-16 22:20:36 +00:00
|
|
|
if (!page)
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
goto oom;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
entry = mk_pte(page, vma->vm_page_prot);
|
|
|
|
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
|
|
|
|
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
|
|
|
|
if (!pte_none(*page_table))
|
|
|
|
goto release;
|
|
|
|
inc_mm_counter(mm, anon_rss);
|
2005-04-16 22:20:36 +00:00
|
|
|
lru_cache_add_active(page);
|
2006-01-06 08:11:12 +00:00
|
|
|
page_add_new_anon_rmap(page, vma, address);
|
2005-10-30 01:16:12 +00:00
|
|
|
} else {
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
/* Map the ZERO_PAGE - vm_page_prot is readonly */
|
|
|
|
page = ZERO_PAGE(address);
|
|
|
|
page_cache_get(page);
|
|
|
|
entry = mk_pte(page, vma->vm_page_prot);
|
|
|
|
|
[PATCH] mm: split page table lock
Christoph Lameter demonstrated very poor scalability on the SGI 512-way, with
a many-threaded application which concurrently initializes different parts of
a large anonymous area.
This patch corrects that, by using a separate spinlock per page table page, to
guard the page table entries in that page, instead of using the mm's single
page_table_lock. (But even then, page_table_lock is still used to guard page
table allocation, and anon_vma allocation.)
In this implementation, the spinlock is tucked inside the struct page of the
page table page: with a BUILD_BUG_ON in case it overflows - which it would in
the case of 32-bit PA-RISC with spinlock debugging enabled.
Splitting the lock is not quite for free: another cacheline access. Ideally,
I suppose we would use split ptlock only for multi-threaded processes on
multi-cpu machines; but deciding that dynamically would have its own costs.
So for now enable it by config, at some number of cpus - since the Kconfig
language doesn't support inequalities, let preprocessor compare that with
NR_CPUS. But I don't think it's worth being user-configurable: for good
testing of both split and unsplit configs, split now at 4 cpus, and perhaps
change that to 8 later.
There is a benefit even for singly threaded processes: kswapd can be attacking
one part of the mm while another part is busy faulting.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:40 +00:00
|
|
|
ptl = pte_lockptr(mm, pmd);
|
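The lock helpers that the split-ptlock patch builds on look roughly like this (a sketch of the include/linux/mm.h definitions of that era; the per-page spinlock is assumed to sit in the page table page's struct page as page->ptl, as the message describes):

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
/* the lock lives in the struct page of the page table page */
#define pte_lockptr(mm, pmd)	({(void)(mm); &pmd_page(*(pmd))->ptl;})
#else
/* few CPUs configured: fall back to the single mm-wide lock */
#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
#endif

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)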
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
spin_lock(ptl);
|
|
|
|
if (!pte_none(*page_table))
|
|
|
|
goto release;
|
2005-10-30 01:16:12 +00:00
|
|
|
inc_mm_counter(mm, file_rss);
|
|
|
|
page_add_file_rmap(page);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
set_pte_at(mm, address, page_table, entry);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/* No need to invalidate - it was non-present before */
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
update_mmu_cache(vma, address, entry);
|
2005-04-16 22:20:36 +00:00
|
|
|
lazy_mmu_prot_update(entry);
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
unlock:
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
pte_unmap_unlock(page_table, ptl);
|
2005-04-16 22:20:36 +00:00
|
|
|
return VM_FAULT_MINOR;
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
release:
|
|
|
|
page_cache_release(page);
|
|
|
|
goto unlock;
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
oom:
|
2005-04-16 22:20:36 +00:00
|
|
|
return VM_FAULT_OOM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* do_no_page() tries to create a new page mapping. It aggressively
|
|
|
|
* tries to share with existing pages, but makes a separate copy if
|
|
|
|
* the "write_access" parameter is true in order to avoid the next
|
|
|
|
* page fault.
|
|
|
|
*
|
|
|
|
* As this is called only for pages that do not currently exist, we
|
|
|
|
* do not need to flush old virtual caches or the TLB.
|
|
|
|
*
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
* We enter with non-exclusive mmap_sem (to exclude vma changes,
|
|
|
|
* but allow concurrent faults), and pte mapped but not yet locked.
|
|
|
|
* We return with mmap_sem still held, but pte unmapped and unlocked.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
|
unsigned long address, pte_t *page_table, pmd_t *pmd,
|
|
|
|
int write_access)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
spinlock_t *ptl;
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
struct page *new_page;
|
2005-04-16 22:20:36 +00:00
|
|
|
struct address_space *mapping = NULL;
|
|
|
|
pte_t entry;
|
|
|
|
unsigned int sequence = 0;
|
|
|
|
int ret = VM_FAULT_MINOR;
|
|
|
|
int anon = 0;
|
2006-09-26 06:30:57 +00:00
|
|
|
struct page *dirty_page = NULL;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
pte_unmap(page_table);
|
2005-11-29 16:55:48 +00:00
|
|
|
BUG_ON(vma->vm_flags & VM_PFNMAP);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
if (vma->vm_file) {
|
|
|
|
mapping = vma->vm_file->f_mapping;
|
|
|
|
sequence = mapping->truncate_count;
|
|
|
|
smp_rmb(); /* serializes i_size against truncate_count */
|
|
|
|
}
|
|
|
|
retry:
|
|
|
|
new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
|
|
|
|
/*
|
|
|
|
* No smp_rmb is needed here as long as there's a full
|
|
|
|
* spin_lock/unlock sequence inside the ->nopage callback
|
|
|
|
* (for the pagecache lookup) that acts as an implicit
|
|
|
|
* smp_mb() and prevents the i_size read to happen
|
|
|
|
* after the next truncate_count read.
|
|
|
|
*/
|
|
|
|
|
[PATCH] page fault retry with NOPAGE_REFAULT
Add a way for a no_page() handler to request a retry of the faulting
instruction. It goes back to userland on page faults and just tries again
in get_user_pages(). I added a cond_resched() in the loop in that latter
case.
The problem I have with signal and spufs is an actual bug affecting apps and I
don't see other ways of fixing it.
In addition, we are having issues with infiniband and 64k pages (related to
the way the hypervisor deals with some HV cards) that will require us to muck
around with the MMU from within the IB driver's no_page() (it's a pSeries
specific driver) and return to the caller the same way using NOPAGE_REFAULT.
And to add to this, the graphics folks have been following a new approach of
memory management that involves transparently swapping objects between video
ram and main memory. To do that, they need to install PTEs from a no_page()
handler as well and that also requires returning with NOPAGE_REFAULT.
(For the latter, they are currently using io_remap_pfn_range to install one PTE
from no_page() which is a bit racy, we need to add a check for the PTE having
already been installed after taking the lock, but that's ok, they are only at
the proof-of-concept stage. I'll send a patch adding a "clean" function to do
that, we can use that from spufs too and get rid of the sparsemem hacks we do
to create struct page for SPEs. Basically, that provides a generic solution
for being able to have no_page() map hardware devices, which is something that
I think sound driver folks have been asking for some time too).
All of these things depend on having the NOPAGE_REFAULT exit path from
no_page() handlers.
Signed-off-by: Benjamin Herrenchmidt <benh@kernel.crashing.org>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-10-06 07:43:53 +00:00
|
|
|
/* no page was available -- either SIGBUS, OOM or REFAULT */
|
|
|
|
if (unlikely(new_page == NOPAGE_SIGBUS))
|
2005-04-16 22:20:36 +00:00
|
|
|
return VM_FAULT_SIGBUS;
|
[PATCH] page fault retry with NOPAGE_REFAULT
Add a way for a no_page() handler to request a retry of the faulting
instruction. It goes back to userland on page faults and just tries again
in get_user_pages(). I added a cond_resched() in the loop in that latter
case.
The problem I have with signal and spufs is an actual bug affecting apps and I
don't see other ways of fixing it.
In addition, we are having issues with infiniband and 64k pages (related to
the way the hypervisor deals with some HV cards) that will require us to muck
around with the MMU from within the IB driver's no_page() (it's a pSeries
specific driver) and return to the caller the same way using NOPAGE_REFAULT.
And to add to this, the graphics folks have been following a new approach of
memory management that involves transparently swapping objects between video
ram and main memory. To do that, they need to install PTEs from a no_page()
handler as well and that also requires returning with NOPAGE_REFAULT.
(For the latter, they are currently using io_remap_pfn_range to install one PTE
from no_page() which is a bit racy, we need to add a check for the PTE having
already been installed after taking the lock, but that's ok, they are only at
the proof-of-concept stage. I'll send a patch adding a "clean" function to do
that, we can use that from spufs too and get rid of the sparsemem hacks we do
to create struct page for SPEs. Basically, that provides a generic solution
for being able to have no_page() map hardware devices, which is something that
I think sound driver folks have been asking for some time too).
All of these things depend on having the NOPAGE_REFAULT exit path from
no_page() handlers.
Signed-off-by: Benjamin Herrenchmidt <benh@kernel.crashing.org>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-10-06 07:43:53 +00:00
|
|
|
else if (unlikely(new_page == NOPAGE_OOM))
|
2005-04-16 22:20:36 +00:00
|
|
|
return VM_FAULT_OOM;
|
[PATCH] page fault retry with NOPAGE_REFAULT
Add a way for a no_page() handler to request a retry of the faulting
instruction. It goes back to userland on page faults and just tries again
in get_user_pages(). I added a cond_resched() in the loop in that latter
case.
The problem I have with signal and spufs is an actual bug affecting apps and I
don't see other ways of fixing it.
In addition, we are having issues with infiniband and 64k pages (related to
the way the hypervisor deals with some HV cards) that will require us to muck
around with the MMU from within the IB driver's no_page() (it's a pSeries
specific driver) and return to the caller the same way using NOPAGE_REFAULT.
And to add to this, the graphics folks have been following a new approach of
memory management that involves transparently swapping objects between video
ram and main memory. To do that, they need to install PTEs from a no_page()
handler as well and that also requires returning with NOPAGE_REFAULT.
(For the latter, they are currently using io_remap_pfn_range to install one PTE
from no_page() which is a bit racy, we need to add a check for the PTE having
already been installed after taking the lock, but that's ok, they are only at
the proof-of-concept stage. I'll send a patch adding a "clean" function to do
that, we can use that from spufs too and get rid of the sparsemem hacks we do
to create struct page for SPEs. Basically, that provides a generic solution
for being able to have no_page() map hardware devices, which is something that
I think sound driver folks have been asking for some time too).
All of these things depend on having the NOPAGE_REFAULT exit path from
no_page() handlers.
Signed-off-by: Benjamin Herrenchmidt <benh@kernel.crashing.org>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-10-06 07:43:53 +00:00
|
|
|
else if (unlikely(new_page == NOPAGE_REFAULT))
|
|
|
|
return VM_FAULT_MINOR;
|
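A hypothetical ->nopage handler using the new exit path (the mydev_* names are illustrative, not from the kernel tree): the driver installs the PTE itself, then returns NOPAGE_REFAULT so that do_no_page() above simply returns VM_FAULT_MINOR and the faulting instruction is retried.

#include <linux/mm.h>

struct mydev_object;					/* hypothetical driver state */
extern int mydev_map_object(struct mydev_object *obj,
			    struct vm_area_struct *vma,
			    unsigned long addr);	/* installs the PTE itself */

static struct page *mydev_nopage(struct vm_area_struct *vma,
				 unsigned long address, int *type)
{
	struct mydev_object *obj = vma->vm_private_data;

	if (mydev_map_object(obj, vma, address & PAGE_MASK))
		return NOPAGE_SIGBUS;		/* could not map: fault the task */
	return NOPAGE_REFAULT;			/* PTE is in place: just retry */
}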
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Should we do an early C-O-W break?
|
|
|
|
*/
|
2006-06-23 09:03:43 +00:00
|
|
|
if (write_access) {
|
|
|
|
if (!(vma->vm_flags & VM_SHARED)) {
|
|
|
|
struct page *page;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-06-23 09:03:43 +00:00
|
|
|
if (unlikely(anon_vma_prepare(vma)))
|
|
|
|
goto oom;
|
|
|
|
page = alloc_page_vma(GFP_HIGHUSER, vma, address);
|
|
|
|
if (!page)
|
|
|
|
goto oom;
|
2006-12-12 17:14:55 +00:00
|
|
|
copy_user_highpage(page, new_page, address, vma);
|
2006-06-23 09:03:43 +00:00
|
|
|
page_cache_release(new_page);
|
|
|
|
new_page = page;
|
|
|
|
anon = 1;
|
|
|
|
|
|
|
|
} else {
|
|
|
|
/* if the page will be shareable, see if the backing
|
|
|
|
* address space wants to know that the page is about
|
|
|
|
* to become writable */
|
|
|
|
if (vma->vm_ops->page_mkwrite &&
|
|
|
|
vma->vm_ops->page_mkwrite(vma, new_page) < 0
|
|
|
|
) {
|
|
|
|
page_cache_release(new_page);
|
|
|
|
return VM_FAULT_SIGBUS;
|
|
|
|
}
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
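A hypothetical ->page_mkwrite handler for the callback tested above (the myfs_* names are illustrative): the filesystem is told before a shared page becomes writable, so it can reserve space now rather than fail at writeback time; a negative return makes do_no_page() report SIGBUS.

#include <linux/mm.h>
#include <linux/fs.h>

extern int myfs_reserve_blocks_for_page(struct inode *inode,
					struct page *page);	/* hypothetical */

static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	struct inode *inode = vma->vm_file->f_mapping->host;

	/* < 0 on failure, which the fault path turns into SIGBUS */
	return myfs_reserve_blocks_for_page(inode, page);
}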
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* For a file-backed vma, someone could have truncated or otherwise
|
|
|
|
* invalidated this page. If unmap_mapping_range got called,
|
|
|
|
* retry getting the page.
|
|
|
|
*/
|
|
|
|
if (mapping && unlikely(sequence != mapping->truncate_count)) {
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
pte_unmap_unlock(page_table, ptl);
|
2005-04-16 22:20:36 +00:00
|
|
|
page_cache_release(new_page);
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
cond_resched();
|
|
|
|
sequence = mapping->truncate_count;
|
|
|
|
smp_rmb();
|
2005-04-16 22:20:36 +00:00
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This silly early PAGE_DIRTY setting removes a race
|
|
|
|
* due to the bad i386 page protection. But it's valid
|
|
|
|
* for other architectures too.
|
|
|
|
*
|
|
|
|
* Note that if write_access is true, we either now have
|
|
|
|
* an exclusive copy of the page, or this is a shared mapping,
|
|
|
|
* so we can make it writable and dirty to avoid having to
|
|
|
|
* handle that later.
|
|
|
|
*/
|
|
|
|
/* Only go through if we didn't race with anybody else... */
|
|
|
|
if (pte_none(*page_table)) {
|
|
|
|
flush_icache_page(vma, new_page);
|
|
|
|
entry = mk_pte(new_page, vma->vm_page_prot);
|
|
|
|
if (write_access)
|
|
|
|
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
|
|
|
|
set_pte_at(mm, address, page_table, entry);
|
|
|
|
if (anon) {
|
2005-10-30 01:16:05 +00:00
|
|
|
inc_mm_counter(mm, anon_rss);
|
2005-04-16 22:20:36 +00:00
|
|
|
lru_cache_add_active(new_page);
|
2006-01-06 08:11:12 +00:00
|
|
|
page_add_new_anon_rmap(new_page, vma, address);
|
2005-11-22 05:32:19 +00:00
|
|
|
} else {
|
2005-10-30 01:16:05 +00:00
|
|
|
inc_mm_counter(mm, file_rss);
|
2005-04-16 22:20:36 +00:00
|
|
|
page_add_file_rmap(new_page);
|
2006-09-26 06:30:57 +00:00
|
|
|
if (write_access) {
|
|
|
|
dirty_page = new_page;
|
|
|
|
get_page(dirty_page);
|
|
|
|
}
|
2005-10-30 01:16:05 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
} else {
|
|
|
|
/* One of our sibling threads was faster, back out. */
|
|
|
|
page_cache_release(new_page);
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
goto unlock;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* no need to invalidate: a not-present page shouldn't be cached */
|
|
|
|
update_mmu_cache(vma, address, entry);
|
|
|
|
lazy_mmu_prot_update(entry);
|
[PATCH] mm: page fault handlers tidyup
Impose a little more consistency on the page fault handlers do_wp_page,
do_swap_page, do_anonymous_page, do_no_page, do_file_page: why not pass their
arguments in the same order, called the same names?
break_cow is all very well, but what it did was inlined elsewhere: easier to
compare if it's brought back into do_wp_page.
do_file_page's fallback to do_no_page dates from a time when we were testing
pte_file by using it wherever possible: currently it's peculiar to nonlinear
vmas, so just check that. BUG_ON if not? Better not, it's probably page
table corruption, so just show the pte: hmm, there's a pte_ERROR macro, let's
use that for do_wp_page's invalid pfn too.
Hah! Someone in the ppc64 world noticed pte_ERROR was unused so removed it:
restored (and say "pud" not "pmd" in its pud_ERROR).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:15:59 +00:00
|
|
|
unlock:
|
[PATCH] mm: page fault handler locking
On the page fault path, the patch before last pushed acquiring the
page_table_lock down to the head of handle_pte_fault (though it's also taken
and dropped earlier when a new page table has to be allocated).
Now delete that line, read "entry = *pte" without it, and go off to this or
that page fault handler on the basis of this unlocked peek. Usually the
handler can proceed without the lock, relying on the subsequent locked
pte_same or pte_none test to back out when necessary; though do_wp_page needs
the lock immediately, and do_file_page doesn't check (if there's a race,
install_page just zaps the entry and reinstalls it).
But on those architectures (notably i386 with PAE) whose pte is too big to be
read atomically, if SMP or preemption is enabled, do_swap_page and
do_file_page might cause irretrievable damage if passed a Frankenstein entry
stitched together from unrelated parts. In those configs, "pte_unmap_same"
has to take page_table_lock, validate orig_pte still the same, and drop
page_table_lock before unmapping, before proceeding.
Use pte_offset_map_lock and pte_unmap_unlock throughout the handlers; but lock
avoidance leaves more lone maps and unmaps than elsewhere.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:26 +00:00
|
|
|
pte_unmap_unlock(page_table, ptl);
|
2006-09-26 06:30:57 +00:00
|
|
|
if (dirty_page) {
|
2006-09-26 06:30:58 +00:00
|
|
|
set_page_dirty_balance(dirty_page);
|
2006-09-26 06:30:57 +00:00
|
|
|
put_page(dirty_page);
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
return ret;
|
|
|
|
oom:
|
|
|
|
page_cache_release(new_page);
|
[PATCH] mm: page fault handlers tidyup
2005-10-30 01:15:59 +00:00
|
|
|
return VM_FAULT_OOM;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
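For reference, a minimal sketch of the pte_unmap_same() helper that the locking change quoted above relies on (the real helper is defined earlier in this file; details here are approximate). On configurations where a pte is too wide to be read atomically, it revalidates orig_pte under the pte lock before the caller trusts the unlocked peek; do_swap_page() and do_file_page() call it before doing any real work.

static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				 pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		/* the unlocked read may have seen two halves of different ptes */
		spinlock_t *ptl = pte_lockptr(mm, pmd);
		spin_lock(ptl);
		same = pte_same(*page_table, orig_pte);
		spin_unlock(ptl);
	}
#endif
	pte_unmap(page_table);
	return same;
}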
2006-09-27 08:50:10 +00:00
|
|
|
/*
|
|
|
|
* do_no_pfn() tries to create a new page mapping for a page without
|
|
|
|
* a struct page backing it
|
|
|
|
*
|
|
|
|
* As this is called only for pages that do not currently exist, we
|
|
|
|
* do not need to flush old virtual caches or the TLB.
|
|
|
|
*
|
|
|
|
* We enter with non-exclusive mmap_sem (to exclude vma changes,
|
|
|
|
* but allow concurrent faults), and pte mapped but not yet locked.
|
|
|
|
* We return with mmap_sem still held, but pte unmapped and unlocked.
|
|
|
|
*
|
|
|
|
* It is expected that the ->nopfn handler always returns the same pfn
|
|
|
|
* for a given virtual mapping.
|
|
|
|
*
|
|
|
|
* Mark this `noinline' to prevent it from bloating the main pagefault code.
|
|
|
|
*/
|
|
|
|
static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
|
unsigned long address, pte_t *page_table, pmd_t *pmd,
|
|
|
|
int write_access)
|
|
|
|
{
|
|
|
|
spinlock_t *ptl;
|
|
|
|
pte_t entry;
|
|
|
|
unsigned long pfn;
|
|
|
|
int ret = VM_FAULT_MINOR;
|
|
|
|
|
|
|
|
pte_unmap(page_table);
|
|
|
|
BUG_ON(!(vma->vm_flags & VM_PFNMAP));
|
|
|
|
BUG_ON(is_cow_mapping(vma->vm_flags));
|
|
|
|
|
|
|
|
pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
|
2007-02-12 08:51:38 +00:00
|
|
|
if (unlikely(pfn == NOPFN_OOM))
|
2006-09-27 08:50:10 +00:00
|
|
|
return VM_FAULT_OOM;
|
2007-02-12 08:51:38 +00:00
|
|
|
else if (unlikely(pfn == NOPFN_SIGBUS))
|
2006-09-27 08:50:10 +00:00
|
|
|
return VM_FAULT_SIGBUS;
|
2007-02-12 08:51:38 +00:00
|
|
|
else if (unlikely(pfn == NOPFN_REFAULT))
|
|
|
|
return VM_FAULT_MINOR;
|
2006-09-27 08:50:10 +00:00
|
|
|
|
|
|
|
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
|
|
|
|
|
|
|
|
/* Only go through if we didn't race with anybody else... */
|
|
|
|
if (pte_none(*page_table)) {
|
|
|
|
entry = pfn_pte(pfn, vma->vm_page_prot);
|
|
|
|
if (write_access)
|
|
|
|
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
|
|
|
|
set_pte_at(mm, address, page_table, entry);
|
|
|
|
}
|
|
|
|
pte_unmap_unlock(page_table, ptl);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
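For illustration, a hypothetical driver-side ->nopfn handler and mmap method that do_no_pfn() above would service; the mydrv_* names and the mydrv_buf structure are invented for this sketch and error handling is minimal.

#include <linux/fs.h>
#include <linux/mm.h>

struct mydrv_buf {			/* hypothetical device buffer descriptor */
	unsigned long phys_base;	/* physical base of a contiguous region */
	unsigned long size;
};

static unsigned long mydrv_nopfn(struct vm_area_struct *vma,
				 unsigned long address)
{
	struct mydrv_buf *buf = vma->vm_private_data;
	unsigned long offset = address - vma->vm_start;

	if (offset >= buf->size)
		return NOPFN_SIGBUS;
	/* the same pfn must always be returned for a given address */
	return (buf->phys_base + offset) >> PAGE_SHIFT;
}

static struct vm_operations_struct mydrv_vm_ops = {
	.nopfn	= mydrv_nopfn,
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_PFNMAP | VM_IO;	/* do_no_pfn() BUGs without VM_PFNMAP */
	vma->vm_ops = &mydrv_vm_ops;
	vma->vm_private_data = file->private_data;
	return 0;
}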
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Fault of a previously existing named mapping. Repopulate the pte
|
|
|
|
* from the encoded file_pte if possible. This enables swappable
|
|
|
|
* nonlinear vmas.
|
[PATCH] mm: page fault handler locking
2005-10-30 01:16:26 +00:00
|
|
|
*
|
|
|
|
* We enter with non-exclusive mmap_sem (to exclude vma changes,
|
|
|
|
* but allow concurrent faults), and pte mapped but not yet locked.
|
|
|
|
* We return with mmap_sem still held, but pte unmapped and unlocked.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
[PATCH] mm: page fault handlers tidyup
2005-10-30 01:15:59 +00:00
|
|
|
static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
|
unsigned long address, pte_t *page_table, pmd_t *pmd,
|
|
|
|
int write_access, pte_t orig_pte)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
[PATCH] mm: page fault handlers tidyup
2005-10-30 01:15:59 +00:00
|
|
|
pgoff_t pgoff;
|
2005-04-16 22:20:36 +00:00
|
|
|
int err;
|
|
|
|
|
[PATCH] mm: split page table lock
Christoph Lameter demonstrated very poor scalability on the SGI 512-way, with
a many-threaded application which concurrently initializes different parts of
a large anonymous area.
This patch corrects that, by using a separate spinlock per page table page, to
guard the page table entries in that page, instead of using the mm's single
page_table_lock. (But even then, page_table_lock is still used to guard page
table allocation, and anon_vma allocation.)
In this implementation, the spinlock is tucked inside the struct page of the
page table page: with a BUILD_BUG_ON in case it overflows - which it would in
the case of 32-bit PA-RISC with spinlock debugging enabled.
Splitting the lock is not quite for free: another cacheline access. Ideally,
I suppose we would use split ptlock only for multi-threaded processes on
multi-cpu machines; but deciding that dynamically would have its own costs.
So for now enable it by config, at some number of cpus - since the Kconfig
language doesn't support inequalities, let preprocessor compare that with
NR_CPUS. But I don't think it's worth being user-configurable: for good
testing of both split and unsplit configs, split now at 4 cpus, and perhaps
change that to 8 later.
There is a benefit even for singly threaded processes: kswapd can be attacking
one part of the mm while another part is busy faulting.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:40 +00:00
|
|
|
if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
|
[PATCH] mm: page fault handler locking
2005-10-30 01:16:26 +00:00
|
|
|
return VM_FAULT_MINOR;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
[PATCH] mm: page fault handlers tidyup
2005-10-30 01:15:59 +00:00
|
|
|
if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
|
|
|
|
/*
|
|
|
|
* Page table corrupted: show pte and kill process.
|
|
|
|
*/
|
2005-10-30 01:16:12 +00:00
|
|
|
print_bad_pte(vma, orig_pte, address);
|
[PATCH] mm: page fault handlers tidyup
2005-10-30 01:15:59 +00:00
|
|
|
return VM_FAULT_OOM;
|
|
|
|
}
|
|
|
|
/* We can then assume vma->vm_ops && vma->vm_ops->populate */
|
|
|
|
|
|
|
|
pgoff = pte_to_pgoff(orig_pte);
|
|
|
|
err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
|
|
|
|
vma->vm_page_prot, pgoff, 0);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (err == -ENOMEM)
|
|
|
|
return VM_FAULT_OOM;
|
|
|
|
if (err)
|
|
|
|
return VM_FAULT_SIGBUS;
|
|
|
|
return VM_FAULT_MAJOR;
|
|
|
|
}
|
|
|
|
|
|
|
|
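As a userspace illustration of where such nonlinear ptes come from (a sketch; error handling is omitted and "datafile" is a placeholder): remap_file_pages(2) rearranges the pages of a MAP_SHARED mapping, the vma becomes VM_NONLINEAR, and once one of those pages is evicted its pte is saved as a pte_file entry that do_file_page() above later decodes and repopulates.

#define _GNU_SOURCE
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	int fd = open("datafile", O_RDWR);
	char *p = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	/* make the first page of the window show file page 3 instead of 0 */
	remap_file_pages(p, pg, 0, 3, 0);

	p[0] = 1;	/* actually touches file offset 3 * pg */
	return 0;
}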
/*
|
|
|
|
* These routines also need to handle stuff like marking pages dirty
|
|
|
|
* and/or accessed for architectures that don't do it in hardware (most
|
|
|
|
* RISC architectures). The early dirtying is also good on the i386.
|
|
|
|
*
|
|
|
|
* There is also a hook called "update_mmu_cache()" that architectures
|
|
|
|
* with external mmu caches can use to update those (ie the Sparc or
|
|
|
|
* PowerPC hashed page tables that act as extended TLBs).
|
|
|
|
*
|
2005-10-30 01:16:23 +00:00
|
|
|
* We enter with non-exclusive mmap_sem (to exclude vma changes,
|
|
|
|
* but allow concurrent faults), and pte mapped but not yet locked.
|
|
|
|
* We return with mmap_sem still held, but pte unmapped and unlocked.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
|
|
|
static inline int handle_pte_fault(struct mm_struct *mm,
|
[PATCH] mm: page fault handlers tidyup
2005-10-30 01:15:59 +00:00
|
|
|
struct vm_area_struct *vma, unsigned long address,
|
|
|
|
pte_t *pte, pmd_t *pmd, int write_access)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
pte_t entry;
|
2005-10-30 01:16:48 +00:00
|
|
|
pte_t old_entry;
|
[PATCH] mm: page fault handler locking
2005-10-30 01:16:26 +00:00
|
|
|
spinlock_t *ptl;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-10-30 01:16:48 +00:00
|
|
|
old_entry = entry = *pte;
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!pte_present(entry)) {
|
[PATCH] mm: page fault handlers tidyup
2005-10-30 01:15:59 +00:00
|
|
|
if (pte_none(entry)) {
|
2006-09-27 08:50:10 +00:00
|
|
|
if (vma->vm_ops) {
|
|
|
|
if (vma->vm_ops->nopage)
|
|
|
|
return do_no_page(mm, vma, address,
|
|
|
|
pte, pmd,
|
|
|
|
write_access);
|
|
|
|
if (unlikely(vma->vm_ops->nopfn))
|
|
|
|
return do_no_pfn(mm, vma, address, pte,
|
|
|
|
pmd, write_access);
|
|
|
|
}
|
|
|
|
return do_anonymous_page(mm, vma, address,
|
|
|
|
pte, pmd, write_access);
|
[PATCH] mm: page fault handlers tidyup
2005-10-30 01:15:59 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
if (pte_file(entry))
|
[PATCH] mm: page fault handlers tidyup
2005-10-30 01:15:59 +00:00
|
|
|
return do_file_page(mm, vma, address,
|
|
|
|
pte, pmd, write_access, entry);
|
|
|
|
return do_swap_page(mm, vma, address,
|
|
|
|
pte, pmd, write_access, entry);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
[PATCH] mm: split page table lock
2005-10-30 01:16:40 +00:00
|
|
|
ptl = pte_lockptr(mm, pmd);
|
[PATCH] mm: page fault handler locking
2005-10-30 01:16:26 +00:00
|
|
|
spin_lock(ptl);
|
|
|
|
if (unlikely(!pte_same(*pte, entry)))
|
|
|
|
goto unlock;
|
2005-04-16 22:20:36 +00:00
|
|
|
if (write_access) {
|
|
|
|
if (!pte_write(entry))
|
[PATCH] mm: page fault handler locking
2005-10-30 01:16:26 +00:00
|
|
|
return do_wp_page(mm, vma, address,
|
|
|
|
pte, pmd, ptl, entry);
|
2005-04-16 22:20:36 +00:00
|
|
|
entry = pte_mkdirty(entry);
|
|
|
|
}
|
|
|
|
entry = pte_mkyoung(entry);
|
2005-10-30 01:16:48 +00:00
|
|
|
if (!pte_same(old_entry, entry)) {
|
|
|
|
ptep_set_access_flags(vma, address, pte, entry, write_access);
|
|
|
|
update_mmu_cache(vma, address, entry);
|
|
|
|
lazy_mmu_prot_update(entry);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* This is needed only for protection faults but the arch code
|
|
|
|
* is not yet telling us if this is a protection fault or not.
|
|
|
|
* This still avoids useless tlb flushes for .text page faults
|
|
|
|
* with threads.
|
|
|
|
*/
|
|
|
|
if (write_access)
|
|
|
|
flush_tlb_page(vma, address);
|
|
|
|
}
|
[PATCH] mm: page fault handler locking
2005-10-30 01:16:26 +00:00
|
|
|
unlock:
|
|
|
|
pte_unmap_unlock(pte, ptl);
|
2005-04-16 22:20:36 +00:00
|
|
|
return VM_FAULT_MINOR;
|
|
|
|
}
|
|
|
|
|
|
|
|
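handle_pte_fault() above leans on pte_lockptr() and pte_offset_map_lock(); a rough sketch of how the split-ptlock commit quoted earlier wires those up in <linux/mm.h> of this era (the exact guards and helper names may differ): the lock either lives inside the struct page of the page-table page or falls back to the mm-wide page_table_lock.

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
/* split case: one spinlock per page-table page, tucked into its struct page */
#define pte_lockptr(mm, pmd)	({ (void)(mm); &pmd_page(*(pmd))->ptl; })
#else
/* unsplit case: mm->page_table_lock guards every page-table page of the mm */
#define pte_lockptr(mm, pmd)	({ (void)(pmd); &(mm)->page_table_lock; })
#endif

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})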
/*
|
|
|
|
* By the time we get here, we already hold the mm semaphore
|
|
|
|
*/
|
[PATCH] mm: page fault handlers tidyup
2005-10-30 01:15:59 +00:00
|
|
|
int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
2005-04-16 22:20:36 +00:00
|
|
|
unsigned long address, int write_access)
|
|
|
|
{
|
|
|
|
pgd_t *pgd;
|
|
|
|
pud_t *pud;
|
|
|
|
pmd_t *pmd;
|
|
|
|
pte_t *pte;
|
|
|
|
|
|
|
|
__set_current_state(TASK_RUNNING);
|
|
|
|
|
2006-06-30 08:55:45 +00:00
|
|
|
count_vm_event(PGFAULT);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-10-20 15:24:28 +00:00
|
|
|
if (unlikely(is_vm_hugetlb_page(vma)))
|
|
|
|
return hugetlb_fault(mm, vma, address, write_access);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
pgd = pgd_offset(mm, address);
|
|
|
|
pud = pud_alloc(mm, pgd, address);
|
|
|
|
if (!pud)
|
2005-10-30 01:16:23 +00:00
|
|
|
return VM_FAULT_OOM;
|
2005-04-16 22:20:36 +00:00
|
|
|
pmd = pmd_alloc(mm, pud, address);
|
|
|
|
if (!pmd)
|
2005-10-30 01:16:23 +00:00
|
|
|
return VM_FAULT_OOM;
|
2005-04-16 22:20:36 +00:00
|
|
|
pte = pte_alloc_map(mm, pmd, address);
|
|
|
|
if (!pte)
|
2005-10-30 01:16:23 +00:00
|
|
|
return VM_FAULT_OOM;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-10-30 01:16:23 +00:00
|
|
|
return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2005-11-15 20:53:48 +00:00
|
|
|
EXPORT_SYMBOL_GPL(__handle_mm_fault);
|
|
|
|
|
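For context, roughly how an architecture fault handler of this era consumes the return value (a sketch modelled on the i386 do_page_fault(); handle_mm_fault() is the <linux/mm.h> wrapper that masks VM_FAULT_WRITE out of __handle_mm_fault()'s result, and the goto labels live in the surrounding arch code):

	switch (handle_mm_fault(mm, vma, address, write)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;		/* satisfied without waiting for I/O */
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;		/* had to wait for I/O, e.g. swap-in */
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}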
2005-04-16 22:20:36 +00:00
|
|
|
#ifndef __PAGETABLE_PUD_FOLDED
|
|
|
|
/*
|
|
|
|
* Allocate page upper directory.
|
[PATCH] mm: init_mm without ptlock
First step in pushing down the page_table_lock. init_mm.page_table_lock has
been used throughout the architectures (usually for ioremap): not to serialize
kernel address space allocation (that's usually vmlist_lock), but because
pud_alloc,pmd_alloc,pte_alloc_kernel expect caller holds it.
Reverse that: don't lock or unlock init_mm.page_table_lock in any of the
architectures; instead rely on pud_alloc,pmd_alloc,pte_alloc_kernel to take
and drop it when allocating a new one, to check lest a racing task already
did. Similarly no page_table_lock in vmalloc's map_vm_area.
Some temporary ugliness in __pud_alloc and __pmd_alloc: since they also handle
user mms, which are converted only by a later patch, for now they have to lock
differently according to whether or not it's init_mm.
If sources get muddled, there's a danger that an arch source taking
init_mm.page_table_lock will be mixed with common source also taking it (or
neither take it). So break the rules and make another change, which should
break the build for such a mismatch: remove the redundant mm arg from
pte_alloc_kernel (ppc64 scrapped its distinct ioremap_mm in 2.6.13).
Exceptions: arm26 used pte_alloc_kernel on user mm, now pte_alloc_map; ia64
used pte_alloc_map on init_mm, now pte_alloc_kernel; parisc had bad args to
pmd_alloc and pte_alloc_kernel in unused USE_HPPA_IOREMAP code; ppc64
map_io_page forgot to unlock on failure; ppc mmu_mapin_ram and ppc64 im_free
took page_table_lock for no good reason.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 01:16:21 +00:00
|
|
|
* We've already handled the fast-path in-line.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2005-10-30 01:16:22 +00:00
|
|
|
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2005-10-30 01:16:23 +00:00
|
|
|
pud_t *new = pud_alloc_one(mm, address);
|
|
|
|
if (!new)
|
2005-10-30 01:16:22 +00:00
|
|
|
return -ENOMEM;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
[PATCH] mm: init_mm without ptlock
2005-10-30 01:16:21 +00:00
|
|
|
spin_lock(&mm->page_table_lock);
|
2005-10-30 01:16:22 +00:00
|
|
|
if (pgd_present(*pgd)) /* Another has populated it */
|
2005-04-16 22:20:36 +00:00
|
|
|
pud_free(new);
|
2005-10-30 01:16:22 +00:00
|
|
|
else
|
|
|
|
pgd_populate(mm, pgd, new);
|
2005-10-30 01:16:23 +00:00
|
|
|
spin_unlock(&mm->page_table_lock);
|
2005-10-30 01:16:22 +00:00
|
|
|
return 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
[PATCH] Workaround for gcc 2.96 (undefined references)
LD .tmp_vmlinux1
mm/built-in.o(.text+0x100d6): In function `copy_page_range':
: undefined reference to `__pud_alloc'
mm/built-in.o(.text+0x1010b): In function `copy_page_range':
: undefined reference to `__pmd_alloc'
mm/built-in.o(.text+0x11ef4): In function `__handle_mm_fault':
: undefined reference to `__pud_alloc'
fs/built-in.o(.text+0xc930): In function `install_arg_page':
: undefined reference to `__pud_alloc'
make: *** [.tmp_vmlinux1] Error 1
Those missing references in mm/memory.c arise from this code in
include/linux/mm.h, combined with the fact that __PGTABLE_PMD_FOLDED and
__PGTABLE_PUD_FOLDED are both set and __ARCH_HAS_4LEVEL_HACK is not:
/*
* The following ifdef needed to get the 4level-fixup.h header to work.
* Remove it when 4level-fixup.h has been removed.
*/
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
NULL: pud_offset(pgd, address);
}
static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
With my configuration the pgd_none and pud_none routines are inlines
returning a constant 0. Apparently the old compiler avoids generating
calls to __pud_alloc and __pmd_alloc but still lists them as undefined
references in the module's symbol table.
I don't know which change caused this problem. I think it was added
somewhere between 2.6.14 and 2.6.15-rc1, because I remember building
several 2.6.14-rc kernels without difficulty. However I can't point to an
individual culprit.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-11-28 21:43:44 +00:00
|
|
|
#else
|
|
|
|
/* Workaround for gcc 2.96 */
|
|
|
|
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif /* __PAGETABLE_PUD_FOLDED */
|
|
|
|
|
|
|
|
#ifndef __PAGETABLE_PMD_FOLDED
|
|
|
|
/*
|
|
|
|
* Allocate page middle directory.
|
[PATCH] mm: init_mm without ptlock
2005-10-30 01:16:21 +00:00
|
|
|
* We've already handled the fast-path in-line.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2005-10-30 01:16:22 +00:00
|
|
|
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2005-10-30 01:16:23 +00:00
|
|
|
pmd_t *new = pmd_alloc_one(mm, address);
|
|
|
|
if (!new)
|
2005-10-30 01:16:22 +00:00
|
|
|
return -ENOMEM;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
[PATCH] mm: init_mm without ptlock
2005-10-30 01:16:21 +00:00
|
|
|
spin_lock(&mm->page_table_lock);
|
2005-04-16 22:20:36 +00:00
|
|
|
#ifndef __ARCH_HAS_4LEVEL_HACK
|
2005-10-30 01:16:22 +00:00
|
|
|
if (pud_present(*pud)) /* Another has populated it */
|
2005-04-16 22:20:36 +00:00
|
|
|
pmd_free(new);
|
2005-10-30 01:16:22 +00:00
|
|
|
else
|
|
|
|
pud_populate(mm, pud, new);
|
2005-04-16 22:20:36 +00:00
|
|
|
#else
|
2005-10-30 01:16:22 +00:00
|
|
|
if (pgd_present(*pud)) /* Another has populated it */
|
2005-04-16 22:20:36 +00:00
|
|
|
pmd_free(new);
|
2005-10-30 01:16:22 +00:00
|
|
|
else
|
|
|
|
pgd_populate(mm, pud, new);
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif /* __ARCH_HAS_4LEVEL_HACK */
|
2005-10-30 01:16:23 +00:00
|
|
|
spin_unlock(&mm->page_table_lock);
|
2005-10-30 01:16:22 +00:00
|
|
|
return 0;
|
[PATCH] Workaround for gcc 2.96 (undefined references)
2005-11-28 21:43:44 +00:00
|
|
|
}
|
|
|
|
#else
|
|
|
|
/* Workaround for gcc 2.96 */
|
|
|
|
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
|
|
|
|
{
|
|
|
|
return 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
#endif /* __PAGETABLE_PMD_FOLDED */
|
|
|
|
|
|
|
|
int make_pages_present(unsigned long addr, unsigned long end)
|
|
|
|
{
|
|
|
|
int ret, len, write;
|
|
|
|
struct vm_area_struct * vma;
|
|
|
|
|
|
|
|
vma = find_vma(current->mm, addr);
|
|
|
|
if (!vma)
|
|
|
|
return -1;
|
|
|
|
write = (vma->vm_flags & VM_WRITE) != 0;
|
2006-03-26 16:30:52 +00:00
|
|
|
BUG_ON(addr >= end);
|
|
|
|
BUG_ON(end > vma->vm_end);
|
2005-04-16 22:20:36 +00:00
|
|
|
len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
|
|
|
|
ret = get_user_pages(current, current->mm, addr,
|
|
|
|
len, write, 0, NULL, NULL);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
return ret == len ? 0 : -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
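make_pages_present() is what mlock-style code of this era uses to pre-fault a freshly VM_LOCKED range; a simplified sketch of such a caller (an assumption about the call site, run with mmap_sem held):

static int prefault_locked_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	/* get_user_pages() cannot fault VM_IO mappings in, so skip them */
	if (vma->vm_flags & VM_IO)
		return 0;
	return make_pages_present(start, end);
}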
/*
|
|
|
|
* Map a vmalloc()-space virtual address to the physical page.
|
|
|
|
*/
|
|
|
|
struct page * vmalloc_to_page(void * vmalloc_addr)
|
|
|
|
{
|
|
|
|
unsigned long addr = (unsigned long) vmalloc_addr;
|
|
|
|
struct page *page = NULL;
|
|
|
|
pgd_t *pgd = pgd_offset_k(addr);
|
|
|
|
pud_t *pud;
|
|
|
|
pmd_t *pmd;
|
|
|
|
pte_t *ptep, pte;
|
|
|
|
|
|
|
|
if (!pgd_none(*pgd)) {
|
|
|
|
pud = pud_offset(pgd, addr);
|
|
|
|
if (!pud_none(*pud)) {
|
|
|
|
pmd = pmd_offset(pud, addr);
|
|
|
|
if (!pmd_none(*pmd)) {
|
|
|
|
ptep = pte_offset_map(pmd, addr);
|
|
|
|
pte = *ptep;
|
|
|
|
if (pte_present(pte))
|
|
|
|
page = pte_page(pte);
|
|
|
|
pte_unmap(ptep);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return page;
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL(vmalloc_to_page);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Map a vmalloc()-space virtual address to the physical page frame number.
|
|
|
|
*/
|
|
|
|
unsigned long vmalloc_to_pfn(void * vmalloc_addr)
|
|
|
|
{
|
|
|
|
return page_to_pfn(vmalloc_to_page(vmalloc_addr));
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL(vmalloc_to_pfn);
|
|
|
|
|
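A typical consumer of vmalloc_to_page() is a driver ->nopage method that lets userspace mmap a vmalloc()ed buffer; a hypothetical sketch (vbuf and VBUF_SIZE are invented, and the get_page() matters because the fault path takes over that reference):

static void *vbuf;			/* hypothetical vmalloc()ed buffer */
#define VBUF_SIZE	(256 * PAGE_SIZE)

static struct page *vbuf_nopage(struct vm_area_struct *vma,
				unsigned long address, int *type)
{
	unsigned long offset;
	struct page *page;

	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= VBUF_SIZE)
		return NOPAGE_SIGBUS;

	page = vmalloc_to_page((char *)vbuf + offset);
	get_page(page);
	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}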
|
|
|
#if !defined(__HAVE_ARCH_GATE_AREA)
|
|
|
|
|
|
|
|
#if defined(AT_SYSINFO_EHDR)
|
2005-09-10 07:26:28 +00:00
|
|
|
static struct vm_area_struct gate_vma;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
static int __init gate_vma_init(void)
|
|
|
|
{
|
|
|
|
gate_vma.vm_mm = NULL;
|
|
|
|
gate_vma.vm_start = FIXADDR_USER_START;
|
|
|
|
gate_vma.vm_end = FIXADDR_USER_END;
|
2007-01-26 08:56:47 +00:00
|
|
|
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
|
|
|
|
gate_vma.vm_page_prot = __P101;
|
2007-01-26 08:56:49 +00:00
|
|
|
/*
|
|
|
|
* Make sure the vDSO gets into every core dump.
|
|
|
|
* Dumping its contents makes post-mortem analysis fully interpretable later
|
|
|
|
* without matching up the same kernel and hardware config to see
|
|
|
|
* what PC values meant.
|
|
|
|
*/
|
|
|
|
gate_vma.vm_flags |= VM_ALWAYSDUMP;
|
2005-04-16 22:20:36 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
__initcall(gate_vma_init);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
|
|
|
|
{
|
|
|
|
#ifdef AT_SYSINFO_EHDR
|
|
|
|
return &gate_vma;
|
|
|
|
#else
|
|
|
|
return NULL;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
int in_gate_area_no_task(unsigned long addr)
|
|
|
|
{
|
|
|
|
#ifdef AT_SYSINFO_EHDR
|
|
|
|
if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
|
|
|
|
return 1;
|
|
|
|
#endif
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* __HAVE_ARCH_GATE_AREA */
|
2006-09-27 08:50:15 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Access another process' address space.
|
|
|
|
* Source/target buffer must be in kernel space.
|
|
|
|
* Do not walk the page table directly; use get_user_pages() instead.
|
|
|
|
*/
|
|
|
|
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
|
|
|
|
{
|
|
|
|
struct mm_struct *mm;
|
|
|
|
struct vm_area_struct *vma;
|
|
|
|
struct page *page;
|
|
|
|
void *old_buf = buf;
|
|
|
|
|
|
|
|
mm = get_task_mm(tsk);
|
|
|
|
if (!mm)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
down_read(&mm->mmap_sem);
|
|
|
|
/* ignore errors, just check how much was successfully transferred */
|
|
|
|
while (len) {
|
|
|
|
int bytes, ret, offset;
|
|
|
|
void *maddr;
|
|
|
|
|
|
|
|
ret = get_user_pages(tsk, mm, addr, 1,
|
|
|
|
write, 1, &page, &vma);
|
|
|
|
if (ret <= 0)
|
|
|
|
break;
|
|
|
|
|
|
|
|
bytes = len;
|
|
|
|
offset = addr & (PAGE_SIZE-1);
|
|
|
|
if (bytes > PAGE_SIZE-offset)
|
|
|
|
bytes = PAGE_SIZE-offset;
|
|
|
|
|
|
|
|
maddr = kmap(page);
|
|
|
|
if (write) {
|
|
|
|
copy_to_user_page(vma, page, addr,
|
|
|
|
maddr + offset, buf, bytes);
|
|
|
|
set_page_dirty_lock(page);
|
|
|
|
} else {
|
|
|
|
copy_from_user_page(vma, page, addr,
|
|
|
|
buf, maddr + offset, bytes);
|
|
|
|
}
|
|
|
|
kunmap(page);
|
|
|
|
page_cache_release(page);
|
|
|
|
len -= bytes;
|
|
|
|
buf += bytes;
|
|
|
|
addr += bytes;
|
|
|
|
}
|
|
|
|
up_read(&mm->mmap_sem);
|
|
|
|
mmput(mm);
|
|
|
|
|
|
|
|
return buf - old_buf;
|
|
|
|
}
|
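The classic user of access_process_vm() is ptrace; roughly how a PTRACE_PEEKDATA-style read of one word looks (a sketch; the helper name is invented):

static int peek_user_word(struct task_struct *child, unsigned long addr,
			  unsigned long *word)
{
	int copied = access_process_vm(child, addr, word, sizeof(*word), 0);

	return copied == sizeof(*word) ? 0 : -EIO;
}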