2005-04-16 22:20:36 +00:00
/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 *  (see also entry.S and others).
 *  Fork is rather simple, once you get the hang of it, but the memory
 *  management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
2008-04-24 11:44:08 +00:00
#include <linux/fdtable.h>
2008-06-30 18:42:08 +00:00
#include <linux/iocontext.h>
2005-04-16 22:20:36 +00:00
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
mmu-notifiers: core
With KVM/GFP/XPMEM there isn't just the primary CPU MMU pointing to pages.
There are secondary MMUs (with secondary sptes and secondary tlbs) too.
sptes in the kvm case are shadow pagetables, but when I say spte in
mmu-notifier context, I mean "secondary pte". In GRU case there's no
actual secondary pte and there's only a secondary tlb because the GRU
secondary MMU has no knowledge about sptes and every secondary tlb miss
event in the MMU always generates a page fault that has to be resolved by
the CPU (this is not the case with KVM, where a secondary tlb miss will
walk sptes in hardware and refill the secondary tlb transparently to
software if the corresponding spte is present). In the same way that
zap_page_range has to invalidate the pte before freeing the page, the spte
(and secondary tlb) must also be invalidated before any page is freed and
reused.
Currently we take a page_count pin on every page mapped by sptes, but that
means the pages can't be swapped whenever they're mapped by any spte
because they're part of the guest working set. Furthermore, a spte unmap
event can immediately lead to the page being freed when the pin is released
(requiring the same complex and relatively slow tlb_gather SMP-safe logic
we have in zap_page_range, which could be avoided completely if the spte
unmap event didn't require an unpin of the page previously mapped in the
secondary MMU).
The mmu notifiers allow kvm/GRU/XPMEM to attach to the tsk->mm and know
when the VM is swapping or freeing or doing anything on the primary MMU so
that the secondary MMU code can drop sptes before the pages are freed,
avoiding all page pinning and allowing 100% reliable swapping of guest
physical address space. Furthermore, it spares the code that tears down
the secondary MMU mappings from implementing tlb_gather-like logic, as
zap_page_range does, which would require many IPIs to flush the other
CPUs' tlbs for each fixed number of sptes unmapped.
To give an example: if what happens on the primary MMU is a protection
downgrade (from writeable to wrprotect), the secondary MMU mappings will be
invalidated, and the next secondary-mmu page fault will call
get_user_pages; if it calls get_user_pages with write=1 that triggers a
do_wp_page, and an updated spte or secondary-tlb mapping will be
re-established on the copied page. If it is a guest read and it calls
get_user_pages with write=0, a readonly spte or readonly tlb mapping is set
up instead. This is just an example.
This allows any page pointed to by any pte (and in turn visible in the
primary CPU MMU) to be mapped into a secondary MMU (be it a pure tlb like
GRU, or a full MMU with both sptes and a secondary tlb like the
shadow-pagetable layer in kvm), or into a remote DMA in software like XPMEM
(hence the need to schedule in the XPMEM code to send the invalidate to the
remote node, while there is no need to schedule in kvm/gru as it's an
immediate event like invalidating a primary-mmu pte).
At least for KVM without this patch it's impossible to swap guests
reliably. And having this feature and removing the page pin allows
several other optimizations that simplify life considerably.
Dependencies:
1) mm_take_all_locks() to register the mmu notifier when the whole VM
isn't doing anything with "mm". This allows mmu notifier users to keep
track if the VM is in the middle of the invalidate_range_begin/end
critical section with an atomic counter that is increased in range_begin
and decreased in range_end. No secondary MMU page fault is allowed to map
any spte or secondary tlb reference while the VM is in the middle of
range_begin/end, as any page returned by get_user_pages in that critical
section could later be freed immediately without any further
->invalidate_page notification (invalidate_range_begin/end works on
ranges and ->invalidate_page isn't called immediately before freeing
the page). To stop all page freeing and pagetable overwrites, the
mmap_sem must be taken in write mode and all other anon_vma/i_mmap
locks must be taken too.
2) It'd be a waste to add branches in the VM if nobody could possibly
run KVM/GRU/XPMEM on the kernel, so mmu notifiers will only be enabled if
CONFIG_KVM=m/y. In the current kernel kvm won't yet take advantage of
mmu notifiers, but this already allows compiling a KVM external module
against a kernel with mmu notifiers enabled, and from the next pull from
kvm.git we'll start using them. GRU/XPMEM will also be able to
continue development by enabling KVM=m in their config, until they
submit all GRU/XPMEM GPLv2 code to the mainline kernel. Then they can
also enable MMU_NOTIFIER in the same way KVM does (even if KVM=n).
This guarantees nobody selects MMU_NOTIFIER=y if KVM, GRU and XPMEM
are all =n.
The mmu_notifier_register call can fail because mm_take_all_locks may be
interrupted by a signal and return -EINTR. Because mmu_notifier_register
is used when a driver starts up, a failure can be handled gracefully. Here
is an example of the change applied to kvm to register the mmu notifiers.
Usually other allocations are required at driver startup anyway, so
-ENOMEM failure paths already exist.
	struct kvm *kvm_arch_create_vm(void)
	{
		struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	+	int err;

		if (!kvm)
			return ERR_PTR(-ENOMEM);
		INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	+	kvm->arch.mmu_notifier.ops = &kvm_mmu_notifier_ops;
	+	err = mmu_notifier_register(&kvm->arch.mmu_notifier, current->mm);
	+	if (err) {
	+		kfree(kvm);
	+		return ERR_PTR(err);
	+	}
	+
		return kvm;
	}
mmu_notifier_unregister returns void and is reliable.
The patch also adds a few needed but missing includes that would otherwise
prevent the kernel from compiling after these changes on non-x86 archs
(x86 didn't need them by luck).
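For the GRU/XPMEM-style users, the registration side looks much the same.
Below is a minimal, hypothetical sketch (the driver structure, its fields
and my_drop_secondary_mappings() are made up for illustration) of how a
secondary-MMU driver could implement the invalidate_range_start/end
callbacks (called range_begin/end above) together with the atomic counter
described in dependency 1):

	#include <linux/mmu_notifier.h>
	#include <linux/sched.h>

	/* Hypothetical driver state: one secondary MMU attached to one mm. */
	struct my_secondary_mmu {
		struct mmu_notifier	mn;
		atomic_t		range_active;	/* inside start/end? */
	};

	/* Driver-specific teardown of sptes/secondary-tlb entries (stubbed). */
	static void my_drop_secondary_mappings(struct my_secondary_mmu *smmu,
					       unsigned long start,
					       unsigned long end)
	{
		/* flush the secondary tlb / clear sptes for [start, end) */
	}

	static void my_invalidate_range_start(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
	{
		struct my_secondary_mmu *smmu =
			container_of(mn, struct my_secondary_mmu, mn);

		/* Block secondary-MMU faults from mapping this range ... */
		atomic_inc(&smmu->range_active);
		/* ... and drop any sptes/secondary-tlb entries covering it. */
		my_drop_secondary_mappings(smmu, start, end);
	}

	static void my_invalidate_range_end(struct mmu_notifier *mn,
					    struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
	{
		struct my_secondary_mmu *smmu =
			container_of(mn, struct my_secondary_mmu, mn);

		atomic_dec(&smmu->range_active);
	}

	static const struct mmu_notifier_ops my_mmu_notifier_ops = {
		.invalidate_range_start	= my_invalidate_range_start,
		.invalidate_range_end	= my_invalidate_range_end,
	};

	/* Registration mirrors the kvm example; it may fail with -EINTR. */
	static int my_smmu_attach(struct my_secondary_mmu *smmu)
	{
		smmu->mn.ops = &my_mmu_notifier_ops;
		return mmu_notifier_register(&smmu->mn, current->mm);
	}

A secondary-MMU page fault handler would then refuse to establish new
mappings while range_active is non-zero, as required by dependency 1).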
[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: fix mm/filemap_xip.c build]
[akpm@linux-foundation.org: fix mm/mmu_notifier.c build]
Signed-off-by: Andrea Arcangeli <andrea@qumranet.com>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Jack Steiner <steiner@sgi.com>
Cc: Robin Holt <holt@sgi.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Kanoj Sarcar <kanojsarcar@yahoo.com>
Cc: Roland Dreier <rdreier@cisco.com>
Cc: Steve Wise <swise@opengridcomputing.com>
Cc: Avi Kivity <avi@qumranet.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Anthony Liguori <aliguori@us.ibm.com>
Cc: Chris Wright <chrisw@redhat.com>
Cc: Marcelo Tosatti <marcelo@kvack.org>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Cc: Izik Eidus <izike@qumranet.com>
Cc: Anthony Liguori <aliguori@us.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-07-28 22:46:29 +00:00
#include <linux/mmu_notifier.h>
2005-04-16 22:20:36 +00:00
#include <linux/fs.h>
mm: per-thread vma caching
This patch is a continuation of efforts trying to optimize find_vma(),
avoiding potentially expensive rbtree walks to locate a vma upon faults.
The original approach (https://lkml.org/lkml/2013/11/1/410), where the
largest vma was also cached, ended up being too specific and random,
so further comparisons with other approaches were needed. There are
two things to consider when dealing with this: the cache hit rate and
the latency of find_vma(). Improving the hit rate does not necessarily
translate into finding the vma any faster, as the overhead of any fancy
caching scheme can be too high to consider.
We currently cache the last used vma for the whole address space, which
provides a nice optimization, reducing the total cycles in find_vma() by
up to 250%, for workloads with good locality. On the other hand, this
simple scheme is pretty much useless for workloads with poor locality.
Analyzing ebizzy runs shows that, no matter how many threads are
running, the mmap_cache hit rate is less than 2%, and in many situations
below 1%.
The proposed approach is to replace this scheme with a small per-thread
cache, maximizing hit rates at a very low maintenance cost.
Invalidations are performed by simply bumping up a 32-bit sequence
number. The only expensive operation is in the rare case of a seq
number overflow, where all caches that share the same address space are
flushed. Upon a miss, the proposed replacement policy is based on the
page number that contains the virtual address in question. Concretely,
the following results are seen on an 80 core, 8 socket x86-64 box:
1) System bootup: Most programs are single threaded, so the per-thread
scheme does improve ~50% hit rate by just adding a few more slots to
the cache.
+----------------+----------+------------------+
| caching scheme | hit-rate | cycles (billion) |
+----------------+----------+------------------+
| baseline | 50.61% | 19.90 |
| patched | 73.45% | 13.58 |
+----------------+----------+------------------+
2) Kernel build: This one is already pretty good with the current
approach as we're dealing with good locality.
+----------------+----------+------------------+
| caching scheme | hit-rate | cycles (billion) |
+----------------+----------+------------------+
| baseline | 75.28% | 11.03 |
| patched | 88.09% | 9.31 |
+----------------+----------+------------------+
3) Oracle 11g Data Mining (4k pages): Similar to the kernel build workload.
+----------------+----------+------------------+
| caching scheme | hit-rate | cycles (billion) |
+----------------+----------+------------------+
| baseline | 70.66% | 17.14 |
| patched | 91.15% | 12.57 |
+----------------+----------+------------------+
4) Ebizzy: There's a fair amount of variation from run to run, but this
approach always shows nearly perfect hit rates, while baseline is just
about non-existent. The cycle counts can fluctuate anywhere from ~60 to
~116 billion for the baseline scheme, but this approach reduces them
considerably. For instance, with 80 threads:
+----------------+----------+------------------+
| caching scheme | hit-rate | cycles (billion) |
+----------------+----------+------------------+
| baseline | 1.06% | 91.54 |
| patched | 99.97% | 14.18 |
+----------------+----------+------------------+
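To make the scheme concrete, here is a simplified sketch of the lookup and
invalidation paths. The demo_* names are illustrative only; the real
implementation lives in mm/vmacache.c and include/linux/vmacache.h, using
the per-task vmacache[]/vmacache_seqnum fields added by this patch.

	#include <linux/mm.h>
	#include <linux/sched.h>

	/* Illustrative constants, mirroring the small per-thread cache. */
	#define DEMO_VMACACHE_BITS	2
	#define DEMO_VMACACHE_SIZE	(1U << DEMO_VMACACHE_BITS)
	#define DEMO_VMACACHE_MASK	(DEMO_VMACACHE_SIZE - 1)

	/* Replacement policy keys off the page number of the address. */
	static inline int demo_vmacache_hash(unsigned long addr)
	{
		return (addr >> PAGE_SHIFT) & DEMO_VMACACHE_MASK;
	}

	static struct vm_area_struct *demo_vmacache_find(struct mm_struct *mm,
							 unsigned long addr)
	{
		struct vm_area_struct *vma;

		/* A stale per-thread sequence number invalidates all slots. */
		if (current->vmacache_seqnum != mm->vmacache_seqnum)
			return NULL;

		vma = current->vmacache[demo_vmacache_hash(addr)];
		if (vma && vma->vm_start <= addr && addr < vma->vm_end)
			return vma;
		return NULL;
	}

	/*
	 * Invalidation is just a 32-bit bump; only on overflow do all
	 * threads sharing the mm need their caches flushed (the rare,
	 * expensive case mentioned above).
	 */
	static inline void demo_vmacache_invalidate(struct mm_struct *mm)
	{
		mm->vmacache_seqnum++;
	}

On a miss, find_vma() falls back to the rbtree walk and stores the result
into the slot picked by the same hash.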
[akpm@linux-foundation.org: fix nommu build, per Davidlohr]
[akpm@linux-foundation.org: document vmacache_valid() logic]
[akpm@linux-foundation.org: attempt to untangle header files]
[akpm@linux-foundation.org: add vmacache_find() BUG_ON]
[hughd@google.com: add vmacache_valid_mm() (from Oleg)]
[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: adjust and enhance comments]
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: Michel Lespinasse <walken@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Tested-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2014-04-07 22:37:25 +00:00
#include <linux/mm.h>
#include <linux/vmacache.h>
2006-10-02 09:18:06 +00:00
#include <linux/nsproxy.h>
2006-01-11 20:17:46 +00:00
#include <linux/capability.h>
2005-04-16 22:20:36 +00:00
#include <linux/cpu.h>
2007-10-19 06:39:33 +00:00
#include <linux/cgroup.h>
2005-04-16 22:20:36 +00:00
#include <linux/security.h>
2008-07-24 04:27:23 +00:00
#include <linux/hugetlb.h>
seccomp: add system call filtering using BPF
[This patch depends on luto@mit.edu's no_new_privs patch:
https://lkml.org/lkml/2012/1/30/264
The whole series including Andrew's patches can be found here:
https://github.com/redpig/linux/tree/seccomp
Complete diff here:
https://github.com/redpig/linux/compare/1dc65fed...seccomp
]
This patch adds support for seccomp mode 2. Mode 2 introduces the
ability for unprivileged processes to install system call filtering
policy expressed in terms of a Berkeley Packet Filter (BPF) program.
This program is evaluated in the kernel for each system call
the task makes, and it computes a result based on data in the format
of struct seccomp_data.
A filter program may be installed by calling:
	struct sock_fprog fprog = { ... };
	...
	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &fprog);
The return value of the filter program determines if the system call is
allowed to proceed or denied. If the first filter program installed
allows prctl(2) calls, then the above call may be made repeatedly
by a task to further reduce its access to the kernel. All attached
programs must be evaluated before a system call will be allowed to
proceed.
Filter programs will be inherited across fork/clone and execve.
However, if the task attaching the filter is unprivileged
(!CAP_SYS_ADMIN) the no_new_privs bit will be set on the task. This
ensures that unprivileged tasks cannot attach filters that affect
privileged tasks (e.g., setuid binary).
There are a number of benefits to this approach, a few of which are
as follows:
- BPF has been exposed to userland for a long time
- BPF optimization (and JIT'ing) are well understood
- Userland already knows its ABI: system call numbers and desired
arguments
- No time-of-check-time-of-use vulnerable data accesses are possible.
- system call arguments are loaded on access only to minimize copying
required for system call policy decisions.
Mode 2 support is restricted to architectures that enable
HAVE_ARCH_SECCOMP_FILTER. In this patch, the primary dependency is on
syscall_get_arguments(). The full desired scope of this feature will
add a few minor additional requirements expressed later in this series.
Based on discussion, SECCOMP_RET_ERRNO and SECCOMP_RET_TRACE seem to be
the desired additional functionality.
No architectures are enabled in this patch.
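As a userspace illustration of the interface described above, here is a
minimal sketch of a filter that allows only read, write, exit_group and
rt_sigreturn and kills the task on anything else. A real filter should
also check seccomp_data->arch; PR_SET_NO_NEW_PRIVS comes from the
no_new_privs patch this series depends on, so older headers may not
define it.

	#include <stddef.h>
	#include <unistd.h>
	#include <sys/prctl.h>
	#include <sys/syscall.h>
	#include <linux/filter.h>
	#include <linux/seccomp.h>

	#define ALLOW_SYSCALL(name) \
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_##name, 0, 1), \
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW)

	int install_filter(void)
	{
		struct sock_filter filter[] = {
			/* Load the syscall number from struct seccomp_data. */
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 offsetof(struct seccomp_data, nr)),
			ALLOW_SYSCALL(read),
			ALLOW_SYSCALL(write),
			ALLOW_SYSCALL(exit_group),
			ALLOW_SYSCALL(rt_sigreturn),
			/* Everything else is fatal. */
			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
		};
		struct sock_fprog prog = {
			.len = (unsigned short)(sizeof(filter) / sizeof(filter[0])),
			.filter = filter,
		};

		/* Required when the caller lacks CAP_SYS_ADMIN (see above). */
		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
			return -1;
		return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	}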
Signed-off-by: Will Drewry <wad@chromium.org>
Acked-by: Serge Hallyn <serge.hallyn@canonical.com>
Reviewed-by: Indan Zupancic <indan@nul.nu>
Acked-by: Eric Paris <eparis@redhat.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
v18: - rebase to v3.4-rc2
- s/chk/check/ (akpm@linux-foundation.org,jmorris@namei.org)
- allocate with GFP_KERNEL|__GFP_NOWARN (indan@nul.nu)
- add a comment for get_u32 regarding endianness (akpm@)
- fix other typos, style mistakes (akpm@)
- added acked-by
v17: - properly guard seccomp filter needed headers (leann@ubuntu.com)
- tighten return mask to 0x7fff0000
v16: - no change
v15: - add a 4 instr penalty when counting a path to account for seccomp_filter
size (indan@nul.nu)
- drop the max insns to 256KB (indan@nul.nu)
- return ENOMEM if the max insns limit has been hit (indan@nul.nu)
- move IP checks after args (indan@nul.nu)
- drop !user_filter check (indan@nul.nu)
- only allow explicit bpf codes (indan@nul.nu)
- exit_code -> exit_sig
v14: - put/get_seccomp_filter takes struct task_struct
(indan@nul.nu,keescook@chromium.org)
- adds seccomp_chk_filter and drops general bpf_run/chk_filter user
- add seccomp_bpf_load for use by net/core/filter.c
- lower max per-process/per-hierarchy: 1MB
- moved nnp/capability check prior to allocation
(all of the above: indan@nul.nu)
v13: - rebase on to 88ebdda6159ffc15699f204c33feb3e431bf9bdc
v12: - added a maximum instruction count per path (indan@nul.nu,oleg@redhat.com)
- removed copy_seccomp (keescook@chromium.org,indan@nul.nu)
- reworded the prctl_set_seccomp comment (indan@nul.nu)
v11: - reorder struct seccomp_data to allow future args expansion (hpa@zytor.com)
- style clean up, @compat dropped, compat_sock_fprog32 (indan@nul.nu)
- do_exit(SIGSYS) (keescook@chromium.org, luto@mit.edu)
- pare down Kconfig doc reference.
- extra comment clean up
v10: - seccomp_data has changed again to be more aesthetically pleasing
(hpa@zytor.com)
- calling convention is noted in a new u32 field using syscall_get_arch.
This allows for cross-calling convention tasks to use seccomp filters.
(hpa@zytor.com)
- lots of clean up (thanks, Indan!)
v9: - n/a
v8: - use bpf_chk_filter, bpf_run_filter. update load_fns
- Lots of fixes courtesy of indan@nul.nu:
-- fix up load behavior, compat fixups, and merge alloc code,
-- renamed pc and dropped __packed, use bool compat.
-- Added a hidden CONFIG_SECCOMP_FILTER to synthesize non-arch
dependencies
v7: (massive overhaul thanks to Indan, others)
- added CONFIG_HAVE_ARCH_SECCOMP_FILTER
- merged into seccomp.c
- minimal seccomp_filter.h
- no config option (part of seccomp)
- no new prctl
- doesn't break seccomp on systems without asm/syscall.h
(works but arg access always fails)
- dropped seccomp_init_task, extra free functions, ...
- dropped the no-asm/syscall.h code paths
- merges with network sk_run_filter and sk_chk_filter
v6: - fix memory leak on attach compat check failure
- require no_new_privs || CAP_SYS_ADMIN prior to filter
installation. (luto@mit.edu)
- s/seccomp_struct_/seccomp_/ for macros/functions (amwang@redhat.com)
- cleaned up Kconfig (amwang@redhat.com)
- on block, note if the call was compat (so the # means something)
v5: - uses syscall_get_arguments
(indan@nul.nu,oleg@redhat.com, mcgrathr@chromium.org)
- uses union-based arg storage with hi/lo struct to
handle endianness. Compromises between the two alternate
proposals to minimize extra arg shuffling and account for
endianness assuming userspace uses offsetof().
(mcgrathr@chromium.org, indan@nul.nu)
- update Kconfig description
- add include/seccomp_filter.h and add its installation
- (naive) on-demand syscall argument loading
- drop seccomp_t (eparis@redhat.com)
v4: - adjusted prctl to make room for PR_[SG]ET_NO_NEW_PRIVS
- now uses current->no_new_privs
(luto@mit.edu,torvalds@linux-foundation.com)
- assign names to seccomp modes (rdunlap@xenotime.net)
- fix style issues (rdunlap@xenotime.net)
- reworded Kconfig entry (rdunlap@xenotime.net)
v3: - macros to inline (oleg@redhat.com)
- init_task behavior fixed (oleg@redhat.com)
- drop creator entry and extra NULL check (oleg@redhat.com)
- alloc returns -EINVAL on bad sizing (serge.hallyn@canonical.com)
- adds tentative use of "always_unprivileged" as per
torvalds@linux-foundation.org and luto@mit.edu
v2: - (patch 2 only)
Signed-off-by: James Morris <james.l.morris@oracle.com>
2012-04-12 21:47:57 +00:00
#include <linux/seccomp.h>
2005-04-16 22:20:36 +00:00
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
2008-11-15 18:20:36 +00:00
#include <linux/compat.h>
2011-03-22 23:30:44 +00:00
#include <linux/kthread.h>
2006-12-10 10:19:19 +00:00
#include <linux/task_io_accounting_ops.h>
2005-09-09 20:04:13 +00:00
#include <linux/rcupdate.h>
2005-04-16 22:20:36 +00:00
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
2008-02-07 08:13:51 +00:00
#include <linux/memcontrol.h>
2008-11-23 05:22:56 +00:00
#include <linux/ftrace.h>
2012-05-10 20:01:45 +00:00
#include <linux/proc_fs.h>
2005-04-16 22:20:36 +00:00
#include <linux/profile.h>
#include <linux/rmap.h>
2009-09-22 00:01:57 +00:00
#include <linux/ksm.h>
2005-04-16 22:20:36 +00:00
#include <linux/acct.h>
2006-10-01 06:28:59 +00:00
#include <linux/tsacct_kern.h>
2005-11-07 08:59:16 +00:00
#include <linux/cn_proc.h>
2007-05-23 20:57:25 +00:00
#include <linux/freezer.h>
2006-07-14 07:24:36 +00:00
#include <linux/delayacct.h>
2006-07-14 07:24:44 +00:00
#include <linux/taskstats_kern.h>
2006-09-26 08:52:38 +00:00
#include <linux/random.h>
Audit: add TTY input auditing
Add TTY input auditing, used to audit system administrator's actions. This is
required by various security standards such as DCID 6/3 and PCI to provide
non-repudiation of administrator's actions and to allow a review of past
actions if the administrator seems to overstep their duties or if the system
becomes misconfigured for unknown reasons. These requirements do not make it
necessary to audit TTY output as well.
Compared to a user-space keylogger, this approach records TTY input using the
audit subsystem, correlated with other audit events, and it is completely
transparent to the user-space application (e.g. the console ioctls still
work).
TTY input auditing works on a higher level than auditing all system calls
within the session, which would produce an overwhelming amount of mostly
useless audit events.
Add an "audit_tty" attribute, inherited across fork (). Data read from TTYs
by process with the attribute is sent to the audit subsystem by the kernel.
The audit netlink interface is extended to allow modifying the audit_tty
attribute, and to allow sending explanatory audit events from user-space (for
example, a shell might send an event containing the final command, after the
interactive command-line editing and history expansion is performed, which
might be difficult to decipher from the TTY input alone).
Because the "audit_tty" attribute is inherited across fork (), it would be set
e.g. for sshd restarted within an audited session. To prevent this, the
audit_tty attribute is cleared when a process with no open TTY file
descriptors (e.g. after daemon startup) opens a TTY.
See https://www.redhat.com/archives/linux-audit/2007-June/msg00000.html for a
more detailed rationale document for an older version of this patch.
[akpm@linux-foundation.org: build fix]
Signed-off-by: Miloslav Trmac <mitr@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Cc: Paul Fulghum <paulkf@microgate.com>
Cc: Casey Schaufler <casey@schaufler-ca.com>
Cc: Steve Grubb <sgrubb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-07-16 06:40:56 +00:00
#include <linux/tty.h>
2008-01-24 07:52:45 +00:00
#include <linux/blkdev.h>
2009-03-29 23:50:06 +00:00
#include <linux/fs_struct.h>
2008-04-22 21:38:23 +00:00
#include <linux/magic.h>
perf: Do the big rename: Performance Counters -> Performance Events
Bye-bye Performance Counters, welcome Performance Events!
In the past few months the perfcounters subsystem has outgrown its
initial role of counting hardware events, and has become (and is
becoming) a much broader generic event enumeration, reporting, logging,
monitoring, analysis facility.
Naming its core object 'perf_counter' and naming the subsystem
'perfcounters' has become more and more of a misnomer. With pending
code like hw-breakpoints support the 'counter' name is less and
less appropriate.
All in all, we've decided to rename the subsystem to 'performance
events' and to propagate this rename through all fields, variables
and API names. (in an ABI compatible fashion)
The word 'event' is also a bit shorter than 'counter' - which makes
it slightly more convenient to write/handle as well.
Thanks goes to Stephane Eranian who first observed this misnomer and
suggested a rename.
User-space tooling and ABI compatibility is not affected - this patch
should be function-invariant. (Also, defconfigs were not touched to
keep the size down.)
This patch has been generated via the following script:

  FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')

  sed -i \
    -e 's/PERF_EVENT_/PERF_RECORD_/g' \
    -e 's/PERF_COUNTER/PERF_EVENT/g' \
    -e 's/perf_counter/perf_event/g' \
    -e 's/nb_counters/nb_events/g' \
    -e 's/swcounter/swevent/g' \
    -e 's/tpcounter_event/tp_event/g' \
    $FILES

  for N in $(find . -name perf_counter.[ch]); do
    M=$(echo $N | sed 's/perf_counter/perf_event/g')
    mv $N $M
  done

  FILES=$(find . -name perf_event.*)

  sed -i \
    -e 's/COUNTER_MASK/REG_MASK/g' \
    -e 's/COUNTER/EVENT/g' \
    -e 's/\<event\>/event_id/g' \
    -e 's/counter/event/g' \
    -e 's/Counter/Event/g' \
    $FILES
... to keep it as correct as possible. This script can also be
used by anyone who has pending perfcounters patches - it converts
a Linux kernel tree over to the new naming. We tried to time this
change to the point in time where the amount of pending patches
is the smallest: the end of the merge window.
Namespace clashes were fixed up in a preparatory patch - and some
stylistic fallout will be fixed up in a subsequent patch.
( NOTE: 'counters' are still the proper terminology when we deal
with hardware registers - and these sed scripts are a bit
over-eager in renaming them. I've undone some of that, but
in case there's something left where 'counter' would be
better than 'event' we can undo that on an individual basis
instead of touching an otherwise nicely automated patch. )
Suggested-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-09-21 10:02:48 +00:00
#include <linux/perf_event.h>
2009-07-29 10:15:26 +00:00
#include <linux/posix-timers.h>
2009-11-29 14:34:48 +00:00
#include <linux/user-return-notifier.h>
2010-10-26 21:21:23 +00:00
#include <linux/oom.h>
2011-01-13 23:46:58 +00:00
#include <linux/khugepaged.h>
epoll: introduce POLLFREE to flush ->signalfd_wqh before kfree()
This patch is intentionally incomplete to simplify the review.
It ignores ep_unregister_pollwait() which plays with the same wqh.
See the next change.
epoll assumes that the EPOLL_CTL_ADD'ed file controls everything
f_op->poll() needs. In particular it assumes that the wait queue
can't go away until eventpoll_release(). This is not true in case
of signalfd, the task which does EPOLL_CTL_ADD uses its ->sighand
which is not connected to the file.
This patch adds the special event, POLLFREE, currently only for
epoll. It expects that init_poll_funcptr()'ed hook should do the
necessary cleanup. Perhaps it should be defined as EPOLLFREE in
eventpoll.
__cleanup_sighand() is changed to do wake_up_poll(POLLFREE) if
->signalfd_wqh is not empty, we add the new signalfd_cleanup()
helper.
ep_poll_callback(POLLFREE) simply does list_del_init(task_list).
This makes the poll entry inconsistent, but we don't care. If you
share an epoll fd which contains our sigfd with another process, you
should blame yourself. signalfd is "really special". I simply do
not know how we can define the "right" semantics if it is used with
epoll.
The main problem is that epoll calls signalfd_poll() once to establish
the connection with the wait queue; after that, signalfd_poll(NULL)
returns different/inconsistent results depending on who does
EPOLL_CTL_MOD/signalfd_read/etc. IOW: apart from the sigmask, signalfd
has nothing to do with the file, it works with the current thread.
In short: this patch is the hack which tries to fix the symptoms.
It also assumes that nobody can take tasklist_lock under epoll
locks, this seems to be true.
Note:
- we do not have wake_up_all_poll() but wake_up_poll()
is fine, poll/epoll doesn't use WQ_FLAG_EXCLUSIVE.
- signalfd_cleanup() uses POLLHUP along with POLLFREE,
we need a couple of simple changes in eventpoll.c to
make sure it can't be "lost".
Reported-by: Maxime Bizon <mbizon@freebox.fr>
Cc: <stable@kernel.org>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2012-02-24 19:07:11 +00:00
#include <linux/signalfd.h>
uprobes/core: Handle breakpoint and singlestep exceptions
Uprobes uses exception notifiers to get to know if a thread hit
a breakpoint or a singlestep exception.
When a thread hits a uprobe or is singlestepping post a uprobe
hit, the uprobe exception notifier sets its TIF_UPROBE bit,
which is then checked on the return-to-userspace path
(do_notify_resume() -> uprobe_notify_resume()), where the
consumers' handlers are run (in task context) based on the
defined filters.
Uprobe hits are thread specific, hence we need to maintain
information about whether a task hit a uprobe, which uprobe was hit,
and the slot where the original instruction was copied for xol so
that it can be singlestepped with appropriate fixups.
In some cases, special care is needed for instructions that are
executed out of line (xol). These are architecture specific
artefacts, such as handling RIP relative instructions on x86_64.
Since the instruction at which the uprobe was inserted is
executed out of line, architecture specific fixups are added so
that the thread continues normal execution in the presence of a
uprobe.
Postpone the signals until we execute the probed insn.
post_xol() path does a recalc_sigpending() before return to
user-mode, this ensures the signal can't be lost.
Uprobes relies on DIE_DEBUG notification to notify if a
singlestep is complete.
Adds x86 specific uprobe exception notifiers and appropriate
hooks needed to determine a uprobe hit and subsequent post
processing.
Add requisite x86 fixups for xol for uprobes. Specific cases
needing fixups include relative jumps (x86_64), calls, etc.
Where possible, we check and skip singlestepping the
breakpointed instructions. For now we skip single byte as well
as few multibyte nop instructions. However this can be extended
to other instructions too.
Credits to Oleg Nesterov for suggestions/patches related to
signal, breakpoint, singlestep handling code.
Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@linux.vnet.ibm.com>
Cc: Linux-mm <linux-mm@kvack.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20120313180011.29771.89027.sendpatchset@srdronam.in.ibm.com
[ Performed various cleanliness edits ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2012-03-13 18:00:11 +00:00
#include <linux/uprobes.h>
2013-05-07 23:19:08 +00:00
#include <linux/aio.h>
2014-04-07 22:39:20 +00:00
#include <linux/compiler.h>
2005-04-16 22:20:36 +00:00

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

2009-04-14 23:39:12 +00:00
#include <trace/events/sched.h>

2012-01-10 23:08:09 +00:00
#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

2005-04-16 22:20:36 +00:00
/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
2011-07-26 23:08:39 +00:00
int nr_threads;			/* The idle threads do not count.. */
2005-04-16 22:20:36 +00:00

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

2006-07-10 11:45:40 +00:00
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
2010-03-03 15:46:56 +00:00

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */
2005-04-16 22:20:36 +00:00

int nr_processes(void)
{
	int cpu;
	int total = 0;

2009-11-03 10:11:14 +00:00
	for_each_possible_cpu(cpu)
2005-04-16 22:20:36 +00:00
		total += per_cpu(process_counts, cpu);

	return total;
}

fork: fix error handling in dup_task()
The function dup_task() may fail at the following function calls in the
following order.
0) alloc_task_struct_node()
1) alloc_thread_info_node()
2) arch_dup_task_struct()
An error at 0) is not a problem; the function can just return. But an
error at 1) requires releasing the task_struct allocated at 0) before it
returns. Likewise, an error at 2) requires releasing the task_struct and
thread_info allocated at 0) and 1).
The existing error handling calls free_task_struct() and
free_thread_info(), which not only release task_struct and thread_info
but also call the architecture-specific arch_release_task_struct() and
arch_release_thread_info().
The problem is that task_struct and thread_info are not fully initialized
yet at this point, but arch_release_task_struct() and
arch_release_thread_info() are called on them anyway.
For example, x86 defines its own arch_release_task_struct() that releases
a task_xstate. If alloc_thread_info_node() fails in dup_task(),
arch_release_task_struct() is called with a task_struct that has just
been allocated and is filled with garbage in this error handling.
This actually happened with tools/testing/fault-injection/failcmd.sh
# env FAILCMD_TYPE=fail_page_alloc \
./tools/testing/fault-injection/failcmd.sh --times=100 \
--min-order=0 --ignore-gfp-wait=0 \
-- make -C tools/testing/selftests/ run_tests
To fix this issue, make free_{task_struct,thread_info}() not call
arch_release_{task_struct,thread_info}(), and instead call
arch_release_{task_struct,thread_info}() explicitly where needed.
arch_release_task_struct() and arch_release_thread_info() are defined as
empty by default, so this change only affects the architectures which
implement their own arch_release_task_struct() or
arch_release_thread_info(), as listed below.
arch_release_task_struct(): x86, sh
arch_release_thread_info(): mn10300, tile
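With that split, the error unwind in the copy path follows the usual goto
pattern, roughly like the sketch below (labels and surrounding details are
illustrative, not the exact patch):

	static struct task_struct *dup_task_struct(struct task_struct *orig)
	{
		struct task_struct *tsk;
		struct thread_info *ti;
		int node = tsk_fork_get_node(orig);
		int err;

		tsk = alloc_task_struct_node(node);		/* step 0) */
		if (!tsk)
			return NULL;

		ti = alloc_thread_info_node(tsk, node);		/* step 1) */
		if (!ti)
			goto free_tsk;

		err = arch_dup_task_struct(tsk, orig);		/* step 2) */
		if (err)
			goto free_ti;

		/* ... remaining setup of the new task ... */
		return tsk;

	free_ti:
		/* Undo only step 1); no arch_release_thread_info() here. */
		free_thread_info(ti);
	free_tsk:
		/* Undo only step 0); no arch_release_task_struct() here. */
		free_task_struct(tsk);
		return NULL;
	}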
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Salman Qazi <sqazi@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2012-07-30 21:42:33 +00:00
void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

2012-05-05 15:05:48 +00:00
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
2006-12-07 04:33:20 +00:00
static struct kmem_cache *task_struct_cachep;
2012-05-05 15:05:41 +00:00

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
2005-04-16 22:20:36 +00:00
#endif

fork: fix error handling in dup_task()
2012-07-30 21:42:33 +00:00
void __weak arch_release_thread_info(struct thread_info *ti)
{
}

2012-05-05 15:05:48 +00:00
#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR

2012-05-05 15:05:41 +00:00
/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE
2011-03-22 23:30:42 +00:00
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
2008-07-25 08:45:40 +00:00
{
2014-06-04 23:06:39 +00:00
	struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
						  THREAD_SIZE_ORDER);
2011-03-22 23:30:42 +00:00

	return page ? page_address(page) : NULL;
2008-07-25 08:45:40 +00:00
}

static inline void free_thread_info(struct thread_info *ti)
{
2014-06-04 23:06:39 +00:00
	free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
2008-07-25 08:45:40 +00:00
}
2012-05-05 15:05:41 +00:00
# else
static struct kmem_cache *thread_info_cache;

static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
	return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
}

static void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
# endif
2008-07-25 08:45:40 +00:00
#endif

2005-04-16 22:20:36 +00:00
/* SLAB cache for signal_struct structures (tsk->signal) */
2006-12-07 04:33:20 +00:00
static struct kmem_cache *signal_cachep;
2005-04-16 22:20:36 +00:00

/* SLAB cache for sighand_struct structures (tsk->sighand) */
2006-12-07 04:33:20 +00:00
struct kmem_cache *sighand_cachep;
2005-04-16 22:20:36 +00:00

/* SLAB cache for files_struct structures (tsk->files) */
2006-12-07 04:33:20 +00:00
struct kmem_cache *files_cachep;
2005-04-16 22:20:36 +00:00

/* SLAB cache for fs_struct structures (tsk->fs) */
2006-12-07 04:33:20 +00:00
struct kmem_cache *fs_cachep;
2005-04-16 22:20:36 +00:00

/* SLAB cache for vm_area_struct structures */
2006-12-07 04:33:20 +00:00
struct kmem_cache *vm_area_cachep;
2005-04-16 22:20:36 +00:00

/* SLAB cache for mm_struct structures (tsk->mm) */
2006-12-07 04:33:20 +00:00
static struct kmem_cache *mm_cachep;
2005-04-16 22:20:36 +00:00
2009-09-22 00:01:32 +00:00
static void account_kernel_stack(struct thread_info *ti, int account)
{
	struct zone *zone = page_zone(virt_to_page(ti));

	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}

2005-04-16 22:20:36 +00:00
void free_task(struct task_struct *tsk)
{
2009-09-22 00:01:32 +00:00
	account_kernel_stack(tsk->stack, -1);
fork: fix error handling in dup_task()
2012-07-30 21:42:33 +00:00
	arch_release_thread_info(tsk->stack);
2007-05-09 09:35:17 +00:00
	free_thread_info(tsk->stack);
2006-06-27 09:54:53 +00:00
	rt_mutex_debug_task_free(tsk);
2008-11-25 20:07:04 +00:00
	ftrace_graph_exit_task(tsk);
seccomp: add system call filtering using BPF
2012-04-12 21:47:57 +00:00
	put_seccomp_filter(tsk);
fork: fix error handling in dup_task()
2012-07-30 21:42:33 +00:00
	arch_release_task_struct(tsk);
2005-04-16 22:20:36 +00:00
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

signals: make task_struct->signal immutable/refcountable
We have a lot of problems with accessing task_struct->signal; it can
"disappear" at any moment. Even current can't use its ->signal safely
after exit_notify(). ->siglock helps, but it is not convenient, not
always possible, and sometimes it makes sense to use task->signal even
after this task is already dead.
This patch adds the reference counter, sigcnt, into signal_struct. This
reference is owned by task_struct and it is dropped in
__put_task_struct(). Perhaps it makes sense to export
get/put_signal_struct() later, but currently I don't see the immediate
reason.
Rename __cleanup_signal() to free_signal_struct() and unexport it. With
the previous changes it does nothing except kmem_cache_free().
Change __exit_signal() to not clear/free ->signal, it will be freed when
the last reference to any thread in the thread group goes away.
Note:
- when the last thread exits, signal->tty can point to nowhere; see
the next patch.
- with or without this patch signal_struct->count should go away,
or at least it should be "int nr_threads" for fs/proc. This will
be addressed later.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Alan Cox <alan@linux.intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2010-05-26 21:43:16 +00:00
static inline void free_signal_struct(struct signal_struct *sig)
{
2010-05-26 21:43:20 +00:00
	taskstats_tgid_free(sig);
2011-01-05 10:16:04 +00:00
	sched_autogroup_exit(sig);
signals: make task_struct->signal immutable/refcountable
2010-05-26 21:43:16 +00:00
|
|
|
kmem_cache_free(signal_cachep, sig);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void put_signal_struct(struct signal_struct *sig)
|
|
|
|
{
|
2011-01-05 10:16:04 +00:00
|
|
|
if (atomic_dec_and_test(&sig->sigcnt))
|
signals: make task_struct->signal immutable/refcountable
2010-05-26 21:43:16 +00:00
|
|
|
free_signal_struct(sig);
|
|
|
|
}
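A minimal userspace sketch of the sigcnt get/put pattern described in the annotation above, with hypothetical names and C11 atomics standing in for the kernel's atomic_t (illustrative only, not the kernel implementation):

#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative sketch: a refcounted object in the style of sigcnt. */
struct sig_demo {
	atomic_int sigcnt;		/* one reference per task pointing at it */
	/* ... payload ... */
};

static struct sig_demo *get_sig_demo(struct sig_demo *sig)
{
	atomic_fetch_add(&sig->sigcnt, 1);	/* caller already holds a reference */
	return sig;
}

static void put_sig_demo(struct sig_demo *sig)
{
	/* Free only when the last reference (last thread of the group) is dropped. */
	if (atomic_fetch_sub(&sig->sigcnt, 1) == 1)
		free(sig);
}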
|
|
|
|
|
2006-03-31 10:31:34 +00:00
|
|
|
void __put_task_struct(struct task_struct *tsk)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2007-10-19 06:40:38 +00:00
|
|
|
WARN_ON(!tsk->exit_state);
|
2005-04-16 22:20:36 +00:00
|
|
|
WARN_ON(atomic_read(&tsk->usage));
|
|
|
|
WARN_ON(tsk == current);
|
|
|
|
|
2014-02-28 06:23:11 +00:00
|
|
|
task_numa_free(tsk);
|
2011-12-21 20:17:03 +00:00
|
|
|
security_task_free(tsk);
|
2009-09-02 08:13:40 +00:00
|
|
|
exit_creds(tsk);
|
2006-09-01 04:27:38 +00:00
|
|
|
delayacct_tsk_free(tsk);
|
signals: make task_struct->signal immutable/refcountable
2010-05-26 21:43:16 +00:00
|
|
|
put_signal_struct(tsk->signal);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
if (!profile_handoff_task(tsk))
|
|
|
|
free_task(tsk);
|
|
|
|
}
|
2011-02-01 14:51:46 +00:00
|
|
|
EXPORT_SYMBOL_GPL(__put_task_struct);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2012-05-05 15:05:40 +00:00
|
|
|
void __init __weak arch_task_cache_init(void) { }
|
2008-03-10 22:28:04 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
void __init fork_init(unsigned long mempages)
|
|
|
|
{
|
2012-05-05 15:05:48 +00:00
|
|
|
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
|
2005-04-16 22:20:36 +00:00
|
|
|
#ifndef ARCH_MIN_TASKALIGN
|
|
|
|
#define ARCH_MIN_TASKALIGN L1_CACHE_BYTES
|
|
|
|
#endif
|
|
|
|
/* create a slab on which task_structs can be allocated */
|
|
|
|
task_struct_cachep =
|
|
|
|
kmem_cache_create("task_struct", sizeof(struct task_struct),
|
kmemcheck: add mm functions
With kmemcheck enabled, the slab allocator needs to do this:
1. Tell kmemcheck to allocate the shadow memory which stores the status of
each byte in the allocation proper, e.g. whether it is initialized or
uninitialized.
2. Tell kmemcheck which parts of memory that should be marked uninitialized.
There are actually a few more states, such as "not yet allocated" and
"recently freed".
If a slab cache is set up using the SLAB_NOTRACK flag, it will never return
memory that can take page faults because of kmemcheck.
If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still
request memory with the __GFP_NOTRACK flag. This does not prevent the page
faults from occurring; instead, it marks the object in question as being
initialized so that no warnings will ever be produced for this object.
In addition to (and in contrast to) __GFP_NOTRACK, the
__GFP_NOTRACK_FALSE_POSITIVE flag indicates that the allocation should
not be tracked _because_ it would produce a false positive. Their values
are identical, but need not be so in the future (for example, we could now
enable/disable false positives with a config option).
Parts of this patch were contributed by Pekka Enberg but merged for
atomicity.
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
2008-05-31 13:56:17 +00:00
|
|
|
ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif
|
|
|
|
|
2008-03-10 22:28:04 +00:00
|
|
|
/* do the arch specific task caches init */
|
|
|
|
arch_task_cache_init();
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* The default maximum number of threads is set to a safe
|
|
|
|
* value: the thread structures can take up at most half
|
|
|
|
* of memory.
|
|
|
|
*/
|
|
|
|
max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* we need to allow at least 20 threads to boot a system
|
|
|
|
*/
|
2011-07-26 23:08:39 +00:00
|
|
|
if (max_threads < 20)
|
2005-04-16 22:20:36 +00:00
|
|
|
max_threads = 20;
|
|
|
|
|
|
|
|
init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
|
|
|
|
init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
|
|
|
|
init_task.signal->rlim[RLIMIT_SIGPENDING] =
|
|
|
|
init_task.signal->rlim[RLIMIT_NPROC];
|
|
|
|
}
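As a rough worked example of the max_threads formula above (assuming THREAD_SIZE = 8192 and PAGE_SIZE = 4096, as on 32-bit x86): 8 * THREAD_SIZE / PAGE_SIZE = 16, so a machine with 1 GiB of memory (mempages = 262144) gets max_threads = 262144 / 16 = 16384, and RLIMIT_NPROC for init_task defaults to half of that, 8192.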
|
|
|
|
|
2014-04-07 22:39:20 +00:00
|
|
|
int __weak arch_dup_task_struct(struct task_struct *dst,
|
2008-03-10 22:28:04 +00:00
|
|
|
struct task_struct *src)
|
|
|
|
{
|
|
|
|
*dst = *src;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
static struct task_struct *dup_task_struct(struct task_struct *orig)
|
|
|
|
{
|
|
|
|
struct task_struct *tsk;
|
|
|
|
struct thread_info *ti;
|
2008-04-22 21:38:23 +00:00
|
|
|
unsigned long *stackend;
|
2011-03-22 23:30:44 +00:00
|
|
|
int node = tsk_fork_get_node(orig);
|
2007-10-17 06:25:50 +00:00
|
|
|
int err;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-03-22 23:30:41 +00:00
|
|
|
tsk = alloc_task_struct_node(node);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!tsk)
|
|
|
|
return NULL;
|
|
|
|
|
2011-03-22 23:30:42 +00:00
|
|
|
ti = alloc_thread_info_node(tsk, node);
|
fork: fix error handling in dup_task()
The function dup_task() may fail at the following function calls in the
following order.
0) alloc_task_struct_node()
1) alloc_thread_info_node()
2) arch_dup_task_struct()
An error at 0) is no problem; dup_task() can simply return. But an error
at 1) requires releasing the task_struct allocated at 0) before returning.
Likewise, an error at 2) requires releasing both the task_struct and the
thread_info allocated at 0) and 1).
The existing error handling calls free_task_struct() and
free_thread_info(), which not only release task_struct and thread_info
but also call the architecture specific arch_release_task_struct() and
arch_release_thread_info().
The problem is that task_struct and thread_info are not fully initialized
yet at this point, but arch_release_task_struct() and
arch_release_thread_info() are called with them.
For example, x86 defines its own arch_release_task_struct() that releases
a task_xstate. If alloc_thread_info_node() fails in dup_task(),
arch_release_task_struct() is called with task_struct which is just
allocated and filled with garbage in this error handling.
This actually happened with tools/testing/fault-injection/failcmd.sh
# env FAILCMD_TYPE=fail_page_alloc \
./tools/testing/fault-injection/failcmd.sh --times=100 \
--min-order=0 --ignore-gfp-wait=0 \
-- make -C tools/testing/selftests/ run_tests
In order to fix this issue, make free_{task_struct,thread_info}() not
call arch_release_{task_struct,thread_info}(), and instead call
arch_release_{task_struct,thread_info}() explicitly where needed.
arch_release_task_struct() and arch_release_thread_info() are defined
as empty by default, so this change only affects the
architectures which implement their own arch_release_task_struct() or
arch_release_thread_info() as listed below.
arch_release_task_struct(): x86, sh
arch_release_thread_info(): mn10300, tile
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Salman Qazi <sqazi@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2012-07-30 21:42:33 +00:00
|
|
|
if (!ti)
|
|
|
|
goto free_tsk;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-07-26 23:08:39 +00:00
|
|
|
err = arch_dup_task_struct(tsk, orig);
|
2012-06-26 01:18:15 +00:00
|
|
|
if (err)
|
fork: fix error handling in dup_task()
2012-07-30 21:42:33 +00:00
|
|
|
goto free_ti;
|
2012-06-26 01:18:15 +00:00
|
|
|
|
2012-07-30 21:42:31 +00:00
|
|
|
tsk->stack = ti;
|
2014-06-27 22:18:48 +00:00
|
|
|
#ifdef CONFIG_SECCOMP
|
|
|
|
/*
|
|
|
|
* We must handle setting up seccomp filters once we're under
|
|
|
|
* the sighand lock in case orig has changed between now and
|
|
|
|
* then. Until then, filter must be NULL to avoid messing up
|
|
|
|
* the usage counts on the error path calling free_task.
|
|
|
|
*/
|
|
|
|
tsk->seccomp.filter = NULL;
|
|
|
|
#endif
|
2012-07-30 21:42:31 +00:00
|
|
|
|
|
|
|
setup_thread_stack(tsk, orig);
|
2009-11-29 14:34:48 +00:00
|
|
|
clear_user_return_notifier(tsk);
|
2010-12-08 10:05:42 +00:00
|
|
|
clear_tsk_need_resched(tsk);
|
2008-04-22 21:38:23 +00:00
|
|
|
stackend = end_of_stack(tsk);
|
|
|
|
*stackend = STACK_END_MAGIC; /* for overflow detection */
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-09-26 08:52:38 +00:00
|
|
|
#ifdef CONFIG_CC_STACKPROTECTOR
|
|
|
|
tsk->stack_canary = get_random_int();
|
|
|
|
#endif
|
|
|
|
|
2011-07-26 23:08:39 +00:00
|
|
|
/*
|
|
|
|
* One for us, one for whoever does the "release_task()" (usually
|
|
|
|
* parent)
|
|
|
|
*/
|
|
|
|
atomic_set(&tsk->usage, 2);
|
2006-09-29 08:59:40 +00:00
|
|
|
#ifdef CONFIG_BLK_DEV_IO_TRACE
|
2006-03-23 19:00:26 +00:00
|
|
|
tsk->btrace_seq = 0;
|
2006-09-29 08:59:40 +00:00
|
|
|
#endif
|
2006-04-20 11:05:33 +00:00
|
|
|
tsk->splice_pipe = NULL;
|
net: use a per task frag allocator
We currently use a per socket order-0 page cache for tcp_sendmsg()
operations.
This page is used to build fragments for skbs.
It's done to increase the probability of coalescing small write()s into
single segments in skbs still in the write queue (not yet sent).
But it wastes a lot of memory for applications handling many mostly
idle sockets, since each socket holds one page in sk->sk_sndmsg_page.
It's also quite inefficient to build 64KB TSO packets, because we need
about 16 pages per skb on arches where PAGE_SIZE = 4096, so we hit
the page allocator more than wanted.
This patch adds a per task frag allocator and uses bigger pages,
if available. An automatic fallback is done in case of memory pressure.
(up to 32768 bytes per frag, that's an order-3 page on x86)
This increases TCP stream performance by 20% on the loopback device,
but also benefits other network devices, since 8x fewer frags are
mapped on transmit and unmapped on tx completion. Alexander Duyck
mentioned a probable performance win on systems with IOMMU enabled.
It's possible some SG-enabled hardware can't cope with bigger fragments,
but their ndo_start_xmit() should already handle this, splitting a
fragment in sub fragments, since some arches have PAGE_SIZE=65536
Successfully tested on various ethernet devices.
(ixgbe, igb, bnx2x, tg3, mellanox mlx4)
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Ben Hutchings <bhutchings@solarflare.com>
Cc: Vijay Subramanian <subramanian.vijay@gmail.com>
Cc: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Vijay Subramanian <subramanian.vijay@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2012-09-23 23:04:42 +00:00
|
|
|
tsk->task_frag.page = NULL;
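A hedged userspace sketch of the per-task frag-allocator idea from the annotation above (hypothetical names and sizes; the real code works on refcounted pages, this sketch only shows the try-big-then-fall-back refill and the carving of fragments):

#include <stdlib.h>
#include <stddef.h>

/* Illustrative fragment cache: one buffer carved into fragments. */
struct frag_cache_demo {
	char *buf;
	size_t size;		/* 32768 when the big allocation succeeds, else 4096 */
	size_t offset;		/* next free byte */
};

static void *frag_alloc_demo(struct frag_cache_demo *fc, size_t len)
{
	if (!fc->buf || fc->offset + len > fc->size) {
		/* Refill: try an order-3-sized buffer, fall back under pressure. */
		free(fc->buf);	/* sketch only; real pages stay alive via refcounts */
		fc->size = 32768;
		fc->buf = malloc(fc->size);
		if (!fc->buf) {
			fc->size = 4096;
			fc->buf = malloc(fc->size);
		}
		fc->offset = 0;
		if (!fc->buf || len > fc->size)
			return NULL;
	}
	fc->offset += len;
	return fc->buf + fc->offset - len;
}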
|
2009-09-22 00:01:32 +00:00
|
|
|
|
|
|
|
account_kernel_stack(ti, 1);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
return tsk;
|
2008-03-10 22:28:04 +00:00
|
|
|
|
fork: fix error handling in dup_task()
2012-07-30 21:42:33 +00:00
|
|
|
free_ti:
|
2008-03-10 22:28:04 +00:00
|
|
|
free_thread_info(ti);
|
fork: fix error handling in dup_task()
2012-07-30 21:42:33 +00:00
|
|
|
free_tsk:
|
2008-03-10 22:28:04 +00:00
|
|
|
free_task_struct(tsk);
|
|
|
|
return NULL;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
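The error-handling rule described in the fork: fix error handling in dup_task() annotation above is the usual goto-unwind pattern: release only the resources whose setup already completed, in reverse order. A small self-contained sketch of that pattern, with hypothetical helper names:

#include <stdlib.h>

struct demo_stack { int dummy; };
struct demo_task  { struct demo_stack *stack; };

static int demo_arch_dup(struct demo_task *dst) { (void)dst; return 0; }

static struct demo_task *demo_dup_task(void)
{
	struct demo_task *tsk;
	struct demo_stack *stack;

	tsk = malloc(sizeof(*tsk));		/* step 0 */
	if (!tsk)
		return NULL;			/* nothing to unwind yet */

	stack = malloc(sizeof(*stack));		/* step 1 */
	if (!stack)
		goto free_tsk;			/* unwind step 0 only */

	if (demo_arch_dup(tsk))			/* step 2 */
		goto free_stack;		/* unwind steps 1, then 0 */

	tsk->stack = stack;
	return tsk;

free_stack:
	free(stack);
free_tsk:
	free(tsk);
	return NULL;
}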
|
|
|
|
|
|
|
|
#ifdef CONFIG_MMU
|
2007-10-19 06:41:10 +00:00
|
|
|
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2010-08-20 23:24:55 +00:00
|
|
|
struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
|
2005-04-16 22:20:36 +00:00
|
|
|
struct rb_node **rb_link, *rb_parent;
|
|
|
|
int retval;
|
|
|
|
unsigned long charge;
|
|
|
|
|
2012-11-14 18:03:42 +00:00
|
|
|
uprobe_start_dup_mmap();
|
2005-04-16 22:20:36 +00:00
|
|
|
down_write(&oldmm->mmap_sem);
|
2006-12-12 17:14:57 +00:00
|
|
|
flush_cache_dup_mm(oldmm);
|
2012-08-08 15:11:42 +00:00
|
|
|
uprobe_dup_mmap(oldmm, mm);
|
2006-07-03 07:25:15 +00:00
|
|
|
/*
|
|
|
|
* Not linked in yet - no deadlock potential:
|
|
|
|
*/
|
|
|
|
down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
|
2005-10-30 01:16:08 +00:00
|
|
|
|
2014-08-08 21:22:01 +00:00
|
|
|
mm->total_vm = oldmm->total_vm;
|
|
|
|
mm->shared_vm = oldmm->shared_vm;
|
|
|
|
mm->exec_vm = oldmm->exec_vm;
|
|
|
|
mm->stack_vm = oldmm->stack_vm;
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
rb_link = &mm->mm_rb.rb_node;
|
|
|
|
rb_parent = NULL;
|
|
|
|
pprev = &mm->mmap;
|
2009-09-22 00:01:57 +00:00
|
|
|
retval = ksm_fork(mm, oldmm);
|
2011-01-13 23:46:58 +00:00
|
|
|
if (retval)
|
|
|
|
goto out;
|
|
|
|
retval = khugepaged_fork(mm, oldmm);
|
2009-09-22 00:01:57 +00:00
|
|
|
if (retval)
|
|
|
|
goto out;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-08-20 23:24:55 +00:00
|
|
|
prev = NULL;
|
2005-10-30 01:16:06 +00:00
|
|
|
for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
|
2005-04-16 22:20:36 +00:00
|
|
|
struct file *file;
|
|
|
|
|
|
|
|
if (mpnt->vm_flags & VM_DONTCOPY) {
|
2005-10-30 01:15:56 +00:00
|
|
|
vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
|
2012-07-31 23:41:49 +00:00
|
|
|
-vma_pages(mpnt));
|
2005-04-16 22:20:36 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
charge = 0;
|
|
|
|
if (mpnt->vm_flags & VM_ACCOUNT) {
|
2012-07-30 21:42:30 +00:00
|
|
|
unsigned long len = vma_pages(mpnt);
|
|
|
|
|
2012-02-13 03:58:52 +00:00
|
|
|
if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
|
2005-04-16 22:20:36 +00:00
|
|
|
goto fail_nomem;
|
|
|
|
charge = len;
|
|
|
|
}
|
2006-12-07 04:33:17 +00:00
|
|
|
tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!tmp)
|
|
|
|
goto fail_nomem;
|
|
|
|
*tmp = *mpnt;
|
mm: change anon_vma linking to fix multi-process server scalability issue
The old anon_vma code can lead to scalability issues with heavily forking
workloads. Specifically, each anon_vma will be shared between the parent
process and all its child processes.
In a workload with 1000 child processes and a VMA with 1000 anonymous
pages per process that get COWed, this leads to a system with a million
anonymous pages in the same anon_vma, each of which is mapped in just one
of the 1000 processes. However, the current rmap code needs to walk them
all, leading to O(N) scanning complexity for each page.
This can result in systems where one CPU is walking the page tables of
1000 processes in page_referenced_one, while all other CPUs are stuck on
the anon_vma lock. This leads to catastrophic failure for a benchmark
like AIM7, where the total number of processes can reach in the tens of
thousands. Real workloads are still a factor of 10 less process-intensive
than AIM7, but they are catching up.
This patch changes the way anon_vmas and VMAs are linked, which allows us
to associate multiple anon_vmas with a VMA. At fork time, each child
process gets its own anon_vmas, in which its COWed pages will be
instantiated. The parent's anon_vma is also linked to the VMA, because
non-COWed pages could be present in any of the children.
This reduces rmap scanning complexity to O(1) for the pages of the 1000
child processes, with O(N) complexity for at most 1/N pages in the system.
This reduces the average scanning cost in heavily forking workloads from
O(N) to 2.
The only real complexity in this patch stems from the fact that linking a
VMA to anon_vmas now involves memory allocations. This means vma_adjust
can fail, if it needs to attach a VMA to anon_vma structures. This in
turn means error handling needs to be added to the calling functions.
A second source of complexity is that, because there can be multiple
anon_vmas, the anon_vma linking in vma_adjust can no longer be done under
"the" anon_vma lock. To prevent the rmap code from walking up an
incomplete VMA, this patch introduces the VM_LOCK_RMAP VMA flag. This bit
flag uses the same slot as the NOMMU VM_MAPPED_COPY, with an ifdef in mm.h
to make sure it is impossible to compile a kernel that needs both symbolic
values for the same bitflag.
Some test results:
Without the anon_vma changes, when AIM7 hits around 9.7k users (on a test
box with 16GB RAM and not quite enough IO), the system ends up running
>99% in system time, with every CPU on the same anon_vma lock in the
pageout code.
With these changes, AIM7 hits the cross-over point around 29.7k users.
This happens with ~99% IO wait time, there never seems to be any spike in
system time. The anon_vma lock contention appears to be resolved.
[akpm@linux-foundation.org: cleanups]
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2010-03-05 21:42:07 +00:00
|
|
|
INIT_LIST_HEAD(&tmp->anon_vma_chain);
|
2013-09-11 21:20:14 +00:00
|
|
|
retval = vma_dup_policy(mpnt, tmp);
|
|
|
|
if (retval)
|
2005-04-16 22:20:36 +00:00
|
|
|
goto fail_nomem_policy;
|
2010-09-22 20:05:12 +00:00
|
|
|
tmp->vm_mm = mm;
|
mm: change anon_vma linking to fix multi-process server scalability issue
2010-03-05 21:42:07 +00:00
|
|
|
if (anon_vma_fork(tmp, mpnt))
|
|
|
|
goto fail_nomem_anon_vma_fork;
|
2005-04-16 22:20:36 +00:00
|
|
|
tmp->vm_flags &= ~VM_LOCKED;
|
2010-08-20 23:24:55 +00:00
|
|
|
tmp->vm_next = tmp->vm_prev = NULL;
|
2005-04-16 22:20:36 +00:00
|
|
|
file = tmp->vm_file;
|
|
|
|
if (file) {
|
2013-01-23 22:07:38 +00:00
|
|
|
struct inode *inode = file_inode(file);
|
fix mapping_writably_mapped()
Lee Schermerhorn noticed yesterday that I broke the mapping_writably_mapped
test in 2.6.7! Bad bad bug, good good find.
The i_mmap_writable count must be incremented for VM_SHARED (just as
i_writecount is for VM_DENYWRITE, but while holding the i_mmap_lock)
when dup_mmap() copies the vma for fork: it has its own more optimal
version of __vma_link_file(), and I missed this out. So the count
was later going down to 0 (dangerous) when one end unmapped, then
wrapping negative (inefficient) when the other end unmapped.
The only impact on x86 would have been that setting a mandatory lock on
a file which has at some time been opened O_RDWR and mapped MAP_SHARED
(but not necessarily PROT_WRITE) across a fork, might fail with -EAGAIN
when it should succeed, or succeed when it should fail.
But those architectures which rely on flush_dcache_page() to flush
userspace modifications back into the page before the kernel reads it,
may in some cases have skipped the flush after such a fork - though any
repetitive test will soon wrap the count negative, in which case it will
flush_dcache_page() unnecessarily.
The fix would be a two-liner, but a mapping variable is added and the comment moved.
Reported-by: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-12-10 20:48:52 +00:00
|
|
|
struct address_space *mapping = file->f_mapping;
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
get_file(file);
|
|
|
|
if (tmp->vm_flags & VM_DENYWRITE)
|
|
|
|
atomic_dec(&inode->i_writecount);
|
2011-05-25 00:12:06 +00:00
|
|
|
mutex_lock(&mapping->i_mmap_mutex);
|
fix mapping_writably_mapped()
2008-12-10 20:48:52 +00:00
|
|
|
if (tmp->vm_flags & VM_SHARED)
|
2014-08-08 21:25:25 +00:00
|
|
|
atomic_inc(&mapping->i_mmap_writable);
|
fix mapping_writably_mapped()
2008-12-10 20:48:52 +00:00
|
|
|
flush_dcache_mmap_lock(mapping);
|
|
|
|
/* insert tmp into the share list, just after mpnt */
|
2012-10-08 23:31:35 +00:00
|
|
|
if (unlikely(tmp->vm_flags & VM_NONLINEAR))
|
|
|
|
vma_nonlinear_insert(tmp,
|
|
|
|
&mapping->i_mmap_nonlinear);
|
|
|
|
else
|
|
|
|
vma_interval_tree_insert_after(tmp, mpnt,
|
|
|
|
&mapping->i_mmap);
|
fix mapping_writably_mapped()
2008-12-10 20:48:52 +00:00
|
|
|
flush_dcache_mmap_unlock(mapping);
|
2011-05-25 00:12:06 +00:00
|
|
|
mutex_unlock(&mapping->i_mmap_mutex);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2008-07-24 04:27:23 +00:00
|
|
|
/*
|
|
|
|
* Clear hugetlb-related page reserves for children. This only
|
|
|
|
* affects MAP_PRIVATE mappings. Faults generated by the child
|
|
|
|
* are not guaranteed to succeed, even if read-only
|
|
|
|
*/
|
|
|
|
if (is_vm_hugetlb_page(tmp))
|
|
|
|
reset_vma_resv_huge_pages(tmp);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2005-10-30 01:16:08 +00:00
|
|
|
* Link in the new vma and copy the page table entries.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
|
|
|
*pprev = tmp;
|
|
|
|
pprev = &tmp->vm_next;
|
2010-08-20 23:24:55 +00:00
|
|
|
tmp->vm_prev = prev;
|
|
|
|
prev = tmp;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
__vma_link_rb(mm, tmp, rb_link, rb_parent);
|
|
|
|
rb_link = &tmp->vm_rb.rb_right;
|
|
|
|
rb_parent = &tmp->vm_rb;
|
|
|
|
|
|
|
|
mm->map_count++;
|
2005-11-22 05:32:20 +00:00
|
|
|
retval = copy_page_range(mm, oldmm, mpnt);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
if (tmp->vm_ops && tmp->vm_ops->open)
|
|
|
|
tmp->vm_ops->open(tmp);
|
|
|
|
|
|
|
|
if (retval)
|
|
|
|
goto out;
|
|
|
|
}
|
2007-05-02 17:27:14 +00:00
|
|
|
/* a new mm has just been created */
|
|
|
|
arch_dup_mmap(oldmm, mm);
|
2005-04-16 22:20:36 +00:00
|
|
|
retval = 0;
|
|
|
|
out:
|
2005-10-30 01:16:08 +00:00
|
|
|
up_write(&mm->mmap_sem);
|
2005-10-30 01:16:06 +00:00
|
|
|
flush_tlb_mm(oldmm);
|
2005-04-16 22:20:36 +00:00
|
|
|
up_write(&oldmm->mmap_sem);
|
2012-11-14 18:03:42 +00:00
|
|
|
uprobe_end_dup_mmap();
|
2005-04-16 22:20:36 +00:00
|
|
|
return retval;
|
mm: change anon_vma linking to fix multi-process server scalability issue
2010-03-05 21:42:07 +00:00
|
|
|
fail_nomem_anon_vma_fork:
|
2013-09-11 21:20:14 +00:00
|
|
|
mpol_put(vma_policy(tmp));
|
2005-04-16 22:20:36 +00:00
|
|
|
fail_nomem_policy:
|
|
|
|
kmem_cache_free(vm_area_cachep, tmp);
|
|
|
|
fail_nomem:
|
|
|
|
retval = -ENOMEM;
|
|
|
|
vm_unacct_memory(charge);
|
|
|
|
goto out;
|
|
|
|
}
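The i_mmap_writable accounting referenced in the fix mapping_writably_mapped() annotation above is, at heart, a counter of live shared mappings that every creation path (mmap and the fork-time copy in dup_mmap alike) must increment and every teardown path must decrement. A hedged sketch of that invariant with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative per-file mapping state. */
struct demo_mapping {
	atomic_int writably_mapped;	/* live shared (VM_SHARED-style) mappings */
};

/* Must be called both when a shared mapping is created and when fork copies one. */
static void demo_map_shared(struct demo_mapping *m)
{
	atomic_fetch_add(&m->writably_mapped, 1);
}

static void demo_unmap_shared(struct demo_mapping *m)
{
	atomic_fetch_sub(&m->writably_mapped, 1);
}

/*
 * If the fork-time copy skips the increment, the counter reaches zero while a
 * mapping still exists, and the next unmap wraps it negative, which is exactly
 * the bug the annotation describes.
 */
static bool demo_writably_mapped(struct demo_mapping *m)
{
	return atomic_load(&m->writably_mapped) > 0;
}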
|
|
|
|
|
2011-07-26 23:08:39 +00:00
|
|
|
static inline int mm_alloc_pgd(struct mm_struct *mm)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
mm->pgd = pgd_alloc(mm);
|
|
|
|
if (unlikely(!mm->pgd))
|
|
|
|
return -ENOMEM;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-07-26 23:08:39 +00:00
|
|
|
static inline void mm_free_pgd(struct mm_struct *mm)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2008-02-05 06:29:14 +00:00
|
|
|
pgd_free(mm, mm->pgd);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
#else
|
|
|
|
#define dup_mmap(mm, oldmm) (0)
|
|
|
|
#define mm_alloc_pgd(mm) (0)
|
|
|
|
#define mm_free_pgd(mm)
|
|
|
|
#endif /* CONFIG_MMU */
|
|
|
|
|
2007-10-18 10:06:07 +00:00
|
|
|
__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-12-07 04:33:17 +00:00
|
|
|
#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
|
2005-04-16 22:20:36 +00:00
|
|
|
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
|
|
|
|
|
2009-01-06 22:42:47 +00:00
|
|
|
static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
|
|
|
|
|
|
|
|
static int __init coredump_filter_setup(char *s)
|
|
|
|
{
|
|
|
|
default_dump_filter =
|
|
|
|
(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
|
|
|
|
MMF_DUMP_FILTER_MASK;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
__setup("coredump_filter=", coredump_filter_setup);
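As a worked example of the parsing above (treating the exact shift as version-dependent): booting with coredump_filter=0x23 makes simple_strtoul() return 0x23, which is shifted left by MMF_DUMP_FILTER_SHIFT and masked with MMF_DUMP_FILTER_MASK, so the dump-filter bits corresponding to 0x23 (bits 0, 1 and 5 of the user-visible value) end up set in default_dump_filter, which mm_init() uses whenever the creating task has no mm of its own.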
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/init_task.h>
|
|
|
|
|
2009-09-23 22:57:32 +00:00
|
|
|
static void mm_init_aio(struct mm_struct *mm)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_AIO
|
|
|
|
spin_lock_init(&mm->ioctx_lock);
|
aio: convert the ioctx list to table lookup v3
On Wed, Jun 12, 2013 at 11:14:40AM -0700, Kent Overstreet wrote:
> On Mon, Apr 15, 2013 at 02:40:55PM +0300, Octavian Purdila wrote:
> > When using a large number of threads performing AIO operations the
> > IOCTX list may get a significant number of entries which will cause
> > significant overhead. For example, when running this fio script:
> >
> > rw=randrw; size=256k ;directory=/mnt/fio; ioengine=libaio; iodepth=1
> > blocksize=1024; numjobs=512; thread; loops=100
> >
> > on an EXT2 filesystem mounted on top of a ramdisk we can observe up to
> > 30% CPU time spent by lookup_ioctx:
> >
> > 32.51% [guest.kernel] [g] lookup_ioctx
> > 9.19% [guest.kernel] [g] __lock_acquire.isra.28
> > 4.40% [guest.kernel] [g] lock_release
> > 4.19% [guest.kernel] [g] sched_clock_local
> > 3.86% [guest.kernel] [g] local_clock
> > 3.68% [guest.kernel] [g] native_sched_clock
> > 3.08% [guest.kernel] [g] sched_clock_cpu
> > 2.64% [guest.kernel] [g] lock_release_holdtime.part.11
> > 2.60% [guest.kernel] [g] memcpy
> > 2.33% [guest.kernel] [g] lock_acquired
> > 2.25% [guest.kernel] [g] lock_acquire
> > 1.84% [guest.kernel] [g] do_io_submit
> >
> > This patch converts the ioctx list to a radix tree. For a performance
> > comparison the above FIO script was run on a 2 sockets 8 core
> > machine. This are the results (average and %rsd of 10 runs) for the
> > original list based implementation and for the radix tree based
> > implementation:
> >
> > cores 1 2 4 8 16 32
> > list 109376 ms 69119 ms 35682 ms 22671 ms 19724 ms 16408 ms
> > %rsd 0.69% 1.15% 1.17% 1.21% 1.71% 1.43%
> > radix 73651 ms 41748 ms 23028 ms 16766 ms 15232 ms 13787 ms
> > %rsd 1.19% 0.98% 0.69% 1.13% 0.72% 0.75%
> > % of radix
> > relative 66.12% 65.59% 66.63% 72.31% 77.26% 83.66%
> > to list
> >
> > To consider the impact of the patch on the typical case of having
> > only one ctx per process the following FIO script was run:
> >
> > rw=randrw; size=100m ;directory=/mnt/fio; ioengine=libaio; iodepth=1
> > blocksize=1024; numjobs=1; thread; loops=100
> >
> > on the same system and the results are the following:
> >
> > list 58892 ms
> > %rsd 0.91%
> > radix 59404 ms
> > %rsd 0.81%
> > % of radix
> > relative 100.87%
> > to list
>
> So, I was just doing some benchmarking/profiling to get ready to send
> out the aio patches I've got for 3.11 - and it looks like your patch is
> causing a ~1.5% throughput regression in my testing :/
... <snip>
I've got an alternate approach for fixing this wart in lookup_ioctx()...
Instead of using an rbtree, just use the reserved id in the ring buffer
header to index an array pointing the ioctx. It's not finished yet, and
it needs to be tidied up, but is most of the way there.
-ben
--
"Thought is the essence of where you are now."
--
kmo> And, a rework of Ben's code, but this was entirely his idea
kmo> -Kent
bcrl> And fix the code to use the right mm_struct in kill_ioctx(), actually
free memory.
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
2013-07-30 16:54:40 +00:00
|
|
|
mm->ioctx_table = NULL;
|
2009-09-23 22:57:32 +00:00
|
|
|
#endif
|
|
|
|
}
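The table the aio annotation above converges on is essentially an array of context pointers indexed by the reserved id from the ring header, replacing the O(n) list walk in lookup_ioctx() with an O(1) index. A tiny sketch with hypothetical types:

#include <stddef.h>

struct demo_ioctx;			/* opaque per-context state */

struct demo_ioctx_table {
	unsigned int nr;		/* number of slots */
	struct demo_ioctx *table[];	/* slot index == reserved id in the ring header */
};

/* O(1) lookup by reserved id instead of walking a list of contexts. */
static struct demo_ioctx *demo_lookup_ioctx(struct demo_ioctx_table *t,
					    unsigned int id)
{
	if (!t || id >= t->nr)
		return NULL;
	return t->table[id];
}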
|
|
|
|
|
2014-08-08 21:22:03 +00:00
|
|
|
static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_MEMCG
|
|
|
|
mm->owner = p;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2011-07-26 23:08:39 +00:00
|
|
|
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
fork/exec: cleanup mm initialization
mm initialization on fork/exec is spread all over the place, which makes
the code look inconsistent.
We have mm_init(), which is supposed to init/nullify mm's internals, but
it doesn't init all the fields it should:
- on fork ->mmap,mm_rb,vmacache_seqnum,map_count,mm_cpumask,locked_vm
are zeroed in dup_mmap();
- on fork ->pmd_huge_pte is zeroed in dup_mm(), immediately before
calling mm_init();
- ->cpu_vm_mask_var ptr is initialized by mm_init_cpumask(), which is
called before mm_init() on both fork and exec;
- ->context is initialized by init_new_context(), which is called after
mm_init() on both fork and exec;
Let's consolidate all the initializations in mm_init() to make the code
look cleaner.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2014-08-08 21:21:56 +00:00
|
|
|
mm->mmap = NULL;
|
|
|
|
mm->mm_rb = RB_ROOT;
|
|
|
|
mm->vmacache_seqnum = 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
atomic_set(&mm->mm_users, 1);
|
|
|
|
atomic_set(&mm->mm_count, 1);
|
|
|
|
init_rwsem(&mm->mmap_sem);
|
|
|
|
INIT_LIST_HEAD(&mm->mmlist);
|
2008-07-25 08:47:41 +00:00
|
|
|
mm->core_state = NULL;
|
2013-11-14 22:30:48 +00:00
|
|
|
atomic_long_set(&mm->nr_ptes, 0);
|
fork/exec: cleanup mm initialization
2014-08-08 21:21:56 +00:00
|
|
|
mm->map_count = 0;
|
|
|
|
mm->locked_vm = 0;
|
2014-08-08 21:21:58 +00:00
|
|
|
mm->pinned_vm = 0;
|
2010-03-05 21:41:39 +00:00
|
|
|
memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
|
2005-04-16 22:20:36 +00:00
|
|
|
spin_lock_init(&mm->page_table_lock);
|
fork/exec: cleanup mm initialization
2014-08-08 21:21:56 +00:00
|
|
|
mm_init_cpumask(mm);
|
2009-09-23 22:57:32 +00:00
|
|
|
mm_init_aio(mm);
|
cgroups: add an owner to the mm_struct
Remove the mem_cgroup member from mm_struct and instead add an owner.
This approach was suggested by Paul Menage. The advantage of this approach
is that, once the mm->owner is known, using the subsystem id, the cgroup
can be determined. It also allows several control groups that are
virtually grouped by mm_struct to exist independently of the memory
controller, i.e., without adding a mem_cgroup for each controller, to
mm_struct.
A new config option CONFIG_MM_OWNER is added and the memory resource
controller selects this config option.
This patch also adds cgroup callbacks to notify subsystems when mm->owner
changes. The mm_cgroup_changed callback is called with the task_lock() of
the new task held and is called just prior to changing the mm->owner.
I am indebted to Paul Menage for the several reviews of this patchset and
helping me make it lighter and simpler.
This patch was tested on a powerpc box, it was compiled with both the
MM_OWNER config turned on and off.
After the thread group leader exits, it's moved to init_css_state by
cgroup_exit(), thus all future charges from running threads would be
redirected to the init_css_set's subsystem.
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Sudhir Kumar <skumar@linux.vnet.ibm.com>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: David Rientjes <rientjes@google.com>,
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Reviewed-by: Paul Menage <menage@google.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-29 08:00:16 +00:00
|
|
|
mm_init_owner(mm, p);
|
fork/exec: cleanup mm initialization
2014-08-08 21:21:56 +00:00
|
|
|
mmu_notifier_mm_init(mm);
|
mm: fix TLB flush race between migration, and change_protection_range
There are a few subtle races, between change_protection_range (used by
mprotect and change_prot_numa) on one side, and NUMA page migration and
compaction on the other side.
The basic race is that there is a time window between when the PTE gets
made non-present (PROT_NONE or NUMA), and the TLB is flushed.
During that time, a CPU may continue writing to the page.
This is fine most of the time, however compaction or the NUMA migration
code may come in, and migrate the page away.
When that happens, the CPU may continue writing, through the cached
translation, to what is no longer the current memory location of the
process.
This only affects x86, which has a somewhat optimistic pte_accessible.
All other architectures appear to be safe, and will either always flush,
or flush whenever there is a valid mapping, even with no permissions
(SPARC).
The basic race looks like this:
CPU A CPU B CPU C
load TLB entry
make entry PTE/PMD_NUMA
fault on entry
read/write old page
start migrating page
change PTE/PMD to new page
read/write old page [*]
flush TLB
reload TLB from new entry
read/write new page
lose data
[*] the old page may belong to a new user at this point!
The obvious fix is to flush remote TLB entries, by making sure that
pte_accessible aware of the fact that PROT_NONE and PROT_NUMA memory may
still be accessible if there is a TLB flush pending for the mm.
This should fix both NUMA migration and compaction.
[mgorman@suse.de: fix build]
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Alex Thorlton <athorlton@sgi.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-12-19 01:08:44 +00:00
|
|
|
clear_tlb_flush_pending(mm);
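The tlb_flush_pending idea behind clear_tlb_flush_pending() above can be sketched in isolation: the protection-changing side raises a per-mm flag for the window between making PTEs non-present and flushing remote TLBs, and the migration side must treat entries as still accessible while the flag is raised. A hedged sketch with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>

struct demo_mm {
	atomic_bool tlb_flush_pending;	/* set across the PTE-change..TLB-flush window */
};

static void demo_change_protection(struct demo_mm *mm)
{
	atomic_store(&mm->tlb_flush_pending, true);
	/* ... make the PTEs non-present (PROT_NONE / NUMA) ... */
	/* ... flush remote TLBs ... */
	atomic_store(&mm->tlb_flush_pending, false);
}

/* Migration/compaction must not trust "non-present" while a flush is pending. */
static bool demo_pte_accessible(struct demo_mm *mm, bool pte_present)
{
	return pte_present || atomic_load(&mm->tlb_flush_pending);
}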
|
fork/exec: cleanup mm initialization
mm initialization on fork/exec is spread all over the place, which makes
the code look inconsistent.
We have mm_init(), which is supposed to init/nullify mm's internals, but
it doesn't init all the fields it should:
- on fork ->mmap,mm_rb,vmacache_seqnum,map_count,mm_cpumask,locked_vm
are zeroed in dup_mmap();
- on fork ->pmd_huge_pte is zeroed in dup_mm(), immediately before
calling mm_init();
- ->cpu_vm_mask_var ptr is initialized by mm_init_cpumask(), which is
called before mm_init() on both fork and exec;
- ->context is initialized by init_new_context(), which is called after
mm_init() on both fork and exec;
Let's consolidate all the initializations in mm_init() to make the code
look cleaner.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2014-08-08 21:21:56 +00:00
|
|
|
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
|
|
|
|
mm->pmd_huge_pte = NULL;
|
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2014-04-07 22:37:10 +00:00
|
|
|
if (current->mm) {
|
|
|
|
mm->flags = current->mm->flags & MMF_INIT_MASK;
|
|
|
|
mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
|
|
|
|
} else {
|
|
|
|
mm->flags = default_dump_filter;
|
2005-04-16 22:20:36 +00:00
|
|
|
mm->def_flags = 0;
|
2014-04-07 22:37:10 +00:00
|
|
|
}
|
|
|
|
|
fork/exec: cleanup mm initialization
mm initialization on fork/exec is spread all over the place, which makes
the code look inconsistent.
We have mm_init(), which is supposed to init/nullify mm's internals, but
it doesn't init all the fields it should:
- on fork ->mmap,mm_rb,vmacache_seqnum,map_count,mm_cpumask,locked_vm
are zeroed in dup_mmap();
- on fork ->pmd_huge_pte is zeroed in dup_mm(), immediately before
calling mm_init();
- ->cpu_vm_mask_var ptr is initialized by mm_init_cpumask(), which is
called before mm_init() on both fork and exec;
- ->context is initialized by init_new_context(), which is called after
mm_init() on both fork and exec;
Let's consolidate all the initializations in mm_init() to make the code
look cleaner.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2014-08-08 21:21:56 +00:00
|
|
|
if (mm_alloc_pgd(mm))
|
|
|
|
goto fail_nopgd;
|
|
|
|
|
|
|
|
if (init_new_context(p, mm))
|
|
|
|
goto fail_nocontext;
|
2008-02-07 08:13:51 +00:00
|
|
|
|
fork/exec: cleanup mm initialization
mm initialization on fork/exec is spread all over the place, which makes
the code look inconsistent.
We have mm_init(), which is supposed to init/nullify mm's internals, but
it doesn't init all the fields it should:
- on fork ->mmap,mm_rb,vmacache_seqnum,map_count,mm_cpumask,locked_vm
are zeroed in dup_mmap();
- on fork ->pmd_huge_pte is zeroed in dup_mm(), immediately before
calling mm_init();
- ->cpu_vm_mask_var ptr is initialized by mm_init_cpumask(), which is
called before mm_init() on both fork and exec;
- ->context is initialized by init_new_context(), which is called after
mm_init() on both fork and exec;
Let's consolidate all the initializations in mm_init() to make the code
look cleaner.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2014-08-08 21:21:56 +00:00
|
|
|
return mm;
|
|
|
|
|
|
|
|
fail_nocontext:
|
|
|
|
mm_free_pgd(mm);
|
|
|
|
fail_nopgd:
|
2005-04-16 22:20:36 +00:00
|
|
|
free_mm(mm);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2012-03-21 23:33:48 +00:00
|
|
|
static void check_mm(struct mm_struct *mm)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < NR_MM_COUNTERS; i++) {
|
|
|
|
long x = atomic_long_read(&mm->rss_stat.count[i]);
|
|
|
|
|
|
|
|
if (unlikely(x))
|
|
|
|
printk(KERN_ALERT "BUG: Bad rss-counter state "
|
|
|
|
"mm:%p idx:%d val:%ld\n", mm, i, x);
|
|
|
|
}
|
|
|
|
|
2013-11-14 22:31:07 +00:00
|
|
|
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
|
2012-03-21 23:33:48 +00:00
|
|
|
VM_BUG_ON(mm->pmd_huge_pte);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Allocate and initialize an mm_struct.
|
|
|
|
*/
|
2011-07-26 23:08:39 +00:00
|
|
|
struct mm_struct *mm_alloc(void)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2011-07-26 23:08:39 +00:00
|
|
|
struct mm_struct *mm;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
mm = allocate_mm();
|
2011-05-25 00:12:15 +00:00
|
|
|
if (!mm)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
memset(mm, 0, sizeof(*mm));
|
2011-05-29 18:32:28 +00:00
|
|
|
return mm_init(mm, current);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called when the last reference to the mm
|
|
|
|
* is dropped: either by a lazy thread or by
|
|
|
|
* mmput. Free the page directory and the mm.
|
|
|
|
*/
|
2008-02-08 12:19:53 +00:00
|
|
|
void __mmdrop(struct mm_struct *mm)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
BUG_ON(mm == &init_mm);
|
|
|
|
mm_free_pgd(mm);
|
|
|
|
destroy_context(mm);
|
mmu-notifiers: core
With KVM/GRU/XPMEM there isn't just the primary CPU MMU pointing to pages.
There are secondary MMUs (with secondary sptes and secondary tlbs) too.
sptes in the kvm case are shadow pagetables, but when I say spte in
mmu-notifier context, I mean "secondary pte". In GRU case there's no
actual secondary pte and there's only a secondary tlb because the GRU
secondary MMU has no knowledge about sptes and every secondary tlb miss
event in the MMU always generates a page fault that has to be resolved by
the CPU (this is not the case of KVM, where a secondary tlb miss will
walk sptes in hardware and it will refill the secondary tlb transparently
to software if the corresponding spte is present). The same way
zap_page_range has to invalidate the pte before freeing the page, the spte
(and secondary tlb) must also be invalidated before any page is freed and
reused.
Currently we take a page_count pin on every page mapped by sptes, but that
means the pages can't be swapped whenever they're mapped by any spte
because they're part of the guest working set. Furthermore a spte unmap
event can immediately lead to a page being freed when the pin is released
(so requiring the same complex and relatively slow tlb_gather smp safe
logic we have in zap_page_range and that can be avoided completely if the
spte unmap event doesn't require an unpin of the page previously mapped in
the secondary MMU).
The mmu notifiers allow kvm/GRU/XPMEM to attach to the tsk->mm and know
when the VM is swapping or freeing or doing anything on the primary MMU so
that the secondary MMU code can drop sptes before the pages are freed,
avoiding all page pinning and allowing 100% reliable swapping of guest
physical address space. Furthermore it avoids requiring the code that tears
down the mappings of the secondary MMU to implement logic like tlb_gather in
zap_page_range, which would need many IPIs to flush other cpu tlbs for each
fixed number of sptes unmapped.
To make an example: if what happens on the primary MMU is a protection
downgrade (from writeable to wrprotect) the secondary MMU mappings will be
invalidated, and the next secondary-mmu-page-fault will call
get_user_pages and trigger a do_wp_page through get_user_pages if it
called get_user_pages with write=1, and it'll re-establish an updated
spte or secondary-tlb-mapping on the copied page. Or it will setup a
readonly spte or readonly tlb mapping if it's a guest-read, if it calls
get_user_pages with write=0. This is just an example.
This allows mapping any page pointed to by any pte (and in turn visible in the
primary CPU MMU) into a secondary MMU (be it a pure tlb like GRU, or a
full MMU with both sptes and secondary-tlb like the shadow-pagetable layer
with kvm), or a remote DMA in software like XPMEM (hence the need to
schedule in XPMEM code to send the invalidate to the remote node, while
there is no need to schedule in kvm/gru as it's an immediate event like
invalidating a primary-mmu pte).
At least for KVM without this patch it's impossible to swap guests
reliably. And having this feature and removing the page pin allows
several other optimizations that simplify life considerably.
Dependencies:
1) mm_take_all_locks() to register the mmu notifier when the whole VM
isn't doing anything with "mm". This allows mmu notifier users to keep
track if the VM is in the middle of the invalidate_range_begin/end
critical section with an atomic counter increased in range_begin and
decreased in range_end. No secondary MMU page fault is allowed to map
any spte or secondary tlb reference, while the VM is in the middle of
range_begin/end as any page returned by get_user_pages in that critical
section could later immediately be freed without any further
->invalidate_page notification (invalidate_range_begin/end works on
ranges and ->invalidate_page isn't called immediately before freeing
the page). To stop all page freeing and pagetable overwrites the
mmap_sem must be taken in write mode and all other anon_vma/i_mmap
locks must be taken too.
2) It'd be a waste to add branches in the VM if nobody could possibly
run KVM/GRU/XPMEM on the kernel, so mmu notifiers will only be enabled if
CONFIG_KVM=m/y. In the current kernel kvm won't yet take advantage of
mmu notifiers, but this already allows compiling a KVM external module
against a kernel with mmu notifiers enabled and from the next pull from
kvm.git we'll start using them. And GRU/XPMEM will also be able to
continue the development by enabling KVM=m in their config, until they
submit all GRU/XPMEM GPLv2 code to the mainline kernel. Then they can
also enable MMU_NOTIFIERS in the same way KVM does it (even if KVM=n).
This guarantees nobody selects MMU_NOTIFIER=y if KVM and GRU and XPMEM
are all =n.
The mmu_notifier_register call can fail because mm_take_all_locks may be
interrupted by a signal and return -EINTR. Because mmu_notifier_register
is used when a driver starts up, a failure can be gracefully handled. Here
is an example of the change applied to kvm to register the mmu notifiers.
Usually when a driver starts up other allocations are required anyway and
-ENOMEM failure paths exist already.
struct kvm *kvm_arch_create_vm(void)
{
struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
+ int err;
if (!kvm)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
+ kvm->arch.mmu_notifier.ops = &kvm_mmu_notifier_ops;
+ err = mmu_notifier_register(&kvm->arch.mmu_notifier, current->mm);
+ if (err) {
+ kfree(kvm);
+ return ERR_PTR(err);
+ }
+
return kvm;
}
mmu_notifier_unregister returns void and it's reliable.
The patch also adds a few needed but missing includes that would prevent
the kernel from compiling after these changes on non-x86 archs (x86 didn't need
them by luck).
[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: fix mm/filemap_xip.c build]
[akpm@linux-foundation.org: fix mm/mmu_notifier.c build]
Signed-off-by: Andrea Arcangeli <andrea@qumranet.com>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Jack Steiner <steiner@sgi.com>
Cc: Robin Holt <holt@sgi.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Kanoj Sarcar <kanojsarcar@yahoo.com>
Cc: Roland Dreier <rdreier@cisco.com>
Cc: Steve Wise <swise@opengridcomputing.com>
Cc: Avi Kivity <avi@qumranet.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Anthony Liguori <aliguori@us.ibm.com>
Cc: Chris Wright <chrisw@redhat.com>
Cc: Marcelo Tosatti <marcelo@kvack.org>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Cc: Izik Eidus <izike@qumranet.com>
Cc: Anthony Liguori <aliguori@us.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-07-28 22:46:29 +00:00
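For readers unfamiliar with the interface this message introduces, here is a hedged sketch of a minimal in-kernel user, following the same pattern as the kvm hunk quoted above. The my_* names and callback bodies are invented for illustration; the ops layout and signatures follow the interface as added by this patch (the message calls the callbacks range_begin/end, the header names them invalidate_range_start/end), and they have changed in later kernels, so check your tree's headers:
#include <linux/mmu_notifier.h>
#include <linux/mm.h>
#include <linux/sched.h>

static void my_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/* drop secondary ptes / secondary tlb entries covering [start, end) */
}

static void my_invalidate_range_end(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	/* secondary faults may repopulate the range again from here on */
}

static const struct mmu_notifier_ops my_mmu_notifier_ops = {
	.invalidate_range_start	= my_invalidate_range_start,
	.invalidate_range_end	= my_invalidate_range_end,
};

static struct mmu_notifier my_mn = {
	.ops = &my_mmu_notifier_ops,
};

static int my_attach_to_current_mm(void)
{
	/* may fail with -EINTR if mm_take_all_locks() is interrupted */
	return mmu_notifier_register(&my_mn, current->mm);
}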
|
|
|
mmu_notifier_mm_destroy(mm);
|
2012-03-21 23:33:48 +00:00
|
|
|
check_mm(mm);
|
2005-04-16 22:20:36 +00:00
|
|
|
free_mm(mm);
|
|
|
|
}
|
2007-11-21 14:41:05 +00:00
|
|
|
EXPORT_SYMBOL_GPL(__mmdrop);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Decrement the use count and release all resources for an mm.
|
|
|
|
*/
|
|
|
|
void mmput(struct mm_struct *mm)
|
|
|
|
{
|
2006-06-23 09:05:15 +00:00
|
|
|
might_sleep();
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
if (atomic_dec_and_test(&mm->mm_users)) {
|
2012-03-30 18:26:31 +00:00
|
|
|
uprobe_clear_state(mm);
|
2005-04-16 22:20:36 +00:00
|
|
|
exit_aio(mm);
|
ksm: fix deadlock with munlock in exit_mmap
Rawhide users have reported a hang at startup when cryptsetup is run: the
same problem can be simply reproduced by running a program int main() {
mlockall(MCL_CURRENT | MCL_FUTURE); return 0; }
The problem is that exit_mmap() applies munlock_vma_pages_all() to
clean up VM_LOCKED areas, and its current implementation (stupidly)
tries to fault in absent pages, for example where PROT_NONE prevented
them being faulted in when mlocking. Whereas the "ksm: fix oom
deadlock" patch, knowing there's a race by which KSM might try to fault
in pages after exit_mmap() had finally zapped the range, backs out of
such faults doing nothing when its ksm_test_exit() notices mm_users 0.
So revert that part of "ksm: fix oom deadlock" which moved the
ksm_exit() call from before exit_mmap() to the middle of exit_mmap();
and remove those ksm_test_exit() checks from the page fault paths, so
allowing the munlocking to proceed without interference.
ksm_exit, if there are rmap_items still chained on this mm slot, takes
mmap_sem write side: so preventing KSM from working on an mm while
exit_mmap runs. And KSM will bail out as soon as it notices that
mm_users is already zero, thanks to its internal ksm_test_exit checks.
So that when a task is killed by OOM killer or the user, KSM will not
indefinitely prevent it from running exit_mmap to release its memory.
This does break a part of what "ksm: fix oom deadlock" was trying to
achieve. When unmerging KSM (echo 2 >/sys/kernel/mm/ksm), and even
when ksmd itself has to cancel a KSM page, it is possible that the
first OOM-kill victim would be the KSM process being faulted: then its
memory won't be freed until a second victim has been selected (freeing
memory for the unmerging fault to complete).
But the OOM killer is already liable to kill a second victim once the
intended victim's p->mm goes to NULL: so there's not much point in
rejecting this KSM patch before fixing that OOM behaviour. It is very
much more important to allow KSM users to boot up, than to haggle over
an unlikely and poorly supported OOM case.
We also intend to fix munlocking to not fault pages: at which point
this patch _could_ be reverted; though that would be controversial, so
we hope to find a better solution.
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Justin M. Forbes <jforbes@redhat.com>
Acked-for-now-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-09-22 00:02:22 +00:00
|
|
|
ksm_exit(mm);
|
2011-01-13 23:46:58 +00:00
|
|
|
khugepaged_exit(mm); /* must run before exit_mmap */
|
2005-04-16 22:20:36 +00:00
|
|
|
exit_mmap(mm);
|
2008-04-29 08:01:36 +00:00
|
|
|
set_mm_exe_file(mm, NULL);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!list_empty(&mm->mmlist)) {
|
|
|
|
spin_lock(&mmlist_lock);
|
|
|
|
list_del(&mm->mmlist);
|
|
|
|
spin_unlock(&mmlist_lock);
|
|
|
|
}
|
2009-09-23 22:57:41 +00:00
|
|
|
if (mm->binfmt)
|
|
|
|
module_put(mm->binfmt->module);
|
2005-04-16 22:20:36 +00:00
|
|
|
mmdrop(mm);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(mmput);
|
|
|
|
|
2011-05-26 23:25:46 +00:00
|
|
|
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
|
|
|
|
{
|
|
|
|
if (new_exe_file)
|
|
|
|
get_file(new_exe_file);
|
|
|
|
if (mm->exe_file)
|
|
|
|
fput(mm->exe_file);
|
|
|
|
mm->exe_file = new_exe_file;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct file *get_mm_exe_file(struct mm_struct *mm)
|
|
|
|
{
|
|
|
|
struct file *exe_file;
|
|
|
|
|
2012-10-08 23:28:51 +00:00
|
|
|
/* We need mmap_sem to protect against races with removal of exe_file */
|
2011-05-26 23:25:46 +00:00
|
|
|
down_read(&mm->mmap_sem);
|
|
|
|
exe_file = mm->exe_file;
|
|
|
|
if (exe_file)
|
|
|
|
get_file(exe_file);
|
|
|
|
up_read(&mm->mmap_sem);
|
|
|
|
return exe_file;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
|
|
|
|
{
|
|
|
|
/* It's safe to write the exe_file pointer without exe_file_lock because
|
|
|
|
* this is called during fork when the task is not yet in /proc */
|
|
|
|
newmm->exe_file = get_mm_exe_file(oldmm);
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/**
|
|
|
|
* get_task_mm - acquire a reference to the task's mm
|
|
|
|
*
|
2008-07-25 08:47:38 +00:00
|
|
|
* Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning
|
2005-04-16 22:20:36 +00:00
|
|
|
* this kernel workthread has transiently adopted a user mm with use_mm,
|
|
|
|
* to do its AIO) is not set and if so returns a reference to it, after
|
|
|
|
* bumping up the use count. User must release the mm via mmput()
|
|
|
|
* after use. Typically used by /proc and ptrace.
|
|
|
|
*/
|
|
|
|
struct mm_struct *get_task_mm(struct task_struct *task)
|
|
|
|
{
|
|
|
|
struct mm_struct *mm;
|
|
|
|
|
|
|
|
task_lock(task);
|
|
|
|
mm = task->mm;
|
|
|
|
if (mm) {
|
2008-07-25 08:47:38 +00:00
|
|
|
if (task->flags & PF_KTHREAD)
|
2005-04-16 22:20:36 +00:00
|
|
|
mm = NULL;
|
|
|
|
else
|
|
|
|
atomic_inc(&mm->mm_users);
|
|
|
|
}
|
|
|
|
task_unlock(task);
|
|
|
|
return mm;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(get_task_mm);
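As a usage note, a hedged sketch of the get/put pairing the comment above asks for; report_hiwater_rss() is a hypothetical helper, but it only uses get_task_mm(), mmput() and the hiwater_rss field that dup_mm() also touches further down:
/* Hypothetical caller: read the RSS high-water mark of another task's mm. */
static unsigned long report_hiwater_rss(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	unsigned long hiwater = 0;

	if (mm) {
		hiwater = mm->hiwater_rss;	/* snapshot while holding a reference */
		mmput(mm);			/* drop the reference get_task_mm() took */
	}
	return hiwater;
}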
|
|
|
|
|
2012-02-02 01:04:09 +00:00
|
|
|
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
|
|
|
|
{
|
|
|
|
struct mm_struct *mm;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = mutex_lock_killable(&task->signal->cred_guard_mutex);
|
|
|
|
if (err)
|
|
|
|
return ERR_PTR(err);
|
|
|
|
|
|
|
|
mm = get_task_mm(task);
|
|
|
|
if (mm && mm != current->mm &&
|
|
|
|
!ptrace_may_access(task, mode)) {
|
|
|
|
mmput(mm);
|
|
|
|
mm = ERR_PTR(-EACCES);
|
|
|
|
}
|
|
|
|
mutex_unlock(&task->signal->cred_guard_mutex);
|
|
|
|
|
|
|
|
return mm;
|
|
|
|
}
|
|
|
|
|
2012-03-05 22:59:13 +00:00
|
|
|
static void complete_vfork_done(struct task_struct *tsk)
|
2012-03-05 22:59:13 +00:00
|
|
|
{
|
2012-03-05 22:59:13 +00:00
|
|
|
struct completion *vfork;
|
2012-03-05 22:59:13 +00:00
|
|
|
|
2012-03-05 22:59:13 +00:00
|
|
|
task_lock(tsk);
|
|
|
|
vfork = tsk->vfork_done;
|
|
|
|
if (likely(vfork)) {
|
|
|
|
tsk->vfork_done = NULL;
|
|
|
|
complete(vfork);
|
|
|
|
}
|
|
|
|
task_unlock(tsk);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int wait_for_vfork_done(struct task_struct *child,
|
|
|
|
struct completion *vfork)
|
|
|
|
{
|
|
|
|
int killed;
|
|
|
|
|
|
|
|
freezer_do_not_count();
|
|
|
|
killed = wait_for_completion_killable(vfork);
|
|
|
|
freezer_count();
|
|
|
|
|
|
|
|
if (killed) {
|
|
|
|
task_lock(child);
|
|
|
|
child->vfork_done = NULL;
|
|
|
|
task_unlock(child);
|
|
|
|
}
|
|
|
|
|
|
|
|
put_task_struct(child);
|
|
|
|
return killed;
|
2012-03-05 22:59:13 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* Please note the differences between mmput and mm_release.
|
|
|
|
* mmput is called whenever we stop holding onto a mm_struct,
|
|
|
|
* error, success, whatever.
|
|
|
|
*
|
|
|
|
* mm_release is called after a mm_struct has been removed
|
|
|
|
* from the current process.
|
|
|
|
*
|
|
|
|
* This difference is important for error handling, when we
|
|
|
|
* only half set up a mm_struct for a new process and need to restore
|
|
|
|
* the old one. Because we mmput the new mm_struct before
|
|
|
|
* restoring the old one. . .
|
|
|
|
* Eric Biederman 10 January 1998
|
|
|
|
*/
|
|
|
|
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
|
|
|
|
{
|
2008-11-15 18:20:36 +00:00
|
|
|
/* Get rid of any futexes when releasing the mm */
|
|
|
|
#ifdef CONFIG_FUTEX
|
2009-10-05 16:17:32 +00:00
|
|
|
if (unlikely(tsk->robust_list)) {
|
2008-11-15 18:20:36 +00:00
|
|
|
exit_robust_list(tsk);
|
2009-10-05 16:17:32 +00:00
|
|
|
tsk->robust_list = NULL;
|
|
|
|
}
|
2008-11-15 18:20:36 +00:00
|
|
|
#ifdef CONFIG_COMPAT
|
2009-10-05 16:17:32 +00:00
|
|
|
if (unlikely(tsk->compat_robust_list)) {
|
2008-11-15 18:20:36 +00:00
|
|
|
compat_exit_robust_list(tsk);
|
2009-10-05 16:17:32 +00:00
|
|
|
tsk->compat_robust_list = NULL;
|
|
|
|
}
|
2008-11-15 18:20:36 +00:00
|
|
|
#endif
|
2009-10-05 16:18:03 +00:00
|
|
|
if (unlikely(!list_empty(&tsk->pi_state_list)))
|
|
|
|
exit_pi_state_list(tsk);
|
2008-11-15 18:20:36 +00:00
|
|
|
#endif
|
|
|
|
|
uprobes/core: Handle breakpoint and singlestep exceptions
Uprobes uses exception notifiers to get to know if a thread hit
a breakpoint or a singlestep exception.
When a thread hits a uprobe or is singlestepping post a uprobe
hit, the uprobe exception notifier sets its TIF_UPROBE bit,
which will then be checked on its return to userspace path
(do_notify_resume() ->uprobe_notify_resume()), where the
consumers handlers are run (in task context) based on the
defined filters.
Uprobe hits are thread specific and hence we need to maintain
information about if a task hit a uprobe, what uprobe was hit,
the slot where the original instruction was copied for xol so
that it can be singlestepped with appropriate fixups.
In some cases, special care is needed for instructions that are
executed out of line (xol). These are architecture specific
artefacts, such as handling RIP relative instructions on x86_64.
Since the instruction at which the uprobe was inserted is
executed out of line, architecture specific fixups are added so
that the thread continues normal execution in the presence of a
uprobe.
Postpone the signals until we execute the probed insn.
post_xol() path does a recalc_sigpending() before return to
user-mode, this ensures the signal can't be lost.
Uprobes relies on DIE_DEBUG notification to notify if a
singlestep is complete.
Adds x86 specific uprobe exception notifiers and appropriate
hooks needed to determine a uprobe hit and subsequent post
processing.
Add requisite x86 fixups for xol for uprobes. Specific cases
needing fixups include relative jumps (x86_64), calls, etc.
Where possible, we check and skip singlestepping the
breakpointed instructions. For now we skip single byte as well
as few multibyte nop instructions. However this can be extended
to other instructions too.
Credits to Oleg Nesterov for suggestions/patches related to
signal, breakpoint, singlestep handling code.
Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@linux.vnet.ibm.com>
Cc: Linux-mm <linux-mm@kvack.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20120313180011.29771.89027.sendpatchset@srdronam.in.ibm.com
[ Performed various cleanliness edits ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2012-03-13 18:00:11 +00:00
|
|
|
uprobe_free_utask(tsk);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* Get rid of any cached register state */
|
|
|
|
deactivate_mm(tsk, mm);
|
|
|
|
|
2006-12-07 04:36:34 +00:00
|
|
|
/*
|
|
|
|
* If we're exiting normally, clear a user-space tid field if
|
|
|
|
* requested. We leave this alone when dying by signal, to leave
|
|
|
|
* the value intact in a core dump, and to save the unnecessary
|
2012-03-05 22:59:13 +00:00
|
|
|
* trouble, say, a killed vfork parent shouldn't touch this mm.
|
|
|
|
* Userland only wants this done for a sys_exit.
|
2006-12-07 04:36:34 +00:00
|
|
|
*/
|
execve: must clear current->clear_child_tid
While looking at Jens Rosenboom's bug report
(http://lkml.org/lkml/2009/7/27/35) about a strange sys_futex call done from
a dying "ps" program, we found the following problem.
clone() syscall has special support for TID of created threads. This
support includes two features.
One (CLONE_CHILD_SETTID) is to set an integer into user memory with the
TID value.
One (CLONE_CHILD_CLEARTID) is to clear this same integer once the created
thread dies.
The integer location is a user provided pointer, provided at clone()
time.
The kernel keeps this pointer value in current->clear_child_tid.
At execve() time, we should make sure the kernel doesn't keep this user
provided pointer, as full user memory is replaced by a new one.
As glibc fork() actually uses clone() syscall with CLONE_CHILD_SETTID and
CLONE_CHILD_CLEARTID set, chances are high that we might corrupt user
memory in forked processes.
Following sequence could happen:
1) bash (or any program) starts a new process, by a fork() call that
glibc maps to a clone( ... CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID
...) syscall
2) When new process starts, its current->clear_child_tid is set to a
location that has a meaning only in bash (or initial program) context
(&THREAD_SELF->tid)
3) This new process does the execve() syscall to start a new program.
current->clear_child_tid is left unchanged (a non NULL value)
4) If this new program creates some threads, and initial thread exits,
kernel will attempt to clear the integer pointed by
current->clear_child_tid from mm_release() :
if (tsk->clear_child_tid
&& !(tsk->flags & PF_SIGNALED)
&& atomic_read(&mm->mm_users) > 1) {
u32 __user * tidptr = tsk->clear_child_tid;
tsk->clear_child_tid = NULL;
/*
* We don't check the error code - if userspace has
* not set up a proper pointer then tough luck.
*/
<< here >> put_user(0, tidptr);
sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
}
5) OR : if new program is not multi-threaded, but spied by /proc/pid
users (ps command for example), mm_users > 1, and the exiting program
could corrupt 4 bytes in a persistent memory area (shm or memory mapped
file)
If current->clear_child_tid points to a writeable portion of memory of the
new program, the kernel happily and silently corrupts 4 bytes of memory, with
unexpected effects.
Fix is straightforward and should not break any sane program.
Reported-by: Jens Rosenboom <jens@mcbone.net>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sonny Rao <sonnyrao@us.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ulrich Drepper <drepper@redhat.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-08-06 22:09:28 +00:00
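The CLONE_CHILD_CLEARTID behaviour the message relies on can be observed from user space; below is a small, hedged demonstration in plain C using the glibc clone() wrapper. CLONE_VM is used so that mm_users stays above 1 and the cleared word is visible to the parent; the sentinel value and helper names are arbitrary:
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

static pid_t child_tid = 1;		/* non-zero sentinel; the kernel clears it */

static int child_fn(void *arg)
{
	return 0;			/* normal exit: mm_release() performs the clear */
}

int main(void)
{
	const size_t stack_size = 64 * 1024;
	char *stack = malloc(stack_size);
	pid_t pid;

	if (!stack)
		return 1;

	/* stack grows down on the usual architectures, hence stack + stack_size */
	pid = clone(child_fn, stack + stack_size,
		    CLONE_VM | CLONE_CHILD_CLEARTID | SIGCHLD,
		    NULL, NULL, NULL, &child_tid);
	if (pid < 0) {
		perror("clone");
		return 1;
	}
	waitpid(pid, NULL, 0);
	printf("child_tid after the child exited: %d\n", (int)child_tid); /* 0 */
	free(stack);
	return 0;
}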
|
|
|
if (tsk->clear_child_tid) {
|
|
|
|
if (!(tsk->flags & PF_SIGNALED) &&
|
|
|
|
atomic_read(&mm->mm_users) > 1) {
|
|
|
|
/*
|
|
|
|
* We don't check the error code - if userspace has
|
|
|
|
* not set up a proper pointer then tough luck.
|
|
|
|
*/
|
|
|
|
put_user(0, tsk->clear_child_tid);
|
|
|
|
sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
|
|
|
|
1, NULL, NULL, 0);
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
tsk->clear_child_tid = NULL;
|
|
|
|
}
|
2012-05-31 23:26:21 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* All done, finally we can wake up parent and return this mm to him.
|
|
|
|
* Also kthread_stop() uses this completion for synchronization.
|
|
|
|
*/
|
|
|
|
if (tsk->vfork_done)
|
|
|
|
complete_vfork_done(tsk);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2006-02-07 20:59:01 +00:00
|
|
|
/*
|
|
|
|
* Allocate a new mm structure and copy contents from the
|
|
|
|
* mm structure of the passed in task structure.
|
|
|
|
*/
|
2014-01-23 23:55:46 +00:00
|
|
|
static struct mm_struct *dup_mm(struct task_struct *tsk)
|
2006-02-07 20:59:01 +00:00
|
|
|
{
|
|
|
|
struct mm_struct *mm, *oldmm = current->mm;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
mm = allocate_mm();
|
|
|
|
if (!mm)
|
|
|
|
goto fail_nomem;
|
|
|
|
|
|
|
|
memcpy(mm, oldmm, sizeof(*mm));
|
|
|
|
|
2008-02-07 08:13:51 +00:00
|
|
|
if (!mm_init(mm, tsk))
|
2006-02-07 20:59:01 +00:00
|
|
|
goto fail_nomem;
|
|
|
|
|
2008-04-29 08:01:36 +00:00
|
|
|
dup_mm_exe_file(oldmm, mm);
|
|
|
|
|
2006-02-07 20:59:01 +00:00
|
|
|
err = dup_mmap(mm, oldmm);
|
|
|
|
if (err)
|
|
|
|
goto free_pt;
|
|
|
|
|
|
|
|
mm->hiwater_rss = get_mm_rss(mm);
|
|
|
|
mm->hiwater_vm = mm->total_vm;
|
|
|
|
|
2009-09-23 22:57:41 +00:00
|
|
|
if (mm->binfmt && !try_module_get(mm->binfmt->module))
|
|
|
|
goto free_pt;
|
|
|
|
|
2006-02-07 20:59:01 +00:00
|
|
|
return mm;
|
|
|
|
|
|
|
|
free_pt:
|
2009-09-23 22:57:41 +00:00
|
|
|
/* don't put binfmt in mmput, we haven't got module yet */
|
|
|
|
mm->binfmt = NULL;
|
2006-02-07 20:59:01 +00:00
|
|
|
mmput(mm);
|
|
|
|
|
|
|
|
fail_nomem:
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2011-07-26 23:08:39 +00:00
|
|
|
static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2011-07-26 23:08:39 +00:00
|
|
|
struct mm_struct *mm, *oldmm;
|
2005-04-16 22:20:36 +00:00
|
|
|
int retval;
|
|
|
|
|
|
|
|
tsk->min_flt = tsk->maj_flt = 0;
|
|
|
|
tsk->nvcsw = tsk->nivcsw = 0;
|
2009-02-06 23:37:47 +00:00
|
|
|
#ifdef CONFIG_DETECT_HUNG_TASK
|
|
|
|
tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
|
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
tsk->mm = NULL;
|
|
|
|
tsk->active_mm = NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Are we cloning a kernel thread?
|
|
|
|
*
|
|
|
|
* We need to steal an active VM for that.
|
|
|
|
*/
|
|
|
|
oldmm = current->mm;
|
|
|
|
if (!oldmm)
|
|
|
|
return 0;
|
|
|
|
|
mm: per-thread vma caching
This patch is a continuation of efforts trying to optimize find_vma(),
avoiding potentially expensive rbtree walks to locate a vma upon faults.
The original approach (https://lkml.org/lkml/2013/11/1/410), where the
largest vma was also cached, ended up being too specific and random,
thus further comparison with other approaches were needed. There are
two things to consider when dealing with this, the cache hit rate and
the latency of find_vma(). Improving the hit-rate does not necessarily
translate in finding the vma any faster, as the overhead of any fancy
caching schemes can be too high to consider.
We currently cache the last used vma for the whole address space, which
provides a nice optimization, reducing the total cycles in find_vma() by
up to 250%, for workloads with good locality. On the other hand, this
simple scheme is pretty much useless for workloads with poor locality.
Analyzing ebizzy runs shows that, no matter how many threads are
running, the mmap_cache hit rate is less than 2%, and in many situations
below 1%.
The proposed approach is to replace this scheme with a small per-thread
cache, maximizing hit rates at a very low maintenance cost.
Invalidations are performed by simply bumping up a 32-bit sequence
number. The only expensive operation is in the rare case of a seq
number overflow, where all caches that share the same address space are
flushed. Upon a miss, the proposed replacement policy is based on the
page number that contains the virtual address in question. Concretely,
the following results are seen on an 80 core, 8 socket x86-64 box:
1) System bootup: Most programs are single threaded, so the per-thread
scheme does improve ~50% hit rate by just adding a few more slots to
the cache.
+----------------+----------+------------------+
| caching scheme | hit-rate | cycles (billion) |
+----------------+----------+------------------+
| baseline | 50.61% | 19.90 |
| patched | 73.45% | 13.58 |
+----------------+----------+------------------+
2) Kernel build: This one is already pretty good with the current
approach as we're dealing with good locality.
+----------------+----------+------------------+
| caching scheme | hit-rate | cycles (billion) |
+----------------+----------+------------------+
| baseline | 75.28% | 11.03 |
| patched | 88.09% | 9.31 |
+----------------+----------+------------------+
3) Oracle 11g Data Mining (4k pages): Similar to the kernel build workload.
+----------------+----------+------------------+
| caching scheme | hit-rate | cycles (billion) |
+----------------+----------+------------------+
| baseline | 70.66% | 17.14 |
| patched | 91.15% | 12.57 |
+----------------+----------+------------------+
4) Ebizzy: There's a fair amount of variation from run to run, but this
approach always shows nearly perfect hit rates, while baseline is just
about non-existent. The amounts of cycles can fluctuate between
anywhere from ~60 to ~116 for the baseline scheme, but this approach
reduces it considerably. For instance, with 80 threads:
+----------------+----------+------------------+
| caching scheme | hit-rate | cycles (billion) |
+----------------+----------+------------------+
| baseline | 1.06% | 91.54 |
| patched | 99.97% | 14.18 |
+----------------+----------+------------------+
[akpm@linux-foundation.org: fix nommu build, per Davidlohr]
[akpm@linux-foundation.org: document vmacache_valid() logic]
[akpm@linux-foundation.org: attempt to untangle header files]
[akpm@linux-foundation.org: add vmacache_find() BUG_ON]
[hughd@google.com: add vmacache_valid_mm() (from Oleg)]
[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: adjust and enhance comments]
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: Michel Lespinasse <walken@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Tested-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2014-04-07 22:37:25 +00:00
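The caching scheme described above (a handful of per-thread slots indexed by page number, invalidated in O(1) by bumping a per-address-space sequence number) can be sketched in stand-alone C; the types and names below are toy stand-ins, not the kernel's vmacache implementation:
#include <stdint.h>
#include <stdio.h>

#define CACHE_SLOTS 4			/* a handful of slots per thread */

struct vma {				/* toy stand-in for vm_area_struct */
	unsigned long start, end;
};

struct address_space_state {		/* toy stand-in for the mm-wide state */
	uint32_t seqnum;		/* bumped whenever the VMA set changes */
};

struct thread_vma_cache {
	uint32_t seqnum;		/* snapshot of the mm seqnum when last valid */
	struct vma *slot[CACHE_SLOTS];
};

/* Replacement policy from the message: index by the faulting page number. */
static unsigned int slot_of(unsigned long addr)
{
	return (addr >> 12) & (CACHE_SLOTS - 1);
}

/* Writers (mmap/munmap/...) invalidate every thread's cache in O(1). */
static void vma_set_changed(struct address_space_state *mm)
{
	mm->seqnum++;			/* overflow handling omitted in this toy */
}

static struct vma *cache_find(struct address_space_state *mm,
			      struct thread_vma_cache *tc, unsigned long addr)
{
	struct vma *v;

	if (tc->seqnum != mm->seqnum)
		return NULL;		/* stale snapshot: miss */
	v = tc->slot[slot_of(addr)];
	if (v && addr >= v->start && addr < v->end)
		return v;
	return NULL;
}

static void cache_update(struct address_space_state *mm,
			 struct thread_vma_cache *tc,
			 unsigned long addr, struct vma *v)
{
	if (tc->seqnum != mm->seqnum) {
		for (unsigned int i = 0; i < CACHE_SLOTS; i++)
			tc->slot[i] = NULL;
		tc->seqnum = mm->seqnum;
	}
	tc->slot[slot_of(addr)] = v;
}

int main(void)
{
	struct address_space_state mm = { .seqnum = 0 };
	struct thread_vma_cache tc = { .seqnum = 0, .slot = { 0 } };
	struct vma v = { .start = 0x1000, .end = 0x5000 };

	cache_update(&mm, &tc, 0x1234, &v);
	printf("hit:  %p\n", (void *)cache_find(&mm, &tc, 0x1234));
	vma_set_changed(&mm);		/* e.g. an munmap() happened */
	printf("miss: %p\n", (void *)cache_find(&mm, &tc, 0x1234));
	return 0;
}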
|
|
|
/* initialize the new vmacache entries */
|
|
|
|
vmacache_flush(tsk);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
if (clone_flags & CLONE_VM) {
|
|
|
|
atomic_inc(&oldmm->mm_users);
|
|
|
|
mm = oldmm;
|
|
|
|
goto good_mm;
|
|
|
|
}
|
|
|
|
|
|
|
|
retval = -ENOMEM;
|
2006-02-07 20:59:01 +00:00
|
|
|
mm = dup_mm(tsk);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!mm)
|
|
|
|
goto fail_nomem;
|
|
|
|
|
|
|
|
good_mm:
|
|
|
|
tsk->mm = mm;
|
|
|
|
tsk->active_mm = mm;
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
fail_nomem:
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2007-10-19 06:41:10 +00:00
|
|
|
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2009-03-30 11:20:30 +00:00
|
|
|
struct fs_struct *fs = current->fs;
|
2005-04-16 22:20:36 +00:00
|
|
|
if (clone_flags & CLONE_FS) {
|
2009-03-30 11:20:30 +00:00
|
|
|
/* tsk->fs is already what we want */
|
2010-08-17 18:37:33 +00:00
|
|
|
spin_lock(&fs->lock);
|
2009-03-30 11:20:30 +00:00
|
|
|
if (fs->in_exec) {
|
2010-08-17 18:37:33 +00:00
|
|
|
spin_unlock(&fs->lock);
|
2009-03-30 11:20:30 +00:00
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
fs->users++;
|
2010-08-17 18:37:33 +00:00
|
|
|
spin_unlock(&fs->lock);
|
2005-04-16 22:20:36 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2009-03-30 11:20:30 +00:00
|
|
|
tsk->fs = copy_fs_struct(fs);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!tsk->fs)
|
|
|
|
return -ENOMEM;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-07-26 23:08:39 +00:00
|
|
|
static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
|
2006-02-07 20:59:02 +00:00
|
|
|
{
|
|
|
|
struct files_struct *oldf, *newf;
|
|
|
|
int error = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A background process may not have any files ...
|
|
|
|
*/
|
|
|
|
oldf = current->files;
|
|
|
|
if (!oldf)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (clone_flags & CLONE_FILES) {
|
|
|
|
atomic_inc(&oldf->count);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
newf = dup_fd(oldf, &error);
|
|
|
|
if (!newf)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
tsk->files = newf;
|
|
|
|
error = 0;
|
|
|
|
out:
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2008-01-24 07:54:47 +00:00
|
|
|
static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
|
2008-01-24 07:52:45 +00:00
|
|
|
{
|
|
|
|
#ifdef CONFIG_BLOCK
|
|
|
|
struct io_context *ioc = current->io_context;
|
block: make ioc get/put interface more conventional and fix race on allocation
Ignoring copy_io() during fork, io_context can be allocated from two
places - current_io_context() and set_task_ioprio(). The former is
always called from local task while the latter can be called from
different task. The synchronization between them is peculiar and
dubious.
* current_io_context() doesn't grab task_lock() and assumes that if it
saw %NULL ->io_context, it would stay that way until allocation and
assignment is complete. It has smp_wmb() between alloc/init and
assignment.
* set_task_ioprio() grabs task_lock() for assignment and does
smp_read_barrier_depends() between "ioc = task->io_context" and "if
(ioc)". Unfortunately, this doesn't achieve anything - the latter
is not a dependent load of the former. ie, if ioc itself were being
dereferenced "ioc->xxx", it would mean something (not sure what tho)
but as the code currently stands, the dependent read barrier is
noop.
As only one of the two test-assignment sequences is task_lock()
protected, the task_lock() can't do much about race between the two.
Nothing prevents current_io_context() and set_task_ioprio() allocating
its own ioc for the same task and overwriting the other's.
Also, set_task_ioprio() can race with exiting task and create a new
ioc after exit_io_context() is finished.
ioc get/put doesn't have any reason to be complex. The only hot path
is accessing the existing ioc of %current, which is simple to achieve
given that ->io_context is never destroyed as long as the task is
alive. All other paths can happily go through task_lock() like all
other task sub structures without impacting anything.
This patch updates ioc get/put so that it becomes more conventional.
* alloc_io_context() is replaced with get_task_io_context(). This is
the only interface which can acquire access to ioc of another task.
On return, the caller has an explicit reference to the object which
should be put using put_io_context() afterwards.
* The functionality of current_io_context() remains the same but when
creating a new ioc, it shares the code path with
get_task_io_context() and always goes through task_lock().
* get_io_context() now means incrementing ref on an ioc which the
caller already has access to (be that an explicit refcnt or implicit
%current one).
* PF_EXITING inhibits creation of new io_context and once
exit_io_context() is finished, it's guaranteed that both ioc
acquisition functions return %NULL.
* All users are updated. Most are trivial but
smp_read_barrier_depends() removal from cfq_get_io_context() needs a
bit of explanation. I suppose the original intention was to ensure
ioc->ioprio is visible when set_task_ioprio() allocates new
io_context and installs it; however, this wouldn't have worked
because set_task_ioprio() doesn't have wmb between init and install.
There are other problems with this which will be fixed in another
patch.
* While at it, use NUMA_NO_NODE instead of -1 for wildcard node
specification.
-v2: Vivek spotted contamination from debug patch. Removed.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2011-12-13 23:33:38 +00:00
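The ownership rule the message keeps coming back to (whoever obtains a context through a "get" helper owns a reference and must drop it with "put"; the last put frees the object) is generic; here is a small stand-alone C11 sketch with made-up names, not the kernel's io_context code:
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	atomic_int refcount;
	int prio;
};

static struct ctx *ctx_alloc(int prio)
{
	struct ctx *c = malloc(sizeof(*c));

	if (!c)
		return NULL;
	atomic_init(&c->refcount, 1);	/* the creator owns the first reference */
	c->prio = prio;
	return c;
}

static struct ctx *ctx_get(struct ctx *c)
{
	atomic_fetch_add(&c->refcount, 1);	/* caller gains an explicit reference */
	return c;
}

static void ctx_put(struct ctx *c)
{
	if (atomic_fetch_sub(&c->refcount, 1) == 1)
		free(c);			/* last reference dropped */
}

int main(void)
{
	struct ctx *c = ctx_alloc(4);
	struct ctx *shared;

	if (!c)
		return 1;
	shared = ctx_get(c);			/* e.g. handed to another task */
	printf("prio=%d refs=%d\n", shared->prio, atomic_load(&c->refcount));
	ctx_put(shared);
	ctx_put(c);
	return 0;
}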
|
|
|
struct io_context *new_ioc;
|
2008-01-24 07:52:45 +00:00
|
|
|
|
|
|
|
if (!ioc)
|
|
|
|
return 0;
|
2008-01-24 07:54:47 +00:00
|
|
|
/*
|
|
|
|
* Share io context with parent, if CLONE_IO is set
|
|
|
|
*/
|
|
|
|
if (clone_flags & CLONE_IO) {
|
2012-03-05 21:15:25 +00:00
|
|
|
ioc_task_link(ioc);
|
|
|
|
tsk->io_context = ioc;
|
2008-01-24 07:54:47 +00:00
|
|
|
} else if (ioprio_valid(ioc->ioprio)) {
|
block: make ioc get/put interface more conventional and fix race on allocation
Ignoring copy_io() during fork, io_context can be allocated from two
places - current_io_context() and set_task_ioprio(). The former is
always called from local task while the latter can be called from
different task. The synchronization between them is peculiar and
dubious.
* current_io_context() doesn't grab task_lock() and assumes that if it
saw %NULL ->io_context, it would stay that way until allocation and
assignment is complete. It has smp_wmb() between alloc/init and
assignment.
* set_task_ioprio() grabs task_lock() for assignment and does
smp_read_barrier_depends() between "ioc = task->io_context" and "if
(ioc)". Unfortunately, this doesn't achieve anything - the latter
is not a dependent load of the former. ie, if ioc itself were being
dereferenced "ioc->xxx", it would mean something (not sure what tho)
but as the code currently stands, the dependent read barrier is
noop.
As only one of the two test-assignment sequences is task_lock()
protected, the task_lock() can't do much about race between the two.
Nothing prevents current_io_context() and set_task_ioprio() allocating
its own ioc for the same task and overwriting the other's.
Also, set_task_ioprio() can race with exiting task and create a new
ioc after exit_io_context() is finished.
ioc get/put doesn't have any reason to be complex. The only hot path
is accessing the existing ioc of %current, which is simple to achieve
given that ->io_context is never destroyed as long as the task is
alive. All other paths can happily go through task_lock() like all
other task sub structures without impacting anything.
This patch updates ioc get/put so that it becomes more conventional.
* alloc_io_context() is replaced with get_task_io_context(). This is
the only interface which can acquire access to ioc of another task.
On return, the caller has an explicit reference to the object which
should be put using put_io_context() afterwards.
* The functionality of current_io_context() remains the same but when
creating a new ioc, it shares the code path with
get_task_io_context() and always goes through task_lock().
* get_io_context() now means incrementing ref on an ioc which the
caller already has access to (be that an explicit refcnt or implicit
%current one).
* PF_EXITING inhibits creation of new io_context and once
exit_io_context() is finished, it's guaranteed that both ioc
acquisition functions return %NULL.
* All users are updated. Most are trivial but
smp_read_barrier_depends() removal from cfq_get_io_context() needs a
bit of explanation. I suppose the original intention was to ensure
ioc->ioprio is visible when set_task_ioprio() allocates new
io_context and installs it; however, this wouldn't have worked
because set_task_ioprio() doesn't have wmb between init and install.
There are other problems with this which will be fixed in another
patch.
* While at it, use NUMA_NO_NODE instead of -1 for wildcard node
specification.
-v2: Vivek spotted contamination from debug patch. Removed.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2011-12-13 23:33:38 +00:00
|
|
|
new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
|
|
|
|
if (unlikely(!new_ioc))
|
2008-01-24 07:52:45 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
|
block: make ioc get/put interface more conventional and fix race on allocation
Ignoring copy_io() during fork, io_context can be allocated from two
places - current_io_context() and set_task_ioprio(). The former is
always called from local task while the latter can be called from
different task. The synchronization between them is peculiar and
dubious.
* current_io_context() doesn't grab task_lock() and assumes that if it
saw %NULL ->io_context, it would stay that way until allocation and
assignment is complete. It has smp_wmb() between alloc/init and
assignment.
* set_task_ioprio() grabs task_lock() for assignment and does
smp_read_barrier_depends() between "ioc = task->io_context" and "if
(ioc)". Unfortunately, this doesn't achieve anything - the latter
is not a dependent load of the former. ie, if ioc itself were being
dereferenced "ioc->xxx", it would mean something (not sure what tho)
but as the code currently stands, the dependent read barrier is
noop.
As only one of the two test-assignment sequences is task_lock()
protected, the task_lock() can't do much about race between the two.
Nothing prevents current_io_context() and set_task_ioprio() allocating
its own ioc for the same task and overwriting the other's.
Also, set_task_ioprio() can race with exiting task and create a new
ioc after exit_io_context() is finished.
ioc get/put doesn't have any reason to be complex. The only hot path
is accessing the existing ioc of %current, which is simple to achieve
given that ->io_context is never destroyed as long as the task is
alive. All other paths can happily go through task_lock() like all
other task sub structures without impacting anything.
This patch updates ioc get/put so that it becomes more conventional.
* alloc_io_context() is replaced with get_task_io_context(). This is
the only interface which can acquire access to ioc of another task.
On return, the caller has an explicit reference to the object which
should be put using put_io_context() afterwards.
* The functionality of current_io_context() remains the same but when
creating a new ioc, it shares the code path with
get_task_io_context() and always goes through task_lock().
* get_io_context() now means incrementing ref on an ioc which the
caller already has access to (be that an explicit refcnt or implicit
%current one).
* PF_EXITING inhibits creation of new io_context and once
exit_io_context() is finished, it's guaranteed that both ioc
acquisition functions return %NULL.
* All users are updated. Most are trivial but
smp_read_barrier_depends() removal from cfq_get_io_context() needs a
bit of explanation. I suppose the original intention was to ensure
ioc->ioprio is visible when set_task_ioprio() allocates new
io_context and installs it; however, this wouldn't have worked
because set_task_ioprio() doesn't have wmb between init and install.
There are other problems with this which will be fixed in another
patch.
* While at it, use NUMA_NO_NODE instead of -1 for wildcard node
specification.
-v2: Vivek spotted contamination from debug patch. Removed.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2011-12-13 23:33:38 +00:00
|
|
|
new_ioc->ioprio = ioc->ioprio;
|
2012-02-07 06:51:30 +00:00
|
|
|
put_io_context(new_ioc);
|
2008-01-24 07:52:45 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-10-19 06:41:10 +00:00
|
|
|
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
struct sighand_struct *sig;
|
|
|
|
|
2009-01-06 22:40:46 +00:00
|
|
|
if (clone_flags & CLONE_SIGHAND) {
|
2005-04-16 22:20:36 +00:00
|
|
|
atomic_inc(¤t->sighand->count);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
|
2006-01-08 09:01:37 +00:00
|
|
|
rcu_assign_pointer(tsk->sighand, sig);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!sig)
|
|
|
|
return -ENOMEM;
|
|
|
|
atomic_set(&sig->count, 1);
|
|
|
|
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-03-29 00:11:27 +00:00
|
|
|
void __cleanup_sighand(struct sighand_struct *sighand)
|
2006-03-29 00:11:17 +00:00
|
|
|
{
|
epoll: introduce POLLFREE to flush ->signalfd_wqh before kfree()
This patch is intentionally incomplete to simplify the review.
It ignores ep_unregister_pollwait() which plays with the same wqh.
See the next change.
epoll assumes that the EPOLL_CTL_ADD'ed file controls everything
f_op->poll() needs. In particular it assumes that the wait queue
can't go away until eventpoll_release(). This is not true in case
of signalfd, the task which does EPOLL_CTL_ADD uses its ->sighand
which is not connected to the file.
This patch adds the special event, POLLFREE, currently only for
epoll. It expects that init_poll_funcptr()'ed hook should do the
necessary cleanup. Perhaps it should be defined as EPOLLFREE in
eventpoll.
__cleanup_sighand() is changed to do wake_up_poll(POLLFREE) if
->signalfd_wqh is not empty, we add the new signalfd_cleanup()
helper.
ep_poll_callback(POLLFREE) simply does list_del_init(task_list).
This makes the poll entry inconsistent, but we don't care. If you
share an epoll fd which contains our sigfd with another process you
should blame yourself. signalfd is "really special". I simply do
not know how we can define the "right" semantics if it is used with
epoll.
The main problem is, epoll calls signalfd_poll() once to establish
the connection with the wait queue, after that signalfd_poll(NULL)
returns the different/inconsistent results depending on who does
EPOLL_CTL_MOD/signalfd_read/etc. IOW: apart from sigmask, signalfd
has nothing to do with the file, it works with the current thread.
In short: this patch is the hack which tries to fix the symptoms.
It also assumes that nobody can take tasklist_lock under epoll
locks, this seems to be true.
Note:
- we do not have wake_up_all_poll() but wake_up_poll()
is fine, poll/epoll doesn't use WQ_FLAG_EXCLUSIVE.
- signalfd_cleanup() uses POLLHUP along with POLLFREE,
we need a couple of simple changes in eventpoll.c to
make sure it can't be "lost".
Reported-by: Maxime Bizon <mbizon@freebox.fr>
Cc: <stable@kernel.org>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2012-02-24 19:07:11 +00:00
|
|
|
if (atomic_dec_and_test(&sighand->count)) {
|
|
|
|
signalfd_cleanup(sighand);
|
2006-03-29 00:11:17 +00:00
|
|
|
kmem_cache_free(sighand_cachep, sighand);
|
epoll: introduce POLLFREE to flush ->signalfd_wqh before kfree()
This patch is intentionally incomplete to simplify the review.
It ignores ep_unregister_pollwait() which plays with the same wqh.
See the next change.
epoll assumes that the EPOLL_CTL_ADD'ed file controls everything
f_op->poll() needs. In particular it assumes that the wait queue
can't go away until eventpoll_release(). This is not true in case
of signalfd, the task which does EPOLL_CTL_ADD uses its ->sighand
which is not connected to the file.
This patch adds the special event, POLLFREE, currently only for
epoll. It expects that init_poll_funcptr()'ed hook should do the
necessary cleanup. Perhaps it should be defined as EPOLLFREE in
eventpoll.
__cleanup_sighand() is changed to do wake_up_poll(POLLFREE) if
->signalfd_wqh is not empty, we add the new signalfd_cleanup()
helper.
ep_poll_callback(POLLFREE) simply does list_del_init(task_list).
This makes the poll entry inconsistent, but we don't care. If you
share an epoll fd which contains our sigfd with another process you
should blame yourself. signalfd is "really special". I simply do
not know how we can define the "right" semantics if it is used with
epoll.
The main problem is, epoll calls signalfd_poll() once to establish
the connection with the wait queue, after that signalfd_poll(NULL)
returns the different/inconsistent results depending on who does
EPOLL_CTL_MOD/signalfd_read/etc. IOW: apart from sigmask, signalfd
has nothing to do with the file, it works with the current thread.
In short: this patch is the hack which tries to fix the symptoms.
It also assumes that nobody can take tasklist_lock under epoll
locks, this seems to be true.
Note:
- we do not have wake_up_all_poll() but wake_up_poll()
is fine, poll/epoll doesn't use WQ_FLAG_EXCLUSIVE.
- signalfd_cleanup() uses POLLHUP along with POLLFREE,
we need a couple of simple changes in eventpoll.c to
make sure it can't be "lost".
Reported-by: Maxime Bizon <mbizon@freebox.fr>
Cc: <stable@kernel.org>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2012-02-24 19:07:11 +00:00
|
|
|
}
|
2006-03-29 00:11:17 +00:00
|
|
|
}
|
|
|
|
|
timers: fix itimer/many thread hang
Overview
This patch reworks the handling of POSIX CPU timers, including the
ITIMER_PROF, ITIMER_VIRT timers and rlimit handling. It was put together
with the help of Roland McGrath, the owner and original writer of this code.
The problem we ran into, and the reason for this rework, has to do with using
a profiling timer in a process with a large number of threads. It appears
that the performance of the old implementation of run_posix_cpu_timers() was
at least O(n*3) (where "n" is the number of threads in a process) or worse.
Everything is fine with an increasing number of threads until the time taken
for that routine to run becomes the same as or greater than the tick time, at
which point things degrade rather quickly.
This patch fixes bug 9906, "Weird hang with NPTL and SIGPROF."
Code Changes
This rework corrects the implementation of run_posix_cpu_timers() to make it
run in constant time for a particular machine. (Performance may vary between
one machine and another depending upon whether the kernel is built as single-
or multiprocessor and, in the latter case, depending upon the number of
running processors.) To do this, at each tick we now update fields in
signal_struct as well as task_struct. The run_posix_cpu_timers() function
uses those fields to make its decisions.
We define a new structure, "task_cputime," to contain user, system and
scheduler times and use these in appropriate places:
struct task_cputime {
cputime_t utime;
cputime_t stime;
unsigned long long sum_exec_runtime;
};
This is included in the structure "thread_group_cputime," which is a new
substructure of signal_struct and which varies for uniprocessor versus
multiprocessor kernels. For uniprocessor kernels, it uses "task_cputime" as
a simple substructure, while for multiprocessor kernels it is a pointer:
struct thread_group_cputime {
struct task_cputime totals;
};
struct thread_group_cputime {
struct task_cputime *totals;
};
We also add a new task_cputime substructure directly to signal_struct, to
cache the earliest expiration of process-wide timers, and task_cputime also
replaces the it_*_expires fields of task_struct (used for earliest expiration
of thread timers). The "thread_group_cputime" structure contains process-wide
timers that are updated via account_user_time() and friends. In the non-SMP
case the structure is a simple aggregator; unfortunately in the SMP case that
simplicity was not achievable due to cache-line contention between CPUs (in
one measured case performance was actually _worse_ on a 16-cpu system than
the same test on a 4-cpu system, due to this contention). For SMP, the
thread_group_cputime counters are maintained as a per-cpu structure allocated
using alloc_percpu(). The timer functions update only the timer field in
the structure corresponding to the running CPU, obtained using per_cpu_ptr().
We define a set of inline functions in sched.h that we use to maintain the
thread_group_cputime structure and hide the differences between UP and SMP
implementations from the rest of the kernel. The thread_group_cputime_init()
function initializes the thread_group_cputime structure for the given task.
The thread_group_cputime_alloc() is a no-op for UP; for SMP it calls the
out-of-line function thread_group_cputime_alloc_smp() to allocate and fill
in the per-cpu structures and fields. The thread_group_cputime_free()
function, also a no-op for UP, in SMP frees the per-cpu structures. The
thread_group_cputime_clone_thread() function (also a UP no-op) for SMP calls
thread_group_cputime_alloc() if the per-cpu structures haven't yet been
allocated. The thread_group_cputime() function fills the task_cputime
structure it is passed with the contents of the thread_group_cputime fields;
in UP it's that simple but in SMP it must also safely check that tsk->signal
is non-NULL (if it is it just uses the appropriate fields of task_struct) and,
if so, sums the per-cpu values for each online CPU. Finally, the three
functions account_group_user_time(), account_group_system_time() and
account_group_exec_runtime() are used by timer functions to update the
respective fields of the thread_group_cputime structure.
Non-SMP operation is trivial and will not be mentioned further.
The per-cpu structure is always allocated when a task creates its first new
thread, via a call to thread_group_cputime_clone_thread() from copy_signal().
It is freed at process exit via a call to thread_group_cputime_free() from
cleanup_signal().
All functions that formerly summed utime/stime/sum_sched_runtime values
from all threads in the thread group now use thread_group_cputime() to
snapshot the values in the thread_group_cputime structure or the values in
the task structure itself if the per-cpu structure hasn't been allocated.
Finally, the code in kernel/posix-cpu-timers.c has changed quite a bit.
The run_posix_cpu_timers() function has been split into a fast path and a
slow path; the former safely checks whether there are any expired thread
timers and, if not, just returns, while the slow path does the heavy lifting.
With the dedicated thread group fields, timers are no longer "rebalanced" and
the process_timer_rebalance() function and related code has gone away. All
summing loops are gone and all code that used them now uses the
thread_group_cputime() inline. When process-wide timers are set, the new
task_cputime structure in signal_struct is used to cache the earliest
expiration; this is checked in the fast path.
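For illustration, a hedged sketch of the kind of constant-time check the fast path can now make against the cached expirations (helper and field names here are assumptions based on the description above):
	/* Sketch: nonzero if any nonzero expiration in 'expires' has been
	 * reached by the corresponding accumulated value in 'sample'. */
	static int task_cputime_expired(const struct task_cputime *sample,
					const struct task_cputime *expires)
	{
		if (!cputime_eq(expires->utime, cputime_zero) &&
		    cputime_ge(sample->utime, expires->utime))
			return 1;
		if (!cputime_eq(expires->stime, cputime_zero) &&
		    cputime_ge(cputime_add(sample->utime, sample->stime),
			       expires->stime))
			return 1;
		if (expires->sum_exec_runtime != 0 &&
		    sample->sum_exec_runtime >= expires->sum_exec_runtime)
			return 1;
		return 0;
	}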
Performance
The fix appears not to add significant overhead to existing operations. It
generally performs the same as the current code except in two cases, one in
which it performs slightly worse (Case 5 below) and one in which it performs
very significantly better (Case 2 below). Overall it's a wash except in those
two cases.
I've since done somewhat more involved testing on a dual-core Opteron system.
Case 1: With no itimer running, for a test with 100,000 threads, the fixed
kernel took 1428.5 seconds, 513 seconds more than the unfixed system,
all of which was spent in the system. There were twice as many
voluntary context switches with the fix as without it.
Case 2: With an itimer running at .01 second ticks and 4000 threads (the most
an unmodified kernel can handle), the fixed kernel ran the test in
eight percent of the time (5.8 seconds as opposed to 70 seconds) and
had better tick accuracy (.012 seconds per tick as opposed to .023
seconds per tick).
Case 3: A 4000-thread test with an initial timer tick of .01 second and an
interval of 10,000 seconds (i.e. a timer that ticks only once) had
very nearly the same performance in both cases: 6.3 seconds elapsed
for the fixed kernel versus 5.5 seconds for the unfixed kernel.
With fewer threads (eight in these tests), the Case 1 test ran in essentially
the same time on both the modified and unmodified kernels (5.2 seconds versus
5.8 seconds). The Case 2 test ran in about the same time as well, 5.9 seconds
versus 5.4 seconds but again with much better tick accuracy, .013 seconds per
tick versus .025 seconds per tick for the unmodified kernel.
Since the fix affected the rlimit code, I also tested soft and hard CPU limits.
Case 4: With a hard CPU limit of 20 seconds and eight threads (and an itimer
running), the modified kernel was very slightly favored in that while
it killed the process in 19.997 seconds of CPU time (5.002 seconds of
wall time), only .003 seconds of that was system time, the rest was
user time. The unmodified kernel killed the process in 20.001 seconds
of CPU (5.014 seconds of wall time) of which .016 seconds was system
time. Really, though, the results were too close to call. The results
were essentially the same with no itimer running.
Case 5: With a soft limit of 20 seconds and a hard limit of 2000 seconds
(where the hard limit would never be reached) and an itimer running,
the modified kernel exhibited worse tick accuracy than the unmodified
kernel: .050 seconds/tick versus .028 seconds/tick. Otherwise,
performance was almost indistinguishable. With no itimer running this
test exhibited virtually identical behavior and times in both cases.
In times past I did some limited performance testing; those results are below.
On a four-cpu Opteron system without this fix, a sixteen-thread test executed
in 3569.991 seconds, of which user was 3568.435s and system was 1.556s. On
the same system with the fix, user and elapsed time were about the same, but
system time dropped to 0.007 seconds. Performance with eight, four and one
thread was comparable. Interestingly, the timer ticks with the fix seemed
more accurate: The sixteen-thread test with the fix received 149543 ticks
for 0.024 seconds per tick, while the same test without the fix received 58720 ticks
for 0.061 seconds per tick. Both cases were configured for an interval of
0.01 seconds. Again, the other tests were comparable. Each thread in this
test computed the primes up to 25,000,000.
I also did a test with a large number of threads, 100,000 threads, which is
impossible without the fix. In this case each thread computed the primes only
up to 10,000 (to make the runtime manageable). System time dominated, at
1546.968 seconds out of a total 2176.906 seconds (giving a user time of
629.938s). It received 147651 ticks for 0.015 seconds per tick, still quite
accurate. There is obviously no comparable test without the fix.
Signed-off-by: Frank Mayhar <fmayhar@google.com>
Cc: Roland McGrath <roland@redhat.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-09-12 16:54:39 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize POSIX timer handling for a thread group.
|
|
|
|
*/
|
|
|
|
static void posix_cpu_timers_init_group(struct signal_struct *sig)
|
|
|
|
{
|
2010-03-05 21:42:54 +00:00
|
|
|
unsigned long cpu_limit;
|
|
|
|
|
2008-09-12 16:54:39 +00:00
|
|
|
/* Thread group counters. */
|
|
|
|
thread_group_cputime_init(sig);
|
|
|
|
|
2010-03-05 21:42:54 +00:00
|
|
|
cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
|
|
|
|
if (cpu_limit != RLIM_INFINITY) {
|
|
|
|
sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
|
2009-03-27 00:06:07 +00:00
|
|
|
sig->cputimer.running = 1;
|
|
|
|
}
|
|
|
|
|
2008-09-12 16:54:39 +00:00
|
|
|
/* The timer lists. */
|
|
|
|
INIT_LIST_HEAD(&sig->cpu_timers[0]);
|
|
|
|
INIT_LIST_HEAD(&sig->cpu_timers[1]);
|
|
|
|
INIT_LIST_HEAD(&sig->cpu_timers[2]);
|
|
|
|
}
|
|
|
|
|
2007-10-19 06:41:10 +00:00
|
|
|
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
struct signal_struct *sig;
|
|
|
|
|
clone(): fix race between copy_process() and de_thread()
Spotted by Hiroshi Shimamoto who also provided the test-case below.
copy_process() uses signal->count as a reference counter, but it is not.
This test case
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>
	#include <stdio.h>
	#include <errno.h>
	#include <pthread.h>

	void *null_thread(void *p)
	{
		for (;;)
			sleep(1);
		return NULL;
	}

	void *exec_thread(void *p)
	{
		execl("/bin/true", "/bin/true", NULL);
		return null_thread(p);
	}

	int main(int argc, char **argv)
	{
		for (;;) {
			pid_t pid;
			int ret, status;

			pid = fork();
			if (pid < 0)
				break;
			if (!pid) {
				pthread_t tid;

				pthread_create(&tid, NULL, exec_thread, NULL);
				for (;;)
					pthread_create(&tid, NULL, null_thread, NULL);
			}
			do {
				ret = waitpid(pid, &status, 0);
			} while (ret == -1 && errno == EINTR);
		}
		return 0;
	}
quickly creates an unkillable task.
If copy_process(CLONE_THREAD) races with de_thread()
copy_signal()->atomic(signal->count) breaks the signal->notify_count
logic, and the execing thread can hang forever in kernel space.
Change copy_process() to increment count/live only when we know for sure
we can't fail. In this case the forked thread will take care of its
reference to signal correctly.
If copy_process() fails, check the CLONE_THREAD flag. If it is set, do
nothing, the counters were not changed and current belongs to the same
thread group. If it is not set, ->signal must be released in any case
(and ->count must be == 1), the forked child is the only thread in the
thread group.
We need more cleanups here, in particular signal->count should not be used
by de_thread/__exit_signal at all. This patch only fixes the bug.
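For illustration, a minimal sketch of the error-path rule described above (the helper name here is hypothetical; the real cleanup lives on copy_process()'s bad_fork_* path):
	/* Hypothetical helper: on copy_process() failure, only a child that
	 * was NOT created with CLONE_THREAD owns a private signal_struct
	 * (->count == 1) that must be torn down; a CLONE_THREAD child never
	 * touched the shared counters, so nothing needs undoing. */
	static void cleanup_signal_on_failure(struct task_struct *p,
					      unsigned long clone_flags)
	{
		if (!(clone_flags & CLONE_THREAD))
			__cleanup_signal(p->signal);
	}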
Reported-by: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Tested-by: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Roland McGrath <roland@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-08-26 21:29:24 +00:00
|
|
|
if (clone_flags & CLONE_THREAD)
|
2008-11-24 16:06:57 +00:00
|
|
|
return 0;
|
|
|
|
|
2010-03-10 23:23:01 +00:00
|
|
|
sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
|
2005-04-16 22:20:36 +00:00
|
|
|
tsk->signal = sig;
|
|
|
|
if (!sig)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2010-05-26 21:43:24 +00:00
|
|
|
sig->nr_threads = 1;
|
2005-04-16 22:20:36 +00:00
|
|
|
atomic_set(&sig->live, 1);
|
2010-05-26 21:43:24 +00:00
|
|
|
atomic_set(&sig->sigcnt, 1);
|
introduce for_each_thread() to replace the buggy while_each_thread()
while_each_thread() and next_thread() should die; almost every lockless
usage is wrong.
1. Unless g == current, the lockless while_each_thread() is not safe.
while_each_thread(g, t) can loop forever if g exits; next_thread()
can't reach the unhashed thread in this case. Note that this can
happen even if g is the group leader, since it can exec.
2. Even if while_each_thread() itself was correct, people often use
it wrongly.
It was never safe to just take rcu_read_lock() and loop unless
you verify that pid_alive(g) == T, even the first next_thread()
can point to the already freed/reused memory.
This patch adds signal_struct->thread_head and task->thread_node to
create the normal rcu-safe list with the stable head. The new
for_each_thread(g, t) helper is always safe under rcu_read_lock() as
long as this task_struct can't go away.
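As a usage sketch (not part of this patch), a lockless walk with the new helper looks roughly like this; the caller must still guarantee that 'p' itself cannot go away:
	/* Illustrative only: count the threads in p's thread group. */
	static int count_threads(struct task_struct *p)
	{
		struct task_struct *t;
		int n = 0;

		rcu_read_lock();
		for_each_thread(p, t)
			n++;
		rcu_read_unlock();

		return n;
	}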
Note: of course it is ugly to have both task_struct->thread_node and the
old task_struct->thread_group; we will kill it later, after we change
the users of while_each_thread() to use for_each_thread().
Perhaps we can kill it even before we convert all users: we can
reimplement next_thread(t) using the new thread_head/thread_node. But
we can't do this right now because this will lead to subtle behavioural
changes. For example, do/while_each_thread() always sees at least one
task, while for_each_thread() can do nothing if the whole thread group
has died. Or thread_group_empty(), currently its semantics is not clear
unless thread_group_leader(p) and we need to audit the callers before we
can change it.
So this patch adds the new interface which has to coexist with the old
one for some time, hopefully the next changes will be more or less
straightforward and the old one will go away soon.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Sergey Dyasly <dserrg@gmail.com>
Tested-by: Sergey Dyasly <dserrg@gmail.com>
Reviewed-by: Sameer Nanda <snanda@chromium.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mandeep Singh Baines <msb@chromium.org>
Cc: "Ma, Xindong" <xindong.ma@intel.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: "Tu, Xiaobing" <xiaobing.tu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2014-01-21 23:49:56 +00:00
|
|
|
|
|
|
|
/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
|
|
|
|
sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
|
|
|
|
tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
init_waitqueue_head(&sig->wait_chldexit);
|
2008-04-30 07:52:52 +00:00
|
|
|
sig->curr_target = tsk;
|
2005-04-16 22:20:36 +00:00
|
|
|
init_sigpending(&sig->shared_pending);
|
|
|
|
INIT_LIST_HEAD(&sig->posix_timers);
|
|
|
|
|
2007-02-16 09:27:49 +00:00
|
|
|
hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
2005-04-16 22:20:36 +00:00
|
|
|
sig->real_timer.function = it_real_fn;
|
|
|
|
|
|
|
|
task_lock(current->group_leader);
|
|
|
|
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
|
|
|
|
task_unlock(current->group_leader);
|
|
|
|
|
2009-03-27 00:06:07 +00:00
|
|
|
posix_cpu_timers_init_group(sig);
|
|
|
|
|
Audit: add TTY input auditing
Add TTY input auditing, used to audit system administrator's actions. This is
required by various security standards such as DCID 6/3 and PCI to provide
non-repudiation of administrator's actions and to allow a review of past
actions if the administrator seems to overstep their duties or if the system
becomes misconfigured for unknown reasons. These requirements do not make it
necessary to audit TTY output as well.
Compared to a user-space keylogger, this approach records TTY input using the
audit subsystem, correlated with other audit events, and it is completely
transparent to the user-space application (e.g. the console ioctls still
work).
TTY input auditing works on a higher level than auditing all system calls
within the session, which would produce an overwhelming amount of mostly
useless audit events.
Add an "audit_tty" attribute, inherited across fork (). Data read from TTYs
by process with the attribute is sent to the audit subsystem by the kernel.
The audit netlink interface is extended to allow modifying the audit_tty
attribute, and to allow sending explanatory audit events from user-space (for
example, a shell might send an event containing the final command, after the
interactive command-line editing and history expansion is performed, which
might be difficult to decipher from the TTY input alone).
Because the "audit_tty" attribute is inherited across fork (), it would be set
e.g. for sshd restarted within an audited session. To prevent this, the
audit_tty attribute is cleared when a process with no open TTY file
descriptors (e.g. after daemon startup) opens a TTY.
See https://www.redhat.com/archives/linux-audit/2007-June/msg00000.html for a
more detailed rationale document for an older version of this patch.
[akpm@linux-foundation.org: build fix]
Signed-off-by: Miloslav Trmac <mitr@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Cc: Paul Fulghum <paulkf@microgate.com>
Cc: Casey Schaufler <casey@schaufler-ca.com>
Cc: Steve Grubb <sgrubb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-07-16 06:40:56 +00:00
|
|
|
tty_audit_fork(sig);
|
sched: Add 'autogroup' scheduling feature: automated per session task groups
A recurring complaint from CFS users is that parallel kbuild has
a negative impact on desktop interactivity. This patch
implements an idea from Linus, to automatically create task
groups. Currently, only per session autogroups are implemented,
but the patch leaves the way open for enhancement.
Implementation: each task's signal struct contains an inherited
pointer to a refcounted autogroup struct containing a task group
pointer, the default for all tasks pointing to the
init_task_group. When a task calls setsid(), a new task group
is created, the process is moved into the new task group, and a
reference to the previous task group is dropped. Child
processes inherit this task group thereafter, and increase its
refcount. When the last thread of a process exits, the
process's reference is dropped, such that when the last process
referencing an autogroup exits, the autogroup is destroyed.
At runqueue selection time, IFF a task has no cgroup assignment,
its current autogroup is used.
Autogroup bandwidth is controllable by setting its nice level
through the proc filesystem:
cat /proc/<pid>/autogroup
Displays the task's group and the group's nice level.
echo <nice level> > /proc/<pid>/autogroup
Sets the task group's shares to the weight of a nice <level> task.
Setting nice level is rate limited for !admin users due to the
abuse risk of task group locking.
The feature is enabled from boot by default if
CONFIG_SCHED_AUTOGROUP=y is selected, but can be disabled via
the boot option noautogroup, and can also be turned on/off on
the fly via:
echo [01] > /proc/sys/kernel/sched_autogroup_enabled
... which will automatically move tasks to/from the root task group.
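A hedged userspace sketch of the /proc interface described above (illustrative only; error handling is minimal and the nice value is just an example):
	#include <stdio.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/proc/self/autogroup", "r");

		/* Show the current autogroup and its nice level. */
		if (f) {
			if (fgets(line, sizeof(line), f))
				printf("autogroup: %s", line);
			fclose(f);
		}

		/* Ask for the group to be weighted like a nice 10 task. */
		f = fopen("/proc/self/autogroup", "w");
		if (f) {
			fprintf(f, "10\n");
			fclose(f);
		}
		return 0;
	}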
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Markus Trippelsdorf <markus@trippelsdorf.de>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Paul Turner <pjt@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
[ Removed the task_group_path() debug code, and fixed !EVENTFD build failure. ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
LKML-Reference: <1290281700.28711.9.camel@maggy.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2010-11-30 13:18:03 +00:00
|
|
|
sched_autogroup_fork(sig);
|
2007-07-16 06:40:56 +00:00
|
|
|
|
2011-05-26 23:25:18 +00:00
|
|
|
#ifdef CONFIG_CGROUPS
|
2011-12-13 02:12:21 +00:00
|
|
|
init_rwsem(&sig->group_rwsem);
|
2011-05-26 23:25:18 +00:00
|
|
|
#endif
|
|
|
|
|
oom: badness heuristic rewrite
This a complete rewrite of the oom killer's badness() heuristic which is
used to determine which task to kill in oom conditions. The goal is to
make it as simple and predictable as possible so the results are better
understood and we end up killing the task which will lead to the most
memory freeing while still respecting the fine-tuning from userspace.
Instead of basing the heuristic on mm->total_vm for each task, the task's
rss and swap space is used instead. This is a better indication of the
amount of memory that will be freeable if the oom killed task is chosen
and subsequently exits. This helps specifically in cases where KDE or
GNOME is chosen for oom kill on desktop systems instead of a memory
hogging task.
The baseline for the heuristic is a proportion of memory that each task is
currently using in memory plus swap compared to the amount of "allowable"
memory. "Allowable," in this sense, means the system-wide resources for
unconstrained oom conditions, the set of mempolicy nodes, the mems
attached to current's cpuset, or a memory controller's limit. The
proportion is given on a scale of 0 (never kill) to 1000 (always kill),
roughly meaning that if a task has a badness() score of 500 that the task
consumes approximately 50% of allowable memory resident in RAM or in swap
space.
The proportion is always relative to the amount of "allowable" memory and
not the total amount of RAM systemwide so that mempolicies and cpusets may
operate in isolation; they shall not need to know the true size of the
machine on which they are running if they are bound to a specific set of
nodes or mems, respectively.
Root tasks are given 3% extra memory just like __vm_enough_memory()
provides in LSMs. In the event of two tasks consuming similar amounts of
memory, it is generally better to save root's task.
Because of the change in the badness() heuristic's baseline, it is also
necessary to introduce a new user interface to tune it. It's not possible
to redefine the meaning of /proc/pid/oom_adj with a new scale since the
ABI cannot be changed for backward compatibility. Instead, a new tunable,
/proc/pid/oom_score_adj, is added that ranges from -1000 to +1000. It may
be used to polarize the heuristic such that certain tasks are never
considered for oom kill while others may always be considered. The value
is added directly into the badness() score so a value of -500, for
example, means to discount 50% of its memory consumption in comparison to
other tasks either on the system, bound to the mempolicy, in the cpuset,
or sharing the same memory controller.
/proc/pid/oom_adj is changed so that its meaning is rescaled into the
units used by /proc/pid/oom_score_adj, and vice versa. Changing one of
these per-task tunables will rescale the value of the other to an
equivalent meaning. Although /proc/pid/oom_adj was originally defined as
a bitshift on the badness score, it now shares the same linear growth as
/proc/pid/oom_score_adj but with different granularity. This is required
so the ABI is not broken with userspace applications and allows oom_adj to
be deprecated for future removal.
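For illustration, a hedged sketch of the arithmetic described above (names are placeholders; the real badness() also applies the root bonus and operates on the task's actual rss/swap counters):
	/* Score on a 0..1000 scale: proportion of allowable memory consumed,
	 * shifted by oom_score_adj (-1000 never kill .. +1000 always kill). */
	static long oom_score_sketch(unsigned long rss_pages,
				     unsigned long swap_pages,
				     unsigned long allowable_pages,
				     long oom_score_adj)
	{
		long points;

		if (!allowable_pages)
			return 0;

		points = (rss_pages + swap_pages) * 1000 / allowable_pages;
		points += oom_score_adj;

		return points < 0 ? 0 : points;
	}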
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2010-08-10 00:19:46 +00:00
|
|
|
sig->oom_score_adj = current->signal->oom_score_adj;
|
2011-01-13 23:46:05 +00:00
|
|
|
sig->oom_score_adj_min = current->signal->oom_score_adj_min;
|
oom: move oom_adj value from task_struct to signal_struct
Currently, the OOM logic call flow is as follows:
__out_of_memory()
    select_bad_process()            for each task
        badness()                   calculate badness of one task
    oom_kill_process()              search child
        oom_kill_task()             kill target task and mm shared tasks with it
For example, suppose process-A has two threads, thread-A and thread-B, it has
very fat memory, and each thread has the following oom_adj and oom_score:
  thread-A: oom_adj = OOM_DISABLE, oom_score = 0
  thread-B: oom_adj = 0, oom_score = very-high
Then select_bad_process() selects thread-B, but oom_kill_task() refuses to
kill the task because thread-A has OOM_DISABLE. Thus __out_of_memory()
calls select_bad_process() again, but select_bad_process() selects the same
task. This means the kernel falls into a livelock.
The fact is, select_bad_process() must select a killable task; otherwise the
OOM logic goes into a livelock.
The root cause is that oom_adj shouldn't be a per-thread value; it should be
a per-process value, because the OOM killer kills a process, not a thread.
This patch therefore moves oomkilladj (now more appropriately named oom_adj)
from struct task_struct to struct signal_struct. It naturally prevents
select_bad_process() from choosing the wrong task.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Paul Menage <menage@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-09-22 00:03:13 +00:00
|
|
|
|
prctl: add PR_{SET,GET}_CHILD_SUBREAPER to allow simple process supervision
Userspace service managers/supervisors need to track their started
services. Many services daemonize by double-forking and get implicitly
re-parented to PID 1. The service manager will no longer be able to
receive the SIGCHLD signals for them, and is no longer in charge of
reaping the children with wait(). All information about the children is
lost at the moment PID 1 cleans up the re-parented processes.
With this prctl, a service manager process can mark itself as a sort of
'sub-init', able to stay as the parent for all orphaned processes
created by the started services. All SIGCHLD signals will be delivered
to the service manager.
Receiving SIGCHLD and doing wait() is, in the case of a service manager, much
preferred over any possible asynchronous notification about specific
PIDs, because the service manager has full access to the child process
data in /proc and the PID cannot be re-used until the wait(), which the
service manager itself is in charge of, has happened.
As a side effect, the relevant parent PID information does not get lost
by a double-fork, which results in a more elaborate process tree and
'ps' output:
before:
# ps afx
253 ? Ss 0:00 /bin/dbus-daemon --system --nofork
294 ? Sl 0:00 /usr/libexec/polkit-1/polkitd
328 ? S 0:00 /usr/sbin/modem-manager
608 ? Sl 0:00 /usr/libexec/colord
658 ? Sl 0:00 /usr/libexec/upowerd
819 ? Sl 0:00 /usr/libexec/imsettings-daemon
916 ? Sl 0:00 /usr/libexec/udisks-daemon
917 ? S 0:00 \_ udisks-daemon: not polling any devices
after:
# ps afx
294 ? Ss 0:00 /bin/dbus-daemon --system --nofork
426 ? Sl 0:00 \_ /usr/libexec/polkit-1/polkitd
449 ? S 0:00 \_ /usr/sbin/modem-manager
635 ? Sl 0:00 \_ /usr/libexec/colord
705 ? Sl 0:00 \_ /usr/libexec/upowerd
959 ? Sl 0:00 \_ /usr/libexec/udisks-daemon
960 ? S 0:00 | \_ udisks-daemon: not polling any devices
977 ? Sl 0:00 \_ /usr/libexec/packagekitd
This prctl is orthogonal to PID namespaces. PID namespaces are isolated
from each other, while a service management process usually requires the
services to live in the same namespace, to be able to talk to each
other.
Users of this will be the systemd per-user instance, which provides
init-like functionality for the user's login session and D-Bus, which
activates bus services on-demand. Both need init-like capabilities to
be able to properly keep track of the services they start.
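A minimal userspace sketch of a service manager using this prctl (illustrative; a real manager would dispatch on the reaped PIDs rather than just print them):
	#include <sys/types.h>
	#include <sys/prctl.h>
	#include <sys/wait.h>
	#include <stdio.h>

	int main(void)
	{
		pid_t pid;
		int status;

		/* Become the reaper for all orphaned descendants. */
		if (prctl(PR_SET_CHILD_SUBREAPER, 1) < 0) {
			perror("prctl");
			return 1;
		}

		/* ... start services here; double-forking daemons now
		 * re-parent to us instead of PID 1 ... */

		while ((pid = wait(&status)) > 0)
			printf("reaped %d\n", (int)pid);

		return 0;
	}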
Many thanks to Oleg for several rounds of review and insights.
[akpm@linux-foundation.org: fix comment layout and spelling]
[akpm@linux-foundation.org: add lengthy code comment from Oleg]
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Lennart Poettering <lennart@poettering.net>
Signed-off-by: Kay Sievers <kay.sievers@vrfy.org>
Acked-by: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2012-03-23 22:01:54 +00:00
|
|
|
sig->has_child_subreaper = current->signal->has_child_subreaper ||
|
|
|
|
current->signal->is_child_subreaper;
|
|
|
|
|
2010-10-27 22:34:08 +00:00
|
|
|
mutex_init(&sig->cred_guard_mutex);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-06-27 22:18:48 +00:00
|
|
|
static void copy_seccomp(struct task_struct *p)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_SECCOMP
|
|
|
|
/*
|
|
|
|
* Must be called with sighand->lock held, which is common to
|
|
|
|
* all threads in the group. Holding cred_guard_mutex is not
|
|
|
|
* needed because this new task is not yet running and cannot
|
|
|
|
* be racing exec.
|
|
|
|
*/
|
2014-08-11 03:50:30 +00:00
|
|
|
assert_spin_locked(&current->sighand->siglock);
|
2014-06-27 22:18:48 +00:00
|
|
|
|
|
|
|
/* Ref-count the new filter user, and assign it. */
|
|
|
|
get_seccomp_filter(current);
|
|
|
|
p->seccomp = current->seccomp;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Explicitly enable no_new_privs here in case it got set
|
|
|
|
* between the task_struct being duplicated and holding the
|
|
|
|
* sighand lock. The seccomp state and nnp must be in sync.
|
|
|
|
*/
|
|
|
|
if (task_no_new_privs(current))
|
|
|
|
task_set_no_new_privs(p);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the parent gained a seccomp mode after copying thread
|
|
|
|
* flags but before we held the sighand lock, we have
|
|
|
|
* to manually enable the seccomp thread flag here.
|
|
|
|
*/
|
|
|
|
if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
|
|
|
|
set_tsk_thread_flag(p, TIF_SECCOMP);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2009-01-14 13:14:10 +00:00
|
|
|
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
current->clear_child_tid = tidptr;
|
|
|
|
|
2007-10-19 06:40:14 +00:00
|
|
|
return task_pid_vnr(current);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2007-10-19 06:41:10 +00:00
|
|
|
static void rt_mutex_init_task(struct task_struct *p)
|
2006-06-27 09:54:53 +00:00
|
|
|
{
|
2009-11-17 13:54:03 +00:00
|
|
|
raw_spin_lock_init(&p->pi_lock);
|
2007-03-16 21:38:34 +00:00
|
|
|
#ifdef CONFIG_RT_MUTEXES
|
rtmutex: Turn the plist into an rb-tree
Turn the pi-chains from plist to rb-tree, in the rt_mutex code,
and provide a proper comparison function for -deadline and
-priority tasks.
This is done mainly because:
- the classical prio field of the plist is just an int, which might
not be enough for representing a deadline;
- manipulating such a list would become O(nr_deadline_tasks),
which might be too much, as the number of -deadline tasks increases.
Therefore, an rb-tree is used, and tasks are queued in it according
to the following logic:
- among two -priority (i.e., SCHED_BATCH/OTHER/RR/FIFO) tasks, the
one with the higher (lower, actually!) prio wins;
- among a -priority and a -deadline task, the latter always wins;
- among two -deadline tasks, the one with the earliest deadline
wins.
Queueing and dequeueing functions are changed accordingly, for both
the list of a task's pi-waiters and the list of tasks blocked on
a pi-lock.
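For illustration, a hedged sketch of the ordering rules listed above expressed as a single "a goes before b" predicate (parameter names are placeholders, not the patch's actual waiter layout):
	/* Nonzero if waiter 'a' should be queued before waiter 'b':
	 * -deadline beats -priority, the earlier deadline wins among two
	 * -deadline tasks, and a lower prio value (i.e. higher priority)
	 * wins among two -priority tasks. */
	static int rt_waiter_before(int a_is_deadline, unsigned long long a_deadline,
				    int a_prio,
				    int b_is_deadline, unsigned long long b_deadline,
				    int b_prio)
	{
		if (a_is_deadline && b_is_deadline)
			return a_deadline < b_deadline;
		if (a_is_deadline != b_is_deadline)
			return a_is_deadline;
		return a_prio < b_prio;
	}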
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Dario Faggioli <raistlin@linux.it>
Signed-off-by: Juri Lelli <juri.lelli@gmail.com>
Signed-off-again-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1383831828-15501-10-git-send-email-juri.lelli@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2013-11-07 13:43:43 +00:00
|
|
|
p->pi_waiters = RB_ROOT;
|
|
|
|
p->pi_waiters_leftmost = NULL;
|
2006-06-27 09:54:53 +00:00
|
|
|
p->pi_blocked_on = NULL;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
timers: fix itimer/many thread hang
Overview
This patch reworks the handling of POSIX CPU timers, including the
ITIMER_PROF, ITIMER_VIRT timers and rlimit handling. It was put together
with the help of Roland McGrath, the owner and original writer of this code.
The problem we ran into, and the reason for this rework, has to do with using
a profiling timer in a process with a large number of threads. It appears
that the performance of the old implementation of run_posix_cpu_timers() was
at least O(n*3) (where "n" is the number of threads in a process) or worse.
Everything is fine with an increasing number of threads until the time taken
for that routine to run becomes the same as or greater than the tick time, at
which point things degrade rather quickly.
This patch fixes bug 9906, "Weird hang with NPTL and SIGPROF."
Code Changes
This rework corrects the implementation of run_posix_cpu_timers() to make it
run in constant time for a particular machine. (Performance may vary between
one machine and another depending upon whether the kernel is built as single-
or multiprocessor and, in the latter case, depending upon the number of
running processors.) To do this, at each tick we now update fields in
signal_struct as well as task_struct. The run_posix_cpu_timers() function
uses those fields to make its decisions.
We define a new structure, "task_cputime," to contain user, system and
scheduler times and use these in appropriate places:
struct task_cputime {
cputime_t utime;
cputime_t stime;
unsigned long long sum_exec_runtime;
};
This is included in the structure "thread_group_cputime," which is a new
substructure of signal_struct and which varies for uniprocessor versus
multiprocessor kernels. For uniprocessor kernels, it uses "task_cputime" as
a simple substructure, while for multiprocessor kernels it is a pointer:
struct thread_group_cputime {
struct task_cputime totals;
};
struct thread_group_cputime {
struct task_cputime *totals;
};
We also add a new task_cputime substructure directly to signal_struct, to
cache the earliest expiration of process-wide timers, and task_cputime also
replaces the it_*_expires fields of task_struct (used for earliest expiration
of thread timers). The "thread_group_cputime" structure contains process-wide
timers that are updated via account_user_time() and friends. In the non-SMP
case the structure is a simple aggregator; unfortunately in the SMP case that
simplicity was not achievable due to cache-line contention between CPUs (in
one measured case performance was actually _worse_ on a 16-cpu system than
the same test on a 4-cpu system, due to this contention). For SMP, the
thread_group_cputime counters are maintained as a per-cpu structure allocated
using alloc_percpu(). The timer functions update only the timer field in
the structure corresponding to the running CPU, obtained using per_cpu_ptr().
We define a set of inline functions in sched.h that we use to maintain the
thread_group_cputime structure and hide the differences between UP and SMP
implementations from the rest of the kernel. The thread_group_cputime_init()
function initializes the thread_group_cputime structure for the given task.
The thread_group_cputime_alloc() is a no-op for UP; for SMP it calls the
out-of-line function thread_group_cputime_alloc_smp() to allocate and fill
in the per-cpu structures and fields. The thread_group_cputime_free()
function, also a no-op for UP, in SMP frees the per-cpu structures. The
thread_group_cputime_clone_thread() function (also a UP no-op) for SMP calls
thread_group_cputime_alloc() if the per-cpu structures haven't yet been
allocated. The thread_group_cputime() function fills the task_cputime
structure it is passed with the contents of the thread_group_cputime fields;
in UP it's that simple but in SMP it must also safely check that tsk->signal
is non-NULL (if it is it just uses the appropriate fields of task_struct) and,
if so, sums the per-cpu values for each online CPU. Finally, the three
functions account_group_user_time(), account_group_system_time() and
account_group_exec_runtime() are used by timer functions to update the
respective fields of the thread_group_cputime structure.
Non-SMP operation is trivial and will not be mentioned further.
The per-cpu structure is always allocated when a task creates its first new
thread, via a call to thread_group_cputime_clone_thread() from copy_signal().
It is freed at process exit via a call to thread_group_cputime_free() from
cleanup_signal().
All functions that formerly summed utime/stime/sum_sched_runtime values from
from all threads in the thread group now use thread_group_cputime() to
snapshot the values in the thread_group_cputime structure or the values in
the task structure itself if the per-cpu structure hasn't been allocated.
Finally, the code in kernel/posix-cpu-timers.c has changed quite a bit.
The run_posix_cpu_timers() function has been split into a fast path and a
slow path; the former safely checks whether there are any expired thread
timers and, if not, just returns, while the slow path does the heavy lifting.
With the dedicated thread group fields, timers are no longer "rebalanced" and
the process_timer_rebalance() function and related code has gone away. All
summing loops are gone and all code that used them now uses the
thread_group_cputime() inline. When process-wide timers are set, the new
task_cputime structure in signal_struct is used to cache the earliest
expiration; this is checked in the fast path.
Performance
The fix appears not to add significant overhead to existing operations. It
generally performs the same as the current code except in two cases, one in
which it performs slightly worse (Case 5 below) and one in which it performs
very significantly better (Case 2 below). Overall it's a wash except in those
two cases.
I've since done somewhat more involved testing on a dual-core Opteron system.
Case 1: With no itimer running, for a test with 100,000 threads, the fixed
kernel took 1428.5 seconds, 513 seconds more than the unfixed system,
all of which was spent in the system. There were twice as many
voluntary context switches with the fix as without it.
Case 2: With an itimer running at .01 second ticks and 4000 threads (the most
an unmodified kernel can handle), the fixed kernel ran the test in
eight percent of the time (5.8 seconds as opposed to 70 seconds) and
had better tick accuracy (.012 seconds per tick as opposed to .023
seconds per tick).
Case 3: A 4000-thread test with an initial timer tick of .01 second and an
interval of 10,000 seconds (i.e. a timer that ticks only once) had
very nearly the same performance in both cases: 6.3 seconds elapsed
for the fixed kernel versus 5.5 seconds for the unfixed kernel.
With fewer threads (eight in these tests), the Case 1 test ran in essentially
the same time on both the modified and unmodified kernels (5.2 seconds versus
5.8 seconds). The Case 2 test ran in about the same time as well, 5.9 seconds
versus 5.4 seconds but again with much better tick accuracy, .013 seconds per
tick versus .025 seconds per tick for the unmodified kernel.
Since the fix affected the rlimit code, I also tested soft and hard CPU limits.
Case 4: With a hard CPU limit of 20 seconds and eight threads (and an itimer
running), the modified kernel was very slightly favored in that while
it killed the process in 19.997 seconds of CPU time (5.002 seconds of
wall time), only .003 seconds of that was system time, the rest was
user time. The unmodified kernel killed the process in 20.001 seconds
of CPU (5.014 seconds of wall time) of which .016 seconds was system
time. Really, though, the results were too close to call. The results
were essentially the same with no itimer running.
Case 5: With a soft limit of 20 seconds and a hard limit of 2000 seconds
(where the hard limit would never be reached) and an itimer running,
the modified kernel exhibited worse tick accuracy than the unmodified
kernel: .050 seconds/tick versus .028 seconds/tick. Otherwise,
performance was almost indistinguishable. With no itimer running this
test exhibited virtually identical behavior and times in both cases.
In times past I did some limited performance testing; those results are below.
On a four-cpu Opteron system without this fix, a sixteen-thread test executed
in 3569.991 seconds, of which user was 3568.435s and system was 1.556s. On
the same system with the fix, user and elapsed time were about the same, but
system time dropped to 0.007 seconds. Performance with eight, four and one
thread was comparable. Interestingly, the timer ticks with the fix seemed
more accurate: The sixteen-thread test with the fix received 149543 ticks
for 0.024 seconds per tick, while the same test without the fix received 58720
for 0.061 seconds per tick. Both cases were configured for an interval of
0.01 seconds. Again, the other tests were comparable. Each thread in this
test computed the primes up to 25,000,000.
I also did a test with a large number of threads, 100,000 threads, which is
impossible without the fix. In this case each thread computed the primes only
up to 10,000 (to make the runtime manageable). System time dominated, at
1546.968 seconds out of a total 2176.906 seconds (giving a user time of
629.938s). It received 147651 ticks for 0.015 seconds per tick, still quite
accurate. There is obviously no comparable test without the fix.
Signed-off-by: Frank Mayhar <fmayhar@google.com>
Cc: Roland McGrath <roland@redhat.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-09-12 16:54:39 +00:00
|
|
|
/*
|
|
|
|
* Initialize POSIX timer handling for a single task.
|
|
|
|
*/
|
|
|
|
static void posix_cpu_timers_init(struct task_struct *tsk)
|
|
|
|
{
|
2011-12-15 13:56:09 +00:00
|
|
|
tsk->cputime_expires.prof_exp = 0;
|
|
|
|
tsk->cputime_expires.virt_exp = 0;
|
timers: fix itimer/many thread hang
2008-09-12 16:54:39 +00:00
|
|
|
tsk->cputime_expires.sched_exp = 0;
|
|
|
|
INIT_LIST_HEAD(&tsk->cpu_timers[0]);
|
|
|
|
INIT_LIST_HEAD(&tsk->cpu_timers[1]);
|
|
|
|
INIT_LIST_HEAD(&tsk->cpu_timers[2]);
|
|
|
|
}
|
|
|
|
|
2013-07-03 22:08:31 +00:00
|
|
|
static inline void
|
|
|
|
init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
|
|
|
|
{
|
|
|
|
task->pids[type].pid = pid;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* This creates a new process as a copy of the old one,
|
|
|
|
* but does not actually start it yet.
|
|
|
|
*
|
|
|
|
* It copies the registers, and all the appropriate
|
|
|
|
* parts of the process environment (as per the clone
|
|
|
|
* flags). The actual kick-off is left to the caller.
|
|
|
|
*/
|
2006-07-03 07:25:41 +00:00
|
|
|
static struct task_struct *copy_process(unsigned long clone_flags,
|
|
|
|
unsigned long stack_start,
|
|
|
|
unsigned long stack_size,
|
|
|
|
int __user *child_tidptr,
|
2008-07-26 02:45:47 +00:00
|
|
|
struct pid *pid,
|
|
|
|
int trace)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
int retval;
|
2007-10-19 06:41:09 +00:00
|
|
|
struct task_struct *p;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
2013-03-13 18:51:49 +00:00
|
|
|
if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Thread groups must share signals as well, and detached threads
|
|
|
|
* can only be started up within the thread group.
|
|
|
|
*/
|
|
|
|
if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Shared signal handlers imply shared VM. By way of the above,
|
|
|
|
* thread groups also imply shared VM. Blocking this case allows
|
|
|
|
* for various simplifications in other code.
|
|
|
|
*/
|
|
|
|
if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
2009-09-23 22:57:20 +00:00
|
|
|
/*
|
|
|
|
* Siblings of global init remain as zombies on exit since they are
|
|
|
|
* not reaped by their parent (swapper). To solve this and to avoid
|
|
|
|
* multi-rooted process trees, prevent global and container-inits
|
|
|
|
* from creating siblings.
|
|
|
|
*/
|
|
|
|
if ((clone_flags & CLONE_PARENT) &&
|
|
|
|
current->signal->flags & SIGNAL_UNKILLABLE)
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
2012-12-21 03:26:06 +00:00
|
|
|
/*
|
2013-09-11 21:19:41 +00:00
|
|
|
* If the new process will be in a different pid or user namespace
|
|
|
|
* do not allow it to share a thread group or signal handlers or
|
|
|
|
* parent with the forking task.
|
2012-12-21 03:26:06 +00:00
|
|
|
*/
|
2013-11-15 05:10:16 +00:00
|
|
|
if (clone_flags & CLONE_SIGHAND) {
|
2013-09-11 21:19:41 +00:00
|
|
|
if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
|
|
|
|
(task_active_pid_ns(current) !=
|
|
|
|
current->nsproxy->pid_ns_for_children))
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
2012-12-21 03:26:06 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
retval = security_task_create(clone_flags);
|
|
|
|
if (retval)
|
|
|
|
goto fork_out;
|
|
|
|
|
|
|
|
retval = -ENOMEM;
|
|
|
|
p = dup_task_struct(current);
|
|
|
|
if (!p)
|
|
|
|
goto fork_out;
|
|
|
|
|
2009-06-02 20:39:48 +00:00
|
|
|
ftrace_graph_init_task(p);
|
|
|
|
|
2006-10-17 07:10:33 +00:00
|
|
|
rt_mutex_init_task(p);
|
|
|
|
|
2008-07-14 10:09:28 +00:00
|
|
|
#ifdef CONFIG_PROVE_LOCKING
|
2006-07-03 07:24:42 +00:00
|
|
|
DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
|
|
|
|
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
|
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
retval = -EAGAIN;
|
2008-11-13 23:39:26 +00:00
|
|
|
if (atomic_read(&p->real_cred->user->processes) >=
|
2010-03-05 21:42:54 +00:00
|
|
|
task_rlimit(p, RLIMIT_NPROC)) {
|
2013-07-03 22:08:29 +00:00
|
|
|
if (p->real_cred->user != INIT_USER &&
|
|
|
|
!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
|
2005-04-16 22:20:36 +00:00
|
|
|
goto bad_fork_free;
|
|
|
|
}
|
2011-08-08 15:02:04 +00:00
|
|
|
current->flags &= ~PF_NPROC_EXCEEDED;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-11-13 23:39:17 +00:00
|
|
|
retval = copy_creds(p, clone_flags);
|
|
|
|
if (retval < 0)
|
|
|
|
goto bad_fork_free;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If multiple threads are within copy_process(), then this check
|
|
|
|
* triggers too late. This doesn't hurt, the check is only there
|
|
|
|
* to stop root fork bombs.
|
|
|
|
*/
|
2009-02-06 08:17:19 +00:00
|
|
|
retval = -EAGAIN;
|
2005-04-16 22:20:36 +00:00
|
|
|
if (nr_threads >= max_threads)
|
|
|
|
goto bad_fork_cleanup_count;
|
|
|
|
|
2005-11-14 00:06:55 +00:00
|
|
|
if (!try_module_get(task_thread_info(p)->exec_domain->module))
|
2005-04-16 22:20:36 +00:00
|
|
|
goto bad_fork_cleanup_count;
|
|
|
|
|
2006-07-14 07:24:36 +00:00
|
|
|
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
|
2014-04-07 22:37:27 +00:00
|
|
|
p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
|
|
|
|
p->flags |= PF_FORKNOEXEC;
|
2005-04-16 22:20:36 +00:00
|
|
|
INIT_LIST_HEAD(&p->children);
|
|
|
|
INIT_LIST_HEAD(&p->sibling);
|
rcu: Merge preemptable-RCU functionality into hierarchical RCU
Create a kernel/rcutree_plugin.h file that contains definitions
for preemptable RCU (or, under the #else branch of the #ifdef,
empty definitions for the classic non-preemptable semantics).
These definitions fit into plugins defined in kernel/rcutree.c
for this purpose.
This variant of preemptable RCU uses a new algorithm whose
read-side expense is roughly that of classic hierarchical RCU
under CONFIG_PREEMPT. This new algorithm's update-side expense
is similar to that of classic hierarchical RCU, and, in the absence
of read-side preemption or blocking, is exactly that of classic
hierarchical RCU. Perhaps more important, this new algorithm
has a much simpler implementation, saving well over 1,000 lines
of code compared to mainline's implementation of preemptable
RCU, which will hopefully be retired in favor of this new
algorithm.
The simplifications are obtained by maintaining per-task
nesting state for running tasks, and using a simple
lock-protected algorithm to handle accounting when tasks block
within RCU read-side critical sections, making use of lessons
learned while creating numerous user-level RCU implementations
over the past 18 months.
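As a rough illustration of the per-task nesting state mentioned above (a
userspace sketch with invented names, not the code in kernel/rcutree_plugin.h):
the read-side primitives only bump and drop a counter, and the expensive,
lock-protected bookkeeping runs only when the outermost critical section ends
after the task was preempted or blocked inside it.

#include <stdio.h>

/* Illustrative per-task state; the real code keeps this in task_struct. */
static __thread int rcu_read_nesting;
static __thread int rcu_read_unlock_special;  /* set if we blocked inside */

static void sketch_rcu_read_lock(void)
{
    rcu_read_nesting++;          /* O(1): just bump the nesting count */
}

static void sketch_rcu_read_unlock(void)
{
    if (--rcu_read_nesting == 0 && rcu_read_unlock_special) {
        /* Outermost unlock and we blocked or were preempted inside the
         * critical section: do the rare, lock-protected accounting. */
        rcu_read_unlock_special = 0;
        printf("slow path: report quiescent state\n");
    }
}

int main(void)
{
    sketch_rcu_read_lock();
    sketch_rcu_read_lock();          /* nesting is cheap */
    rcu_read_unlock_special = 1;     /* pretend we were preempted */
    sketch_rcu_read_unlock();        /* inner unlock: nothing special */
    sketch_rcu_read_unlock();        /* outermost unlock: slow path runs */
    return 0;
}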
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josht@linux.vnet.ibm.com
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
LKML-Reference: <12509746134003-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-08-22 20:56:52 +00:00
|
|
|
rcu_copy_process(p);
|
2005-04-16 22:20:36 +00:00
|
|
|
p->vfork_done = NULL;
|
|
|
|
spin_lock_init(&p->alloc_lock);
|
|
|
|
|
|
|
|
init_sigpending(&p->pending);
|
|
|
|
|
2011-12-15 13:56:09 +00:00
|
|
|
p->utime = p->stime = p->gtime = 0;
|
|
|
|
p->utimescaled = p->stimescaled = 0;
|
2013-02-25 16:25:39 +00:00
|
|
|
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
|
2012-11-21 23:58:35 +00:00
|
|
|
p->prev_cputime.utime = p->prev_cputime.stime = 0;
|
2009-12-02 08:26:47 +00:00
|
|
|
#endif
|
2012-12-16 19:00:34 +00:00
|
|
|
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
|
|
|
|
seqlock_init(&p->vtime_seqlock);
|
|
|
|
p->vtime_snap = 0;
|
|
|
|
p->vtime_snap_whence = VTIME_SLEEPING;
|
|
|
|
#endif
|
|
|
|
|
2010-04-06 21:34:42 +00:00
|
|
|
#if defined(SPLIT_RSS_COUNTING)
|
|
|
|
memset(&p->rss_stat, 0, sizeof(p->rss_stat));
|
|
|
|
#endif
|
2007-07-09 16:52:00 +00:00
|
|
|
|
2008-09-01 22:52:40 +00:00
|
|
|
p->default_timer_slack_ns = current->timer_slack_ns;
|
|
|
|
|
2008-07-27 15:29:15 +00:00
|
|
|
task_io_accounting_init(&p->ioac);
|
2005-04-16 22:20:36 +00:00
|
|
|
acct_clear_integrals(p);
|
|
|
|
|
timers: fix itimer/many thread hang
2008-09-12 16:54:39 +00:00
|
|
|
posix_cpu_timers_init(p);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2014-07-16 21:04:34 +00:00
|
|
|
p->start_time = ktime_get_ns();
|
2014-07-16 21:04:32 +00:00
|
|
|
p->real_start_time = ktime_get_boot_ns();
|
2005-04-16 22:20:36 +00:00
|
|
|
p->io_context = NULL;
|
|
|
|
p->audit_context = NULL;
|
2011-05-26 23:25:18 +00:00
|
|
|
if (clone_flags & CLONE_THREAD)
|
2011-12-13 02:12:21 +00:00
|
|
|
threadgroup_change_begin(current);
|
2007-10-19 06:39:33 +00:00
|
|
|
cgroup_fork(p);
|
2005-04-16 22:20:36 +00:00
|
|
|
#ifdef CONFIG_NUMA
|
2008-04-28 09:13:09 +00:00
|
|
|
p->mempolicy = mpol_dup(p->mempolicy);
|
2011-07-26 23:08:39 +00:00
|
|
|
if (IS_ERR(p->mempolicy)) {
|
|
|
|
retval = PTR_ERR(p->mempolicy);
|
|
|
|
p->mempolicy = NULL;
|
2014-03-28 07:18:27 +00:00
|
|
|
goto bad_fork_cleanup_threadgroup_lock;
|
2011-07-26 23:08:39 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif
|
2011-07-26 23:08:30 +00:00
|
|
|
#ifdef CONFIG_CPUSETS
|
|
|
|
p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
|
|
|
|
p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
|
cpuset: mm: reduce large amounts of memory barrier related damage v3
Commit c0ff7453bb5c ("cpuset,mm: fix no node to alloc memory when
changing cpuset's mems") wins a super prize for the largest number of
memory barriers entered into fast paths for one commit.
[get|put]_mems_allowed is incredibly heavy with pairs of full memory
barriers inserted into a number of hot paths. This was detected while
investigating a large page allocator slowdown introduced some time
after 2.6.32. The largest portion of this overhead was shown by
oprofile to be at an mfence introduced by this commit into the page
allocator hot path.
For extra style points, the commit introduced the use of yield() in an
implementation of what looks like a spinning mutex.
This patch replaces the full memory barriers on both read and write
sides with a sequence counter with just read barriers on the fast path
side. This is much cheaper on some architectures, including x86. The
main bulk of the patch is the retry logic if the nodemask changes in a
manner that can cause a false failure.
While updating the nodemask, a check is made to see if a false failure
is a risk. If it is, the sequence number gets bumped and parallel
allocators will briefly stall while the nodemask update takes place.
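A minimal userspace sketch of that read-side retry pattern (illustrative only;
the kernel uses the seqcount API on mems_allowed_seq, whose helpers also insert
the required read barriers, which this simplified version glosses over with
sequentially consistent atomics and a plain data read):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative nodemask guarded by a hand-rolled sequence counter. */
static _Atomic unsigned int mems_seq;   /* odd while an update is in flight */
static uint64_t mems_allowed = 0x3;     /* nodes 0 and 1 allowed */

/* Writer: make the counter odd, update, make it even again. */
static void update_mems_allowed(uint64_t new_mask)
{
    atomic_fetch_add(&mems_seq, 1);
    mems_allowed = new_mask;
    atomic_fetch_add(&mems_seq, 1);
}

/* Reader: retry only if an update raced with the read. */
static uint64_t read_mems_allowed(void)
{
    unsigned int seq;
    uint64_t mask;

    do {
        seq = atomic_load(&mems_seq);
        mask = mems_allowed;    /* simplified; no barrier bookkeeping here */
    } while ((seq & 1) || seq != atomic_load(&mems_seq));
    return mask;
}

int main(void)
{
    printf("mask before: %#llx\n", (unsigned long long)read_mems_allowed());
    update_mems_allowed(0xc);   /* move the allowed set to nodes 2 and 3 */
    printf("mask after:  %#llx\n", (unsigned long long)read_mems_allowed());
    return 0;
}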
In a page fault test microbenchmark, oprofile samples from
__alloc_pages_nodemask went from 4.53% of all samples to 1.15%. The
actual results were
3.3.0-rc3 3.3.0-rc3
rc3-vanilla nobarrier-v2r1
Clients 1 UserTime 0.07 ( 0.00%) 0.08 (-14.19%)
Clients 2 UserTime 0.07 ( 0.00%) 0.07 ( 2.72%)
Clients 4 UserTime 0.08 ( 0.00%) 0.07 ( 3.29%)
Clients 1 SysTime 0.70 ( 0.00%) 0.65 ( 6.65%)
Clients 2 SysTime 0.85 ( 0.00%) 0.82 ( 3.65%)
Clients 4 SysTime 1.41 ( 0.00%) 1.41 ( 0.32%)
Clients 1 WallTime 0.77 ( 0.00%) 0.74 ( 4.19%)
Clients 2 WallTime 0.47 ( 0.00%) 0.45 ( 3.73%)
Clients 4 WallTime 0.38 ( 0.00%) 0.37 ( 1.58%)
Clients 1 Flt/sec/cpu 497620.28 ( 0.00%) 520294.53 ( 4.56%)
Clients 2 Flt/sec/cpu 414639.05 ( 0.00%) 429882.01 ( 3.68%)
Clients 4 Flt/sec/cpu 257959.16 ( 0.00%) 258761.48 ( 0.31%)
Clients 1 Flt/sec 495161.39 ( 0.00%) 517292.87 ( 4.47%)
Clients 2 Flt/sec 820325.95 ( 0.00%) 850289.77 ( 3.65%)
Clients 4 Flt/sec 1020068.93 ( 0.00%) 1022674.06 ( 0.26%)
MMTests Statistics: duration
Sys Time Running Test (seconds) 135.68 132.17
User+Sys Time Running Test (seconds) 164.2 160.13
Total Elapsed Time (seconds) 123.46 120.87
The overall improvement is small but the System CPU time is much
improved and roughly in correlation to what oprofile reported (these
performance figures are without profiling so skew is expected). The
actual number of page faults is noticeably improved.
For benchmarks like kernel builds, the overall benefit is marginal but
the system CPU time is slightly reduced.
To test the actual bug the commit fixed I opened two terminals. The
first ran within a cpuset and continually ran a small program that
faulted 100M of anonymous data. In a second window, the nodemask of the
cpuset was continually randomised in a loop.
Without the commit, the program would fail every so often (usually
within 10 seconds) and obviously with the commit everything worked fine.
With this patch applied, it also worked fine so the fix should be
functionally equivalent.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Miao Xie <miaox@cn.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2012-03-21 23:34:11 +00:00
|
|
|
seqcount_init(&p->mems_allowed_seq);
|
2011-07-26 23:08:30 +00:00
|
|
|
#endif
|
2006-07-03 07:24:42 +00:00
|
|
|
#ifdef CONFIG_TRACE_IRQFLAGS
|
|
|
|
p->irq_events = 0;
|
|
|
|
p->hardirqs_enabled = 0;
|
|
|
|
p->hardirq_enable_ip = 0;
|
|
|
|
p->hardirq_enable_event = 0;
|
|
|
|
p->hardirq_disable_ip = _THIS_IP_;
|
|
|
|
p->hardirq_disable_event = 0;
|
|
|
|
p->softirqs_enabled = 1;
|
|
|
|
p->softirq_enable_ip = _THIS_IP_;
|
|
|
|
p->softirq_enable_event = 0;
|
|
|
|
p->softirq_disable_ip = 0;
|
|
|
|
p->softirq_disable_event = 0;
|
|
|
|
p->hardirq_context = 0;
|
|
|
|
p->softirq_context = 0;
|
|
|
|
#endif
|
[PATCH] lockdep: core
Do 'make oldconfig' and accept all the defaults for new config options -
reboot into the kernel and if everything goes well it should boot up fine and
you should have /proc/lockdep and /proc/lockdep_stats files.
Typically if the lock validator finds some problem it will print out
voluminous debug output that begins with "BUG: ..." and which syslog output
can be used by kernel developers to figure out the precise locking scenario.
What does the lock validator do? It "observes" and maps all locking rules as
they occur dynamically (as triggered by the kernel's natural use of spinlocks,
rwlocks, mutexes and rwsems). Whenever the lock validator subsystem detects a
new locking scenario, it validates this new rule against the existing set of
rules. If this new rule is consistent with the existing set of rules then the
new rule is added transparently and the kernel continues as normal. If the
new rule could create a deadlock scenario then this condition is printed out.
When determining validity of locking, all possible "deadlock scenarios" are
considered: assuming an arbitrary number of CPUs, arbitrary irq context and task
context constellations, running arbitrary combinations of all the existing
locking scenarios. In a typical system this means millions of separate
scenarios. This is why we call it a "locking correctness" validator - for all
rules that are observed the lock validator proves it with mathematical
certainty that a deadlock could not occur (assuming that the lock validator
implementation itself is correct and its internal data structures are not
corrupted by some other kernel subsystem). [see more details and conditionals
of this statement in include/linux/lockdep.h and
Documentation/lockdep-design.txt]
Furthermore, this "all possible scenarios" property of the validator also
enables the finding of complex, highly unlikely multi-CPU multi-context races
via single single-context rules, increasing the likelihood of finding bugs
drastically. In practical terms: the lock validator already found a bug in
the upstream kernel that could only occur on systems with 3 or more CPUs, and
which needed 3 very unlikely code sequences to occur at once on the 3 CPUs.
That bug was found and reported on a single-CPU system (!). So in essence a
race will be found "piecemeal-wise", triggering all the necessary components
for the race, without having to reproduce the race scenario itself! In its
short existence the lock validator found and reported many bugs before they
actually caused a real deadlock.
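As a tiny illustration of the kind of rule the validator learns and checks (a
userspace pthread analogue, not kernel code): the two functions below never
deadlock when run one after the other, yet together they establish both the
A->B and the B->A ordering, which is exactly the inconsistency a lock-ordering
validator reports the first time it observes the second pattern.

/* build with: cc -pthread ab_ba.c */
#include <pthread.h>
#include <stdio.h>

/* Two lock instances; a validator tracks the order they are taken in
 * relative to each other. */
static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void path_one(void)            /* establishes the rule A -> B */
{
    pthread_mutex_lock(&lock_a);
    pthread_mutex_lock(&lock_b);
    pthread_mutex_unlock(&lock_b);
    pthread_mutex_unlock(&lock_a);
}

static void path_two(void)            /* observes B -> A: inconsistent */
{
    pthread_mutex_lock(&lock_b);
    pthread_mutex_lock(&lock_a);      /* an ordering validator flags this */
    pthread_mutex_unlock(&lock_a);
    pthread_mutex_unlock(&lock_b);
}

int main(void)
{
    /* Run sequentially: no deadlock ever happens here, yet the two
     * orderings together already prove a deadlock is possible if the
     * paths ever run concurrently -- which is what lockdep checks. */
    path_one();
    path_two();
    puts("both orderings exercised");
    return 0;
}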
To further increase the efficiency of the validator, the mapping is not per
"lock instance", but per "lock-class". For example, all struct inode objects
in the kernel have inode->inotify_mutex. If there are 10,000 inodes cached,
then there are 10,000 lock objects. But ->inotify_mutex is a single "lock
type", and all locking activities that occur against ->inotify_mutex are
"unified" into this single lock-class. The advantage of the lock-class
approach is that all historical ->inotify_mutex uses are mapped into a single
(and as narrow as possible) set of locking rules - regardless of how many
different tasks or inode structures it took to build this set of rules. The
set of rules persist during the lifetime of the kernel.
To see the rough magnitude of checking that the lock validator does, here's a
portion of /proc/lockdep_stats, fresh after bootup:
lock-classes: 694 [max: 2048]
direct dependencies: 1598 [max: 8192]
indirect dependencies: 17896
all direct dependencies: 16206
dependency chains: 1910 [max: 8192]
in-hardirq chains: 17
in-softirq chains: 105
in-process chains: 1065
stack-trace entries: 38761 [max: 131072]
combined max dependencies: 2033928
hardirq-safe locks: 24
hardirq-unsafe locks: 176
softirq-safe locks: 53
softirq-unsafe locks: 137
irq-safe locks: 59
irq-unsafe locks: 176
The lock validator has observed 1598 actual single-thread locking patterns,
and has validated all possible 2033928 distinct locking scenarios.
More details about the design of the lock validator can be found in
Documentation/lockdep-design.txt, which can also be found at:
http://redhat.com/~mingo/lockdep-patches/lockdep-design.txt
[bunk@stusta.de: cleanups]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-07-03 07:24:50 +00:00
|
|
|
#ifdef CONFIG_LOCKDEP
|
|
|
|
p->lockdep_depth = 0; /* no locks held yet */
|
|
|
|
p->curr_chain_key = 0;
|
|
|
|
p->lockdep_recursion = 0;
|
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-01-09 23:59:20 +00:00
|
|
|
#ifdef CONFIG_DEBUG_MUTEXES
|
|
|
|
p->blocked_on = NULL; /* not blocked yet */
|
|
|
|
#endif
|
2013-03-23 23:11:31 +00:00
|
|
|
#ifdef CONFIG_BCACHE
|
|
|
|
p->sequential_io = 0;
|
|
|
|
p->sequential_io_avg = 0;
|
|
|
|
#endif
|
2009-04-03 14:43:48 +00:00
|
|
|
|
sched: fix copy_namespace() <-> sched_fork() dependency in do_fork
Sukadev Bhattiprolu reported a kernel crash with control groups.
There are couple of problems discovered by Suka's test:
- The test requires the cgroup filesystem to be mounted with
at least the cpu and ns options (i.e. both namespace and cpu
controllers are active in the same hierarchy).
# mkdir /dev/cpuctl
# mount -t cgroup -ocpu,ns none cpuctl
(or simply)
# mount -t cgroup none cpuctl -> Will activate all controllers
in same hierarchy.
- The test invokes clone() with CLONE_NEWNS set. This causes a new child
to be created, also a new group (do_fork->copy_namespaces->ns_cgroup_clone->
cgroup_clone) and the child is attached to the new group (cgroup_clone->
attach_task->sched_move_task). At this point in time, the child's scheduler
related fields are uninitialized (including its on_rq field, which it has
inherited from its parent). As a result sched_move_task thinks it is on the
runqueue, when it isn't.
As a solution to this problem, I moved the sched_fork() call, which
initializes scheduler related fields on a new task, before
copy_namespaces(). I am not sure though whether moving up will
cause other side-effects. Do you see any issue?
- The second problem exposed by this test is that task_new_fair()
assumes that parent and child will be part of the same group (which
needn't be the case, as this test shows). As a result, cfs_rq->curr can be NULL
for the child.
The solution is to test for curr pointer being NULL in
task_new_fair().
With the patch below, I could run ns_exec() fine w/o a crash.
Reported-by: Sukadev Bhattiprolu <sukadev@us.ibm.com>
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2007-11-09 21:39:39 +00:00
|
|
|
/* Perform scheduler related setup. Assign this task to a CPU. */
|
sched/deadline: Add SCHED_DEADLINE structures & implementation
Introduces the data structures, constants and symbols needed for
SCHED_DEADLINE implementation.
Core data structure of SCHED_DEADLINE are defined, along with their
initializers. Hooks for checking if a task belong to the new policy
are also added where they are needed.
Adds a scheduling class, in sched/dl.c and a new policy called
SCHED_DEADLINE. It is an implementation of the Earliest Deadline
First (EDF) scheduling algorithm, augmented with a mechanism (called
Constant Bandwidth Server, CBS) that makes it possible to isolate
the behaviour of tasks between each other.
The typical -deadline task will be made up of a computation phase
(instance) which is activated in a periodic or sporadic fashion. The
expected (maximum) duration of such computation is called the task's
runtime; the time interval by which each instance needs to be completed
is called the task's relative deadline. The task's absolute deadline
is dynamically calculated as the time instant a task (better, an
instance) activates plus the relative deadline.
The EDF algorithm selects the task with the smallest absolute
deadline as the one to be executed first, while the CBS ensures that each
task runs for at most its runtime in every (relative) deadline
length time interval, avoiding any interference between different
tasks (bandwidth isolation).
Thanks to this feature, tasks that do not strictly comply with
the computational model sketched above can effectively use the new
policy.
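For reference, a task normally opts into the new policy from userspace via the
sched_setattr() system call; the sketch below follows the layout documented for
that call (glibc provides no wrapper, so it assumes your headers define
SYS_sched_setattr) and asks, purely as an example, for 10ms of runtime every
100ms with a 100ms relative deadline. It needs CAP_SYS_NICE and a kernel built
with SCHED_DEADLINE support.

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

/* Layout as documented for sched_setattr(2); glibc provides no wrapper. */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t  sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;   /* nanoseconds */
    uint64_t sched_deadline;  /* nanoseconds */
    uint64_t sched_period;    /* nanoseconds */
};

int main(void)
{
    struct sched_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.sched_policy   = SCHED_DEADLINE;
    attr.sched_runtime  = 10 * 1000 * 1000;   /*  10 ms of CPU...  */
    attr.sched_deadline = 100 * 1000 * 1000;  /*  ...within 100 ms */
    attr.sched_period   = 100 * 1000 * 1000;  /*  every 100 ms     */

    /* Apply to the calling thread (pid 0). */
    if (syscall(SYS_sched_setattr, 0, &attr, 0) != 0) {
        perror("sched_setattr");
        return 1;
    }
    puts("now running as a -deadline task");
    return 0;
}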
To summarize, this patch:
- introduces the data structures, constants and symbols needed;
- implements the core logic of the scheduling algorithm in the new
scheduling class file;
- provides all the glue code between the new scheduling class and
the core scheduler and refines the interactions between sched/dl
and the other existing scheduling classes.
Signed-off-by: Dario Faggioli <raistlin@linux.it>
Signed-off-by: Michael Trimarchi <michael@amarulasolutions.com>
Signed-off-by: Fabio Checconi <fchecconi@gmail.com>
Signed-off-by: Juri Lelli <juri.lelli@gmail.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1383831828-15501-4-git-send-email-juri.lelli@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2013-11-28 10:14:43 +00:00
|
|
|
retval = sched_fork(clone_flags, p);
|
|
|
|
if (retval)
|
|
|
|
goto bad_fork_cleanup_policy;
|
2009-05-25 12:45:27 +00:00
|
|
|
|
perf: Do the big rename: Performance Counters -> Performance Events
Bye-bye Performance Counters, welcome Performance Events!
In the past few months the perfcounters subsystem has grown out of its
initial role of counting hardware events, and has become (and is
becoming) a much broader generic event enumeration, reporting, logging,
monitoring, analysis facility.
Naming its core object 'perf_counter' and naming the subsystem
'perfcounters' has become more and more of a misnomer. With pending
code like hw-breakpoints support the 'counter' name is less and
less appropriate.
All in one, we've decided to rename the subsystem to 'performance
events' and to propagate this rename through all fields, variables
and API names. (in an ABI compatible fashion)
The word 'event' is also a bit shorter than 'counter' - which makes
it slightly more convenient to write/handle as well.
Thanks goes to Stephane Eranian who first observed this misnomer and
suggested a rename.
User-space tooling and ABI compatibility is not affected - this patch
should be function-invariant. (Also, defconfigs were not touched to
keep the size down.)
This patch has been generated via the following script:
FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')
sed -i \
-e 's/PERF_EVENT_/PERF_RECORD_/g' \
-e 's/PERF_COUNTER/PERF_EVENT/g' \
-e 's/perf_counter/perf_event/g' \
-e 's/nb_counters/nb_events/g' \
-e 's/swcounter/swevent/g' \
-e 's/tpcounter_event/tp_event/g' \
$FILES
for N in $(find . -name perf_counter.[ch]); do
M=$(echo $N | sed 's/perf_counter/perf_event/g')
mv $N $M
done
FILES=$(find . -name perf_event.*)
sed -i \
-e 's/COUNTER_MASK/REG_MASK/g' \
-e 's/COUNTER/EVENT/g' \
-e 's/\<event\>/event_id/g' \
-e 's/counter/event/g' \
-e 's/Counter/Event/g' \
$FILES
... to keep it as correct as possible. This script can also be
used by anyone who has pending perfcounters patches - it converts
a Linux kernel tree over to the new naming. We tried to time this
change to the point in time where the amount of pending patches
is the smallest: the end of the merge window.
Namespace clashes were fixed up in a preparatory patch - and some
stylistic fallout will be fixed up in a subsequent patch.
( NOTE: 'counters' are still the proper terminology when we deal
with hardware registers - and these sed scripts are a bit
over-eager in renaming them. I've undone some of that, but
in case there's something left where 'counter' would be
better than 'event' we can undo that on an individual basis
instead of touching an otherwise nicely automated patch. )
Suggested-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-09-21 10:02:48 +00:00
|
|
|
retval = perf_event_init_task(p);
|
2009-05-25 12:45:27 +00:00
|
|
|
if (retval)
|
|
|
|
goto bad_fork_cleanup_policy;
|
2011-07-26 23:08:39 +00:00
|
|
|
retval = audit_alloc(p);
|
|
|
|
if (retval)
|
2008-11-13 23:39:17 +00:00
|
|
|
goto bad_fork_cleanup_policy;
|
2005-04-16 22:20:36 +00:00
|
|
|
/* copy all the process information */
|
shm: make exit_shm work proportional to task activity
This is small set of patches our team has had kicking around for a few
versions internally that fixes tasks getting hung on shm_exit when there
are many threads hammering it at once.
Anton wrote a simple test to cause the issue:
http://ozlabs.org/~anton/junkcode/bust_shm_exit.c
Before applying this patchset, this test code will cause either hanging
tracebacks or pthread out of memory errors.
After this patchset, it will still produce output like:
root@somehost:~# ./bust_shm_exit 1024 160
...
INFO: rcu_sched detected stalls on CPUs/tasks: {} (detected by 116, t=2111 jiffies, g=241, c=240, q=7113)
INFO: Stall ended before state dump start
...
But the task will continue to run along happily, so we consider this an
improvement over hanging, even if it's a bit noisy.
This patch (of 3):
exit_shm obtains the ipc_ns shm rwsem for write and holds it while it
walks every shared memory segment in the namespace. Thus the amount of
work is related to the number of shm segments in the namespace, not the
number of segments that might need to be cleaned.
In addition, this occurs after the task has been notified the thread has
exited, so the number of tasks waiting for the ns shm rwsem can grow
without bound until memory is exhausted.
Add a list to the task struct of all shmids allocated by this task. Init
the list head in copy_process. Use the ns->rwsem for locking. Add
segments after id is added, remove before removing from id.
On unshare of NEW_IPCNS orphan any ids as if the task had exited, similar
to handling of semaphore undo.
I chose a define for the init sequence since it's a simple list init,
otherwise it would require a function call to avoid include loops between
the semaphore code and the task struct. Converting the list_del to
list_del_init for the unshare cases would remove the exit followed by
init, but I left it to blow up if not inited.
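Schematically, the bookkeeping described above looks like the sketch below, a
userspace rendering of the kernel's intrusive list_head pattern with invented
field names (the real field lives in task_struct and is protected by the ipc
namespace rwsem): the head is initialised in copy_process(), each segment is
linked in after its id is allocated, and exit walks only the exiting task's
own list.

#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive doubly-linked list, in the style of the kernel's
 * struct list_head (illustrative; not the kernel implementation). */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *new, struct list_head *head)
{
    new->next = head->next;
    new->prev = head;
    head->next->prev = new;
    head->next = new;
}

static void list_del(struct list_head *entry)
{
    entry->prev->next = entry->next;
    entry->next->prev = entry->prev;
    entry->next = entry->prev = entry;
}

/* Each shm segment carries a node linking it into its creator's list. */
struct shm_segment { int shmid; struct list_head task_node; };

/* Stand-in for the new field added to task_struct. */
struct task { struct list_head shm_list; };

int main(void)
{
    struct task t;
    struct shm_segment a = { .shmid = 101 }, b = { .shmid = 102 };

    INIT_LIST_HEAD(&t.shm_list);          /* done in copy_process()     */
    list_add(&a.task_node, &t.shm_list);  /* after the id is allocated  */
    list_add(&b.task_node, &t.shm_list);

    /* exit now walks only this task's segments, not the whole namespace. */
    for (struct list_head *p = t.shm_list.next, *n = p->next;
         p != &t.shm_list; p = n, n = p->next) {
        struct shm_segment *s =
            (struct shm_segment *)((char *)p -
                                   offsetof(struct shm_segment, task_node));
        printf("cleaning up shmid %d\n", s->shmid);
        list_del(p);
    }
    return 0;
}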
Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Jack Miller <millerjo@us.ibm.com>
Cc: Davidlohr Bueso <davidlohr@hp.com>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Anton Blanchard <anton@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2014-08-08 21:23:19 +00:00
|
|
|
shm_init_task(p);
|
2011-07-26 23:08:39 +00:00
|
|
|
retval = copy_semundo(clone_flags, p);
|
|
|
|
if (retval)
|
2005-04-16 22:20:36 +00:00
|
|
|
goto bad_fork_cleanup_audit;
|
2011-07-26 23:08:39 +00:00
|
|
|
retval = copy_files(clone_flags, p);
|
|
|
|
if (retval)
|
2005-04-16 22:20:36 +00:00
|
|
|
goto bad_fork_cleanup_semundo;
|
2011-07-26 23:08:39 +00:00
|
|
|
retval = copy_fs(clone_flags, p);
|
|
|
|
if (retval)
|
2005-04-16 22:20:36 +00:00
|
|
|
goto bad_fork_cleanup_files;
|
2011-07-26 23:08:39 +00:00
|
|
|
retval = copy_sighand(clone_flags, p);
|
|
|
|
if (retval)
|
2005-04-16 22:20:36 +00:00
|
|
|
goto bad_fork_cleanup_fs;
|
2011-07-26 23:08:39 +00:00
|
|
|
retval = copy_signal(clone_flags, p);
|
|
|
|
if (retval)
|
2005-04-16 22:20:36 +00:00
|
|
|
goto bad_fork_cleanup_sighand;
|
2011-07-26 23:08:39 +00:00
|
|
|
retval = copy_mm(clone_flags, p);
|
|
|
|
if (retval)
|
2005-04-16 22:20:36 +00:00
|
|
|
goto bad_fork_cleanup_signal;
|
2011-07-26 23:08:39 +00:00
|
|
|
retval = copy_namespaces(clone_flags, p);
|
|
|
|
if (retval)
|
CRED: Inaugurate COW credentials
Inaugurate copy-on-write credentials management. This uses RCU to manage the
credentials pointer in the task_struct with respect to accesses by other tasks.
A process may only modify its own credentials, and so does not need locking to
access or modify its own credentials.
A mutex (cred_replace_mutex) is added to the task_struct to control the effect
of PTRACE_ATTACHED on credential calculations, particularly with respect to
execve().
With this patch, the contents of an active credentials struct may not be
changed directly; rather a new set of credentials must be prepared, modified
and committed using something like the following sequence of events:
struct cred *new = prepare_creds();
int ret = blah(new);
if (ret < 0) {
abort_creds(new);
return ret;
}
return commit_creds(new);
There are some exceptions to this rule: the keyrings pointed to by the active
credentials may be instantiated - keyrings violate the COW rule as managing
COW keyrings is tricky, given that it is possible for a task to directly alter
the keys in a keyring in use by another task.
To help enforce this, various pointers to sets of credentials, such as those in
the task_struct, are declared const. The purpose of this is compile-time
discouragement of altering credentials through those pointers. Once a set of
credentials has been made public through one of these pointers, it may not be
modified, except under special circumstances:
(1) Its reference count may be incremented and decremented.
(2) The keyrings to which it points may be modified, but not replaced.
The only safe way to modify anything else is to create a replacement and commit
using the functions described in Documentation/credentials.txt (which will be
added by a later patch).
This patch and the preceding patches have been tested with the LTP SELinux
testsuite.
This patch makes several logical sets of alteration:
(1) execve().
This now prepares and commits credentials in various places in the
security code rather than altering the current creds directly.
(2) Temporary credential overrides.
do_coredump() and sys_faccessat() now prepare their own credentials and
temporarily override the ones currently on the acting thread, whilst
preventing interference from other threads by holding cred_replace_mutex
on the thread being dumped.
This will be replaced in a future patch by something that hands down the
credentials directly to the functions being called, rather than altering
the task's objective credentials.
(3) LSM interface.
A number of functions have been changed, added or removed:
(*) security_capset_check(), ->capset_check()
(*) security_capset_set(), ->capset_set()
Removed in favour of security_capset().
(*) security_capset(), ->capset()
New. This is passed a pointer to the new creds, a pointer to the old
creds and the proposed capability sets. It should fill in the new
creds or return an error. All pointers, barring the pointer to the
new creds, are now const.
(*) security_bprm_apply_creds(), ->bprm_apply_creds()
Changed; now returns a value, which will cause the process to be
killed if it's an error.
(*) security_task_alloc(), ->task_alloc_security()
Removed in favour of security_prepare_creds().
(*) security_cred_free(), ->cred_free()
New. Free security data attached to cred->security.
(*) security_prepare_creds(), ->cred_prepare()
New. Duplicate any security data attached to cred->security.
(*) security_commit_creds(), ->cred_commit()
New. Apply any security effects for the upcoming installation of new
security by commit_creds().
(*) security_task_post_setuid(), ->task_post_setuid()
Removed in favour of security_task_fix_setuid().
(*) security_task_fix_setuid(), ->task_fix_setuid()
Fix up the proposed new credentials for setuid(). This is used by
cap_set_fix_setuid() to implicitly adjust capabilities in line with
setuid() changes. Changes are made to the new credentials, rather
than the task itself as in security_task_post_setuid().
(*) security_task_reparent_to_init(), ->task_reparent_to_init()
Removed. Instead the task being reparented to init is referred
directly to init's credentials.
NOTE! This results in the loss of some state: SELinux's osid no
longer records the sid of the thread that forked it.
(*) security_key_alloc(), ->key_alloc()
(*) security_key_permission(), ->key_permission()
Changed. These now take cred pointers rather than task pointers to
refer to the security context.
(4) sys_capset().
This has been simplified and uses less locking. The LSM functions it
calls have been merged.
(5) reparent_to_kthreadd().
This gives the current thread the same credentials as init by simply using
commit_thread() to point that way.
(6) __sigqueue_alloc() and switch_uid()
__sigqueue_alloc() can't stop the target task from changing its creds
beneath it, so this function gets a reference to the currently applicable
user_struct which it then passes into the sigqueue struct it returns if
successful.
switch_uid() is now called from commit_creds(), and possibly should be
folded into that. commit_creds() should take care of protecting
__sigqueue_alloc().
(7) [sg]et[ug]id() and co and [sg]et_current_groups.
The set functions now all use prepare_creds(), commit_creds() and
abort_creds() to build and check a new set of credentials before applying
it (a sketch of this sequence is given after the end of this message).
security_task_set[ug]id() is called inside the prepared section. This
guarantees that nothing else will affect the creds until we've finished.
The calling of set_dumpable() has been moved into commit_creds().
Much of the functionality of set_user() has been moved into
commit_creds().
The get functions all simply access the data directly.
(8) security_task_prctl() and cap_task_prctl().
security_task_prctl() has been modified to return -ENOSYS if it doesn't
want to handle a function, or otherwise return the return value directly
rather than through an argument.
Additionally, cap_task_prctl() now prepares a new set of credentials, even
if it doesn't end up using it.
(9) Keyrings.
A number of changes have been made to the keyrings code:
(a) switch_uid_keyring(), copy_keys(), exit_keys() and suid_keys() have
all been dropped and built in to the credentials functions directly.
They may want separating out again later.
(b) key_alloc() and search_process_keyrings() now take a cred pointer
rather than a task pointer to specify the security context.
(c) copy_creds() gives a new thread within the same thread group a new
thread keyring if its parent had one, otherwise it discards the thread
keyring.
(d) The authorisation key now points directly to the credentials to extend
the search into, rather than pointing to the task that carries them.
(e) Installing thread, process or session keyrings causes a new set of
credentials to be created, even though it's not strictly necessary for
process or session keyrings (they're shared).
(10) Usermode helper.
The usermode helper code now carries a cred struct pointer in its
subprocess_info struct instead of a new session keyring pointer. This set
of credentials is derived from init_cred and installed on the new process
after it has been cloned.
call_usermodehelper_setup() allocates the new credentials and
call_usermodehelper_freeinfo() discards them if they haven't been used. A
special cred function (prepare_usermodeinfo_creds()) is provided
specifically for call_usermodehelper_setup() to call.
call_usermodehelper_setkeys() adjusts the credentials to sport the
supplied keyring as the new session keyring.
(11) SELinux.
SELinux has a number of changes, in addition to those to support the LSM
interface changes mentioned above:
(a) selinux_setprocattr() no longer does its check for whether the
current ptracer can access processes with the new SID inside the lock
that covers getting the ptracer's SID. Whilst this lock ensures that
the check is done with the ptracer pinned, the result is only valid
until the lock is released, so there's no point doing it inside the
lock.
(12) is_single_threaded().
This function has been extracted from selinux_setprocattr() and put into
a file of its own in the lib/ directory as join_session_keyring() now
wants to use it too.
The code in SELinux just checked to see whether a task shared mm_structs
with other tasks (CLONE_VM), but that isn't good enough. We really want
to know if they're part of the same thread group (CLONE_THREAD).
(13) nfsd.
The NFS server daemon now has to use the COW credentials to set the
credentials it is going to use. It really needs to pass the credentials
down to the functions it calls, but it can't do that until other patches
in this series have been applied.
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: James Morris <jmorris@namei.org>
Signed-off-by: James Morris <jmorris@namei.org>
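A minimal sketch of the prepare/modify/commit sequence described in item (7)
above. Only prepare_creds(), abort_creds(), commit_creds() and the
cred->securebits field are the real API; the function itself, its name and
the bit check are illustrative and not taken from this patch.

#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/securebits.h>

/* Illustrative only: apply one credential change via the COW credential API. */
static int example_update_securebits(unsigned int arg)
{
	struct cred *new = prepare_creds();	/* private copy of current's creds */

	if (!new)
		return -ENOMEM;
	if (arg & ~SECURE_ALL_BITS) {		/* reject unknown bits */
		abort_creds(new);		/* discard the never-published copy */
		return -EINVAL;
	}
	new->securebits = arg;			/* modify only the private copy */
	return commit_creds(new);		/* publish it; old creds freed via RCU */
}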
2008-11-13 23:39:23 +00:00
|
|
|
goto bad_fork_cleanup_mm;
|
2011-07-26 23:08:39 +00:00
|
|
|
retval = copy_io(clone_flags, p);
|
|
|
|
if (retval)
|
2008-01-24 07:52:45 +00:00
|
|
|
goto bad_fork_cleanup_namespaces;
|
2012-10-23 02:51:14 +00:00
|
|
|
retval = copy_thread(clone_flags, stack_start, stack_size, p);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (retval)
|
2008-01-24 07:52:45 +00:00
|
|
|
goto bad_fork_cleanup_io;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-10-19 06:40:07 +00:00
|
|
|
if (pid != &init_struct_pid) {
|
|
|
|
retval = -ENOMEM;
|
2013-08-22 18:39:16 +00:00
|
|
|
pid = alloc_pid(p->nsproxy->pid_ns_for_children);
|
2007-10-19 06:40:07 +00:00
|
|
|
if (!pid)
|
2008-01-24 07:52:45 +00:00
|
|
|
goto bad_fork_cleanup_io;
|
2007-10-19 06:40:07 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
|
|
|
|
/*
|
|
|
|
* Clear TID on mm_release()?
|
|
|
|
*/
|
2011-07-26 23:08:39 +00:00
|
|
|
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
|
2011-03-08 12:19:51 +00:00
|
|
|
#ifdef CONFIG_BLOCK
|
|
|
|
p->plug = NULL;
|
|
|
|
#endif
|
2007-10-17 06:27:30 +00:00
|
|
|
#ifdef CONFIG_FUTEX
|
2006-03-27 09:16:27 +00:00
|
|
|
p->robust_list = NULL;
|
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
p->compat_robust_list = NULL;
|
|
|
|
#endif
|
2006-06-27 09:54:58 +00:00
|
|
|
INIT_LIST_HEAD(&p->pi_state_list);
|
|
|
|
p->pi_state_cache = NULL;
|
2007-10-17 06:27:30 +00:00
|
|
|
#endif
|
[PATCH] Fix sigaltstack corruption among cloned threads
This patch fixes alternate signal stack corruption among cloned threads
with CLONE_SIGHAND (and CLONE_VM) for linux-2.6.16-rc6.
The value of alternate signal stack is currently inherited after a call of
clone(... CLONE_SIGHAND | CLONE_VM). But if sigaltstack is set by a
parent thread, and then multiple cloned child threads (plus the parent)
run a signal handler at the same time, the threads may conflict because
they all share the same alternate signal stack region, and eventually
get SIGSEGV. It's an undesirable race condition. Note that
child threads created from NPTL pthread_create() also hit this conflict
when the parent thread uses sigaltstack, without my patch.
To fix this problem, this patch clears the child threads' sigaltstack
information like exec(). This behavior follows the SUSv3 specification.
In SUSv3, pthread_create() says "The alternate stack shall not be inherited
(when new threads are initialized)". It means that sigaltstack should be
cleared when sigaltstack memory space is shared by cloned threads with
CLONE_SIGHAND.
Note that I chose "if (clone_flags & CLONE_SIGHAND)" line because:
- If there were no clone_flags check at all, fork() would not inherit sigaltstack either.
- CLONE_VM is another choice, but vfork() does not inherit sigaltstack.
- CLONE_SIGHAND implies CLONE_VM, and it looks suitable.
- CLONE_THREAD is another candidate, and includes CLONE_SIGHAND + CLONE_VM,
but this flag has a bit different semantics.
I decided to use CLONE_SIGHAND.
[ Changed to test for CLONE_VM && !CLONE_VFORK after discussion --Linus ]
Signed-off-by: GOTO Masanori <gotom@sanori.org>
Cc: Roland McGrath <roland@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Acked-by: Linus Torvalds <torvalds@osdl.org>
Cc: Ulrich Drepper <drepper@redhat.com>
Cc: Jakub Jelinek <jakub@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
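A userspace sketch of the behaviour this patch establishes, following the
CLONE_VM && !CLONE_VFORK rule above: a pthread should report SS_DISABLE
(alternate stack cleared), while a fork()ed child still inherits the parent's
alternate stack. Build with -pthread; the program and its output strings are
illustrative, not part of the patch.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static void *query_altstack(void *arg)
{
	stack_t ss;

	sigaltstack(NULL, &ss);			/* query only */
	printf("pthread:    %s\n",
	       (ss.ss_flags & SS_DISABLE) ? "cleared (SS_DISABLE)" : "inherited");
	return NULL;
}

int main(void)
{
	stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
	stack_t child_ss;
	pthread_t tid;

	sigaltstack(&ss, NULL);			/* parent installs an alternate stack */

	pthread_create(&tid, NULL, query_altstack, NULL);	/* CLONE_VM, no CLONE_VFORK */
	pthread_join(tid, NULL);

	if (fork() == 0) {			/* no CLONE_VM: still inherited */
		sigaltstack(NULL, &child_ss);
		printf("fork child: %s\n",
		       (child_ss.ss_flags & SS_DISABLE) ? "cleared (SS_DISABLE)" : "inherited");
		_exit(0);
	}
	wait(NULL);
	return 0;
}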
2006-03-14 05:20:44 +00:00
|
|
|
/*
|
|
|
|
* sigaltstack should be cleared when sharing the same VM
|
|
|
|
*/
|
|
|
|
if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
|
|
|
|
p->sas_ss_sp = p->sas_ss_size = 0;
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2009-12-16 00:47:16 +00:00
|
|
|
* Syscall tracing and stepping should be turned off in the
|
|
|
|
* child regardless of CLONE_PTRACE.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2009-12-16 00:47:16 +00:00
|
|
|
user_disable_single_step(p);
|
2005-04-16 22:20:36 +00:00
|
|
|
clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
|
[PATCH] UML Support - Ptrace: adds the host SYSEMU support, for UML and general usage
Jeff Dike <jdike@addtoit.com>,
Paolo 'Blaisorblade' Giarrusso <blaisorblade_spam@yahoo.it>,
Bodo Stroesser <bstroesser@fujitsu-siemens.com>
Adds a new ptrace(2) mode, called PTRACE_SYSEMU, resembling PTRACE_SYSCALL
except that the kernel does not execute the requested syscall; this is useful
to improve performance for virtual environments, like UML, which want to run
the syscall on their own.
In fact, using PTRACE_SYSCALL means stopping child execution twice, on entry
and on exit, and each time you also have two context switches; with SYSEMU you
avoid the 2nd stop and so save two context switches per syscall.
Also, some architectures don't have support in the host for changing the
syscall number via ptrace(), which is currently needed to skip syscall
execution (UML turns any syscall into getpid() to avoid it being executed on
the host). Fixing that is hard, while SYSEMU is easier to implement.
* This version of the patch includes some suggestions of Jeff Dike to avoid
adding any instructions to the syscall fast path, plus some other little
changes, by myself, to make it work even when the syscall is executed with
SYSENTER (but I'm unsure about them). It has been widely tested for quite a
lot of time.
* Various fixes were included to handle the various switches between
various states, i.e. when for instance a syscall entry is traced with one of
PT_SYSCALL / _SYSEMU / _SINGLESTEP and another one is used on exit.
Basically, this is done by remembering which one of them was used even after
the call to ptrace_notify().
* We're combining TIF_SYSCALL_EMU with TIF_SYSCALL_TRACE or TIF_SINGLESTEP
to make do_syscall_trace() notice that the current syscall was started with
SYSEMU on entry, so that no notification ought to be done in the exit path;
this is a bit of a hack, so this problem is solved in another way in next
patches.
* Also, the effects of the patch:
"Ptrace - i386: fix Syscall Audit interaction with singlestep"
are cancelled; they are restored back in the last patch of this series.
Detailed descriptions of the patches doing this kind of processing follow (but
I've already summed everything up).
* Fix behaviour when changing interception kind #1.
In do_syscall_trace(), we check the status of the TIF_SYSCALL_EMU flag
only after doing the debugger notification; but the debugger might have
changed the status of this flag because he continued execution with
PTRACE_SYSCALL, so this is wrong. This patch fixes it by saving the flag
status before calling ptrace_notify().
* Fix behaviour when changing interception kind #2:
avoid intercepting syscall on return when using SYSCALL again.
A guest process switching from using PTRACE_SYSEMU to PTRACE_SYSCALL
crashes.
The problem is in arch/i386/kernel/entry.S. The current SYSEMU patch
inhibits the syscall-handler to be called, but does not prevent
do_syscall_trace() to be called after this for syscall completion
interception.
The appended patch fixes this. It reuses the flag TIF_SYSCALL_EMU to
remember "we come from PTRACE_SYSEMU and now are in PTRACE_SYSCALL", since
the flag is unused in the depicted situation.
* Fix behaviour when changing interception kind #3:
avoid intercepting syscall on return when using SINGLESTEP.
When testing 2.6.9 with the skas3.v6 patch and my latest patch, I had
problems with singlestepping on UML in SKAS with SYSEMU. It looped,
receiving SIGTRAPs without moving forward. EIP of the traced process was
the same for all SIGTRAPs.
What's missing is to handle switching from PTRACE_SYSCALL_EMU to
PTRACE_SINGLESTEP in a way very similar to what is done for the change from
PTRACE_SYSCALL_EMU to PTRACE_SYSCALL_TRACE.
I.e., after calling ptrace(PTRACE_SYSEMU), on the return path, the debugger is
notified and then wakes up the process; the syscall is executed (or skipped,
when do_syscall_trace() returns 0, i.e. when using PTRACE_SYSEMU), and
do_syscall_trace() is called again. Since we are on the return path of a
SYSEMU'd syscall, if the wake up is performed through ptrace(PTRACE_SYSCALL),
we must still avoid notifying the parent of the syscall exit. Now, this
behaviour is extended even to resuming with PTRACE_SINGLESTEP.
Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
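A tracer-side sketch of the PTRACE_SYSEMU usage model described above,
assuming x86-64 and glibc's ptrace constants; error handling is omitted and
the emulated return value is arbitrary. The kernel stops the child once at
syscall entry and never executes the syscall itself.

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct user_regs_struct regs;
	int status;
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* hand control to the tracer */
		getpid();			/* intercepted below, never run by the kernel */
		_exit(0);
	}

	waitpid(child, &status, 0);		/* initial SIGSTOP */
	ptrace(PTRACE_SYSEMU, child, NULL, NULL);
	waitpid(child, &status, 0);		/* single stop, at syscall entry only */

	ptrace(PTRACE_GETREGS, child, NULL, &regs);
	printf("emulating syscall %llu\n", (unsigned long long)regs.orig_rax);
	regs.rax = 12345;			/* tracer-chosen result of the "syscall" */
	ptrace(PTRACE_SETREGS, child, NULL, &regs);

	ptrace(PTRACE_CONT, child, NULL, NULL);	/* let the child run to _exit() */
	waitpid(child, &status, 0);
	return 0;
}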
2005-09-03 22:57:18 +00:00
|
|
|
#ifdef TIF_SYSCALL_EMU
|
|
|
|
clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
|
|
|
|
#endif
|
2008-01-25 20:08:34 +00:00
|
|
|
clear_all_latency_tracing(p);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/* ok, now we should be set up.. */
|
2013-07-03 22:08:32 +00:00
|
|
|
p->pid = pid_nr(pid);
|
|
|
|
if (clone_flags & CLONE_THREAD) {
|
2012-03-14 18:55:38 +00:00
|
|
|
p->exit_signal = -1;
|
2013-07-03 22:08:32 +00:00
|
|
|
p->group_leader = current->group_leader;
|
|
|
|
p->tgid = current->tgid;
|
|
|
|
} else {
|
|
|
|
if (clone_flags & CLONE_PARENT)
|
|
|
|
p->exit_signal = current->group_leader->exit_signal;
|
|
|
|
else
|
|
|
|
p->exit_signal = (clone_flags & CSIGNAL);
|
|
|
|
p->group_leader = p;
|
|
|
|
p->tgid = p->pid;
|
|
|
|
}
|
2012-03-14 18:55:38 +00:00
|
|
|
|
writeback: per task dirty rate limit
Add two fields to task_struct.
1) account dirtied pages in the individual tasks, for accuracy
2) per-task balance_dirty_pages() call intervals, for flexibility
The balance_dirty_pages() call interval (ie. nr_dirtied_pause) will
scale near-sqrt to the safety gap between dirty pages and threshold.
The main problem with per-task nr_dirtied is that, if 1k+ tasks start dirtying
pages at exactly the same time, each task will be assigned a large
initial nr_dirtied_pause, so that the dirty threshold will be exceeded
long before each task reaches its nr_dirtied_pause and hence calls
balance_dirty_pages().
The solution is to watch for the number of pages dirtied on each CPU in
between the calls into balance_dirty_pages(). If it exceeds ratelimit_pages
(3% dirty threshold), force call balance_dirty_pages() for a chance to
set bdi->dirty_exceeded. In normal situations, this safeguarding
condition is not expected to trigger at all.
On the sqrt in dirty_poll_interval():
It will serve as an initial guess when dirty pages are still in the
freerun area.
When dirty pages are floating inside the dirty control scope [freerun,
limit], a followup patch will use some refined dirty poll interval to
get the desired pause time.
thresh-dirty (MB) sqrt
1 16
2 22
4 32
8 45
16 64
32 90
64 128
128 181
256 256
512 362
1024 512
The above table means, given 1MB (or 1GB) gap and the dd tasks polling
balance_dirty_pages() on every 16 (or 512) pages, the dirty limit won't
be exceeded as long as there are less than 16 (or 512) concurrent dd's.
So sqrt naturally leads to less overheads and more safe concurrent tasks
for large memory servers, which have large (thresh-freerun) gaps.
peter: keep the per-CPU ratelimit for safeguarding the 1k+ tasks case
CC: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Andrea Righi <andrea@betterlinux.com>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
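The table above is just the square root of the gap expressed in pages. A
quick userspace sketch reproducing it, assuming 4 KiB pages (1 MB = 256
pages), so e.g. 8 MB -> 2048 pages -> 45; build with -lm. This only mirrors
the arithmetic, not the kernel's integer implementation.

#include <math.h>
#include <stdio.h>

int main(void)
{
	for (int mb = 1; mb <= 1024; mb *= 2) {
		long pages = (long)mb << 8;	/* MB -> 4 KiB pages */

		printf("%5d MB gap -> poll every %3.0f dirtied pages\n",
		       mb, floor(sqrt((double)pages)));
	}
	return 0;
}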
2011-06-12 00:10:12 +00:00
|
|
|
p->nr_dirtied = 0;
|
|
|
|
p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
|
2011-06-12 01:25:42 +00:00
|
|
|
p->dirty_paused_when = 0;
|
2011-06-12 00:10:12 +00:00
|
|
|
|
2013-11-13 14:36:12 +00:00
|
|
|
p->pdeath_signal = 0;
|
2006-03-29 00:11:25 +00:00
|
|
|
INIT_LIST_HEAD(&p->thread_group);
|
2012-06-27 05:24:13 +00:00
|
|
|
p->task_works = NULL;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-07-03 22:08:32 +00:00
|
|
|
/*
|
|
|
|
* Make it visible to the rest of the system, but don't wake it up yet.
|
|
|
|
* Need tasklist lock for parent etc handling!
|
|
|
|
*/
|
2005-04-16 22:20:36 +00:00
|
|
|
write_lock_irq(&tasklist_lock);
|
|
|
|
|
|
|
|
/* CLONE_PARENT re-uses the old parent */
|
2009-03-02 21:58:45 +00:00
|
|
|
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
|
2005-04-16 22:20:36 +00:00
|
|
|
p->real_parent = current->real_parent;
|
2009-03-02 21:58:45 +00:00
|
|
|
p->parent_exec_id = current->parent_exec_id;
|
|
|
|
} else {
|
2005-04-16 22:20:36 +00:00
|
|
|
p->real_parent = current;
|
2009-03-02 21:58:45 +00:00
|
|
|
p->parent_exec_id = current->self_exec_id;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-02-15 19:13:24 +00:00
|
|
|
spin_lock(¤t->sighand->siglock);
|
2006-03-29 00:11:26 +00:00
|
|
|
|
2014-06-27 22:18:48 +00:00
|
|
|
/*
|
|
|
|
* Copy seccomp details explicitly here, in case they were changed
|
|
|
|
* before holding sighand lock.
|
|
|
|
*/
|
|
|
|
copy_seccomp(p);
|
|
|
|
|
2006-03-29 00:11:26 +00:00
|
|
|
/*
|
|
|
|
* Process group and session signals need to be delivered to just the
|
|
|
|
* parent before the fork or both the parent and the child after the
|
|
|
|
* fork. Restart if a signal comes in before we add the new process to
|
|
|
|
* its process group.
|
|
|
|
* A fatal signal pending means that current will exit, so the new
|
|
|
|
* thread can't slip out of an OOM kill (or normal SIGKILL).
|
2011-07-26 23:08:39 +00:00
|
|
|
*/
|
2007-10-18 10:06:07 +00:00
|
|
|
recalc_sigpending();
|
2006-03-29 00:11:26 +00:00
|
|
|
if (signal_pending(current)) {
|
|
|
|
spin_unlock(¤t->sighand->siglock);
|
|
|
|
write_unlock_irq(&tasklist_lock);
|
|
|
|
retval = -ERESTARTNOINTR;
|
2009-06-02 20:39:48 +00:00
|
|
|
goto bad_fork_free_pid;
|
2006-03-29 00:11:26 +00:00
|
|
|
}
|
|
|
|
|
2006-03-29 00:11:07 +00:00
|
|
|
if (likely(p->pid)) {
|
2011-06-17 14:50:38 +00:00
|
|
|
ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
|
2006-03-29 00:11:07 +00:00
|
|
|
|
2013-07-03 22:08:31 +00:00
|
|
|
init_task_pid(p, PIDTYPE_PID, pid);
|
2006-03-29 00:11:07 +00:00
|
|
|
if (thread_group_leader(p)) {
|
2013-07-03 22:08:31 +00:00
|
|
|
init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
|
|
|
|
init_task_pid(p, PIDTYPE_SID, task_session(current));
|
|
|
|
|
2010-07-13 00:10:36 +00:00
|
|
|
if (is_child_reaper(pid)) {
|
2010-03-02 22:51:53 +00:00
|
|
|
ns_of_pid(pid)->child_reaper = p;
|
2010-07-13 00:10:36 +00:00
|
|
|
p->signal->flags |= SIGNAL_UNKILLABLE;
|
|
|
|
}
|
2006-03-29 00:11:07 +00:00
|
|
|
|
2008-02-08 12:19:19 +00:00
|
|
|
p->signal->leader_pid = pid;
|
2008-10-13 09:37:26 +00:00
|
|
|
p->signal->tty = tty_kref_get(current->signal->tty);
|
2009-12-17 23:27:15 +00:00
|
|
|
list_add_tail(&p->sibling, &p->real_parent->children);
|
2006-04-19 05:20:16 +00:00
|
|
|
list_add_tail_rcu(&p->tasks, &init_task.tasks);
|
2013-07-03 22:08:31 +00:00
|
|
|
attach_pid(p, PIDTYPE_PGID);
|
|
|
|
attach_pid(p, PIDTYPE_SID);
|
2010-12-08 15:22:55 +00:00
|
|
|
__this_cpu_inc(process_counts);
|
2013-07-03 22:08:30 +00:00
|
|
|
} else {
|
|
|
|
current->signal->nr_threads++;
|
|
|
|
atomic_inc(¤t->signal->live);
|
|
|
|
atomic_inc(¤t->signal->sigcnt);
|
|
|
|
list_add_tail_rcu(&p->thread_group,
|
|
|
|
&p->group_leader->thread_group);
|
introduce for_each_thread() to replace the buggy while_each_thread()
while_each_thread() and next_thread() should die, almost every lockless
usage is wrong.
1. Unless g == current, the lockless while_each_thread() is not safe.
while_each_thread(g, t) can loop forever if g exits, next_thread()
can't reach the unhashed thread in this case. Note that this can
happen even if g is the group leader, since it can exec.
2. Even if while_each_thread() itself was correct, people often use
it wrongly.
It was never safe to just take rcu_read_lock() and loop unless
you verify that pid_alive(g) == T, even the first next_thread()
can point to the already freed/reused memory.
This patch adds signal_struct->thread_head and task->thread_node to
create the normal rcu-safe list with the stable head. The new
for_each_thread(g, t) helper is always safe under rcu_read_lock() as
long as this task_struct can't go away.
Note: of course it is ugly to have both task_struct->thread_node and the
old task_struct->thread_group, we will kill it later, after we change
the users of while_each_thread() to use for_each_thread().
Perhaps we can kill it even before we convert all users, we can
reimplement next_thread(t) using the new thread_head/thread_node. But
we can't do this right now because this will lead to subtle behavioural
changes. For example, do/while_each_thread() always sees at least one
task, while for_each_thread() can do nothing if the whole thread group
has died. Or thread_group_empty(), currently its semantics is not clear
unless thread_group_leader(p) and we need to audit the callers before we
can change it.
So this patch adds the new interface which has to coexist with the old
one for some time, hopefully the next changes will be more or less
straightforward and the old one will go away soon.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Sergey Dyasly <dserrg@gmail.com>
Tested-by: Sergey Dyasly <dserrg@gmail.com>
Reviewed-by: Sameer Nanda <snanda@chromium.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mandeep Singh Baines <msb@chromium.org>
Cc: "Ma, Xindong" <xindong.ma@intel.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: "Tu, Xiaobing" <xiaobing.tu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
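A minimal sketch of the intended replacement pattern, using the
for_each_thread() helper and the RCU rule stated above (the group task must
be pinned by the caller); count_threads() is an illustrative name, not from
the patch.

#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Illustrative only: walk a thread group safely under RCU. */
static int count_threads(struct task_struct *g)
{
	struct task_struct *t;
	int n = 0;

	rcu_read_lock();
	for_each_thread(g, t)		/* safe even if threads exit meanwhile */
		n++;
	rcu_read_unlock();

	return n;			/* may be 0 if the whole group has died */
}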
2014-01-21 23:49:56 +00:00
|
|
|
list_add_tail_rcu(&p->thread_node,
|
|
|
|
&p->signal->thread_head);
|
2006-03-29 00:11:07 +00:00
|
|
|
}
|
2013-07-03 22:08:31 +00:00
|
|
|
attach_pid(p, PIDTYPE_PID);
|
2006-03-29 00:11:07 +00:00
|
|
|
nr_threads++;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
total_forks++;
|
2006-02-15 19:13:24 +00:00
|
|
|
spin_unlock(¤t->sighand->siglock);
|
2014-04-13 18:58:54 +00:00
|
|
|
syscall_tracepoint_update(p);
|
2005-04-16 22:20:36 +00:00
|
|
|
write_unlock_irq(&tasklist_lock);
|
2014-04-13 18:58:54 +00:00
|
|
|
|
2005-11-28 21:43:48 +00:00
|
|
|
proc_fork_connector(p);
|
2007-10-19 06:39:36 +00:00
|
|
|
cgroup_post_fork(p);
|
2011-05-26 23:25:18 +00:00
|
|
|
if (clone_flags & CLONE_THREAD)
|
2011-12-13 02:12:21 +00:00
|
|
|
threadgroup_change_end(current);
|
perf: Do the big rename: Performance Counters -> Performance Events
Bye-bye Performance Counters, welcome Performance Events!
In the past few months the perfcounters subsystem has grown out of its
initial role of counting hardware events, and has become (and is
becoming) a much broader generic event enumeration, reporting, logging,
monitoring, analysis facility.
Naming its core object 'perf_counter' and naming the subsystem
'perfcounters' has become more and more of a misnomer. With pending
code like hw-breakpoints support the 'counter' name is less and
less appropriate.
All in one, we've decided to rename the subsystem to 'performance
events' and to propagate this rename through all fields, variables
and API names. (in an ABI compatible fashion)
The word 'event' is also a bit shorter than 'counter' - which makes
it slightly more convenient to write/handle as well.
Thanks goes to Stephane Eranian who first observed this misnomer and
suggested a rename.
User-space tooling and ABI compatibility is not affected - this patch
should be function-invariant. (Also, defconfigs were not touched to
keep the size down.)
This patch has been generated via the following script:
	FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')

	sed -i \
		-e 's/PERF_EVENT_/PERF_RECORD_/g' \
		-e 's/PERF_COUNTER/PERF_EVENT/g' \
		-e 's/perf_counter/perf_event/g' \
		-e 's/nb_counters/nb_events/g' \
		-e 's/swcounter/swevent/g' \
		-e 's/tpcounter_event/tp_event/g' \
		$FILES

	for N in $(find . -name perf_counter.[ch]); do
		M=$(echo $N | sed 's/perf_counter/perf_event/g')
		mv $N $M
	done

	FILES=$(find . -name perf_event.*)

	sed -i \
		-e 's/COUNTER_MASK/REG_MASK/g' \
		-e 's/COUNTER/EVENT/g' \
		-e 's/\<event\>/event_id/g' \
		-e 's/counter/event/g' \
		-e 's/Counter/Event/g' \
		$FILES
... to keep it as correct as possible. This script can also be
used by anyone who has pending perfcounters patches - it converts
a Linux kernel tree over to the new naming. We tried to time this
change to the point in time where the amount of pending patches
is the smallest: the end of the merge window.
Namespace clashes were fixed up in a preparatory patch - and some
stylistic fallout will be fixed up in a subsequent patch.
( NOTE: 'counters' are still the proper terminology when we deal
with hardware registers - and these sed scripts are a bit
over-eager in renaming them. I've undone some of that, but
in case there's something left where 'counter' would be
better than 'event' we can undo that on an individual basis
instead of touching an otherwise nicely automated patch. )
Suggested-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-09-21 10:02:48 +00:00
|
|
|
perf_event_fork(p);
|
2012-01-10 23:08:09 +00:00
|
|
|
|
|
|
|
trace_task_newtask(p, clone_flags);
|
2013-10-16 17:39:37 +00:00
|
|
|
uprobe_copy_process(p, clone_flags);
|
2012-01-10 23:08:09 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
return p;
|
|
|
|
|
2007-10-19 06:40:07 +00:00
|
|
|
bad_fork_free_pid:
|
|
|
|
if (pid != &init_struct_pid)
|
|
|
|
free_pid(pid);
|
2008-01-24 07:52:45 +00:00
|
|
|
bad_fork_cleanup_io:
|
2009-12-04 13:52:42 +00:00
|
|
|
if (p->io_context)
|
|
|
|
exit_io_context(p);
|
2006-10-02 09:18:06 +00:00
|
|
|
bad_fork_cleanup_namespaces:
|
2007-01-30 21:35:18 +00:00
|
|
|
exit_task_namespaces(p);
|
2005-04-16 22:20:36 +00:00
|
|
|
bad_fork_cleanup_mm:
|
2011-11-01 00:07:15 +00:00
|
|
|
if (p->mm)
|
2005-04-16 22:20:36 +00:00
|
|
|
mmput(p->mm);
|
|
|
|
bad_fork_cleanup_signal:
|
clone(): fix race between copy_process() and de_thread()
Spotted by Hiroshi Shimamoto who also provided the test-case below.
copy_process() uses signal->count as a reference counter, but it is not.
This test case
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>
	#include <stdio.h>
	#include <errno.h>
	#include <pthread.h>

	void *null_thread(void *p)
	{
		for (;;)
			sleep(1);
		return NULL;
	}

	void *exec_thread(void *p)
	{
		execl("/bin/true", "/bin/true", NULL);
		return null_thread(p);
	}

	int main(int argc, char **argv)
	{
		for (;;) {
			pid_t pid;
			int ret, status;

			pid = fork();
			if (pid < 0)
				break;
			if (!pid) {
				pthread_t tid;

				pthread_create(&tid, NULL, exec_thread, NULL);
				for (;;)
					pthread_create(&tid, NULL, null_thread, NULL);
			}
			do {
				ret = waitpid(pid, &status, 0);
			} while (ret == -1 && errno == EINTR);
		}
		return 0;
	}
quickly creates an unkillable task.
If copy_process(CLONE_THREAD) races with de_thread()
copy_signal()->atomic(signal->count) breaks the signal->notify_count
logic, and the execing thread can hang forever in kernel space.
Change copy_process() to increment count/live only when we know for sure
we can't fail. In this case the forked thread will take care of its
reference to signal correctly.
If copy_process() fails, check the CLONE_THREAD flag. If it is set - do
nothing, the counters were not changed and current belongs to the same
thread group. If it is not set, ->signal must be released in any case
(and ->count must be == 1), the forked child is the only thread in the
thread group.
We need more cleanups here, in particular signal->count should not be used
by de_thread/__exit_signal at all. This patch only fixes the bug.
Reported-by: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Tested-by: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Roland McGrath <roland@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-08-26 21:29:24 +00:00
|
|
|
if (!(clone_flags & CLONE_THREAD))
|
2011-01-05 10:16:04 +00:00
|
|
|
free_signal_struct(p->signal);
|
2005-04-16 22:20:36 +00:00
|
|
|
bad_fork_cleanup_sighand:
|
2006-03-29 00:11:27 +00:00
|
|
|
__cleanup_sighand(p->sighand);
|
2005-04-16 22:20:36 +00:00
|
|
|
bad_fork_cleanup_fs:
|
|
|
|
exit_fs(p); /* blocking */
|
|
|
|
bad_fork_cleanup_files:
|
|
|
|
exit_files(p); /* blocking */
|
|
|
|
bad_fork_cleanup_semundo:
|
|
|
|
exit_sem(p);
|
|
|
|
bad_fork_cleanup_audit:
|
|
|
|
audit_free(p);
|
|
|
|
bad_fork_cleanup_policy:
|
2009-09-21 10:02:48 +00:00
|
|
|
perf_event_free_task(p);
|
2005-04-16 22:20:36 +00:00
|
|
|
#ifdef CONFIG_NUMA
|
2008-04-28 09:13:08 +00:00
|
|
|
mpol_put(p->mempolicy);
|
2014-03-28 07:18:27 +00:00
|
|
|
bad_fork_cleanup_threadgroup_lock:
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif
|
2011-05-26 23:25:18 +00:00
|
|
|
if (clone_flags & CLONE_THREAD)
|
2011-12-13 02:12:21 +00:00
|
|
|
threadgroup_change_end(current);
|
2006-09-01 04:27:38 +00:00
|
|
|
delayacct_tsk_free(p);
|
2005-11-14 00:06:55 +00:00
|
|
|
module_put(task_thread_info(p)->exec_domain->module);
|
2005-04-16 22:20:36 +00:00
|
|
|
bad_fork_cleanup_count:
|
2008-11-13 23:39:23 +00:00
|
|
|
atomic_dec(&p->cred->user->processes);
|
2009-09-02 08:13:40 +00:00
|
|
|
exit_creds(p);
|
2005-04-16 22:20:36 +00:00
|
|
|
bad_fork_free:
|
|
|
|
free_task(p);
|
2006-01-08 09:04:02 +00:00
|
|
|
fork_out:
|
|
|
|
return ERR_PTR(retval);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2010-05-26 21:44:11 +00:00
|
|
|
static inline void init_idle_pids(struct pid_link *links)
|
|
|
|
{
|
|
|
|
enum pid_type type;
|
|
|
|
|
|
|
|
for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
|
|
|
|
INIT_HLIST_NODE(&links[type].node); /* not really needed */
|
|
|
|
links[type].pid = &init_struct_pid;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-06-19 18:53:51 +00:00
|
|
|
struct task_struct *fork_idle(int cpu)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2006-07-03 07:25:41 +00:00
|
|
|
struct task_struct *task;
|
2012-10-23 02:52:26 +00:00
|
|
|
task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0);
|
2010-05-26 21:44:11 +00:00
|
|
|
if (!IS_ERR(task)) {
|
|
|
|
init_idle_pids(task->pids);
|
2006-11-25 19:09:34 +00:00
|
|
|
init_idle(task, cpu);
|
2010-05-26 21:44:11 +00:00
|
|
|
}
|
2006-03-29 00:11:07 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
return task;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ok, this is the main fork-routine.
|
|
|
|
*
|
|
|
|
* It copies the process, and if successful kick-starts
|
|
|
|
* it and waits for it to finish using the VM if required.
|
|
|
|
*/
|
|
|
|
long do_fork(unsigned long clone_flags,
|
|
|
|
unsigned long stack_start,
|
|
|
|
unsigned long stack_size,
|
|
|
|
int __user *parent_tidptr,
|
|
|
|
int __user *child_tidptr)
|
|
|
|
{
|
|
|
|
struct task_struct *p;
|
|
|
|
int trace = 0;
|
[PATCH] pidhash: Refactor the pid hash table
Simplifies the code, reduces the need for 4 pid hash tables, and makes the
code more capable.
In the discussions I had with Oleg it was felt that to a large extent the
cleanup itself justified the work. Having struct pid dynamically
allocated meant we could create the hash table entry when the pid was
allocated and free the hash table entry when the pid was freed. Instead of
playing with the hash lists when ever a process would attach or detach to a
process.
For myself the fact that it gave what my previous task_ref patch gave for free
with simpler code was a big win. The problem is that if you hold a reference
to struct task_struct you lock in 10K of low memory. If you do that in a user
controllable way like /proc does, with an unprivileged but hostile user space
application with typical resource limits of 1000 fds and 100 processes I can
trigger the OOM killer by consuming all of low memory with task structs, on a
machine with 1GB of low memory.
If I instead hold a reference to struct pid which holds a pointer to my
task_struct, I don't suffer from that problem because struct pid is 2 orders
of magnitude smaller. In fact struct pid is small enough that most other
kernel data structures dwarf it, so simply limiting the number of referring
data structures is enough to prevent exhaustion of low memory.
This splits the current struct pid into two structures, struct pid and struct
pid_link, and reduces our number of hash tables from PIDTYPE_MAX to just one.
struct pid_link is the per process linkage into the hash tables and lives in
struct task_struct. struct pid is given an independent lifetime, and holds
pointers to each of the pid types.
The independent life of struct pid simplifies attach_pid, and detach_pid,
because we are always manipulating the list of pids and not the hash table.
In addition, giving struct pid an independent life makes the concept much
more powerful.
Kernel data structures can now embed a struct pid * instead of a pid_t and
not suffer from pid wrap around problems or from keeping unnecessarily
large amounts of memory allocated.
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
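A sketch of the usage the message argues for: embed a refcounted struct pid *
instead of a pid_t or a task_struct reference, and resolve it back to a task
only while it is actually needed. struct watcher and its helpers are
illustrative names; get_task_pid(), pid_task() and put_pid() are the real API.

#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

struct watcher {
	struct pid *pid;			/* small, refcounted, pid-wrap safe */
};

static void watcher_set(struct watcher *w, struct task_struct *task)
{
	w->pid = get_task_pid(task, PIDTYPE_PID);
}

static void watcher_poke(struct watcher *w)
{
	struct task_struct *task;

	rcu_read_lock();
	task = pid_task(w->pid, PIDTYPE_PID);	/* NULL once the task has gone */
	if (task)
		wake_up_process(task);
	rcu_read_unlock();
}

static void watcher_clear(struct watcher *w)
{
	put_pid(w->pid);
	w->pid = NULL;
}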
2006-03-31 10:31:42 +00:00
|
|
|
long nr;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-07-26 02:45:47 +00:00
|
|
|
/*
|
2011-06-17 14:50:38 +00:00
|
|
|
* Determine whether and which event to report to ptracer. When
|
|
|
|
* called from kernel_thread or CLONE_UNTRACED is explicitly
|
|
|
|
* requested, no event is reported; otherwise, report if the event
|
|
|
|
* for the type of forking is enabled.
|
2008-07-26 02:45:47 +00:00
|
|
|
*/
|
2012-10-23 03:10:08 +00:00
|
|
|
if (!(clone_flags & CLONE_UNTRACED)) {
|
2011-06-17 14:50:38 +00:00
|
|
|
if (clone_flags & CLONE_VFORK)
|
|
|
|
trace = PTRACE_EVENT_VFORK;
|
|
|
|
else if ((clone_flags & CSIGNAL) != SIGCHLD)
|
|
|
|
trace = PTRACE_EVENT_CLONE;
|
|
|
|
else
|
|
|
|
trace = PTRACE_EVENT_FORK;
|
|
|
|
|
|
|
|
if (likely(!ptrace_event_enabled(current, trace)))
|
|
|
|
trace = 0;
|
|
|
|
}
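The tracer-side contract of the block above, as a hedged userspace sketch: the
event is reported only if the matching PTRACE_O_TRACE* option was enabled, and
the new child's pid is fetched with PTRACE_GETEVENTMSG. handle_stop() is an
illustrative fragment, not a complete tracer.

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

/* Typically enabled once, right after attach/seize:
 *	ptrace(PTRACE_SETOPTIONS, tracee, NULL,
 *	       PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACECLONE);
 */
static void handle_stop(pid_t tracee, int status)
{
	unsigned long new_pid;

	if (status >> 8 == (SIGTRAP | (PTRACE_EVENT_FORK  << 8)) ||
	    status >> 8 == (SIGTRAP | (PTRACE_EVENT_VFORK << 8)) ||
	    status >> 8 == (SIGTRAP | (PTRACE_EVENT_CLONE << 8))) {
		ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &new_pid);
		printf("tracee %d created child %lu\n", (int)tracee, new_pid);
	}
	ptrace(PTRACE_CONT, tracee, NULL, 0);
}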
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2012-10-23 02:52:26 +00:00
|
|
|
p = copy_process(clone_flags, stack_start, stack_size,
|
2008-07-26 02:45:47 +00:00
|
|
|
child_tidptr, NULL, trace);
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Do this prior to waking up the new thread - the thread pointer
|
|
|
|
* might get invalid after that point, if the thread exits quickly.
|
|
|
|
*/
|
|
|
|
if (!IS_ERR(p)) {
|
|
|
|
struct completion vfork;
|
2014-06-06 21:36:42 +00:00
|
|
|
struct pid *pid;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
tracing, sched: LTTng instrumentation - scheduler
Instrument the scheduler activity (sched_switch, migration, wakeups,
wait for a task, signal delivery) and process/thread
creation/destruction (fork, exit, kthread stop). Actually, kthread
creation is not instrumented in this patch because it is architecture
dependent. It allows to connect tracers such as ftrace which detects
scheduling latencies, good/bad scheduler decisions. Tools like LTTng can
export this scheduler information along with instrumentation of the rest
of the kernel activity to perform post-mortem analysis on the scheduler
activity.
About the performance impact of tracepoints (which is comparable to
markers), even without immediate values optimizations, tests done by
Hideo Aoki on ia64 show no regression. His test case was using hackbench
on a kernel where scheduler instrumentation (about 5 events in core
scheduler code) was added. See the "Tracepoints" patch header for
performance result detail.
Changelog :
- Change instrumentation location and parameter to match ftrace
instrumentation, previously done with kernel markers.
[ mingo@elte.hu: conflict resolutions ]
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Acked-by: 'Peter Zijlstra' <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-07-18 16:16:17 +00:00
|
|
|
trace_sched_process_fork(current, p);
|
|
|
|
|
2014-06-06 21:36:42 +00:00
|
|
|
pid = get_task_pid(p, PIDTYPE_PID);
|
|
|
|
nr = pid_vnr(pid);
|
2007-10-19 06:40:10 +00:00
|
|
|
|
|
|
|
if (clone_flags & CLONE_PARENT_SETTID)
|
|
|
|
put_user(nr, parent_tidptr);
|
2007-10-19 06:39:53 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
if (clone_flags & CLONE_VFORK) {
|
|
|
|
p->vfork_done = &vfork;
|
|
|
|
init_completion(&vfork);
|
2012-03-05 22:59:13 +00:00
|
|
|
get_task_struct(p);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2011-05-11 16:18:05 +00:00
|
|
|
wake_up_new_task(p);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-06-17 14:50:38 +00:00
|
|
|
/* forking complete and child started to run, tell ptracer */
|
|
|
|
if (unlikely(trace))
|
2014-06-06 21:36:42 +00:00
|
|
|
ptrace_event_pid(trace, pid);
|
2008-07-26 02:45:47 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
if (clone_flags & CLONE_VFORK) {
|
2012-03-05 22:59:13 +00:00
|
|
|
if (!wait_for_vfork_done(p, &vfork))
|
2014-06-06 21:36:42 +00:00
|
|
|
ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2014-06-06 21:36:42 +00:00
|
|
|
|
|
|
|
put_pid(pid);
|
2005-04-16 22:20:36 +00:00
|
|
|
} else {
|
2006-03-31 10:31:42 +00:00
|
|
|
nr = PTR_ERR(p);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-03-31 10:31:42 +00:00
|
|
|
return nr;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2012-09-21 23:55:31 +00:00
|
|
|
/*
|
|
|
|
* Create a kernel thread.
|
|
|
|
*/
|
|
|
|
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
|
|
|
|
{
|
2012-10-23 03:10:08 +00:00
|
|
|
return do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
|
2012-09-21 23:55:31 +00:00
|
|
|
(unsigned long)arg, NULL, NULL);
|
|
|
|
}
|
|
|
|
|
2012-10-23 17:17:59 +00:00
|
|
|
#ifdef __ARCH_WANT_SYS_FORK
|
|
|
|
SYSCALL_DEFINE0(fork)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_MMU
|
2012-10-23 03:10:08 +00:00
|
|
|
return do_fork(SIGCHLD, 0, 0, NULL, NULL);
|
2012-10-23 17:17:59 +00:00
|
|
|
#else
|
|
|
|
/* can not support in nommu mode */
|
2014-01-23 23:55:47 +00:00
|
|
|
return -EINVAL;
|
2012-10-23 17:17:59 +00:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef __ARCH_WANT_SYS_VFORK
|
|
|
|
SYSCALL_DEFINE0(vfork)
|
|
|
|
{
|
2014-01-23 23:55:47 +00:00
|
|
|
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
|
2012-10-23 17:17:59 +00:00
|
|
|
0, NULL, NULL);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef __ARCH_WANT_SYS_CLONE
|
|
|
|
#ifdef CONFIG_CLONE_BACKWARDS
|
|
|
|
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
|
|
|
|
int __user *, parent_tidptr,
|
|
|
|
int, tls_val,
|
|
|
|
int __user *, child_tidptr)
|
|
|
|
#elif defined(CONFIG_CLONE_BACKWARDS2)
|
|
|
|
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
|
|
|
|
int __user *, parent_tidptr,
|
|
|
|
int __user *, child_tidptr,
|
|
|
|
int, tls_val)
|
2013-08-13 23:00:53 +00:00
|
|
|
#elif defined(CONFIG_CLONE_BACKWARDS3)
|
|
|
|
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
|
|
|
|
int, stack_size,
|
|
|
|
int __user *, parent_tidptr,
|
|
|
|
int __user *, child_tidptr,
|
|
|
|
int, tls_val)
|
2012-10-23 17:17:59 +00:00
|
|
|
#else
|
|
|
|
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
|
|
|
|
int __user *, parent_tidptr,
|
|
|
|
int __user *, child_tidptr,
|
|
|
|
int, tls_val)
|
|
|
|
#endif
|
|
|
|
{
|
2013-01-21 20:25:54 +00:00
|
|
|
return do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr);
|
2012-10-23 17:17:59 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2006-01-11 21:46:15 +00:00
|
|
|
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
|
|
|
|
#define ARCH_MIN_MMSTRUCT_ALIGN 0
|
|
|
|
#endif
|
|
|
|
|
2008-07-26 02:45:34 +00:00
|
|
|
static void sighand_ctor(void *data)
|
2006-03-29 00:11:12 +00:00
|
|
|
{
|
|
|
|
struct sighand_struct *sighand = data;
|
|
|
|
|
2007-05-17 05:10:57 +00:00
|
|
|
spin_lock_init(&sighand->siglock);
|
2007-09-20 19:40:16 +00:00
|
|
|
init_waitqueue_head(&sighand->signalfd_wqh);
|
2006-03-29 00:11:12 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
void __init proc_caches_init(void)
|
|
|
|
{
|
|
|
|
sighand_cachep = kmem_cache_create("sighand_cache",
|
|
|
|
sizeof(struct sighand_struct), 0,
|
kmemcheck: add mm functions
With kmemcheck enabled, the slab allocator needs to do this:
1. Tell kmemcheck to allocate the shadow memory which stores the status of
each byte in the allocation proper, e.g. whether it is initialized or
uninitialized.
2. Tell kmemcheck which parts of memory should be marked uninitialized.
There are actually a few more states, such as "not yet allocated" and
"recently freed".
If a slab cache is set up using the SLAB_NOTRACK flag, it will never return
memory that can take page faults because of kmemcheck.
If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still
request memory with the __GFP_NOTRACK flag. This does not prevent the page
faults from occurring; it merely marks the object in question as being
initialized so that no warnings will ever be produced for this object.
In addition to (and in contrast to) __GFP_NOTRACK, the
__GFP_NOTRACK_FALSE_POSITIVE flag indicates that the allocation should
not be tracked _because_ it would produce a false positive. Their values
are identical, but need not be so in the future (for example, we could now
enable/disable false positives with a config option).
Parts of this patch were contributed by Pekka Enberg but merged for
atomicity.
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
2008-05-31 13:56:17 +00:00
|
|
|
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
|
|
|
|
SLAB_NOTRACK, sighand_ctor);
|
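As a hedged illustration of the two opt-out mechanisms the kmemcheck commit message above describes (the example struct, cache and init function are invented; kmem_cache_create(), kmalloc(), SLAB_NOTRACK and __GFP_NOTRACK are the real interfaces under discussion):

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/gfp.h>

struct example { int value; };

static struct kmem_cache *example_cachep;

static int __init example_notrack_init(void)
{
	struct example *obj;

	/* Whole cache opted out: kmemcheck never tracks these objects. */
	example_cachep = kmem_cache_create("example_cache",
			sizeof(struct example), 0,
			SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
	if (!example_cachep)
		return -ENOMEM;

	/* Per-allocation opt-out: the object is marked initialized up
	 * front, so kmemcheck emits no warnings for it. */
	obj = kmalloc(sizeof(*obj), GFP_KERNEL | __GFP_NOTRACK);
	kfree(obj);

	return 0;
}

The caches created below in proc_caches_init() use the first mechanism, passing SLAB_NOTRACK at cache creation time.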
2005-04-16 22:20:36 +00:00
|
|
|
signal_cachep = kmem_cache_create("signal_cache",
|
|
|
|
sizeof(struct signal_struct), 0,
|
kmemcheck: add mm functions
2008-05-31 13:56:17 +00:00
|
|
|
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
|
2007-07-20 01:11:58 +00:00
|
|
|
files_cachep = kmem_cache_create("files_cache",
|
2005-04-16 22:20:36 +00:00
|
|
|
sizeof(struct files_struct), 0,
|
kmemcheck: add mm functions
2008-05-31 13:56:17 +00:00
|
|
|
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
|
2007-07-20 01:11:58 +00:00
|
|
|
fs_cachep = kmem_cache_create("fs_cache",
|
2005-04-16 22:20:36 +00:00
|
|
|
sizeof(struct fs_struct), 0,
|
kmemcheck: add mm functions
2008-05-31 13:56:17 +00:00
|
|
|
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
|
2011-05-29 18:32:28 +00:00
|
|
|
/*
|
|
|
|
* FIXME! The "sizeof(struct mm_struct)" currently includes the
|
|
|
|
* whole struct cpumask for the OFFSTACK case. We could change
|
|
|
|
* this to *only* allocate as much of it as required by the
|
|
|
|
* maximum number of CPU's we can ever have. The cpumask_allocation
|
|
|
|
* is at the end of the structure, exactly for that reason.
|
|
|
|
*/
|
2005-04-16 22:20:36 +00:00
|
|
|
mm_cachep = kmem_cache_create("mm_struct",
|
2006-01-11 21:46:15 +00:00
|
|
|
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
|
kmemcheck: add mm functions
2008-05-31 13:56:17 +00:00
|
|
|
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
|
2009-04-02 23:56:32 +00:00
|
|
|
vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
|
2009-01-08 12:04:47 +00:00
|
|
|
mmap_init();
|
2011-06-28 19:41:10 +00:00
|
|
|
nsproxy_cache_init();
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-02-07 20:58:58 +00:00
|
|
|
|
|
|
|
/*
|
2011-03-22 23:34:09 +00:00
|
|
|
* Check constraints on flags passed to the unshare system call.
|
2006-02-07 20:58:58 +00:00
|
|
|
*/
|
2011-03-22 23:34:09 +00:00
|
|
|
static int check_unshare_flags(unsigned long unshare_flags)
|
2006-02-07 20:58:58 +00:00
|
|
|
{
|
2011-03-22 23:34:09 +00:00
|
|
|
if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
|
|
|
|
CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
|
2010-03-02 23:41:50 +00:00
|
|
|
CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
|
2012-07-26 12:15:35 +00:00
|
|
|
CLONE_NEWUSER|CLONE_NEWPID))
|
2011-03-22 23:34:09 +00:00
|
|
|
return -EINVAL;
|
2006-02-07 20:58:58 +00:00
|
|
|
/*
|
2011-03-22 23:34:09 +00:00
|
|
|
* Not implemented, but pretend it works if there is nothing to
|
|
|
|
* unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
|
|
|
|
* needs to unshare vm.
|
2006-02-07 20:58:58 +00:00
|
|
|
*/
|
2011-03-22 23:34:09 +00:00
|
|
|
if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
|
|
|
|
/* FIXME: get_task_mm() increments ->mm_users */
|
|
|
|
if (atomic_read(&current->mm->mm_users) > 1)
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2006-02-07 20:58:58 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2006-02-07 20:58:59 +00:00
|
|
|
* Unshare the filesystem structure if it is being shared
|
2006-02-07 20:58:58 +00:00
|
|
|
*/
|
|
|
|
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
|
|
|
|
{
|
|
|
|
struct fs_struct *fs = current->fs;
|
|
|
|
|
2009-03-30 11:20:30 +00:00
|
|
|
if (!(unshare_flags & CLONE_FS) || !fs)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* don't need lock here; in the worst case we'll do useless copy */
|
|
|
|
if (fs->users == 1)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
*new_fsp = copy_fs_struct(fs);
|
|
|
|
if (!*new_fsp)
|
|
|
|
return -ENOMEM;
|
2006-02-07 20:58:58 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2006-02-07 20:59:02 +00:00
|
|
|
* Unshare file descriptor table if it is being shared
|
2006-02-07 20:58:58 +00:00
|
|
|
*/
|
|
|
|
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
|
|
|
|
{
|
|
|
|
struct files_struct *fd = current->files;
|
2006-02-07 20:59:02 +00:00
|
|
|
int error = 0;
|
2006-02-07 20:58:58 +00:00
|
|
|
|
|
|
|
if ((unshare_flags & CLONE_FILES) &&
|
2006-02-07 20:59:02 +00:00
|
|
|
(fd && atomic_read(&fd->count) > 1)) {
|
|
|
|
*new_fdp = dup_fd(fd, &error);
|
|
|
|
if (!*new_fdp)
|
|
|
|
return error;
|
|
|
|
}
|
2006-02-07 20:58:58 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* unshare allows a process to 'unshare' part of the process
|
|
|
|
* context which was originally shared using clone. copy_*
|
|
|
|
* functions used by do_fork() cannot be used here directly
|
|
|
|
* because they modify an inactive task_struct that is being
|
|
|
|
* constructed. Here we are modifying the current, active,
|
|
|
|
* task_struct.
|
|
|
|
*/
|
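For context, a minimal userspace sketch of invoking the unshare() system call defined below; it assumes the glibc wrapper declared in <sched.h> under _GNU_SOURCE and that the caller has CAP_SYS_ADMIN for CLONE_NEWNS:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Give this process a private fs context and a private mount
	 * namespace; later chdir()/umask()/mount table changes no longer
	 * affect the processes these were previously shared with. */
	if (unshare(CLONE_FS | CLONE_NEWNS) == -1) {
		perror("unshare");
		return EXIT_FAILURE;
	}
	printf("running with unshared fs context and mount namespace\n");
	return 0;
}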
2009-01-14 13:14:32 +00:00
|
|
|
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
|
2006-02-07 20:58:58 +00:00
|
|
|
{
|
|
|
|
struct fs_struct *fs, *new_fs = NULL;
|
|
|
|
struct files_struct *fd, *new_fd = NULL;
|
2012-07-26 12:15:35 +00:00
|
|
|
struct cred *new_cred = NULL;
|
2007-10-19 06:39:54 +00:00
|
|
|
struct nsproxy *new_nsproxy = NULL;
|
2008-04-29 08:00:57 +00:00
|
|
|
int do_sysvsem = 0;
|
2011-03-22 23:34:09 +00:00
|
|
|
int err;
|
2006-02-07 20:58:58 +00:00
|
|
|
|
2012-07-26 12:15:35 +00:00
|
|
|
/*
|
|
|
|
* If unsharing a user namespace must also unshare the thread.
|
|
|
|
*/
|
|
|
|
if (unshare_flags & CLONE_NEWUSER)
|
2013-03-13 18:51:49 +00:00
|
|
|
unshare_flags |= CLONE_THREAD | CLONE_FS;
|
2010-03-02 23:41:50 +00:00
|
|
|
/*
|
|
|
|
* If unsharing a thread from a thread group, must also unshare vm.
|
|
|
|
*/
|
|
|
|
if (unshare_flags & CLONE_THREAD)
|
|
|
|
unshare_flags |= CLONE_VM;
|
|
|
|
/*
|
|
|
|
* If unsharing vm, must also unshare signal handlers.
|
|
|
|
*/
|
|
|
|
if (unshare_flags & CLONE_VM)
|
|
|
|
unshare_flags |= CLONE_SIGHAND;
|
2011-03-22 23:34:09 +00:00
|
|
|
/*
|
|
|
|
* If unsharing namespace, must also unshare filesystem information.
|
|
|
|
*/
|
|
|
|
if (unshare_flags & CLONE_NEWNS)
|
|
|
|
unshare_flags |= CLONE_FS;
|
2010-03-02 23:41:50 +00:00
|
|
|
|
|
|
|
err = check_unshare_flags(unshare_flags);
|
|
|
|
if (err)
|
|
|
|
goto bad_unshare_out;
|
2008-04-29 08:00:59 +00:00
|
|
|
/*
|
|
|
|
* CLONE_NEWIPC must also detach from the undolist: after switching
|
|
|
|
* to a new ipc namespace, the semaphore arrays from the old
|
|
|
|
* namespace are unreachable.
|
|
|
|
*/
|
|
|
|
if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
|
2008-04-29 08:00:57 +00:00
|
|
|
do_sysvsem = 1;
|
2011-07-26 23:08:39 +00:00
|
|
|
err = unshare_fs(unshare_flags, &new_fs);
|
|
|
|
if (err)
|
2011-03-22 23:34:09 +00:00
|
|
|
goto bad_unshare_out;
|
2011-07-26 23:08:39 +00:00
|
|
|
err = unshare_fd(unshare_flags, &new_fd);
|
|
|
|
if (err)
|
2011-03-22 23:34:09 +00:00
|
|
|
goto bad_unshare_cleanup_fs;
|
2012-07-26 12:15:35 +00:00
|
|
|
err = unshare_userns(unshare_flags, &new_cred);
|
2011-07-26 23:08:39 +00:00
|
|
|
if (err)
|
2008-04-29 08:00:57 +00:00
|
|
|
goto bad_unshare_cleanup_fd;
|
2012-07-26 12:15:35 +00:00
|
|
|
err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
|
|
|
|
new_cred, new_fs);
|
|
|
|
if (err)
|
|
|
|
goto bad_unshare_cleanup_cred;
|
2006-10-02 09:18:18 +00:00
|
|
|
|
2012-07-26 12:15:35 +00:00
|
|
|
if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
|
2008-04-29 08:00:57 +00:00
|
|
|
if (do_sysvsem) {
|
|
|
|
/*
|
|
|
|
* CLONE_SYSVSEM is equivalent to sys_exit().
|
|
|
|
*/
|
|
|
|
exit_sem(current);
|
|
|
|
}
|
shm: make exit_shm work proportional to task activity
This is small set of patches our team has had kicking around for a few
versions internally that fixes tasks getting hung on shm_exit when there
are many threads hammering it at once.
Anton wrote a simple test to cause the issue:
http://ozlabs.org/~anton/junkcode/bust_shm_exit.c
Before applying this patchset, this test code will cause either hanging
tracebacks or pthread out of memory errors.
After this patchset, it will still produce output like:
root@somehost:~# ./bust_shm_exit 1024 160
...
INFO: rcu_sched detected stalls on CPUs/tasks: {} (detected by 116, t=2111 jiffies, g=241, c=240, q=7113)
INFO: Stall ended before state dump start
...
But the task will continue to run along happily, so we consider this an
improvement over hanging, even if it's a bit noisy.
This patch (of 3):
exit_shm obtains the ipc_ns shm rwsem for write and holds it while it
walks every shared memory segment in the namespace. Thus the amount of
work is related to the number of shm segments in the namespace, not the
number of segments that might need to be cleaned.
In addition, this occurs after the task has been notified the thread has
exited, so the number of tasks waiting for the ns shm rwsem can grow
without bound until memory is exhausted.
Add a list to the task struct of all shmids allocated by this task. Init
the list head in copy_process. Use the ns->rwsem for locking. Add
segments after id is added, remove before removing from id.
On unshare of NEW_IPCNS orphan any ids as if the task had exited, similar
to handling of semaphore undo.
I chose a define for the init sequence since it's a simple list init;
otherwise it would require a function call to avoid include loops between
the semaphore code and the task struct. Converting the list_del to
list_del_init for the unshare cases would remove the exit followed by
init, but I left it to blow up if not inited.
Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Jack Miller <millerjo@us.ibm.com>
Cc: Davidlohr Bueso <davidlohr@hp.com>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Anton Blanchard <anton@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2014-08-08 21:23:19 +00:00
|
|
|
if (unshare_flags & CLONE_NEWIPC) {
|
|
|
|
/* Orphan segments in old ns (see sem above). */
|
|
|
|
exit_shm(current);
|
|
|
|
shm_init_task(current);
|
|
|
|
}
|
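A hedged sketch of the idea described in the commit message above (the names here are illustrative, not the exact fields and helpers the patch adds): each task keeps its own list of segments, so teardown walks only that list rather than every segment in the ipc namespace.

#include <linux/list.h>

struct example_shm_segment {
	struct list_head task_node;	/* link in the creating task's list */
	int shmid;
};

/* Run at task exit or on unshare(CLONE_NEWIPC): cost is proportional to
 * the number of segments this task created, not to the total number of
 * segments in the namespace. */
static void example_exit_shm(struct list_head *task_segments)
{
	struct example_shm_segment *seg, *tmp;

	list_for_each_entry_safe(seg, tmp, task_segments, task_node)
		list_del_init(&seg->task_node);	/* orphan the segment */
}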
2006-10-02 09:18:06 +00:00
|
|
|
|
2013-02-28 01:03:23 +00:00
|
|
|
if (new_nsproxy)
|
2007-10-19 06:39:54 +00:00
|
|
|
switch_task_namespaces(current, new_nsproxy);
|
2006-02-07 20:58:58 +00:00
|
|
|
|
2007-10-19 06:39:54 +00:00
|
|
|
task_lock(current);
|
|
|
|
|
2006-02-07 20:58:58 +00:00
|
|
|
if (new_fs) {
|
|
|
|
fs = current->fs;
|
2010-08-17 18:37:33 +00:00
|
|
|
spin_lock(&fs->lock);
|
2006-02-07 20:58:58 +00:00
|
|
|
current->fs = new_fs;
|
2009-03-30 11:20:30 +00:00
|
|
|
if (--fs->users)
|
|
|
|
new_fs = NULL;
|
|
|
|
else
|
|
|
|
new_fs = fs;
|
2010-08-17 18:37:33 +00:00
|
|
|
spin_unlock(&fs->lock);
|
2006-02-07 20:58:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (new_fd) {
|
|
|
|
fd = current->files;
|
|
|
|
current->files = new_fd;
|
|
|
|
new_fd = fd;
|
|
|
|
}
|
|
|
|
|
|
|
|
task_unlock(current);
|
2012-07-26 12:15:35 +00:00
|
|
|
|
|
|
|
if (new_cred) {
|
|
|
|
/* Install the new user namespace */
|
|
|
|
commit_creds(new_cred);
|
|
|
|
new_cred = NULL;
|
|
|
|
}
|
2006-02-07 20:58:58 +00:00
|
|
|
}
|
|
|
|
|
2012-07-26 12:15:35 +00:00
|
|
|
bad_unshare_cleanup_cred:
|
|
|
|
if (new_cred)
|
|
|
|
put_cred(new_cred);
|
2006-02-07 20:58:58 +00:00
|
|
|
bad_unshare_cleanup_fd:
|
|
|
|
if (new_fd)
|
|
|
|
put_files_struct(new_fd);
|
|
|
|
|
|
|
|
bad_unshare_cleanup_fs:
|
|
|
|
if (new_fs)
|
2009-03-30 11:20:30 +00:00
|
|
|
free_fs_struct(new_fs);
|
2006-02-07 20:58:58 +00:00
|
|
|
|
|
|
|
bad_unshare_out:
|
|
|
|
return err;
|
|
|
|
}
|
2008-04-22 09:31:30 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Helper to unshare the files of the current task.
|
|
|
|
* We don't want to expose copy_files internals to
|
|
|
|
* the exec layer of the kernel.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int unshare_files(struct files_struct **displaced)
|
|
|
|
{
|
|
|
|
struct task_struct *task = current;
|
2008-04-26 04:25:00 +00:00
|
|
|
struct files_struct *copy = NULL;
|
2008-04-22 09:31:30 +00:00
|
|
|
int error;
|
|
|
|
|
|
|
|
error = unshare_fd(CLONE_FILES, &copy);
|
|
|
|
if (error || !copy) {
|
|
|
|
*displaced = NULL;
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
*displaced = task->files;
|
|
|
|
task_lock(task);
|
|
|
|
task->files = copy;
|
|
|
|
task_unlock(task);
|
|
|
|
return 0;
|
|
|
|
}
|