kernel/linux-2.6-utrace-ptrace.patch
implement utrace-ptrace

The patch adds a new file, kernel/ptrace-utrace.c, which contains
the new implementation of ptrace on top of utrace.

This file is not compiled until we have the CONFIG_UTRACE option,
which will be added by the next "utrace core" patch.

It is supposed to be an invisible implementation change: nothing should
change for userland when CONFIG_UTRACE is enabled.
Signed-off-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
---
include/linux/ptrace.h | 1 +
kernel/Makefile | 1 +
kernel/ptrace-utrace.c | 1187 ++++++++++++++++++++++++++++++++++++++++++++++++
kernel/ptrace.c | 690 ++++++++++++++--------------
kernel/utrace.c | 16 +
5 files changed, 1544 insertions(+), 351 deletions(-)
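
As a sanity check of the "invisible to userland" claim, a classic ptrace
sequence should behave identically whether or not CONFIG_UTRACE is
enabled. A minimal sketch (editorial illustration, not part of the
patch; error handling omitted):

	#include <signal.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();

		if (pid == 0) {
			/* handled by kernel/ptrace-utrace.c when CONFIG_UTRACE=y */
			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
			raise(SIGSTOP);		/* stop until the tracer resumes us */
			_exit(0);
		}

		waitpid(pid, NULL, 0);			/* tracee stopped, as before */
		ptrace(PTRACE_CONT, pid, NULL, NULL);	/* resumed via utrace_control() */
		waitpid(pid, NULL, 0);			/* reap the exited tracee */
		return 0;
	}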
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 619cdf0..e391bdb 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -100,6 +100,7 @@
#include <linux/sched.h> /* For struct task_struct. */
+extern void ptrace_notify_stop(struct task_struct *tracee);
extern long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
extern int ptrace_traceme(void);
diff --git a/kernel/Makefile b/kernel/Makefile
index 1172528..9a815a5 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o
obj-$(CONFIG_SMP) += stop_machine.o
obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
obj-$(CONFIG_UTRACE) += utrace.o
+obj-$(CONFIG_UTRACE) += ptrace-utrace.o
obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o
diff --git a/kernel/ptrace-utrace.c b/kernel/ptrace-utrace.c
new file mode 100644
index 0000000..a5bcb9e
--- /dev/null
+++ b/kernel/ptrace-utrace.c
@@ -0,0 +1,1187 @@
+/*
+ * linux/kernel/ptrace.c
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ *
+ * Common interfaces for "ptrace()" which we do not want
+ * to continually duplicate across every architecture.
+ */
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+#include <linux/ptrace.h>
+#include <linux/utrace.h>
+#include <linux/security.h>
+#include <linux/signal.h>
+#include <linux/audit.h>
+#include <linux/pid_namespace.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+/*
+ * ptrace a task: make the debugger its new parent and
+ * move it to the ptrace list.
+ *
+ * Must be called with the tasklist lock write-held.
+ */
+void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
+{
+ BUG_ON(!list_empty(&child->ptrace_entry));
+ list_add(&child->ptrace_entry, &new_parent->ptraced);
+ child->parent = new_parent;
+}
+
+/*
+ * unptrace a task: move it back to its original parent and
+ * remove it from the ptrace list.
+ *
+ * Must be called with the tasklist lock write-held.
+ */
+void __ptrace_unlink(struct task_struct *child)
+{
+ BUG_ON(!child->ptrace);
+
+ child->ptrace = 0;
+ child->parent = child->real_parent;
+ list_del_init(&child->ptrace_entry);
+}
+
+struct ptrace_context {
+ int options; /* PTRACE_O_... flags, plus the internal bits below */
+
+ int signr; /* signal to report or deliver when the tracee resumes */
+ siginfo_t *siginfo; /* tracee's siginfo, stable while it is stopped */
+
+ int stop_code; /* (event << 8) | signo; nonzero if a stop is pending */
+ unsigned long eventmsg; /* value returned by PTRACE_GETEVENTMSG */
+
+ enum utrace_resume_action resume; /* how the tracer asked to resume */
+};
+
+#define PT_UTRACED 0x00001000
+
+#define PTRACE_O_SYSEMU 0x100
+#define PTRACE_O_DETACHED 0x200
+
+#define PTRACE_EVENT_SYSCALL (1 << 16)
+#define PTRACE_EVENT_SIGTRAP (2 << 16)
+#define PTRACE_EVENT_SIGNAL (3 << 16)
+/* events visible to user-space */
+#define PTRACE_EVENT_MASK 0xFFFF
+
+static inline bool ptrace_event_pending(struct ptrace_context *ctx)
+{
+ return ctx->stop_code != 0;
+}
+
+static inline int get_stop_event(struct ptrace_context *ctx)
+{
+ return ctx->stop_code >> 8;
+}
+
+static inline void set_stop_code(struct ptrace_context *ctx, int event)
+{
+ ctx->stop_code = (event << 8) | SIGTRAP;
+}
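+/*
+ * Editorial example: for a syscall stop with PTRACE_O_TRACESYSGOOD set,
+ * set_syscall_code() below yields
+ *
+ *	stop_code == (PTRACE_EVENT_SYSCALL << 8) | SIGTRAP | 0x80
+ *
+ * get_stop_event() recovers PTRACE_EVENT_SYSCALL from the high bits,
+ * while the low bits (SIGTRAP | 0x80) become the stop signal the tracer
+ * observes via wait(2), exactly as in the old ptrace implementation.
+ */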
+
+static inline struct ptrace_context *
+ptrace_context(struct utrace_engine *engine)
+{
+ return engine->data;
+}
+
+static const struct utrace_engine_ops ptrace_utrace_ops; /* forward decl */
+
+static struct utrace_engine *ptrace_lookup_engine(struct task_struct *tracee)
+{
+ return utrace_attach_task(tracee, UTRACE_ATTACH_MATCH_OPS,
+ &ptrace_utrace_ops, NULL);
+}
+
+static int utrace_barrier_uninterruptible(struct task_struct *target,
+ struct utrace_engine *engine)
+{
+ for (;;) {
+ int err = utrace_barrier(target, engine);
+
+ if (err != -ERESTARTSYS)
+ return err;
+
+ schedule_timeout_uninterruptible(1);
+ }
+}
+
+static struct utrace_engine *
+ptrace_reuse_engine(struct task_struct *tracee)
+{
+ struct utrace_engine *engine;
+ struct ptrace_context *ctx;
+ int err = -EPERM;
+
+ engine = ptrace_lookup_engine(tracee);
+ if (IS_ERR(engine))
+ return engine;
+
+ ctx = ptrace_context(engine);
+ if (unlikely(ctx->options == PTRACE_O_DETACHED)) {
+ /*
+ * Try to reuse this self-detaching engine.
+ * The only caller which can hit this case is ptrace_attach(),
+ * it holds ->cred_guard_mutex.
+ */
+ ctx->options = 0;
+ ctx->eventmsg = 0;
+
+ /* make sure we don't get unwanted reports */
+ err = utrace_set_events(tracee, engine, UTRACE_EVENT(QUIESCE));
+ if (!err || err == -EINPROGRESS) {
+ ctx->resume = UTRACE_RESUME;
+ /* synchronize with ptrace_report_signal() */
+ err = utrace_barrier_uninterruptible(tracee, engine);
+ }
+
+ if (!err) {
+ WARN_ON(engine->ops != &ptrace_utrace_ops &&
+ !tracee->exit_state);
+ return engine;
+ }
+
+ WARN_ON(engine->ops == &ptrace_utrace_ops);
+ }
+
+ utrace_engine_put(engine);
+ return ERR_PTR(err);
+}
+
+static struct utrace_engine *
+ptrace_attach_engine(struct task_struct *tracee)
+{
+ struct utrace_engine *engine;
+ struct ptrace_context *ctx;
+
+ if (unlikely(task_utrace_flags(tracee))) {
+ engine = ptrace_reuse_engine(tracee);
+ if (!IS_ERR(engine) || PTR_ERR(engine) == -EPERM)
+ return engine;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (unlikely(!ctx))
+ return ERR_PTR(-ENOMEM);
+
+ ctx->resume = UTRACE_RESUME;
+
+ engine = utrace_attach_task(tracee, UTRACE_ATTACH_CREATE |
+ UTRACE_ATTACH_EXCLUSIVE |
+ UTRACE_ATTACH_MATCH_OPS,
+ &ptrace_utrace_ops, ctx);
+ if (unlikely(IS_ERR(engine))) {
+ if (engine != ERR_PTR(-ESRCH) &&
+ engine != ERR_PTR(-ERESTARTNOINTR))
+ engine = ERR_PTR(-EPERM);
+ kfree(ctx);
+ }
+
+ return engine;
+}
+
+static inline int ptrace_set_events(struct task_struct *target,
+ struct utrace_engine *engine,
+ unsigned long options)
+{
+ struct ptrace_context *ctx = ptrace_context(engine);
+ /*
+ * We need QUIESCE for resume handling, CLONE to check
+ * for CLONE_PTRACE, other events are always reported.
+ */
+ unsigned long events = UTRACE_EVENT(QUIESCE) | UTRACE_EVENT(CLONE) |
+ UTRACE_EVENT(EXEC) | UTRACE_EVENT_SIGNAL_ALL;
+
+ ctx->options = options;
+ if (options & PTRACE_O_TRACEEXIT)
+ events |= UTRACE_EVENT(EXIT);
+
+ return utrace_set_events(target, engine, events);
+}
+
+/*
+ * Attach a utrace engine for ptrace and set up its event mask.
+ * Returns error code or 0 on success.
+ */
+static int ptrace_attach_task(struct task_struct *tracee, int options)
+{
+ struct utrace_engine *engine;
+ int err;
+
+ engine = ptrace_attach_engine(tracee);
+ if (IS_ERR(engine))
+ return PTR_ERR(engine);
+ /*
+ * It can fail only if the tracee is dead, the caller
+ * must notice this before setting PT_UTRACED.
+ */
+ err = ptrace_set_events(tracee, engine, options);
+ WARN_ON(err && !tracee->exit_state);
+ utrace_engine_put(engine);
+ return 0;
+}
+
+static int ptrace_wake_up(struct task_struct *tracee,
+ struct utrace_engine *engine,
+ enum utrace_resume_action action,
+ bool force_wakeup)
+{
+ if (force_wakeup) {
+ unsigned long flags;
+ /*
+ * Preserve the compatibility bug. Historically ptrace
+ * wakes up the tracee even if it should not. Clear
+ * SIGNAL_STOP_STOPPED for utrace_wakeup().
+ */
+ if (lock_task_sighand(tracee, &flags)) {
+ tracee->signal->flags &= ~SIGNAL_STOP_STOPPED;
+ unlock_task_sighand(tracee, &flags);
+ }
+ }
+
+ if (action != UTRACE_REPORT)
+ ptrace_context(engine)->stop_code = 0;
+
+ return utrace_control(tracee, engine, action);
+}
+
+static void ptrace_detach_task(struct task_struct *tracee, int sig)
+{
+ /*
+ * If true, the caller is PTRACE_DETACH, otherwise
+ * the tracer detaches implicitly during exit.
+ */
+ bool explicit = (sig >= 0);
+ struct utrace_engine *engine = ptrace_lookup_engine(tracee);
+ enum utrace_resume_action action = UTRACE_DETACH;
+ struct ptrace_context *ctx;
+
+ if (unlikely(IS_ERR(engine)))
+ return;
+
+ ctx = ptrace_context(engine);
+
+ if (!explicit) {
+ int err;
+
+ /*
+ * We are going to detach, the tracee can be running.
+ * Ensure ptrace_report_signal() won't report a signal.
+ */
+ ctx->resume = UTRACE_DETACH;
+ err = utrace_barrier_uninterruptible(tracee, engine);
+
+ if (!err && ctx->siginfo) {
+ /*
+ * The tracee has already reported a signal
+ * before utrace_barrier().
+ *
+ * Resume it like we do in PTRACE_EVENT_SIGNAL
+ * case below. The difference is that we can race
+ * with ptrace_report_signal() if the tracee is
+ * running but this doesn't matter. In any case
+ * UTRACE_SIGNAL_REPORT must be pending and it
+ * can return nothing but UTRACE_DETACH.
+ */
+ action = UTRACE_RESUME;
+ }
+
+ } else if (sig) {
+ switch (get_stop_event(ctx)) {
+ case PTRACE_EVENT_SYSCALL:
+ send_sig_info(sig, SEND_SIG_PRIV, tracee);
+ break;
+
+ case PTRACE_EVENT_SIGNAL:
+ ctx->signr = sig;
+ ctx->resume = UTRACE_DETACH;
+ action = UTRACE_RESUME;
+ break;
+ }
+ }
+
+ ptrace_wake_up(tracee, engine, action, explicit);
+
+ if (action != UTRACE_DETACH)
+ ctx->options = PTRACE_O_DETACHED;
+
+ utrace_engine_put(engine);
+}
+
+static void ptrace_abort_attach(struct task_struct *tracee)
+{
+ ptrace_detach_task(tracee, 0);
+}
+
+static u32 ptrace_report_exit(u32 action, struct utrace_engine *engine,
+ long orig_code, long *code)
+{
+ struct ptrace_context *ctx = ptrace_context(engine);
+
+ WARN_ON(ptrace_event_pending(ctx) &&
+ !signal_group_exit(current->signal));
+
+ set_stop_code(ctx, PTRACE_EVENT_EXIT);
+ ctx->eventmsg = *code;
+
+ return UTRACE_STOP;
+}
+
+static void ptrace_clone_attach(struct task_struct *child,
+ int options)
+{
+ struct task_struct *parent = current;
+ struct task_struct *tracer;
+ bool abort = true;
+
+ if (unlikely(ptrace_attach_task(child, options))) {
+ WARN_ON(1);
+ return;
+ }
+
+ write_lock_irq(&tasklist_lock);
+ tracer = parent->parent;
+ if (!(tracer->flags & PF_EXITING) && parent->ptrace) {
+ child->ptrace = parent->ptrace;
+ __ptrace_link(child, tracer);
+ abort = false;
+ }
+ write_unlock_irq(&tasklist_lock);
+ if (unlikely(abort)) {
+ ptrace_abort_attach(child);
+ return;
+ }
+
+ sigaddset(&child->pending.signal, SIGSTOP);
+ set_tsk_thread_flag(child, TIF_SIGPENDING);
+}
+
+static u32 ptrace_report_clone(u32 action, struct utrace_engine *engine,
+ unsigned long clone_flags,
+ struct task_struct *child)
+{
+ struct ptrace_context *ctx = ptrace_context(engine);
+ int event = 0;
+
+ WARN_ON(ptrace_event_pending(ctx));
+
+ if (clone_flags & CLONE_UNTRACED) {
+ /* no events reported */
+ } else if (clone_flags & CLONE_VFORK) {
+ if (ctx->options & PTRACE_O_TRACEVFORK)
+ event = PTRACE_EVENT_VFORK;
+ else if (ctx->options & PTRACE_O_TRACEVFORKDONE)
+ event = PTRACE_EVENT_VFORK_DONE;
+ } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
+ if (ctx->options & PTRACE_O_TRACECLONE)
+ event = PTRACE_EVENT_CLONE;
+ } else if (ctx->options & PTRACE_O_TRACEFORK) {
+ event = PTRACE_EVENT_FORK;
+ }
+ /*
+ * Any of these reports implies auto-attaching the new child.
+ * So does CLONE_PTRACE, even with no event to report.
+ */
+ if ((event && event != PTRACE_EVENT_VFORK_DONE) ||
+ (clone_flags & CLONE_PTRACE))
+ ptrace_clone_attach(child, ctx->options);
+
+ if (!event)
+ return UTRACE_RESUME;
+
+ set_stop_code(ctx, event);
+ ctx->eventmsg = child->pid;
+ /*
+ * We shouldn't stop now, inside the do_fork() path.
+ * We will stop later, before return to user-mode.
+ */
+ if (event == PTRACE_EVENT_VFORK_DONE)
+ return UTRACE_REPORT;
+ else
+ return UTRACE_STOP;
+}
+
+static inline void set_syscall_code(struct ptrace_context *ctx)
+{
+ set_stop_code(ctx, PTRACE_EVENT_SYSCALL);
+ if (ctx->options & PTRACE_O_TRACESYSGOOD)
+ ctx->stop_code |= 0x80;
+}
+
+static u32 ptrace_report_syscall_entry(u32 action, struct utrace_engine *engine,
+ struct pt_regs *regs)
+{
+ struct ptrace_context *ctx = ptrace_context(engine);
+
+ if (action & UTRACE_SYSCALL_RESUMED) {
+ /*
+ * We already reported the first time.
+ * Nothing more to do now.
+ */
+ if (unlikely(ctx->options & PTRACE_O_SYSEMU))
+ return UTRACE_SYSCALL_ABORT | UTRACE_REPORT;
+ return utrace_syscall_action(action) | UTRACE_RESUME;
+ }
+
+ WARN_ON(ptrace_event_pending(ctx));
+
+ set_syscall_code(ctx);
+
+ if (unlikely(ctx->options & PTRACE_O_SYSEMU))
+ return UTRACE_SYSCALL_ABORT | UTRACE_REPORT;
+ /*
+ * Stop now to report. We will get another callback after
+ * we resume, with the UTRACE_SYSCALL_RESUMED flag set.
+ */
+ return UTRACE_SYSCALL_RUN | UTRACE_STOP;
+}
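+/*
+ * Editorial note: with PTRACE_SYSCALL the tracee therefore stops twice
+ * per syscall (entry and exit).  With PTRACE_SYSEMU the syscall is
+ * aborted at entry and UTRACE_REPORT leads to a single stop (via
+ * ptrace_report_quiesce() seeing the pending event) before the tracee
+ * returns to user mode.
+ */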
+
+static inline bool is_step_resume(enum utrace_resume_action resume)
+{
+ return resume == UTRACE_BLOCKSTEP || resume == UTRACE_SINGLESTEP;
+}
+
+static u32 ptrace_report_syscall_exit(u32 action, struct utrace_engine *engine,
+ struct pt_regs *regs)
+{
+ struct ptrace_context *ctx = ptrace_context(engine);
+
+ if (ptrace_event_pending(ctx))
+ return UTRACE_STOP;
+
+ if (is_step_resume(ctx->resume)) {
+ ctx->signr = SIGTRAP;
+ return UTRACE_INTERRUPT;
+ }
+
+ set_syscall_code(ctx);
+ return UTRACE_STOP;
+}
+
+static u32 ptrace_report_exec(u32 action, struct utrace_engine *engine,
+ const struct linux_binfmt *fmt,
+ const struct linux_binprm *bprm,
+ struct pt_regs *regs)
+{
+ struct ptrace_context *ctx = ptrace_context(engine);
+
+ WARN_ON(ptrace_event_pending(ctx));
+
+ if (!(ctx->options & PTRACE_O_TRACEEXEC)) {
+ /*
+ * Old-fashioned ptrace'd exec just posts a plain signal.
+ */
+ send_sig(SIGTRAP, current, 0);
+ return UTRACE_RESUME;
+ }
+
+ set_stop_code(ctx, PTRACE_EVENT_EXEC);
+ return UTRACE_STOP;
+}
+
+static enum utrace_signal_action resume_signal(struct ptrace_context *ctx,
+ struct k_sigaction *return_ka)
+{
+ siginfo_t *info = ctx->siginfo;
+ int signr = ctx->signr;
+
+ ctx->siginfo = NULL;
+ ctx->signr = 0;
+
+ /* Did the debugger cancel the sig? */
+ if (!signr)
+ return UTRACE_SIGNAL_IGN;
+ /*
+ * Update the siginfo structure if the signal has changed.
+ * If the debugger wanted something specific in the siginfo
+ * then it should have updated *info via PTRACE_SETSIGINFO.
+ */
+ if (info->si_signo != signr) {
+ info->si_signo = signr;
+ info->si_errno = 0;
+ info->si_code = SI_USER;
+ info->si_pid = task_pid_vnr(current->parent);
+ info->si_uid = task_uid(current->parent);
+ }
+
+ /* If the (new) signal is now blocked, requeue it. */
+ if (sigismember(&current->blocked, signr)) {
+ send_sig_info(signr, info, current);
+ return UTRACE_SIGNAL_IGN;
+ }
+
+ spin_lock_irq(&current->sighand->siglock);
+ *return_ka = current->sighand->action[signr - 1];
+ spin_unlock_irq(&current->sighand->siglock);
+
+ return UTRACE_SIGNAL_DELIVER;
+}
+
+static u32 ptrace_report_signal(u32 action, struct utrace_engine *engine,
+ struct pt_regs *regs,
+ siginfo_t *info,
+ const struct k_sigaction *orig_ka,
+ struct k_sigaction *return_ka)
+{
+ struct ptrace_context *ctx = ptrace_context(engine);
+ enum utrace_resume_action resume = ctx->resume;
+
+ if (ptrace_event_pending(ctx)) {
+ action = utrace_signal_action(action);
+ WARN_ON(action != UTRACE_SIGNAL_REPORT);
+ return action | UTRACE_STOP;
+ }
+
+ switch (utrace_signal_action(action)) {
+ case UTRACE_SIGNAL_HANDLER:
+ if (WARN_ON(ctx->siginfo))
+ ctx->siginfo = NULL;
+
+ if (is_step_resume(resume)) {
+ set_stop_code(ctx, PTRACE_EVENT_SIGTRAP);
+ return UTRACE_STOP | UTRACE_SIGNAL_IGN;
+ }
+
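+ /* fall through */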
+ case UTRACE_SIGNAL_REPORT:
+ if (!ctx->siginfo) {
+ if (ctx->signr) {
+ /* set by ptrace_resume(SYSCALL_EXIT) */
+ WARN_ON(ctx->signr != SIGTRAP);
+ user_single_step_siginfo(current, regs, info);
+ force_sig_info(SIGTRAP, info, current);
+ }
+
+ return resume | UTRACE_SIGNAL_IGN;
+ }
+
+ if (WARN_ON(ctx->siginfo != info))
+ return resume | UTRACE_SIGNAL_IGN;
+
+ return resume | resume_signal(ctx, return_ka);
+
+ default:
+ break;
+ }
+
+ WARN_ON(ctx->siginfo);
+
+ /* Raced with the exiting tracer? */
+ if (resume == UTRACE_DETACH)
+ return action;
+
+ ctx->siginfo = info;
+ /*
+ * ctx->siginfo points to the caller's stack.
+ * Make sure the subsequent UTRACE_SIGNAL_REPORT clears
+ * ->siginfo before return from get_signal_to_deliver().
+ */
+ if (utrace_control(current, engine, UTRACE_INTERRUPT))
+ WARN_ON(1);
+
+ ctx->signr = info->si_signo;
+ ctx->stop_code = (PTRACE_EVENT_SIGNAL << 8) | ctx->signr;
+
+ return UTRACE_STOP | UTRACE_SIGNAL_IGN;
+}
+
+static u32 ptrace_report_quiesce(u32 action, struct utrace_engine *engine,
+ unsigned long event)
+{
+ struct ptrace_context *ctx = ptrace_context(engine);
+
+ if (ptrace_event_pending(ctx))
+ return UTRACE_STOP;
+
+ return event ? UTRACE_RESUME : ctx->resume;
+}
+
+static void ptrace_release(void *data)
+{
+ kfree(data);
+}
+
+static const struct utrace_engine_ops ptrace_utrace_ops = {
+ .report_signal = ptrace_report_signal,
+ .report_quiesce = ptrace_report_quiesce,
+ .report_exec = ptrace_report_exec,
+ .report_exit = ptrace_report_exit,
+ .report_clone = ptrace_report_clone,
+ .report_syscall_entry = ptrace_report_syscall_entry,
+ .report_syscall_exit = ptrace_report_syscall_exit,
+ .release = ptrace_release,
+};
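+/*
+ * Editorial note: the ops vector above is the entire ptrace <-> utrace
+ * glue.  utrace invokes these callbacks at the corresponding tracee
+ * events; returning UTRACE_STOP from a callback is what leaves the
+ * tracee in TASK_TRACED for the tracer's wait().
+ */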
+
+int ptrace_check_attach(struct task_struct *child, int kill)
+{
+ struct utrace_engine *engine;
+ struct utrace_examiner exam;
+ int ret = -ESRCH;
+
+ engine = ptrace_lookup_engine(child);
+ if (IS_ERR(engine))
+ return ret;
+
+ if (child->parent != current)
+ goto out;
+
+ if (unlikely(kill))
+ ret = 0;
+
+ if (!task_is_stopped_or_traced(child))
+ goto out;
+ /*
+ * Make sure our engine has already stopped the child.
+ * Then wait for it to be off the CPU.
+ */
+ if (!utrace_control(child, engine, UTRACE_STOP) &&
+ !utrace_prepare_examine(child, engine, &exam))
+ ret = 0;
+out:
+ utrace_engine_put(engine);
+ return ret;
+}
+
+int ptrace_attach(struct task_struct *task)
+{
+ int retval;
+
+ audit_ptrace(task);
+
+ retval = -EPERM;
+ if (unlikely(task->flags & PF_KTHREAD))
+ goto out;
+ if (same_thread_group(task, current))
+ goto out;
+
+ /*
+ * Protect exec's credential calculations against our interference;
+ * SUID, SGID and LSM creds get determined differently under ptrace.
+ */
+ retval = -ERESTARTNOINTR;
+ if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
+ goto out;
+
+ task_lock(task);
+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
+ task_unlock(task);
+ if (retval)
+ goto unlock_creds;
+
+ retval = ptrace_attach_task(task, 0);
+ if (unlikely(retval))
+ goto unlock_creds;
+
+ write_lock_irq(&tasklist_lock);
+ retval = -EPERM;
+ if (unlikely(task->exit_state))
+ goto unlock_tasklist;
+
+ BUG_ON(task->ptrace);
+ task->ptrace = PT_UTRACED;
+ if (capable(CAP_SYS_PTRACE))
+ task->ptrace |= PT_PTRACE_CAP;
+
+ __ptrace_link(task, current);
+ send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
+
+ retval = 0;
+unlock_tasklist:
+ write_unlock_irq(&tasklist_lock);
+unlock_creds:
+ mutex_unlock(&task->signal->cred_guard_mutex);
+out:
+ return retval;
+}
+
+/*
+ * Performs checks and sets PT_UTRACED.
+ * Should be used by all ptrace implementations for PTRACE_TRACEME.
+ */
+int ptrace_traceme(void)
+{
+ bool detach = true;
+ int ret = ptrace_attach_task(current, 0);
+
+ if (unlikely(ret))
+ return ret;
+
+ ret = -EPERM;
+ write_lock_irq(&tasklist_lock);
+ BUG_ON(current->ptrace);
+ ret = security_ptrace_traceme(current->parent);
+ /*
+ * Check PF_EXITING to ensure ->real_parent has not passed
+ * exit_ptrace(). Otherwise we don't report the error but
+ * pretend ->real_parent untraces us right after return.
+ */
+ if (!ret && !(current->real_parent->flags & PF_EXITING)) {
+ current->ptrace = PT_UTRACED;
+ __ptrace_link(current, current->real_parent);
+ detach = false;
+ }
+ write_unlock_irq(&tasklist_lock);
+
+ if (detach)
+ ptrace_abort_attach(current);
+ return ret;
+}
+
+static void ptrace_do_detach(struct task_struct *tracee, unsigned int data)
+{
+ bool detach, release;
+
+ write_lock_irq(&tasklist_lock);
+ /*
+ * This tracee can be already killed. Make sure de_thread() or
+ * our sub-thread doing do_wait() didn't do release_task() yet.
+ */
+ detach = tracee->ptrace != 0;
+ release = false;
+ if (likely(detach))
+ release = __ptrace_detach(current, tracee);
+ write_unlock_irq(&tasklist_lock);
+
+ if (unlikely(release))
+ release_task(tracee);
+ else if (likely(detach))
+ ptrace_detach_task(tracee, data);
+}
+
+int ptrace_detach(struct task_struct *child, unsigned int data)
+{
+ if (!valid_signal(data))
+ return -EIO;
+
+ ptrace_do_detach(child, data);
+
+ return 0;
+}
+
+/*
+ * Detach all tasks we were using ptrace on. Called with tasklist held
+ * for writing, and returns with it held too. But note it can release
+ * and reacquire the lock.
+ */
+void exit_ptrace(struct task_struct *tracer)
+{
+ bool locked = true;
+
+ for (;;) {
+ struct task_struct *tracee = NULL;
+
+ if (!locked)
+ read_lock(&tasklist_lock);
+ if (!list_empty(&tracer->ptraced)) {
+ tracee = list_first_entry(&tracer->ptraced,
+ struct task_struct, ptrace_entry);
+ get_task_struct(tracee);
+ }
+ if (!locked)
+ read_unlock(&tasklist_lock);
+ if (!tracee)
+ break;
+
+ if (locked) {
+ write_unlock_irq(&tasklist_lock);
+ locked = false;
+ }
+ ptrace_do_detach(tracee, -1);
+ put_task_struct(tracee);
+ }
+
+ if (!locked)
+ write_lock_irq(&tasklist_lock);
+}
+
+static int ptrace_set_options(struct task_struct *tracee,
+ struct utrace_engine *engine, long data)
+{
+ BUILD_BUG_ON(PTRACE_O_MASK & (PTRACE_O_SYSEMU | PTRACE_O_DETACHED));
+
+ ptrace_set_events(tracee, engine, data & PTRACE_O_MASK);
+ return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
+}
+
+static int ptrace_rw_siginfo(struct task_struct *tracee,
+ struct ptrace_context *ctx,
+ siginfo_t *info, bool write)
+{
+ unsigned long flags;
+ int err;
+
+ switch (get_stop_event(ctx)) {
+ case 0: /* jctl stop */
+ return -EINVAL;
+
+ case PTRACE_EVENT_SIGNAL:
+ err = -ESRCH;
+ if (lock_task_sighand(tracee, &flags)) {
+ if (likely(task_is_traced(tracee))) {
+ if (write)
+ *ctx->siginfo = *info;
+ else
+ *info = *ctx->siginfo;
+ err = 0;
+ }
+ unlock_task_sighand(tracee, &flags);
+ }
+
+ return err;
+
+ default:
+ if (!write) {
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+ info->si_code = ctx->stop_code & PTRACE_EVENT_MASK;
+ info->si_pid = task_pid_vnr(tracee);
+ info->si_uid = task_uid(tracee);
+ }
+
+ return 0;
+ }
+}
+
+static void do_ptrace_notify_stop(struct ptrace_context *ctx,
+ struct task_struct *tracee)
+{
+ /*
+ * This can race with SIGKILL, but we borrow this race from
+ * the old ptrace implementation. ->exit_code is only needed
+ * for wait_task_stopped()->task_stopped_code(), we should
+ * change it to use ptrace_context.
+ */
+ tracee->exit_code = ctx->stop_code & PTRACE_EVENT_MASK;
+ WARN_ON(!tracee->exit_code);
+
+ read_lock(&tasklist_lock);
+ /*
+ * Don't want to allow preemption here, because
+ * sys_ptrace() needs this task to be inactive.
+ */
+ preempt_disable();
+ /*
+ * It can be killed and then released by our subthread,
+ * or ptrace_attach() has not completed yet.
+ */
+ if (task_ptrace(tracee))
+ do_notify_parent_cldstop(tracee, CLD_TRAPPED);
+ read_unlock(&tasklist_lock);
+ preempt_enable_no_resched();
+}
+
+void ptrace_notify_stop(struct task_struct *tracee)
+{
+ struct utrace_engine *engine = ptrace_lookup_engine(tracee);
+
+ if (IS_ERR(engine))
+ return;
+
+ do_ptrace_notify_stop(ptrace_context(engine), tracee);
+ utrace_engine_put(engine);
+}
+
+static int ptrace_resume_action(struct task_struct *tracee,
+ struct utrace_engine *engine, long request)
+{
+ struct ptrace_context *ctx = ptrace_context(engine);
+ unsigned long events;
+ int action;
+
+ ctx->options &= ~PTRACE_O_SYSEMU;
+ events = engine->flags & ~UTRACE_EVENT_SYSCALL;
+ action = UTRACE_RESUME;
+
+ switch (request) {
+#ifdef PTRACE_SINGLEBLOCK
+ case PTRACE_SINGLEBLOCK:
+ if (unlikely(!arch_has_block_step()))
+ return -EIO;
+ action = UTRACE_BLOCKSTEP;
+ events |= UTRACE_EVENT(SYSCALL_EXIT);
+ break;
+#endif
+
+#ifdef PTRACE_SINGLESTEP
+ case PTRACE_SINGLESTEP:
+ if (unlikely(!arch_has_single_step()))
+ return -EIO;
+ action = UTRACE_SINGLESTEP;
+ events |= UTRACE_EVENT(SYSCALL_EXIT);
+ break;
+#endif
+
+#ifdef PTRACE_SYSEMU
+ case PTRACE_SYSEMU_SINGLESTEP:
+ if (unlikely(!arch_has_single_step()))
+ return -EIO;
+ action = UTRACE_SINGLESTEP;
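+ /* fall through */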
+ case PTRACE_SYSEMU:
+ ctx->options |= PTRACE_O_SYSEMU;
+ events |= UTRACE_EVENT(SYSCALL_ENTRY);
+ break;
+#endif
+
+ case PTRACE_SYSCALL:
+ events |= UTRACE_EVENT_SYSCALL;
+ break;
+
+ case PTRACE_CONT:
+ break;
+ default:
+ return -EIO;
+ }
+
+ if (events != engine->flags &&
+ utrace_set_events(tracee, engine, events))
+ return -ESRCH;
+
+ return action;
+}
+
+static int ptrace_resume(struct task_struct *tracee,
+ struct utrace_engine *engine,
+ long request, long data)
+{
+ struct ptrace_context *ctx = ptrace_context(engine);
+ int action;
+
+ if (!valid_signal(data))
+ return -EIO;
+
+ action = ptrace_resume_action(tracee, engine, request);
+ if (action < 0)
+ return action;
+
+ switch (get_stop_event(ctx)) {
+ case PTRACE_EVENT_VFORK:
+ if (ctx->options & PTRACE_O_TRACEVFORKDONE) {
+ set_stop_code(ctx, PTRACE_EVENT_VFORK_DONE);
+ action = UTRACE_REPORT;
+ }
+ break;
+
+ case PTRACE_EVENT_EXEC:
+ case PTRACE_EVENT_FORK:
+ case PTRACE_EVENT_CLONE:
+ case PTRACE_EVENT_VFORK_DONE:
+ if (request == PTRACE_SYSCALL) {
+ set_syscall_code(ctx);
+ do_ptrace_notify_stop(ctx, tracee);
+ return 0;
+ }
+
+ if (action != UTRACE_RESUME) {
+ /*
+ * single-stepping. UTRACE_SIGNAL_REPORT will
+ * synthesize a trap to follow the syscall insn.
+ */
+ ctx->signr = SIGTRAP;
+ action = UTRACE_INTERRUPT;
+ }
+ break;
+
+ case PTRACE_EVENT_SYSCALL:
+ if (data)
+ send_sig_info(data, SEND_SIG_PRIV, tracee);
+ break;
+
+ case PTRACE_EVENT_SIGNAL:
+ ctx->signr = data;
+ break;
+ }
+
+ ctx->resume = action;
+ ptrace_wake_up(tracee, engine, action, true);
+ return 0;
+}
+
+extern int ptrace_regset(struct task_struct *task, int req, unsigned int type,
+ struct iovec *kiov);
+
+int ptrace_request(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ struct utrace_engine *engine = ptrace_lookup_engine(child);
+ siginfo_t siginfo;
+ int ret;
+
+ if (unlikely(IS_ERR(engine)))
+ return -ESRCH;
+
+ switch (request) {
+ case PTRACE_PEEKTEXT:
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
+ break;
+ case PTRACE_POKETEXT:
+ case PTRACE_POKEDATA:
+ ret = generic_ptrace_pokedata(child, addr, data);
+ break;
+
+#ifdef PTRACE_OLDSETOPTIONS
+ case PTRACE_OLDSETOPTIONS:
+#endif
+ case PTRACE_SETOPTIONS:
+ ret = ptrace_set_options(child, engine, data);
+ break;
+ case PTRACE_GETEVENTMSG:
+ ret = put_user(ptrace_context(engine)->eventmsg,
+ (unsigned long __user *) data);
+ break;
+
+ case PTRACE_GETSIGINFO:
+ ret = ptrace_rw_siginfo(child, ptrace_context(engine),
+ &siginfo, false);
+ if (!ret)
+ ret = copy_siginfo_to_user((siginfo_t __user *) data,
+ &siginfo);
+ break;
+
+ case PTRACE_SETSIGINFO:
+ if (copy_from_user(&siginfo, (siginfo_t __user *) data,
+ sizeof siginfo))
+ ret = -EFAULT;
+ else
+ ret = ptrace_rw_siginfo(child, ptrace_context(engine),
+ &siginfo, true);
+ break;
+
+ case PTRACE_DETACH: /* detach a process that was attached. */
+ ret = ptrace_detach(child, data);
+ break;
+
+ case PTRACE_KILL:
+ /* Ugly historical behaviour. */
+ if (task_is_traced(child))
+ ptrace_resume(child, engine, PTRACE_CONT, SIGKILL);
+ ret = 0;
+ break;
+
+ case PTRACE_GETREGSET:
+ case PTRACE_SETREGSET:
+ {
+ struct iovec kiov;
+ struct iovec __user *uiov = (struct iovec __user *) data;
+
+ if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
+ return -EFAULT;
+
+ if (__get_user(kiov.iov_base, &uiov->iov_base) ||
+ __get_user(kiov.iov_len, &uiov->iov_len))
+ return -EFAULT;
+
+ ret = ptrace_regset(child, request, addr, &kiov);
+ if (!ret)
+ ret = __put_user(kiov.iov_len, &uiov->iov_len);
+ break;
+ }
+
+ default:
+ ret = ptrace_resume(child, engine, request, data);
+ break;
+ }
+
+ utrace_engine_put(engine);
+ return ret;
+}
+
+#if defined CONFIG_COMPAT
+#include <linux/compat.h>
+
+int compat_ptrace_request(struct task_struct *child, compat_long_t request,
+ compat_ulong_t addr, compat_ulong_t data)
+{
+ struct utrace_engine *engine = ptrace_lookup_engine(child);
+ compat_ulong_t __user *datap = compat_ptr(data);
+ compat_ulong_t word;
+ siginfo_t siginfo;
+ int ret;
+
+ if (unlikely(IS_ERR(engine)))
+ return -ESRCH;
+
+ switch (request) {
+ case PTRACE_PEEKTEXT:
+ case PTRACE_PEEKDATA:
+ ret = access_process_vm(child, addr, &word, sizeof(word), 0);
+ if (ret != sizeof(word))
+ ret = -EIO;
+ else
+ ret = put_user(word, datap);
+ break;
+
+ case PTRACE_POKETEXT:
+ case PTRACE_POKEDATA:
+ ret = access_process_vm(child, addr, &data, sizeof(data), 1);
+ ret = (ret != sizeof(data) ? -EIO : 0);
+ break;
+
+ case PTRACE_GETEVENTMSG:
+ ret = put_user((compat_ulong_t)ptrace_context(engine)->eventmsg,
+ datap);
+ break;
+
+ case PTRACE_GETSIGINFO:
+ ret = ptrace_rw_siginfo(child, ptrace_context(engine),
+ &siginfo, false);
+ if (!ret)
+ ret = copy_siginfo_to_user32(
+ (struct compat_siginfo __user *) datap,
+ &siginfo);
+ break;
+
+ case PTRACE_SETSIGINFO:
+ memset(&siginfo, 0, sizeof siginfo);
+ if (copy_siginfo_from_user32(
+ &siginfo, (struct compat_siginfo __user *) datap))
+ ret = -EFAULT;
+ else
+ ret = ptrace_rw_siginfo(child, ptrace_context(engine),
+ &siginfo, true);
+ break;
+
+ case PTRACE_GETREGSET:
+ case PTRACE_SETREGSET:
+ {
+ struct iovec kiov;
+ struct compat_iovec __user *uiov =
+ (struct compat_iovec __user *) datap;
+ compat_uptr_t ptr;
+ compat_size_t len;
+
+ if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
+ return -EFAULT;
+
+ if (__get_user(ptr, &uiov->iov_base) ||
+ __get_user(len, &uiov->iov_len))
+ return -EFAULT;
+
+ kiov.iov_base = compat_ptr(ptr);
+ kiov.iov_len = len;
+
+ ret = ptrace_regset(child, request, addr, &kiov);
+ if (!ret)
+ ret = __put_user(kiov.iov_len, &uiov->iov_len);
+ break;
+ }
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ }
+
+ utrace_engine_put(engine);
+ return ret;
+}
+#endif /* CONFIG_COMPAT */
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index e275608..72ea65c 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -15,7 +15,6 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
-#include <linux/utrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
@@ -24,7 +23,320 @@
#include <linux/uaccess.h>
#include <linux/regset.h>
+int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+{
+ const struct cred *cred = current_cred(), *tcred;
+
+ /* May we inspect the given task?
+ * This check is used both for attaching with ptrace
+ * and for allowing access to sensitive information in /proc.
+ *
+ * ptrace_attach denies several cases that /proc allows
+ * because setting up the necessary parent/child relationship
+ * or halting the specified task is impossible.
+ */
+ int dumpable = 0;
+ /* Don't let security modules deny introspection */
+ if (task == current)
+ return 0;
+ rcu_read_lock();
+ tcred = __task_cred(task);
+ if ((cred->uid != tcred->euid ||
+ cred->uid != tcred->suid ||
+ cred->uid != tcred->uid ||
+ cred->gid != tcred->egid ||
+ cred->gid != tcred->sgid ||
+ cred->gid != tcred->gid) &&
+ !capable(CAP_SYS_PTRACE)) {
+ rcu_read_unlock();
+ return -EPERM;
+ }
+ rcu_read_unlock();
+ smp_rmb();
+ if (task->mm)
+ dumpable = get_dumpable(task->mm);
+ if (!dumpable && !capable(CAP_SYS_PTRACE))
+ return -EPERM;
+
+ return security_ptrace_access_check(task, mode);
+}
+
+bool ptrace_may_access(struct task_struct *task, unsigned int mode)
+{
+ int err;
+ task_lock(task);
+ err = __ptrace_may_access(task, mode);
+ task_unlock(task);
+ return !err;
+}
+
+/*
+ * Called with irqs disabled, returns true if children should reap themselves.
+ */
+static int ignoring_children(struct sighand_struct *sigh)
+{
+ int ret;
+ spin_lock(&sigh->siglock);
+ ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
+ (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
+ spin_unlock(&sigh->siglock);
+ return ret;
+}
+
+/*
+ * Called with tasklist_lock held for writing.
+ * Unlink a traced task, and clean it up if it was a traced zombie.
+ * Return true if it needs to be reaped with release_task().
+ * (We can't call release_task() here because we already hold tasklist_lock.)
+ *
+ * If it's a zombie, our attachedness prevented normal parent notification
+ * or self-reaping. Do notification now if it would have happened earlier.
+ * If it should reap itself, return true.
+ *
+ * If it's our own child, there is no notification to do. But if our normal
+ * children self-reap, then this child was prevented by ptrace and we must
+ * reap it now, in that case we must also wake up sub-threads sleeping in
+ * do_wait().
+ */
+bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
+{
+ __ptrace_unlink(p);
+
+ if (p->exit_state == EXIT_ZOMBIE) {
+ if (!task_detached(p) && thread_group_empty(p)) {
+ if (!same_thread_group(p->real_parent, tracer))
+ do_notify_parent(p, p->exit_signal);
+ else if (ignoring_children(tracer->sighand)) {
+ __wake_up_parent(p, tracer);
+ p->exit_signal = -1;
+ }
+ }
+ if (task_detached(p)) {
+ /* Mark it as in the process of being reaped. */
+ p->exit_state = EXIT_DEAD;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
+{
+ int copied = 0;
+
+ while (len > 0) {
+ char buf[128];
+ int this_len, retval;
+
+ this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
+ retval = access_process_vm(tsk, src, buf, this_len, 0);
+ if (!retval) {
+ if (copied)
+ break;
+ return -EIO;
+ }
+ if (copy_to_user(dst, buf, retval))
+ return -EFAULT;
+ copied += retval;
+ src += retval;
+ dst += retval;
+ len -= retval;
+ }
+ return copied;
+}
+
+int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
+{
+ int copied = 0;
+
+ while (len > 0) {
+ char buf[128];
+ int this_len, retval;
+
+ this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
+ if (copy_from_user(buf, src, this_len))
+ return -EFAULT;
+ retval = access_process_vm(tsk, dst, buf, this_len, 1);
+ if (!retval) {
+ if (copied)
+ break;
+ return -EIO;
+ }
+ copied += retval;
+ src += retval;
+ dst += retval;
+ len -= retval;
+ }
+ return copied;
+}
+
+#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
+
+static const struct user_regset *
+find_regset(const struct user_regset_view *view, unsigned int type)
+{
+ const struct user_regset *regset;
+ int n;
+
+ for (n = 0; n < view->n; ++n) {
+ regset = view->regsets + n;
+ if (regset->core_note_type == type)
+ return regset;
+ }
+
+ return NULL;
+}
+
+int ptrace_regset(struct task_struct *task, int req, unsigned int type,
+ struct iovec *kiov)
+{
+ const struct user_regset_view *view = task_user_regset_view(task);
+ const struct user_regset *regset = find_regset(view, type);
+ int regset_no;
+
+ if (!regset || (kiov->iov_len % regset->size) != 0)
+ return -EINVAL;
+
+ regset_no = regset - view->regsets;
+ kiov->iov_len = min(kiov->iov_len,
+ (__kernel_size_t) (regset->n * regset->size));
+
+ if (req == PTRACE_GETREGSET)
+ return copy_regset_to_user(task, view, regset_no, 0,
+ kiov->iov_len, kiov->iov_base);
+ else
+ return copy_regset_from_user(task, view, regset_no, 0,
+ kiov->iov_len, kiov->iov_base);
+}
+
+#endif
+
+static struct task_struct *ptrace_get_task_struct(pid_t pid)
+{
+ struct task_struct *child;
+
+ rcu_read_lock();
+ child = find_task_by_vpid(pid);
+ if (child)
+ get_task_struct(child);
+ rcu_read_unlock();
+
+ if (!child)
+ return ERR_PTR(-ESRCH);
+ return child;
+}
+
+#ifndef arch_ptrace_attach
+#define arch_ptrace_attach(child) do { } while (0)
+#endif
+
+SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+ unsigned long, data)
+{
+ struct task_struct *child;
+ long ret;
+
+ if (request == PTRACE_TRACEME) {
+ ret = ptrace_traceme();
+ if (!ret)
+ arch_ptrace_attach(current);
+ goto out;
+ }
+ child = ptrace_get_task_struct(pid);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
+ goto out;
+ }
+
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ /*
+ * Some architectures need to do book-keeping after
+ * a ptrace attach.
+ */
+ if (!ret)
+ arch_ptrace_attach(child);
+ goto out_put_task_struct;
+ }
+
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ if (ret < 0)
+ goto out_put_task_struct;
+
+ ret = arch_ptrace(child, request, addr, data);
+
+ out_put_task_struct:
+ put_task_struct(child);
+ out:
+ return ret;
+}
+
+int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
+ unsigned long data)
+{
+ unsigned long tmp;
+ int copied;
+
+ copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+ if (copied != sizeof(tmp))
+ return -EIO;
+ return put_user(tmp, (unsigned long __user *)data);
+}
+
+int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
+ unsigned long data)
+{
+ int copied;
+
+ copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
+ return (copied == sizeof(data)) ? 0 : -EIO;
+}
+
+#if defined CONFIG_COMPAT
+#include <linux/compat.h>
+
+asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+ compat_long_t addr, compat_long_t data)
+{
+ struct task_struct *child;
+ long ret;
+
+ if (request == PTRACE_TRACEME) {
+ ret = ptrace_traceme();
+ goto out;
+ }
+
+ child = ptrace_get_task_struct(pid);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
+ goto out;
+ }
+
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ /*
+ * Some architectures need to do book-keeping after
+ * a ptrace attach.
+ */
+ if (!ret)
+ arch_ptrace_attach(child);
+ goto out_put_task_struct;
+ }
+
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ if (!ret)
+ ret = compat_arch_ptrace(child, request, addr, data);
+
+ out_put_task_struct:
+ put_task_struct(child);
+ out:
+ return ret;
+}
+#endif /* CONFIG_COMPAT */
+
+#ifndef CONFIG_UTRACE
/*
* ptrace a task: make the debugger its new parent and
* move it to the ptrace list.
@@ -117,61 +429,6 @@ int ptrace_check_attach(struct task_struct *child, int kill)
return ret;
}
-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
-{
- const struct cred *cred = current_cred(), *tcred;
-
- /* May we inspect the given task?
- * This check is used both for attaching with ptrace
- * and for allowing access to sensitive information in /proc.
- *
- * ptrace_attach denies several cases that /proc allows
- * because setting up the necessary parent/child relationship
- * or halting the specified task is impossible.
- */
- int dumpable = 0;
- /* Don't let security modules deny introspection */
- if (task == current)
- return 0;
- rcu_read_lock();
- tcred = __task_cred(task);
- if ((cred->uid != tcred->euid ||
- cred->uid != tcred->suid ||
- cred->uid != tcred->uid ||
- cred->gid != tcred->egid ||
- cred->gid != tcred->sgid ||
- cred->gid != tcred->gid) &&
- !capable(CAP_SYS_PTRACE)) {
- rcu_read_unlock();
- return -EPERM;
- }
- rcu_read_unlock();
- smp_rmb();
- if (task->mm)
- dumpable = get_dumpable(task->mm);
- if (!dumpable && !capable(CAP_SYS_PTRACE))
- return -EPERM;
-
- return security_ptrace_access_check(task, mode);
-}
-
-bool ptrace_may_access(struct task_struct *task, unsigned int mode)
-{
- int err;
- task_lock(task);
- err = __ptrace_may_access(task, mode);
- task_unlock(task);
- return !err;
-}
-
-/*
- * For experimental use of utrace, exclude ptrace on the same task.
- */
-static inline bool exclude_ptrace(struct task_struct *task)
-{
- return unlikely(!!task_utrace_flags(task));
-}
-
int ptrace_attach(struct task_struct *task)
{
int retval;
@@ -195,8 +452,6 @@ int ptrace_attach(struct task_struct *task)
task_lock(task);
retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
- if (!retval && exclude_ptrace(task))
- retval = -EBUSY;
task_unlock(task);
if (retval)
goto unlock_creds;
@@ -220,91 +475,37 @@ unlock_tasklist:
write_unlock_irq(&tasklist_lock);
unlock_creds:
mutex_unlock(&task->signal->cred_guard_mutex);
-out:
- return retval;
-}
-
-/**
- * ptrace_traceme -- helper for PTRACE_TRACEME
- *
- * Performs checks and sets PT_PTRACED.
- * Should be used by all ptrace implementations for PTRACE_TRACEME.
- */
-int ptrace_traceme(void)
-{
- int ret = -EPERM;
-
- if (exclude_ptrace(current)) /* XXX locking */
- return -EBUSY;
-
- write_lock_irq(&tasklist_lock);
- /* Are we already being traced? */
- if (!current->ptrace) {
- ret = security_ptrace_traceme(current->parent);
- /*
- * Check PF_EXITING to ensure ->real_parent has not passed
- * exit_ptrace(). Otherwise we don't report the error but
- * pretend ->real_parent untraces us right after return.
- */
- if (!ret && !(current->real_parent->flags & PF_EXITING)) {
- current->ptrace = PT_PTRACED;
- __ptrace_link(current, current->real_parent);
- }
- }
- write_unlock_irq(&tasklist_lock);
-
- return ret;
-}
-
-/*
- * Called with irqs disabled, returns true if childs should reap themselves.
- */
-static int ignoring_children(struct sighand_struct *sigh)
-{
- int ret;
- spin_lock(&sigh->siglock);
- ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
- (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
- spin_unlock(&sigh->siglock);
- return ret;
-}
-
-/*
- * Called with tasklist_lock held for writing.
- * Unlink a traced task, and clean it up if it was a traced zombie.
- * Return true if it needs to be reaped with release_task().
- * (We can't call release_task() here because we already hold tasklist_lock.)
- *
- * If it's a zombie, our attachedness prevented normal parent notification
- * or self-reaping. Do notification now if it would have happened earlier.
- * If it should reap itself, return true.
+out:
+ return retval;
+}
+
+/**
+ * ptrace_traceme -- helper for PTRACE_TRACEME
*
- * If it's our own child, there is no notification to do. But if our normal
- * children self-reap, then this child was prevented by ptrace and we must
- * reap it now, in that case we must also wake up sub-threads sleeping in
- * do_wait().
+ * Performs checks and sets PT_PTRACED.
+ * Should be used by all ptrace implementations for PTRACE_TRACEME.
*/
-bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
+int ptrace_traceme(void)
{
- __ptrace_unlink(p);
+ int ret = -EPERM;
- if (p->exit_state == EXIT_ZOMBIE) {
- if (!task_detached(p) && thread_group_empty(p)) {
- if (!same_thread_group(p->real_parent, tracer))
- do_notify_parent(p, p->exit_signal);
- else if (ignoring_children(tracer->sighand)) {
- __wake_up_parent(p, tracer);
- p->exit_signal = -1;
- }
- }
- if (task_detached(p)) {
- /* Mark it as in the process of being reaped. */
- p->exit_state = EXIT_DEAD;
- return true;
+ write_lock_irq(&tasklist_lock);
+ /* Are we already being traced? */
+ if (!current->ptrace) {
+ ret = security_ptrace_traceme(current->parent);
+ /*
+ * Check PF_EXITING to ensure ->real_parent has not passed
+ * exit_ptrace(). Otherwise we don't report the error but
+ * pretend ->real_parent untraces us right after return.
+ */
+ if (!ret && !(current->real_parent->flags & PF_EXITING)) {
+ current->ptrace = PT_PTRACED;
+ __ptrace_link(current, current->real_parent);
}
}
+ write_unlock_irq(&tasklist_lock);
- return false;
+ return ret;
}
int ptrace_detach(struct task_struct *child, unsigned int data)
@@ -368,57 +569,7 @@ void exit_ptrace(struct task_struct *tracer)
write_lock_irq(&tasklist_lock);
}
-int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
-{
- int copied = 0;
-
- while (len > 0) {
- char buf[128];
- int this_len, retval;
-
- this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
- retval = access_process_vm(tsk, src, buf, this_len, 0);
- if (!retval) {
- if (copied)
- break;
- return -EIO;
- }
- if (copy_to_user(dst, buf, retval))
- return -EFAULT;
- copied += retval;
- src += retval;
- dst += retval;
- len -= retval;
- }
- return copied;
-}
-
-int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
-{
- int copied = 0;
-
- while (len > 0) {
- char buf[128];
- int this_len, retval;
-
- this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
- if (copy_from_user(buf, src, this_len))
- return -EFAULT;
- retval = access_process_vm(tsk, dst, buf, this_len, 1);
- if (!retval) {
- if (copied)
- break;
- return -EIO;
- }
- copied += retval;
- src += retval;
- dst += retval;
- len -= retval;
- }
- return copied;
-}
-
-static int ptrace_setoptions(struct task_struct *child, unsigned long data)
+static int ptrace_setoptions(struct task_struct *child, long data)
{
child->ptrace &= ~PT_TRACE_MASK;
@@ -533,47 +683,6 @@ static int ptrace_resume(struct task_struct *child, long request,
return 0;
}
-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
-
-static const struct user_regset *
-find_regset(const struct user_regset_view *view, unsigned int type)
-{
- const struct user_regset *regset;
- int n;
-
- for (n = 0; n < view->n; ++n) {
- regset = view->regsets + n;
- if (regset->core_note_type == type)
- return regset;
- }
-
- return NULL;
-}
-
-static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
- struct iovec *kiov)
-{
- const struct user_regset_view *view = task_user_regset_view(task);
- const struct user_regset *regset = find_regset(view, type);
- int regset_no;
-
- if (!regset || (kiov->iov_len % regset->size) != 0)
- return -EINVAL;
-
- regset_no = regset - view->regsets;
- kiov->iov_len = min(kiov->iov_len,
- (__kernel_size_t) (regset->n * regset->size));
-
- if (req == PTRACE_GETREGSET)
- return copy_regset_to_user(task, view, regset_no, 0,
- kiov->iov_len, kiov->iov_base);
- else
- return copy_regset_from_user(task, view, regset_no, 0,
- kiov->iov_len, kiov->iov_base);
-}
-
-#endif
-
int ptrace_request(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
@@ -689,91 +798,7 @@ int ptrace_request(struct task_struct *child, long request,
return ret;
}
-static struct task_struct *ptrace_get_task_struct(pid_t pid)
-{
- struct task_struct *child;
-
- rcu_read_lock();
- child = find_task_by_vpid(pid);
- if (child)
- get_task_struct(child);
- rcu_read_unlock();
-
- if (!child)
- return ERR_PTR(-ESRCH);
- return child;
-}
-
-#ifndef arch_ptrace_attach
-#define arch_ptrace_attach(child) do { } while (0)
-#endif
-
-SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
- unsigned long, data)
-{
- struct task_struct *child;
- long ret;
-
- if (request == PTRACE_TRACEME) {
- ret = ptrace_traceme();
- if (!ret)
- arch_ptrace_attach(current);
- goto out;
- }
-
- child = ptrace_get_task_struct(pid);
- if (IS_ERR(child)) {
- ret = PTR_ERR(child);
- goto out;
- }
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- /*
- * Some architectures need to do book-keeping after
- * a ptrace attach.
- */
- if (!ret)
- arch_ptrace_attach(child);
- goto out_put_task_struct;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_put_task_struct;
-
- ret = arch_ptrace(child, request, addr, data);
-
- out_put_task_struct:
- put_task_struct(child);
- out:
- return ret;
-}
-
-int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
- unsigned long data)
-{
- unsigned long tmp;
- int copied;
-
- copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
- if (copied != sizeof(tmp))
- return -EIO;
- return put_user(tmp, (unsigned long __user *)data);
-}
-
-int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
- unsigned long data)
-{
- int copied;
-
- copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
- return (copied == sizeof(data)) ? 0 : -EIO;
-}
-
#if defined CONFIG_COMPAT
-#include <linux/compat.h>
-
int compat_ptrace_request(struct task_struct *child, compat_long_t request,
compat_ulong_t addr, compat_ulong_t data)
{
@@ -851,42 +876,5 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
return ret;
}
-
-asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
- compat_long_t addr, compat_long_t data)
-{
- struct task_struct *child;
- long ret;
-
- if (request == PTRACE_TRACEME) {
- ret = ptrace_traceme();
- goto out;
- }
-
- child = ptrace_get_task_struct(pid);
- if (IS_ERR(child)) {
- ret = PTR_ERR(child);
- goto out;
- }
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- /*
- * Some architectures need to do book-keeping after
- * a ptrace attach.
- */
- if (!ret)
- arch_ptrace_attach(child);
- goto out_put_task_struct;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (!ret)
- ret = compat_arch_ptrace(child, request, addr, data);
-
- out_put_task_struct:
- put_task_struct(child);
- out:
- return ret;
-}
#endif /* CONFIG_COMPAT */
+#endif /* CONFIG_UTRACE */
diff --git a/kernel/utrace.c b/kernel/utrace.c
index 26d6faf..37dce16 100644
--- a/kernel/utrace.c
+++ b/kernel/utrace.c
@@ -816,6 +816,22 @@ relock:
spin_unlock_irq(&task->sighand->siglock);
spin_unlock(&utrace->lock);
+ /*
+ * If ptrace is among the reasons for this stop, do its
+ * notification now. This could not just be done in
+ * ptrace's own event report callbacks because it has to
+ * be done after we are in TASK_TRACED. This makes the
+ * synchronization with ptrace_do_wait() work right.
+ *
+ * It's only because of the bad old overloading of the do_wait()
+ * logic for handling ptrace stops that we need this special case
+ * here. One day we will clean up ptrace so it does not need to
+ * work this way. New things that are designed sensibly don't need
+ * a wakeup that synchronizes with tasklist_lock and ->state, so
+ * the proper utrace API does not try to support this weirdness.
+ */
+ ptrace_notify_stop(task);
+
schedule();
utrace_finish_stop();