#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
        struct vm_list_struct *vml;
        unsigned long bytes = 0, sbytes = 0, slack = 0;

        down_read(&mm->mmap_sem);
        for (vml = mm->context.vmlist; vml; vml = vml->next) {
                if (!vml->vma)
                        continue;

                bytes += kobjsize(vml);
                if (atomic_read(&mm->mm_count) > 1 ||
                    atomic_read(&vml->vma->vm_usage) > 1
                    ) {
                        sbytes += kobjsize((void *) vml->vma->vm_start);
                        sbytes += kobjsize(vml->vma);
                } else {
                        bytes += kobjsize((void *) vml->vma->vm_start);
                        bytes += kobjsize(vml->vma);
                        slack += kobjsize((void *) vml->vma->vm_start) -
                                (vml->vma->vm_end - vml->vma->vm_start);
                }
        }

        if (atomic_read(&mm->mm_count) > 1)
                sbytes += kobjsize(mm);
        else
                bytes += kobjsize(mm);

        if (current->fs && atomic_read(&current->fs->count) > 1)
                sbytes += kobjsize(current->fs);
        else
                bytes += kobjsize(current->fs);

        if (current->files && atomic_read(&current->files->count) > 1)
                sbytes += kobjsize(current->files);
        else
                bytes += kobjsize(current->files);

        if (current->sighand && atomic_read(&current->sighand->count) > 1)
                sbytes += kobjsize(current->sighand);
        else
                bytes += kobjsize(current->sighand);

        bytes += kobjsize(current); /* includes kernel stack */

        seq_printf(m,
                "Mem:\t%8lu bytes\n"
                "Slack:\t%8lu bytes\n"
                "Shared:\t%8lu bytes\n",
                bytes, slack, sbytes);

        up_read(&mm->mmap_sem);
}
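
/*
 * Report the amount of virtual memory consumed: the sum of the sizes of
 * the allocations backing each region on the nommu VMA list.
 */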
unsigned long task_vsize(struct mm_struct *mm)
{
        struct vm_list_struct *tbp;
        unsigned long vsize = 0;

        down_read(&mm->mmap_sem);
        for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
                if (tbp->vma)
                        vsize += kobjsize((void *) tbp->vma->vm_start);
        }
        up_read(&mm->mmap_sem);
        return vsize;
}
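
/*
 * Derive the figures reported through /proc/<pid>/statm: the size of the
 * mm, its VMA list entries and their backing allocations, plus the text
 * and data segment extents.  The same total is reported as the resident
 * figure.
 */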
int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
{
        struct vm_list_struct *tbp;
        int size = kobjsize(mm);

        down_read(&mm->mmap_sem);
        for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
                size += kobjsize(tbp);
                if (tbp->vma) {
                        size += kobjsize(tbp->vma);
                        size += kobjsize((void *) tbp->vma->vm_start);
                }
        }

        size += (*text = mm->end_code - mm->start_code);
        size += (*data = mm->start_stack - mm->start_data);
        up_read(&mm->mmap_sem);
        *resident = size;
        return size;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_vml)
{
        struct vm_list_struct *vml = _vml;
        struct proc_maps_private *priv = m->private;
        struct task_struct *task = priv->task;

        if (maps_protect && !ptrace_may_access(task, PTRACE_MODE_READ))
                return -EACCES;

        return nommu_vma_show(m, vml->vma);
}
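
/*
 * seq_file iterator for /proc/<pid>/maps: m_start() pins the task,
 * obtains its mm via mm_for_maps() (which leaves mmap_sem held for
 * reading) and seeks to the Nth entry on the VMA list; m_next()
 * advances along the list and m_stop() drops the lock and the
 * references taken.
 */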
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct vm_list_struct *vml;
        struct mm_struct *mm;
        loff_t n = *pos;

        /* pin the task and mm whilst we play with them */
        priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
        if (!priv->task)
                return NULL;

        mm = mm_for_maps(priv->task);
        if (!mm) {
                put_task_struct(priv->task);
                priv->task = NULL;
                return NULL;
        }

        /* start from the Nth VMA */
        for (vml = mm->context.vmlist; vml; vml = vml->next)
                if (n-- == 0)
                        return vml;
        return NULL;
}

static void m_stop(struct seq_file *m, void *_vml)
{
        struct proc_maps_private *priv = m->private;

        if (priv->task) {
                struct mm_struct *mm = priv->task->mm;
                up_read(&mm->mmap_sem);
                mmput(mm);
                put_task_struct(priv->task);
        }
}

static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
{
        struct vm_list_struct *vml = _vml;

        (*pos)++;
        return vml ? vml->next : NULL;
}

static const struct seq_operations proc_pid_maps_ops = {
        .start = m_start,
        .next = m_next,
        .stop = m_stop,
        .show = show_map
};
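
/*
 * Set up the seq_file for /proc/<pid>/maps: allocate the per-open
 * proc_maps_private, record the target pid and attach it to the
 * seq_file's private pointer.
 */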
static int maps_open(struct inode *inode, struct file *file)
{
        struct proc_maps_private *priv;
        int ret = -ENOMEM;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv) {
                priv->pid = proc_pid(inode);
                ret = seq_open(file, &proc_pid_maps_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = priv;
                } else {
                        kfree(priv);
                }
        }
        return ret;
}
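
/*
 * seq_release_private() frees the proc_maps_private allocated in
 * maps_open() when the file is closed.
 */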
const struct file_operations proc_maps_operations = {
        .open = maps_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_private,
};