perf_counter: Remove munmap stuff

In the name of keeping it simple, only track mmap events. Userspace
will have to remove old overlapping maps when it encounters them.
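
With PERF_EVENT_MUNMAP gone, a consumer of the event stream has to
prune its own state: when a new PERF_EVENT_MMAP record overlaps a
range it already tracks, the stale entry should be dropped. A minimal
userspace sketch of that idea (illustrative only, not part of this
patch; the map_entry list and maps__insert() helper are hypothetical):

#include <stdlib.h>

struct map_entry {
	unsigned long		start;
	unsigned long		end;	/* start + len */
	struct map_entry	*next;
};

static struct map_entry *maps;

/* Called for each PERF_EVENT_MMAP record read from the buffer. */
static void maps__insert(unsigned long start, unsigned long len)
{
	unsigned long end = start + len;
	struct map_entry **p = &maps, *m;

	/* Drop previously recorded maps that overlap [start, end). */
	while ((m = *p) != NULL) {
		if (m->start < end && start < m->end) {
			*p = m->next;
			free(m);
		} else {
			p = &m->next;
		}
	}

	m = malloc(sizeof(*m));
	if (!m)
		return;
	m->start = start;
	m->end   = end;
	m->next  = maps;
	maps     = m;
}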

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra 2009-06-04 17:08:58 +02:00, committed by Ingo Molnar
Parent 60313ebed7
Commit d99e944620
3 changed files with 4 additions and 51 deletions

--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h

@@ -148,11 +148,10 @@ struct perf_counter_attr {
 				exclude_hv     :  1, /* ditto hypervisor      */
 				exclude_idle   :  1, /* don't count when idle */
 				mmap           :  1, /* include mmap data     */
-				munmap         :  1, /* include munmap data   */
 				comm           :  1, /* include comm data     */
 				freq           :  1, /* use freq, not period  */
 
-				__reserved_1   : 52;
+				__reserved_1   : 53;
 
 	__u32			wakeup_events;	/* wakeup every n events */
 	__u32			__reserved_2;
@@ -246,7 +245,6 @@ enum perf_event_type {
 	 * };
 	 */
 	PERF_EVENT_MMAP			= 1,
-	PERF_EVENT_MUNMAP		= 2,
 
 	/*
 	 * struct {
@@ -622,9 +620,6 @@ extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
 
 extern void perf_counter_mmap(unsigned long addr, unsigned long len,
 			      unsigned long pgoff, struct file *file);
-extern void perf_counter_munmap(unsigned long addr, unsigned long len,
-				unsigned long pgoff, struct file *file);
-
 extern void perf_counter_comm(struct task_struct *tsk);
 extern void perf_counter_fork(struct task_struct *tsk);
@@ -677,10 +672,6 @@ static inline void
 perf_counter_mmap(unsigned long addr, unsigned long len,
 		  unsigned long pgoff, struct file *file)		{ }
 
-static inline void
-perf_counter_munmap(unsigned long addr, unsigned long len,
-		    unsigned long pgoff, struct file *file)		{ }
-
 static inline void perf_counter_comm(struct task_struct *tsk)		{ }
 static inline void perf_counter_fork(struct task_struct *tsk)		{ }
 static inline void perf_counter_init(void)				{ }

--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c

@@ -41,7 +41,6 @@ static int perf_overcommit __read_mostly = 1;
 
 static atomic_t nr_counters __read_mostly;
 static atomic_t nr_mmap_counters __read_mostly;
-static atomic_t nr_munmap_counters __read_mostly;
 static atomic_t nr_comm_counters __read_mostly;
 
 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
@@ -1448,8 +1447,6 @@ static void free_counter(struct perf_counter *counter)
 	atomic_dec(&nr_counters);
 	if (counter->attr.mmap)
 		atomic_dec(&nr_mmap_counters);
-	if (counter->attr.munmap)
-		atomic_dec(&nr_munmap_counters);
 	if (counter->attr.comm)
 		atomic_dec(&nr_comm_counters);
@@ -2510,7 +2507,7 @@ static void perf_counter_fork_output(struct perf_counter *counter,
 
 static int perf_counter_fork_match(struct perf_counter *counter)
 {
-	if (counter->attr.comm || counter->attr.mmap || counter->attr.munmap)
+	if (counter->attr.comm || counter->attr.mmap)
 		return 1;
 
 	return 0;
@@ -2557,8 +2554,7 @@ void perf_counter_fork(struct task_struct *task)
 	struct perf_fork_event fork_event;
 
 	if (!atomic_read(&nr_comm_counters) &&
-	    !atomic_read(&nr_mmap_counters) &&
-	    !atomic_read(&nr_munmap_counters))
+	    !atomic_read(&nr_mmap_counters))
 		return;
 
 	fork_event = (struct perf_fork_event){
@@ -2722,12 +2718,7 @@ static void perf_counter_mmap_output(struct perf_counter *counter,
 static int perf_counter_mmap_match(struct perf_counter *counter,
 				   struct perf_mmap_event *mmap_event)
 {
-	if (counter->attr.mmap &&
-	    mmap_event->event.header.type == PERF_EVENT_MMAP)
-		return 1;
-
-	if (counter->attr.munmap &&
-	    mmap_event->event.header.type == PERF_EVENT_MUNMAP)
+	if (counter->attr.mmap)
 		return 1;
 
 	return 0;
@@ -2821,27 +2812,6 @@ void perf_counter_mmap(unsigned long addr, unsigned long len,
 	perf_counter_mmap_event(&mmap_event);
 }
 
-void perf_counter_munmap(unsigned long addr, unsigned long len,
-			 unsigned long pgoff, struct file *file)
-{
-	struct perf_mmap_event mmap_event;
-
-	if (!atomic_read(&nr_munmap_counters))
-		return;
-
-	mmap_event = (struct perf_mmap_event){
-		.file   = file,
-		.event  = {
-			.header = { .type = PERF_EVENT_MUNMAP, },
-			.start  = addr,
-			.len    = len,
-			.pgoff  = pgoff,
-		},
-	};
-
-	perf_counter_mmap_event(&mmap_event);
-}
-
 /*
  * Log sample_period changes so that analyzing tools can re-normalize the
  * event flow.
@@ -3525,8 +3495,6 @@ done:
 	atomic_inc(&nr_counters);
 	if (counter->attr.mmap)
 		atomic_inc(&nr_mmap_counters);
-	if (counter->attr.munmap)
-		atomic_inc(&nr_munmap_counters);
 	if (counter->attr.comm)
 		atomic_inc(&nr_comm_counters);

--- a/mm/mmap.c
+++ b/mm/mmap.c

@@ -1756,12 +1756,6 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 	do {
 		long nrpages = vma_pages(vma);
 
-		if (vma->vm_flags & VM_EXEC) {
-			perf_counter_munmap(vma->vm_start,
-					    nrpages << PAGE_SHIFT,
-					    vma->vm_pgoff, vma->vm_file);
-		}
-
 		mm->total_vm -= nrpages;
 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
 		vma = remove_vma(vma);