perf: Simplify the ring-buffer logic: make perf_buffer_alloc() do everything needed

Currently there are perf_buffer_alloc() + perf_buffer_init() + some
separate bits, fold it all into a single perf_buffer_alloc() and only
leave the attachment to the event separate.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Peter Zijlstra 2010-05-28 19:41:35 +02:00 committed by Ingo Molnar
Parent ca5135e6b4
Commit d57e34fdd6
2 changed files with 36 additions and 27 deletions

View file

@ -602,6 +602,8 @@ enum perf_event_active_state {
struct file; struct file;
#define PERF_BUFFER_WRITABLE 0x01
struct perf_buffer { struct perf_buffer {
atomic_t refcount; atomic_t refcount;
struct rcu_head rcu_head; struct rcu_head rcu_head;

View file

@ -2369,6 +2369,25 @@ unlock:
rcu_read_unlock(); rcu_read_unlock();
} }
static unsigned long perf_data_size(struct perf_buffer *buffer);
/*
 * Initialize the software state of a freshly allocated ring-buffer:
 * wakeup watermark, writability flag and the initial refcount.
 * Unlike the old variant, this no longer touches the perf_event, so
 * a buffer can be fully set up before being attached to an event.
 */
static void
perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
{
/* Watermark is bounded by the buffer's usable data size. */
long max_size = perf_data_size(buffer);
/* Caller-supplied watermark, clamped to the buffer size. */
if (watermark)
buffer->watermark = min(max_size, watermark);
/* Default policy: wake up readers when the buffer is half full. */
if (!buffer->watermark)
buffer->watermark = max_size / 2;
if (flags & PERF_BUFFER_WRITABLE)
buffer->writable = 1;
/* The allocator's reference; dropped when the buffer is detached. */
atomic_set(&buffer->refcount, 1);
}
#ifndef CONFIG_PERF_USE_VMALLOC #ifndef CONFIG_PERF_USE_VMALLOC
/* /*
@ -2401,7 +2420,7 @@ static void *perf_mmap_alloc_page(int cpu)
} }
static struct perf_buffer * static struct perf_buffer *
perf_buffer_alloc(struct perf_event *event, int nr_pages) perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
{ {
struct perf_buffer *buffer; struct perf_buffer *buffer;
unsigned long size; unsigned long size;
@ -2414,18 +2433,20 @@ perf_buffer_alloc(struct perf_event *event, int nr_pages)
if (!buffer) if (!buffer)
goto fail; goto fail;
buffer->user_page = perf_mmap_alloc_page(event->cpu); buffer->user_page = perf_mmap_alloc_page(cpu);
if (!buffer->user_page) if (!buffer->user_page)
goto fail_user_page; goto fail_user_page;
for (i = 0; i < nr_pages; i++) { for (i = 0; i < nr_pages; i++) {
buffer->data_pages[i] = perf_mmap_alloc_page(event->cpu); buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
if (!buffer->data_pages[i]) if (!buffer->data_pages[i])
goto fail_data_pages; goto fail_data_pages;
} }
buffer->nr_pages = nr_pages; buffer->nr_pages = nr_pages;
perf_buffer_init(buffer, watermark, flags);
return buffer; return buffer;
fail_data_pages: fail_data_pages:
@ -2516,7 +2537,7 @@ static void perf_buffer_free(struct perf_buffer *buffer)
} }
static struct perf_buffer * static struct perf_buffer *
perf_buffer_alloc(struct perf_event *event, int nr_pages) perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
{ {
struct perf_buffer *buffer; struct perf_buffer *buffer;
unsigned long size; unsigned long size;
@ -2540,6 +2561,8 @@ perf_buffer_alloc(struct perf_event *event, int nr_pages)
buffer->page_order = ilog2(nr_pages); buffer->page_order = ilog2(nr_pages);
buffer->nr_pages = 1; buffer->nr_pages = 1;
perf_buffer_init(buffer, watermark, flags);
return buffer; return buffer;
fail_all_buf: fail_all_buf:
@ -2591,23 +2614,6 @@ unlock:
return ret; return ret;
} }
/*
 * Old (pre-refactor) initializer, removed by this commit: it mixed
 * buffer setup with event attachment (reading event->attr and
 * publishing the buffer via RCU), which the new perf_buffer_init()
 * deliberately separates out.
 */
static void
perf_buffer_init(struct perf_event *event, struct perf_buffer *buffer)
{
long max_size = perf_data_size(buffer);
/* Clamp the user-requested wakeup watermark to the buffer size. */
if (event->attr.watermark) {
buffer->watermark = min_t(long, max_size,
event->attr.wakeup_watermark);
}
/* Default: wake up readers when the buffer is half full. */
if (!buffer->watermark)
buffer->watermark = max_size / 2;
atomic_set(&buffer->refcount, 1);
/* Publish the buffer to readers of event->buffer (RCU-protected). */
rcu_assign_pointer(event->buffer, buffer);
}
static void perf_buffer_free_rcu(struct rcu_head *rcu_head) static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
{ {
struct perf_buffer *buffer; struct perf_buffer *buffer;
@ -2682,7 +2688,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
unsigned long vma_size; unsigned long vma_size;
unsigned long nr_pages; unsigned long nr_pages;
long user_extra, extra; long user_extra, extra;
int ret = 0; int ret = 0, flags = 0;
/* /*
* Don't allow mmap() of inherited per-task counters. This would * Don't allow mmap() of inherited per-task counters. This would
@ -2747,15 +2753,16 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
WARN_ON(event->buffer); WARN_ON(event->buffer);
buffer = perf_buffer_alloc(event, nr_pages); if (vma->vm_flags & VM_WRITE)
flags |= PERF_BUFFER_WRITABLE;
buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
event->cpu, flags);
if (!buffer) { if (!buffer) {
ret = -ENOMEM; ret = -ENOMEM;
goto unlock; goto unlock;
} }
rcu_assign_pointer(event->buffer, buffer);
perf_buffer_init(event, buffer);
if (vma->vm_flags & VM_WRITE)
event->buffer->writable = 1;
atomic_long_add(user_extra, &user->locked_vm); atomic_long_add(user_extra, &user->locked_vm);
event->mmap_locked = extra; event->mmap_locked = extra;