perf/ring_buffer: Match the memory allocation and free in rb_alloc()

Currently, perf_mmap_alloc_page() is used to allocate memory in
rb_alloc(), but free_page() is used to free that memory in the failure
path.

It's better to use the matching perf_mmap_free_page() instead.

Signed-off-by: Yunfeng Ye <yeyunfeng@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <jolsa@redhat.com>
Cc: <acme@kernel.org>
Cc: <mingo@redhat.com>
Cc: <mark.rutland@arm.com>
Cc: <namhyung@kernel.org>
Cc: <alexander.shishkin@linux.intel.com>
Link: https://lkml.kernel.org/r/575c7e8c-90c7-4e3a-b41d-f894d8cdbd7f@huawei.com
Author: Yunfeng Ye, 2019-10-14 16:15:57 +08:00; committed by Peter Zijlstra
Parent: 8a9f91c51e
Commit: d7e78706e4
1 changed file with 10 additions and 10 deletions


@@ -754,6 +754,14 @@ static void *perf_mmap_alloc_page(int cpu)
 	return page_address(page);
 }
 
+static void perf_mmap_free_page(void *addr)
+{
+	struct page *page = virt_to_page(addr);
+
+	page->mapping = NULL;
+	__free_page(page);
+}
+
 struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 {
 	struct ring_buffer *rb;
@@ -788,9 +796,9 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 
 fail_data_pages:
 	for (i--; i >= 0; i--)
-		free_page((unsigned long)rb->data_pages[i]);
+		perf_mmap_free_page(rb->data_pages[i]);
 
-	free_page((unsigned long)rb->user_page);
+	perf_mmap_free_page(rb->user_page);
 
 fail_user_page:
 	kfree(rb);
@@ -799,14 +807,6 @@ fail:
 	return NULL;
 }
 
-static void perf_mmap_free_page(void *addr)
-{
-	struct page *page = virt_to_page(addr);
-
-	page->mapping = NULL;
-	__free_page(page);
-}
-
 void rb_free(struct ring_buffer *rb)
 {
 	int i;
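
For illustration only, here is a minimal userspace C sketch of the pattern the
patch enforces: every page obtained through the allocation wrapper is released
through the matching free wrapper, including in the partial-cleanup loop taken
when a later allocation fails. The names buf_alloc_page(), buf_free_page(),
ring_alloc() and ring_free() are hypothetical stand-ins, not kernel APIs; the
real code pairs perf_mmap_alloc_page() with perf_mmap_free_page(), which also
clears page->mapping before calling __free_page().

/*
 * Userspace analogy of the matched alloc/free wrappers in
 * kernel/events ring-buffer setup. Not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

struct ring {
	void *user_page;
	int nr_pages;
	void *data_pages[];	/* flexible array of per-CPU data pages */
};

/* allocation wrapper: one zeroed, page-sized block */
static void *buf_alloc_page(void)
{
	return calloc(1, PAGE_SIZE);
}

/* matching free wrapper: the only way pages from buf_alloc_page() are released */
static void buf_free_page(void *addr)
{
	free(addr);
}

static struct ring *ring_alloc(int nr_pages)
{
	struct ring *rb;
	int i;

	rb = malloc(sizeof(*rb) + nr_pages * sizeof(void *));
	if (!rb)
		return NULL;

	rb->user_page = buf_alloc_page();
	if (!rb->user_page)
		goto fail_rb;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = buf_alloc_page();
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}
	rb->nr_pages = nr_pages;
	return rb;

fail_data_pages:
	/* undo only the pages that were successfully allocated so far */
	for (i--; i >= 0; i--)
		buf_free_page(rb->data_pages[i]);

	buf_free_page(rb->user_page);
fail_rb:
	free(rb);
	return NULL;
}

static void ring_free(struct ring *rb)
{
	int i;

	buf_free_page(rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		buf_free_page(rb->data_pages[i]);
	free(rb);
}

int main(void)
{
	struct ring *rb = ring_alloc(4);

	if (rb) {
		printf("allocated %d data pages\n", rb->nr_pages);
		ring_free(rb);
	}
	return 0;
}

The design point mirrors the patch: once an allocation goes through a wrapper,
the teardown paths (both the failure path and the normal free path) must call
the wrapper's counterpart rather than the raw allocator's free routine, so the
two sides stay in sync if the wrapper ever adds bookkeeping.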