ring-buffer: Add missing unlock

In some error handling cases the lock is not unlocked.  The return is
converted to a goto, to share the unlock at the end of the function.

A simplified version of the semantic patch that finds this problem is as
follows: (http://coccinelle.lip6.fr/)

// <smpl>
@r exists@
expression E1;
identifier f;
@@

f (...) { <+...
* spin_lock_irq (E1,...);
... when != E1
* return ...;
...+> }
// </smpl>

Signed-off-by: Julia Lawall <julia@diku.dk>
LKML-Reference: <Pine.LNX.4.64.1003291736440.21896@ask.diku.dk>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
Julia Lawall 2010-03-29 17:37:02 +02:00, committed by Steven Rostedt
Parent: e36673ec51
Commit: 292f60c0c4
1 changed file with 5 additions and 3 deletions

View file

@@ -1209,18 +1209,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-			return;
+			goto out;
 		p = cpu_buffer->pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
 		free_buffer_page(bpage);
 	}
 	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-		return;
+		goto out;
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
@@ -1237,7 +1238,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-			return;
+			goto out;
 		p = pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
@@ -1246,6 +1247,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }