ring-buffer: Add ring_buffer_wake_waiters()
On closing of a file that represents a ring buffer, or on flushing the file,
there may be waiters on the ring buffer that need to be woken up so that they
can exit the ring_buffer_wait() function.
Add ring_buffer_wake_waiters() to wake up the waiters on the ring buffer
and allow them to exit the wait loop.
Link: https://lkml.kernel.org/r/20220928133938.28dc2c27@gandalf.local.home
Cc: stable@vger.kernel.org
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Fixes: 15693458c4 ("tracing/ring-buffer: Move poll wake ups into ring buffer code")
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
This commit is contained in:
Parent
ec0bbc5ec5
Commit
7e9fbbb1b7
|
@ -101,7 +101,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
|
|||
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
|
||||
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
|
||||
struct file *filp, poll_table *poll_table);
|
||||
|
||||
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
|
||||
|
||||
#define RING_BUFFER_ALL_CPUS -1
|
||||
|
||||
|
|
|
@ -413,6 +413,7 @@ struct rb_irq_work {
|
|||
struct irq_work work;
|
||||
wait_queue_head_t waiters;
|
||||
wait_queue_head_t full_waiters;
|
||||
long wait_index;
|
||||
bool waiters_pending;
|
||||
bool full_waiters_pending;
|
||||
bool wakeup_full;
|
||||
|
@ -924,6 +925,37 @@ static void rb_wake_up_waiters(struct irq_work *work)
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ring_buffer_wake_waiters - wake up any waiters on this ring buffer
|
||||
* @buffer: The ring buffer to wake waiters on
|
||||
*
|
||||
* In the case of a file that represents a ring buffer is closing,
|
||||
* it is prudent to wake up any waiters that are on this.
|
||||
*/
|
||||
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
|
||||
{
|
||||
struct ring_buffer_per_cpu *cpu_buffer;
|
||||
struct rb_irq_work *rbwork;
|
||||
|
||||
if (cpu == RING_BUFFER_ALL_CPUS) {
|
||||
|
||||
/* Wake up individual ones too. One level recursion */
|
||||
for_each_buffer_cpu(buffer, cpu)
|
||||
ring_buffer_wake_waiters(buffer, cpu);
|
||||
|
||||
rbwork = &buffer->irq_work;
|
||||
} else {
|
||||
cpu_buffer = buffer->buffers[cpu];
|
||||
rbwork = &cpu_buffer->irq_work;
|
||||
}
|
||||
|
||||
rbwork->wait_index++;
|
||||
/* make sure the waiters see the new index */
|
||||
smp_wmb();
|
||||
|
||||
rb_wake_up_waiters(&rbwork->work);
|
||||
}
|
||||
|
||||
/**
|
||||
* ring_buffer_wait - wait for input to the ring buffer
|
||||
* @buffer: buffer to wait on
|
||||
|
@ -939,6 +971,7 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
|
|||
struct ring_buffer_per_cpu *cpu_buffer;
|
||||
DEFINE_WAIT(wait);
|
||||
struct rb_irq_work *work;
|
||||
long wait_index;
|
||||
int ret = 0;
|
||||
|
||||
/*
|
||||
|
@ -957,6 +990,7 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
|
|||
work = &cpu_buffer->irq_work;
|
||||
}
|
||||
|
||||
wait_index = READ_ONCE(work->wait_index);
|
||||
|
||||
while (true) {
|
||||
if (full)
|
||||
|
@ -1021,6 +1055,11 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
|
|||
}
|
||||
|
||||
schedule();
|
||||
|
||||
/* Make sure to see the new wait index */
|
||||
smp_rmb();
|
||||
if (wait_index != work->wait_index)
|
||||
break;
|
||||
}
|
||||
|
||||
if (full)
|
||||
|
|
Loading…
Link in new issue