Merge tag 'trace-3.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing cleanups and bugfixes from Steven Rostedt:

 "One bug fix that goes back to 3.10: accessing a non-existent buffer
  if "possible cpus" is greater than actual CPUs (including offline
  CPUs).

  Namhyung Kim did some reviews of the patches I sent this merge window
  and found a memory leak and had a few cleanups"

* tag 'trace-3.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Fix check of ftrace_trace_arrays list_empty() check
  tracing: Fix leak of per cpu max data in instances
  tracing: Cleanup saved_cmdlines_size changes
  ring-buffer: Check if buffer exists before polling
commit 8841c8b3c4
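The underlying bug: the tracing code indexes its per-CPU ring buffers with any CPU index the kernel considers "possible", but a buffer only exists for CPUs that were actually set up. A minimal userspace sketch of that pitfall follows; every name in it is illustrative, not a kernel API:

	#include <stdio.h>

	#define NR_POSSIBLE_CPUS 8          /* what the arrays were sized for */
	#define NR_PRESENT_CPUS  4          /* what the machine really has */

	/* Only present CPUs ever get a buffer; the rest stay empty. */
	static int buffer_exists[NR_POSSIBLE_CPUS] = { 1, 1, 1, 1 };

	int main(void)
	{
		int cpu = 6;	/* "possible" but never brought up */

		/* The fix below adds exactly this kind of membership test
		 * (cpumask_test_cpu) before touching buffer->buffers[cpu]. */
		if (!buffer_exists[cpu]) {
			fprintf(stderr, "cpu%d: no buffer (-ENODEV)\n", cpu);
			return 1;
		}
		printf("cpu%d: ok\n", cpu);
		return 0;
	}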
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
 	__ring_buffer_alloc((size), (flags), &__key);	\
 })
 
-void ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu);
 int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 			  struct file *filp, poll_table *poll_table);
 
@@ -543,7 +543,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	DEFINE_WAIT(wait);
@@ -557,6 +557,8 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
 	if (cpu == RING_BUFFER_ALL_CPUS)
 		work = &buffer->irq_work;
 	else {
+		if (!cpumask_test_cpu(cpu, buffer->cpumask))
+			return -ENODEV;
 		cpu_buffer = buffer->buffers[cpu];
 		work = &cpu_buffer->irq_work;
 	}
@@ -591,6 +593,7 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
 	schedule();
 
 	finish_wait(&work->waiters, &wait);
+	return 0;
 }
 
 /**
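Note the API change in the hunks above: ring_buffer_wait() now returns an int, 0 on success and -ENODEV when the requested CPU has no buffer, so every caller gains an error path. wait_on_pipe() below is the first to propagate it.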
@@ -1085,13 +1085,13 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-static void wait_on_pipe(struct trace_iterator *iter)
+static int wait_on_pipe(struct trace_iterator *iter)
 {
 	/* Iterators are static, they should be filled or empty */
 	if (trace_buffer_iter(iter, iter->cpu_file))
-		return;
+		return 0;
 
-	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
 }
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -1338,7 +1338,7 @@ static int trace_create_savedcmd(void)
 {
 	int ret;
 
-	savedcmd = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
+	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
 	if (!savedcmd)
 		return -ENOMEM;
 
@@ -3840,7 +3840,7 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
 	int r;
 
 	arch_spin_lock(&trace_cmdline_lock);
-	r = sprintf(buf, "%u\n", savedcmd->cmdline_num);
+	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
 	arch_spin_unlock(&trace_cmdline_lock);
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
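The sprintf() call above wrote into the stack buffer with no bound; scnprintf() bounds the write and returns the number of characters actually stored (unlike snprintf(), which returns the length the output would have had if it fit). A userspace approximation, with my_scnprintf as a hypothetical stand-in for the kernel helper:

	#include <stdio.h>

	/* Like the kernel's scnprintf(): bounded, and the return value is
	 * what was actually written, never more than fits in the buffer. */
	static int my_scnprintf(char *buf, size_t size, unsigned int val)
	{
		int n = snprintf(buf, size, "%u\n", val);
		if (n < 0)
			return 0;
		return (size_t)n < size ? n : (int)(size - 1);
	}

	int main(void)
	{
		char buf[4];
		int r = my_scnprintf(buf, sizeof(buf), 123456);
		printf("wrote %d bytes: \"%s\"\n", r, buf);	/* wrote 3 bytes: "123" */
		return 0;
	}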
@@ -3857,7 +3857,7 @@ static int tracing_resize_saved_cmdlines(unsigned int val)
 {
 	struct saved_cmdlines_buffer *s, *savedcmd_temp;
 
-	s = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
+	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
 		return -ENOMEM;
 
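Both kmalloc() conversions above swap sizeof(struct foo) for sizeof(*ptr), the preferred kernel idiom: the allocation size is derived from the pointer itself and cannot drift if the pointer's type is later changed. A trivial userspace illustration (the struct here is a made-up stand-in):

	#include <stdlib.h>

	struct saved_cmdlines_demo {	/* illustrative stand-in struct */
		unsigned int cmdline_num;
	};

	int main(void)
	{
		/* sizeof(*s) tracks s's type automatically; a later change to
		 * the declaration cannot leave the size out of sync. */
		struct saved_cmdlines_demo *s = malloc(sizeof(*s));
		if (!s)
			return 1;
		free(s);
		return 0;
	}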
@@ -4378,6 +4378,7 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 static int tracing_wait_pipe(struct file *filp)
 {
 	struct trace_iterator *iter = filp->private_data;
+	int ret;
 
 	while (trace_empty(iter)) {
 
@@ -4399,10 +4400,13 @@ static int tracing_wait_pipe(struct file *filp)
 
 		mutex_unlock(&iter->mutex);
 
-		wait_on_pipe(iter);
+		ret = wait_on_pipe(iter);
 
 		mutex_lock(&iter->mutex);
 
+		if (ret)
+			return ret;
+
 		if (signal_pending(current))
 			return -EINTR;
 	}
@@ -5327,8 +5331,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 				goto out_unlock;
 			}
 			mutex_unlock(&trace_types_lock);
-			wait_on_pipe(iter);
+			ret = wait_on_pipe(iter);
 			mutex_lock(&trace_types_lock);
+			if (ret) {
+				size = ret;
+				goto out_unlock;
+			}
 			if (signal_pending(current)) {
 				size = -EINTR;
 				goto out_unlock;
@@ -5538,8 +5546,10 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 			goto out;
 		}
 		mutex_unlock(&trace_types_lock);
-		wait_on_pipe(iter);
+		ret = wait_on_pipe(iter);
 		mutex_lock(&trace_types_lock);
+		if (ret)
+			goto out;
 		if (signal_pending(current)) {
 			ret = -EINTR;
 			goto out;
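With all three wait_on_pipe() call sites updated (tracing_wait_pipe, tracing_buffers_read, tracing_buffers_splice_read), a read on a buffer that does not exist now fails cleanly with -ENODEV instead of dereferencing a missing per-CPU ring buffer.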
@@ -6232,22 +6242,25 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 	return 0;
 }
 
+static void free_trace_buffer(struct trace_buffer *buf)
+{
+	if (buf->buffer) {
+		ring_buffer_free(buf->buffer);
+		buf->buffer = NULL;
+		free_percpu(buf->data);
+		buf->data = NULL;
+	}
+}
+
 static void free_trace_buffers(struct trace_array *tr)
 {
 	if (!tr)
 		return;
 
-	if (tr->trace_buffer.buffer) {
-		ring_buffer_free(tr->trace_buffer.buffer);
-		tr->trace_buffer.buffer = NULL;
-		free_percpu(tr->trace_buffer.data);
-	}
+	free_trace_buffer(&tr->trace_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	if (tr->max_buffer.buffer) {
-		ring_buffer_free(tr->max_buffer.buffer);
-		tr->max_buffer.buffer = NULL;
-	}
+	free_trace_buffer(&tr->max_buffer);
 #endif
 }
 
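This refactor is also the memory-leak fix Namhyung found: the old CONFIG_TRACER_MAX_TRACE branch freed max_buffer.buffer but never called free_percpu() on max_buffer.data, so each destroyed instance leaked its per-cpu max data. Routing both buffers through the new free_trace_buffer() helper frees the per-cpu data in both paths.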
@@ -252,7 +252,7 @@ static inline struct trace_array *top_trace_array(void)
 {
 	struct trace_array *tr;
 
-	if (list_empty(ftrace_trace_arrays.prev))
+	if (list_empty(&ftrace_trace_arrays))
 		return NULL;
 
 	tr = list_entry(ftrace_trace_arrays.prev,
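The one-liner above replaces list_empty(ftrace_trace_arrays.prev) with list_empty(&ftrace_trace_arrays). On a well-formed circular list the old expression happens to compute the same answer, since an empty head's .prev points back at the head itself, but it asks the wrong node the question and obscures the intent. A self-contained sketch of the distinction, using a minimal list_head modeled on the kernel's:

	#include <stdio.h>

	/* Minimal circular doubly linked list, like the kernel's list_head. */
	struct list_head { struct list_head *next, *prev; };

	static int list_empty(const struct list_head *head)
	{
		return head->next == head;
	}

	int main(void)
	{
		struct list_head lst = { &lst, &lst };	/* empty: points at itself */
		struct list_head entry;

		/* Correct form: ask the head whether the list is empty. */
		printf("empty:  list_empty(&lst)     = %d\n", list_empty(&lst));	/* 1 */

		/* Old form: asks whether the *last node*, treated as a head, is
		 * an empty list. It coincides with the right answer only via
		 * the circular-list invariant. */
		printf("empty:  list_empty(lst.prev) = %d\n", list_empty(lst.prev));	/* 1, by accident */

		/* Insert one entry at the tail. */
		entry.next = &lst; entry.prev = &lst;
		lst.next = &entry; lst.prev = &entry;

		printf("1 item: list_empty(&lst)     = %d\n", list_empty(&lst));	/* 0 */
		printf("1 item: list_empty(lst.prev) = %d\n", list_empty(lst.prev));	/* 0 */
		return 0;
	}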