bpf, events: fix offset in skb copy handler
This patch fixes the __output_custom() routine we currently use with bpf_skb_copy(). I missed that when len is larger than the size of the current handle, we can issue multiple invocations of copy_func, and __output_custom() advances the destination but also the source buffer by the number of bytes written. For __output_custom() this is wrong, since there the source buffer points to a non-linear object, in our case an skb, which the copy_func helper is supposed to walk. Since the source is non-linear, we instead need to pass the offset into the helper, so that copy_func can use it to extract the data from the source object.

Therefore, adjust the callback signatures accordingly and pass the offset into the skb_header_pointer() invoked from the bpf_skb_copy() callback. __DEFINE_OUTPUT_COPY_BODY() is adjusted to accommodate two things: i) a flag saying whether the source buffer should be advanced, which is a compile-time constant condition, and ii) the offset for __output_custom(), passed in via __VA_ARGS__ so that everything can stay inlined as it is today. Both changes allow the __output_* fast-path helpers to be adapted without extra overhead.

Fixes: 555c8a8623 ("bpf: avoid stack copy and use skb ctx for event output")
Fixes: 7e3f977edd ("perf, events: add non-linear data support for raw records")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: a1b43eddae
Commit: aa7145c16d
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -211,7 +211,7 @@ bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *f
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
 
 typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
-					unsigned long len);
+					unsigned long off, unsigned long len);
 
 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -70,7 +70,7 @@ struct perf_callchain_entry_ctx {
 };
 
 typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
-				     unsigned long len);
+				     unsigned long off, unsigned long len);
 
 struct perf_raw_frag {
 	union {
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -123,18 +123,19 @@ static inline unsigned long perf_aux_size(struct ring_buffer *rb)
 	return rb->aux_nr_pages << PAGE_SHIFT;
 }
 
-#define __DEFINE_OUTPUT_COPY_BODY(memcpy_func)				\
+#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
 {									\
 	unsigned long size, written;					\
 									\
 	do {								\
 		size = min(handle->size, len);				\
-		written = memcpy_func(handle->addr, buf, size);		\
+		written = memcpy_func(__VA_ARGS__);			\
 		written = size - written;				\
 									\
 		len -= written;						\
 		handle->addr += written;				\
-		buf += written;						\
+		if (advance_buf)					\
+			buf += written;					\
 		handle->size -= written;				\
 		if (!handle->size) {					\
 			struct ring_buffer *rb = handle->rb;		\
@@ -153,12 +154,16 @@ static inline unsigned long perf_aux_size(struct ring_buffer *rb)
 static inline unsigned long						\
 func_name(struct perf_output_handle *handle,				\
 	  const void *buf, unsigned long len)				\
-__DEFINE_OUTPUT_COPY_BODY(memcpy_func)
+__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
 
 static inline unsigned long
 __output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
 		const void *buf, unsigned long len)
-__DEFINE_OUTPUT_COPY_BODY(copy_func)
+{
+	unsigned long orig_len = len;
+	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
+				  orig_len - len, size)
+}
 
 static inline unsigned long
 memcpy_common(void *dst, const void *src, unsigned long n)
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2026,9 +2026,9 @@ bool bpf_helper_changes_skb_data(void *func)
 }
 
 static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
-				  unsigned long len)
+				  unsigned long off, unsigned long len)
 {
-	void *ptr = skb_header_pointer(skb, 0, len, dst_buff);
+	void *ptr = skb_header_pointer(skb, off, len, dst_buff);
 
 	if (unlikely(!ptr))
 		return len;
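For background on the net/core/filter.c hunk: skb_header_pointer(skb, off, len, buffer) returns a pointer straight into the skb's linear data when the requested range fits there; otherwise it gathers the bytes, which may live in paged fragments, into the caller-supplied buffer and returns that buffer, or NULL when the range is out of bounds. With the offset now passed through, bpf_skb_copy() can hand any window of the skb to this helper, and a NULL result becomes "return len", i.e. nothing copied, which stops the output loop. The sketch below is a simplified userspace model of that contract only (struct toy_skb and toy_header_pointer() are illustrative stand-ins, not the kernel implementation).

#include <stdio.h>

/* Toy skb: a linear head plus one "paged" fragment. */
struct toy_skb {
	const char *head;
	unsigned long headlen;
	const char *frag;
	unsigned long fraglen;
};

/* Simplified model of the skb_header_pointer() contract:
 * - range entirely in the linear head: return a pointer into it, no copy;
 * - range (partly) outside the head but in bounds: gather the bytes into
 *   the caller-supplied buffer and return the buffer;
 * - range out of bounds: return NULL.
 */
static void *toy_header_pointer(const struct toy_skb *skb, unsigned long off,
				unsigned long len, void *buffer)
{
	if (off + len > skb->headlen + skb->fraglen)
		return NULL;
	if (off + len <= skb->headlen)
		return (void *)(skb->head + off);

	for (unsigned long i = 0; i < len; i++) {
		unsigned long pos = off + i;

		((char *)buffer)[i] = pos < skb->headlen ?
				      skb->head[pos] :
				      skb->frag[pos - skb->headlen];
	}
	return buffer;
}

int main(void)
{
	struct toy_skb skb = {
		.head = "linear|", .headlen = 7,
		.frag = "fragged", .fraglen = 7,
	};
	char scratch[16];
	void *p;

	/* Entirely linear: a pointer into the head, nothing copied. */
	p = toy_header_pointer(&skb, 0, 7, scratch);
	printf("in head: %d, data: %.7s\n", p == (void *)skb.head, (char *)p);

	/* Crosses into the fragment: gathered into the scratch buffer. */
	p = toy_header_pointer(&skb, 4, 8, scratch);
	printf("in scratch: %d, data: %.8s\n", p == (void *)scratch, (char *)p);

	/* Out of bounds: NULL, which bpf_skb_copy() maps to "return len". */
	p = toy_header_pointer(&skb, 10, 8, scratch);
	printf("%s\n", p ? "unexpected" : "NULL");
	return 0;
}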