Merge tag 'sched-urgent-2022-05-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fix from Thomas Gleixner:

 "The recent expansion of the sched switch tracepoint inserted a new
  argument in the middle of the arguments. This reordering broke BPF
  programs which relied on the old argument list.

  While tracepoints are not considered stable ABI, it's not trivial to
  make BPF cope with such a change, but it's being worked on. For now
  restore the original argument order and move the new argument to the
  end of the argument list"

* tag 'sched-urgent-2022-05-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/tracing: Append prev_state to tp args instead
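As context for the breakage described above: BPF programs bind to sched_switch by argument position, not by name, so inserting prev_state in the middle silently shifted every later argument. The sketch below is illustrative only and not part of the commit; it assumes a libbpf/CO-RE build (vmlinux.h generated with bpftool) and handle_switch is a made-up program name. With prev_state appended at the end, a handler written against the older three-argument layout keeps reading the right pointers.

/* Minimal sketch, assuming libbpf + CO-RE; handle_switch is hypothetical. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("tp_btf/sched_switch")
int BPF_PROG(handle_switch, bool preempt, struct task_struct *prev,
	     struct task_struct *next)
{
	/* Arguments are matched positionally; with prev_state appended at
	 * the end, prev and next remain arguments 1 and 2, so this
	 * three-argument handler still sees the task pointers it expects. */
	bpf_printk("sched_switch %d -> %d", prev->pid, next->pid);
	return 0;
}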
Commit 990e798d18
@@ -222,11 +222,11 @@ static inline long __trace_sched_switch_state(bool preempt,
 TRACE_EVENT(sched_switch,
 
 	TP_PROTO(bool preempt,
-		 unsigned int prev_state,
 		 struct task_struct *prev,
-		 struct task_struct *next),
+		 struct task_struct *next,
+		 unsigned int prev_state),
 
-	TP_ARGS(preempt, prev_state, prev, next),
+	TP_ARGS(preempt, prev, next, prev_state),
 
 	TP_STRUCT__entry(
 		__array( char, prev_comm, TASK_COMM_LEN )
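TP_PROTO above is the single source of truth for every consumer's call signature: the generated trace_sched_switch() wrapper, the in-kernel probes updated in the hunks below, and the raw arguments handed to BPF. As an illustrative sketch only (my_sched_switch_probe and the module boilerplate are hypothetical, not from the commit), a probe registered against this tracepoint now has to use the restored order with prev_state last:

/* Hypothetical module-side probe; register_trace_sched_switch() and
 * unregister_trace_sched_switch() are generated for the event by the
 * TRACE_EVENT()/DECLARE_TRACE() machinery. */
#include <linux/module.h>
#include <trace/events/sched.h>

static void my_sched_switch_probe(void *data, bool preempt,
				  struct task_struct *prev,
				  struct task_struct *next,
				  unsigned int prev_state)
{
	/* data is the cookie passed at registration time. */
	pr_debug("switch %d -> %d (prev_state=%u)\n",
		 prev->pid, next->pid, prev_state);
}

static int __init my_probe_init(void)
{
	return register_trace_sched_switch(my_sched_switch_probe, NULL);
}

static void __exit my_probe_exit(void)
{
	unregister_trace_sched_switch(my_sched_switch_probe, NULL);
	tracepoint_synchronize_unregister();
}

module_init(my_probe_init);
module_exit(my_probe_exit);
MODULE_LICENSE("GPL");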
@@ -6382,7 +6382,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
 		migrate_disable_switch(rq, prev);
 		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
 
-		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev_state, prev, next);
+		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
 
 		/* Also unlocks the rq: */
 		rq = context_switch(rq, prev, next, &rf);
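The arguments at this call site are exactly what a raw tracepoint BPF program receives as an untyped args array, indexed by position, which is why the emit-site order is what BPF sees. Illustrative sketch only (raw_switch is a made-up name; assumes a libbpf/CO-RE build): after this change args[1] and args[2] are prev and next again, with prev_state at args[3].

/* Sketch, not from the commit: a raw tracepoint sees the TP_PROTO arguments
 * as an untyped array. After this change:
 *   args[0] = preempt, args[1] = prev, args[2] = next, args[3] = prev_state */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

SEC("raw_tp/sched_switch")
int raw_switch(struct bpf_raw_tracepoint_args *ctx)
{
	struct task_struct *prev = (struct task_struct *)ctx->args[1];
	struct task_struct *next = (struct task_struct *)ctx->args[2];

	/* Raw tracepoint pointers are untyped, so read fields via CO-RE. */
	bpf_printk("%d -> %d", BPF_CORE_READ(prev, pid), BPF_CORE_READ(next, pid));
	return 0;
}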
@@ -404,9 +404,9 @@ free:
 
 static void
 ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
-				unsigned int prev_state,
 				struct task_struct *prev,
-				struct task_struct *next)
+				struct task_struct *next,
+				unsigned int prev_state)
 {
 	unsigned long long timestamp;
 	int index;
@@ -7420,9 +7420,9 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
 
 static void
 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
-				     unsigned int prev_state,
 				     struct task_struct *prev,
-				     struct task_struct *next)
+				     struct task_struct *next,
+				     unsigned int prev_state)
 {
 	struct trace_array *tr = data;
 	struct trace_pid_list *pid_list;
@@ -773,9 +773,9 @@ void trace_event_follow_fork(struct trace_array *tr, bool enable)
 
 static void
 event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
-					 unsigned int prev_state,
 					 struct task_struct *prev,
-					 struct task_struct *next)
+					 struct task_struct *next,
+					 unsigned int prev_state)
 {
 	struct trace_array *tr = data;
 	struct trace_pid_list *no_pid_list;
@@ -799,9 +799,9 @@ event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
 
 static void
 event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
-					  unsigned int prev_state,
 					  struct task_struct *prev,
-					  struct task_struct *next)
+					  struct task_struct *next,
+					  unsigned int prev_state)
 {
 	struct trace_array *tr = data;
 	struct trace_pid_list *no_pid_list;
@@ -1168,9 +1168,9 @@ thread_exit(struct osnoise_variables *osn_var, struct task_struct *t)
  */
 static void
 trace_sched_switch_callback(void *data, bool preempt,
-			    unsigned int prev_state,
 			    struct task_struct *p,
-			    struct task_struct *n)
+			    struct task_struct *n,
+			    unsigned int prev_state)
 {
 	struct osnoise_variables *osn_var = this_cpu_osn_var();
 
@@ -22,8 +22,8 @@ static DEFINE_MUTEX(sched_register_mutex);
 
 static void
 probe_sched_switch(void *ignore, bool preempt,
-		   unsigned int prev_state,
-		   struct task_struct *prev, struct task_struct *next)
+		   struct task_struct *prev, struct task_struct *next,
+		   unsigned int prev_state)
 {
 	int flags;
 
@@ -426,8 +426,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 
 static void notrace
 probe_wakeup_sched_switch(void *ignore, bool preempt,
-			  unsigned int prev_state,
-			  struct task_struct *prev, struct task_struct *next)
+			  struct task_struct *prev, struct task_struct *next,
+			  unsigned int prev_state)
 {
 	struct trace_array_cpu *data;
 	u64 T0, T1, delta;
@@ -25,11 +25,11 @@ TRACE_CUSTOM_EVENT(sched_switch,
 	 * that the custom event is using.
 	 */
 	TP_PROTO(bool preempt,
-		 unsigned int prev_state,
 		 struct task_struct *prev,
-		 struct task_struct *next),
+		 struct task_struct *next,
+		 unsigned int prev_state),
 
-	TP_ARGS(preempt, prev_state, prev, next),
+	TP_ARGS(preempt, prev, next, prev_state),
 
 	/*
 	 * The next fields are where the customization happens.
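One detail worth noting for the custom-event sample: only TP_PROTO/TP_ARGS have to track the tracepoint's argument order, because TP_fast_assign consumes the parameters by name, so the recorded fields and the event format exposed to userspace are unchanged. The fragment below is illustrative only; the fields chosen here are hypothetical and not the sample's actual fields.

/* Illustrative TRACE_CUSTOM_EVENT fragment (belongs in a custom-event header
 * built with the trace_custom_events machinery, omitted here for brevity). */
TRACE_CUSTOM_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next,
		 unsigned int prev_state),

	TP_ARGS(preempt, prev, next, prev_state),

	TP_STRUCT__entry(
		__field( unsigned int, prev_state )
		__field( pid_t,        next_pid   )
	),

	TP_fast_assign(
		/* Assignment is by parameter name, so moving prev_state to the
		 * end of TP_PROTO/TP_ARGS changes nothing recorded here. */
		__entry->prev_state = prev_state;
		__entry->next_pid   = next->pid;
	),

	TP_printk("prev_state=%u next_pid=%d",
		  __entry->prev_state, __entry->next_pid)
)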