rcu: Remove CONFIG_PREEMPT_RCU
Now that CONFIG_TREE_PREEMPT_RCU is in place, there is no further
need for CONFIG_PREEMPT_RCU.  Remove it, along with whatever subtle
bugs it may (or may not) contain.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josht@linux.vnet.ibm.com
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
LKML-Reference: <125097461396-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Parent: f41d911f8c
Commit: 6b3ef48adf
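
Note for readers porting out-of-tree code (an aside, not part of the
patch): any #ifdef block still keyed to the removed option is silently
compiled out after this commit, so such code should test the tree-based
option instead.  A minimal sketch of the conversion; my_dump_nesting()
is a hypothetical example function, and rcu_read_lock_nesting is the
task field that both the old and new preemptible implementations
provide (see the sched.h hunk below):

	/* Before this commit: built only for the old preemptible RCU. */
	#ifdef CONFIG_PREEMPT_RCU
	static void my_dump_nesting(struct task_struct *t)	/* hypothetical */
	{
		printk(KERN_INFO "nesting=%d\n", t->rcu_read_lock_nesting);
	}
	#endif

	/* After this commit: key off the tree-based preemptible RCU. */
	#ifdef CONFIG_TREE_PREEMPT_RCU
	static void my_dump_nesting(struct task_struct *t)	/* hypothetical */
	{
		printk(KERN_INFO "nesting=%d\n", t->rcu_read_lock_nesting);
	}
	#endif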
Documentation/RCU/rcu.txt
@@ -36,7 +36,7 @@ o How can the updater tell when a grace period has completed
 	executed in user mode, or executed in the idle loop, we can
 	safely free up that item.
 
-	Preemptible variants of RCU (CONFIG_PREEMPT_RCU) get the
+	Preemptible variants of RCU (CONFIG_TREE_PREEMPT_RCU) get the
 	same effect, but require that the readers manipulate CPU-local
 	counters.  These counters allow limited types of blocking
 	within RCU read-side critical sections.  SRCU also uses
@@ -79,10 +79,10 @@ o I hear that RCU is patented? What is with that?
 o	I hear that RCU needs work in order to support realtime kernels?
 
 	This work is largely completed.  Realtime-friendly RCU can be
-	enabled via the CONFIG_PREEMPT_RCU kernel configuration parameter.
-	However, work is in progress for enabling priority boosting of
-	preempted RCU read-side critical sections.  This is needed if you
-	have CPU-bound realtime threads.
+	enabled via the CONFIG_TREE_PREEMPT_RCU kernel configuration
+	parameter.  However, work is in progress for enabling priority
+	boosting of preempted RCU read-side critical sections.  This is
+	needed if you have CPU-bound realtime threads.
 
 o	Where can I find more information on RCU?
 
Documentation/RCU/whatisRCU.txt
@@ -136,10 +136,10 @@ rcu_read_lock()
 	Used by a reader to inform the reclaimer that the reader is
 	entering an RCU read-side critical section.  It is illegal
 	to block while in an RCU read-side critical section, though
-	kernels built with CONFIG_PREEMPT_RCU can preempt RCU read-side
-	critical sections.  Any RCU-protected data structure accessed
-	during an RCU read-side critical section is guaranteed to remain
-	unreclaimed for the full duration of that critical section.
+	kernels built with CONFIG_TREE_PREEMPT_RCU can preempt RCU
+	read-side critical sections.  Any RCU-protected data structure
+	accessed during an RCU read-side critical section is guaranteed to
+	remain unreclaimed for the full duration of that critical section.
 	Reference counts may be used in conjunction with RCU to maintain
 	longer-term references to data structures.
 
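
Aside (not part of the patch): the guarantee described in the hunk above
is what makes the classic RCU reader pattern safe.  A minimal sketch,
where gp is a hypothetical RCU-protected global pointer and
do_something_with() a hypothetical consumer:

	struct foo *p;

	rcu_read_lock();		/* enter read-side critical section */
	p = rcu_dereference(gp);	/* fetch the RCU-protected pointer */
	if (p != NULL)
		do_something_with(p->a);	/* p stays valid until unlock */
	rcu_read_unlock();		/* exit critical section */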
include/linux/init_task.h
@@ -94,11 +94,7 @@ extern struct group_info init_groups;
 # define CAP_INIT_BSET  CAP_INIT_EFF_SET
 #endif
 
-#ifdef CONFIG_PREEMPT_RCU
-#define INIT_TASK_RCU_PREEMPT(tsk)					\
-	.rcu_read_lock_nesting = 0,					\
-	.rcu_flipctr_idx = 0,
-#elif defined(CONFIG_TREE_PREEMPT_RCU)
+#ifdef CONFIG_TREE_PREEMPT_RCU
 #define INIT_TASK_RCU_PREEMPT(tsk)					\
 	.rcu_read_lock_nesting = 0,					\
 	.rcu_read_unlock_special = 0,					\
include/linux/rcupdate.h
@@ -68,11 +68,9 @@ extern int rcu_scheduler_active;
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
-#elif defined(CONFIG_PREEMPT_RCU)
-#include <linux/rcupreempt.h>
 #else
 #error "Unknown RCU implementation specified to kernel configuration"
-#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
+#endif
 
 #define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
 #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
include/linux/rcupreempt.h (deleted)
@@ -1,140 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion (RT implementation)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author:  Paul McKenney <paulmck@us.ibm.com>
- *
- * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- *		Documentation/RCU
- *
- */
-
-#ifndef __LINUX_RCUPREEMPT_H
-#define __LINUX_RCUPREEMPT_H
-
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
-
-extern void rcu_sched_qs(int cpu);
-static inline void rcu_bh_qs(int cpu) { }
-
-/*
- * Someone might want to pass call_rcu_bh as a function pointer.
- * So this needs to just be a rename and not a macro function.
- *	(no parentheses)
- */
-#define call_rcu_bh	call_rcu
-
-/**
- * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
- *
- * The update function will be invoked some time after a full
- * synchronize_sched()-style grace period elapses, in other words after
- * all currently executing preempt-disabled sections of code (including
- * hardirq handlers, NMI handlers, and local_irq_save() blocks) have
- * completed.
- */
-extern void call_rcu_sched(struct rcu_head *head,
-			   void (*func)(struct rcu_head *head));
-
-extern void __rcu_read_lock(void);
-extern void __rcu_read_unlock(void);
-extern int rcu_needs_cpu(int cpu);
-
-#define __rcu_read_lock_bh()	{ rcu_read_lock(); local_bh_disable(); }
-#define __rcu_read_unlock_bh()	{ local_bh_enable(); rcu_read_unlock(); }
-
-extern void __synchronize_sched(void);
-
-static inline void synchronize_rcu_expedited(void)
-{
-	synchronize_rcu();  /* Placeholder for new rcupreempt implementation. */
-}
-
-static inline void synchronize_rcu_bh_expedited(void)
-{
-	synchronize_rcu_bh();  /* Placeholder for new rcupreempt impl. */
-}
-
-extern void __rcu_init(void);
-extern void rcu_init_sched(void);
-extern void rcu_check_callbacks(int cpu, int user);
-extern void rcu_restart_cpu(int cpu);
-extern long rcu_batches_completed(void);
-
-/*
- * Return the number of RCU batches processed thus far.  Useful for debug
- * and statistics.  The _bh variant is identical to straight RCU.
- */
-static inline long rcu_batches_completed_bh(void)
-{
-	return rcu_batches_completed();
-}
-
-static inline void exit_rcu(void)
-{
-}
-
-#ifdef CONFIG_RCU_TRACE
-struct rcupreempt_trace;
-extern long *rcupreempt_flipctr(int cpu);
-extern long rcupreempt_data_completed(void);
-extern int rcupreempt_flip_flag(int cpu);
-extern int rcupreempt_mb_flag(int cpu);
-extern char *rcupreempt_try_flip_state_name(void);
-extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
-#endif
-
-struct softirq_action;
-
-#ifdef CONFIG_NO_HZ
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-#else
-# define rcu_enter_nohz()	do { } while (0)
-# define rcu_exit_nohz()	do { } while (0)
-#endif
-
-/*
- * A context switch is a grace period for rcupreempt synchronize_rcu()
- * only during early boot, before the scheduler has been initialized.
- * So, how the heck do we get a context switch?  Well, if the caller
- * invokes synchronize_rcu(), they are willing to accept a context
- * switch, so we simply pretend that one happened.
- *
- * After boot, there might be a blocked or preempted task in an RCU
- * read-side critical section, so we cannot then take the fastpath.
- */
-static inline int rcu_blocking_is_gp(void)
-{
-	return num_online_cpus() == 1 && !rcu_scheduler_active;
-}
-
-#endif /* __LINUX_RCUPREEMPT_H */
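
Aside (not part of the patch): call_rcu_sched() itself survives in the
tree-based RCU implementations; only this rcupreempt declaration of it
goes away.  A minimal sketch of the deferred-free pattern its kerneldoc
above describes, where struct foo and free_foo_cb() are hypothetical:

	struct foo {
		int a;
		struct rcu_head rcu;	/* embedded callback head */
	};

	static void free_foo_cb(struct rcu_head *head)	/* hypothetical */
	{
		struct foo *fp = container_of(head, struct foo, rcu);

		kfree(fp);	/* safe: the grace period has elapsed */
	}

	/* Updater side: after unlinking fp from all readers' view,
	 * defer its free past all running preempt-disabled regions. */
	call_rcu_sched(&fp->rcu, free_foo_cb);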
include/linux/rcupreempt_trace.h (deleted)
@@ -1,97 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion (RT implementation)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author:  Paul McKenney <paulmck@us.ibm.com>
- *
- * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of the Preemptible Read-Copy Update mechanism see -
- *		http://lwn.net/Articles/253651/
- */
-
-#ifndef __LINUX_RCUPREEMPT_TRACE_H
-#define __LINUX_RCUPREEMPT_TRACE_H
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <asm/atomic.h>
-
-/*
- * PREEMPT_RCU data structures.
- */
-
-struct rcupreempt_trace {
-	long		next_length;
-	long		next_add;
-	long		wait_length;
-	long		wait_add;
-	long		done_length;
-	long		done_add;
-	long		done_remove;
-	atomic_t	done_invoked;
-	long		rcu_check_callbacks;
-	atomic_t	rcu_try_flip_1;
-	atomic_t	rcu_try_flip_e1;
-	long		rcu_try_flip_i1;
-	long		rcu_try_flip_ie1;
-	long		rcu_try_flip_g1;
-	long		rcu_try_flip_a1;
-	long		rcu_try_flip_ae1;
-	long		rcu_try_flip_a2;
-	long		rcu_try_flip_z1;
-	long		rcu_try_flip_ze1;
-	long		rcu_try_flip_z2;
-	long		rcu_try_flip_m1;
-	long		rcu_try_flip_me1;
-	long		rcu_try_flip_m2;
-};
-
-#ifdef CONFIG_RCU_TRACE
-#define RCU_TRACE(fn, arg)	fn(arg);
-#else
-#define RCU_TRACE(fn, arg)
-#endif
-
-extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace);
-
-#endif /* __LINUX_RCUPREEMPT_TRACE_H */
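
Aside (not part of the patch): the RCU_TRACE() macro in the header just
deleted above is the usual compile-out wrapper; when CONFIG_RCU_TRACE is
unset, every call site expands to nothing, so tracing costs nothing in
production builds.  For example:

	/* With CONFIG_RCU_TRACE=y this expands to
	 * rcupreempt_trace_move2done(trace); (the macro body supplies
	 * the trailing semicolon); otherwise it disappears entirely. */
	RCU_TRACE(rcupreempt_trace_move2done, trace)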
include/linux/sched.h
@@ -1205,11 +1205,6 @@ struct task_struct {
 	unsigned int policy;
 	cpumask_t cpus_allowed;
 
-#ifdef CONFIG_PREEMPT_RCU
-	int rcu_read_lock_nesting;
-	int rcu_flipctr_idx;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
-
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	int rcu_read_lock_nesting;
 	char rcu_read_unlock_special;
@@ -1744,14 +1739,6 @@ static inline void rcu_copy_process(struct task_struct *p)
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
-#elif defined(CONFIG_PREEMPT_RCU)
-
-static inline void rcu_copy_process(struct task_struct *p)
-{
-	p->rcu_read_lock_nesting = 0;
-	p->rcu_flipctr_idx = 0;
-}
-
 #else
 
 static inline void rcu_copy_process(struct task_struct *p)
init/Kconfig
@@ -324,17 +324,6 @@ config TREE_RCU
	  thousands of CPUs.  It also scales down nicely to
	  smaller systems.
 
-config PREEMPT_RCU
-	bool "Preemptible RCU"
-	depends on PREEMPT
-	help
-	  This option reduces the latency of the kernel by making certain
-	  RCU sections preemptible.  Normally RCU code is non-preemptible, if
-	  this option is selected then read-only RCU sections become
-	  preemptible.  This helps latency, but may expose bugs due to
-	  now-naive assumptions about each RCU read-side critical section
-	  remaining on a given CPU through its execution.
-
 config TREE_PREEMPT_RCU
 	bool "Preemptable tree-based hierarchical RCU"
 	depends on PREEMPT
@@ -348,7 +337,7 @@ endchoice
 
 config RCU_TRACE
 	bool "Enable tracing for RCU"
-	depends on TREE_RCU || PREEMPT_RCU || TREE_PREEMPT_RCU
+	depends on TREE_RCU || TREE_PREEMPT_RCU
 	help
 	  This option provides tracing in RCU which presents stats
 	  in debugfs for debugging RCU implementation.
@@ -395,13 +384,6 @@ config TREE_RCU_TRACE
 	  TREE_PREEMPT_RCU implementations, permitting Makefile to
 	  trivially select kernel/rcutree_trace.c.
 
-config PREEMPT_RCU_TRACE
-	def_bool RCU_TRACE && PREEMPT_RCU
-	select DEBUG_FS
-	help
-	  This option provides tracing for the PREEMPT_RCU implementation,
-	  permitting Makefile to trivially select kernel/rcupreempt_trace.c.
-
 endmenu # "RCU Subsystem"
 
 config IKCONFIG
kernel/Makefile
@@ -82,9 +82,7 @@ obj-$(CONFIG_SECCOMP) += seccomp.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
 obj-$(CONFIG_TREE_RCU) += rcutree.o
 obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
-obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o
 obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
-obj-$(CONFIG_PREEMPT_RCU_TRACE) += rcupreempt_trace.o
 obj-$(CONFIG_RELAY) += relay.o
 obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
 obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
kernel/rcupreempt.c (deleted, 1518 lines)
File diff suppressed because it is too large.
kernel/rcupreempt_trace.c (deleted)
@@ -1,335 +0,0 @@
-/*
- * Read-Copy Update tracing for realtime implementation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright IBM Corporation, 2006
- *
- * Papers:  http://www.rdrop.com/users/paulmck/RCU
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- *		Documentation/RCU/ *.txt
- *
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/rcupdate.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <asm/atomic.h>
-#include <linux/bitops.h>
-#include <linux/module.h>
-#include <linux/completion.h>
-#include <linux/moduleparam.h>
-#include <linux/percpu.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
-#include <linux/mutex.h>
-#include <linux/rcupreempt_trace.h>
-#include <linux/debugfs.h>
-
-static struct mutex rcupreempt_trace_mutex;
-static char *rcupreempt_trace_buf;
-#define RCUPREEMPT_TRACE_BUF_SIZE 4096
-
-void rcupreempt_trace_move2done(struct rcupreempt_trace *trace)
-{
-	trace->done_length += trace->wait_length;
-	trace->done_add += trace->wait_length;
-	trace->wait_length = 0;
-}
-void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace)
-{
-	trace->wait_length += trace->next_length;
-	trace->wait_add += trace->next_length;
-	trace->next_length = 0;
-}
-void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace)
-{
-	atomic_inc(&trace->rcu_try_flip_1);
-}
-void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace)
-{
-	atomic_inc(&trace->rcu_try_flip_e1);
-}
-void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace)
-{
-	trace->rcu_try_flip_i1++;
-}
-void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace)
-{
-	trace->rcu_try_flip_ie1++;
-}
-void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace)
-{
-	trace->rcu_try_flip_g1++;
-}
-void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace)
-{
-	trace->rcu_try_flip_a1++;
-}
-void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace)
-{
-	trace->rcu_try_flip_ae1++;
-}
-void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace)
-{
-	trace->rcu_try_flip_a2++;
-}
-void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace)
-{
-	trace->rcu_try_flip_z1++;
-}
-void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace)
-{
-	trace->rcu_try_flip_ze1++;
-}
-void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace)
-{
-	trace->rcu_try_flip_z2++;
-}
-void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace)
-{
-	trace->rcu_try_flip_m1++;
-}
-void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace)
-{
-	trace->rcu_try_flip_me1++;
-}
-void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace)
-{
-	trace->rcu_try_flip_m2++;
-}
-void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace)
-{
-	trace->rcu_check_callbacks++;
-}
-void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace)
-{
-	trace->done_remove += trace->done_length;
-	trace->done_length = 0;
-}
-void rcupreempt_trace_invoke(struct rcupreempt_trace *trace)
-{
-	atomic_inc(&trace->done_invoked);
-}
-void rcupreempt_trace_next_add(struct rcupreempt_trace *trace)
-{
-	trace->next_add++;
-	trace->next_length++;
-}
-
-static void rcupreempt_trace_sum(struct rcupreempt_trace *sp)
-{
-	struct rcupreempt_trace *cp;
-	int cpu;
-
-	memset(sp, 0, sizeof(*sp));
-	for_each_possible_cpu(cpu) {
-		cp = rcupreempt_trace_cpu(cpu);
-		sp->next_length += cp->next_length;
-		sp->next_add += cp->next_add;
-		sp->wait_length += cp->wait_length;
-		sp->wait_add += cp->wait_add;
-		sp->done_length += cp->done_length;
-		sp->done_add += cp->done_add;
-		sp->done_remove += cp->done_remove;
-		atomic_add(atomic_read(&cp->done_invoked), &sp->done_invoked);
-		sp->rcu_check_callbacks += cp->rcu_check_callbacks;
-		atomic_add(atomic_read(&cp->rcu_try_flip_1),
-			   &sp->rcu_try_flip_1);
-		atomic_add(atomic_read(&cp->rcu_try_flip_e1),
-			   &sp->rcu_try_flip_e1);
-		sp->rcu_try_flip_i1 += cp->rcu_try_flip_i1;
-		sp->rcu_try_flip_ie1 += cp->rcu_try_flip_ie1;
-		sp->rcu_try_flip_g1 += cp->rcu_try_flip_g1;
-		sp->rcu_try_flip_a1 += cp->rcu_try_flip_a1;
-		sp->rcu_try_flip_ae1 += cp->rcu_try_flip_ae1;
-		sp->rcu_try_flip_a2 += cp->rcu_try_flip_a2;
-		sp->rcu_try_flip_z1 += cp->rcu_try_flip_z1;
-		sp->rcu_try_flip_ze1 += cp->rcu_try_flip_ze1;
-		sp->rcu_try_flip_z2 += cp->rcu_try_flip_z2;
-		sp->rcu_try_flip_m1 += cp->rcu_try_flip_m1;
-		sp->rcu_try_flip_me1 += cp->rcu_try_flip_me1;
-		sp->rcu_try_flip_m2 += cp->rcu_try_flip_m2;
-	}
-}
-
-static ssize_t rcustats_read(struct file *filp, char __user *buffer,
-			     size_t count, loff_t *ppos)
-{
-	struct rcupreempt_trace trace;
-	ssize_t bcount;
-	int cnt = 0;
-
-	rcupreempt_trace_sum(&trace);
-	mutex_lock(&rcupreempt_trace_mutex);
-	snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE - cnt,
-		 "ggp=%ld rcc=%ld\n",
-		 rcu_batches_completed(),
-		 trace.rcu_check_callbacks);
-	snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE - cnt,
-		 "na=%ld nl=%ld wa=%ld wl=%ld da=%ld dl=%ld dr=%ld di=%d\n"
-		 "1=%d e1=%d i1=%ld ie1=%ld g1=%ld a1=%ld ae1=%ld a2=%ld\n"
-		 "z1=%ld ze1=%ld z2=%ld m1=%ld me1=%ld m2=%ld\n",
-		 trace.next_add, trace.next_length,
-		 trace.wait_add, trace.wait_length,
-		 trace.done_add, trace.done_length,
-		 trace.done_remove, atomic_read(&trace.done_invoked),
-		 atomic_read(&trace.rcu_try_flip_1),
-		 atomic_read(&trace.rcu_try_flip_e1),
-		 trace.rcu_try_flip_i1, trace.rcu_try_flip_ie1,
-		 trace.rcu_try_flip_g1,
-		 trace.rcu_try_flip_a1, trace.rcu_try_flip_ae1,
-		 trace.rcu_try_flip_a2,
-		 trace.rcu_try_flip_z1, trace.rcu_try_flip_ze1,
-		 trace.rcu_try_flip_z2,
-		 trace.rcu_try_flip_m1, trace.rcu_try_flip_me1,
-		 trace.rcu_try_flip_m2);
-	bcount = simple_read_from_buffer(buffer, count, ppos,
-			rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
-	mutex_unlock(&rcupreempt_trace_mutex);
-	return bcount;
-}
-
-static ssize_t rcugp_read(struct file *filp, char __user *buffer,
-			  size_t count, loff_t *ppos)
-{
-	long oldgp = rcu_batches_completed();
-	ssize_t bcount;
-
-	mutex_lock(&rcupreempt_trace_mutex);
-	synchronize_rcu();
-	snprintf(rcupreempt_trace_buf, RCUPREEMPT_TRACE_BUF_SIZE,
-		 "oldggp=%ld newggp=%ld\n", oldgp, rcu_batches_completed());
-	bcount = simple_read_from_buffer(buffer, count, ppos,
-			rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
-	mutex_unlock(&rcupreempt_trace_mutex);
-	return bcount;
-}
-
-static ssize_t rcuctrs_read(struct file *filp, char __user *buffer,
-			    size_t count, loff_t *ppos)
-{
-	int cnt = 0;
-	int cpu;
-	int f = rcu_batches_completed() & 0x1;
-	ssize_t bcount;
-
-	mutex_lock(&rcupreempt_trace_mutex);
-
-	cnt += snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE,
-				"CPU last cur F M\n");
-	for_each_possible_cpu(cpu) {
-		long *flipctr = rcupreempt_flipctr(cpu);
-		cnt += snprintf(&rcupreempt_trace_buf[cnt],
-				RCUPREEMPT_TRACE_BUF_SIZE - cnt,
-				"%3d%c %4ld %3ld %d %d\n",
-				cpu,
-				cpu_is_offline(cpu) ? '!' : ' ',
-				flipctr[!f],
-				flipctr[f],
-				rcupreempt_flip_flag(cpu),
-				rcupreempt_mb_flag(cpu));
-	}
-	cnt += snprintf(&rcupreempt_trace_buf[cnt],
-			RCUPREEMPT_TRACE_BUF_SIZE - cnt,
-			"ggp = %ld, state = %s\n",
-			rcu_batches_completed(),
-			rcupreempt_try_flip_state_name());
-	cnt += snprintf(&rcupreempt_trace_buf[cnt],
-			RCUPREEMPT_TRACE_BUF_SIZE - cnt,
-			"\n");
-	bcount = simple_read_from_buffer(buffer, count, ppos,
-			rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
-	mutex_unlock(&rcupreempt_trace_mutex);
-	return bcount;
-}
-
-static struct file_operations rcustats_fops = {
-	.owner = THIS_MODULE,
-	.read = rcustats_read,
-};
-
-static struct file_operations rcugp_fops = {
-	.owner = THIS_MODULE,
-	.read = rcugp_read,
-};
-
-static struct file_operations rcuctrs_fops = {
-	.owner = THIS_MODULE,
-	.read = rcuctrs_read,
-};
-
-static struct dentry *rcudir, *statdir, *ctrsdir, *gpdir;
-static int rcupreempt_debugfs_init(void)
-{
-	rcudir = debugfs_create_dir("rcu", NULL);
-	if (!rcudir)
-		goto out;
-	statdir = debugfs_create_file("rcustats", 0444, rcudir,
-						NULL, &rcustats_fops);
-	if (!statdir)
-		goto free_out;
-
-	gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops);
-	if (!gpdir)
-		goto free_out;
-
-	ctrsdir = debugfs_create_file("rcuctrs", 0444, rcudir,
-						NULL, &rcuctrs_fops);
-	if (!ctrsdir)
-		goto free_out;
-	return 0;
-free_out:
-	if (statdir)
-		debugfs_remove(statdir);
-	if (gpdir)
-		debugfs_remove(gpdir);
-	debugfs_remove(rcudir);
-out:
-	return 1;
-}
-
-static int __init rcupreempt_trace_init(void)
-{
-	int ret;
-
-	mutex_init(&rcupreempt_trace_mutex);
-	rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL);
-	if (!rcupreempt_trace_buf)
-		return 1;
-	ret = rcupreempt_debugfs_init();
-	if (ret)
-		kfree(rcupreempt_trace_buf);
-	return ret;
-}
-
-static void __exit rcupreempt_trace_cleanup(void)
-{
-	debugfs_remove(statdir);
-	debugfs_remove(gpdir);
-	debugfs_remove(ctrsdir);
-	debugfs_remove(rcudir);
-	kfree(rcupreempt_trace_buf);
-}
-
-
-module_init(rcupreempt_trace_init);
-module_exit(rcupreempt_trace_cleanup);
lib/Kconfig.debug
@@ -725,7 +725,7 @@ config RCU_TORTURE_TEST_RUNNABLE
 
 config RCU_CPU_STALL_DETECTOR
 	bool "Check for stalled CPUs delaying RCU grace periods"
-	depends on CLASSIC_RCU || TREE_RCU || TREE_PREEMPT_RCU
+	depends on TREE_RCU || TREE_PREEMPT_RCU
 	default n
 	help
 	  This option causes RCU to printk information on which