/*
 *	Routines to manage notifier chains for passing status changes to any
 *	interested routines. We need this instead of hard coded call lists so
 *	that modules can poke their nose into the innards. The network devices
 *	needed them so here they are for the rest of you.
 *
 *				Alan Cox <Alan.Cox@linux.org>
 */

#ifndef _LINUX_NOTIFIER_H
#define _LINUX_NOTIFIER_H
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/srcu.h>

/*
 * Notifier chains are of four types:
 *
 *	Atomic notifier chains: Chain callbacks run in interrupt/atomic
 *		context. Callouts are not allowed to block.
 *	Blocking notifier chains: Chain callbacks run in process context.
 *		Callouts are allowed to block.
 *	Raw notifier chains: There are no restrictions on callbacks,
 *		registration, or unregistration.  All locking and protection
 *		must be provided by the caller.
 *	SRCU notifier chains: A variant of blocking notifier chains, with
 *		the same restrictions.
 *
 * atomic_notifier_chain_register() may be called from an atomic context,
 * but blocking_notifier_chain_register() and srcu_notifier_chain_register()
 * must be called from a process context.  Ditto for the corresponding
 * _unregister() routines.
 *
 * atomic_notifier_chain_unregister(), blocking_notifier_chain_unregister(),
 * and srcu_notifier_chain_unregister() _must not_ be called from within
 * the call chain.
 *
 * SRCU notifier chains are an alternative form of blocking notifier chains.
 * They use SRCU (Sleepable Read-Copy Update) instead of rw-semaphores for
 * protection of the chain links.  This means there is _very_ low overhead
 * in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
 * As compensation, srcu_notifier_chain_unregister() is rather expensive.
 * SRCU notifier chains should be used when the chain will be called very
 * often but notifier_blocks will seldom be removed.  Also, SRCU notifier
 * chains are slightly more difficult to use because they require special
 * runtime initialization.
 */
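
/*
 * For illustration only: a minimal sketch of how a blocking notifier chain
 * is typically declared and used.  The my_* identifiers, SOME_EVENT and
 * some_data below are hypothetical; the head macro, registration and
 * call-chain routines are declared further down in this header.
 *
 *	static BLOCKING_NOTIFIER_HEAD(my_chain);
 *
 *	static int my_callback(struct notifier_block *nb,
 *			       unsigned long event, void *data)
 *	{
 *		// React to the event; return NOTIFY_OK, NOTIFY_DONE,
 *		// NOTIFY_BAD or NOTIFY_STOP (defined below).
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call	= my_callback,
 *		.priority	= 0,	// higher priority runs earlier
 *	};
 *
 *	// Process context: register, publish an event, then unregister.
 *	blocking_notifier_chain_register(&my_chain, &my_nb);
 *	blocking_notifier_call_chain(&my_chain, SOME_EVENT, some_data);
 *	blocking_notifier_chain_unregister(&my_chain, &my_nb);
 */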

struct notifier_block {
	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
	struct notifier_block *next;
	int priority;
};

struct atomic_notifier_head {
	spinlock_t lock;
	struct notifier_block *head;
};

struct blocking_notifier_head {
	struct rw_semaphore rwsem;
	struct notifier_block *head;
};

struct raw_notifier_head {
	struct notifier_block *head;
};

struct srcu_notifier_head {
	struct mutex mutex;
	struct srcu_struct srcu;
	struct notifier_block *head;
};

#define ATOMIC_INIT_NOTIFIER_HEAD(name) do {	\
		spin_lock_init(&(name)->lock);	\
		(name)->head = NULL;		\
	} while (0)
#define BLOCKING_INIT_NOTIFIER_HEAD(name) do {	\
		init_rwsem(&(name)->rwsem);	\
		(name)->head = NULL;		\
	} while (0)
#define RAW_INIT_NOTIFIER_HEAD(name) do {	\
		(name)->head = NULL;		\
	} while (0)
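
/*
 * For illustration only: when a notifier head lives inside an object that is
 * allocated at run time, it must be set up with one of the
 * *_INIT_NOTIFIER_HEAD() helpers above before it is used.  The my_device
 * type and field names here are hypothetical.
 *
 *	struct my_device {
 *		struct blocking_notifier_head listeners;
 *		// ... other fields ...
 *	};
 *
 *	static void my_device_setup(struct my_device *dev)
 *	{
 *		BLOCKING_INIT_NOTIFIER_HEAD(&dev->listeners);
 *	}
 */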

/* srcu_notifier_heads must be initialized and cleaned up dynamically */
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define srcu_cleanup_notifier_head(name)	\
		cleanup_srcu_struct(&(name)->srcu);
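
/*
 * For illustration only: unlike the other chain types, an SRCU notifier head
 * has no static initializer, so it must pass through srcu_init_notifier_head()
 * before first use and srcu_cleanup_notifier_head() once no more notifications
 * will be sent.  The my_srcu_chain identifier is hypothetical.
 *
 *	static struct srcu_notifier_head my_srcu_chain;
 *
 *	// Early setup, process context:
 *	srcu_init_notifier_head(&my_srcu_chain);
 *
 *	// ... srcu_notifier_chain_register() / srcu_notifier_call_chain() ...
 *
 *	// Teardown, once the chain can no longer be reached:
 *	srcu_cleanup_notifier_head(&my_srcu_chain);
 */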

#define ATOMIC_NOTIFIER_INIT(name) {				\
		.lock = __SPIN_LOCK_UNLOCKED(name.lock),	\
		.head = NULL }
#define BLOCKING_NOTIFIER_INIT(name) {				\
		.rwsem = __RWSEM_INITIALIZER((name).rwsem),	\
		.head = NULL }
#define RAW_NOTIFIER_INIT(name) {				\
		.head = NULL }
/* srcu_notifier_heads cannot be initialized statically */

#define ATOMIC_NOTIFIER_HEAD(name)				\
	struct atomic_notifier_head name =			\
		ATOMIC_NOTIFIER_INIT(name)
#define BLOCKING_NOTIFIER_HEAD(name)				\
	struct blocking_notifier_head name =			\
		BLOCKING_NOTIFIER_INIT(name)
#define RAW_NOTIFIER_HEAD(name)					\
	struct raw_notifier_head name =				\
		RAW_NOTIFIER_INIT(name)

#ifdef __KERNEL__

extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
		struct notifier_block *nb);
extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
		struct notifier_block *nb);
extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
		struct notifier_block *nb);
extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
		struct notifier_block *nb);

extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
		struct notifier_block *nb);
extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
		struct notifier_block *nb);
extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
		struct notifier_block *nb);
extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
		struct notifier_block *nb);

extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
		unsigned long val, void *v);
extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
		unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v);
extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v);
extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
		unsigned long val, void *v);
extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
		unsigned long val, void *v, int nr_to_call, int *nr_calls);

#define NOTIFY_DONE		0x0000		/* Don't care */
#define NOTIFY_OK		0x0001		/* Suits me */
#define NOTIFY_STOP_MASK	0x8000		/* Don't call further */
#define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002)
						/* Bad/Veto action */
/*
 * Clean way to return from the notifier and stop further calls.
 */
#define NOTIFY_STOP		(NOTIFY_OK|NOTIFY_STOP_MASK)

/* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */
static inline int notifier_from_errno(int err)
{
	return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
}

/* Restore (negative) errno value from notify return value. */
static inline int notifier_to_errno(int ret)
{
	ret &= ~NOTIFY_STOP_MASK;
	return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}
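
/*
 * For illustration only: a worked example of the errno encoding above,
 * assuming the conventional value EPERM == 1.
 *
 *	notifier_from_errno(-EPERM)
 *		== NOTIFY_STOP_MASK | (NOTIFY_OK - (-EPERM))
 *		== 0x8000 | 0x0002 == NOTIFY_BAD
 *
 *	notifier_to_errno(NOTIFY_BAD)
 *		== NOTIFY_OK - 0x0002 == -EPERM
 *
 * So a callback can fail with "return notifier_from_errno(-EBUSY);" and the
 * caller of the chain can recover the error with
 * "err = notifier_to_errno(ret);" after the call-chain routine returns.
 */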

/*
 *	Declared notifiers so far. I can imagine quite a few more chains
 *	over time (eg laptop power reset chains, reboot chain (to clean
 *	device units up), device [un]mount chain, module load/unload chain,
 *	low memory chain, screenblank chain (for plug in modular screenblankers)
 *	VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
 */

/* netdevice notifier chain */
#define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */
#define NETDEV_DOWN	0x0002
#define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
#define NETDEV_CHANGE	0x0004	/* Notify device state change */
#define NETDEV_REGISTER	0x0005
#define NETDEV_UNREGISTER	0x0006
#define NETDEV_CHANGEMTU	0x0007
#define NETDEV_CHANGEADDR	0x0008
#define NETDEV_GOING_DOWN	0x0009
#define NETDEV_CHANGENAME	0x000A
#define NETDEV_FEAT_CHANGE	0x000B
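
/*
 * For illustration only: a sketch of a callback on the netdevice notifier
 * chain.  Registration goes through register_netdevice_notifier() (declared
 * in linux/netdevice.h, not in this header), and the void * argument is the
 * affected struct net_device.  The my_* identifiers are hypothetical.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			// interface came up
 *			break;
 *		case NETDEV_GOING_DOWN:
 *			// interface is about to go down
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 */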

#define SYS_DOWN	0x0001	/* Notify of system down */
#define SYS_RESTART	SYS_DOWN
#define SYS_HALT	0x0002	/* Notify of system halt */
#define SYS_POWER_OFF	0x0003	/* Notify of system power off */

#define NETLINK_URELEASE	0x0001	/* Unicast netlink socket released */

#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_LOCK_ACQUIRE	0x0008 /* Acquire all hotcpu locks */
#define CPU_LOCK_RELEASE	0x0009 /* Release all hotcpu locks */
#define CPU_DYING		0x000A /* CPU (unsigned)v not running any task,
					* not handling interrupts, soon dead */

/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
 * operation in progress
 */
#define CPU_TASKS_FROZEN	0x0010

#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
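
/*
 * For illustration only: since the *_FROZEN values are just the base event
 * with CPU_TASKS_FROZEN or'ed in, a CPU hotplug callback that does not care
 * whether a suspend operation is in progress can mask the flag off before
 * dispatching.  The my_cpu_callback identifier is hypothetical.
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			// cpu has come online (possibly during resume)
 *			break;
 *		case CPU_DEAD:
 *			// cpu is gone
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 */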

/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND		0x0004 /* Suspend finished */

#endif /* __KERNEL__ */
#endif /* _LINUX_NOTIFIER_H */