2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* kernel userspace event delivery
|
|
|
|
*
|
|
|
|
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
|
|
|
|
* Copyright (C) 2004 Novell, Inc. All rights reserved.
|
|
|
|
* Copyright (C) 2004 IBM, Inc. All rights reserved.
|
|
|
|
*
|
|
|
|
* Licensed under the GNU GPL v2.
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Robert Love <rml@novell.com>
|
|
|
|
* Kay Sievers <kay.sievers@vrfy.org>
|
|
|
|
* Arjan van de Ven <arjanv@redhat.com>
|
|
|
|
* Greg Kroah-Hartman <greg@kroah.com>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/spinlock.h>
|
2008-03-28 00:26:30 +03:00
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/kobject.h>
|
2011-11-17 06:29:17 +04:00
|
|
|
#include <linux/export.h>
|
|
|
|
#include <linux/kmod.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 11:04:11 +03:00
|
|
|
#include <linux/slab.h>
|
2010-05-05 04:36:48 +04:00
|
|
|
#include <linux/user_namespace.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
#include <linux/socket.h>
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <linux/netlink.h>
|
|
|
|
#include <net/sock.h>
|
2010-05-05 04:36:44 +04:00
|
|
|
#include <net/net_namespace.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
|
2007-07-20 15:58:13 +04:00
|
|
|
/* monotonically increasing event sequence number, exported as SEQNUM= */
u64 uevent_seqnum;
#ifdef CONFIG_UEVENT_HELPER
/* path of the userspace helper (e.g. /sbin/hotplug); empty string disables it */
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
#endif
#ifdef CONFIG_NET
/* one netlink broadcast socket per network namespace */
struct uevent_sock {
	struct list_head list;	/* link in uevent_sock_list */
	struct sock *sk;	/* the netlink socket itself */
};
static LIST_HEAD(uevent_sock_list);
#endif

/* This lock protects uevent_seqnum and uevent_sock_list */
static DEFINE_MUTEX(uevent_sock_mutex);
|
|
|
|
|
2007-08-12 22:43:55 +04:00
|
|
|
/* the strings here must match the enum in include/linux/kobject.h */
/* indexed by enum kobject_action; emitted verbatim as the ACTION= value */
static const char *kobject_actions[] = {
	[KOBJ_ADD] = "add",
	[KOBJ_REMOVE] = "remove",
	[KOBJ_CHANGE] = "change",
	[KOBJ_MOVE] = "move",
	[KOBJ_ONLINE] = "online",
	[KOBJ_OFFLINE] = "offline",
};
|
|
|
|
|
|
|
|
/**
|
|
|
|
* kobject_action_type - translate action string to numeric type
|
|
|
|
*
|
|
|
|
* @buf: buffer containing the action string, newline is ignored
|
|
|
|
* @len: length of buffer
|
|
|
|
* @type: pointer to the location to store the action type
|
|
|
|
*
|
|
|
|
* Returns 0 if the action string was recognized.
|
|
|
|
*/
|
|
|
|
/**
 * kobject_action_type - translate action string to numeric type
 *
 * @buf: buffer containing the action string, newline is ignored
 * @count: length of buffer
 * @type: pointer to the location to store the action type
 *
 * A single trailing '\n' or '\0' in @buf is stripped before matching.
 * Returns 0 if the action string was recognized, -EINVAL otherwise.
 */
int kobject_action_type(const char *buf, size_t count,
			enum kobject_action *type)
{
	enum kobject_action idx;

	/* tolerate sysfs-style input with a trailing newline or NUL */
	if (count && (buf[count - 1] == '\n' || buf[count - 1] == '\0'))
		count--;

	if (!count)
		return -EINVAL;

	for (idx = 0; idx < ARRAY_SIZE(kobject_actions); idx++) {
		const char *name = kobject_actions[idx];

		/* exact match: same prefix and nothing left over in the table entry */
		if (strncmp(name, buf, count) == 0 && name[count] == '\0') {
			*type = idx;
			return 0;
		}
	}

	return -EINVAL;
}
|
|
|
|
|
2010-05-22 02:05:21 +04:00
|
|
|
#ifdef CONFIG_NET
|
2010-05-05 04:36:47 +04:00
|
|
|
static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
|
|
|
|
{
|
2014-01-16 13:24:31 +04:00
|
|
|
struct kobject *kobj = data, *ksobj;
|
2010-05-05 04:36:47 +04:00
|
|
|
const struct kobj_ns_type_operations *ops;
|
|
|
|
|
|
|
|
ops = kobj_ns_ops(kobj);
|
2014-01-16 13:24:31 +04:00
|
|
|
if (!ops && kobj->kset) {
|
|
|
|
ksobj = &kobj->kset->kobj;
|
|
|
|
if (ksobj->parent != NULL)
|
|
|
|
ops = kobj_ns_ops(ksobj->parent);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ops && ops->netlink_ns && kobj->ktype->namespace) {
|
2010-05-05 04:36:47 +04:00
|
|
|
const void *sock_ns, *ns;
|
|
|
|
ns = kobj->ktype->namespace(kobj);
|
|
|
|
sock_ns = ops->netlink_ns(dsk);
|
|
|
|
return sock_ns != ns;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2010-05-22 02:05:21 +04:00
|
|
|
#endif
|
2010-05-05 04:36:47 +04:00
|
|
|
|
2014-04-11 01:09:31 +04:00
|
|
|
#ifdef CONFIG_UEVENT_HELPER
|
2010-05-05 04:36:48 +04:00
|
|
|
static int kobj_usermode_filter(struct kobject *kobj)
|
|
|
|
{
|
|
|
|
const struct kobj_ns_type_operations *ops;
|
|
|
|
|
|
|
|
ops = kobj_ns_ops(kobj);
|
|
|
|
if (ops) {
|
|
|
|
const void *init_ns, *ns;
|
|
|
|
ns = kobj->ktype->namespace(kobj);
|
|
|
|
init_ns = ops->initial_ns();
|
|
|
|
return ns != init_ns;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
kobject: don't block for each kobject_uevent
Currently kobject_uevent has somewhat unpredictable semantics. The
point is, since it may call a usermode helper and wait for it to execute
(UMH_WAIT_EXEC), it is impossible to say for sure what lock dependencies
it will introduce for the caller - strictly speaking it depends on what
fs the binary is located on and the set of locks fork may take. There
are quite a few kobject_uevent's users that do not take this into
account and call it with various mutexes taken, e.g. rtnl_mutex,
net_mutex, which might potentially lead to a deadlock.
Since there is actually no reason to wait for the usermode helper to
execute there, let's make kobject_uevent start the helper asynchronously
with the aid of the UMH_NO_WAIT flag.
Personally, I'm interested in this, because I really want kobject_uevent
to be called under the slab_mutex in the slub implementation as it used
to be some time ago, because it greatly simplifies synchronization and
automatically fixes a kmemcg-related race. However, there was a
deadlock detected on an attempt to call kobject_uevent under the
slab_mutex (see https://lkml.org/lkml/2012/1/14/45), which was reported
to be fixed by releasing the slab_mutex for kobject_uevent.
Unfortunately, there was no information about who exactly blocked on the
slab_mutex causing the usermode helper to stall, neither have I managed
to find this out or reproduce the issue.
BTW, this is not the first attempt to make kobject_uevent use
UMH_NO_WAIT. Previous one was made by commit f520360d93cd ("kobject:
don't block for each kobject_uevent"), but it was wrong (it passed
arguments allocated on stack to async thread) so it was reverted in
05f54c13cd0c ("Revert "kobject: don't block for each kobject_uevent".").
It targeted on speeding up the boot process though.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Greg KH <greg@kroah.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2014-04-04 01:48:21 +04:00
|
|
|
/*
 * Build the argv[] for the usermode helper inside @env's buffer:
 * argv[0] is the helper path, argv[1] the subsystem name (appended to
 * env->buf), argv[2] the NULL terminator.  Returns 0 on success or
 * -ENOMEM when the subsystem name does not fit in the remaining space.
 */
static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
{
	int len;

	/*
	 * strlcpy returns the length of the source string; a value >=
	 * the space left in env->buf means the copy was truncated.
	 */
	len = strlcpy(&env->buf[env->buflen], subsystem,
		      sizeof(env->buf) - env->buflen);
	if (len >= (sizeof(env->buf) - env->buflen)) {
		WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
		return -ENOMEM;
	}

	env->argv[0] = uevent_helper;
	env->argv[1] = &env->buf[env->buflen];
	env->argv[2] = NULL;

	/* account for the subsystem string plus its NUL terminator */
	env->buflen += len + 1;
	return 0;
}
|
|
|
|
|
|
|
|
static void cleanup_uevent_env(struct subprocess_info *info)
|
|
|
|
{
|
|
|
|
kfree(info->data);
|
|
|
|
}
|
2014-04-11 01:09:31 +04:00
|
|
|
#endif
|
kobject: don't block for each kobject_uevent
Currently kobject_uevent has somewhat unpredictable semantics. The
point is, since it may call a usermode helper and wait for it to execute
(UMH_WAIT_EXEC), it is impossible to say for sure what lock dependencies
it will introduce for the caller - strictly speaking it depends on what
fs the binary is located on and the set of locks fork may take. There
are quite a few kobject_uevent's users that do not take this into
account and call it with various mutexes taken, e.g. rtnl_mutex,
net_mutex, which might potentially lead to a deadlock.
Since there is actually no reason to wait for the usermode helper to
execute there, let's make kobject_uevent start the helper asynchronously
with the aid of the UMH_NO_WAIT flag.
Personally, I'm interested in this, because I really want kobject_uevent
to be called under the slab_mutex in the slub implementation as it used
to be some time ago, because it greatly simplifies synchronization and
automatically fixes a kmemcg-related race. However, there was a
deadlock detected on an attempt to call kobject_uevent under the
slab_mutex (see https://lkml.org/lkml/2012/1/14/45), which was reported
to be fixed by releasing the slab_mutex for kobject_uevent.
Unfortunately, there was no information about who exactly blocked on the
slab_mutex causing the usermode helper to stall, neither have I managed
to find this out or reproduce the issue.
BTW, this is not the first attempt to make kobject_uevent use
UMH_NO_WAIT. Previous one was made by commit f520360d93cd ("kobject:
don't block for each kobject_uevent"), but it was wrong (it passed
arguments allocated on stack to async thread) so it was reverted in
05f54c13cd0c ("Revert "kobject: don't block for each kobject_uevent".").
It targeted on speeding up the boot process though.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Greg KH <greg@kroah.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2014-04-04 01:48:21 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/**
 * kobject_uevent_env - send an uevent with environmental data
 *
 * @action: action that is happening
 * @kobj: struct kobject that the action is happening to
 * @envp_ext: pointer to environmental data
 *
 * Returns 0 if kobject_uevent_env() is completed with success or the
 * corresponding error when it fails.
 */
int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
		       char *envp_ext[])
{
	struct kobj_uevent_env *env;
	const char *action_string = kobject_actions[action];
	const char *devpath = NULL;
	const char *subsystem;
	struct kobject *top_kobj;
	struct kset *kset;
	const struct kset_uevent_ops *uevent_ops;
	int i = 0;
	int retval = 0;
#ifdef CONFIG_NET
	struct uevent_sock *ue_sk;
#endif

	pr_debug("kobject: '%s' (%p): %s\n",
		 kobject_name(kobj), kobj, __func__);

	/* search the kset we belong to */
	/* walk up the hierarchy; the nearest ancestor with a kset owns the event */
	top_kobj = kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;

	if (!top_kobj->kset) {
		pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
			 "without kset!\n", kobject_name(kobj), kobj,
			 __func__);
		return -EINVAL;
	}

	kset = top_kobj->kset;
	uevent_ops = kset->uevent_ops;

	/* skip the event, if uevent_suppress is set*/
	if (kobj->uevent_suppress) {
		pr_debug("kobject: '%s' (%p): %s: uevent_suppress "
			 "caused the event to drop!\n",
			 kobject_name(kobj), kobj, __func__);
		return 0;
	}
	/* skip the event, if the filter returns zero. */
	if (uevent_ops && uevent_ops->filter)
		if (!uevent_ops->filter(kset, kobj)) {
			pr_debug("kobject: '%s' (%p): %s: filter function "
				 "caused the event to drop!\n",
				 kobject_name(kobj), kobj, __func__);
			return 0;
		}

	/* originating subsystem */
	if (uevent_ops && uevent_ops->name)
		subsystem = uevent_ops->name(kset, kobj);
	else
		subsystem = kobject_name(&kset->kobj);
	if (!subsystem) {
		pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
			 "event to drop!\n", kobject_name(kobj), kobj,
			 __func__);
		return 0;
	}

	/* environment buffer */
	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* complete object path */
	devpath = kobject_get_path(kobj, GFP_KERNEL);
	if (!devpath) {
		retval = -ENOENT;
		goto exit;
	}

	/* default keys */
	retval = add_uevent_var(env, "ACTION=%s", action_string);
	if (retval)
		goto exit;
	retval = add_uevent_var(env, "DEVPATH=%s", devpath);
	if (retval)
		goto exit;
	retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem);
	if (retval)
		goto exit;

	/* keys passed in from the caller */
	if (envp_ext) {
		for (i = 0; envp_ext[i]; i++) {
			retval = add_uevent_var(env, "%s", envp_ext[i]);
			if (retval)
				goto exit;
		}
	}

	/* let the kset specific function add its stuff */
	if (uevent_ops && uevent_ops->uevent) {
		retval = uevent_ops->uevent(kset, kobj, env);
		if (retval) {
			pr_debug("kobject: '%s' (%p): %s: uevent() returned "
				 "%d\n", kobject_name(kobj), kobj,
				 __func__, retval);
			goto exit;
		}
	}

	/*
	 * Mark "add" and "remove" events in the object to ensure proper
	 * events to userspace during automatic cleanup. If the object did
	 * send an "add" event, "remove" will automatically generated by
	 * the core, if not already done by the caller.
	 */
	if (action == KOBJ_ADD)
		kobj->state_add_uevent_sent = 1;
	else if (action == KOBJ_REMOVE)
		kobj->state_remove_uevent_sent = 1;

	/* uevent_sock_mutex serializes the seqnum bump and the socket-list walk */
	mutex_lock(&uevent_sock_mutex);
	/* we will send an event, so request a new sequence number */
	retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum);
	if (retval) {
		mutex_unlock(&uevent_sock_mutex);
		goto exit;
	}

#if defined(CONFIG_NET)
	/* send netlink message */
	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
		struct sock *uevent_sock = ue_sk->sk;
		struct sk_buff *skb;
		size_t len;

		/* group 1 is the uevent multicast group */
		if (!netlink_has_listeners(uevent_sock, 1))
			continue;

		/* allocate message with the maximum possible size */
		/* header is "<action>@<devpath>" plus '@' and NUL, hence + 2 */
		len = strlen(action_string) + strlen(devpath) + 2;
		skb = alloc_skb(len + env->buflen, GFP_KERNEL);
		if (skb) {
			char *scratch;

			/* add header */
			scratch = skb_put(skb, len);
			sprintf(scratch, "%s@%s", action_string, devpath);

			/* copy keys to our continuous event payload buffer */
			for (i = 0; i < env->envp_idx; i++) {
				len = strlen(env->envp[i]) + 1;
				scratch = skb_put(skb, len);
				strcpy(scratch, env->envp[i]);
			}

			NETLINK_CB(skb).dst_group = 1;
			retval = netlink_broadcast_filtered(uevent_sock, skb,
							    0, 1, GFP_KERNEL,
							    kobj_bcast_filter,
							    kobj);
			/* ENOBUFS should be handled in userspace */
			if (retval == -ENOBUFS || retval == -ESRCH)
				retval = 0;
		} else
			retval = -ENOMEM;
	}
#endif
	mutex_unlock(&uevent_sock_mutex);

#ifdef CONFIG_UEVENT_HELPER
	/* call uevent_helper, usually only enabled during early boot */
	if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
		struct subprocess_info *info;

		retval = add_uevent_var(env, "HOME=/");
		if (retval)
			goto exit;
		retval = add_uevent_var(env,
					"PATH=/sbin:/bin:/usr/sbin:/usr/bin");
		if (retval)
			goto exit;
		retval = init_uevent_argv(env, subsystem);
		if (retval)
			goto exit;

		/* helper runs asynchronously; env ownership passes to it on success */
		retval = -ENOMEM;
		info = call_usermodehelper_setup(env->argv[0], env->argv,
						 env->envp, GFP_KERNEL,
						 NULL, cleanup_uevent_env, env);
		if (info) {
			retval = call_usermodehelper_exec(info, UMH_NO_WAIT);
			env = NULL;	/* freed by cleanup_uevent_env */
		}
	}
#endif

exit:
	kfree(devpath);
	kfree(env);
	return retval;
}
EXPORT_SYMBOL_GPL(kobject_uevent_env);
|
|
|
|
|
|
|
|
/**
|
2010-08-13 14:58:10 +04:00
|
|
|
* kobject_uevent - notify userspace by sending an uevent
|
2006-11-20 19:07:51 +03:00
|
|
|
*
|
2007-08-12 22:43:55 +04:00
|
|
|
* @action: action that is happening
|
2006-11-20 19:07:51 +03:00
|
|
|
* @kobj: struct kobject that the action is happening to
|
2006-12-20 00:01:27 +03:00
|
|
|
*
|
|
|
|
* Returns 0 if kobject_uevent() is completed with success or the
|
|
|
|
* corresponding error when it fails.
|
2006-11-20 19:07:51 +03:00
|
|
|
*/
|
2006-12-20 00:01:27 +03:00
|
|
|
int kobject_uevent(struct kobject *kobj, enum kobject_action action)
|
2006-11-20 19:07:51 +03:00
|
|
|
{
|
2006-12-20 00:01:27 +03:00
|
|
|
return kobject_uevent_env(kobj, action, NULL);
|
2006-11-20 19:07:51 +03:00
|
|
|
}
|
2005-11-16 11:00:00 +03:00
|
|
|
EXPORT_SYMBOL_GPL(kobject_uevent);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/**
|
2007-08-14 17:15:12 +04:00
|
|
|
* add_uevent_var - add key value string to the environment buffer
|
|
|
|
* @env: environment buffer structure
|
|
|
|
* @format: printf format for the key=value pair
|
2005-04-17 02:20:36 +04:00
|
|
|
*
|
|
|
|
* Returns 0 if environment variable was added successfully or -ENOMEM
|
|
|
|
* if no space was available.
|
|
|
|
*/
|
2007-08-14 17:15:12 +04:00
|
|
|
int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
va_list args;
|
2007-08-14 17:15:12 +04:00
|
|
|
int len;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-08-14 17:15:12 +04:00
|
|
|
if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
|
2008-07-26 06:45:39 +04:00
|
|
|
WARN(1, KERN_ERR "add_uevent_var: too many keys\n");
|
2005-04-17 02:20:36 +04:00
|
|
|
return -ENOMEM;
|
2007-08-14 17:15:12 +04:00
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
va_start(args, format);
|
2007-08-14 17:15:12 +04:00
|
|
|
len = vsnprintf(&env->buf[env->buflen],
|
|
|
|
sizeof(env->buf) - env->buflen,
|
|
|
|
format, args);
|
2005-04-17 02:20:36 +04:00
|
|
|
va_end(args);
|
|
|
|
|
2007-08-14 17:15:12 +04:00
|
|
|
if (len >= (sizeof(env->buf) - env->buflen)) {
|
2008-07-26 06:45:39 +04:00
|
|
|
WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n");
|
2005-04-17 02:20:36 +04:00
|
|
|
return -ENOMEM;
|
2007-08-14 17:15:12 +04:00
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-08-14 17:15:12 +04:00
|
|
|
env->envp[env->envp_idx++] = &env->buf[env->buflen];
|
|
|
|
env->buflen += len + 1;
|
2005-04-17 02:20:36 +04:00
|
|
|
return 0;
|
|
|
|
}
|
2005-11-16 11:00:00 +03:00
|
|
|
EXPORT_SYMBOL_GPL(add_uevent_var);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2006-04-25 17:37:26 +04:00
|
|
|
#if defined(CONFIG_NET)
|
2010-05-05 04:36:44 +04:00
|
|
|
static int uevent_net_init(struct net *net)
|
2005-11-11 16:43:07 +03:00
|
|
|
{
|
2010-05-05 04:36:44 +04:00
|
|
|
struct uevent_sock *ue_sk;
|
2012-06-29 10:15:21 +04:00
|
|
|
struct netlink_kernel_cfg cfg = {
|
|
|
|
.groups = 1,
|
2012-09-08 06:53:53 +04:00
|
|
|
.flags = NL_CFG_F_NONROOT_RECV,
|
2012-06-29 10:15:21 +04:00
|
|
|
};
|
2010-05-05 04:36:44 +04:00
|
|
|
|
|
|
|
ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
|
|
|
|
if (!ue_sk)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2012-09-08 06:53:54 +04:00
|
|
|
ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
|
2010-05-05 04:36:44 +04:00
|
|
|
if (!ue_sk->sk) {
|
2005-11-11 16:43:07 +03:00
|
|
|
printk(KERN_ERR
|
|
|
|
"kobject_uevent: unable to create netlink socket!\n");
|
2010-05-25 13:51:10 +04:00
|
|
|
kfree(ue_sk);
|
2005-11-11 16:43:07 +03:00
|
|
|
return -ENODEV;
|
|
|
|
}
|
2010-05-05 04:36:44 +04:00
|
|
|
mutex_lock(&uevent_sock_mutex);
|
|
|
|
list_add_tail(&ue_sk->list, &uevent_sock_list);
|
|
|
|
mutex_unlock(&uevent_sock_mutex);
|
2005-11-11 16:43:07 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-05-05 04:36:44 +04:00
|
|
|
static void uevent_net_exit(struct net *net)
|
|
|
|
{
|
|
|
|
struct uevent_sock *ue_sk;
|
|
|
|
|
|
|
|
mutex_lock(&uevent_sock_mutex);
|
|
|
|
list_for_each_entry(ue_sk, &uevent_sock_list, list) {
|
|
|
|
if (sock_net(ue_sk->sk) == net)
|
|
|
|
goto found;
|
|
|
|
}
|
|
|
|
mutex_unlock(&uevent_sock_mutex);
|
|
|
|
return;
|
|
|
|
|
|
|
|
found:
|
|
|
|
list_del(&ue_sk->list);
|
|
|
|
mutex_unlock(&uevent_sock_mutex);
|
|
|
|
|
|
|
|
netlink_kernel_release(ue_sk->sk);
|
|
|
|
kfree(ue_sk);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Hooks run for every network namespace as it is created/destroyed. */
static struct pernet_operations uevent_net_ops = {
	.init	= uevent_net_init,
	.exit	= uevent_net_exit,
};
|
|
|
|
|
|
|
|
/* Register the per-netns uevent socket handling at boot. */
static int __init kobject_uevent_init(void)
{
	return register_pernet_subsys(&uevent_net_ops);
}

/* postcore: uevent sockets must exist before most drivers initialize. */
postcore_initcall(kobject_uevent_init);
|
2006-04-25 17:37:26 +04:00
|
|
|
#endif
|