ipc: reorganize initialization of kern_ipc_perm.seq

ipc_addid() initializes kern_ipc_perm.seq after having called idr_alloc()
(within ipc_idr_alloc()).

Thus a parallel semop() or msgrcv() that uses ipc_obtain_object_check()
may see an uninitialized value.

The patch moves the initialization of kern_ipc_perm.seq to before the calls
to idr_alloc().
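
An illustrative sketch of that window (simplified; seq_of_id() is a made-up
stand-in for ipcid_to_seqx()/ipc_checkid(), which ipc_obtain_object_check()
uses to validate a user-supplied id):

/* Sketch only, not kernel source. */
static int seq_of_id(int id)
{
	return id / SEQ_MULTIPLIER;	/* sequence number encoded in the id */
}

/*
 * Pre-patch ordering in ipc_addid(), condensed:
 *
 *	idx = idr_alloc(&ids->ipcs_idr, new, ...);	object becomes findable here
 *	...
 *	new->seq = ids->seq++;				but ->seq is written only now
 *	new->id  = SEQ_MULTIPLIER * new->seq + idx;
 *
 * A parallel semop()/msgrcv() that looks the object up in that window
 * compares seq_of_id(id) against an uninitialized ->seq.
 */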

Notes:
1) This patch has a user space visible side effect:
If /proc/sys/kernel/*_next_id is used (i.e.: checkpoint/restore) and
if semget()/msgget()/shmget() fails in the final step of adding the id
to the rhashtable, then .._next_id is cleared. Before the patch, it
remained unmodified.

There is no change in behavior after a successful ..get() call: it
always clears .._next_id. Non checkpoint/restore code is not affected,
as it does not use .._next_id.

2) The patch correctly documents that after a call to ipc_idr_alloc(),
the full tear-down sequence must be used. The callers of ipc_addid()
do not fulfill that yet, i.e. more bugfixes are required (see the sketch
below).
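
A rough sketch of such a tear-down, for orientation only: example_full_teardown()
and free_rcu_cb are made-up names; the steps combine what the new comment in
ipc_idr_alloc() names (set new->deleted, drop the reference, call_rcu()) with the
idr_remove()/unlock steps already used in ipc_addid()'s error path:

/* Rough sketch, assuming the ipc/util.h definitions; not the actual fix. */
static void example_full_teardown(struct ipc_ids *ids,
				  struct kern_ipc_perm *new, int idx,
				  void (*free_rcu_cb)(struct rcu_head *head))
{
	idr_remove(&ids->ipcs_idr, idx);	/* unreachable for new lookups */
	new->deleted = true;			/* lockless readers must bail out */
	spin_unlock(&new->lock);
	rcu_read_unlock();
	if (refcount_dec_and_test(&new->refcount))
		call_rcu(&new->rcu, free_rcu_cb); /* free only after a grace period */
}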

The patch is a squash of a patch from Dmitry and my own changes.

Link: http://lkml.kernel.org/r/20180712185241.4017-3-manfred@colorfullife.com
Reported-by: syzbot+2827ef6b3385deb07eaf@syzkaller.appspotmail.com
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Manfred Spraul 2018-08-21 22:01:25 -07:00, committed by Linus Torvalds
Parent 615c999cd8
Commit e2652ae6bd
2 changed files with 50 additions and 44 deletions

Documentation/sysctl/kernel.txt

@@ -464,7 +464,8 @@ Notes:
 1) kernel doesn't guarantee, that new object will have desired id. So,
 it's up to userspace, how to handle an object with "wrong" id.
 2) Toggle with non-default value will be set back to -1 by kernel after
-successful IPC object allocation.
+successful IPC object allocation. If an IPC object allocation syscall
+fails, it is undefined if the value remains unmodified or is reset to -1.
 ==============================================================
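
For context, a minimal user-space sketch (an illustration, not part of the patch)
of how the *_next_id toggle is typically used; it assumes a CONFIG_CHECKPOINT_RESTORE
kernel, sufficient privileges, and uses msg_next_id together with msgget():

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/msg_next_id", "w");
	int id;

	if (!f) {
		perror("msg_next_id");	/* e.g. kernel without CHECKPOINT_RESTORE */
		return 1;
	}
	fprintf(f, "%d\n", 12345);	/* desired id; only a hint, see note 1 above */
	fclose(f);

	id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
	if (id < 0) {
		perror("msgget");	/* after a failure, msg_next_id is undefined */
		return 1;
	}
	printf("requested 12345, got id %d\n", id);	/* msg_next_id is now -1 again */

	msgctl(id, IPC_RMID, NULL);
	return 0;
}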

ipc/util.c

@@ -194,46 +194,54 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
 	return NULL;
 }
-#ifdef CONFIG_CHECKPOINT_RESTORE
 /*
- * Specify desired id for next allocated IPC object.
+ * Insert new IPC object into idr tree, and set sequence number and id
+ * in the correct order.
+ * Especially:
+ * - the sequence number must be set before inserting the object into the idr,
+ *   because the sequence number is accessed without a lock.
+ * - the id can/must be set after inserting the object into the idr.
+ *   All accesses must be done after getting kern_ipc_perm.lock.
+ *
+ * The caller must own kern_ipc_perm.lock of the new object.
+ * On error, the function returns a (negative) error code.
  */
-#define ipc_idr_alloc(ids, new) \
-	idr_alloc(&(ids)->ipcs_idr, (new), \
-		  (ids)->next_id < 0 ? 0 : ipcid_to_idx((ids)->next_id),\
-		  0, GFP_NOWAIT)
-static inline int ipc_buildid(int id, struct ipc_ids *ids,
-			      struct kern_ipc_perm *new)
+static inline int ipc_idr_alloc(struct ipc_ids *ids, struct kern_ipc_perm *new)
 {
-	if (ids->next_id < 0) { /* default, behave as !CHECKPOINT_RESTORE */
+	int idx, next_id = -1;
+#ifdef CONFIG_CHECKPOINT_RESTORE
+	next_id = ids->next_id;
+	ids->next_id = -1;
+#endif
+	/*
+	 * As soon as a new object is inserted into the idr,
+	 * ipc_obtain_object_idr() or ipc_obtain_object_check() can find it,
+	 * and the lockless preparations for ipc operations can start.
+	 * This means especially: permission checks, audit calls, allocation
+	 * of undo structures, ...
+	 *
+	 * Thus the object must be fully initialized, and if something fails,
+	 * then the full tear-down sequence must be followed.
+	 * (i.e.: set new->deleted, reduce refcount, call_rcu())
+	 */
+	if (next_id < 0) { /* !CHECKPOINT_RESTORE or next_id is unset */
 		new->seq = ids->seq++;
 		if (ids->seq > IPCID_SEQ_MAX)
 			ids->seq = 0;
+		idx = idr_alloc(&ids->ipcs_idr, new, 0, 0, GFP_NOWAIT);
 	} else {
-		new->seq = ipcid_to_seqx(ids->next_id);
-		ids->next_id = -1;
+		new->seq = ipcid_to_seqx(next_id);
+		idx = idr_alloc(&ids->ipcs_idr, new, ipcid_to_idx(next_id),
+				0, GFP_NOWAIT);
 	}
-	return SEQ_MULTIPLIER * new->seq + id;
+	if (idx >= 0)
+		new->id = SEQ_MULTIPLIER * new->seq + idx;
+	return idx;
 }
-#else
-#define ipc_idr_alloc(ids, new) \
-	idr_alloc(&(ids)->ipcs_idr, (new), 0, 0, GFP_NOWAIT)
-static inline int ipc_buildid(int id, struct ipc_ids *ids,
-			      struct kern_ipc_perm *new)
-{
-	new->seq = ids->seq++;
-	if (ids->seq > IPCID_SEQ_MAX)
-		ids->seq = 0;
-	return SEQ_MULTIPLIER * new->seq + id;
-}
-#endif /* CONFIG_CHECKPOINT_RESTORE */
 /**
  * ipc_addid - add an ipc identifier
  * @ids: ipc identifier set
@@ -251,7 +259,7 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int limit)
 {
 	kuid_t euid;
 	kgid_t egid;
-	int id, err;
+	int idx, err;
 	if (limit > IPCMNI)
 		limit = IPCMNI;
@@ -271,30 +279,27 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int limit)
 	new->cuid = new->uid = euid;
 	new->gid = new->cgid = egid;
-	id = ipc_idr_alloc(ids, new);
+	idx = ipc_idr_alloc(ids, new);
 	idr_preload_end();
-	if (id >= 0 && new->key != IPC_PRIVATE) {
+	if (idx >= 0 && new->key != IPC_PRIVATE) {
 		err = rhashtable_insert_fast(&ids->key_ht, &new->khtnode,
 					     ipc_kht_params);
 		if (err < 0) {
-			idr_remove(&ids->ipcs_idr, id);
-			id = err;
+			idr_remove(&ids->ipcs_idr, idx);
+			idx = err;
 		}
 	}
-	if (id < 0) {
+	if (idx < 0) {
 		spin_unlock(&new->lock);
 		rcu_read_unlock();
-		return id;
+		return idx;
 	}
 	ids->in_use++;
-	if (id > ids->max_id)
-		ids->max_id = id;
-	new->id = ipc_buildid(id, ids, new);
-	return id;
+	if (idx > ids->max_id)
+		ids->max_id = idx;
+	return idx;
 }
 /**