Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull vhost fixes from Michael Tsirkin:
 "vhost: more fixes for 3.11

  This includes some fixes for vhost net and scsi drivers.

  The test module has already been reworked to avoid rcu usage, but the
  necessary core changes were missing; this fixes that.

  Unlikely to affect any real-world users, but it's early in the cycle,
  so let's merge them"

(It was earlier when Michael originally sent the email, but it somehow
got missed in the flood, so here it is after -rc2)

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vhost: Remove custom vhost rcu usage
  vhost-scsi: Always access vq->private_data under vq mutex
  vhost-net: Always access vq->private_data under vq mutex
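
The change is the same across all three drivers: vq->private_data stops being
an __rcu pointer that readers fetch with rcu_dereference_check() before (or
even without) taking the mutex, and becomes a plain pointer that is only ever
read or written with vq->mutex held. A minimal before/after sketch of the
reader side follows; struct vq_sketch and handle_vq() are illustrative
stand-ins for struct vhost_virtqueue and the handle_tx()/handle_rx() style
handlers, not real kernel identifiers:

    #include <linux/mutex.h>

    /* Pared-down stand-in for struct vhost_virtqueue
     * (the real one lives in drivers/vhost/vhost.h). */
    struct vq_sketch {
            struct mutex mutex;
            void *private_data;     /* was: void __rcu *private_data; */
    };

    /*
     * Old reader (removed by this series):
     *
     *      sock = rcu_dereference_check(vq->private_data, 1);
     *      if (!sock)
     *              return;
     *      mutex_lock(&vq->mutex);
     *      ...
     *
     * The "1" suppressed lockdep checking, because the read side was
     * "running inside the vhost worker" rather than a real RCU critical
     * section.
     */

    /* New reader: the pointer is valid exactly while the mutex is held. */
    static void handle_vq(struct vq_sketch *vq)
    {
            void *priv;

            mutex_lock(&vq->mutex);
            priv = vq->private_data;        /* plain load, mutex held */
            if (!priv)
                    goto out;
            /* ... service the virtqueue using priv ... */
    out:
            mutex_unlock(&vq->mutex);
    }

Note the knock-on change visible in the hunks below: since the NULL check now
happens after mutex_lock(), the early "return" paths become "goto out" so the
mutex is always released.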
This commit is contained in:
Linus Torvalds 2013-07-23 14:38:20 -07:00
Parents 55c62960b0 22fa90c7fb
Commit a030cbc350
4 changed files with 27 additions and 45 deletions

drivers/vhost/net.c

@@ -15,7 +15,6 @@
 #include <linux/moduleparam.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
-#include <linux/rcupdate.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 
@@ -346,12 +345,11 @@ static void handle_tx(struct vhost_net *net)
     struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
     bool zcopy, zcopy_used;
 
-    /* TODO: check that we are running from vhost_worker? */
-    sock = rcu_dereference_check(vq->private_data, 1);
-    if (!sock)
-        return;
-
     mutex_lock(&vq->mutex);
+    sock = vq->private_data;
+    if (!sock)
+        goto out;
+
     vhost_disable_notify(&net->dev, vq);
 
     hdr_size = nvq->vhost_hlen;
@@ -461,7 +459,7 @@ static void handle_tx(struct vhost_net *net)
             break;
         }
     }
-
+out:
     mutex_unlock(&vq->mutex);
 }
 
@@ -570,14 +568,14 @@ static void handle_rx(struct vhost_net *net)
     s16 headcount;
     size_t vhost_hlen, sock_hlen;
     size_t vhost_len, sock_len;
-    /* TODO: check that we are running from vhost_worker? */
-    struct socket *sock = rcu_dereference_check(vq->private_data, 1);
-
-    if (!sock)
-        return;
+    struct socket *sock;
 
     mutex_lock(&vq->mutex);
+    sock = vq->private_data;
+    if (!sock)
+        goto out;
+
     vhost_disable_notify(&net->dev, vq);
     vhost_hlen = nvq->vhost_hlen;
     sock_hlen = nvq->sock_hlen;
 
@@ -652,7 +650,7 @@ static void handle_rx(struct vhost_net *net)
             break;
         }
     }
-
+out:
     mutex_unlock(&vq->mutex);
 }
 
@@ -750,8 +748,7 @@ static int vhost_net_enable_vq(struct vhost_net *n,
     struct vhost_poll *poll = n->poll + (nvq - n->vqs);
     struct socket *sock;
 
-    sock = rcu_dereference_protected(vq->private_data,
-                     lockdep_is_held(&vq->mutex));
+    sock = vq->private_data;
     if (!sock)
         return 0;
 
@@ -764,10 +761,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
     struct socket *sock;
 
     mutex_lock(&vq->mutex);
-    sock = rcu_dereference_protected(vq->private_data,
-                     lockdep_is_held(&vq->mutex));
+    sock = vq->private_data;
     vhost_net_disable_vq(n, vq);
-    rcu_assign_pointer(vq->private_data, NULL);
+    vq->private_data = NULL;
     mutex_unlock(&vq->mutex);
     return sock;
 }
@@ -923,8 +919,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
     }
 
     /* start polling new socket */
-    oldsock = rcu_dereference_protected(vq->private_data,
-                        lockdep_is_held(&vq->mutex));
+    oldsock = vq->private_data;
     if (sock != oldsock) {
         ubufs = vhost_net_ubuf_alloc(vq,
                          sock && vhost_sock_zcopy(sock));
@@ -934,7 +929,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
         }
 
         vhost_net_disable_vq(n, vq);
-        rcu_assign_pointer(vq->private_data, sock);
+        vq->private_data = sock;
         r = vhost_init_used(vq);
         if (r)
             goto err_used;
@@ -968,7 +963,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
     return 0;
 
 err_used:
-    rcu_assign_pointer(vq->private_data, oldsock);
+    vq->private_data = oldsock;
     vhost_net_enable_vq(n, vq);
     if (ubufs)
         vhost_net_ubuf_put_wait_and_free(ubufs);

drivers/vhost/scsi.c

@@ -902,19 +902,15 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
     int head, ret;
     u8 target;
 
+    mutex_lock(&vq->mutex);
     /*
      * We can handle the vq only after the endpoint is setup by calling the
      * VHOST_SCSI_SET_ENDPOINT ioctl.
-     *
-     * TODO: Check that we are running from vhost_worker which acts
-     * as read-side critical section for vhost kind of RCU.
-     * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
      */
-    vs_tpg = rcu_dereference_check(vq->private_data, 1);
+    vs_tpg = vq->private_data;
     if (!vs_tpg)
-        return;
+        goto out;
 
-    mutex_lock(&vq->mutex);
     vhost_disable_notify(&vs->dev, vq);
 
     for (;;) {
@@ -1064,6 +1060,7 @@ err_free:
     vhost_scsi_free_cmd(cmd);
 err_cmd:
     vhost_scsi_send_bad_target(vs, vq, head, out);
+out:
     mutex_unlock(&vq->mutex);
 }
 
@@ -1232,9 +1229,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
            sizeof(vs->vs_vhost_wwpn));
     for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
         vq = &vs->vqs[i].vq;
-        /* Flushing the vhost_work acts as synchronize_rcu */
         mutex_lock(&vq->mutex);
-        rcu_assign_pointer(vq->private_data, vs_tpg);
+        vq->private_data = vs_tpg;
         vhost_init_used(vq);
         mutex_unlock(&vq->mutex);
     }
@@ -1313,9 +1309,8 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
     if (match) {
         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
             vq = &vs->vqs[i].vq;
-            /* Flushing the vhost_work acts as synchronize_rcu */
             mutex_lock(&vq->mutex);
-            rcu_assign_pointer(vq->private_data, NULL);
+            vq->private_data = NULL;
             mutex_unlock(&vq->mutex);
         }
     }

drivers/vhost/test.c

@@ -13,7 +13,6 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
-#include <linux/rcupdate.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 
@@ -200,9 +199,8 @@ static long vhost_test_run(struct vhost_test *n, int test)
     priv = test ? n : NULL;
 
     /* start polling new socket */
-    oldpriv = rcu_dereference_protected(vq->private_data,
-                        lockdep_is_held(&vq->mutex));
-    rcu_assign_pointer(vq->private_data, priv);
+    oldpriv = vq->private_data;
+    vq->private_data = priv;
 
     r = vhost_init_used(&n->vqs[index]);
 

drivers/vhost/vhost.h

@@ -103,14 +103,8 @@ struct vhost_virtqueue {
     struct iovec iov[UIO_MAXIOV];
     struct iovec *indirect;
     struct vring_used_elem *heads;
-    /* We use a kind of RCU to access private pointer.
-     * All readers access it from worker, which makes it possible to
-     * flush the vhost_work instead of synchronize_rcu. Therefore readers do
-     * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
-     * vhost_work execution acts instead of rcu_read_lock() and the end of
-     * vhost_work execution acts instead of rcu_read_unlock().
-     * Writers use virtqueue mutex. */
-    void __rcu *private_data;
+    /* Protected by virtqueue mutex. */
+    void *private_data;
     /* Log write descriptors */
     void __user *log_base;
     struct vhost_log *log;
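
The removed vhost.h comment describes the custom scheme this series retires:
readers ran only from the vhost worker, so flushing the vhost_work stood in
for synchronize_rcu(). With the mutex rule, the writer side needs no such
substitute. A sketch of the new writer pattern; vq_swap_backend() is an
illustrative helper (mirroring what vhost_net_stop_vq() and
vhost_net_set_backend() now do inline), not kernel code, and struct vq_sketch
is the same pared-down stand-in used in the earlier sketch:

    #include <linux/mutex.h>

    struct vq_sketch {                  /* pared-down vhost_virtqueue */
            struct mutex mutex;
            void *private_data;
    };

    /* Swap the backend pointer under the same mutex the readers take. */
    static void *vq_swap_backend(struct vq_sketch *vq, void *new_priv)
    {
            void *old;

            mutex_lock(&vq->mutex);
            old = vq->private_data;         /* was: rcu_dereference_protected(...,
                                             *      lockdep_is_held(&vq->mutex)) */
            vq->private_data = new_priv;    /* was: rcu_assign_pointer(...) */
            mutex_unlock(&vq->mutex);

            /* Any reader locking the mutex after this sees only new_priv,
             * so the caller may release the old backend without waiting
             * for a grace period or a vhost_work flush. */
            return old;
    }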