Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-virtio

* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-virtio:
  virtio: enhance id_matching for virtio drivers
  virtio: fix id_matching for virtio drivers
  virtio: handle short buffers in virtio_rng.
  virtio_blk: add missing __dev{init,exit} markings
  virtio: indirect ring entries (VIRTIO_RING_F_INDIRECT_DESC)
  virtio: teach virtio_has_feature() about transport features
  virtio: expose features in sysfs
  virtio_pci: optional MSI-X support
  virtio_pci: split up vp_interrupt
  virtio: find_vqs/del_vqs virtio operations
  virtio: add names to virtqueue struct, mapping from devices to queues.
  virtio: meet virtio spec by finalizing features before using device
  virtio: fix obsolete documentation on probe function
Linus Torvalds 2009-06-12 09:31:52 -07:00
Parent c34752bc8b e335385373
Commit 16ffc3eeaa
16 changed files with 604 additions and 170 deletions

View file

@ -254,7 +254,7 @@ static int index_to_minor(int index)
return index << PART_BITS;
}
static int virtblk_probe(struct virtio_device *vdev)
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
int err;
@ -288,7 +288,7 @@ static int virtblk_probe(struct virtio_device *vdev)
sg_init_table(vblk->sg, vblk->sg_elems);
/* We expect one virtqueue, for output. */
vblk->vq = vdev->config->find_vq(vdev, 0, blk_done);
vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
if (IS_ERR(vblk->vq)) {
err = PTR_ERR(vblk->vq);
goto out_free_vblk;
@ -388,14 +388,14 @@ out_put_disk:
out_mempool:
mempool_destroy(vblk->pool);
out_free_vq:
vdev->config->del_vq(vblk->vq);
vdev->config->del_vqs(vdev);
out_free_vblk:
kfree(vblk);
out:
return err;
}
static void virtblk_remove(struct virtio_device *vdev)
static void __devexit virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
@ -409,7 +409,7 @@ static void virtblk_remove(struct virtio_device *vdev)
blk_cleanup_queue(vblk->disk->queue);
put_disk(vblk->disk);
mempool_destroy(vblk->pool);
vdev->config->del_vq(vblk->vq);
vdev->config->del_vqs(vdev);
kfree(vblk);
}
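The virtio_blk conversion above is the canonical single-queue case: virtio_find_single_vq() replaces the indexed find_vq() call, and the transport-wide del_vqs() replaces per-queue del_vq() on both the error path and in remove(). A minimal sketch of the same pattern in a hypothetical driver follows; example_done, example_probe and the "requests" name are illustrative, not part of this commit.

static void example_done(struct virtqueue *vq)
{
	/* reap completed buffers with vq->vq_ops->get_buf() */
}

static int example_probe(struct virtio_device *vdev)
{
	struct virtqueue *vq = virtio_find_single_vq(vdev, example_done, "requests");

	if (IS_ERR(vq))
		return PTR_ERR(vq);
	/* ... allocate driver state and start using vq ... */
	return 0;
}

static void example_remove(struct virtio_device *vdev)
{
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);
}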

View file

@ -35,13 +35,13 @@ static DECLARE_COMPLETION(have_data);
static void random_recv_done(struct virtqueue *vq)
{
int len;
unsigned int len;
/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
if (!vq->vq_ops->get_buf(vq, &len))
return;
data_left = len / sizeof(random_data[0]);
data_left += len;
complete(&have_data);
}
@ -49,7 +49,7 @@ static void register_buffer(void)
{
struct scatterlist sg;
sg_init_one(&sg, random_data, RANDOM_DATA_SIZE);
sg_init_one(&sg, random_data+data_left, RANDOM_DATA_SIZE-data_left);
/* There should always be room for one buffer. */
if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) != 0)
BUG();
@ -59,24 +59,32 @@ static void register_buffer(void)
/* At least we don't udelay() in a loop like some other drivers. */
static int virtio_data_present(struct hwrng *rng, int wait)
{
if (data_left)
if (data_left >= sizeof(u32))
return 1;
again:
if (!wait)
return 0;
wait_for_completion(&have_data);
/* Not enough? Re-register. */
if (unlikely(data_left < sizeof(u32))) {
register_buffer();
goto again;
}
return 1;
}
/* virtio_data_present() must have succeeded before this is called. */
static int virtio_data_read(struct hwrng *rng, u32 *data)
{
BUG_ON(!data_left);
BUG_ON(data_left < sizeof(u32));
data_left -= sizeof(u32);
*data = random_data[data_left / 4];
*data = random_data[--data_left];
if (!data_left) {
if (data_left < sizeof(u32)) {
init_completion(&have_data);
register_buffer();
}
@ -94,13 +102,13 @@ static int virtrng_probe(struct virtio_device *vdev)
int err;
/* We expect a single virtqueue. */
vq = vdev->config->find_vq(vdev, 0, random_recv_done);
vq = virtio_find_single_vq(vdev, random_recv_done, "input");
if (IS_ERR(vq))
return PTR_ERR(vq);
err = hwrng_register(&virtio_hwrng);
if (err) {
vdev->config->del_vq(vq);
vdev->config->del_vqs(vdev);
return err;
}
@ -112,7 +120,7 @@ static void virtrng_remove(struct virtio_device *vdev)
{
vdev->config->reset(vdev);
hwrng_unregister(&virtio_hwrng);
vdev->config->del_vq(vq);
vdev->config->del_vqs(vdev);
}
static struct virtio_device_id id_table[] = {

View file

@ -188,6 +188,9 @@ static void hvc_handle_input(struct virtqueue *vq)
* Finally we put our input buffer in the input queue, ready to receive. */
static int __devinit virtcons_probe(struct virtio_device *dev)
{
vq_callback_t *callbacks[] = { hvc_handle_input, NULL};
const char *names[] = { "input", "output" };
struct virtqueue *vqs[2];
int err;
vdev = dev;
@ -199,20 +202,15 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
goto fail;
}
/* Find the input queue. */
/* Find the queues. */
/* FIXME: This is why we want to wean off hvc: we do nothing
* when input comes in. */
in_vq = vdev->config->find_vq(vdev, 0, hvc_handle_input);
if (IS_ERR(in_vq)) {
err = PTR_ERR(in_vq);
err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
if (err)
goto free;
}
out_vq = vdev->config->find_vq(vdev, 1, NULL);
if (IS_ERR(out_vq)) {
err = PTR_ERR(out_vq);
goto free_in_vq;
}
in_vq = vqs[0];
out_vq = vqs[1];
/* Start using the new console output. */
virtio_cons.get_chars = get_chars;
@ -233,17 +231,15 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE);
if (IS_ERR(hvc)) {
err = PTR_ERR(hvc);
goto free_out_vq;
goto free_vqs;
}
/* Register the input buffer the first time. */
add_inbuf();
return 0;
free_out_vq:
vdev->config->del_vq(out_vq);
free_in_vq:
vdev->config->del_vq(in_vq);
free_vqs:
vdev->config->del_vqs(vdev);
free:
kfree(inbuf);
fail:

View file

@ -228,7 +228,8 @@ extern void lguest_setup_irq(unsigned int irq);
* function. */
static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
unsigned index,
void (*callback)(struct virtqueue *vq))
void (*callback)(struct virtqueue *vq),
const char *name)
{
struct lguest_device *ldev = to_lgdev(vdev);
struct lguest_vq_info *lvq;
@ -263,7 +264,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
/* OK, tell virtio_ring.c to set up a virtqueue now we know its size
* and we've got a pointer to its pages. */
vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN,
vdev, lvq->pages, lg_notify, callback);
vdev, lvq->pages, lg_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto unmap;
@ -312,6 +313,38 @@ static void lg_del_vq(struct virtqueue *vq)
kfree(lvq);
}
static void lg_del_vqs(struct virtio_device *vdev)
{
struct virtqueue *vq, *n;
list_for_each_entry_safe(vq, n, &vdev->vqs, list)
lg_del_vq(vq);
}
static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char *names[])
{
struct lguest_device *ldev = to_lgdev(vdev);
int i;
/* We must have this many virtqueues. */
if (nvqs > ldev->desc->num_vq)
return -ENOENT;
for (i = 0; i < nvqs; ++i) {
vqs[i] = lg_find_vq(vdev, i, callbacks[i], names[i]);
if (IS_ERR(vqs[i]))
goto error;
}
return 0;
error:
lg_del_vqs(vdev);
return PTR_ERR(vqs[i]);
}
/* The ops structure which hooks everything together. */
static struct virtio_config_ops lguest_config_ops = {
.get_features = lg_get_features,
@ -321,8 +354,8 @@ static struct virtio_config_ops lguest_config_ops = {
.get_status = lg_get_status,
.set_status = lg_set_status,
.reset = lg_reset,
.find_vq = lg_find_vq,
.del_vq = lg_del_vq,
.find_vqs = lg_find_vqs,
.del_vqs = lg_del_vqs,
};
/* The root device for the lguest virtio devices. This makes them appear as

View file

@ -845,6 +845,10 @@ static int virtnet_probe(struct virtio_device *vdev)
int err;
struct net_device *dev;
struct virtnet_info *vi;
struct virtqueue *vqs[3];
vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
const char *names[] = { "input", "output", "control" };
int nvqs;
/* Allocate ourselves a network device with room for our info */
dev = alloc_etherdev(sizeof(struct virtnet_info));
@ -905,25 +909,19 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
/* We expect two virtqueues, receive then send. */
vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
if (IS_ERR(vi->rvq)) {
err = PTR_ERR(vi->rvq);
goto free;
}
/* We expect two virtqueues, receive then send,
* and optionally control. */
nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done);
if (IS_ERR(vi->svq)) {
err = PTR_ERR(vi->svq);
goto free_recv;
}
err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
if (err)
goto free;
vi->rvq = vqs[0];
vi->svq = vqs[1];
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
vi->cvq = vdev->config->find_vq(vdev, 2, NULL);
if (IS_ERR(vi->cvq)) {
err = PTR_ERR(vi->svq);
goto free_send;
}
vi->cvq = vqs[2];
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
dev->features |= NETIF_F_HW_VLAN_FILTER;
@ -941,7 +939,7 @@ static int virtnet_probe(struct virtio_device *vdev)
err = register_netdev(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
goto free_ctrl;
goto free_vqs;
}
/* Last of all, set up some receive buffers. */
@ -962,13 +960,8 @@ static int virtnet_probe(struct virtio_device *vdev)
unregister:
unregister_netdev(dev);
free_ctrl:
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
vdev->config->del_vq(vi->cvq);
free_send:
vdev->config->del_vq(vi->svq);
free_recv:
vdev->config->del_vq(vi->rvq);
free_vqs:
vdev->config->del_vqs(vdev);
free:
free_netdev(dev);
return err;
@ -994,12 +987,10 @@ static void virtnet_remove(struct virtio_device *vdev)
BUG_ON(vi->num != 0);
vdev->config->del_vq(vi->svq);
vdev->config->del_vq(vi->rvq);
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
vdev->config->del_vq(vi->cvq);
unregister_netdev(vi->dev);
vdev->config->del_vqs(vi->vdev);
while (vi->pages)
__free_pages(get_a_page(vi, GFP_KERNEL), 0);

View file

@ -173,8 +173,9 @@ static void kvm_notify(struct virtqueue *vq)
* this device and sets it up.
*/
static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
unsigned index,
void (*callback)(struct virtqueue *vq))
unsigned index,
void (*callback)(struct virtqueue *vq),
const char *name)
{
struct kvm_device *kdev = to_kvmdev(vdev);
struct kvm_vqconfig *config;
@ -194,7 +195,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN,
vdev, (void *) config->address,
kvm_notify, callback);
kvm_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto unmap;
@ -226,6 +227,38 @@ static void kvm_del_vq(struct virtqueue *vq)
KVM_S390_VIRTIO_RING_ALIGN));
}
static void kvm_del_vqs(struct virtio_device *vdev)
{
struct virtqueue *vq, *n;
list_for_each_entry_safe(vq, n, &vdev->vqs, list)
kvm_del_vq(vq);
}
static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char *names[])
{
struct kvm_device *kdev = to_kvmdev(vdev);
int i;
/* We must have this many virtqueues. */
if (nvqs > kdev->desc->num_vq)
return -ENOENT;
for (i = 0; i < nvqs; ++i) {
vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]);
if (IS_ERR(vqs[i]))
goto error;
}
return 0;
error:
kvm_del_vqs(vdev);
return PTR_ERR(vqs[i]);
}
/*
* The config ops structure as defined by virtio config
*/
@ -237,8 +270,8 @@ static struct virtio_config_ops kvm_vq_configspace_ops = {
.get_status = kvm_get_status,
.set_status = kvm_set_status,
.reset = kvm_reset,
.find_vq = kvm_find_vq,
.del_vq = kvm_del_vq,
.find_vqs = kvm_find_vqs,
.del_vqs = kvm_del_vqs,
};
/*

View file

@ -31,21 +31,37 @@ static ssize_t modalias_show(struct device *_d,
return sprintf(buf, "virtio:d%08Xv%08X\n",
dev->id.device, dev->id.vendor);
}
static ssize_t features_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
unsigned int i;
ssize_t len = 0;
/* We actually represent this as a bitstring, as it could be
* arbitrary length in future. */
for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++)
len += sprintf(buf+len, "%c",
test_bit(i, dev->features) ? '1' : '0');
len += sprintf(buf+len, "\n");
return len;
}
static struct device_attribute virtio_dev_attrs[] = {
__ATTR_RO(device),
__ATTR_RO(vendor),
__ATTR_RO(status),
__ATTR_RO(modalias),
__ATTR_RO(features),
__ATTR_NULL
};
static inline int virtio_id_match(const struct virtio_device *dev,
const struct virtio_device_id *id)
{
if (id->device != dev->id.device)
if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID)
return 0;
return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor != dev->id.vendor;
return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor;
}
/* This looks through all the IDs a driver claims to support. If any of them
@ -118,13 +134,14 @@ static int virtio_dev_probe(struct device *_d)
if (device_features & (1 << i))
set_bit(i, dev->features);
dev->config->finalize_features(dev);
err = drv->probe(dev);
if (err)
add_status(dev, VIRTIO_CONFIG_S_FAILED);
else {
dev->config->finalize_features(dev);
else
add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
}
return err;
}
@ -185,6 +202,8 @@ int register_virtio_device(struct virtio_device *dev)
/* Acknowledge that we've seen the device. */
add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
INIT_LIST_HEAD(&dev->vqs);
/* device_register() causes the bus infrastructure to look for a
* matching driver. */
err = device_register(&dev->dev);
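The id-matching fix above makes VIRTIO_DEV_ANY_ID act as a wildcard on the device field as well as the vendor field (previously the vendor comparison was inverted and the device wildcard was ignored). A hedged example of an id_table that relies on this, with illustrative entries: device type 4 is the entropy device, and the final catch-all entry matches any virtio device.

static struct virtio_device_id example_id_table[] = {
	{ 4, VIRTIO_DEV_ANY_ID },
	{ VIRTIO_DEV_ANY_ID, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

The new "features" sysfs attribute, for its part, prints one '0'/'1' character per feature bit, lowest bit first, so it stays readable even if the bitmap grows beyond 32 bits.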

View file

@ -204,6 +204,9 @@ static int balloon(void *_vballoon)
static int virtballoon_probe(struct virtio_device *vdev)
{
struct virtio_balloon *vb;
struct virtqueue *vqs[2];
vq_callback_t *callbacks[] = { balloon_ack, balloon_ack };
const char *names[] = { "inflate", "deflate" };
int err;
vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
@ -218,22 +221,17 @@ static int virtballoon_probe(struct virtio_device *vdev)
vb->vdev = vdev;
/* We expect two virtqueues. */
vb->inflate_vq = vdev->config->find_vq(vdev, 0, balloon_ack);
if (IS_ERR(vb->inflate_vq)) {
err = PTR_ERR(vb->inflate_vq);
err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
if (err)
goto out_free_vb;
}
vb->deflate_vq = vdev->config->find_vq(vdev, 1, balloon_ack);
if (IS_ERR(vb->deflate_vq)) {
err = PTR_ERR(vb->deflate_vq);
goto out_del_inflate_vq;
}
vb->inflate_vq = vqs[0];
vb->deflate_vq = vqs[1];
vb->thread = kthread_run(balloon, vb, "vballoon");
if (IS_ERR(vb->thread)) {
err = PTR_ERR(vb->thread);
goto out_del_deflate_vq;
goto out_del_vqs;
}
vb->tell_host_first
@ -241,10 +239,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
return 0;
out_del_deflate_vq:
vdev->config->del_vq(vb->deflate_vq);
out_del_inflate_vq:
vdev->config->del_vq(vb->inflate_vq);
out_del_vqs:
vdev->config->del_vqs(vdev);
out_free_vb:
kfree(vb);
out:
@ -264,8 +260,7 @@ static void virtballoon_remove(struct virtio_device *vdev)
/* Now we reset the device so we can clean up the queues. */
vdev->config->reset(vdev);
vdev->config->del_vq(vb->deflate_vq);
vdev->config->del_vq(vb->inflate_vq);
vdev->config->del_vqs(vdev);
kfree(vb);
}

View file

@ -42,6 +42,26 @@ struct virtio_pci_device
/* a list of queues so we can dispatch IRQs */
spinlock_t lock;
struct list_head virtqueues;
/* MSI-X support */
int msix_enabled;
int intx_enabled;
struct msix_entry *msix_entries;
/* Name strings for interrupts. This size should be enough,
* and I'm too lazy to allocate each name separately. */
char (*msix_names)[256];
/* Number of available vectors */
unsigned msix_vectors;
/* Vectors allocated */
unsigned msix_used_vectors;
};
/* Constants for MSI-X */
/* Use first vector for configuration changes, second and the rest for
* virtqueues Thus, we need at least 2 vectors for MSI. */
enum {
VP_MSIX_CONFIG_VECTOR = 0,
VP_MSIX_VQ_VECTOR = 1,
};
struct virtio_pci_vq_info
@ -60,6 +80,9 @@ struct virtio_pci_vq_info
/* the list node for the virtqueues list */
struct list_head node;
/* MSI-X vector (or none) */
unsigned vector;
};
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
@ -109,7 +132,8 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
void __iomem *ioaddr = vp_dev->ioaddr +
VIRTIO_PCI_CONFIG(vp_dev) + offset;
u8 *ptr = buf;
int i;
@ -123,7 +147,8 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
void __iomem *ioaddr = vp_dev->ioaddr +
VIRTIO_PCI_CONFIG(vp_dev) + offset;
const u8 *ptr = buf;
int i;
@ -164,37 +189,26 @@ static void vp_notify(struct virtqueue *vq)
iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
}
/* A small wrapper to also acknowledge the interrupt when it's handled.
* I really need an EIO hook for the vring so I can ack the interrupt once we
* know that we'll be handling the IRQ but before we invoke the callback since
* the callback may notify the host which results in the host attempting to
* raise an interrupt that we would then mask once we acknowledged the
* interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
struct virtio_pci_device *vp_dev = opaque;
struct virtio_driver *drv;
drv = container_of(vp_dev->vdev.dev.driver,
struct virtio_driver, driver);
if (drv && drv->config_changed)
drv->config_changed(&vp_dev->vdev);
return IRQ_HANDLED;
}
/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
struct virtio_pci_device *vp_dev = opaque;
struct virtio_pci_vq_info *info;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
u8 isr;
/* reading the ISR has the effect of also clearing it so it's very
* important to save off the value. */
isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
/* It's definitely not us if the ISR was not high */
if (!isr)
return IRQ_NONE;
/* Configuration change? Tell driver if it wants to know. */
if (isr & VIRTIO_PCI_ISR_CONFIG) {
struct virtio_driver *drv;
drv = container_of(vp_dev->vdev.dev.driver,
struct virtio_driver, driver);
if (drv && drv->config_changed)
drv->config_changed(&vp_dev->vdev);
}
spin_lock_irqsave(&vp_dev->lock, flags);
list_for_each_entry(info, &vp_dev->virtqueues, node) {
@ -206,15 +220,157 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
return ret;
}
/* the config->find_vq() implementation */
/* A small wrapper to also acknowledge the interrupt when it's handled.
* I really need an EIO hook for the vring so I can ack the interrupt once we
* know that we'll be handling the IRQ but before we invoke the callback since
* the callback may notify the host which results in the host attempting to
* raise an interrupt that we would then mask once we acknowledged the
* interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
struct virtio_pci_device *vp_dev = opaque;
u8 isr;
/* reading the ISR has the effect of also clearing it so it's very
* important to save off the value. */
isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
/* It's definitely not us if the ISR was not high */
if (!isr)
return IRQ_NONE;
/* Configuration change? Tell driver if it wants to know. */
if (isr & VIRTIO_PCI_ISR_CONFIG)
vp_config_changed(irq, opaque);
return vp_vring_interrupt(irq, opaque);
}
static void vp_free_vectors(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
int i;
if (vp_dev->intx_enabled) {
free_irq(vp_dev->pci_dev->irq, vp_dev);
vp_dev->intx_enabled = 0;
}
for (i = 0; i < vp_dev->msix_used_vectors; ++i)
free_irq(vp_dev->msix_entries[i].vector, vp_dev);
vp_dev->msix_used_vectors = 0;
if (vp_dev->msix_enabled) {
/* Disable the vector used for configuration */
iowrite16(VIRTIO_MSI_NO_VECTOR,
vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
/* Flush the write out to device */
ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
vp_dev->msix_enabled = 0;
pci_disable_msix(vp_dev->pci_dev);
}
}
static int vp_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
int *options, int noptions)
{
int i;
for (i = 0; i < noptions; ++i)
if (!pci_enable_msix(dev, entries, options[i]))
return options[i];
return -EBUSY;
}
static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
const char *name = dev_name(&vp_dev->vdev.dev);
unsigned i, v;
int err = -ENOMEM;
/* We want at most one vector per queue and one for config changes.
* Fallback to separate vectors for config and a shared for queues.
* Finally fall back to regular interrupts. */
int options[] = { max_vqs + 1, 2 };
int nvectors = max(options[0], options[1]);
vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
GFP_KERNEL);
if (!vp_dev->msix_entries)
goto error_entries;
vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
GFP_KERNEL);
if (!vp_dev->msix_names)
goto error_names;
for (i = 0; i < nvectors; ++i)
vp_dev->msix_entries[i].entry = i;
err = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries,
options, ARRAY_SIZE(options));
if (err < 0) {
/* Can't allocate enough MSI-X vectors, use regular interrupt */
vp_dev->msix_vectors = 0;
err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
IRQF_SHARED, name, vp_dev);
if (err)
goto error_irq;
vp_dev->intx_enabled = 1;
} else {
vp_dev->msix_vectors = err;
vp_dev->msix_enabled = 1;
/* Set the vector used for configuration */
v = vp_dev->msix_used_vectors;
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-config", name);
err = request_irq(vp_dev->msix_entries[v].vector,
vp_config_changed, 0, vp_dev->msix_names[v],
vp_dev);
if (err)
goto error_irq;
++vp_dev->msix_used_vectors;
iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
/* Verify we had enough resources to assign the vector */
v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
if (v == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
goto error_irq;
}
}
if (vp_dev->msix_vectors && vp_dev->msix_vectors != max_vqs + 1) {
/* Shared vector for all VQs */
v = vp_dev->msix_used_vectors;
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-virtqueues", name);
err = request_irq(vp_dev->msix_entries[v].vector,
vp_vring_interrupt, 0, vp_dev->msix_names[v],
vp_dev);
if (err)
goto error_irq;
++vp_dev->msix_used_vectors;
}
return 0;
error_irq:
vp_free_vectors(vdev);
kfree(vp_dev->msix_names);
error_names:
kfree(vp_dev->msix_entries);
error_entries:
return err;
}
static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
void (*callback)(struct virtqueue *vq))
void (*callback)(struct virtqueue *vq),
const char *name)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_vq_info *info;
struct virtqueue *vq;
unsigned long flags, size;
u16 num;
u16 num, vector;
int err;
/* Select the queue we're interested in */
@ -233,6 +389,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
info->queue_index = index;
info->num = num;
info->vector = VIRTIO_MSI_NO_VECTOR;
size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
@ -247,7 +404,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
/* create the vring */
vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN,
vdev, info->queue, vp_notify, callback);
vdev, info->queue, vp_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto out_activate_queue;
@ -256,12 +413,43 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
vq->priv = info;
info->vq = vq;
/* allocate per-vq vector if available and necessary */
if (callback && vp_dev->msix_used_vectors < vp_dev->msix_vectors) {
vector = vp_dev->msix_used_vectors;
snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names,
"%s-%s", dev_name(&vp_dev->vdev.dev), name);
err = request_irq(vp_dev->msix_entries[vector].vector,
vring_interrupt, 0,
vp_dev->msix_names[vector], vq);
if (err)
goto out_request_irq;
info->vector = vector;
++vp_dev->msix_used_vectors;
} else
vector = VP_MSIX_VQ_VECTOR;
if (callback && vp_dev->msix_enabled) {
iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
if (vector == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
goto out_assign;
}
}
spin_lock_irqsave(&vp_dev->lock, flags);
list_add(&info->node, &vp_dev->virtqueues);
spin_unlock_irqrestore(&vp_dev->lock, flags);
return vq;
out_assign:
if (info->vector != VIRTIO_MSI_NO_VECTOR) {
free_irq(vp_dev->msix_entries[info->vector].vector, vq);
--vp_dev->msix_used_vectors;
}
out_request_irq:
vring_del_virtqueue(vq);
out_activate_queue:
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
free_pages_exact(info->queue, size);
@ -270,21 +458,27 @@ out_info:
return ERR_PTR(err);
}
/* the config->del_vq() implementation */
static void vp_del_vq(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_vq_info *info = vq->priv;
unsigned long flags, size;
unsigned long size;
spin_lock_irqsave(&vp_dev->lock, flags);
list_del(&info->node);
spin_unlock_irqrestore(&vp_dev->lock, flags);
iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
if (info->vector != VIRTIO_MSI_NO_VECTOR)
free_irq(vp_dev->msix_entries[info->vector].vector, vq);
if (vp_dev->msix_enabled) {
iowrite16(VIRTIO_MSI_NO_VECTOR,
vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
/* Flush the write out to device */
ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
}
vring_del_virtqueue(vq);
/* Select and deactivate the queue */
iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
@ -292,14 +486,57 @@ static void vp_del_vq(struct virtqueue *vq)
kfree(info);
}
/* the config->del_vqs() implementation */
static void vp_del_vqs(struct virtio_device *vdev)
{
struct virtqueue *vq, *n;
list_for_each_entry_safe(vq, n, &vdev->vqs, list)
vp_del_vq(vq);
vp_free_vectors(vdev);
}
/* the config->find_vqs() implementation */
static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char *names[])
{
int vectors = 0;
int i, err;
/* How many vectors would we like? */
for (i = 0; i < nvqs; ++i)
if (callbacks[i])
++vectors;
err = vp_request_vectors(vdev, vectors);
if (err)
goto error_request;
for (i = 0; i < nvqs; ++i) {
vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]);
if (IS_ERR(vqs[i]))
goto error_find;
}
return 0;
error_find:
vp_del_vqs(vdev);
error_request:
return PTR_ERR(vqs[i]);
}
static struct virtio_config_ops virtio_pci_config_ops = {
.get = vp_get,
.set = vp_set,
.get_status = vp_get_status,
.set_status = vp_set_status,
.reset = vp_reset,
.find_vq = vp_find_vq,
.del_vq = vp_del_vq,
.find_vqs = vp_find_vqs,
.del_vqs = vp_del_vqs,
.get_features = vp_get_features,
.finalize_features = vp_finalize_features,
};
@ -310,7 +547,7 @@ static void virtio_pci_release_dev(struct device *_d)
struct virtio_pci_device *vp_dev = to_vp_device(dev);
struct pci_dev *pci_dev = vp_dev->pci_dev;
free_irq(pci_dev->irq, vp_dev);
vp_del_vqs(dev);
pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
pci_release_regions(pci_dev);
@ -369,21 +606,13 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
vp_dev->vdev.id.device = pci_dev->subsystem_device;
/* register a handler for the queue with the PCI device's interrupt */
err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
dev_name(&vp_dev->vdev.dev), vp_dev);
if (err)
goto out_set_drvdata;
/* finally register the virtio device */
err = register_virtio_device(&vp_dev->vdev);
if (err)
goto out_req_irq;
goto out_set_drvdata;
return 0;
out_req_irq:
free_irq(pci_dev->irq, vp_dev);
out_set_drvdata:
pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
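To make the MSI-X strategy above concrete, here is an illustrative budget for a hypothetical device with three virtqueues, two of which have callbacks (the third, say a control queue, passes a NULL callback):

/*
 * vp_find_vqs() counts non-NULL callbacks:      vectors wanted = 2
 * vp_request_vectors(vdev, 2) then tries, in order:
 *   pci_enable_msix(dev, entries, 3)  ->  per-VQ vectors + a config vector
 *   pci_enable_msix(dev, entries, 2)  ->  config vector + one shared VQ vector
 *   request_irq(pci_dev->irq, vp_interrupt, IRQF_SHARED, ...)  ->  legacy INTx
 * A queue whose callback is NULL is never assigned a vector; it stays
 * at VIRTIO_MSI_NO_VECTOR.
 */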

View file

@ -23,21 +23,30 @@
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt...) \
do { dev_err(&(_vq)->vq.vdev->dev, fmt); BUG(); } while(0)
#define BAD_RING(_vq, fmt, args...) \
do { \
dev_err(&(_vq)->vq.vdev->dev, \
"%s:"fmt, (_vq)->vq.name, ##args); \
BUG(); \
} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq) \
do { \
if ((_vq)->in_use) \
panic("in_use = %i\n", (_vq)->in_use); \
panic("%s:in_use = %i\n", \
(_vq)->vq.name, (_vq)->in_use); \
(_vq)->in_use = __LINE__; \
mb(); \
} while(0)
} while (0)
#define END_USE(_vq) \
do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0)
#else
#define BAD_RING(_vq, fmt...) \
do { dev_err(&_vq->vq.vdev->dev, fmt); (_vq)->broken = true; } while(0)
#define BAD_RING(_vq, fmt, args...) \
do { \
dev_err(&_vq->vq.vdev->dev, \
"%s:"fmt, (_vq)->vq.name, ##args); \
(_vq)->broken = true; \
} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
@ -52,6 +61,9 @@ struct vring_virtqueue
/* Other side has made a mess, don't try any more. */
bool broken;
/* Host supports indirect buffers */
bool indirect;
/* Number of free buffers */
unsigned int num_free;
/* Head of free buffer list. */
@ -76,6 +88,55 @@ struct vring_virtqueue
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
struct scatterlist sg[],
unsigned int out,
unsigned int in)
{
struct vring_desc *desc;
unsigned head;
int i;
desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC);
if (!desc)
return vq->vring.num;
/* Transfer entries from the sg list into the indirect page */
for (i = 0; i < out; i++) {
desc[i].flags = VRING_DESC_F_NEXT;
desc[i].addr = sg_phys(sg);
desc[i].len = sg->length;
desc[i].next = i+1;
sg++;
}
for (; i < (out + in); i++) {
desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
desc[i].addr = sg_phys(sg);
desc[i].len = sg->length;
desc[i].next = i+1;
sg++;
}
/* Last one doesn't continue. */
desc[i-1].flags &= ~VRING_DESC_F_NEXT;
desc[i-1].next = 0;
/* We're about to use a buffer */
vq->num_free--;
/* Use a single buffer which doesn't continue */
head = vq->free_head;
vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
vq->vring.desc[head].addr = virt_to_phys(desc);
vq->vring.desc[head].len = i * sizeof(struct vring_desc);
/* Update free pointer */
vq->free_head = vq->vring.desc[head].next;
return head;
}
static int vring_add_buf(struct virtqueue *_vq,
struct scatterlist sg[],
unsigned int out,
@ -85,12 +146,21 @@ static int vring_add_buf(struct virtqueue *_vq,
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i, avail, head, uninitialized_var(prev);
START_USE(vq);
BUG_ON(data == NULL);
/* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold */
if (vq->indirect && (out + in) > 1 && vq->num_free) {
head = vring_add_indirect(vq, sg, out, in);
if (head != vq->vring.num)
goto add_head;
}
BUG_ON(out + in > vq->vring.num);
BUG_ON(out + in == 0);
START_USE(vq);
if (vq->num_free < out + in) {
pr_debug("Can't add buf len %i - avail = %i\n",
out + in, vq->num_free);
@ -127,6 +197,7 @@ static int vring_add_buf(struct virtqueue *_vq,
/* Update free pointer */
vq->free_head = i;
add_head:
/* Set token. */
vq->data[head] = data;
@ -170,6 +241,11 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
/* Put back on free list: find end */
i = head;
/* Free the indirect table */
if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
kfree(phys_to_virt(vq->vring.desc[i].addr));
while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
i = vq->vring.desc[i].next;
vq->num_free++;
@ -284,7 +360,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
struct virtio_device *vdev,
void *pages,
void (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *))
void (*callback)(struct virtqueue *),
const char *name)
{
struct vring_virtqueue *vq;
unsigned int i;
@ -303,14 +380,18 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.vq_ops = &vring_vq_ops;
vq->vq.name = name;
vq->notify = notify;
vq->broken = false;
vq->last_used_idx = 0;
vq->num_added = 0;
list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
vq->in_use = false;
#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
/* No callback? Tell other side not to bother us. */
if (!callback)
vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
@ -327,6 +408,7 @@ EXPORT_SYMBOL_GPL(vring_new_virtqueue);
void vring_del_virtqueue(struct virtqueue *vq)
{
list_del(&vq->list);
kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@ -338,6 +420,8 @@ void vring_transport_features(struct virtio_device *vdev)
for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
switch (i) {
case VIRTIO_RING_F_INDIRECT_DESC:
break;
default:
/* We don't understand this bit. */
clear_bit(i, vdev->features);
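A quick illustration of what the indirect path above buys; the numbers are an example, not taken from this commit. With VIRTIO_RING_F_INDIRECT_DESC negotiated, a request whose scatterlist has out + in = 4 entries is handled as follows:

/*
 * direct:   consumes 4 ring descriptors (num_free -= 4)
 * indirect: consumes 1 ring descriptor (num_free -= 1) that points,
 *           via VRING_DESC_F_INDIRECT, at a kmalloc(GFP_ATOMIC) table
 *           of 4 * sizeof(struct vring_desc) = 4 * 16 = 64 bytes,
 *           which detach_buf() kfree()s when the buffer comes back.
 */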

View file

@ -10,14 +10,17 @@
/**
* virtqueue - a queue to register buffers for sending or receiving.
* @list: the chain of virtqueues for this device
* @callback: the function to call when buffers are consumed (can be NULL).
* @name: the name of this virtqueue (mainly for debugging)
* @vdev: the virtio device this queue was created for.
* @vq_ops: the operations for this virtqueue (see below).
* @priv: a pointer for the virtqueue implementation to use.
*/
struct virtqueue
{
struct virtqueue {
struct list_head list;
void (*callback)(struct virtqueue *vq);
const char *name;
struct virtio_device *vdev;
struct virtqueue_ops *vq_ops;
void *priv;
@ -76,15 +79,16 @@ struct virtqueue_ops {
* @dev: underlying device.
* @id: the device type identification (used to match it with a driver).
* @config: the configuration ops for this device.
* @vqs: the list of virtqueues for this device.
* @features: the features supported by both driver and device.
* @priv: private pointer for the driver's use.
*/
struct virtio_device
{
struct virtio_device {
int index;
struct device dev;
struct virtio_device_id id;
struct virtio_config_ops *config;
struct list_head vqs;
/* Note that this is a Linux set_bit-style bitmap. */
unsigned long features[1];
void *priv;
@ -99,8 +103,7 @@ void unregister_virtio_device(struct virtio_device *dev);
* @id_table: the ids serviced by this driver.
* @feature_table: an array of feature numbers supported by this device.
* @feature_table_size: number of entries in the feature table array.
* @probe: the function to call when a device is found. Returns a token for
* remove, or PTR_ERR().
* @probe: the function to call when a device is found. Returns 0 or -errno.
* @remove: the function when a device is removed.
* @config_changed: optional function to call when the device configuration
* changes; may be called in interrupt context.

View file

@ -29,6 +29,7 @@
#define VIRTIO_F_NOTIFY_ON_EMPTY 24
#ifdef __KERNEL__
#include <linux/err.h>
#include <linux/virtio.h>
/**
@ -49,15 +50,26 @@
* @set_status: write the status byte
* vdev: the virtio_device
* status: the new status byte
* @request_vqs: request the specified number of virtqueues
* vdev: the virtio_device
* max_vqs: the max number of virtqueues we want
* If supplied, must call before any virtqueues are instantiated.
* To modify the max number of virtqueues after request_vqs has been
* called, call free_vqs and then request_vqs with a new value.
* @free_vqs: cleanup resources allocated by request_vqs
* vdev: the virtio_device
* If supplied, must call after all virtqueues have been deleted.
* @reset: reset the device
* vdev: the virtio device
* After this, status and feature negotiation must be done again
* @find_vq: find a virtqueue and instantiate it.
* @find_vqs: find virtqueues and instantiate them.
* vdev: the virtio_device
* index: the 0-based virtqueue number in case there's more than one.
* callback: the virqtueue callback
* Returns the new virtqueue or ERR_PTR() (eg. -ENOENT).
* @del_vq: free a virtqueue found by find_vq().
* nvqs: the number of virtqueues to find
* vqs: on success, includes new virtqueues
* callbacks: array of callbacks, for each virtqueue
* names: array of virtqueue names (mainly for debugging)
* Returns 0 on success or error status
* @del_vqs: free virtqueues found by find_vqs().
* @get_features: get the array of feature bits for this device.
* vdev: the virtio_device
* Returns the first 32 feature bits (all we currently need).
@ -66,6 +78,7 @@
* This gives the final feature bits for the device: it can change
* the dev->feature bits if it wants.
*/
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops
{
void (*get)(struct virtio_device *vdev, unsigned offset,
@ -75,10 +88,11 @@ struct virtio_config_ops
u8 (*get_status)(struct virtio_device *vdev);
void (*set_status)(struct virtio_device *vdev, u8 status);
void (*reset)(struct virtio_device *vdev);
struct virtqueue *(*find_vq)(struct virtio_device *vdev,
unsigned index,
void (*callback)(struct virtqueue *));
void (*del_vq)(struct virtqueue *vq);
int (*find_vqs)(struct virtio_device *, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char *names[]);
void (*del_vqs)(struct virtio_device *);
u32 (*get_features)(struct virtio_device *vdev);
void (*finalize_features)(struct virtio_device *vdev);
};
@ -99,7 +113,9 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
if (__builtin_constant_p(fbit))
BUILD_BUG_ON(fbit >= 32);
virtio_check_driver_offered_feature(vdev, fbit);
if (fbit < VIRTIO_TRANSPORT_F_START)
virtio_check_driver_offered_feature(vdev, fbit);
return test_bit(fbit, vdev->features);
}
@ -126,5 +142,18 @@ static inline int virtio_config_buf(struct virtio_device *vdev,
vdev->config->get(vdev, offset, buf, len);
return 0;
}
static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
vq_callback_t *c, const char *n)
{
vq_callback_t *callbacks[] = { c };
const char *names[] = { n };
struct virtqueue *vq;
int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names);
if (err < 0)
return ERR_PTR(err);
return vq;
}
#endif /* __KERNEL__ */
#endif /* _LINUX_VIRTIO_CONFIG_H */
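A minimal sketch of a probe routine using the new multi-queue interface; the callback, function names and queue names are illustrative. A NULL callback simply tells the other side not to interrupt us for that queue, as the console and net drivers above rely on.

static void two_q_recv_done(struct virtqueue *vq)
{
	/* consume incoming buffers with vq->vq_ops->get_buf() */
}

static int two_q_probe(struct virtio_device *vdev)
{
	struct virtqueue *vqs[2];
	vq_callback_t *callbacks[] = { two_q_recv_done, NULL };
	const char *names[] = { "input", "output" };
	int err;

	err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
	if (err)
		return err;
	/* vqs[0] is "input", vqs[1] is "output"; undo everything with
	 * vdev->config->del_vqs(vdev) on error paths and in remove(). */
	return 0;
}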

View file

@ -47,9 +47,17 @@
/* The bit of the ISR which indicates a device configuration change. */
#define VIRTIO_PCI_ISR_CONFIG 0x2
/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR 20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR 22
/* Vector value used to disable MSI for queue */
#define VIRTIO_MSI_NO_VECTOR 0xffff
/* The remaining space is defined by each driver as the per-driver
* configuration space */
#define VIRTIO_PCI_CONFIG 20
#define VIRTIO_PCI_CONFIG(dev) ((dev)->msix_enabled ? 24 : 20)
/* Virtio ABI version, this must match exactly */
#define VIRTIO_PCI_ABI_VERSION 0
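The layout change is easy to miss: the two new 16-bit MSI-X registers occupy offsets 20-23, so with MSI-X enabled the device-specific configuration space starts at offset 24 rather than 20. A one-line sketch, mirroring vp_get() above:

	/* First byte of device-specific config: offset 20 without MSI-X,
	 * offset 24 with it. */
	u8 b = ioread8(vp_dev->ioaddr + VIRTIO_PCI_CONFIG(vp_dev) + 0);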

View file

@ -14,6 +14,8 @@
#define VRING_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4
/* The Host uses this in used->flags to advise the Guest: don't kick me when
* you add a buffer. It's unreliable, so it's simply an optimization. Guest
@ -24,6 +26,9 @@
* optimization. */
#define VRING_AVAIL_F_NO_INTERRUPT 1
/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC 28
/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc
{
@ -119,7 +124,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
struct virtio_device *vdev,
void *pages,
void (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq));
void (*callback)(struct virtqueue *vq),
const char *name);
void vring_del_virtqueue(struct virtqueue *vq);
/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);

View file

@ -246,7 +246,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
chan->vdev = vdev;
/* We expect one virtqueue, for requests. */
chan->vq = vdev->config->find_vq(vdev, 0, req_done);
chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
if (IS_ERR(chan->vq)) {
err = PTR_ERR(chan->vq);
goto out_free_vq;
@ -261,7 +261,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
return 0;
out_free_vq:
vdev->config->del_vq(chan->vq);
vdev->config->del_vqs(vdev);
fail:
mutex_lock(&virtio_9p_lock);
chan_index--;
@ -332,7 +332,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
BUG_ON(chan->inuse);
if (chan->initialized) {
vdev->config->del_vq(chan->vq);
vdev->config->del_vqs(vdev);
chan->initialized = false;
}
}

View file

@ -641,7 +641,7 @@ static int do_virtio_entry(const char *filename, struct virtio_device_id *id,
id->vendor = TO_NATIVE(id->vendor);
strcpy(alias, "virtio:");
ADD(alias, "d", 1, id->device);
ADD(alias, "d", id->device != VIRTIO_DEV_ANY_ID, id->device);
ADD(alias, "v", id->vendor != VIRTIO_DEV_ANY_ID, id->vendor);
add_wildcard(alias);
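The effect on generated module aliases, with illustrative IDs (2 is the block device type): a concrete device ID is still printed as a fixed-width hex field, while VIRTIO_DEV_ANY_ID now becomes a '*' wildcard instead of embedding the literal 0xFFFFFFFF value.

	{ 2, VIRTIO_DEV_ANY_ID }                   ->  virtio:d00000002v*
	{ VIRTIO_DEV_ANY_ID, VIRTIO_DEV_ANY_ID }   ->  virtio:d*v*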