remoteproc: allocate vrings on demand, free when not needed

Dynamically allocate the vrings' DMA memory when the remote processor
is about to be powered on (i.e. when ->find_vqs() is invoked),
and release it as soon as the processor is powered off (i.e. when
->del_vqs() is invoked).
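
In sketch form (condensed from the driver hunks below, with error
handling and virtqueue setup trimmed), the new lifetime of a vring's
DMA memory is:

    static struct virtqueue *rp_find_vq(struct virtio_device *vdev, unsigned id,
                                        void (*callback)(struct virtqueue *vq),
                                        const char *name)
    {
            struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
            int ret;

            /* DMA memory for vring 'id' exists from here... */
            ret = rproc_alloc_vring(rvdev, id);
            if (ret)
                    return ERR_PTR(ret);

            /* ... vring_new_virtqueue() is set up on rvdev->vring[id].va ... */
    }

    static void rproc_virtio_del_vqs(struct virtio_device *vdev)
    {
            struct virtqueue *vq, *n;

            list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
                    struct rproc_vring *rvring = vq->priv;

                    vring_del_virtqueue(vq);
                    /* ...until here */
                    rproc_free_vring(rvring);
            }
    }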

The obvious and immediate benefit is better memory utilization, since
memory for the vrings is now allocated only when the relevant remote
processor is in use.

This approach also makes recovery of a (crashing) remote processor
easier: one just needs to remove the relevant vdevs, and the entire
vring cleanup takes place automagically.

Tested-by: Fernando Guzman Lugo <fernando.lugo@ti.com>
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Ohad Ben-Cohen 2012-05-17 14:23:59 +03:00
Parent 485802a6c5
Commit 6db20ea8d8
3 changed files with 81 additions and 71 deletions

drivers/remoteproc/remoteproc_core.c

@@ -279,15 +279,64 @@ rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len)
 	return ret;
 }
 
+int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
+{
+	struct rproc *rproc = rvdev->rproc;
+	struct device *dev = rproc->dev;
+	struct rproc_vring *rvring = &rvdev->vring[i];
+	dma_addr_t dma;
+	void *va;
+	int ret, size, notifyid;
+
+	/* actual size of vring (in bytes) */
+	size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
+
+	if (!idr_pre_get(&rproc->notifyids, GFP_KERNEL)) {
+		dev_err(dev, "idr_pre_get failed\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Allocate non-cacheable memory for the vring. In the future
+	 * this call will also configure the IOMMU for us
+	 * TODO: let the rproc know the da of this vring
+	 */
+	va = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+	if (!va) {
+		dev_err(dev, "dma_alloc_coherent failed\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Assign an rproc-wide unique index for this vring
+	 * TODO: assign a notifyid for rvdev updates as well
+	 * TODO: let the rproc know the notifyid of this vring
+	 * TODO: support predefined notifyids (via resource table)
+	 */
+	ret = idr_get_new(&rproc->notifyids, rvring, &notifyid);
+	if (ret) {
+		dev_err(dev, "idr_get_new failed: %d\n", ret);
+		dma_free_coherent(dev, size, va, dma);
+		return ret;
+	}
+
+	dev_dbg(dev, "vring%d: va %p dma %x size %x idr %d\n", i, va,
+				dma, size, notifyid);
+
+	rvring->va = va;
+	rvring->dma = dma;
+	rvring->notifyid = notifyid;
+
+	return 0;
+}
+
 static int
-__rproc_handle_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
+rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
 {
 	struct rproc *rproc = rvdev->rproc;
 	struct device *dev = rproc->dev;
 	struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
-	dma_addr_t dma;
-	void *va;
-	int ret, size, notifyid;
+	struct rproc_vring *rvring = &rvdev->vring[i];
 
 	dev_dbg(dev, "vdev rsc: vring%d: da %x, qsz %d, align %d\n",
 				i, vring->da, vring->num, vring->align);
@@ -305,62 +354,20 @@ __rproc_handle_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
 		return -EINVAL;
 	}
 
-	/* actual size of vring (in bytes) */
-	size = PAGE_ALIGN(vring_size(vring->num, vring->align));
-
-	if (!idr_pre_get(&rproc->notifyids, GFP_KERNEL)) {
-		dev_err(dev, "idr_pre_get failed\n");
-		return -ENOMEM;
-	}
-
-	/*
-	 * Allocate non-cacheable memory for the vring. In the future
-	 * this call will also configure the IOMMU for us
-	 */
-	va = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
-	if (!va) {
-		dev_err(dev, "dma_alloc_coherent failed\n");
-		return -EINVAL;
-	}
-
-	/* assign an rproc-wide unique index for this vring */
-	/* TODO: assign a notifyid for rvdev updates as well */
-	ret = idr_get_new(&rproc->notifyids, &rvdev->vring[i], &notifyid);
-	if (ret) {
-		dev_err(dev, "idr_get_new failed: %d\n", ret);
-		dma_free_coherent(dev, size, va, dma);
-		return ret;
-	}
-
-	/* let the rproc know the da and notifyid of this vring */
-	/* TODO: expose this to remote processor */
-	vring->da = dma;
-	vring->notifyid = notifyid;
-
-	dev_dbg(dev, "vring%d: va %p dma %x size %x idr %d\n", i, va,
-				dma, size, notifyid);
-
-	rvdev->vring[i].len = vring->num;
-	rvdev->vring[i].align = vring->align;
-	rvdev->vring[i].va = va;
-	rvdev->vring[i].dma = dma;
-	rvdev->vring[i].notifyid = notifyid;
-	rvdev->vring[i].rvdev = rvdev;
+	rvring->len = vring->num;
+	rvring->align = vring->align;
+	rvring->rvdev = rvdev;
 
 	return 0;
 }
 
-static void __rproc_free_vrings(struct rproc_vdev *rvdev, int i)
+void rproc_free_vring(struct rproc_vring *rvring)
 {
-	struct rproc *rproc = rvdev->rproc;
+	int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
+	struct rproc *rproc = rvring->rvdev->rproc;
 
-	for (i--; i >= 0; i--) {
-		struct rproc_vring *rvring = &rvdev->vring[i];
-		int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
-
-		dma_free_coherent(rproc->dev, size, rvring->va, rvring->dma);
-		idr_remove(&rproc->notifyids, rvring->notifyid);
-	}
+	dma_free_coherent(rproc->dev, size, rvring->va, rvring->dma);
+	idr_remove(&rproc->notifyids, rvring->notifyid);
 }
 
 /**
@@ -425,11 +432,11 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
 
 	rvdev->rproc = rproc;
 
-	/* allocate the vrings */
+	/* parse the vrings */
 	for (i = 0; i < rsc->num_of_vrings; i++) {
-		ret = __rproc_handle_vring(rvdev, rsc, i);
+		ret = rproc_parse_vring(rvdev, rsc, i);
 		if (ret)
-			goto free_vrings;
+			goto free_rvdev;
 	}
 
 	/* remember the device features */
@@ -440,12 +447,11 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
 	/* it is now safe to add the virtio device */
 	ret = rproc_add_virtio_dev(rvdev, rsc->id);
 	if (ret)
-		goto free_vrings;
+		goto free_rvdev;
 
 	return 0;
 
-free_vrings:
-	__rproc_free_vrings(rvdev, i);
+free_rvdev:
 	kfree(rvdev);
 	return ret;
 }
@@ -1264,18 +1270,11 @@ EXPORT_SYMBOL(rproc_shutdown);
 void rproc_release(struct kref *kref)
 {
 	struct rproc *rproc = container_of(kref, struct rproc, refcount);
-	struct rproc_vdev *rvdev, *rvtmp;
 
 	dev_info(rproc->dev, "removing %s\n", rproc->name);
 
 	rproc_delete_debug_dir(rproc);
 
-	/* clean up remote vdev entries */
-	list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node) {
-		__rproc_free_vrings(rvdev, RVDEV_NUM_VRINGS);
-		list_del(&rvdev->node);
-	}
-
 	/*
 	 * At this point no one holds a reference to rproc anymore,
 	 * so we can directly unroll rproc_alloc()
@@ -1546,7 +1545,7 @@ EXPORT_SYMBOL(rproc_free);
  */
 int rproc_unregister(struct rproc *rproc)
 {
-	struct rproc_vdev *rvdev;
+	struct rproc_vdev *rvdev, *tmp;
 
 	if (!rproc)
 		return -EINVAL;
@@ -1555,7 +1554,7 @@ int rproc_unregister(struct rproc *rproc)
 	wait_for_completion(&rproc->firmware_loading_complete);
 
 	/* clean up remote vdev entries */
-	list_for_each_entry(rvdev, &rproc->rvdevs, node)
+	list_for_each_entry_safe(rvdev, tmp, &rproc->rvdevs, node)
 		rproc_remove_virtio_dev(rvdev);
 
 	/* the rproc is downref'ed as soon as it's removed from the klist */
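
Note: the notifyid bookkeeping above uses the two-step IDR API of this
kernel generation (idr_pre_get() followed by idr_get_new()); later
kernels replaced the pair with idr_alloc(). A minimal standalone sketch
of the pattern, with hypothetical names (notifyids, assign_notifyid):

    #include <linux/idr.h>

    static DEFINE_IDR(notifyids);

    /*
     * Preload the IDR, then map the next free id to @ptr. idr_pre_get()
     * returns 0 if preallocation failed; idr_get_new() can still return
     * -EAGAIN if the preallocated nodes were consumed by a racing
     * allocation, in which case callers typically retry the sequence.
     */
    static int assign_notifyid(void *ptr, int *id)
    {
            if (!idr_pre_get(&notifyids, GFP_KERNEL))
                    return -ENOMEM;

            return idr_get_new(&notifyids, ptr, id);
    }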

drivers/remoteproc/remoteproc_internal.h

@@ -41,4 +41,6 @@ void rproc_create_debug_dir(struct rproc *rproc);
 void rproc_init_debugfs(void);
 void rproc_exit_debugfs(void);
 
+void rproc_free_vring(struct rproc_vring *rvring);
+int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
 #endif /* REMOTEPROC_INTERNAL_H */

drivers/remoteproc/remoteproc_virtio.c

@@ -77,14 +77,17 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
 	struct rproc_vring *rvring;
 	struct virtqueue *vq;
 	void *addr;
-	int len, size;
+	int len, size, ret;
 
 	/* we're temporarily limited to two virtqueues per rvdev */
 	if (id >= ARRAY_SIZE(rvdev->vring))
 		return ERR_PTR(-EINVAL);
 
-	rvring = &rvdev->vring[id];
+	ret = rproc_alloc_vring(rvdev, id);
+	if (ret)
+		return ERR_PTR(ret);
 
+	rvring = &rvdev->vring[id];
 	addr = rvring->va;
 	len = rvring->len;
@@ -103,6 +106,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
 			rproc_virtio_notify, callback, name);
 	if (!vq) {
 		dev_err(rproc->dev, "vring_new_virtqueue %s failed\n", name);
+		rproc_free_vring(rvring);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -125,6 +129,7 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev)
 		rvring = vq->priv;
 		rvring->vq = NULL;
 		vring_del_virtqueue(vq);
+		rproc_free_vring(rvring);
 	}
 }
@@ -228,8 +233,12 @@ static struct virtio_config_ops rproc_virtio_config_ops = {
 static void rproc_vdev_release(struct device *dev)
 {
 	struct virtio_device *vdev = dev_to_virtio(dev);
+	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
 	struct rproc *rproc = vdev_to_rproc(vdev);
 
+	list_del(&rvdev->node);
+	kfree(rvdev);
+
 	kref_put(&rproc->refcount, rproc_release);
 }
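
Note: rproc_vdev_release() above follows the standard kref idiom, where
the final kref_put() invokes the release callback that tears the object
down. A minimal standalone sketch, with hypothetical names (my_obj,
my_obj_put):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct my_obj {
            struct kref refcount;
            /* ... payload ... */
    };

    /* invoked by kref_put() when the last reference is dropped */
    static void my_obj_release(struct kref *kref)
    {
            struct my_obj *obj = container_of(kref, struct my_obj, refcount);

            kfree(obj);
    }

    static void my_obj_put(struct my_obj *obj)
    {
            kref_put(&obj->refcount, my_obj_release);
    }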