// SPDX-License-Identifier: GPL-2.0-only
/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
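
/*
 * Translate a logical index into a subscription's circular kevent buffer
 * (an offset from sev->first) into the physical array slot, wrapping
 * around at sev->elems.
 */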
static unsigned int sev_pos(const struct v4l2_subscribed_event *sev, unsigned int idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}
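
/*
 * Dequeue the oldest pending event from the file handle, filling in the
 * userspace-visible timestamp and remaining-events count. Takes
 * fh->vdev->fh_lock internally.
 */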
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	struct timespec64 ts;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	ts = ns_to_timespec64(kev->ts);
	event->timestamp.tv_sec = ts.tv_sec;
	event->timestamp.tv_nsec = ts.tv_nsec;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}
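
/*
 * Queue an event on a single file handle. If the subscription's kevent ring
 * is full, the oldest entry is dropped; when the subscriber provides
 * replace/merge ops, its payload is folded into the remaining events so the
 * information is not lost.
 */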
static void __v4l2_event_queue_fh(struct v4l2_fh *fh,
				  const struct v4l2_event *ev, u64 ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->ts = ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}
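
/*
 * Broadcast an event to every file handle open on the video device.
 * A typical driver call might look like this (sketch):
 *
 *	struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
 *
 *	v4l2_event_queue(vdev, &ev);
 */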
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	u64 ts;

	if (vdev == NULL)
		return;

	ts = ktime_get_ns();

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, ts);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
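
/* Queue an event on one specific file handle only. */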
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	u64 ts = ktime_get_ns();

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, ts);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

void v4l2_event_wake_all(struct video_device *vdev)
{
	struct v4l2_fh *fh;
	unsigned long flags;

	if (!vdev)
		return;

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		wake_up_all(&fh->wait);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_wake_all);
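
/*
 * Drop a subscription: unlink any of its events still pending on the file
 * handle and remove it from the subscribed list. Both fh->subscribe_lock and
 * fh->vdev->fh_lock must be held by the caller.
 */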
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
	struct v4l2_fh *fh = sev->fh;
	unsigned int i;

	lockdep_assert_held(&fh->subscribe_lock);
	assert_spin_locked(&fh->vdev->fh_lock);

	/* Remove any pending events for this subscription */
	for (i = 0; i < sev->in_use; i++) {
		list_del(&sev->events[sev_pos(sev, i)].list);
		fh->navailable--;
	}
	list_del(&sev->list);
}
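
/*
 * Subscribe a file handle to an event type. The subscription and its kevent
 * ring (at least one element) are allocated with kvzalloc() so that large
 * element counts can fall back to vmalloc'ed memory.
 */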
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned int elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned int i;
	int ret = 0;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;
	sev->elems = elems;

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		/* Already listening */
		kvfree(sev);
	} else if (sev->ops && sev->ops->add) {
		ret = sev->ops->add(sev, elems);
		if (ret) {
			spin_lock_irqsave(&fh->vdev->fh_lock, flags);
			__v4l2_event_unsubscribe(sev);
			spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
			kvfree(sev);
		}
	}

	mutex_unlock(&fh->subscribe_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL)
		__v4l2_event_unsubscribe(sev);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	mutex_unlock(&fh->subscribe_lock);

	kvfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
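
/*
 * V4L2_EVENT_SOURCE_CHANGE events are coalesced rather than dropped: when the
 * kevent ring overflows, the 'changes' bitmasks of the old and new events are
 * OR'ed together by the replace/merge ops below.
 */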
static void v4l2_event_src_replace(struct v4l2_event *old,
				   const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				 struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				    const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);