V4L/DVB (9801): cx18: Allow more than 63 capture buffers in rotation per stream
cx18: Allow more than 63 capture buffers in rotation per stream.  Implement
q_busy to hold buffers the firmware has for use; q_free holds truly unused
buffers in a pool.  New buffers are given to the firmware as soon as the
firmware returns one, if there are any to give to the firmware.

Signed-off-by: Andy Walls <awalls@radix.net>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Parent: b80e1074c7
Commit: 66c2a6b0bc
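The rotation the message describes (q_free -> q_busy -> q_full, with at most 63 buffers outstanding in the firmware at any time) can be pictured with a small stand-alone C model.  This is only an illustrative sketch, not driver code: FW_QUEUE_LIMIT, give_buf_to_fw() and fw_returns_buf() are made-up names standing in for the CX18_CPU_DE_SET_MDL mailbox traffic, and the three states mirror the driver's q_free, q_busy and q_full queues.

/* Userspace model of the cx18 buffer rotation described above (not driver code). */
#include <stdio.h>

#define NUM_BUFS       90   /* more buffers than the firmware can hold at once */
#define FW_QUEUE_LIMIT 63   /* firmware-imposed cap on outstanding buffers */

enum buf_state { Q_FREE, Q_BUSY, Q_FULL };

static enum buf_state state[NUM_BUFS];  /* all start out on q_free */
static int busy_count;

/* Hand one free buffer to the "firmware" if it is below its limit. */
static int give_buf_to_fw(int id)
{
        if (busy_count >= FW_QUEUE_LIMIT)
                return -1;              /* buffer stays on q_free for now */
        state[id] = Q_BUSY;
        busy_count++;
        return 0;
}

/* Firmware returns a filled buffer: move it to q_full, then immediately
 * top q_busy back up from q_free, as cx18_stream_load_fw_queue_nolock() does. */
static void fw_returns_buf(int id)
{
        state[id] = Q_FULL;
        busy_count--;

        for (int i = 0; i < NUM_BUFS; i++)
                if (state[i] == Q_FREE && give_buf_to_fw(i) == 0)
                        break;
}

int main(void)
{
        int queued = 0;

        /* Capture start: load the firmware queue up to its limit. */
        for (int i = 0; i < NUM_BUFS; i++)
                if (give_buf_to_fw(i) == 0)
                        queued++;
        printf("initially busy: %d of %d buffers\n", queued, NUM_BUFS);

        /* Firmware fills buffer 0; a free buffer replaces it at once. */
        fw_returns_buf(0);
        printf("after one return, busy: %d\n", busy_count);
        return 0;
}

With 90 buffers and a 63-buffer firmware limit, only 63 are ever busy at once, and each returned buffer is immediately replaced from q_free; that is the behaviour the new cx18_stream_put_buf_fw() and cx18_stream_load_fw_queue_nolock() helpers in the diff below implement.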
@@ -289,7 +289,8 @@ struct cx18_stream {
         /* Buffer Queues */
         struct cx18_queue q_free;       /* free buffers */
-        struct cx18_queue q_full;       /* full buffers */
+        struct cx18_queue q_busy;       /* busy buffers - in use by firmware */
+        struct cx18_queue q_full;       /* full buffers - data for user apps */
 
         /* DVB / Digital Transport */
         struct cx18_dvb dvb;
@@ -187,7 +187,7 @@ static struct cx18_buffer *cx18_get_buffer(struct cx18_stream *s, int non_block,
                 while ((buf = cx18_dequeue(s_vbi, &s_vbi->q_full))) {
                         /* byteswap and process VBI data */
                         /* cx18_process_vbi_data(cx, buf, s_vbi->dma_pts, s_vbi->type); */
-                        cx18_enqueue(s_vbi, buf, &s_vbi->q_free);
+                        cx18_stream_put_buf_fw(s_vbi, buf);
                 }
         }
         buf = &cx->vbi.sliced_mpeg_buf;
@@ -361,15 +361,9 @@ static ssize_t cx18_read(struct cx18_stream *s, char __user *ubuf,
                                 tot_count - tot_written);
 
                 if (buf != &cx->vbi.sliced_mpeg_buf) {
-                        if (buf->readpos == buf->bytesused) {
-                                cx18_buf_sync_for_device(s, buf);
-                                cx18_enqueue(s, buf, &s->q_free);
-                                cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5,
-                                        s->handle,
-                                        (void __iomem *)&cx->scb->cpu_mdl[buf->id] -
-                                                cx->enc_mem,
-                                        1, buf->id, s->buf_size);
-                        } else
+                        if (buf->readpos == buf->bytesused)
+                                cx18_stream_put_buf_fw(s, buf);
+                        else
                                 cx18_push(s, buf, &s->q_full);
                 } else if (buf->readpos == buf->bytesused) {
                         int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES;
@@ -746,8 +746,7 @@ static int cx18_log_status(struct file *file, void *fh)
                         continue;
                 CX18_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n",
                           s->name, s->s_flags,
-                          (s->buffers - atomic_read(&s->q_free.buffers))
-                                * 100 / s->buffers,
+                          atomic_read(&s->q_full.buffers) * 100 / s->buffers,
                           (s->buffers * s->buf_size) / 1024, s->buffers);
         }
         CX18_INFO("Read MPEG/VBI: %lld/%lld bytes\n",
@@ -189,16 +189,7 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
                         dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
                                          buf->bytesused);
 
-                        cx18_buf_sync_for_device(s, buf);
-                        cx18_enqueue(s, buf, &s->q_free);
-
-                        if (s->handle != CX18_INVALID_TASK_HANDLE &&
-                            test_bit(CX18_F_S_STREAMING, &s->s_flags))
-                                cx18_vapi(cx,
-                                          CX18_CPU_DE_SET_MDL, 5, s->handle,
-                                          (void __iomem *)
-                                          &cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
-                                          1, buf->id, s->buf_size);
+                        cx18_stream_put_buf_fw(s, buf);
                 } else
                         set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags);
         }
@@ -42,24 +42,32 @@ void cx18_queue_init(struct cx18_queue *q)
         q->bytesused = 0;
 }
 
-void _cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
-                struct cx18_queue *q, int to_front)
+struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
+                                 struct cx18_queue *q, int to_front)
 {
-        /* clear the buffer if it is going to be enqueued to the free queue */
-        if (q == &s->q_free) {
+        /* clear the buffer if it is not to be enqueued to the full queue */
+        if (q != &s->q_full) {
                 buf->bytesused = 0;
                 buf->readpos = 0;
                 buf->b_flags = 0;
                 buf->skipped = 0;
         }
 
         mutex_lock(&s->qlock);
+
+        /* q_busy is restricted to 63 buffers to stay within firmware limits */
+        if (q == &s->q_busy && atomic_read(&q->buffers) >= 63)
+                q = &s->q_free;
+
         if (to_front)
                 list_add(&buf->list, &q->list); /* LIFO */
         else
                 list_add_tail(&buf->list, &q->list); /* FIFO */
-        atomic_inc(&q->buffers);
         q->bytesused += buf->bytesused - buf->readpos;
+        atomic_inc(&q->buffers);
+
         mutex_unlock(&s->qlock);
+        return q;
 }
 
 struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
@@ -70,9 +78,9 @@ struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
         if (!list_empty(&q->list)) {
                 buf = list_entry(q->list.next, struct cx18_buffer, list);
                 list_del_init(q->list.next);
-                atomic_dec(&q->buffers);
                 q->bytesused -= buf->bytesused - buf->readpos;
                 buf->skipped = 0;
+                atomic_dec(&q->buffers);
         }
         mutex_unlock(&s->qlock);
         return buf;
@@ -85,28 +93,30 @@ struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
         struct cx18_buffer *buf;
         struct cx18_buffer *ret = NULL;
         struct list_head *p, *t;
         LIST_HEAD(r);
 
         mutex_lock(&s->qlock);
-        list_for_each_safe(p, t, &s->q_free.list) {
+        list_for_each_safe(p, t, &s->q_busy.list) {
                 buf = list_entry(p, struct cx18_buffer, list);
 
                 if (buf->id != id) {
                         buf->skipped++;
-                        if (buf->skipped >= atomic_read(&s->q_free.buffers)-1) {
+                        if (buf->skipped >= atomic_read(&s->q_busy.buffers)-1) {
                                 /* buffer must have fallen out of rotation */
-                                atomic_dec(&s->q_free.buffers);
-                                list_move_tail(&buf->list, &r);
                                 CX18_WARN("Skipped %s, buffer %d, %d "
                                           "times - it must have dropped out of "
                                           "rotation\n", s->name, buf->id,
                                           buf->skipped);
+                                /* move it to q_free */
+                                list_move_tail(&buf->list, &s->q_free.list);
+                                buf->bytesused = buf->readpos = buf->b_flags =
+                                        buf->skipped = 0;
+                                atomic_dec(&s->q_busy.buffers);
+                                atomic_inc(&s->q_free.buffers);
                         }
                         continue;
                 }
 
                 buf->bytesused = bytesused;
-                atomic_dec(&s->q_free.buffers);
                 if (s->type == CX18_ENC_STREAM_TYPE_TS) {
                         /*
                          * TS doesn't use q_full, but for sweeping up lost
@@ -116,28 +126,19 @@ struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
                          */
                         list_del_init(&buf->list);
                 } else {
-                        atomic_inc(&s->q_full.buffers);
-                        s->q_full.bytesused += buf->bytesused;
                         list_move_tail(&buf->list, &s->q_full.list);
+                        s->q_full.bytesused += buf->bytesused;
+                        atomic_inc(&s->q_full.buffers);
                 }
+                atomic_dec(&s->q_busy.buffers);
 
                 ret = buf;
                 break;
         }
-        mutex_unlock(&s->qlock);
-
-        /* Put lost buffers back into firmware transfer rotation */
-        while (!list_empty(&r)) {
-                buf = list_entry(r.next, struct cx18_buffer, list);
-                list_del_init(r.next);
-                cx18_enqueue(s, buf, &s->q_free);
-                cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
-                        (void __iomem *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
-                        1, buf->id, s->buf_size);
-                CX18_INFO("Returning %s, buffer %d back to transfer rotation\n",
-                        s->name, buf->id);
-                /* and there was much rejoicing... */
-        }
+        /* Put more buffers into the transfer rotation from q_free, if we can */
+        cx18_stream_load_fw_queue_nolock(s);
+        mutex_unlock(&s->qlock);
         return ret;
 }
@@ -162,6 +163,7 @@ static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q)
 
 void cx18_flush_queues(struct cx18_stream *s)
 {
+        cx18_queue_flush(s, &s->q_busy);
         cx18_queue_flush(s, &s->q_full);
 }
@@ -43,21 +43,21 @@ static inline void cx18_buf_sync_for_device(struct cx18_stream *s,
 void cx18_buf_swap(struct cx18_buffer *buf);
 
 /* cx18_queue utility functions */
-void _cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
-                struct cx18_queue *q, int to_front);
+struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
+                                 struct cx18_queue *q, int to_front);
 
 static inline
-void cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
-                  struct cx18_queue *q)
+struct cx18_queue *cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
+                                struct cx18_queue *q)
 {
-        _cx18_enqueue(s, buf, q, 0); /* FIFO */
+        return _cx18_enqueue(s, buf, q, 0); /* FIFO */
 }
 
 static inline
-void cx18_push(struct cx18_stream *s, struct cx18_buffer *buf,
-               struct cx18_queue *q)
+struct cx18_queue *cx18_push(struct cx18_stream *s, struct cx18_buffer *buf,
+                             struct cx18_queue *q)
 {
-        _cx18_enqueue(s, buf, q, 1); /* LIFO */
+        return _cx18_enqueue(s, buf, q, 1); /* LIFO */
 }
 
 void cx18_queue_init(struct cx18_queue *q);
@@ -127,16 +127,11 @@ static void cx18_stream_init(struct cx18 *cx, int type)
         s->buf_size = cx->stream_buf_size[type];
         if (s->buf_size)
                 s->buffers = max_size / s->buf_size;
-        if (s->buffers > 63) {
-                /* Each stream has a maximum of 63 buffers,
-                   ensure we do not exceed that. */
-                s->buffers = 63;
-                s->buf_size = (max_size / s->buffers) & ~0xfff;
-        }
         mutex_init(&s->qlock);
         init_waitqueue_head(&s->waitq);
         s->id = -1;
         cx18_queue_init(&s->q_free);
+        cx18_queue_init(&s->q_busy);
         cx18_queue_init(&s->q_full);
 }
@@ -401,11 +396,61 @@ static void cx18_vbi_setup(struct cx18_stream *s)
         cx18_api(cx, CX18_CPU_SET_RAW_VBI_PARAM, 6, data);
 }
 
+struct cx18_queue *cx18_stream_put_buf_fw(struct cx18_stream *s,
+                                          struct cx18_buffer *buf)
+{
+        struct cx18 *cx = s->cx;
+        struct cx18_queue *q;
+
+        /* Don't give it to the firmware, if we're not running a capture */
+        if (s->handle == CX18_INVALID_TASK_HANDLE ||
+            !test_bit(CX18_F_S_STREAMING, &s->s_flags))
+                return cx18_enqueue(s, buf, &s->q_free);
+
+        q = cx18_enqueue(s, buf, &s->q_busy);
+        if (q != &s->q_busy)
+                return q; /* The firmware has the max buffers it can handle */
+
+        cx18_buf_sync_for_device(s, buf);
+        cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
+                  (void __iomem *) &cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
+                  1, buf->id, s->buf_size);
+        return q;
+}
+
+/* Must hold s->qlock when calling */
+void cx18_stream_load_fw_queue_nolock(struct cx18_stream *s)
+{
+        struct cx18_buffer *buf;
+        struct cx18 *cx = s->cx;
+
+        /* Move from q_free to q_busy notifying the firmware: 63 buf limit */
+        while (s->handle != CX18_INVALID_TASK_HANDLE &&
+               test_bit(CX18_F_S_STREAMING, &s->s_flags) &&
+               atomic_read(&s->q_busy.buffers) < 63 &&
+               !list_empty(&s->q_free.list)) {
+
+                /* Move from q_free to q_busy */
+                buf = list_entry(s->q_free.list.next, struct cx18_buffer, list);
+                list_move_tail(&buf->list, &s->q_busy.list);
+                buf->bytesused = buf->readpos = buf->b_flags = buf->skipped = 0;
+                atomic_dec(&s->q_free.buffers);
+                atomic_inc(&s->q_busy.buffers);
+
+                /* Notify firmware */
+                cx18_buf_sync_for_device(s, buf);
+                cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
+                          (void __iomem *) &cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
+                          1, buf->id, s->buf_size);
+        }
+}
+
 int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
 {
         u32 data[MAX_MB_ARGUMENTS];
         struct cx18 *cx = s->cx;
         struct list_head *p;
+        struct cx18_buffer *buf;
         int ts = 0;
         int captype = 0;
@@ -488,16 +533,18 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
                 (void __iomem *)&cx->scb->cpu_mdl_ack[s->type][0] - cx->enc_mem,
                 (void __iomem *)&cx->scb->cpu_mdl_ack[s->type][1] - cx->enc_mem);
 
         /* Init all the cpu_mdls for this stream */
         cx18_flush_queues(s);
+        mutex_lock(&s->qlock);
         list_for_each(p, &s->q_free.list) {
-                struct cx18_buffer *buf = list_entry(p, struct cx18_buffer, list);
-
+                buf = list_entry(p, struct cx18_buffer, list);
                 cx18_writel(cx, buf->dma_handle,
                             &cx->scb->cpu_mdl[buf->id].paddr);
                 cx18_writel(cx, s->buf_size, &cx->scb->cpu_mdl[buf->id].length);
-                cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
-                          (void __iomem *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
-                          1, buf->id, s->buf_size);
         }
+        cx18_stream_load_fw_queue_nolock(s);
+        mutex_unlock(&s->qlock);
 
         /* begin_capture */
         if (cx18_vapi(cx, CX18_CPU_CAPTURE_START, 1, s->handle)) {
                 CX18_DEBUG_WARN("Error starting capture!\n");
@@ -506,9 +553,15 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
                         cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, 1);
                 else
                         cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 1, s->handle);
+                clear_bit(CX18_F_S_STREAMING, &s->s_flags);
+                /* FIXME - CX18_F_S_STREAMOFF as well? */
                 cx18_vapi(cx, CX18_CPU_DE_RELEASE_MDL, 1, s->handle);
                 cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
+                /* FIXME - clean-up DSP0_INT mask, i_flags, s_flags, etc. */
                 s->handle = CX18_INVALID_TASK_HANDLE;
+                if (atomic_read(&cx->tot_capturing) == 0) {
+                        set_bit(CX18_F_I_EOS, &cx->i_flags);
+                        cx18_write_reg(cx, 5, CX18_DSP0_INTERRUPT_MASK);
+                }
                 return -EINVAL;
         }
@@ -29,6 +29,9 @@ int cx18_streams_register(struct cx18 *cx);
 void cx18_streams_cleanup(struct cx18 *cx, int unregister);
 
 /* Capture related */
+void cx18_stream_load_fw_queue_nolock(struct cx18_stream *s);
+struct cx18_queue *cx18_stream_put_buf_fw(struct cx18_stream *s,
+                                          struct cx18_buffer *buf);
 int cx18_start_v4l2_encode_stream(struct cx18_stream *s);
 int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end);