iio: core: Introduce iio_push_to_buffers_with_ts_unaligned()

Whilst it is almost always possible to arrange for scan data to be
read directly into a buffer that is suitable for passing to
iio_push_to_buffers_with_timestamp(), there are a few places where
leading data needs to be skipped over.

For these cases introduce a function that will allocate an appropriate
sized and aligned bounce buffer (if not already allocated) and copy
the unaligned data into that before calling
iio_push_to_buffers_with_timestamp() on the bounce buffer.
We tie the lifespan of this buffer to that of the iio_dev.dev
which should ensure no memory leaks occur.

Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Reviewed-by: Nuno Sá <nuno.sa@analog.com>
Link: https://lore.kernel.org/r/20210613151039.569883-2-jic23@kernel.org
This commit is contained in:
Jonathan Cameron 2021-06-13 16:10:36 +01:00
Родитель b18831cc99
Коммит 95ec3fdf2b
3 изменённых файлов: 54 добавлений и 0 удалений

Просмотреть файл

@ -1731,6 +1731,52 @@ int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
/**
 * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
 * no alignment or space requirements.
 * @indio_dev: iio_dev structure for device.
 * @data: channel data excluding the timestamp.
 * @data_sz: size of data.
 * @timestamp: timestamp for the sample data.
 *
 * This special variant of iio_push_to_buffers_with_timestamp() does
 * not require space for the timestamp, or 8 byte alignment of data.
 * It does however require an allocation on first call and additional
 * copies on all calls, so should be avoided if possible.
 */
int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
					  const void *data,
					  size_t data_sz,
					  int64_t timestamp)
{
	struct iio_dev_opaque *opaque = to_iio_dev_opaque(indio_dev);
	size_t needed = indio_dev->scan_bytes;

	/*
	 * Conservative estimate - we can always safely copy the minimum
	 * of either the data provided or the length of the destination
	 * buffer. This relaxed limit allows the calling drivers to be lax
	 * about tracking the size of the data they are pushing, at the
	 * cost of unnecessary copying of padding.
	 */
	if (data_sz > needed)
		data_sz = needed;

	/* (Re)allocate the bounce buffer whenever the scan size changes. */
	if (opaque->bounce_buffer_size != needed) {
		void *resized = devm_krealloc(&indio_dev->dev,
					      opaque->bounce_buffer,
					      needed, GFP_KERNEL);
		if (!resized)
			return -ENOMEM;

		opaque->bounce_buffer = resized;
		opaque->bounce_buffer_size = needed;
	}

	memcpy(opaque->bounce_buffer, data, data_sz);

	return iio_push_to_buffers_with_timestamp(indio_dev,
						  opaque->bounce_buffer,
						  timestamp);
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers_with_ts_unaligned);
/**
* iio_buffer_release() - Free a buffer's resources
* @ref: Pointer to the kref embedded in the iio_buffer struct

Просмотреть файл

@ -38,6 +38,10 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
return iio_push_to_buffers(indio_dev, data);
}
int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
const void *data, size_t data_sz,
int64_t timestamp);
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
const unsigned long *mask);

Просмотреть файл

@ -23,6 +23,8 @@
* @groupcounter: index of next attribute group
* @legacy_scan_el_group: attribute group for legacy scan elements attribute group
* @legacy_buffer_group: attribute group for legacy buffer attributes group
* @bounce_buffer: for devices that call iio_push_to_buffers_with_ts_unaligned()
* @bounce_buffer_size: size of currently allocated bounce buffer
* @scan_index_timestamp: cache of the index to the timestamp
* @clock_id: timestamping clock posix identifier
* @chrdev: associated character device
@ -50,6 +52,8 @@ struct iio_dev_opaque {
int groupcounter;
struct attribute_group legacy_scan_el_group;
struct attribute_group legacy_buffer_group;
void *bounce_buffer;
size_t bounce_buffer_size;
unsigned int scan_index_timestamp;
clockid_t clock_id;