iov_iter stuff, part 2, rebased
* more new_sync_{read,write}() speedups - ITER_UBUF introduction
* ITER_PIPE cleanups
* unification of iov_iter_get_pages/iov_iter_get_pages_alloc and
  switching them to advancing semantics
* making ITER_PIPE take high-order pages without splitting them
* handling copy_page_from_iter() for high-order pages properly

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQQqUNBr3gm4hGXdBJlZ7Krx/gZQ6wUCYvHI8QAKCRBZ7Krx/gZQ
62CQAPsGlbebqBeAT2pMulaGDxfLAsgz5Yf4BEaMLhPtRqFOQgD+KrZQId7Sd8O0
3IWucpTb2c4jvLlXhGMS+XWnusQH+AQ=
=pBux
-----END PGP SIGNATURE-----

Merge tag 'pull-work.iov_iter-rebased' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull more iov_iter updates from Al Viro:

 - more new_sync_{read,write}() speedups - ITER_UBUF introduction

 - ITER_PIPE cleanups

 - unification of iov_iter_get_pages/iov_iter_get_pages_alloc and
   switching them to advancing semantics

 - making ITER_PIPE take high-order pages without splitting them

 - handling copy_page_from_iter() for high-order pages properly

* tag 'pull-work.iov_iter-rebased' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (32 commits)
  fix copy_page_from_iter() for compound destinations
  hugetlbfs: copy_page_to_iter() can deal with compound pages
  copy_page_to_iter(): don't split high-order page in case of ITER_PIPE
  expand those iov_iter_advance()...
  pipe_get_pages(): switch to append_pipe()
  get rid of non-advancing variants
  ceph: switch the last caller of iov_iter_get_pages_alloc()
  9p: convert to advancing variant of iov_iter_get_pages_alloc()
  af_alg_make_sg(): switch to advancing variant of iov_iter_get_pages()
  iter_to_pipe(): switch to advancing variant of iov_iter_get_pages()
  block: convert to advancing variants of iov_iter_get_pages{,_alloc}()
  iov_iter: advancing variants of iov_iter_get_pages{,_alloc}()
  iov_iter: saner helper for page array allocation
  fold __pipe_get_pages() into pipe_get_pages()
  ITER_XARRAY: don't open-code DIV_ROUND_UP()
  unify the rest of iov_iter_get_pages()/iov_iter_get_pages_alloc() guts
  unify xarray_get_pages() and xarray_get_pages_alloc()
  unify pipe_get_pages() and pipe_get_pages_alloc()
  iov_iter_get_pages(): sanity-check arguments
  iov_iter_get_pages_alloc(): lift freeing pages array on failure exits into wrapper
  ...
This commit is contained in:
Commit f30adc0d33
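Nearly every hunk below is a consequence of one API change: iov_iter_get_pages() and iov_iter_get_pages_alloc() are gone, and their replacements, iov_iter_get_pages2() and iov_iter_get_pages_alloc2(), advance the iterator past whatever they pinned. A minimal sketch of the two calling conventions (illustrative only, not code from the series; the function name is made up):

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/uio.h>

    static ssize_t pin_some_pages(struct iov_iter *iter)
    {
        struct page *pages[16];
        size_t off;
        ssize_t n;
        int i;

        /* Old contract: the iterator was left where it was, so every
         * successful call had to be followed by an explicit advance:
         *
         *     n = iov_iter_get_pages(iter, pages, ~0UL, 16, &off);
         *     if (n > 0)
         *         iov_iter_advance(iter, n);
         */

        /* New contract: on return the iterator has already been
         * advanced past the n bytes that were pinned. */
        n = iov_iter_get_pages2(iter, pages, ~0UL, 16, &off);
        if (n <= 0)
            return n ? n : -EFAULT;

        /* A caller that fails to consume some tail of those bytes now
         * hands it back with iov_iter_revert(iter, unused) instead of
         * skipping part of the advance. Drop the page refs when done: */
        for (i = 0; i < DIV_ROUND_UP(off + n, PAGE_SIZE); i++)
            put_page(pages[i]);
        return n;
    }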
block/bio.c (23 lines changed)

@@ -1200,7 +1200,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	struct page **pages = (struct page **)bv;
 	ssize_t size, left;
 	unsigned len, i = 0;
-	size_t offset;
+	size_t offset, trim;
 	int ret = 0;
 
 	/*
@@ -1218,16 +1218,19 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	 * result to ensure the bio's total size is correct. The remainder of
 	 * the iov data will be picked up in the next bio iteration.
 	 */
-	size = iov_iter_get_pages(iter, pages, UINT_MAX - bio->bi_iter.bi_size,
+	size = iov_iter_get_pages2(iter, pages, UINT_MAX - bio->bi_iter.bi_size,
 				  nr_pages, &offset);
-	if (size > 0) {
-		nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
-		size = ALIGN_DOWN(size, bdev_logical_block_size(bio->bi_bdev));
-	} else
-		nr_pages = 0;
-
-	if (unlikely(size <= 0))
-		return size ? size : -EFAULT;
+	if (unlikely(size <= 0)) {
+		ret = size ? size : -EFAULT;
+		goto out;
+	}
+
+	nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
+
+	trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
+	iov_iter_revert(iter, trim);
+
+	size -= trim;
+	if (unlikely(!size)) {
+		ret = -EFAULT;
+		goto out;
+	}
@@ -1246,7 +1249,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 		offset = 0;
 	}
 
-	iov_iter_advance(iter, size - left);
+	iov_iter_revert(iter, left);
 out:
 	while (i < nr_pages)
 		put_page(pages[i++]);
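Note what replaced the old ALIGN_DOWN(): under advancing semantics the iterator has already moved past everything that was pinned, so trimming the byte count down to a logical-block multiple must also hand the trimmed tail back via iov_iter_revert(). A sketch of that idea with a hypothetical helper (not part of the series):

    /* Hypothetical helper mirroring the trim logic above. */
    static ssize_t trim_to_block_size(struct iov_iter *iter, ssize_t size,
                                      unsigned int lbs /* power of two */)
    {
        size_t trim = size & (lbs - 1);  /* bytes past the last full block */

        iov_iter_revert(iter, trim);     /* give the tail back to the iterator */
        return size - trim;              /* 0: nothing block-sized was pinned */
    }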
block/blk-map.c

@@ -254,7 +254,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 		size_t offs, added = 0;
 		int npages;
 
-		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
+		bytes = iov_iter_get_pages_alloc2(iter, &pages, LONG_MAX, &offs);
 		if (unlikely(bytes <= 0)) {
 			ret = bytes ? bytes : -EFAULT;
 			goto out_unmap;
@@ -284,7 +284,6 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 			bytes -= n;
 			offs = 0;
 		}
-		iov_iter_advance(iter, added);
 	}
 	/*
 	 * release the pages we didn't map into the bio, if any
@@ -293,8 +292,10 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 			put_page(pages[j++]);
 		kvfree(pages);
 		/* couldn't stuff something into bio? */
-		if (bytes)
+		if (bytes) {
+			iov_iter_revert(iter, bytes);
 			break;
+		}
 	}
 
 	ret = blk_rq_append_bio(rq, bio);
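These two files show the conversion recipe used throughout the series: drop the iov_iter_advance() that followed a successful iov_iter_get_pages{,_alloc}() call, and make every bail-out path revert whatever was not actually consumed. Schematically (an illustrative sketch, not kernel code; page reference handling elided):

    #include <linux/mm.h>
    #include <linux/uio.h>

    static int consume_all(struct iov_iter *iter)
    {
        struct page **pages;
        size_t off, used = 0;
        ssize_t got;

        /* advances the iterator past 'got' bytes on success */
        got = iov_iter_get_pages_alloc2(iter, &pages, LONG_MAX, &off);
        if (got <= 0)
            return got ? got : -EFAULT;

        /* ... hand pages to the consumer, adding each accepted byte
         * count to 'used'; drop the page references when done ... */

        if (used < got)  /* couldn't place everything? hand back the rest */
            iov_iter_revert(iter, got - used);
        kvfree(pages);   /* free the array allocated by the _alloc2 call */
        return 0;
    }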
block/fops.c

@@ -75,7 +75,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
 
 	if (iov_iter_rw(iter) == READ) {
 		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
-		if (iter_is_iovec(iter))
+		if (user_backed_iter(iter))
 			should_dirty = true;
 	} else {
 		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
@@ -204,7 +204,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 	}
 
 	dio->size = 0;
-	if (is_read && iter_is_iovec(iter))
+	if (is_read && user_backed_iter(iter))
 		dio->flags |= DIO_SHOULD_DIRTY;
 
 	blk_start_plug(&plug);
@@ -335,7 +335,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
 	dio->size = bio->bi_iter.bi_size;
 
 	if (is_read) {
-		if (iter_is_iovec(iter)) {
+		if (user_backed_iter(iter)) {
 			dio->flags |= DIO_SHOULD_DIRTY;
 			bio_set_pages_dirty(bio);
 		}
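The iter_is_iovec() to user_backed_iter() substitutions here and below are not mere renames: with ITER_UBUF in the picture there are now two user-backed iterator types, and code deciding whether to dirty pages after a direct read must catch both. In effect (sketch; see the include/linux/uio.h hunks further down for the real helpers):

    #include <linux/uio.h>

    /* user_backed_iter() is true for both user-backed types. */
    static bool example_should_dirty(const struct iov_iter *i)
    {
        /* old test: return iter_is_iovec(i);  -- would miss ITER_UBUF */
        return user_backed_iter(i);            /* ITER_IOVEC or ITER_UBUF */
    }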
crypto/af_alg.c

@@ -404,7 +404,7 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
 	ssize_t n;
 	int npages, i;
 
-	n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
+	n = iov_iter_get_pages2(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
 	if (n < 0)
 		return n;
 
@@ -1191,7 +1191,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 		len += err;
 		atomic_add(err, &ctx->rcvused);
 		rsgl->sg_num_bytes = err;
-		iov_iter_advance(&msg->msg_iter, err);
 	}
 
 	*outlen = len;
crypto/algif_hash.c

@@ -102,11 +102,12 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 		err = crypto_wait_req(crypto_ahash_update(&ctx->req),
 				      &ctx->wait);
 		af_alg_free_sg(&ctx->sgl);
-		if (err)
+		if (err) {
+			iov_iter_revert(&msg->msg_iter, len);
 			goto unlock;
+		}
 
 		copied += len;
-		iov_iter_advance(&msg->msg_iter, len);
 	}
 
 	err = 0;
drivers/vhost/scsi.c

@@ -643,14 +643,12 @@ vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
 	size_t offset;
 	unsigned int npages = 0;
 
-	bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
+	bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
 				   VHOST_SCSI_PREALLOC_UPAGES, &offset);
 	/* No pages were pinned */
 	if (bytes <= 0)
 		return bytes < 0 ? bytes : -EFAULT;
 
-	iov_iter_advance(iter, bytes);
-
 	while (bytes) {
 		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
 		sg_set_page(sg++, pages[npages++], n, offset);
fs/ceph/addr.c

@@ -329,7 +329,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 
 	dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
 	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len);
-	err = iov_iter_get_pages_alloc(&iter, &pages, len, &page_off);
+	err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
 	if (err < 0) {
 		dout("%s: iov_ter_get_pages_alloc returned %d\n", __func__, err);
 		goto out;
fs/ceph/file.c

@@ -95,12 +95,11 @@ static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
 		size_t start;
 		int idx = 0;
 
-		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
+		bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
 					   ITER_GET_BVECS_PAGES, &start);
 		if (bytes < 0)
 			return size ?: bytes;
 
-		iov_iter_advance(iter, bytes);
 		size += bytes;
 
 		for ( ; bytes; idx++, bvec_idx++) {
@@ -1262,7 +1261,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 	size_t count = iov_iter_count(iter);
 	loff_t pos = iocb->ki_pos;
 	bool write = iov_iter_rw(iter) == WRITE;
-	bool should_dirty = !write && iter_is_iovec(iter);
+	bool should_dirty = !write && user_backed_iter(iter);
 
 	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
 		return -EROFS;
fs/cifs/file.c

@@ -3276,7 +3276,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
 		if (ctx->direct_io) {
 			ssize_t result;
 
-			result = iov_iter_get_pages_alloc(
+			result = iov_iter_get_pages_alloc2(
 				from, &pagevec, cur_len, &start);
 			if (result < 0) {
 				cifs_dbg(VFS,
@@ -3290,7 +3290,6 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
 				break;
 			}
 			cur_len = (size_t)result;
-			iov_iter_advance(from, cur_len);
 
 			nr_pages =
 				(cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
@@ -4012,7 +4011,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
 		if (ctx->direct_io) {
 			ssize_t result;
 
-			result = iov_iter_get_pages_alloc(
+			result = iov_iter_get_pages_alloc2(
 					&direct_iov, &pagevec,
 					cur_len, &start);
 			if (result < 0) {
@@ -4028,7 +4027,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
 				break;
 			}
 			cur_len = (size_t)result;
-			iov_iter_advance(&direct_iov, cur_len);
 
 			rdata = cifs_readdata_direct_alloc(
 					pagevec, cifs_uncached_readv_complete);
@@ -4258,7 +4256,7 @@ static ssize_t __cifs_readv(
 	if (!is_sync_kiocb(iocb))
 		ctx->iocb = iocb;
 
-	if (iter_is_iovec(to))
+	if (user_backed_iter(to))
 		ctx->should_dirty = true;
 
 	if (direct) {
fs/cifs/misc.c

@@ -1022,7 +1022,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
 	saved_len = count;
 
 	while (count && npages < max_pages) {
-		rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
+		rc = iov_iter_get_pages2(iter, pages, count, max_pages, &start);
 		if (rc < 0) {
 			cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc);
 			break;
@@ -1034,7 +1034,6 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
 			break;
 		}
 
-		iov_iter_advance(iter, rc);
 		count -= rc;
 		rc += start;
 		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);
fs/direct-io.c

@@ -169,7 +169,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
 	const enum req_op dio_op = dio->opf & REQ_OP_MASK;
 	ssize_t ret;
 
-	ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
+	ret = iov_iter_get_pages2(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
 				 &sdio->from);
 
 	if (ret < 0 && sdio->blocks_available && dio_op == REQ_OP_WRITE) {
@@ -191,7 +191,6 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
 	}
 
 	if (ret >= 0) {
-		iov_iter_advance(sdio->iter, ret);
 		ret += sdio->from;
 		sdio->head = 0;
 		sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
@@ -1251,7 +1250,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	spin_lock_init(&dio->bio_lock);
 	dio->refcount = 1;
 
-	dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ;
+	dio->should_dirty = user_backed_iter(iter) && iov_iter_rw(iter) == READ;
 	sdio.iter = iter;
 	sdio.final_block_in_request = end >> blkbits;
 
fs/fuse/dev.c

@@ -730,14 +730,13 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 		}
 	} else {
 		size_t off;
-		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
+		err = iov_iter_get_pages2(cs->iter, &page, PAGE_SIZE, 1, &off);
 		if (err < 0)
 			return err;
 		BUG_ON(!err);
 		cs->len = err;
 		cs->offset = off;
 		cs->pg = page;
-		iov_iter_advance(cs->iter, err);
 	}
 
 	return lock_request(cs->req);
@@ -1356,7 +1355,7 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
 	if (!fud)
 		return -EPERM;
 
-	if (!iter_is_iovec(to))
+	if (!user_backed_iter(to))
 		return -EINVAL;
 
 	fuse_copy_init(&cs, 1, to);
@@ -1949,7 +1948,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
 	if (!fud)
 		return -EPERM;
 
-	if (!iter_is_iovec(from))
+	if (!user_backed_iter(from))
 		return -EINVAL;
 
 	fuse_copy_init(&cs, 0, from);
fs/fuse/file.c

@@ -1414,14 +1414,13 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
 	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
 		unsigned npages;
 		size_t start;
-		ret = iov_iter_get_pages(ii, &ap->pages[ap->num_pages],
+		ret = iov_iter_get_pages2(ii, &ap->pages[ap->num_pages],
 					*nbytesp - nbytes,
 					max_pages - ap->num_pages,
 					&start);
 		if (ret < 0)
 			break;
 
-		iov_iter_advance(ii, ret);
 		nbytes += ret;
 
 		ret += start;
@@ -1478,7 +1477,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 		inode_unlock(inode);
 	}
 
-	io->should_dirty = !write && iter_is_iovec(iter);
+	io->should_dirty = !write && user_backed_iter(iter);
 	while (count) {
 		ssize_t nres;
 		fl_owner_t owner = current->files;
fs/gfs2/file.c

@@ -780,7 +780,7 @@ static inline bool should_fault_in_pages(struct iov_iter *i,
 
 	if (!count)
 		return false;
-	if (!iter_is_iovec(i))
+	if (!user_backed_iter(i))
 		return false;
 
 	size = PAGE_SIZE;
fs/hugetlbfs/inode.c

@@ -282,35 +282,6 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 }
 #endif
 
-static size_t
-hugetlbfs_read_actor(struct page *page, unsigned long offset,
-			struct iov_iter *to, unsigned long size)
-{
-	size_t copied = 0;
-	int i, chunksize;
-
-	/* Find which 4k chunk and offset with in that chunk */
-	i = offset >> PAGE_SHIFT;
-	offset = offset & ~PAGE_MASK;
-
-	while (size) {
-		size_t n;
-		chunksize = PAGE_SIZE;
-		if (offset)
-			chunksize -= offset;
-		if (chunksize > size)
-			chunksize = size;
-		n = copy_page_to_iter(&page[i], offset, chunksize, to);
-		copied += n;
-		if (n != chunksize)
-			return copied;
-		offset = 0;
-		size -= chunksize;
-		i++;
-	}
-	return copied;
-}
-
 /*
  * Support for read() - Find the page attached to f_mapping and copy out the
  * data. This provides functionality similar to filemap_read().
@@ -360,7 +331,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 		/*
 		 * We have the page, copy it to user space buffer.
 		 */
-		copied = hugetlbfs_read_actor(page, offset, to, nr);
+		copied = copy_page_to_iter(page, offset, nr, to);
 		put_page(page);
 	}
 	offset += copied;
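These hugetlbfs hunks lean on the "hugetlbfs: copy_page_to_iter() can deal with compound pages" commit from this pull: the removed hugetlbfs_read_actor() was an open-coded walk over the 4K subpages of a huge page, and it became redundant once the generic copier learned to do that walk itself. A sketch of the equivalence (illustrative; the real work happens inside copy_page_to_iter()):

    #include <linux/uio.h>

    /* One call on the compound page now does what the removed
     * hugetlbfs_read_actor() loop did by hand: copy 4K subpages one
     * after another, stopping early on a short copy and returning the
     * number of bytes copied. */
    static size_t read_huge_page(struct page *page, unsigned long offset,
                                 unsigned long nr, struct iov_iter *to)
    {
        return copy_page_to_iter(page, offset, nr, to);
    }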
fs/iomap/direct-io.c

@@ -533,7 +533,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 			iomi.flags |= IOMAP_NOWAIT;
 		}
 
-		if (iter_is_iovec(iter))
+		if (user_backed_iter(iter))
 			dio->flags |= IOMAP_DIO_DIRTY;
 	} else {
 		iomi.flags |= IOMAP_WRITE;
fs/nfs/direct.c

@@ -364,13 +364,12 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 		size_t pgbase;
 		unsigned npages, i;
 
-		result = iov_iter_get_pages_alloc(iter, &pagevec,
+		result = iov_iter_get_pages_alloc2(iter, &pagevec,
 						  rsize, &pgbase);
 		if (result < 0)
 			break;
 
 		bytes = result;
-		iov_iter_advance(iter, bytes);
 		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
 		for (i = 0; i < npages; i++) {
 			struct nfs_page *req;
@@ -478,7 +477,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
 	if (!is_sync_kiocb(iocb))
 		dreq->iocb = iocb;
 
-	if (iter_is_iovec(iter))
+	if (user_backed_iter(iter))
 		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
 
 	if (!swap)
@@ -812,13 +811,12 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 		size_t pgbase;
 		unsigned npages, i;
 
-		result = iov_iter_get_pages_alloc(iter, &pagevec,
+		result = iov_iter_get_pages_alloc2(iter, &pagevec,
 						  wsize, &pgbase);
 		if (result < 0)
 			break;
 
 		bytes = result;
-		iov_iter_advance(iter, bytes);
 		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
 		for (i = 0; i < npages; i++) {
 			struct nfs_page *req;
fs/read_write.c

@@ -378,14 +378,13 @@ EXPORT_SYMBOL(rw_verify_area);
 
 static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 {
-	struct iovec iov = { .iov_base = buf, .iov_len = len };
 	struct kiocb kiocb;
 	struct iov_iter iter;
 	ssize_t ret;
 
 	init_sync_kiocb(&kiocb, filp);
 	kiocb.ki_pos = (ppos ? *ppos : 0);
-	iov_iter_init(&iter, READ, &iov, 1, len);
+	iov_iter_ubuf(&iter, READ, buf, len);
 
 	ret = call_read_iter(filp, &kiocb, &iter);
 	BUG_ON(ret == -EIOCBQUEUED);
@@ -481,14 +480,13 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
 
 static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
 {
-	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
 	struct kiocb kiocb;
 	struct iov_iter iter;
 	ssize_t ret;
 
 	init_sync_kiocb(&kiocb, filp);
 	kiocb.ki_pos = (ppos ? *ppos : 0);
-	iov_iter_init(&iter, WRITE, &iov, 1, len);
+	iov_iter_ubuf(&iter, WRITE, (void __user *)buf, len);
 
 	ret = call_write_iter(filp, &kiocb, &iter);
 	BUG_ON(ret == -EIOCBQUEUED);
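new_sync_read() and new_sync_write() are the motivating users of ITER_UBUF: a plain read(2)/write(2) passes exactly one user buffer, so building a one-element iovec array, only for every iteration primitive to then loop over nr_segs, was pure overhead. Condensed from the hunks above (buf and len are the syscall's buffer and length):

    /* before: a single-segment ITER_IOVEC */
    struct iovec iov = { .iov_base = buf, .iov_len = len };
    struct iov_iter iter;
    iov_iter_init(&iter, READ, &iov, 1, len);

    /* after: ITER_UBUF carries the sole user pointer directly,
     * with no iovec array and no segment loop */
    iov_iter_ubuf(&iter, READ, buf, len);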
fs/splice.c (54 lines changed)

@@ -301,11 +301,9 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
 {
 	struct iov_iter to;
 	struct kiocb kiocb;
-	unsigned int i_head;
 	int ret;
 
 	iov_iter_pipe(&to, READ, pipe, len);
-	i_head = to.head;
 	init_sync_kiocb(&kiocb, in);
 	kiocb.ki_pos = *ppos;
 	ret = call_read_iter(in, &kiocb, &to);
@@ -313,9 +311,8 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
 		*ppos = kiocb.ki_pos;
 		file_accessed(in);
 	} else if (ret < 0) {
-		to.head = i_head;
-		to.iov_offset = 0;
-		iov_iter_advance(&to, 0); /* to free what was emitted */
+		/* free what was emitted */
+		pipe_discard_from(pipe, to.start_head);
 		/*
 		 * callers of ->splice_read() expect -EAGAIN on
 		 * "can't put anything in there", rather than -EFAULT.
@@ -1161,39 +1158,40 @@ static int iter_to_pipe(struct iov_iter *from,
 	};
 	size_t total = 0;
 	int ret = 0;
-	bool failed = false;
 
-	while (iov_iter_count(from) && !failed) {
+	while (iov_iter_count(from)) {
 		struct page *pages[16];
-		ssize_t copied;
+		ssize_t left;
 		size_t start;
-		int n;
+		int i, n;
 
-		copied = iov_iter_get_pages(from, pages, ~0UL, 16, &start);
-		if (copied <= 0) {
-			ret = copied;
+		left = iov_iter_get_pages2(from, pages, ~0UL, 16, &start);
+		if (left <= 0) {
+			ret = left;
 			break;
 		}
 
-		for (n = 0; copied; n++, start = 0) {
-			int size = min_t(int, copied, PAGE_SIZE - start);
-			if (!failed) {
-				buf.page = pages[n];
-				buf.offset = start;
-				buf.len = size;
-				ret = add_to_pipe(pipe, &buf);
-				if (unlikely(ret < 0)) {
-					failed = true;
-				} else {
-					iov_iter_advance(from, ret);
-					total += ret;
-				}
-			} else {
-				put_page(pages[n]);
+		n = DIV_ROUND_UP(left + start, PAGE_SIZE);
+		for (i = 0; i < n; i++) {
+			int size = min_t(int, left, PAGE_SIZE - start);
+
+			buf.page = pages[i];
+			buf.offset = start;
+			buf.len = size;
+			ret = add_to_pipe(pipe, &buf);
+			if (unlikely(ret < 0)) {
+				iov_iter_revert(from, left);
+				// this one got dropped by add_to_pipe()
+				while (++i < n)
+					put_page(pages[i]);
+				goto out;
 			}
-			copied -= size;
+			total += ret;
+			left -= size;
+			start = 0;
 		}
 	}
+out:
 	return total ? total : ret;
 }
include/linux/pipe_fs_i.h

@@ -156,26 +156,6 @@ static inline bool pipe_full(unsigned int head, unsigned int tail,
 	return pipe_occupancy(head, tail) >= limit;
 }
 
-/**
- * pipe_space_for_user - Return number of slots available to userspace
- * @head: The pipe ring head pointer
- * @tail: The pipe ring tail pointer
- * @pipe: The pipe info structure
- */
-static inline unsigned int pipe_space_for_user(unsigned int head, unsigned int tail,
-					       struct pipe_inode_info *pipe)
-{
-	unsigned int p_occupancy, p_space;
-
-	p_occupancy = pipe_occupancy(head, tail);
-	if (p_occupancy >= pipe->max_usage)
-		return 0;
-	p_space = pipe->ring_size - p_occupancy;
-	if (p_space > pipe->max_usage)
-		p_space = pipe->max_usage;
-	return p_space;
-}
-
 /**
  * pipe_buf_get - get a reference to a pipe_buffer
  * @pipe: the pipe that the buffer belongs to
include/linux/uio.h

@@ -26,6 +26,7 @@ enum iter_type {
 	ITER_PIPE,
 	ITER_XARRAY,
 	ITER_DISCARD,
+	ITER_UBUF,
 };
 
 struct iov_iter_state {
@@ -38,7 +39,11 @@ struct iov_iter {
 	u8 iter_type;
 	bool nofault;
 	bool data_source;
-	size_t iov_offset;
+	bool user_backed;
+	union {
+		size_t iov_offset;
+		int last_offset;
+	};
 	size_t count;
 	union {
 		const struct iovec *iov;
@@ -46,6 +51,7 @@ struct iov_iter {
 		const struct bio_vec *bvec;
 		struct xarray *xarray;
 		struct pipe_inode_info *pipe;
+		void __user *ubuf;
 	};
 	union {
 		unsigned long nr_segs;
@@ -70,6 +76,11 @@ static inline void iov_iter_save_state(struct iov_iter *iter,
 	state->nr_segs = iter->nr_segs;
 }
 
+static inline bool iter_is_ubuf(const struct iov_iter *i)
+{
+	return iov_iter_type(i) == ITER_UBUF;
+}
+
 static inline bool iter_is_iovec(const struct iov_iter *i)
 {
 	return iov_iter_type(i) == ITER_IOVEC;
@@ -105,6 +116,11 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i)
 	return i->data_source ? WRITE : READ;
 }
 
+static inline bool user_backed_iter(const struct iov_iter *i)
+{
+	return i->user_backed;
+}
+
 /*
  * Total number of bytes covered by an iovec.
  *
@@ -231,9 +247,9 @@ void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
 void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
 		     loff_t start, size_t count);
-ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
+ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
 			size_t maxsize, unsigned maxpages, size_t *start);
-ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
+ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
 			size_t maxsize, size_t *start);
 int iov_iter_npages(const struct iov_iter *i, int maxpages);
 void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
@@ -322,4 +338,17 @@ ssize_t __import_iovec(int type, const struct iovec __user *uvec,
 int import_single_range(int type, void __user *buf, size_t len,
 		struct iovec *iov, struct iov_iter *i);
 
+static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
+			void __user *buf, size_t count)
+{
+	WARN_ON(direction & ~(READ | WRITE));
+	*i = (struct iov_iter) {
+		.iter_type = ITER_UBUF,
+		.user_backed = true,
+		.data_source = direction,
+		.ubuf = buf,
+		.count = count
+	};
+}
+
 #endif
lib/iov_iter.c (836 lines changed; the diff is not shown here because of its size)
mm/shmem.c

@@ -2626,7 +2626,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 			ret = copy_page_to_iter(page, offset, nr, to);
 			put_page(page);
 
-		} else if (iter_is_iovec(to)) {
+		} else if (user_backed_iter(to)) {
 			/*
 			 * Copy to user tends to be so well optimized, but
 			 * clear_user() not so much, that it is noticeably
net/9p/client.c

@@ -1495,7 +1495,7 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
 	struct p9_client *clnt = fid->clnt;
 	struct p9_req_t *req;
 	int count = iov_iter_count(to);
-	int rsize, non_zc = 0;
+	int rsize, received, non_zc = 0;
 	char *dataptr;
 
 	*err = 0;
@@ -1524,36 +1524,40 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
 	}
 	if (IS_ERR(req)) {
 		*err = PTR_ERR(req);
+		if (!non_zc)
+			iov_iter_revert(to, count - iov_iter_count(to));
 		return 0;
 	}
 
 	*err = p9pdu_readf(&req->rc, clnt->proto_version,
-			   "D", &count, &dataptr);
+			   "D", &received, &dataptr);
 	if (*err) {
+		if (!non_zc)
+			iov_iter_revert(to, count - iov_iter_count(to));
 		trace_9p_protocol_dump(clnt, &req->rc);
 		p9_req_put(clnt, req);
 		return 0;
 	}
-	if (rsize < count) {
-		pr_err("bogus RREAD count (%d > %d)\n", count, rsize);
-		count = rsize;
+	if (rsize < received) {
+		pr_err("bogus RREAD count (%d > %d)\n", received, rsize);
+		received = rsize;
 	}
 
 	p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
 
 	if (non_zc) {
-		int n = copy_to_iter(dataptr, count, to);
+		int n = copy_to_iter(dataptr, received, to);
 
-		if (n != count) {
+		if (n != received) {
 			*err = -EFAULT;
 			p9_req_put(clnt, req);
 			return n;
 		}
 	} else {
-		iov_iter_advance(to, count);
+		iov_iter_revert(to, count - received - iov_iter_count(to));
 	}
 	p9_req_put(clnt, req);
-	return count;
+	return received;
 }
 EXPORT_SYMBOL(p9_client_read_once);
@@ -1571,6 +1575,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
 	while (iov_iter_count(from)) {
 		int count = iov_iter_count(from);
 		int rsize = fid->iounit;
+		int written;
 
 		if (!rsize || rsize > clnt->msize - P9_IOHDRSZ)
 			rsize = clnt->msize - P9_IOHDRSZ;
@@ -1588,27 +1593,29 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
 					    offset, rsize, from);
 		}
 		if (IS_ERR(req)) {
+			iov_iter_revert(from, count - iov_iter_count(from));
 			*err = PTR_ERR(req);
 			break;
 		}
 
-		*err = p9pdu_readf(&req->rc, clnt->proto_version, "d", &count);
+		*err = p9pdu_readf(&req->rc, clnt->proto_version, "d", &written);
 		if (*err) {
+			iov_iter_revert(from, count - iov_iter_count(from));
 			trace_9p_protocol_dump(clnt, &req->rc);
 			p9_req_put(clnt, req);
 			break;
 		}
-		if (rsize < count) {
-			pr_err("bogus RWRITE count (%d > %d)\n", count, rsize);
-			count = rsize;
+		if (rsize < written) {
+			pr_err("bogus RWRITE count (%d > %d)\n", written, rsize);
+			written = rsize;
 		}
 
 		p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
 
 		p9_req_put(clnt, req);
-		iov_iter_advance(from, count);
-		total += count;
-		offset += count;
+		iov_iter_revert(from, count - written - iov_iter_count(from));
+		total += written;
+		offset += written;
 	}
 	return total;
 }
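The 9p conversion carries the subtlest revert arithmetic of the series. The zero-copy transport advances the iterator by whatever it consumed, i.e. by (count - iov_iter_count(to)); the function must leave the iterator advanced by exactly the number of bytes the server acknowledged. Worked through with illustrative numbers:

    /* Suppose count == 8192 was requested, the transport advanced the
     * iterator by all 8192 bytes, and the server's RREAD only covered
     * received == 4096 of them.  Then:
     *
     *     advanced = count - iov_iter_count(to)    // 8192
     *     excess   = advanced - received           // 4096
     *
     * which is exactly what the hunk computes: */
    iov_iter_revert(to, count - received - iov_iter_count(to));
    /* leaving the iterator advanced by 'received' bytes only */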
net/9p/protocol.c

@@ -63,9 +63,8 @@ static size_t
 pdu_write_u(struct p9_fcall *pdu, struct iov_iter *from, size_t size)
 {
 	size_t len = min(pdu->capacity - pdu->size, size);
-	struct iov_iter i = *from;
 
-	if (!copy_from_iter_full(&pdu->sdata[pdu->size], len, &i))
+	if (!copy_from_iter_full(&pdu->sdata[pdu->size], len, from))
 		len = 0;
 
 	pdu->size += len;
net/9p/trans_virtio.c

@@ -331,7 +331,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
 			if (err == -ERESTARTSYS)
 				return err;
 		}
-		n = iov_iter_get_pages_alloc(data, pages, count, offs);
+		n = iov_iter_get_pages_alloc2(data, pages, count, offs);
 		if (n < 0)
 			return n;
 		*need_drop = 1;
@@ -373,6 +373,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
 			(*pages)[index] = kmap_to_page(p);
 			p += PAGE_SIZE;
 		}
+		iov_iter_advance(data, len);
 		return len;
 	}
 }
net/core/datagram.c

@@ -632,12 +632,11 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
 		if (frag == MAX_SKB_FRAGS)
 			return -EMSGSIZE;
 
-		copied = iov_iter_get_pages(from, pages, length,
+		copied = iov_iter_get_pages2(from, pages, length,
 					    MAX_SKB_FRAGS - frag, &start);
 		if (copied < 0)
 			return -EFAULT;
 
-		iov_iter_advance(from, copied);
 		length -= copied;
 
 		truesize = PAGE_ALIGN(copied + start);
net/core/skmsg.c

@@ -324,14 +324,13 @@ int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
 			goto out;
 		}
 
-		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
+		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
 					    &offset);
 		if (copied <= 0) {
 			ret = -EFAULT;
 			goto out;
 		}
 
-		iov_iter_advance(from, copied);
 		bytes -= copied;
 		msg->sg.size += copied;
 
net/rds/message.c

@@ -391,7 +391,7 @@ static int rds_message_zcopy_from_user(struct rds_message *rm, struct iov_iter *from)
 		size_t start;
 		ssize_t copied;
 
-		copied = iov_iter_get_pages(from, &pages, PAGE_SIZE,
+		copied = iov_iter_get_pages2(from, &pages, PAGE_SIZE,
 					    1, &start);
 		if (copied < 0) {
 			struct mmpin *mmp;
@@ -405,7 +405,6 @@ static int rds_message_zcopy_from_user(struct rds_message *rm, struct iov_iter *from)
 			goto err;
 		}
 		total_copied += copied;
-		iov_iter_advance(from, copied);
 		length -= copied;
 		sg_set_page(sg, pages, copied, start);
 		rm->data.op_nents++;
net/tls/tls_sw.c

@@ -1352,7 +1352,7 @@ static int tls_setup_from_iter(struct iov_iter *from,
 			rc = -EFAULT;
 			goto out;
 		}
-		copied = iov_iter_get_pages(from, pages,
+		copied = iov_iter_get_pages2(from, pages,
 					    length,
 					    maxpages, &offset);
 		if (copied <= 0) {
@@ -1360,8 +1360,6 @@ static int tls_setup_from_iter(struct iov_iter *from,
 			goto out;
 		}
 
-		iov_iter_advance(from, copied);
-
 		length -= copied;
 		size += copied;
 		while (copied) {