switch hugetlbfs to ->read_iter()
... and fix the case when the area we are asked to read crosses a hugepage boundary.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
This commit is contained in:
Parent
c12c49e702
Commit
34d0640e26
|
@ -34,6 +34,7 @@
|
||||||
#include <linux/security.h>
|
#include <linux/security.h>
|
||||||
#include <linux/magic.h>
|
#include <linux/magic.h>
|
||||||
#include <linux/migrate.h>
|
#include <linux/migrate.h>
|
||||||
|
#include <linux/uio.h>
|
||||||
|
|
||||||
#include <asm/uaccess.h>
|
#include <asm/uaccess.h>
|
||||||
|
|
||||||
|
@ -179,42 +180,33 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
static int
|
static size_t
|
||||||
hugetlbfs_read_actor(struct page *page, unsigned long offset,
|
hugetlbfs_read_actor(struct page *page, unsigned long offset,
|
||||||
char __user *buf, unsigned long count,
|
struct iov_iter *to, unsigned long size)
|
||||||
unsigned long size)
|
|
||||||
{
|
{
|
||||||
char *kaddr;
|
size_t copied = 0;
|
||||||
unsigned long left, copied = 0;
|
|
||||||
int i, chunksize;
|
int i, chunksize;
|
||||||
|
|
||||||
if (size > count)
|
|
||||||
size = count;
|
|
||||||
|
|
||||||
/* Find which 4k chunk and offset with in that chunk */
|
/* Find which 4k chunk and offset with in that chunk */
|
||||||
i = offset >> PAGE_CACHE_SHIFT;
|
i = offset >> PAGE_CACHE_SHIFT;
|
||||||
offset = offset & ~PAGE_CACHE_MASK;
|
offset = offset & ~PAGE_CACHE_MASK;
|
||||||
|
|
||||||
while (size) {
|
while (size) {
|
||||||
|
size_t n;
|
||||||
chunksize = PAGE_CACHE_SIZE;
|
chunksize = PAGE_CACHE_SIZE;
|
||||||
if (offset)
|
if (offset)
|
||||||
chunksize -= offset;
|
chunksize -= offset;
|
||||||
if (chunksize > size)
|
if (chunksize > size)
|
||||||
chunksize = size;
|
chunksize = size;
|
||||||
kaddr = kmap(&page[i]);
|
n = copy_page_to_iter(&page[i], offset, chunksize, to);
|
||||||
left = __copy_to_user(buf, kaddr + offset, chunksize);
|
copied += n;
|
||||||
kunmap(&page[i]);
|
if (n != chunksize)
|
||||||
if (left) {
|
return copied;
|
||||||
copied += (chunksize - left);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
offset = 0;
|
offset = 0;
|
||||||
size -= chunksize;
|
size -= chunksize;
|
||||||
buf += chunksize;
|
|
||||||
copied += chunksize;
|
|
||||||
i++;
|
i++;
|
||||||
}
|
}
|
||||||
return copied ? copied : -EFAULT;
|
return copied;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -222,39 +214,34 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
|
||||||
* data. Its *very* similar to do_generic_mapping_read(), we can't use that
|
* data. Its *very* similar to do_generic_mapping_read(), we can't use that
|
||||||
* since it has PAGE_CACHE_SIZE assumptions.
|
* since it has PAGE_CACHE_SIZE assumptions.
|
||||||
*/
|
*/
|
||||||
static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
|
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
|
||||||
size_t len, loff_t *ppos)
|
|
||||||
{
|
{
|
||||||
struct hstate *h = hstate_file(filp);
|
struct file *file = iocb->ki_filp;
|
||||||
struct address_space *mapping = filp->f_mapping;
|
struct hstate *h = hstate_file(file);
|
||||||
|
struct address_space *mapping = file->f_mapping;
|
||||||
struct inode *inode = mapping->host;
|
struct inode *inode = mapping->host;
|
||||||
unsigned long index = *ppos >> huge_page_shift(h);
|
unsigned long index = iocb->ki_pos >> huge_page_shift(h);
|
||||||
unsigned long offset = *ppos & ~huge_page_mask(h);
|
unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
|
||||||
unsigned long end_index;
|
unsigned long end_index;
|
||||||
loff_t isize;
|
loff_t isize;
|
||||||
ssize_t retval = 0;
|
ssize_t retval = 0;
|
||||||
|
|
||||||
/* validate length */
|
while (iov_iter_count(to)) {
|
||||||
if (len == 0)
|
|
||||||
goto out;
|
|
||||||
|
|
||||||
for (;;) {
|
|
||||||
struct page *page;
|
struct page *page;
|
||||||
unsigned long nr, ret;
|
size_t nr, copied;
|
||||||
int ra;
|
|
||||||
|
|
||||||
/* nr is the maximum number of bytes to copy from this page */
|
/* nr is the maximum number of bytes to copy from this page */
|
||||||
nr = huge_page_size(h);
|
nr = huge_page_size(h);
|
||||||
isize = i_size_read(inode);
|
isize = i_size_read(inode);
|
||||||
if (!isize)
|
if (!isize)
|
||||||
goto out;
|
break;
|
||||||
end_index = (isize - 1) >> huge_page_shift(h);
|
end_index = (isize - 1) >> huge_page_shift(h);
|
||||||
if (index >= end_index) {
|
|
||||||
if (index > end_index)
|
if (index > end_index)
|
||||||
goto out;
|
break;
|
||||||
|
if (index == end_index) {
|
||||||
nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
|
nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
|
||||||
if (nr <= offset)
|
if (nr <= offset)
|
||||||
goto out;
|
break;
|
||||||
}
|
}
|
||||||
nr = nr - offset;
|
nr = nr - offset;
|
||||||
|
|
||||||
|
@ -265,39 +252,27 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
|
||||||
* We have a HOLE, zero out the user-buffer for the
|
* We have a HOLE, zero out the user-buffer for the
|
||||||
* length of the hole or request.
|
* length of the hole or request.
|
||||||
*/
|
*/
|
||||||
ret = len < nr ? len : nr;
|
copied = iov_iter_zero(nr, to);
|
||||||
if (clear_user(buf, ret))
|
|
||||||
ra = -EFAULT;
|
|
||||||
else
|
|
||||||
ra = 0;
|
|
||||||
} else {
|
} else {
|
||||||
unlock_page(page);
|
unlock_page(page);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We have the page, copy it to user space buffer.
|
* We have the page, copy it to user space buffer.
|
||||||
*/
|
*/
|
||||||
ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
|
copied = hugetlbfs_read_actor(page, offset, to, nr);
|
||||||
ret = ra;
|
|
||||||
page_cache_release(page);
|
page_cache_release(page);
|
||||||
}
|
}
|
||||||
if (ra < 0) {
|
offset += copied;
|
||||||
if (retval == 0)
|
retval += copied;
|
||||||
retval = ra;
|
if (copied != nr && iov_iter_count(to)) {
|
||||||
goto out;
|
if (!retval)
|
||||||
}
|
retval = -EFAULT;
|
||||||
|
|
||||||
offset += ret;
|
|
||||||
retval += ret;
|
|
||||||
len -= ret;
|
|
||||||
index += offset >> huge_page_shift(h);
|
|
||||||
offset &= ~huge_page_mask(h);
|
|
||||||
|
|
||||||
/* short read or no more work */
|
|
||||||
if ((ret != nr) || (len == 0))
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
out:
|
index += offset >> huge_page_shift(h);
|
||||||
*ppos = ((loff_t)index << huge_page_shift(h)) + offset;
|
offset &= ~huge_page_mask(h);
|
||||||
|
}
|
||||||
|
iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
|
||||||
return retval;
|
return retval;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -721,7 +696,8 @@ static void init_once(void *foo)
|
||||||
}
|
}
|
||||||
|
|
||||||
const struct file_operations hugetlbfs_file_operations = {
|
const struct file_operations hugetlbfs_file_operations = {
|
||||||
.read = hugetlbfs_read,
|
.read = new_sync_read,
|
||||||
|
.read_iter = hugetlbfs_read_iter,
|
||||||
.mmap = hugetlbfs_file_mmap,
|
.mmap = hugetlbfs_file_mmap,
|
||||||
.fsync = noop_fsync,
|
.fsync = noop_fsync,
|
||||||
.get_unmapped_area = hugetlb_get_unmapped_area,
|
.get_unmapped_area = hugetlb_get_unmapped_area,
|
||||||
|
|
Loading…
Link in new issue