vmcore: convert __read_vmcore to use an iov_iter

This gets rid of copy_to() and lets us use proc_read_iter() instead of
proc_read().

Link: https://lkml.kernel.org/r/20220408090636.560886-3-bhe@redhat.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Parent: 5d8de293c2
Commit: 4a22fd2037
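
In outline: an iov_iter describes both the destination (user or kernel
segments) and the remaining byte count, so the userbuf flag and the manual
buflen/buffer bookkeeping in __read_vmcore() both disappear. A minimal
sketch of the idiom, using only the generic uio API (copy_to_iter_or_fault()
is an invented name for illustration, not part of this patch):

#include <linux/uio.h>
#include <linux/errno.h>

/* Sketch only: what copy_to() reduces to once the destination is an
 * iov_iter.  No userbuf flag is needed, because the iterator already
 * records whether its segments are user or kernel memory. */
static int copy_to_iter_or_fault(const void *src, size_t size,
				 struct iov_iter *iter)
{
	/* copy_to_iter() returns the number of bytes copied; anything
	 * short of 'size' means the destination faulted. */
	if (copy_to_iter(src, size, iter) < size)
		return -EFAULT;
	return 0;
}
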
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -249,22 +249,8 @@ ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
 	return copy_oldmem_page(iter, pfn, csize, offset);
 }
 
-/*
- * Copy to either kernel or user space
- */
-static int copy_to(void *target, void *src, size_t size, int userbuf)
-{
-	if (userbuf) {
-		if (copy_to_user((char __user *) target, src, size))
-			return -EFAULT;
-	} else {
-		memcpy(target, src, size);
-	}
-	return 0;
-}
-
 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
-static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
+static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
 {
 	struct vmcoredd_node *dump;
 	u64 offset = 0;
@@ -277,14 +263,13 @@ static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
 		if (start < offset + dump->size) {
 			tsz = min(offset + (u64)dump->size - start, (u64)size);
 			buf = dump->buf + start - offset;
-			if (copy_to(dst, buf, tsz, userbuf)) {
+			if (copy_to_iter(buf, tsz, iter) < tsz) {
 				ret = -EFAULT;
 				goto out_unlock;
 			}
 
 			size -= tsz;
 			start += tsz;
-			dst += tsz;
 
 			/* Leave now if buffer filled already */
 			if (!size)
@@ -340,33 +325,28 @@ out_unlock:
 /* Read from the ELF header and then the crash dump. On error, negative value is
  * returned otherwise number of bytes read are returned.
  */
-static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
-			     int userbuf)
+static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
 {
 	ssize_t acc = 0, tmp;
 	size_t tsz;
 	u64 start;
 	struct vmcore *m = NULL;
 
-	if (buflen == 0 || *fpos >= vmcore_size)
+	if (!iov_iter_count(iter) || *fpos >= vmcore_size)
 		return 0;
 
-	/* trim buflen to not go beyond EOF */
-	if (buflen > vmcore_size - *fpos)
-		buflen = vmcore_size - *fpos;
+	iov_iter_truncate(iter, vmcore_size - *fpos);
 
 	/* Read ELF core header */
 	if (*fpos < elfcorebuf_sz) {
-		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
-		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
+		tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));
+		if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)
 			return -EFAULT;
-		buflen -= tsz;
 		*fpos += tsz;
-		buffer += tsz;
 		acc += tsz;
 
 		/* leave now if filled buffer already */
-		if (buflen == 0)
+		if (!iov_iter_count(iter))
 			return acc;
 	}
 
@@ -387,35 +367,32 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
 		/* Read device dumps */
 		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
 			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
-				  (size_t)*fpos, buflen);
+				  (size_t)*fpos, iov_iter_count(iter));
 			start = *fpos - elfcorebuf_sz;
-			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
+			if (vmcoredd_copy_dumps(iter, start, tsz))
 				return -EFAULT;
 
-			buflen -= tsz;
 			*fpos += tsz;
-			buffer += tsz;
 			acc += tsz;
 
 			/* leave now if filled buffer already */
-			if (!buflen)
+			if (!iov_iter_count(iter))
 				return acc;
 		}
 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
 
 		/* Read remaining elf notes */
-		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
+		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos,
+			  iov_iter_count(iter));
 		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
-		if (copy_to(buffer, kaddr, tsz, userbuf))
+		if (copy_to_iter(kaddr, tsz, iter) < tsz)
 			return -EFAULT;
 
-		buflen -= tsz;
 		*fpos += tsz;
-		buffer += tsz;
 		acc += tsz;
 
 		/* leave now if filled buffer already */
-		if (buflen == 0)
+		if (!iov_iter_count(iter))
 			return acc;
 	}
 
@@ -423,19 +400,17 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
 		if (*fpos < m->offset + m->size) {
 			tsz = (size_t)min_t(unsigned long long,
 					    m->offset + m->size - *fpos,
-					    buflen);
+					    iov_iter_count(iter));
 			start = m->paddr + *fpos - m->offset;
-			tmp = read_from_oldmem(buffer, tsz, &start,
-					       userbuf, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
+			tmp = read_from_oldmem_iter(iter, tsz, &start,
+					cc_platform_has(CC_ATTR_MEM_ENCRYPT));
 			if (tmp < 0)
 				return tmp;
-			buflen -= tsz;
 			*fpos += tsz;
-			buffer += tsz;
 			acc += tsz;
 
 			/* leave now if filled buffer already */
-			if (buflen == 0)
+			if (!iov_iter_count(iter))
 				return acc;
 		}
 	}
@@ -443,15 +418,14 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
 	return acc;
 }
 
-static ssize_t read_vmcore(struct file *file, char __user *buffer,
-			   size_t buflen, loff_t *fpos)
+static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
 {
-	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
+	return __read_vmcore(iter, &iocb->ki_pos);
 }
 
 /*
  * The vmcore fault handler uses the page cache and fills data using the
- * standard __vmcore_read() function.
+ * standard __read_vmcore() function.
  *
  * On s390 the fault handler is used for memory regions that can't be mapped
  * directly with remap_pfn_range().
@@ -461,9 +435,10 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
 #ifdef CONFIG_S390
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 	pgoff_t index = vmf->pgoff;
+	struct iov_iter iter;
+	struct kvec kvec;
 	struct page *page;
 	loff_t offset;
-	char *buf;
 	int rc;
 
 	page = find_or_create_page(mapping, index, GFP_KERNEL);
@@ -471,8 +446,11 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
 		return VM_FAULT_OOM;
 	if (!PageUptodate(page)) {
 		offset = (loff_t) index << PAGE_SHIFT;
-		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
-		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
+		kvec.iov_base = page_address(page);
+		kvec.iov_len = PAGE_SIZE;
+		iov_iter_kvec(&iter, READ, &kvec, 1, PAGE_SIZE);
+
+		rc = __read_vmcore(&iter, &offset);
 		if (rc < 0) {
 			unlock_page(page);
 			put_page(page);
@@ -722,7 +700,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 
 static const struct proc_ops vmcore_proc_ops = {
 	.proc_open	= open_vmcore,
-	.proc_read	= read_vmcore,
+	.proc_read_iter	= read_vmcore,
 	.proc_lseek	= default_llseek,
 	.proc_mmap	= mmap_vmcore,
 };
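
After the conversion, the two former callers build their iterators
differently. The read() path gets one for free: with .proc_read_iter wired
up, the VFS hands read_vmcore() an iov_iter already describing the user
buffer. The s390 fault handler builds a kvec-backed iterator by hand, as in
the hunk above; a self-contained sketch of that pattern, assuming a
hypothetical wrapper inside fs/proc/vmcore.c (read_page_into() is invented
for illustration, and __read_vmcore() is the static function converted
above):

#include <linux/uio.h>
#include <linux/mm.h>

static int read_page_into(struct page *page, loff_t offset)
{
	struct kvec kvec = {
		.iov_base = page_address(page),	/* kernel mapping of the page */
		.iov_len  = PAGE_SIZE,
	};
	struct iov_iter iter;
	ssize_t rc;

	/* READ marks the iterator as a destination being filled, matching
	 * the iov_iter_kvec() call in the fault-handler hunk above. */
	iov_iter_kvec(&iter, READ, &kvec, 1, PAGE_SIZE);
	rc = __read_vmcore(&iter, &offset);
	return rc < 0 ? rc : 0;
}

Because the iterator carries its own count, iov_iter_truncate() also
replaces the old "trim buflen to not go beyond EOF" arithmetic: both the
user-backed and kvec-backed cases are clamped the same way inside
__read_vmcore().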