Merge branch 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs update from Al Viro:
 - misc stable fixes
 - trivial kernel-doc and comment fixups
 - remove never-used block_page_mkwrite() wrapper function, and rename
   the function that is _actually_ used to not have double underscores.

* 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  fs: 9p: cache.h: Add #define of include guard
  vfs: remove stale comment in inode_operations
  vfs: remove unused wrapper block_page_mkwrite()
  binfmt_elf: Correct `arch_check_elf's description
  fs: fix writeback.c kernel-doc warnings
  fs: fix inode.c kernel-doc warning
  fs/pipe.c: return error code rather than 0 in pipe_write()
  fs/pipe.c: preserve alloc_file() error code
  binfmt_elf: Don't clobber passed executable's file header
  FS-Cache: Handle a write to the page immediately beyond the EOF marker
  cachefiles: perform test on s_blocksize when opening cache file.
  FS-Cache: Don't override netfs's primary_index if registering failed
  FS-Cache: Increase reference of parent after registering, netfs success
  debugfs: fix refcount imbalance in start_creating
Commit 842cf0b952
@@ -21,6 +21,7 @@
  */

 #ifndef _9P_CACHE_H
+#define _9P_CACHE_H
 #ifdef CONFIG_9P_FSCACHE
 #include <linux/fscache.h>
 #include <linux/spinlock.h>

@@ -488,7 +488,7 @@ static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
 }

 /**
- * arch_check_elf() - check a PT_LOPROC..PT_HIPROC ELF program header
+ * arch_check_elf() - check an ELF executable
  * @ehdr: The main ELF header
  * @has_interp: True if the ELF has an interpreter, else false.
  * @state: Architecture-specific state preserved throughout the process

@@ -760,16 +760,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
              */
             would_dump(bprm, interpreter);

-            retval = kernel_read(interpreter, 0, bprm->buf,
-                                 BINPRM_BUF_SIZE);
-            if (retval != BINPRM_BUF_SIZE) {
+            /* Get the exec headers */
+            retval = kernel_read(interpreter, 0,
+                                 (void *)&loc->interp_elf_ex,
+                                 sizeof(loc->interp_elf_ex));
+            if (retval != sizeof(loc->interp_elf_ex)) {
                 if (retval >= 0)
                     retval = -EIO;
                 goto out_free_dentry;
             }

-            /* Get the exec headers */
-            loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
             break;
         }
         elf_ppnt++;

fs/buffer.c
@@ -2420,9 +2420,9 @@ EXPORT_SYMBOL(block_commit_write);
  * unlock the page.
  *
  * Direct callers of this function should protect against filesystem freezing
- * using sb_start_write() - sb_end_write() functions.
+ * using sb_start_pagefault() - sb_end_pagefault() functions.
  */
-int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                          get_block_t get_block)
 {
     struct page *page = vmf->page;

@@ -2459,26 +2459,6 @@ out_unlock:
     unlock_page(page);
     return ret;
 }
-EXPORT_SYMBOL(__block_page_mkwrite);
-
-int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
-                       get_block_t get_block)
-{
-    int ret;
-    struct super_block *sb = file_inode(vma->vm_file)->i_sb;
-
-    sb_start_pagefault(sb);
-
-    /*
-     * Update file times before taking page lock. We may end up failing the
-     * fault so this update may be superfluous but who really cares...
-     */
-    file_update_time(vma->vm_file);
-
-    ret = __block_page_mkwrite(vma, vmf, get_block);
-    sb_end_pagefault(sb);
-    return block_page_mkwrite_return(ret);
-}
 EXPORT_SYMBOL(block_page_mkwrite);

 /*

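[A minimal sketch, not part of the pull request: with the wrapper gone, a filesystem's ->page_mkwrite handler calls the renamed block_page_mkwrite() directly and handles freeze protection itself, per the updated kernel-doc above. The myfs_* identifiers and the myfs_get_block get_block_t callback are hypothetical; the body mirrors the wrapper removed above.]

#include <linux/buffer_head.h>  /* block_page_mkwrite(), block_page_mkwrite_return() */
#include <linux/fs.h>           /* file_inode(), file_update_time(), sb_{start,end}_pagefault() */
#include <linux/mm.h>           /* struct vm_area_struct, struct vm_fault */

/* Hypothetical filesystem's ->page_mkwrite handler. */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct super_block *sb = file_inode(vma->vm_file)->i_sb;
    int ret;

    /* Freeze protection is now the direct caller's responsibility. */
    sb_start_pagefault(sb);

    /* Update file times before taking the page lock. */
    file_update_time(vma->vm_file);

    /* myfs_get_block is this filesystem's get_block_t callback (hypothetical). */
    ret = block_page_mkwrite(vma, vmf, myfs_get_block);

    sb_end_pagefault(sb);
    return block_page_mkwrite_return(ret);
}

Open-coding this lets filesystems wrap the call in their own logic, as the ext4 hunks below do with an ENOSPC retry loop and optional journalling.
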
@@ -655,6 +655,8 @@ lookup_again:
         aops = d_backing_inode(object->dentry)->i_mapping->a_ops;
         if (!aops->bmap)
             goto check_error;
+        if (object->dentry->d_sb->s_blocksize > PAGE_SIZE)
+            goto check_error;

         object->backer = object->dentry;
     } else {

@@ -414,9 +414,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
     ASSERT(inode->i_mapping->a_ops->readpages);

     /* calculate the shift required to use bmap */
-    if (inode->i_sb->s_blocksize > PAGE_SIZE)
-        goto enobufs;
-
     shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

     op->op.flags &= FSCACHE_OP_KEEP_FLAGS;

@@ -711,9 +708,6 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
     ASSERT(inode->i_mapping->a_ops->readpages);

     /* calculate the shift required to use bmap */
-    if (inode->i_sb->s_blocksize > PAGE_SIZE)
-        goto all_enobufs;
-
     shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

     pagevec_init(&pagevec, 0);

@@ -905,6 +899,15 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
     cache = container_of(object->fscache.cache,
                          struct cachefiles_cache, cache);

+    pos = (loff_t)page->index << PAGE_SHIFT;
+
+    /* We mustn't write more data than we have, so we have to beware of a
+     * partial page at EOF.
+     */
+    eof = object->fscache.store_limit_l;
+    if (pos >= eof)
+        goto error;
+
     /* write the page to the backing filesystem and let it store it in its
      * own time */
     path.mnt = cache->mnt;

@@ -912,40 +915,38 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
     file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
     if (IS_ERR(file)) {
         ret = PTR_ERR(file);
-    } else {
-        pos = (loff_t) page->index << PAGE_SHIFT;
-
-        /* we mustn't write more data than we have, so we have
-         * to beware of a partial page at EOF */
-        eof = object->fscache.store_limit_l;
-        len = PAGE_SIZE;
-        if (eof & ~PAGE_MASK) {
-            ASSERTCMP(pos, <, eof);
-            if (eof - pos < PAGE_SIZE) {
-                _debug("cut short %llx to %llx",
-                       pos, eof);
-                len = eof - pos;
-                ASSERTCMP(pos + len, ==, eof);
-            }
-        }
-
-        data = kmap(page);
-        ret = __kernel_write(file, data, len, &pos);
-        kunmap(page);
-        if (ret != len)
-            ret = -EIO;
-        fput(file);
-    }
-
-    if (ret < 0) {
-        if (ret == -EIO)
-            cachefiles_io_error_obj(
-                object, "Write page to backing file failed");
-        ret = -ENOBUFS;
-    }
-
-    _leave(" = %d", ret);
-    return ret;
+        goto error_2;
+    }
+
+    len = PAGE_SIZE;
+    if (eof & ~PAGE_MASK) {
+        if (eof - pos < PAGE_SIZE) {
+            _debug("cut short %llx to %llx",
+                   pos, eof);
+            len = eof - pos;
+            ASSERTCMP(pos + len, ==, eof);
+        }
+    }
+
+    data = kmap(page);
+    ret = __kernel_write(file, data, len, &pos);
+    kunmap(page);
+    fput(file);
+    if (ret != len)
+        goto error_eio;
+
+    _leave(" = 0");
+    return 0;
+
+error_eio:
+    ret = -EIO;
+error_2:
+    if (ret == -EIO)
+        cachefiles_io_error_obj(object,
+                                "Write page to backing file failed");
+error:
+    _leave(" = -ENOBUFS [%d]", ret);
+    return -ENOBUFS;
 }

 /*

@@ -271,8 +271,12 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
         dput(dentry);
         dentry = ERR_PTR(-EEXIST);
     }
-    if (IS_ERR(dentry))
+
+    if (IS_ERR(dentry)) {
         mutex_unlock(&d_inode(parent)->i_mutex);
+        simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+    }
+
     return dentry;
 }

@@ -5283,7 +5283,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
         !ext4_should_journal_data(inode) &&
         !ext4_nonda_switch(inode->i_sb)) {
         do {
-            ret = __block_page_mkwrite(vma, vmf,
+            ret = block_page_mkwrite(vma, vmf,
                                        ext4_da_get_block_prep);
         } while (ret == -ENOSPC &&
                  ext4_should_retry_alloc(inode->i_sb, &retries));

@@ -5330,7 +5330,7 @@ retry_alloc:
         ret = VM_FAULT_SIGBUS;
         goto out;
     }
-    ret = __block_page_mkwrite(vma, vmf, get_block);
+    ret = block_page_mkwrite(vma, vmf, get_block);
     if (!ret && ext4_should_journal_data(inode)) {
         if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
                 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {

@@ -22,6 +22,7 @@ static LIST_HEAD(fscache_netfs_list);
 int __fscache_register_netfs(struct fscache_netfs *netfs)
 {
     struct fscache_netfs *ptr;
+    struct fscache_cookie *cookie;
     int ret;

     _enter("{%s}", netfs->name);

@@ -29,29 +30,25 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
     INIT_LIST_HEAD(&netfs->link);

     /* allocate a cookie for the primary index */
-    netfs->primary_index =
-        kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);
+    cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);

-    if (!netfs->primary_index) {
+    if (!cookie) {
         _leave(" = -ENOMEM");
         return -ENOMEM;
     }

     /* initialise the primary index cookie */
-    atomic_set(&netfs->primary_index->usage, 1);
-    atomic_set(&netfs->primary_index->n_children, 0);
-    atomic_set(&netfs->primary_index->n_active, 1);
+    atomic_set(&cookie->usage, 1);
+    atomic_set(&cookie->n_children, 0);
+    atomic_set(&cookie->n_active, 1);

-    netfs->primary_index->def = &fscache_fsdef_netfs_def;
-    netfs->primary_index->parent = &fscache_fsdef_index;
-    netfs->primary_index->netfs_data = netfs;
-    netfs->primary_index->flags = 1 << FSCACHE_COOKIE_ENABLED;
+    cookie->def = &fscache_fsdef_netfs_def;
+    cookie->parent = &fscache_fsdef_index;
+    cookie->netfs_data = netfs;
+    cookie->flags = 1 << FSCACHE_COOKIE_ENABLED;

-    atomic_inc(&netfs->primary_index->parent->usage);
-    atomic_inc(&netfs->primary_index->parent->n_children);
-
-    spin_lock_init(&netfs->primary_index->lock);
-    INIT_HLIST_HEAD(&netfs->primary_index->backing_objects);
+    spin_lock_init(&cookie->lock);
+    INIT_HLIST_HEAD(&cookie->backing_objects);

     /* check the netfs type is not already present */
     down_write(&fscache_addremove_sem);

@@ -62,6 +59,10 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
         goto already_registered;
     }

+    atomic_inc(&cookie->parent->usage);
+    atomic_inc(&cookie->parent->n_children);
+
+    netfs->primary_index = cookie;
     list_add(&netfs->link, &fscache_netfs_list);
     ret = 0;

@@ -70,11 +71,8 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
 already_registered:
     up_write(&fscache_addremove_sem);

-    if (ret < 0) {
-        netfs->primary_index->parent = NULL;
-        __fscache_cookie_put(netfs->primary_index);
-        netfs->primary_index = NULL;
-    }
+    if (ret < 0)
+        kmem_cache_free(fscache_cookie_jar, cookie);

     _leave(" = %d", ret);
     return ret;

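[For orientation, not part of the pull: the path the two FS-Cache hunks above tighten is the one a network filesystem hits through fscache_register_netfs(). A minimal, hypothetical caller follows; the myfs name and functions are illustrative only.]

#include <linux/fscache.h>  /* struct fscache_netfs, fscache_register_netfs() */

/* Hypothetical netfs description; real ones live in 9p, AFS, NFS, CIFS. */
static struct fscache_netfs myfs_cache_netfs = {
    .name    = "myfs",
    .version = 0,
};

static int myfs_init_fscache(void)
{
    /*
     * After the fixes above, a failed registration no longer leaves a
     * stale pointer in myfs_cache_netfs.primary_index, and the parent
     * index's usage/n_children references are only taken on success.
     */
    return fscache_register_netfs(&myfs_cache_netfs);
}

On teardown, fscache_unregister_netfs(&myfs_cache_netfs) releases the primary index again.
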
@@ -816,7 +816,7 @@ static void fscache_write_op(struct fscache_operation *_op)
         goto superseded;
     page = results[0];
     _debug("gang %d [%lx]", n, page->index);
-    if (page->index > op->store_limit) {
+    if (page->index >= op->store_limit) {
         fscache_stat(&fscache_n_store_pages_over_limit);
         goto superseded;
     }

@@ -109,7 +109,7 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
         goto out;

     file_update_time(vma->vm_file);
-    ret = __block_page_mkwrite(vma, vmf, nilfs_get_block);
+    ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
     if (ret) {
         nilfs_transaction_abort(inode->i_sb);
         goto out;

fs/pipe.c
@@ -366,18 +366,17 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
         int offset = buf->offset + buf->len;

         if (ops->can_merge && offset + chars <= PAGE_SIZE) {
-            int error = ops->confirm(pipe, buf);
-            if (error)
+            ret = ops->confirm(pipe, buf);
+            if (ret)
                 goto out;

             ret = copy_page_from_iter(buf->page, offset, chars, from);
             if (unlikely(ret < chars)) {
-                error = -EFAULT;
+                ret = -EFAULT;
                 goto out;
             }
             do_wakeup = 1;
-            buf->len += chars;
-            ret = chars;
+            buf->len += ret;
             if (!iov_iter_count(from))
                 goto out;
         }

@@ -693,17 +692,20 @@ int create_pipe_files(struct file **res, int flags)

     d_instantiate(path.dentry, inode);

-    err = -ENFILE;
     f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
-    if (IS_ERR(f))
+    if (IS_ERR(f)) {
+        err = PTR_ERR(f);
         goto err_dentry;
+    }

     f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
     f->private_data = inode->i_pipe;

     res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
-    if (IS_ERR(res[0]))
+    if (IS_ERR(res[0])) {
+        err = PTR_ERR(res[0]);
         goto err_file;
+    }

     path_get(&path);
     res[0]->private_data = inode->i_pipe;

@@ -1506,7 +1506,7 @@ xfs_filemap_page_mkwrite(
         ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_direct,
                             xfs_end_io_dax_write);
     } else {
-        ret = __block_page_mkwrite(vma, vmf, xfs_get_blocks);
+        ret = block_page_mkwrite(vma, vmf, xfs_get_blocks);
         ret = block_page_mkwrite_return(ret);
     }

@@ -227,8 +227,6 @@ int cont_write_begin(struct file *, struct address_space *, loff_t,
                      get_block_t *, loff_t *);
 int generic_cont_expand_simple(struct inode *inode, loff_t size);
 int block_commit_write(struct page *page, unsigned from, unsigned to);
-int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
-                         get_block_t get_block);
 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                        get_block_t get_block);
 /* Convert errno to return value from ->page_mkwrite() call */

@@ -1665,8 +1665,6 @@ struct inode_operations {
                        umode_t create_mode, int *opened);
     int (*tmpfile) (struct inode *, struct dentry *, umode_t);
     int (*set_acl)(struct inode *, struct posix_acl *, int);
-
-    /* WARNING: probably going away soon, do not use! */
 } ____cacheline_aligned;

 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,