// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMFS (as used by RIO Karma) file operations.
 * Copyright (C) 2005 Bob Copeland <me@bobcopeland.com>
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include "omfs.h"

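/*
 * Number of extent entries (terminator included) that fit in one sysblock
 * (sbi->s_sys_blocksize bytes) when the extent table starts at @offset.
 */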
static u32 omfs_max_extents(struct omfs_sb_info *sbi, int offset)
{
	return (sbi->s_sys_blocksize - offset -
		sizeof(struct omfs_extent)) /
		sizeof(struct omfs_extent_entry) + 1;
}

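/*
 * Reset the extent table at @offset within @bh to hold only a terminator:
 * no continuation block (e_next all ones) and a terminator entry whose
 * cluster and block count are likewise all ones.
 */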
void omfs_make_empty_table(struct buffer_head *bh, int offset)
{
	struct omfs_extent *oe = (struct omfs_extent *) &bh->b_data[offset];

	oe->e_next = ~cpu_to_be64(0ULL);
	oe->e_extent_count = cpu_to_be32(1);
	oe->e_fill = cpu_to_be32(0x22);
	oe->e_entry.e_cluster = ~cpu_to_be64(0ULL);
	oe->e_entry.e_blocks = ~cpu_to_be64(0ULL);
}

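/*
 * Walk the file's chain of extent tables, releasing every allocated extent
 * and resetting each table to empty.  Continuation table blocks (and their
 * mirrors) are freed as well.  Only truncation to size zero is supported;
 * anything else returns -EIO.
 */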
int omfs_shrink_inode(struct inode *inode)
{
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	struct omfs_extent *oe;
	struct omfs_extent_entry *entry;
	struct buffer_head *bh;
	u64 next, last;
	u32 extent_count;
	u32 max_extents;
	int ret;

	/* traverse extent table, freeing each entry that is greater
	 * than inode->i_size;
	 */
	next = inode->i_ino;

	/* only support truncate -> 0 for now */
	ret = -EIO;
	if (inode->i_size != 0)
		goto out;

	bh = omfs_bread(inode->i_sb, next);
	if (!bh)
		goto out;

	oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
	max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START);

	for (;;) {

		if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next))
			goto out_brelse;

		extent_count = be32_to_cpu(oe->e_extent_count);

		if (extent_count > max_extents)
			goto out_brelse;

		last = next;
		next = be64_to_cpu(oe->e_next);
		entry = &oe->e_entry;

		/* ignore last entry as it is the terminator */
		for (; extent_count > 1; extent_count--) {
			u64 start, count;
			start = be64_to_cpu(entry->e_cluster);
			count = be64_to_cpu(entry->e_blocks);

			omfs_clear_range(inode->i_sb, start, (int) count);
			entry++;
		}
		omfs_make_empty_table(bh, (char *) oe - bh->b_data);
		mark_buffer_dirty(bh);
		brelse(bh);

		if (last != inode->i_ino)
			omfs_clear_range(inode->i_sb, last, sbi->s_mirrors);

		if (next == ~0)
			break;

		bh = omfs_bread(inode->i_sb, next);
		if (!bh)
			goto out;
		oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
		max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT);
	}
	ret = 0;
out:
	return ret;
out_brelse:
	brelse(bh);
	return ret;
}

static void omfs_truncate(struct inode *inode)
{
	omfs_shrink_inode(inode);
	mark_inode_dirty(inode);
}

/*
 * Add new blocks to the current extent, or create new entries/continuations
 * as necessary.
 */
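/*
 * Note: the terminator entry's e_blocks field holds the bitwise complement
 * of the file's total block count, so the code below keeps it in sync
 * whenever blocks are added to an extent.
 */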
static int omfs_grow_extent(struct inode *inode, struct omfs_extent *oe,
			u64 *ret_block)
{
	struct omfs_extent_entry *terminator;
	struct omfs_extent_entry *entry = &oe->e_entry;
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	u32 extent_count = be32_to_cpu(oe->e_extent_count);
	u64 new_block = 0;
	u32 max_count;
	int new_count;
	int ret = 0;

	/* reached the end of the extent table with no blocks mapped.
	 * there are three possibilities for adding: grow last extent,
	 * add a new extent to the current extent table, and add a
	 * continuation inode.  the last two cases need an allocator for
	 * sbi->s_clustersize
	 */

	/* TODO: handle holes */

	/* should always have a terminator */
	if (extent_count < 1)
		return -EIO;

	/* trivially grow current extent, if next block is not taken */
	terminator = entry + extent_count - 1;
	if (extent_count > 1) {
		entry = terminator - 1;
		new_block = be64_to_cpu(entry->e_cluster) +
			be64_to_cpu(entry->e_blocks);

		if (omfs_allocate_block(inode->i_sb, new_block)) {
			be64_add_cpu(&entry->e_blocks, 1);
			terminator->e_blocks = ~(cpu_to_be64(
				be64_to_cpu(~terminator->e_blocks) + 1));
			goto out;
		}
	}
	max_count = omfs_max_extents(sbi, OMFS_EXTENT_START);

	/* TODO: add a continuation block here */
	if (be32_to_cpu(oe->e_extent_count) > max_count - 1)
		return -EIO;

	/* try to allocate a new cluster */
	ret = omfs_allocate_range(inode->i_sb, 1, sbi->s_clustersize,
		&new_block, &new_count);
	if (ret)
		goto out_fail;

	/* copy terminator down an entry */
	entry = terminator;
	terminator++;
	memcpy(terminator, entry, sizeof(struct omfs_extent_entry));

	entry->e_cluster = cpu_to_be64(new_block);
	entry->e_blocks = cpu_to_be64((u64) new_count);

	terminator->e_blocks = ~(cpu_to_be64(
		be64_to_cpu(~terminator->e_blocks) + (u64) new_count));

	/* write in new entry */
	be32_add_cpu(&oe->e_extent_count, 1);

out:
	*ret_block = new_block;
out_fail:
	return ret;
}

/*
 * Scans across the extent table for a given file block number.
 * If block not found, return 0.
 */
static sector_t find_block(struct inode *inode, struct omfs_extent_entry *ent,
			sector_t block, int count, int *left)
{
	/* count > 1 because of terminator */
	sector_t searched = 0;
	for (; count > 1; count--) {
		int numblocks = clus_to_blk(OMFS_SB(inode->i_sb),
			be64_to_cpu(ent->e_blocks));

		if (block >= searched &&
		    block < searched + numblocks) {
			/*
			 * found it at cluster + (block - searched)
			 * numblocks - (block - searched) is remainder
			 */
			*left = numblocks - (block - searched);
			return clus_to_blk(OMFS_SB(inode->i_sb),
				be64_to_cpu(ent->e_cluster)) +
				block - searched;
		}
		searched += numblocks;
		ent++;
	}
	return 0;
}

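/*
 * get_block callback for the generic buffer/mpage helpers: walk the extent
 * tables starting at the inode's own block, map the requested logical block
 * to a disk block, and report how many following blocks are contiguous via
 * bh_result->b_size.  When @create is set and no mapping exists, a block is
 * allocated through omfs_grow_extent().
 */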
static int omfs_get_block(struct inode *inode, sector_t block,
			  struct buffer_head *bh_result, int create)
{
	struct buffer_head *bh;
	sector_t next, offset;
	int ret;
	u64 new_block;
	u32 max_extents;
	int extent_count;
	struct omfs_extent *oe;
	struct omfs_extent_entry *entry;
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	int max_blocks = bh_result->b_size >> inode->i_blkbits;
	int remain;

	ret = -EIO;
	bh = omfs_bread(inode->i_sb, inode->i_ino);
	if (!bh)
		goto out;

	oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
	max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START);
	next = inode->i_ino;

	for (;;) {

		if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next))
			goto out_brelse;

		extent_count = be32_to_cpu(oe->e_extent_count);
		next = be64_to_cpu(oe->e_next);
		entry = &oe->e_entry;

		if (extent_count > max_extents)
			goto out_brelse;

		offset = find_block(inode, entry, block, extent_count, &remain);
		if (offset > 0) {
			ret = 0;
			map_bh(bh_result, inode->i_sb, offset);
			if (remain > max_blocks)
				remain = max_blocks;
			bh_result->b_size = (remain << inode->i_blkbits);
			goto out_brelse;
		}
		if (next == ~0)
			break;

		brelse(bh);
		bh = omfs_bread(inode->i_sb, next);
		if (!bh)
			goto out;
		oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
		max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT);
	}
	if (create) {
		ret = omfs_grow_extent(inode, oe, &new_block);
		if (ret == 0) {
			mark_buffer_dirty(bh);
			mark_inode_dirty(inode);
			map_bh(bh_result, inode->i_sb,
					clus_to_blk(sbi, new_block));
		}
	}

out_brelse:
	brelse(bh);
out:
	return ret;
}

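/*
 * The address_space operations below are thin wrappers that plug
 * omfs_get_block() into the generic buffer/mpage page I/O helpers.
 */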
static int omfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, omfs_get_block);
}

static void omfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, omfs_get_block);
}

static int omfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, omfs_get_block, wbc);
}

static int
omfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, omfs_get_block);
}

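/*
 * Called on a block_write_begin() failure: drop any page cache instantiated
 * past i_size and call omfs_truncate() to trim the on-disk allocation.
 */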
static void omfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		omfs_truncate(inode);
	}
}

static int omfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				omfs_get_block);
	if (unlikely(ret))
		omfs_write_failed(mapping, pos + len);

	return ret;
}

static sector_t omfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, omfs_get_block);
}

const struct file_operations omfs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.mmap = generic_file_mmap,
	.fsync = generic_file_fsync,
	.splice_read = generic_file_splice_read,
};

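/*
 * ->setattr: size changes go through truncate_setsize()/omfs_truncate(),
 * everything else is applied with setattr_copy().  OMFS does not opt in to
 * idmapped mounts, so &init_user_ns is passed to the VFS helpers.
 */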
static int omfs_setattr(struct user_namespace *mnt_userns,
			struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(&init_user_ns, dentry, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		error = inode_newsize_ok(inode, attr->ia_size);
		if (error)
			return error;
		truncate_setsize(inode, attr->ia_size);
		omfs_truncate(inode);
	}

	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations omfs_file_inops = {
	.setattr = omfs_setattr,
};

const struct address_space_operations omfs_aops = {
	.set_page_dirty = __set_page_dirty_buffers,
	.readpage = omfs_readpage,
	.readahead = omfs_readahead,
	.writepage = omfs_writepage,
	.writepages = omfs_writepages,
	.write_begin = omfs_write_begin,
	.write_end = generic_write_end,
	.bmap = omfs_bmap,
};