/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
#include "nodelist.h"

static int jffs2_flash_setup(struct jffs2_sb_info *c);

int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
        struct jffs2_full_dnode *old_metadata, *new_metadata;
        struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
        struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
        struct jffs2_raw_inode *ri;
        union jffs2_device_node dev;
        unsigned char *mdata = NULL;
        int mdatalen = 0;
        unsigned int ivalid;
        uint32_t alloclen;
        int ret;
        int alloc_type = ALLOC_NORMAL;

        jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);

        /* Special cases - we don't want more than one data node
           for these types on the medium at any time. So setattr
           must read the original data associated with the node
           (i.e. the device numbers or the target name) and write
           it out again with the appropriate data attached */
        if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
                /* For these, we don't actually need to read the old node */
                mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
                mdata = (char *)&dev;
                jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
                          __func__, mdatalen);
        } else if (S_ISLNK(inode->i_mode)) {
                mutex_lock(&f->sem);
                mdatalen = f->metadata->size;
                mdata = kmalloc(f->metadata->size, GFP_USER);
                if (!mdata) {
                        mutex_unlock(&f->sem);
                        return -ENOMEM;
                }
                ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
                if (ret) {
                        mutex_unlock(&f->sem);
                        kfree(mdata);
                        return ret;
                }
                mutex_unlock(&f->sem);
                jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
                          __func__, mdatalen);
        }

        ri = jffs2_alloc_raw_inode();
        if (!ri) {
                if (S_ISLNK(inode->i_mode))
                        kfree(mdata);
                return -ENOMEM;
        }

        ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
                                  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
        if (ret) {
                jffs2_free_raw_inode(ri);
                if (S_ISLNK(inode->i_mode))
                        kfree(mdata);
                return ret;
        }
        mutex_lock(&f->sem);
        ivalid = iattr->ia_valid;

        ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
        ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
        ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
        ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

        ri->ino = cpu_to_je32(inode->i_ino);
        ri->version = cpu_to_je32(++f->highest_version);

        ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
                from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
        ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
                from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));

        if (ivalid & ATTR_MODE)
                ri->mode = cpu_to_jemode(iattr->ia_mode);
        else
                ri->mode = cpu_to_jemode(inode->i_mode);

        ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
        ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
        ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
        ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));

        ri->offset = cpu_to_je32(0);
        ri->csize = ri->dsize = cpu_to_je32(mdatalen);
        ri->compr = JFFS2_COMPR_NONE;
        if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
                /* It's an extension. Make it a hole node */
                ri->compr = JFFS2_COMPR_ZERO;
                ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
                ri->offset = cpu_to_je32(inode->i_size);
        } else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
                /* For truncate-to-zero, treat it as deletion because
                   it'll always be obsoleting all previous nodes */
                alloc_type = ALLOC_DELETION;
        }
        ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
        if (mdatalen)
                ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
        else
                ri->data_crc = cpu_to_je32(0);

        new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
        if (S_ISLNK(inode->i_mode))
                kfree(mdata);

        if (IS_ERR(new_metadata)) {
                jffs2_complete_reservation(c);
                jffs2_free_raw_inode(ri);
                mutex_unlock(&f->sem);
                return PTR_ERR(new_metadata);
        }
        /* It worked. Update the inode */
        inode->i_atime = ITIME(je32_to_cpu(ri->atime));
        inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
        inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
        inode->i_mode = jemode_to_cpu(ri->mode);
        i_uid_write(inode, je16_to_cpu(ri->uid));
        i_gid_write(inode, je16_to_cpu(ri->gid));

        old_metadata = f->metadata;

        if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
                jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

        if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
                jffs2_add_full_dnode_to_inode(c, f, new_metadata);
                inode->i_size = iattr->ia_size;
                inode->i_blocks = (inode->i_size + 511) >> 9;
                f->metadata = NULL;
        } else {
                f->metadata = new_metadata;
        }
        if (old_metadata) {
                jffs2_mark_node_obsolete(c, old_metadata->raw);
                jffs2_free_full_dnode(old_metadata);
        }
        jffs2_free_raw_inode(ri);

        mutex_unlock(&f->sem);
        jffs2_complete_reservation(c);

        /* We have to do the truncate_setsize() without f->sem held, since
           some pages may be locked and waiting for it in readpage().
           We are protected from a simultaneous write() extending i_size
           back past iattr->ia_size, because do_truncate() holds the
           generic inode semaphore. */
        if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
                truncate_setsize(inode, iattr->ia_size);
                inode->i_blocks = (inode->i_size + 511) >> 9;
        }

        return 0;
}
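
/*
 * Worked example for the hole-node path above (hypothetical numbers, not
 * part of the original source): extending a file from i_size == 100 bytes
 * to ia_size == 4096 writes a single node with compr == JFFS2_COMPR_ZERO,
 * offset == 100 and dsize == 3996, i.e. the new bytes are recorded as a
 * run of zeroes rather than as literal data on the flash.
 */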

int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct inode *inode = d_inode(dentry);
        int rc;

        rc = setattr_prepare(dentry, iattr);
        if (rc)
                return rc;

        rc = jffs2_do_setattr(inode, iattr);
        if (!rc && (iattr->ia_valid & ATTR_MODE))
                rc = posix_acl_chmod(inode, inode->i_mode);

        return rc;
}

int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
        unsigned long avail;

        buf->f_type = JFFS2_SUPER_MAGIC;
        buf->f_bsize = 1 << PAGE_SHIFT;
        buf->f_blocks = c->flash_size >> PAGE_SHIFT;
        buf->f_files = 0;
        buf->f_ffree = 0;
        buf->f_namelen = JFFS2_MAX_NAME_LEN;
        buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
        buf->f_fsid.val[1] = c->mtd->index;

        spin_lock(&c->erase_completion_lock);
        avail = c->dirty_size + c->free_size;
        if (avail > c->sector_size * c->resv_blocks_write)
                avail -= c->sector_size * c->resv_blocks_write;
        else
                avail = 0;
        spin_unlock(&c->erase_completion_lock);

        buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;

        return 0;
}
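
/*
 * Worked example for the free-space accounting above (hypothetical
 * geometry): with 64 KiB erase blocks, resv_blocks_write == 5 and
 * dirty_size + free_size == 1 MiB, the write reservation is 320 KiB,
 * so avail == 704 KiB and f_bfree == 176 blocks with 4 KiB pages.
 */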

void jffs2_evict_inode (struct inode *inode)
{
        /* We can forget about this inode for now - drop all
         * the nodelists associated with it, etc.
         */
        struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
        struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);

        jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
                  __func__, inode->i_ino, inode->i_mode);
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
        jffs2_do_clear_inode(c, f);
}

struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
{
        struct jffs2_inode_info *f;
        struct jffs2_sb_info *c;
        struct jffs2_raw_inode latest_node;
        union jffs2_device_node jdev;
        struct inode *inode;
        dev_t rdev = 0;
        int ret;

        jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        f = JFFS2_INODE_INFO(inode);
        c = JFFS2_SB_INFO(inode->i_sb);

        jffs2_init_inode_info(f);
        mutex_lock(&f->sem);

        ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
        if (ret)
                goto error;

        inode->i_mode = jemode_to_cpu(latest_node.mode);
        i_uid_write(inode, je16_to_cpu(latest_node.uid));
        i_gid_write(inode, je16_to_cpu(latest_node.gid));
        inode->i_size = je32_to_cpu(latest_node.isize);
        inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
        inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
        inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));

        set_nlink(inode, f->inocache->pino_nlink);

        inode->i_blocks = (inode->i_size + 511) >> 9;

        switch (inode->i_mode & S_IFMT) {

        case S_IFLNK:
                inode->i_op = &jffs2_symlink_inode_operations;
                inode->i_link = f->target;
                break;

        case S_IFDIR:
        {
                struct jffs2_full_dirent *fd;
                set_nlink(inode, 2); /* parent and '.' */

                for (fd=f->dents; fd; fd = fd->next) {
                        if (fd->type == DT_DIR && fd->ino)
                                inc_nlink(inode);
                }
                /* Root dir gets i_nlink 3 for some reason */
                if (inode->i_ino == 1)
                        inc_nlink(inode);

                inode->i_op = &jffs2_dir_inode_operations;
                inode->i_fop = &jffs2_dir_operations;
                break;
        }
        case S_IFREG:
                inode->i_op = &jffs2_file_inode_operations;
                inode->i_fop = &jffs2_file_operations;
                inode->i_mapping->a_ops = &jffs2_file_address_operations;
                inode->i_mapping->nrpages = 0;
                break;

        case S_IFBLK:
        case S_IFCHR:
                /* Read the device numbers from the media */
                if (f->metadata->size != sizeof(jdev.old_id) &&
                    f->metadata->size != sizeof(jdev.new_id)) {
                        pr_notice("Device node has strange size %d\n",
                                  f->metadata->size);
                        goto error_io;
                }
                jffs2_dbg(1, "Reading device numbers from flash\n");
                ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
                if (ret < 0) {
                        /* Eep */
                        pr_notice("Read device numbers for inode %lu failed\n",
                                  (unsigned long)inode->i_ino);
                        goto error;
                }
                if (f->metadata->size == sizeof(jdev.old_id))
                        rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
                else
                        rdev = new_decode_dev(je32_to_cpu(jdev.new_id));

        case S_IFSOCK:
        case S_IFIFO:
                inode->i_op = &jffs2_file_inode_operations;
                init_special_inode(inode, inode->i_mode, rdev);
                break;

        default:
                pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
                        __func__, inode->i_mode, (unsigned long)inode->i_ino);
        }

        mutex_unlock(&f->sem);

        jffs2_dbg(1, "jffs2_read_inode() returning\n");
        unlock_new_inode(inode);
        return inode;

error_io:
        ret = -EIO;
error:
        mutex_unlock(&f->sem);
        jffs2_do_clear_inode(c, f);
        iget_failed(inode);
        return ERR_PTR(ret);
}

void jffs2_dirty_inode(struct inode *inode, int flags)
{
        struct iattr iattr;

        if (!(inode->i_state & I_DIRTY_DATASYNC)) {
                jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
                          __func__, inode->i_ino);
                return;
        }

        jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
                  __func__, inode->i_ino);

        iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
        iattr.ia_mode = inode->i_mode;
        iattr.ia_uid = inode->i_uid;
        iattr.ia_gid = inode->i_gid;
        iattr.ia_atime = inode->i_atime;
        iattr.ia_mtime = inode->i_mtime;
        iattr.ia_ctime = inode->i_ctime;

        jffs2_do_setattr(inode, &iattr);
}

int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
{
        struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

        if (c->flags & JFFS2_SB_FLAG_RO && !sb_rdonly(sb))
                return -EROFS;

        /* We stop if it was running, then restart if it needs to.
           This also catches the case where it was stopped and this
           is just a remount to restart it.
           Flush the writebuffer, if necessary, else we lose it */
        if (!sb_rdonly(sb)) {
                jffs2_stop_garbage_collect_thread(c);
                mutex_lock(&c->alloc_sem);
                jffs2_flush_wbuf_pad(c);
                mutex_unlock(&c->alloc_sem);
        }

        if (!(*flags & SB_RDONLY))
                jffs2_start_garbage_collect_thread(c);

        *flags |= SB_NOATIME;
        return 0;
}

/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
   fill in the raw_inode while you're at it. */
struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
{
        struct inode *inode;
        struct super_block *sb = dir_i->i_sb;
        struct jffs2_sb_info *c;
        struct jffs2_inode_info *f;
        int ret;

        jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
                  __func__, dir_i->i_ino, mode);

        c = JFFS2_SB_INFO(sb);

        inode = new_inode(sb);

        if (!inode)
                return ERR_PTR(-ENOMEM);

        f = JFFS2_INODE_INFO(inode);
        jffs2_init_inode_info(f);
        mutex_lock(&f->sem);

        memset(ri, 0, sizeof(*ri));
        /* Set OS-specific defaults for new inodes */
        ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid()));

        if (dir_i->i_mode & S_ISGID) {
                ri->gid = cpu_to_je16(i_gid_read(dir_i));
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
        } else {
                ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid()));
        }

        /* POSIX ACLs have to be processed now, at least partly.
           The umask is only applied if there's no default ACL */
        ret = jffs2_init_acl_pre(dir_i, inode, &mode);
        if (ret) {
                mutex_unlock(&f->sem);
                make_bad_inode(inode);
                iput(inode);
                return ERR_PTR(ret);
        }
        ret = jffs2_do_new_inode (c, f, mode, ri);
        if (ret) {
                mutex_unlock(&f->sem);
                make_bad_inode(inode);
                iput(inode);
                return ERR_PTR(ret);
        }
        set_nlink(inode, 1);
        inode->i_ino = je32_to_cpu(ri->ino);
        inode->i_mode = jemode_to_cpu(ri->mode);
        i_gid_write(inode, je16_to_cpu(ri->gid));
        i_uid_write(inode, je16_to_cpu(ri->uid));
        inode->i_atime = inode->i_ctime = inode->i_mtime = current_time(inode);
        ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));

        inode->i_blocks = 0;
        inode->i_size = 0;

        if (insert_inode_locked(inode) < 0) {
                mutex_unlock(&f->sem);
                make_bad_inode(inode);
                iput(inode);
                return ERR_PTR(-EINVAL);
        }

        return inode;
}

static int calculate_inocache_hashsize(uint32_t flash_size)
{
        /*
         * Pick an inocache hash size based on the size of the medium.
         * Count how many megabytes we're dealing with, use twice that
         * many hash buckets, rounded down to a multiple of 64, and keep
         * the result within sensible bounds.
         */

        int size_mb = flash_size / 1024 / 1024;
        int hashsize = (size_mb * 2) & ~0x3f;

        if (hashsize < INOCACHE_HASHSIZE_MIN)
                return INOCACHE_HASHSIZE_MIN;
        if (hashsize > INOCACHE_HASHSIZE_MAX)
                return INOCACHE_HASHSIZE_MAX;

        return hashsize;
}
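
/*
 * Worked examples (hypothetical sizes): a 64 MiB medium gives size_mb == 64,
 * so hashsize == (128 & ~0x3f) == 128 buckets; a 16 MiB medium gives
 * (32 & ~0x3f) == 0, which is clamped up to INOCACHE_HASHSIZE_MIN; a very
 * large medium is clamped down to INOCACHE_HASHSIZE_MAX.
 */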

int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
{
        struct jffs2_sb_info *c;
        struct inode *root_i;
        int ret;
        size_t blocks;

        c = JFFS2_SB_INFO(sb);

        /* Do not support the MLC nand */
        if (c->mtd->type == MTD_MLCNANDFLASH)
                return -EINVAL;

#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
        if (c->mtd->type == MTD_NANDFLASH) {
                pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
                return -EINVAL;
        }
        if (c->mtd->type == MTD_DATAFLASH) {
                pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n");
                return -EINVAL;
        }
#endif

        c->flash_size = c->mtd->size;
        c->sector_size = c->mtd->erasesize;
        blocks = c->flash_size / c->sector_size;

        /*
         * Size alignment check
         */
        if ((c->sector_size * blocks) != c->flash_size) {
                c->flash_size = c->sector_size * blocks;
                pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n",
                        c->flash_size / 1024);
        }

        if (c->flash_size < 5*c->sector_size) {
                pr_err("Too few erase blocks (%d)\n",
                       c->flash_size / c->sector_size);
                return -EINVAL;
        }
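
        /*
         * Worked example (hypothetical geometry): an 8 MiB + 4 KiB partition
         * with 64 KiB erase blocks gives blocks == 128 (integer division), so
         * flash_size is trimmed back to exactly 8 MiB above; anything smaller
         * than 5 * 64 KiB == 320 KiB is rejected outright.
         */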

        c->cleanmarker_size = sizeof(struct jffs2_unknown_node);

        /* NAND (or other bizarre) flash... do setup accordingly */
        ret = jffs2_flash_setup(c);
        if (ret)
                return ret;

        c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
        c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
        if (!c->inocache_list) {
                ret = -ENOMEM;
                goto out_wbuf;
        }

        jffs2_init_xattr_subsystem(c);

        if ((ret = jffs2_do_mount_fs(c)))
                goto out_inohash;

        jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
        root_i = jffs2_iget(sb, 1);
        if (IS_ERR(root_i)) {
                jffs2_dbg(1, "get root inode failed\n");
                ret = PTR_ERR(root_i);
                goto out_root;
        }

        ret = -ENOMEM;

        jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
        sb->s_root = d_make_root(root_i);
        if (!sb->s_root)
                goto out_root;

        sb->s_maxbytes = 0xFFFFFFFF;
        sb->s_blocksize = PAGE_SIZE;
        sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = JFFS2_SUPER_MAGIC;
        if (!sb_rdonly(sb))
                jffs2_start_garbage_collect_thread(c);
        return 0;

out_root:
        jffs2_free_ino_caches(c);
        jffs2_free_raw_node_refs(c);
        kvfree(c->blocks);
out_inohash:
        jffs2_clear_xattr_subsystem(c);
        kfree(c->inocache_list);
out_wbuf:
        jffs2_flash_cleanup(c);

        return ret;
}

void jffs2_gc_release_inode(struct jffs2_sb_info *c,
                            struct jffs2_inode_info *f)
{
        iput(OFNI_EDONI_2SFFJ(f));
}

struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
                                              int inum, int unlinked)
{
        struct inode *inode;
        struct jffs2_inode_cache *ic;

        if (unlinked) {
                /* The inode has zero nlink but its nodes weren't yet marked
                   obsolete. This has to be because we're still waiting for
                   the final (close() and) iput() to happen.

                   There's a possibility that the final iput() could have
                   happened while we were contemplating. In order to ensure
                   that we don't cause a new read_inode() (which would fail)
                   for the inode in question, we use ilookup() in this case
                   instead of iget().

                   The nlink can't _become_ zero at this point because we're
                   holding the alloc_sem, and jffs2_do_unlink() would also
                   need that while decrementing nlink on any inode.
                */
                inode = ilookup(OFNI_BS_2SFFJ(c), inum);
                if (!inode) {
                        jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
                                  inum);

                        spin_lock(&c->inocache_lock);
                        ic = jffs2_get_ino_cache(c, inum);
                        if (!ic) {
                                jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
                                          inum);
                                spin_unlock(&c->inocache_lock);
                                return NULL;
                        }
                        if (ic->state != INO_STATE_CHECKEDABSENT) {
                                /* Wait for progress. Don't just loop */
                                jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
                                          ic->ino, ic->state);
                                sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
                        } else {
                                spin_unlock(&c->inocache_lock);
                        }

                        return NULL;
                }
        } else {
                /* Inode has links to it still; they're not going away because
                   jffs2_do_unlink() would need the alloc_sem and we have it.
                   Just iget() it, and if read_inode() is necessary that's OK.
                */
                inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
                if (IS_ERR(inode))
                        return ERR_CAST(inode);
        }
        if (is_bad_inode(inode)) {
                pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
                          inum, unlinked);
                /* NB. This will happen again. We need to do something appropriate here. */
                iput(inode);
                return ERR_PTR(-EIO);
        }

        return JFFS2_INODE_INFO(inode);
}

unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
                                   struct jffs2_inode_info *f,
                                   unsigned long offset,
                                   unsigned long *priv)
{
        struct inode *inode = OFNI_EDONI_2SFFJ(f);
        struct page *pg;

        pg = read_cache_page(inode->i_mapping, offset >> PAGE_SHIFT,
                             (void *)jffs2_do_readpage_unlock, inode);
        if (IS_ERR(pg))
                return (void *)pg;

        *priv = (unsigned long)pg;
        return kmap(pg);
}

void jffs2_gc_release_page(struct jffs2_sb_info *c,
                           unsigned char *ptr,
                           unsigned long *priv)
{
        struct page *pg = (void *)*priv;

        kunmap(pg);
        put_page(pg);
}

static int jffs2_flash_setup(struct jffs2_sb_info *c) {
        int ret = 0;

        if (jffs2_cleanmarker_oob(c)) {
                /* NAND flash... do setup accordingly */
                ret = jffs2_nand_flash_setup(c);
                if (ret)
                        return ret;
        }

        /* and Dataflash */
        if (jffs2_dataflash(c)) {
                ret = jffs2_dataflash_setup(c);
                if (ret)
                        return ret;
        }

        /* and Intel "Sibley" flash */
        if (jffs2_nor_wbuf_flash(c)) {
                ret = jffs2_nor_wbuf_flash_setup(c);
                if (ret)
                        return ret;
        }

        /* and an UBI volume */
        if (jffs2_ubivol(c)) {
                ret = jffs2_ubivol_setup(c);
                if (ret)
                        return ret;
        }

        return ret;
}

void jffs2_flash_cleanup(struct jffs2_sb_info *c) {

        if (jffs2_cleanmarker_oob(c)) {
                jffs2_nand_flash_cleanup(c);
        }

        /* and DataFlash */
        if (jffs2_dataflash(c)) {
                jffs2_dataflash_cleanup(c);
        }

        /* and Intel "Sibley" flash */
        if (jffs2_nor_wbuf_flash(c)) {
                jffs2_nor_wbuf_flash_cleanup(c);
        }

        /* and an UBI volume */
        if (jffs2_ubivol(c)) {
                jffs2_ubivol_cleanup(c);
        }
}