stat: handle idmapped mounts
The generic_fillattr() helper fills in the basic attributes associated with
an inode. Enable it to handle idmapped mounts: if the inode is accessed
through an idmapped mount, map its uid and gid into the mount's user
namespace before they are stored in the kstat. If the initial user namespace
is passed, nothing changes, so non-idmapped mounts see identical behavior as
before.

Link: https://lore.kernel.org/r/20210121131959.646623-12-christian.brauner@ubuntu.com
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Howells <dhowells@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: linux-fsdevel@vger.kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: James Morris <jamorris@linux.microsoft.com>
Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
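For illustration (not part of the patch itself): the new first argument is the
user namespace of the mount the inode was looked up through. A filesystem that
is not yet idmapped-mount aware keeps its current behavior by passing
&init_user_ns, while the VFS fallback in vfs_getattr_nosec() passes
mnt_user_ns(path->mnt) so that the uid and gid reported in the kstat are
mapped into the mount's user namespace. A minimal sketch of a ->getattr()
implementation under the new calling convention follows; foo_getattr() is a
hypothetical example, not a function added by this patch:

	static int foo_getattr(const struct path *path, struct kstat *stat,
			       u32 request_mask, unsigned int query_flags)
	{
		struct inode *inode = d_inode(path->dentry);

		/*
		 * This filesystem does not act on idmapped mounts itself, so
		 * fill the kstat from the raw inode; &init_user_ns keeps the
		 * uid and gid unmapped, exactly as before this change.
		 */
		generic_fillattr(&init_user_ns, inode, stat);
		return 0;
	}
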
Parent: 71bc356f93
Commit: 0d56a4518d

@@ -1027,7 +1027,7 @@ v9fs_vfs_getattr(const struct path *path, struct kstat *stat,
 	p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
 	v9ses = v9fs_dentry2v9ses(dentry);
 	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
-		generic_fillattr(d_inode(dentry), stat);
+		generic_fillattr(&init_user_ns, d_inode(dentry), stat);
 		return 0;
 	}
 	fid = v9fs_fid_lookup(dentry);
@@ -1040,7 +1040,7 @@ v9fs_vfs_getattr(const struct path *path, struct kstat *stat,
 		return PTR_ERR(st);
 
 	v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb, 0);
-	generic_fillattr(d_inode(dentry), stat);
+	generic_fillattr(&init_user_ns, d_inode(dentry), stat);
 
 	p9stat_free(st);
 	kfree(st);
@@ -468,7 +468,7 @@ v9fs_vfs_getattr_dotl(const struct path *path, struct kstat *stat,
 	p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
 	v9ses = v9fs_dentry2v9ses(dentry);
 	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
-		generic_fillattr(d_inode(dentry), stat);
+		generic_fillattr(&init_user_ns, d_inode(dentry), stat);
 		return 0;
 	}
 	fid = v9fs_fid_lookup(dentry);
@@ -485,7 +485,7 @@ v9fs_vfs_getattr_dotl(const struct path *path, struct kstat *stat,
 		return PTR_ERR(st);
 
 	v9fs_stat2inode_dotl(st, d_inode(dentry), 0);
-	generic_fillattr(d_inode(dentry), stat);
+	generic_fillattr(&init_user_ns, d_inode(dentry), stat);
 	/* Change block size to what the server returned */
 	stat->blksize = st->st_blksize;
 
@@ -745,7 +745,7 @@ int afs_getattr(const struct path *path, struct kstat *stat,
 
 	do {
 		read_seqbegin_or_lock(&vnode->cb_lock, &seq);
-		generic_fillattr(inode, stat);
+		generic_fillattr(&init_user_ns, inode, stat);
 		if (test_bit(AFS_VNODE_SILLY_DELETED, &vnode->flags) &&
 		    stat->nlink > 0)
 			stat->nlink -= 1;
@@ -8842,7 +8842,7 @@ static int btrfs_getattr(const struct path *path, struct kstat *stat,
 				  STATX_ATTR_IMMUTABLE |
 				  STATX_ATTR_NODUMP);
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	stat->dev = BTRFS_I(inode)->root->anon_dev;
 
 	spin_lock(&BTRFS_I(inode)->lock);
@@ -2385,7 +2385,7 @@ int ceph_getattr(const struct path *path, struct kstat *stat,
 			return err;
 	}
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	stat->ino = ceph_present_inode(inode);
 
 	/*
@@ -2408,7 +2408,7 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
 			return rc;
 	}
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	stat->blksize = cifs_sb->ctx->bsize;
 	stat->ino = CIFS_I(inode)->uniqueid;
 
@@ -256,7 +256,7 @@ int coda_getattr(const struct path *path, struct kstat *stat,
 {
 	int err = coda_revalidate_inode(d_inode(path->dentry));
 	if (!err)
-		generic_fillattr(d_inode(path->dentry), stat);
+		generic_fillattr(&init_user_ns, d_inode(path->dentry), stat);
 	return err;
 }
 
@@ -977,7 +977,7 @@ static int ecryptfs_getattr_link(const struct path *path, struct kstat *stat,
 
 	mount_crypt_stat = &ecryptfs_superblock_to_private(
 						dentry->d_sb)->mount_crypt_stat;
-	generic_fillattr(d_inode(dentry), stat);
+	generic_fillattr(&init_user_ns, d_inode(dentry), stat);
 	if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
 		char *target;
 		size_t targetsiz;
@@ -1005,7 +1005,7 @@ static int ecryptfs_getattr(const struct path *path, struct kstat *stat,
 	if (!rc) {
 		fsstack_copy_attr_all(d_inode(dentry),
 				      ecryptfs_inode_to_lower(d_inode(dentry)));
-		generic_fillattr(d_inode(dentry), stat);
+		generic_fillattr(&init_user_ns, d_inode(dentry), stat);
 		stat->blocks = lower_stat.blocks;
 	}
 	return rc;
@@ -343,7 +343,7 @@ int erofs_getattr(const struct path *path, struct kstat *stat,
 	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
 				  STATX_ATTR_IMMUTABLE);
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	return 0;
 }
 
@@ -273,7 +273,7 @@ int exfat_getattr(const struct path *path, struct kstat *stat,
 	struct inode *inode = d_backing_inode(path->dentry);
 	struct exfat_inode_info *ei = EXFAT_I(inode);
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	exfat_truncate_atime(&stat->atime);
 	stat->result_mask |= STATX_BTIME;
 	stat->btime.tv_sec = ei->i_crtime.tv_sec;
@@ -1660,7 +1660,7 @@ int ext2_getattr(const struct path *path, struct kstat *stat,
 			STATX_ATTR_IMMUTABLE |
 			STATX_ATTR_NODUMP);
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	return 0;
 }
 
@@ -5571,7 +5571,7 @@ int ext4_getattr(const struct path *path, struct kstat *stat,
 				  STATX_ATTR_NODUMP |
 				  STATX_ATTR_VERITY);
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	return 0;
 }
 
@@ -820,7 +820,7 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
 				  STATX_ATTR_NODUMP |
 				  STATX_ATTR_VERITY);
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 
 	/* we need to show initial sectors used for inline_data/dentries */
 	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
@@ -398,7 +398,7 @@ int fat_getattr(const struct path *path, struct kstat *stat,
 		u32 request_mask, unsigned int flags)
 {
 	struct inode *inode = d_inode(path->dentry);
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	stat->blksize = MSDOS_SB(inode->i_sb)->cluster_size;
 
 	if (MSDOS_SB(inode->i_sb)->options.nfs == FAT_NFS_NOSTALE_RO) {
@@ -1087,7 +1087,7 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file,
 		forget_all_cached_acls(inode);
 		err = fuse_do_getattr(inode, stat, file);
 	} else if (stat) {
-		generic_fillattr(inode, stat);
+		generic_fillattr(&init_user_ns, inode, stat);
 		stat->mode = fi->orig_i_mode;
 		stat->ino = fi->orig_ino;
 	}
@@ -2050,7 +2050,7 @@ static int gfs2_getattr(const struct path *path, struct kstat *stat,
 				  STATX_ATTR_IMMUTABLE |
 				  STATX_ATTR_NODUMP);
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 
 	if (gfs2_holder_initialized(&gh))
 		gfs2_glock_dq_uninit(&gh);
@@ -286,7 +286,7 @@ int hfsplus_getattr(const struct path *path, struct kstat *stat,
 	stat->attributes_mask |= STATX_ATTR_APPEND | STATX_ATTR_IMMUTABLE |
 				 STATX_ATTR_NODUMP;
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	return 0;
 }
 
@@ -193,7 +193,7 @@ int kernfs_iop_getattr(const struct path *path, struct kstat *stat,
 	kernfs_refresh_inode(kn, inode);
 	mutex_unlock(&kernfs_mutex);
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	return 0;
 }
 
@@ -31,7 +31,7 @@ int simple_getattr(const struct path *path, struct kstat *stat,
 		   u32 request_mask, unsigned int query_flags)
 {
 	struct inode *inode = d_inode(path->dentry);
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9);
 	return 0;
 }
@@ -1304,7 +1304,7 @@ static int empty_dir_getattr(const struct path *path, struct kstat *stat,
 			     u32 request_mask, unsigned int query_flags)
 {
 	struct inode *inode = d_inode(path->dentry);
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	return 0;
 }
 
@@ -658,7 +658,7 @@ int minix_getattr(const struct path *path, struct kstat *stat,
 	struct super_block *sb = path->dentry->d_sb;
 	struct inode *inode = d_inode(path->dentry);
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	if (INODE_VERSION(inode) == MINIX_V1)
 		stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb);
 	else
@@ -857,7 +857,7 @@ out_no_revalidate:
 	/* Only return attributes that were revalidated. */
 	stat->result_mask &= request_mask;
 out_no_update:
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
 	if (S_ISDIR(inode->i_mode))
 		stat->blksize = NFS_SERVER(inode)->dtsize;
@@ -213,7 +213,7 @@ nfs_namespace_getattr(const struct path *path, struct kstat *stat,
 {
 	if (NFS_FH(d_inode(path->dentry))->size != 0)
 		return nfs_getattr(path, stat, request_mask, query_flags);
-	generic_fillattr(d_inode(path->dentry), stat);
+	generic_fillattr(&init_user_ns, d_inode(path->dentry), stat);
 	return 0;
 }
 
@@ -1313,7 +1313,7 @@ int ocfs2_getattr(const struct path *path, struct kstat *stat,
 		goto bail;
 	}
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	/*
 	 * If there is inline data in the inode, the inode will normally not
 	 * have data blocks allocated (it may have an external xattr block).
@@ -903,7 +903,7 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
 	ret = orangefs_inode_getattr(inode,
 	    request_mask & STATX_SIZE ? ORANGEFS_GETATTR_SIZE : 0);
 	if (ret == 0) {
-		generic_fillattr(inode, stat);
+		generic_fillattr(&init_user_ns, inode, stat);
 
 		/* override block size reported to stat */
 		if (!(request_mask & STATX_SIZE))
@@ -1934,7 +1934,7 @@ int pid_getattr(const struct path *path, struct kstat *stat,
 	struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb);
 	struct task_struct *task;
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 
 	stat->uid = GLOBAL_ROOT_UID;
 	stat->gid = GLOBAL_ROOT_GID;
@@ -3803,7 +3803,7 @@ static int proc_task_getattr(const struct path *path, struct kstat *stat,
 {
 	struct inode *inode = d_inode(path->dentry);
 	struct task_struct *p = get_proc_task(inode);
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 
 	if (p) {
 		stat->nlink += get_nr_threads(p);
@@ -145,7 +145,7 @@ static int proc_getattr(const struct path *path, struct kstat *stat,
 		}
 	}
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	return 0;
 }
 
@@ -297,7 +297,7 @@ static int proc_tgid_net_getattr(const struct path *path, struct kstat *stat,
 
 	net = get_proc_task_net(inode);
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 
 	if (net != NULL) {
 		stat->nlink = net->proc_net->nlink;
@@ -840,7 +840,7 @@ static int proc_sys_getattr(const struct path *path, struct kstat *stat,
 	if (IS_ERR(head))
 		return PTR_ERR(head);
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	if (table)
 		stat->mode = (stat->mode & S_IFMT) | table->mode;
 
@@ -311,7 +311,7 @@ void __init proc_root_init(void)
 static int proc_root_getattr(const struct path *path, struct kstat *stat,
 			     u32 request_mask, unsigned int query_flags)
 {
-	generic_fillattr(d_inode(path->dentry), stat);
+	generic_fillattr(&init_user_ns, d_inode(path->dentry), stat);
 	stat->nlink = proc_root.nlink + nr_processes();
 	return 0;
 }
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -26,21 +26,29 @@
 
 /**
  * generic_fillattr - Fill in the basic attributes from the inode struct
- * @inode: Inode to use as the source
- * @stat: Where to fill in the attributes
+ * @mnt_userns:	user namespace of the mount the inode was found from
+ * @inode:	Inode to use as the source
+ * @stat:	Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure. This is the default if no getattr inode
 * operation is supplied.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then
+ * take care to map the inode according to @mnt_userns before filling in the
+ * uid and gid fields. On non-idmapped mounts or if permission checking is to be
+ * performed on the raw inode simply pass init_user_ns.
 */
-void generic_fillattr(struct inode *inode, struct kstat *stat)
+void generic_fillattr(struct user_namespace *mnt_userns, struct inode *inode,
+		      struct kstat *stat)
 {
 	stat->dev = inode->i_sb->s_dev;
 	stat->ino = inode->i_ino;
 	stat->mode = inode->i_mode;
 	stat->nlink = inode->i_nlink;
-	stat->uid = inode->i_uid;
-	stat->gid = inode->i_gid;
+	stat->uid = i_uid_into_mnt(mnt_userns, inode);
+	stat->gid = i_gid_into_mnt(mnt_userns, inode);
 	stat->rdev = inode->i_rdev;
 	stat->size = i_size_read(inode);
 	stat->atime = inode->i_atime;
@@ -87,7 +95,7 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
 		return inode->i_op->getattr(path, stat, request_mask,
 					    query_flags);
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(mnt_user_ns(path->mnt), inode, stat);
 	return 0;
 }
 EXPORT_SYMBOL(vfs_getattr_nosec);
@@ -445,7 +445,7 @@ int sysv_getattr(const struct path *path, struct kstat *stat,
 		 u32 request_mask, unsigned int flags)
 {
 	struct super_block *s = path->dentry->d_sb;
-	generic_fillattr(d_inode(path->dentry), stat);
+	generic_fillattr(&init_user_ns, d_inode(path->dentry), stat);
 	stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size);
 	stat->blksize = s->s_blocksize;
 	return 0;
@@ -1589,7 +1589,7 @@ int ubifs_getattr(const struct path *path, struct kstat *stat,
 				  STATX_ATTR_ENCRYPTED |
 				  STATX_ATTR_IMMUTABLE);
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	stat->blksize = UBIFS_BLOCK_SIZE;
 	stat->size = ui->ui_size;
 
@@ -159,7 +159,7 @@ static int udf_symlink_getattr(const struct path *path, struct kstat *stat,
 	struct inode *inode = d_backing_inode(dentry);
 	struct page *page;
 
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 	page = read_mapping_page(inode->i_mapping, 0, NULL);
 	if (IS_ERR(page))
 		return PTR_ERR(page);
@@ -233,7 +233,7 @@ int vboxsf_getattr(const struct path *path, struct kstat *kstat,
 	if (err)
 		return err;
 
-	generic_fillattr(d_inode(dentry), kstat);
+	generic_fillattr(&init_user_ns, d_inode(dentry), kstat);
 	return 0;
 }
 
@@ -3154,7 +3154,7 @@ extern int __page_symlink(struct inode *inode, const char *symname, int len,
 extern int page_symlink(struct inode *inode, const char *symname, int len);
 extern const struct inode_operations page_symlink_inode_operations;
 extern void kfree_link(void *);
-extern void generic_fillattr(struct inode *, struct kstat *);
+void generic_fillattr(struct user_namespace *, struct inode *, struct kstat *);
 extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
 extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
 void __inode_add_bytes(struct inode *inode, loff_t bytes);
@@ -1072,7 +1072,7 @@ static int shmem_getattr(const struct path *path, struct kstat *stat,
 		shmem_recalc_inode(inode);
 		spin_unlock_irq(&info->lock);
 	}
-	generic_fillattr(inode, stat);
+	generic_fillattr(&init_user_ns, inode, stat);
 
 	if (is_huge_enabled(sb_info))
 		stat->blksize = HPAGE_PMD_SIZE;