// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include <linux/security.h>
#include "xattr.h"

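/*
 * xattr_iter is the common cursor shared by all xattr walkers below:
 * it tracks the currently mapped metadata page of @sb (and its kernel
 * mapping) plus the block address and in-block offset being parsed.
 */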
struct xattr_iter {
        struct super_block *sb;
        struct page *page;
        void *kaddr;

        erofs_blk_t blkaddr;
        unsigned int ofs;
};

static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
{
        /* the only user of kunmap() is 'init_inode_xattrs' */
        if (!atomic)
                kunmap(it->page);
        else
                kunmap_atomic(it->kaddr);

        unlock_page(it->page);
        put_page(it->page);
}

static inline void xattr_iter_end_final(struct xattr_iter *it)
{
        if (!it->page)
                return;

        xattr_iter_end(it, true);
}

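/*
 * Lazily parse the on-disk xattr ibody header of @inode and cache the
 * shared xattr id array in erofs_inode. Concurrent callers serialize on
 * EROFS_I_BL_XATTR_BIT, and EROFS_I_EA_INITED_BIT marks completion so
 * the work is done at most once per inode.
 */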
static int init_inode_xattrs(struct inode *inode)
{
        struct erofs_inode *const vi = EROFS_I(inode);
        struct xattr_iter it;
        unsigned int i;
        struct erofs_xattr_ibody_header *ih;
        struct super_block *sb;
        struct erofs_sb_info *sbi;
        bool atomic_map;
        int ret = 0;

        /* the common case: xattrs of this inode are already initialized */
        if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
                return 0;

        if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
                return -ERESTARTSYS;

        /* someone has initialized xattrs for us? */
        if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
                goto out_unlock;

        /*
         * bypass all xattr operations if ->xattr_isize is not greater than
         * sizeof(struct erofs_xattr_ibody_header), in detail:
         * 1) it is not large enough to contain erofs_xattr_ibody_header, so
         *    ->xattr_isize should be 0 (it means no xattr);
         * 2) it is exactly sizeof(struct erofs_xattr_ibody_header), which is
         *    undefined on-disk right now (may be used later with a new sb
         *    feature).
         */
        if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
                erofs_err(inode->i_sb,
                          "xattr_isize %d of nid %llu is not supported yet",
                          vi->xattr_isize, vi->nid);
                ret = -EOPNOTSUPP;
                goto out_unlock;
        } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
                if (vi->xattr_isize) {
                        erofs_err(inode->i_sb,
                                  "bogus xattr ibody @ nid %llu", vi->nid);
                        DBG_BUGON(1);
                        ret = -EFSCORRUPTED;
                        goto out_unlock;        /* xattr ondisk layout error */
                }
                ret = -ENOATTR;
                goto out_unlock;
        }

        sb = inode->i_sb;
        sbi = EROFS_SB(sb);
        it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
        it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);

        it.page = erofs_get_meta_page(sb, it.blkaddr);
        if (IS_ERR(it.page)) {
                ret = PTR_ERR(it.page);
                goto out_unlock;
        }

        /* read in shared xattr array (non-atomic, see kmalloc below) */
        it.kaddr = kmap(it.page);
        atomic_map = false;

        ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);

        vi->xattr_shared_count = ih->h_shared_count;
        vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
                                                sizeof(uint), GFP_KERNEL);
        if (!vi->xattr_shared_xattrs) {
                xattr_iter_end(&it, atomic_map);
                ret = -ENOMEM;
                goto out_unlock;
        }

        /* let's skip the ibody header */
        it.ofs += sizeof(struct erofs_xattr_ibody_header);

        for (i = 0; i < vi->xattr_shared_count; ++i) {
                if (it.ofs >= EROFS_BLKSIZ) {
                        /* cannot be unaligned */
                        DBG_BUGON(it.ofs != EROFS_BLKSIZ);
                        xattr_iter_end(&it, atomic_map);

                        it.page = erofs_get_meta_page(sb, ++it.blkaddr);
                        if (IS_ERR(it.page)) {
                                kfree(vi->xattr_shared_xattrs);
                                vi->xattr_shared_xattrs = NULL;
                                ret = PTR_ERR(it.page);
                                goto out_unlock;
                        }

                        it.kaddr = kmap_atomic(it.page);
                        atomic_map = true;
                        it.ofs = 0;
                }
                vi->xattr_shared_xattrs[i] =
                        le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
                it.ofs += sizeof(__le32);
        }
        xattr_iter_end(&it, atomic_map);

        set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);

out_unlock:
        clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
        return ret;
}

/*
 * the general idea of these return values is:
 * if    0 is returned, go on processing the current xattr;
 *       1 (> 0) is returned, skip this round to process the next xattr;
 *    -err (< 0) is returned, an error (maybe -ENOATTR) occurred
 *               and needs to be handled
 */
struct xattr_iter_handlers {
        int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
        int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
                    unsigned int len);
        int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
        void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
                      unsigned int len);
};

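/*
 * If the in-block offset has run past EROFS_BLKSIZ, release the current
 * metadata page and map the block that @ofs now falls into, adjusting
 * blkaddr/ofs so the caller can keep parsing across block boundaries.
 */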
static inline int xattr_iter_fixup(struct xattr_iter *it)
{
        if (it->ofs < EROFS_BLKSIZ)
                return 0;

        xattr_iter_end(it, true);

        it->blkaddr += erofs_blknr(it->ofs);

        it->page = erofs_get_meta_page(it->sb, it->blkaddr);
        if (IS_ERR(it->page)) {
                int err = PTR_ERR(it->page);

                it->page = NULL;
                return err;
        }

        it->kaddr = kmap_atomic(it->page);
        it->ofs = erofs_blkoff(it->ofs);
        return 0;
}

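/*
 * Position the iterator at the first inline xattr entry of @inode (right
 * after the per-inode xattr header) and return the number of remaining
 * inline xattr bytes, or a negative errno (e.g. -ENOATTR when the inode
 * carries no inline xattrs).
 */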
static int inline_xattr_iter_begin(struct xattr_iter *it,
                                   struct inode *inode)
{
        struct erofs_inode *const vi = EROFS_I(inode);
        struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
        unsigned int xattr_header_sz, inline_xattr_ofs;

        xattr_header_sz = inlinexattr_header_size(inode);
        if (xattr_header_sz >= vi->xattr_isize) {
                DBG_BUGON(xattr_header_sz > vi->xattr_isize);
                return -ENOATTR;
        }

        inline_xattr_ofs = vi->inode_isize + xattr_header_sz;

        it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
        it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);

        it->page = erofs_get_meta_page(inode->i_sb, it->blkaddr);
        if (IS_ERR(it->page))
                return PTR_ERR(it->page);

        it->kaddr = kmap_atomic(it->page);
        return vi->xattr_isize - xattr_header_sz;
}

/*
 * Regardless of success or failure, `xattr_foreach' will end up with
 * `ofs' pointing to the next xattr item rather than an arbitrary position.
 */
static int xattr_foreach(struct xattr_iter *it,
                         const struct xattr_iter_handlers *op,
                         unsigned int *tlimit)
{
        struct erofs_xattr_entry entry;
        unsigned int value_sz, processed, slice;
        int err;

        /* 0. fixup blkaddr, ofs, ipage */
        err = xattr_iter_fixup(it);
        if (err)
                return err;

        /*
         * 1. read the xattr entry into memory; since entries are
         *    EROFS_XATTR_ALIGNed, the whole entry lies within the
         *    current page
         */
        entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
        if (tlimit) {
                unsigned int entry_sz = erofs_xattr_entry_size(&entry);

                /* xattr on-disk corruption: xattr entry beyond xattr_isize */
                if (*tlimit < entry_sz) {
                        DBG_BUGON(1);
                        return -EFSCORRUPTED;
                }
                *tlimit -= entry_sz;
        }

        it->ofs += sizeof(struct erofs_xattr_entry);
        value_sz = le16_to_cpu(entry.e_value_size);

        /* handle entry */
        err = op->entry(it, &entry);
        if (err) {
                it->ofs += entry.e_name_len + value_sz;
                goto out;
        }

        /* 2. handle xattr name (ofs will finally be at the end of name) */
        processed = 0;

        while (processed < entry.e_name_len) {
                if (it->ofs >= EROFS_BLKSIZ) {
                        DBG_BUGON(it->ofs > EROFS_BLKSIZ);

                        err = xattr_iter_fixup(it);
                        if (err)
                                goto out;
                        it->ofs = 0;
                }

                slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
                              entry.e_name_len - processed);

                /* handle name */
                err = op->name(it, processed, it->kaddr + it->ofs, slice);
                if (err) {
                        it->ofs += entry.e_name_len - processed + value_sz;
                        goto out;
                }

                it->ofs += slice;
                processed += slice;
        }

        /* 3. handle xattr value */
        processed = 0;

        if (op->alloc_buffer) {
                err = op->alloc_buffer(it, value_sz);
                if (err) {
                        it->ofs += value_sz;
                        goto out;
                }
        }

        while (processed < value_sz) {
                if (it->ofs >= EROFS_BLKSIZ) {
                        DBG_BUGON(it->ofs > EROFS_BLKSIZ);

                        err = xattr_iter_fixup(it);
                        if (err)
                                goto out;
                        it->ofs = 0;
                }

                slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
                              value_sz - processed);
                op->value(it, processed, it->kaddr + it->ofs, slice);
                it->ofs += slice;
                processed += slice;
        }

out:
        /* xattrs should be 4-byte aligned (on-disk constraint) */
        it->ofs = EROFS_XATTR_ALIGN(it->ofs);
        return err < 0 ? err : 0;
}

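/* state for erofs_getxattr(): the xattr being looked up and its output buffer */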
struct getxattr_iter {
        struct xattr_iter it;

        char *buffer;
        int buffer_size, index;
        struct qstr name;
};

static int xattr_entrymatch(struct xattr_iter *_it,
                            struct erofs_xattr_entry *entry)
{
        struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

        return (it->index != entry->e_name_index ||
                it->name.len != entry->e_name_len) ? -ENOATTR : 0;
}

static int xattr_namematch(struct xattr_iter *_it,
                           unsigned int processed, char *buf, unsigned int len)
{
        struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

        return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
}

static int xattr_checkbuffer(struct xattr_iter *_it,
                             unsigned int value_sz)
{
        struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
        int err = it->buffer_size < value_sz ? -ERANGE : 0;

        it->buffer_size = value_sz;
        return !it->buffer ? 1 : err;
}

static void xattr_copyvalue(struct xattr_iter *_it,
                            unsigned int processed,
                            char *buf, unsigned int len)
{
        struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

        memcpy(it->buffer + processed, buf, len);
}

static const struct xattr_iter_handlers find_xattr_handlers = {
        .entry = xattr_entrymatch,
        .name = xattr_namematch,
        .alloc_buffer = xattr_checkbuffer,
        .value = xattr_copyvalue
};

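/* look up the requested xattr among the entries stored inline in the inode */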
static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
{
        int ret;
        unsigned int remaining;

        ret = inline_xattr_iter_begin(&it->it, inode);
        if (ret < 0)
                return ret;

        remaining = ret;
        while (remaining) {
                ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
                if (ret != -ENOATTR)
                        break;
        }
        xattr_iter_end_final(&it->it);

        return ret ? ret : it->buffer_size;
}

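/*
 * Look up the requested xattr among the shared xattrs referenced by this
 * inode, walking the id array cached by init_inode_xattrs() and remapping
 * the metadata page only when the target block changes.
 */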
static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
{
        struct erofs_inode *const vi = EROFS_I(inode);
        struct super_block *const sb = inode->i_sb;
        struct erofs_sb_info *const sbi = EROFS_SB(sb);
        unsigned int i;
        int ret = -ENOATTR;

        for (i = 0; i < vi->xattr_shared_count; ++i) {
                erofs_blk_t blkaddr =
                        xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

                it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);

                if (!i || blkaddr != it->it.blkaddr) {
                        if (i)
                                xattr_iter_end(&it->it, true);

                        it->it.page = erofs_get_meta_page(sb, blkaddr);
                        if (IS_ERR(it->it.page))
                                return PTR_ERR(it->it.page);

                        it->it.kaddr = kmap_atomic(it->it.page);
                        it->it.blkaddr = blkaddr;
                }

                ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
                if (ret != -ENOATTR)
                        break;
        }
        if (vi->xattr_shared_count)
                xattr_iter_end_final(&it->it);

        return ret ? ret : it->buffer_size;
}

static bool erofs_xattr_user_list(struct dentry *dentry)
{
        return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER);
}

static bool erofs_xattr_trusted_list(struct dentry *dentry)
{
        return capable(CAP_SYS_ADMIN);
}

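/*
 * Resolve an xattr by (index, name): inline entries are searched first,
 * and the shared xattr area is consulted only on -ENOATTR.
 */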
int erofs_getxattr(struct inode *inode, int index,
                   const char *name,
                   void *buffer, size_t buffer_size)
{
        int ret;
        struct getxattr_iter it;

        if (!name)
                return -EINVAL;

        ret = init_inode_xattrs(inode);
        if (ret)
                return ret;

        it.index = index;

        it.name.len = strlen(name);
        if (it.name.len > EROFS_NAME_LEN)
                return -ERANGE;
        it.name.name = name;

        it.buffer = buffer;
        it.buffer_size = buffer_size;

        it.it.sb = inode->i_sb;
        ret = inline_getxattr(inode, &it);
        if (ret == -ENOATTR)
                ret = shared_getxattr(inode, &it);
        return ret;
}

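/*
 * Common ->get callback for the handlers below: enforce the per-namespace
 * checks (user. requires the XATTR_USER mount option, trusted. requires
 * CAP_SYS_ADMIN) before falling through to erofs_getxattr().
 */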
static int erofs_xattr_generic_get(const struct xattr_handler *handler,
                                   struct dentry *unused, struct inode *inode,
                                   const char *name, void *buffer, size_t size)
{
        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

        switch (handler->flags) {
        case EROFS_XATTR_INDEX_USER:
                if (!test_opt(sbi, XATTR_USER))
                        return -EOPNOTSUPP;
                break;
        case EROFS_XATTR_INDEX_TRUSTED:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                break;
        case EROFS_XATTR_INDEX_SECURITY:
                break;
        default:
                return -EINVAL;
        }

        return erofs_getxattr(inode, handler->flags, name, buffer, size);
}

const struct xattr_handler erofs_xattr_user_handler = {
        .prefix = XATTR_USER_PREFIX,
        .flags = EROFS_XATTR_INDEX_USER,
        .list = erofs_xattr_user_list,
        .get = erofs_xattr_generic_get,
};

const struct xattr_handler erofs_xattr_trusted_handler = {
        .prefix = XATTR_TRUSTED_PREFIX,
        .flags = EROFS_XATTR_INDEX_TRUSTED,
        .list = erofs_xattr_trusted_list,
        .get = erofs_xattr_generic_get,
};

#ifdef CONFIG_EROFS_FS_SECURITY
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
        .prefix = XATTR_SECURITY_PREFIX,
        .flags = EROFS_XATTR_INDEX_SECURITY,
        .get = erofs_xattr_generic_get,
};
#endif

const struct xattr_handler *erofs_xattr_handlers[] = {
        &erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
        &posix_acl_access_xattr_handler,
        &posix_acl_default_xattr_handler,
#endif
        &erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
        &erofs_xattr_security_handler,
#endif
        NULL,
};

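/* state for erofs_listxattr(): output buffer plus the current fill offset */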
struct listxattr_iter {
        struct xattr_iter it;

        struct dentry *dentry;
        char *buffer;
        int buffer_size, buffer_ofs;
};

static int xattr_entrylist(struct xattr_iter *_it,
                           struct erofs_xattr_entry *entry)
{
        struct listxattr_iter *it =
                container_of(_it, struct listxattr_iter, it);
        unsigned int prefix_len;
        const char *prefix;

        const struct xattr_handler *h =
                erofs_xattr_handler(entry->e_name_index);

        if (!h || (h->list && !h->list(it->dentry)))
                return 1;

        prefix = xattr_prefix(h);
        prefix_len = strlen(prefix);

        if (!it->buffer) {
                it->buffer_ofs += prefix_len + entry->e_name_len + 1;
                return 1;
        }

        if (it->buffer_ofs + prefix_len
                + entry->e_name_len + 1 > it->buffer_size)
                return -ERANGE;

        memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
        it->buffer_ofs += prefix_len;
        return 0;
}

static int xattr_namelist(struct xattr_iter *_it,
                          unsigned int processed, char *buf, unsigned int len)
{
        struct listxattr_iter *it =
                container_of(_it, struct listxattr_iter, it);

        memcpy(it->buffer + it->buffer_ofs, buf, len);
        it->buffer_ofs += len;
        return 0;
}

static int xattr_skipvalue(struct xattr_iter *_it,
                           unsigned int value_sz)
{
        struct listxattr_iter *it =
                container_of(_it, struct listxattr_iter, it);

        it->buffer[it->buffer_ofs++] = '\0';
        return 1;
}

static const struct xattr_iter_handlers list_xattr_handlers = {
        .entry = xattr_entrylist,
        .name = xattr_namelist,
        .alloc_buffer = xattr_skipvalue,
        .value = NULL
};

static int inline_listxattr(struct listxattr_iter *it)
{
        int ret;
        unsigned int remaining;

        ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
        if (ret < 0)
                return ret;

        remaining = ret;
        while (remaining) {
                ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
                if (ret)
                        break;
        }
        xattr_iter_end_final(&it->it);
        return ret ? ret : it->buffer_ofs;
}

static int shared_listxattr(struct listxattr_iter *it)
{
        struct inode *const inode = d_inode(it->dentry);
        struct erofs_inode *const vi = EROFS_I(inode);
        struct super_block *const sb = inode->i_sb;
        struct erofs_sb_info *const sbi = EROFS_SB(sb);
        unsigned int i;
        int ret = 0;

        for (i = 0; i < vi->xattr_shared_count; ++i) {
                erofs_blk_t blkaddr =
                        xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

                it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
                if (!i || blkaddr != it->it.blkaddr) {
                        if (i)
                                xattr_iter_end(&it->it, true);

                        it->it.page = erofs_get_meta_page(sb, blkaddr);
                        if (IS_ERR(it->it.page))
                                return PTR_ERR(it->it.page);

                        it->it.kaddr = kmap_atomic(it->it.page);
                        it->it.blkaddr = blkaddr;
                }

                ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
                if (ret)
                        break;
        }
        if (vi->xattr_shared_count)
                xattr_iter_end_final(&it->it);

        return ret ? ret : it->buffer_ofs;
}

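/*
 * Enumerate all visible xattr names of the dentry: inline entries are
 * listed first, then names from the shared xattr area are appended; with
 * a NULL buffer only the required buffer size is accumulated.
 */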
ssize_t erofs_listxattr(struct dentry *dentry,
                        char *buffer, size_t buffer_size)
{
        int ret;
        struct listxattr_iter it;

        ret = init_inode_xattrs(d_inode(dentry));
        if (ret)
                return ret;

        it.dentry = dentry;
        it.buffer = buffer;
        it.buffer_size = buffer_size;
        it.buffer_ofs = 0;

        it.it.sb = dentry->d_sb;

        ret = inline_listxattr(&it);
        if (ret < 0 && ret != -ENOATTR)
                return ret;
        return shared_listxattr(&it);
}

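/*
 * POSIX ACLs are stored as regular xattrs with a dedicated name index;
 * fetch the raw value via erofs_getxattr() and decode it with
 * posix_acl_from_xattr().
 */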
#ifdef CONFIG_EROFS_FS_POSIX_ACL
struct posix_acl *erofs_get_acl(struct inode *inode, int type)
{
        struct posix_acl *acl;
        int prefix, rc;
        char *value = NULL;

        switch (type) {
        case ACL_TYPE_ACCESS:
                prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
                break;
        case ACL_TYPE_DEFAULT:
                prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        rc = erofs_getxattr(inode, prefix, "", NULL, 0);
        if (rc > 0) {
                value = kmalloc(rc, GFP_KERNEL);
                if (!value)
                        return ERR_PTR(-ENOMEM);
                rc = erofs_getxattr(inode, prefix, "", value, rc);
        }

        if (rc == -ENOATTR)
                acl = NULL;
        else if (rc < 0)
                acl = ERR_PTR(rc);
        else
                acl = posix_acl_from_xattr(&init_user_ns, value, rc);
        kfree(value);
        return acl;
}
#endif