exofs: Move all operations to an io_engine
In anticipation of multi-device operations, we separate osd operations
into an abstract I/O API. Currently only one device is used, but later,
when adding more devices, we will drive all devices in parallel according
to a "data_map" that describes how data is arranged on the multiple
devices. The file system level operates, as before, as if there is one
object (inode-number) and an i_size. The io engine will split this to the
same object-number but on multiple devices.

At first we introduce a Mirror (RAID 1) layout. Ultimately we intend to
fully implement the pNFS-Objects data-map, including RAID 0, 4, 5, and 6
over mirrored devices, over multiple device-groups, and more.
See: http://tools.ietf.org/html/draft-ietf-nfsv4-pnfs-obj-12

* Define an io_state based API for accessing osd storage devices in an
  abstract way.
  Usage:
    First a caller allocates an io state with:
        exofs_get_io_state(struct exofs_sb_info *sbi,
                           struct exofs_io_state **ios);

    Then calls one of:
        exofs_sbi_create(struct exofs_io_state *ios);
        exofs_sbi_remove(struct exofs_io_state *ios);
        exofs_sbi_write(struct exofs_io_state *ios);
        exofs_sbi_read(struct exofs_io_state *ios);
        exofs_oi_truncate(struct exofs_i_info *oi, u64 new_len);

    And when done:
        exofs_put_io_state(struct exofs_io_state *ios);

* Convert all source files to use this new API.
* Convert from bio_alloc to bio_kmalloc.
* In the io engine, make use of the now-fixed osd_req_decode_sense.

There are no functional changes or on-disk additions after this patch.

Signed-off-by: Boaz Harrosh <bharrosh@panasas.com>
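For reference, the per-operation flow implied by the new API looks roughly
like the sketch below. It is illustrative only and not part of the patch:
the helper name and its buffer/length/offset arguments are invented, while
the fields and calls are the ones this patch adds.

    /* Minimal sketch: a synchronous object write through the io_state API.
     * With ios->done left NULL, exofs_io_execute() runs synchronously and
     * its return value already reflects exofs_check_io().
     */
    static int example_write_object(struct exofs_sb_info *sbi,
                                    struct exofs_i_info *oi,
                                    void *buf, unsigned len, u64 off)
    {
            struct exofs_io_state *ios;
            int ret;

            ret = exofs_get_io_state(sbi, &ios);    /* allocate the io_state */
            if (unlikely(ret))
                    return ret;

            ios->kern_buff = buf;   /* kernel buffer; a struct bio can be set instead */
            ios->length = len;
            ios->offset = off;

            ret = exofs_oi_write(oi, ios);  /* fills obj.id and cred, calls exofs_sbi_write() */

            exofs_put_io_state(ios);        /* ends the osd requests and frees the state */
            return ret;
    }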
Parent: 8ce9bdd1fb
Commit: 06886a5a3d
|
@ -155,22 +155,4 @@ enum {
|
|||
(((name_len) + offsetof(struct exofs_dir_entry, name) + \
|
||||
EXOFS_DIR_ROUND) & ~EXOFS_DIR_ROUND)
|
||||
|
||||
/*************************
|
||||
* function declarations *
|
||||
*************************/
|
||||
/* osd.c */
|
||||
void exofs_make_credential(u8 cred_a[OSD_CAP_LEN],
|
||||
const struct osd_obj_id *obj);
|
||||
|
||||
int exofs_check_ok_resid(struct osd_request *or, u64 *in_resid, u64 *out_resid);
|
||||
static inline int exofs_check_ok(struct osd_request *or)
|
||||
{
|
||||
return exofs_check_ok_resid(or, NULL, NULL);
|
||||
}
|
||||
int exofs_sync_op(struct osd_request *or, int timeout, u8 *cred);
|
||||
int exofs_async_op(struct osd_request *or,
|
||||
osd_req_done_fn *async_done, void *caller_context, u8 *cred);
|
||||
|
||||
int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr);
|
||||
|
||||
#endif /*ifndef __EXOFS_COM_H__*/
|
||||
|
|
|
@ -30,14 +30,13 @@
|
|||
* along with exofs; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
#ifndef __EXOFS_H__
|
||||
#define __EXOFS_H__
|
||||
|
||||
#include <linux/fs.h>
|
||||
#include <linux/time.h>
|
||||
#include "common.h"
|
||||
|
||||
#ifndef __EXOFS_H__
|
||||
#define __EXOFS_H__
|
||||
|
||||
#define EXOFS_ERR(fmt, a...) printk(KERN_ERR "exofs: " fmt, ##a)
|
||||
|
||||
#ifdef CONFIG_EXOFS_DEBUG
|
||||
|
@ -56,6 +55,7 @@
|
|||
*/
|
||||
struct exofs_sb_info {
|
||||
struct osd_dev *s_dev; /* returned by get_osd_dev */
|
||||
struct exofs_fscb s_fscb; /* Written often, pre-allocate*/
|
||||
osd_id s_pid; /* partition ID of file system*/
|
||||
int s_timeout; /* timeout for OSD operations */
|
||||
uint64_t s_nextid; /* highest object ID used */
|
||||
|
@ -79,6 +79,50 @@ struct exofs_i_info {
|
|||
struct inode vfs_inode; /* normal in-memory inode */
|
||||
};
|
||||
|
||||
static inline osd_id exofs_oi_objno(struct exofs_i_info *oi)
|
||||
{
|
||||
return oi->vfs_inode.i_ino + EXOFS_OBJ_OFF;
|
||||
}
|
||||
|
||||
struct exofs_io_state;
|
||||
typedef void (*exofs_io_done_fn)(struct exofs_io_state *or, void *private);
|
||||
|
||||
struct exofs_io_state {
|
||||
struct kref kref;
|
||||
|
||||
void *private;
|
||||
exofs_io_done_fn done;
|
||||
|
||||
struct exofs_sb_info *sbi;
|
||||
struct osd_obj_id obj;
|
||||
u8 *cred;
|
||||
|
||||
/* Global read/write IO*/
|
||||
loff_t offset;
|
||||
unsigned long length;
|
||||
void *kern_buff;
|
||||
struct bio *bio;
|
||||
|
||||
/* Attributes */
|
||||
unsigned in_attr_len;
|
||||
struct osd_attr *in_attr;
|
||||
unsigned out_attr_len;
|
||||
struct osd_attr *out_attr;
|
||||
|
||||
/* Variable array of size numdevs */
|
||||
unsigned numdevs;
|
||||
struct exofs_per_dev_state {
|
||||
struct osd_request *or;
|
||||
struct bio *bio;
|
||||
} per_dev[];
|
||||
};
|
||||
|
||||
static inline unsigned exofs_io_state_size(unsigned numdevs)
|
||||
{
|
||||
return sizeof(struct exofs_io_state) +
|
||||
sizeof(struct exofs_per_dev_state) * numdevs;
|
||||
}
|
||||
|
||||
/*
|
||||
* our inode flags
|
||||
*/
|
||||
|
@ -130,6 +174,42 @@ static inline struct exofs_i_info *exofs_i(struct inode *inode)
|
|||
/*************************
|
||||
* function declarations *
|
||||
*************************/
|
||||
|
||||
/* ios.c */
|
||||
void exofs_make_credential(u8 cred_a[OSD_CAP_LEN],
|
||||
const struct osd_obj_id *obj);
|
||||
int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
|
||||
u64 offset, void *p, unsigned length);
|
||||
|
||||
int exofs_get_io_state(struct exofs_sb_info *sbi, struct exofs_io_state** ios);
|
||||
void exofs_put_io_state(struct exofs_io_state *ios);
|
||||
|
||||
int exofs_check_io(struct exofs_io_state *ios, u64 *resid);
|
||||
|
||||
int exofs_sbi_create(struct exofs_io_state *ios);
|
||||
int exofs_sbi_remove(struct exofs_io_state *ios);
|
||||
int exofs_sbi_write(struct exofs_io_state *ios);
|
||||
int exofs_sbi_read(struct exofs_io_state *ios);
|
||||
|
||||
int extract_attr_from_ios(struct exofs_io_state *ios, struct osd_attr *attr);
|
||||
|
||||
int exofs_oi_truncate(struct exofs_i_info *oi, u64 new_len);
|
||||
static inline int exofs_oi_write(struct exofs_i_info *oi,
|
||||
struct exofs_io_state *ios)
|
||||
{
|
||||
ios->obj.id = exofs_oi_objno(oi);
|
||||
ios->cred = oi->i_cred;
|
||||
return exofs_sbi_write(ios);
|
||||
}
|
||||
|
||||
static inline int exofs_oi_read(struct exofs_i_info *oi,
|
||||
struct exofs_io_state *ios)
|
||||
{
|
||||
ios->obj.id = exofs_oi_objno(oi);
|
||||
ios->cred = oi->i_cred;
|
||||
return exofs_sbi_read(ios);
|
||||
}
|
||||
|
||||
/* inode.c */
|
||||
void exofs_truncate(struct inode *inode);
|
||||
int exofs_setattr(struct dentry *, struct iattr *);
|
||||
|
@ -169,6 +249,7 @@ extern const struct file_operations exofs_file_operations;
|
|||
|
||||
/* inode.c */
|
||||
extern const struct address_space_operations exofs_aops;
|
||||
extern const struct osd_attr g_attr_logical_length;
|
||||
|
||||
/* namei.c */
|
||||
extern const struct inode_operations exofs_dir_inode_operations;
|
||||
|
|
fs/exofs/inode.c | 383
|
@ -37,17 +37,18 @@
|
|||
|
||||
#include "exofs.h"
|
||||
|
||||
#ifdef CONFIG_EXOFS_DEBUG
|
||||
# define EXOFS_DEBUG_OBJ_ISIZE 1
|
||||
#endif
|
||||
|
||||
#define EXOFS_DBGMSG2(M...) do {} while (0)
|
||||
|
||||
enum { BIO_MAX_PAGES_KMALLOC =
|
||||
(PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),
|
||||
};
|
||||
|
||||
struct page_collect {
|
||||
struct exofs_sb_info *sbi;
|
||||
struct request_queue *req_q;
|
||||
struct inode *inode;
|
||||
unsigned expected_pages;
|
||||
struct exofs_io_state *ios;
|
||||
|
||||
struct bio *bio;
|
||||
unsigned nr_pages;
|
||||
|
@ -56,7 +57,7 @@ struct page_collect {
|
|||
};
|
||||
|
||||
static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
|
||||
struct inode *inode)
|
||||
struct inode *inode)
|
||||
{
|
||||
struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
|
||||
|
||||
|
@ -65,13 +66,11 @@ static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
|
|||
pcol->inode = inode;
|
||||
pcol->expected_pages = expected_pages;
|
||||
|
||||
pcol->ios = NULL;
|
||||
pcol->bio = NULL;
|
||||
pcol->nr_pages = 0;
|
||||
pcol->length = 0;
|
||||
pcol->pg_first = -1;
|
||||
|
||||
EXOFS_DBGMSG("_pcol_init ino=0x%lx expected_pages=%u\n", inode->i_ino,
|
||||
expected_pages);
|
||||
}
|
||||
|
||||
static void _pcol_reset(struct page_collect *pcol)
|
||||
|
@ -82,35 +81,49 @@ static void _pcol_reset(struct page_collect *pcol)
|
|||
pcol->nr_pages = 0;
|
||||
pcol->length = 0;
|
||||
pcol->pg_first = -1;
|
||||
EXOFS_DBGMSG("_pcol_reset ino=0x%lx expected_pages=%u\n",
|
||||
pcol->inode->i_ino, pcol->expected_pages);
|
||||
pcol->ios = NULL;
|
||||
|
||||
/* this is probably the end of the loop but in writes
|
||||
* it might not end here. don't be left with nothing
|
||||
*/
|
||||
if (!pcol->expected_pages)
|
||||
pcol->expected_pages = 128;
|
||||
pcol->expected_pages = BIO_MAX_PAGES_KMALLOC;
|
||||
}
|
||||
|
||||
static int pcol_try_alloc(struct page_collect *pcol)
|
||||
{
|
||||
int pages = min_t(unsigned, pcol->expected_pages, BIO_MAX_PAGES);
|
||||
int pages = min_t(unsigned, pcol->expected_pages,
|
||||
BIO_MAX_PAGES_KMALLOC);
|
||||
|
||||
if (!pcol->ios) { /* First time allocate io_state */
|
||||
int ret = exofs_get_io_state(pcol->sbi, &pcol->ios);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
for (; pages; pages >>= 1) {
|
||||
pcol->bio = bio_alloc(GFP_KERNEL, pages);
|
||||
pcol->bio = bio_kmalloc(GFP_KERNEL, pages);
|
||||
if (likely(pcol->bio))
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXOFS_ERR("Failed to kcalloc expected_pages=%u\n",
|
||||
EXOFS_ERR("Failed to bio_kmalloc expected_pages=%u\n",
|
||||
pcol->expected_pages);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void pcol_free(struct page_collect *pcol)
|
||||
{
|
||||
bio_put(pcol->bio);
|
||||
pcol->bio = NULL;
|
||||
if (pcol->bio) {
|
||||
bio_put(pcol->bio);
|
||||
pcol->bio = NULL;
|
||||
}
|
||||
|
||||
if (pcol->ios) {
|
||||
exofs_put_io_state(pcol->ios);
|
||||
pcol->ios = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int pcol_add_page(struct page_collect *pcol, struct page *page,
|
||||
|
@ -163,22 +176,17 @@ static void update_write_page(struct page *page, int ret)
|
|||
/* Called at the end of reads, to optionally unlock pages and update their
|
||||
* status.
|
||||
*/
|
||||
static int __readpages_done(struct osd_request *or, struct page_collect *pcol,
|
||||
bool do_unlock)
|
||||
static int __readpages_done(struct page_collect *pcol, bool do_unlock)
|
||||
{
|
||||
struct bio_vec *bvec;
|
||||
int i;
|
||||
u64 resid;
|
||||
u64 good_bytes;
|
||||
u64 length = 0;
|
||||
int ret = exofs_check_ok_resid(or, &resid, NULL);
|
||||
|
||||
osd_end_request(or);
|
||||
int ret = exofs_check_io(pcol->ios, &resid);
|
||||
|
||||
if (likely(!ret))
|
||||
good_bytes = pcol->length;
|
||||
else if (!resid)
|
||||
good_bytes = 0;
|
||||
else
|
||||
good_bytes = pcol->length - resid;
|
||||
|
||||
|
@ -216,13 +224,13 @@ static int __readpages_done(struct osd_request *or, struct page_collect *pcol,
|
|||
}
|
||||
|
||||
/* callback of async reads */
|
||||
static void readpages_done(struct osd_request *or, void *p)
|
||||
static void readpages_done(struct exofs_io_state *ios, void *p)
|
||||
{
|
||||
struct page_collect *pcol = p;
|
||||
|
||||
__readpages_done(or, pcol, true);
|
||||
__readpages_done(pcol, true);
|
||||
atomic_dec(&pcol->sbi->s_curr_pending);
|
||||
kfree(p);
|
||||
kfree(pcol);
|
||||
}
|
||||
|
||||
static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
|
||||
|
@ -240,17 +248,13 @@ static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
|
|||
|
||||
unlock_page(page);
|
||||
}
|
||||
pcol_free(pcol);
|
||||
}
|
||||
|
||||
static int read_exec(struct page_collect *pcol, bool is_sync)
|
||||
{
|
||||
struct exofs_i_info *oi = exofs_i(pcol->inode);
|
||||
struct osd_obj_id obj = {pcol->sbi->s_pid,
|
||||
pcol->inode->i_ino + EXOFS_OBJ_OFF};
|
||||
struct osd_request *or = NULL;
|
||||
struct exofs_io_state *ios = pcol->ios;
|
||||
struct page_collect *pcol_copy = NULL;
|
||||
loff_t i_start = pcol->pg_first << PAGE_CACHE_SHIFT;
|
||||
int ret;
|
||||
|
||||
if (!pcol->bio)
|
||||
|
@ -259,17 +263,13 @@ static int read_exec(struct page_collect *pcol, bool is_sync)
|
|||
/* see comment in _readpage() about sync reads */
|
||||
WARN_ON(is_sync && (pcol->nr_pages != 1));
|
||||
|
||||
or = osd_start_request(pcol->sbi->s_dev, GFP_KERNEL);
|
||||
if (unlikely(!or)) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
osd_req_read(or, &obj, i_start, pcol->bio, pcol->length);
|
||||
ios->bio = pcol->bio;
|
||||
ios->length = pcol->length;
|
||||
ios->offset = pcol->pg_first << PAGE_CACHE_SHIFT;
|
||||
|
||||
if (is_sync) {
|
||||
exofs_sync_op(or, pcol->sbi->s_timeout, oi->i_cred);
|
||||
return __readpages_done(or, pcol, false);
|
||||
exofs_oi_read(oi, pcol->ios);
|
||||
return __readpages_done(pcol, false);
|
||||
}
|
||||
|
||||
pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
|
||||
|
@ -279,14 +279,16 @@ static int read_exec(struct page_collect *pcol, bool is_sync)
|
|||
}
|
||||
|
||||
*pcol_copy = *pcol;
|
||||
ret = exofs_async_op(or, readpages_done, pcol_copy, oi->i_cred);
|
||||
ios->done = readpages_done;
|
||||
ios->private = pcol_copy;
|
||||
ret = exofs_oi_read(oi, ios);
|
||||
if (unlikely(ret))
|
||||
goto err;
|
||||
|
||||
atomic_inc(&pcol->sbi->s_curr_pending);
|
||||
|
||||
EXOFS_DBGMSG("read_exec obj=0x%llx start=0x%llx length=0x%lx\n",
|
||||
obj.id, _LLU(i_start), pcol->length);
|
||||
ios->obj.id, _LLU(ios->offset), pcol->length);
|
||||
|
||||
/* pages ownership was passed to pcol_copy */
|
||||
_pcol_reset(pcol);
|
||||
|
@ -295,12 +297,10 @@ static int read_exec(struct page_collect *pcol, bool is_sync)
|
|||
err:
|
||||
if (!is_sync)
|
||||
_unlock_pcol_pages(pcol, ret, READ);
|
||||
else /* Pages unlocked by caller in sync mode only free bio */
|
||||
pcol_free(pcol);
|
||||
|
||||
pcol_free(pcol);
|
||||
|
||||
kfree(pcol_copy);
|
||||
if (or)
|
||||
osd_end_request(or);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -421,9 +421,8 @@ static int _readpage(struct page *page, bool is_sync)
|
|||
|
||||
_pcol_init(&pcol, 1, page->mapping->host);
|
||||
|
||||
/* readpage_strip might call read_exec(,async) inside at several places
|
||||
* but this is safe for is_async=0 since read_exec will not do anything
|
||||
* when we have a single page.
|
||||
/* readpage_strip might call read_exec(,is_sync==false) at several
|
||||
* places but not if we have a single page.
|
||||
*/
|
||||
ret = readpage_strip(&pcol, page);
|
||||
if (ret) {
|
||||
|
@ -442,8 +441,8 @@ static int exofs_readpage(struct file *file, struct page *page)
|
|||
return _readpage(page, false);
|
||||
}
|
||||
|
||||
/* Callback for osd_write. All writes are asynchronouse */
|
||||
static void writepages_done(struct osd_request *or, void *p)
|
||||
/* Callback for osd_write. All writes are asynchronous */
|
||||
static void writepages_done(struct exofs_io_state *ios, void *p)
|
||||
{
|
||||
struct page_collect *pcol = p;
|
||||
struct bio_vec *bvec;
|
||||
|
@ -451,16 +450,12 @@ static void writepages_done(struct osd_request *or, void *p)
|
|||
u64 resid;
|
||||
u64 good_bytes;
|
||||
u64 length = 0;
|
||||
int ret = exofs_check_io(ios, &resid);
|
||||
|
||||
int ret = exofs_check_ok_resid(or, NULL, &resid);
|
||||
|
||||
osd_end_request(or);
|
||||
atomic_dec(&pcol->sbi->s_curr_pending);
|
||||
|
||||
if (likely(!ret))
|
||||
good_bytes = pcol->length;
|
||||
else if (!resid)
|
||||
good_bytes = 0;
|
||||
else
|
||||
good_bytes = pcol->length - resid;
|
||||
|
||||
|
@ -498,23 +493,13 @@ static void writepages_done(struct osd_request *or, void *p)
|
|||
static int write_exec(struct page_collect *pcol)
|
||||
{
|
||||
struct exofs_i_info *oi = exofs_i(pcol->inode);
|
||||
struct osd_obj_id obj = {pcol->sbi->s_pid,
|
||||
pcol->inode->i_ino + EXOFS_OBJ_OFF};
|
||||
struct osd_request *or = NULL;
|
||||
struct exofs_io_state *ios = pcol->ios;
|
||||
struct page_collect *pcol_copy = NULL;
|
||||
loff_t i_start = pcol->pg_first << PAGE_CACHE_SHIFT;
|
||||
int ret;
|
||||
|
||||
if (!pcol->bio)
|
||||
return 0;
|
||||
|
||||
or = osd_start_request(pcol->sbi->s_dev, GFP_KERNEL);
|
||||
if (unlikely(!or)) {
|
||||
EXOFS_ERR("write_exec: Faild to osd_start_request()\n");
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
|
||||
if (!pcol_copy) {
|
||||
EXOFS_ERR("write_exec: Faild to kmalloc(pcol)\n");
|
||||
|
@ -525,16 +510,22 @@ static int write_exec(struct page_collect *pcol)
|
|||
*pcol_copy = *pcol;
|
||||
|
||||
pcol_copy->bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
|
||||
osd_req_write(or, &obj, i_start, pcol_copy->bio, pcol_copy->length);
|
||||
ret = exofs_async_op(or, writepages_done, pcol_copy, oi->i_cred);
|
||||
|
||||
ios->bio = pcol_copy->bio;
|
||||
ios->offset = pcol_copy->pg_first << PAGE_CACHE_SHIFT;
|
||||
ios->length = pcol_copy->length;
|
||||
ios->done = writepages_done;
|
||||
ios->private = pcol_copy;
|
||||
|
||||
ret = exofs_oi_write(oi, ios);
|
||||
if (unlikely(ret)) {
|
||||
EXOFS_ERR("write_exec: exofs_async_op() Faild\n");
|
||||
EXOFS_ERR("write_exec: exofs_oi_write() Faild\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
atomic_inc(&pcol->sbi->s_curr_pending);
|
||||
EXOFS_DBGMSG("write_exec(0x%lx, 0x%llx) start=0x%llx length=0x%lx\n",
|
||||
pcol->inode->i_ino, pcol->pg_first, _LLU(i_start),
|
||||
pcol->inode->i_ino, pcol->pg_first, _LLU(ios->offset),
|
||||
pcol->length);
|
||||
/* pages ownership was passed to pcol_copy */
|
||||
_pcol_reset(pcol);
|
||||
|
@ -542,9 +533,9 @@ static int write_exec(struct page_collect *pcol)
|
|||
|
||||
err:
|
||||
_unlock_pcol_pages(pcol, ret, WRITE);
|
||||
pcol_free(pcol);
|
||||
kfree(pcol_copy);
|
||||
if (or)
|
||||
osd_end_request(or);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -588,6 +579,9 @@ static int writepage_strip(struct page *page,
|
|||
if (PageError(page))
|
||||
ClearPageError(page);
|
||||
unlock_page(page);
|
||||
EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
|
||||
"outside the limits\n",
|
||||
inode->i_ino, page->index);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
@ -602,6 +596,9 @@ try_again:
|
|||
ret = write_exec(pcol);
|
||||
if (unlikely(ret))
|
||||
goto fail;
|
||||
|
||||
EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
|
||||
inode->i_ino, page->index);
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
|
@ -636,6 +633,8 @@ try_again:
|
|||
return 0;
|
||||
|
||||
fail:
|
||||
EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
|
||||
inode->i_ino, page->index, ret);
|
||||
set_bit(AS_EIO, &page->mapping->flags);
|
||||
unlock_page(page);
|
||||
return ret;
|
||||
|
@ -654,14 +653,17 @@ static int exofs_writepages(struct address_space *mapping,
|
|||
wbc->range_end >> PAGE_CACHE_SHIFT;
|
||||
|
||||
if (start || end)
|
||||
expected_pages = min(end - start + 1, 32L);
|
||||
expected_pages = end - start + 1;
|
||||
else
|
||||
expected_pages = mapping->nrpages;
|
||||
|
||||
EXOFS_DBGMSG("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx"
|
||||
" m->nrpages=%lu start=0x%lx end=0x%lx\n",
|
||||
if (expected_pages < 32L)
|
||||
expected_pages = 32L;
|
||||
|
||||
EXOFS_DBGMSG("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
|
||||
"nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
|
||||
mapping->host->i_ino, wbc->range_start, wbc->range_end,
|
||||
mapping->nrpages, start, end);
|
||||
mapping->nrpages, start, end, expected_pages);
|
||||
|
||||
_pcol_init(&pcol, expected_pages, mapping->host);
|
||||
|
||||
|
@ -773,19 +775,28 @@ static int exofs_get_block(struct inode *inode, sector_t iblock,
|
|||
const struct osd_attr g_attr_logical_length = ATTR_DEF(
|
||||
OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
|
||||
|
||||
static int _do_truncate(struct inode *inode)
|
||||
{
|
||||
struct exofs_i_info *oi = exofs_i(inode);
|
||||
loff_t isize = i_size_read(inode);
|
||||
int ret;
|
||||
|
||||
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
|
||||
|
||||
nobh_truncate_page(inode->i_mapping, isize, exofs_get_block);
|
||||
|
||||
ret = exofs_oi_truncate(oi, (u64)isize);
|
||||
EXOFS_DBGMSG("(0x%lx) size=0x%llx\n", inode->i_ino, isize);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Truncate a file to the specified size - all we have to do is set the size
|
||||
* attribute. We make sure the object exists first.
|
||||
*/
|
||||
void exofs_truncate(struct inode *inode)
|
||||
{
|
||||
struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
|
||||
struct exofs_i_info *oi = exofs_i(inode);
|
||||
struct osd_obj_id obj = {sbi->s_pid, inode->i_ino + EXOFS_OBJ_OFF};
|
||||
struct osd_request *or;
|
||||
struct osd_attr attr;
|
||||
loff_t isize = i_size_read(inode);
|
||||
__be64 newsize;
|
||||
int ret;
|
||||
|
||||
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
|
||||
|
@ -795,22 +806,6 @@ void exofs_truncate(struct inode *inode)
|
|||
return;
|
||||
if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
|
||||
return;
|
||||
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
|
||||
|
||||
nobh_truncate_page(inode->i_mapping, isize, exofs_get_block);
|
||||
|
||||
or = osd_start_request(sbi->s_dev, GFP_KERNEL);
|
||||
if (unlikely(!or)) {
|
||||
EXOFS_ERR("ERROR: exofs_truncate: osd_start_request failed\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
osd_req_set_attributes(or, &obj);
|
||||
|
||||
newsize = cpu_to_be64((u64)isize);
|
||||
attr = g_attr_logical_length;
|
||||
attr.val_ptr = &newsize;
|
||||
osd_req_add_set_attr_list(or, &attr, 1);
|
||||
|
||||
/* if we are about to truncate an object, and it hasn't been
|
||||
* created yet, wait
|
||||
|
@ -818,8 +813,7 @@ void exofs_truncate(struct inode *inode)
|
|||
if (unlikely(wait_obj_created(oi)))
|
||||
goto fail;
|
||||
|
||||
ret = exofs_sync_op(or, sbi->s_timeout, oi->i_cred);
|
||||
osd_end_request(or);
|
||||
ret = _do_truncate(inode);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
|
@ -849,66 +843,57 @@ int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
|
|||
|
||||
/*
|
||||
* Read an inode from the OSD, and return it as is. We also return the size
|
||||
* attribute in the 'sanity' argument if we got compiled with debugging turned
|
||||
* on.
|
||||
* attribute in the 'obj_size' argument.
|
||||
*/
|
||||
static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
|
||||
struct exofs_fcb *inode, uint64_t *sanity)
|
||||
struct exofs_fcb *inode, uint64_t *obj_size)
|
||||
{
|
||||
struct exofs_sb_info *sbi = sb->s_fs_info;
|
||||
struct osd_request *or;
|
||||
struct osd_attr attr;
|
||||
struct osd_obj_id obj = {sbi->s_pid,
|
||||
oi->vfs_inode.i_ino + EXOFS_OBJ_OFF};
|
||||
struct osd_attr attrs[2];
|
||||
struct exofs_io_state *ios;
|
||||
int ret;
|
||||
|
||||
exofs_make_credential(oi->i_cred, &obj);
|
||||
|
||||
or = osd_start_request(sbi->s_dev, GFP_KERNEL);
|
||||
if (unlikely(!or)) {
|
||||
EXOFS_ERR("exofs_get_inode: osd_start_request failed.\n");
|
||||
return -ENOMEM;
|
||||
*obj_size = ~0;
|
||||
ret = exofs_get_io_state(sbi, &ios);
|
||||
if (unlikely(ret)) {
|
||||
EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
|
||||
return ret;
|
||||
}
|
||||
osd_req_get_attributes(or, &obj);
|
||||
|
||||
/* we need the inode attribute */
|
||||
osd_req_add_get_attr_list(or, &g_attr_inode_data, 1);
|
||||
ios->obj.id = exofs_oi_objno(oi);
|
||||
exofs_make_credential(oi->i_cred, &ios->obj);
|
||||
ios->cred = oi->i_cred;
|
||||
|
||||
#ifdef EXOFS_DEBUG_OBJ_ISIZE
|
||||
/* we get the size attributes to do a sanity check */
|
||||
osd_req_add_get_attr_list(or, &g_attr_logical_length, 1);
|
||||
#endif
|
||||
attrs[0] = g_attr_inode_data;
|
||||
attrs[1] = g_attr_logical_length;
|
||||
ios->in_attr = attrs;
|
||||
ios->in_attr_len = ARRAY_SIZE(attrs);
|
||||
|
||||
ret = exofs_sync_op(or, sbi->s_timeout, oi->i_cred);
|
||||
ret = exofs_sbi_read(ios);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
attr = g_attr_inode_data;
|
||||
ret = extract_attr_from_req(or, &attr);
|
||||
ret = extract_attr_from_ios(ios, &attrs[0]);
|
||||
if (ret) {
|
||||
EXOFS_ERR("exofs_get_inode: extract_attr_from_req failed\n");
|
||||
EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
|
||||
goto out;
|
||||
}
|
||||
WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
|
||||
memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);
|
||||
|
||||
WARN_ON(attr.len != EXOFS_INO_ATTR_SIZE);
|
||||
memcpy(inode, attr.val_ptr, EXOFS_INO_ATTR_SIZE);
|
||||
|
||||
#ifdef EXOFS_DEBUG_OBJ_ISIZE
|
||||
attr = g_attr_logical_length;
|
||||
ret = extract_attr_from_req(or, &attr);
|
||||
ret = extract_attr_from_ios(ios, &attrs[1]);
|
||||
if (ret) {
|
||||
EXOFS_ERR("ERROR: extract attr from or failed\n");
|
||||
EXOFS_ERR("%s: extract_attr of logical_length failed\n",
|
||||
__func__);
|
||||
goto out;
|
||||
}
|
||||
*sanity = get_unaligned_be64(attr.val_ptr);
|
||||
#endif
|
||||
*obj_size = get_unaligned_be64(attrs[1].val_ptr);
|
||||
|
||||
out:
|
||||
osd_end_request(or);
|
||||
exofs_put_io_state(ios);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static void __oi_init(struct exofs_i_info *oi)
|
||||
{
|
||||
init_waitqueue_head(&oi->i_wq);
|
||||
|
@ -922,7 +907,7 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
|
|||
struct exofs_i_info *oi;
|
||||
struct exofs_fcb fcb;
|
||||
struct inode *inode;
|
||||
uint64_t uninitialized_var(sanity);
|
||||
uint64_t obj_size;
|
||||
int ret;
|
||||
|
||||
inode = iget_locked(sb, ino);
|
||||
|
@ -934,7 +919,7 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
|
|||
__oi_init(oi);
|
||||
|
||||
/* read the inode from the osd */
|
||||
ret = exofs_get_inode(sb, oi, &fcb, &sanity);
|
||||
ret = exofs_get_inode(sb, oi, &fcb, &obj_size);
|
||||
if (ret)
|
||||
goto bad_inode;
|
||||
|
||||
|
@ -955,13 +940,12 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
|
|||
inode->i_blkbits = EXOFS_BLKSHIFT;
|
||||
inode->i_generation = le32_to_cpu(fcb.i_generation);
|
||||
|
||||
#ifdef EXOFS_DEBUG_OBJ_ISIZE
|
||||
if ((inode->i_size != sanity) &&
|
||||
if ((inode->i_size != obj_size) &&
|
||||
(!exofs_inode_is_fast_symlink(inode))) {
|
||||
EXOFS_ERR("WARNING: Size of inode=%llu != object=%llu\n",
|
||||
inode->i_size, _LLU(sanity));
|
||||
inode->i_size, _LLU(obj_size));
|
||||
/* FIXME: call exofs_inode_recovery() */
|
||||
}
|
||||
#endif
|
||||
|
||||
oi->i_dir_start_lookup = 0;
|
||||
|
||||
|
@ -1027,23 +1011,30 @@ int __exofs_wait_obj_created(struct exofs_i_info *oi)
|
|||
* set the obj_created flag so that other methods know that the object exists on
|
||||
* the OSD.
|
||||
*/
|
||||
static void create_done(struct osd_request *or, void *p)
|
||||
static void create_done(struct exofs_io_state *ios, void *p)
|
||||
{
|
||||
struct inode *inode = p;
|
||||
struct exofs_i_info *oi = exofs_i(inode);
|
||||
struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
|
||||
int ret;
|
||||
|
||||
ret = exofs_check_ok(or);
|
||||
osd_end_request(or);
|
||||
ret = exofs_check_io(ios, NULL);
|
||||
exofs_put_io_state(ios);
|
||||
|
||||
atomic_dec(&sbi->s_curr_pending);
|
||||
|
||||
if (unlikely(ret)) {
|
||||
EXOFS_ERR("object=0x%llx creation faild in pid=0x%llx",
|
||||
_LLU(sbi->s_pid), _LLU(inode->i_ino + EXOFS_OBJ_OFF));
|
||||
make_bad_inode(inode);
|
||||
} else
|
||||
set_obj_created(oi);
|
||||
_LLU(exofs_oi_objno(oi)), _LLU(sbi->s_pid));
|
||||
/*TODO: When FS is corrupted creation can fail, object already
|
||||
* exist. Get rid of this asynchronous creation, if exist
|
||||
* increment the obj counter and try the next object. Until we
|
||||
* succeed. All these dangling objects will be made into lost
|
||||
* files by chkfs.exofs
|
||||
*/
|
||||
}
|
||||
|
||||
set_obj_created(oi);
|
||||
|
||||
atomic_dec(&inode->i_count);
|
||||
wake_up(&oi->i_wq);
|
||||
|
@ -1058,8 +1049,7 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
|
|||
struct inode *inode;
|
||||
struct exofs_i_info *oi;
|
||||
struct exofs_sb_info *sbi;
|
||||
struct osd_request *or;
|
||||
struct osd_obj_id obj;
|
||||
struct exofs_io_state *ios;
|
||||
int ret;
|
||||
|
||||
sb = dir->i_sb;
|
||||
|
@ -1096,28 +1086,28 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
|
|||
|
||||
mark_inode_dirty(inode);
|
||||
|
||||
obj.partition = sbi->s_pid;
|
||||
obj.id = inode->i_ino + EXOFS_OBJ_OFF;
|
||||
exofs_make_credential(oi->i_cred, &obj);
|
||||
|
||||
or = osd_start_request(sbi->s_dev, GFP_KERNEL);
|
||||
if (unlikely(!or)) {
|
||||
EXOFS_ERR("exofs_new_inode: osd_start_request failed\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
ret = exofs_get_io_state(sbi, &ios);
|
||||
if (unlikely(ret)) {
|
||||
EXOFS_ERR("exofs_new_inode: exofs_get_io_state failed\n");
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
osd_req_create_object(or, &obj);
|
||||
ios->obj.id = exofs_oi_objno(oi);
|
||||
exofs_make_credential(oi->i_cred, &ios->obj);
|
||||
|
||||
/* increment the refcount so that the inode will still be around when we
|
||||
* reach the callback
|
||||
*/
|
||||
atomic_inc(&inode->i_count);
|
||||
|
||||
ret = exofs_async_op(or, create_done, inode, oi->i_cred);
|
||||
ios->done = create_done;
|
||||
ios->private = inode;
|
||||
ios->cred = oi->i_cred;
|
||||
ret = exofs_sbi_create(ios);
|
||||
if (ret) {
|
||||
atomic_dec(&inode->i_count);
|
||||
osd_end_request(or);
|
||||
return ERR_PTR(-EIO);
|
||||
exofs_put_io_state(ios);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
atomic_inc(&sbi->s_curr_pending);
|
||||
|
||||
|
@ -1135,11 +1125,11 @@ struct updatei_args {
|
|||
/*
|
||||
* Callback function from exofs_update_inode().
|
||||
*/
|
||||
static void updatei_done(struct osd_request *or, void *p)
|
||||
static void updatei_done(struct exofs_io_state *ios, void *p)
|
||||
{
|
||||
struct updatei_args *args = p;
|
||||
|
||||
osd_end_request(or);
|
||||
exofs_put_io_state(ios);
|
||||
|
||||
atomic_dec(&args->sbi->s_curr_pending);
|
||||
|
||||
|
@ -1155,8 +1145,7 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
|
|||
struct exofs_i_info *oi = exofs_i(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct exofs_sb_info *sbi = sb->s_fs_info;
|
||||
struct osd_obj_id obj = {sbi->s_pid, inode->i_ino + EXOFS_OBJ_OFF};
|
||||
struct osd_request *or;
|
||||
struct exofs_io_state *ios;
|
||||
struct osd_attr attr;
|
||||
struct exofs_fcb *fcb;
|
||||
struct updatei_args *args;
|
||||
|
@ -1193,18 +1182,16 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
|
|||
} else
|
||||
memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));
|
||||
|
||||
or = osd_start_request(sbi->s_dev, GFP_KERNEL);
|
||||
if (unlikely(!or)) {
|
||||
EXOFS_ERR("exofs_update_inode: osd_start_request failed.\n");
|
||||
ret = -ENOMEM;
|
||||
ret = exofs_get_io_state(sbi, &ios);
|
||||
if (unlikely(ret)) {
|
||||
EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
|
||||
goto free_args;
|
||||
}
|
||||
|
||||
osd_req_set_attributes(or, &obj);
|
||||
|
||||
attr = g_attr_inode_data;
|
||||
attr.val_ptr = fcb;
|
||||
osd_req_add_set_attr_list(or, &attr, 1);
|
||||
ios->out_attr_len = 1;
|
||||
ios->out_attr = &attr;
|
||||
|
||||
if (!obj_created(oi)) {
|
||||
EXOFS_DBGMSG("!obj_created\n");
|
||||
|
@ -1213,22 +1200,19 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
|
|||
EXOFS_DBGMSG("wait_event done\n");
|
||||
}
|
||||
|
||||
if (do_sync) {
|
||||
ret = exofs_sync_op(or, sbi->s_timeout, oi->i_cred);
|
||||
osd_end_request(or);
|
||||
goto free_args;
|
||||
} else {
|
||||
if (!do_sync) {
|
||||
args->sbi = sbi;
|
||||
ios->done = updatei_done;
|
||||
ios->private = args;
|
||||
}
|
||||
|
||||
ret = exofs_async_op(or, updatei_done, args, oi->i_cred);
|
||||
if (ret) {
|
||||
osd_end_request(or);
|
||||
goto free_args;
|
||||
}
|
||||
ret = exofs_oi_write(oi, ios);
|
||||
if (!do_sync && !ret) {
|
||||
atomic_inc(&sbi->s_curr_pending);
|
||||
goto out; /* deallocation in updatei_done */
|
||||
}
|
||||
|
||||
exofs_put_io_state(ios);
|
||||
free_args:
|
||||
kfree(args);
|
||||
out:
|
||||
|
@ -1245,11 +1229,12 @@ int exofs_write_inode(struct inode *inode, int wait)
|
|||
* Callback function from exofs_delete_inode() - don't have much cleaning up to
|
||||
* do.
|
||||
*/
|
||||
static void delete_done(struct osd_request *or, void *p)
|
||||
static void delete_done(struct exofs_io_state *ios, void *p)
|
||||
{
|
||||
struct exofs_sb_info *sbi;
|
||||
osd_end_request(or);
|
||||
sbi = p;
|
||||
struct exofs_sb_info *sbi = p;
|
||||
|
||||
exofs_put_io_state(ios);
|
||||
|
||||
atomic_dec(&sbi->s_curr_pending);
|
||||
}
|
||||
|
||||
|
@ -1263,8 +1248,7 @@ void exofs_delete_inode(struct inode *inode)
|
|||
struct exofs_i_info *oi = exofs_i(inode);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct exofs_sb_info *sbi = sb->s_fs_info;
|
||||
struct osd_obj_id obj = {sbi->s_pid, inode->i_ino + EXOFS_OBJ_OFF};
|
||||
struct osd_request *or;
|
||||
struct exofs_io_state *ios;
|
||||
int ret;
|
||||
|
||||
truncate_inode_pages(&inode->i_data, 0);
|
||||
|
@ -1281,25 +1265,26 @@ void exofs_delete_inode(struct inode *inode)
|
|||
|
||||
clear_inode(inode);
|
||||
|
||||
or = osd_start_request(sbi->s_dev, GFP_KERNEL);
|
||||
if (unlikely(!or)) {
|
||||
EXOFS_ERR("exofs_delete_inode: osd_start_request failed\n");
|
||||
ret = exofs_get_io_state(sbi, &ios);
|
||||
if (unlikely(ret)) {
|
||||
EXOFS_ERR("%s: exofs_get_io_state failed\n", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
osd_req_remove_object(or, &obj);
|
||||
|
||||
/* if we are deleting an obj that hasn't been created yet, wait */
|
||||
if (!obj_created(oi)) {
|
||||
BUG_ON(!obj_2bcreated(oi));
|
||||
wait_event(oi->i_wq, obj_created(oi));
|
||||
}
|
||||
|
||||
ret = exofs_async_op(or, delete_done, sbi, oi->i_cred);
|
||||
ios->obj.id = exofs_oi_objno(oi);
|
||||
ios->done = delete_done;
|
||||
ios->private = sbi;
|
||||
ios->cred = oi->i_cred;
|
||||
ret = exofs_sbi_remove(ios);
|
||||
if (ret) {
|
||||
EXOFS_ERR(
|
||||
"ERROR: @exofs_delete_inode exofs_async_op failed\n");
|
||||
osd_end_request(or);
|
||||
EXOFS_ERR("%s: exofs_sbi_remove failed\n", __func__);
|
||||
exofs_put_io_state(ios);
|
||||
return;
|
||||
}
|
||||
atomic_inc(&sbi->s_curr_pending);
|
||||
|
|
fs/exofs/ios.c | 390
|
@ -23,88 +23,327 @@
|
|||
*/
|
||||
|
||||
#include <scsi/scsi_device.h>
|
||||
#include <scsi/osd_sense.h>
|
||||
|
||||
#include "exofs.h"
|
||||
|
||||
int exofs_check_ok_resid(struct osd_request *or, u64 *in_resid, u64 *out_resid)
|
||||
{
|
||||
struct osd_sense_info osi;
|
||||
int ret = osd_req_decode_sense(or, &osi);
|
||||
|
||||
if (ret) { /* translate to Linux codes */
|
||||
if (osi.additional_code == scsi_invalid_field_in_cdb) {
|
||||
if (osi.cdb_field_offset == OSD_CFO_STARTING_BYTE)
|
||||
ret = -EFAULT;
|
||||
if (osi.cdb_field_offset == OSD_CFO_OBJECT_ID)
|
||||
ret = -ENOENT;
|
||||
else
|
||||
ret = -EINVAL;
|
||||
} else if (osi.additional_code == osd_quota_error)
|
||||
ret = -ENOSPC;
|
||||
else
|
||||
ret = -EIO;
|
||||
}
|
||||
|
||||
/* FIXME: should be include in osd_sense_info */
|
||||
if (in_resid)
|
||||
*in_resid = or->in.req ? or->in.req->resid_len : 0;
|
||||
|
||||
if (out_resid)
|
||||
*out_resid = or->out.req ? or->out.req->resid_len : 0;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj)
|
||||
{
|
||||
osd_sec_init_nosec_doall_caps(cred_a, obj, false, true);
|
||||
}
|
||||
|
||||
/*
|
||||
* Perform a synchronous OSD operation.
|
||||
*/
|
||||
int exofs_sync_op(struct osd_request *or, int timeout, uint8_t *credential)
|
||||
int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
|
||||
u64 offset, void *p, unsigned length)
|
||||
{
|
||||
struct osd_request *or = osd_start_request(od, GFP_KERNEL);
|
||||
/* struct osd_sense_info osi = {.key = 0};*/
|
||||
int ret;
|
||||
|
||||
or->timeout = timeout;
|
||||
ret = osd_finalize_request(or, 0, credential, NULL);
|
||||
if (ret) {
|
||||
if (unlikely(!or)) {
|
||||
EXOFS_DBGMSG("%s: osd_start_request failed.\n", __func__);
|
||||
return -ENOMEM;
|
||||
}
|
||||
ret = osd_req_read_kern(or, obj, offset, p, length);
|
||||
if (unlikely(ret)) {
|
||||
EXOFS_DBGMSG("%s: osd_req_read_kern failed.\n", __func__);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = osd_finalize_request(or, 0, cred, NULL);
|
||||
if (unlikely(ret)) {
|
||||
EXOFS_DBGMSG("Faild to osd_finalize_request() => %d\n", ret);
|
||||
return ret;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = osd_execute_request(or);
|
||||
|
||||
if (ret)
|
||||
if (unlikely(ret))
|
||||
EXOFS_DBGMSG("osd_execute_request() => %d\n", ret);
|
||||
/* osd_req_decode_sense(or, ret); */
|
||||
|
||||
out:
|
||||
osd_end_request(or);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Perform an asynchronous OSD operation.
|
||||
*/
|
||||
int exofs_async_op(struct osd_request *or, osd_req_done_fn *async_done,
|
||||
void *caller_context, u8 *cred)
|
||||
int exofs_get_io_state(struct exofs_sb_info *sbi, struct exofs_io_state** pios)
|
||||
{
|
||||
int ret;
|
||||
struct exofs_io_state *ios;
|
||||
|
||||
ret = osd_finalize_request(or, 0, cred, NULL);
|
||||
if (ret) {
|
||||
EXOFS_DBGMSG("Faild to osd_finalize_request() => %d\n", ret);
|
||||
return ret;
|
||||
/*TODO: Maybe use kmem_cach per sbi of size
|
||||
* exofs_io_state_size(sbi->s_numdevs)
|
||||
*/
|
||||
ios = kzalloc(exofs_io_state_size(1), GFP_KERNEL);
|
||||
if (unlikely(!ios)) {
|
||||
*pios = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = osd_execute_request_async(or, async_done, caller_context);
|
||||
ios->sbi = sbi;
|
||||
ios->obj.partition = sbi->s_pid;
|
||||
*pios = ios;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (ret)
|
||||
EXOFS_DBGMSG("osd_execute_request_async() => %d\n", ret);
|
||||
void exofs_put_io_state(struct exofs_io_state *ios)
|
||||
{
|
||||
if (ios) {
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < ios->numdevs; i++) {
|
||||
struct exofs_per_dev_state *per_dev = &ios->per_dev[i];
|
||||
|
||||
if (per_dev->or)
|
||||
osd_end_request(per_dev->or);
|
||||
if (per_dev->bio)
|
||||
bio_put(per_dev->bio);
|
||||
}
|
||||
|
||||
kfree(ios);
|
||||
}
|
||||
}
|
||||
|
||||
static void _sync_done(struct exofs_io_state *ios, void *p)
|
||||
{
|
||||
struct completion *waiting = p;
|
||||
|
||||
complete(waiting);
|
||||
}
|
||||
|
||||
static void _last_io(struct kref *kref)
|
||||
{
|
||||
struct exofs_io_state *ios = container_of(
|
||||
kref, struct exofs_io_state, kref);
|
||||
|
||||
ios->done(ios, ios->private);
|
||||
}
|
||||
|
||||
static void _done_io(struct osd_request *or, void *p)
|
||||
{
|
||||
struct exofs_io_state *ios = p;
|
||||
|
||||
kref_put(&ios->kref, _last_io);
|
||||
}
|
||||
|
||||
static int exofs_io_execute(struct exofs_io_state *ios)
|
||||
{
|
||||
DECLARE_COMPLETION_ONSTACK(wait);
|
||||
bool sync = (ios->done == NULL);
|
||||
int i, ret;
|
||||
|
||||
if (sync) {
|
||||
ios->done = _sync_done;
|
||||
ios->private = &wait;
|
||||
}
|
||||
|
||||
for (i = 0; i < ios->numdevs; i++) {
|
||||
struct osd_request *or = ios->per_dev[i].or;
|
||||
if (unlikely(!or))
|
||||
continue;
|
||||
|
||||
ret = osd_finalize_request(or, 0, ios->cred, NULL);
|
||||
if (unlikely(ret)) {
|
||||
EXOFS_DBGMSG("Faild to osd_finalize_request() => %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
kref_init(&ios->kref);
|
||||
|
||||
for (i = 0; i < ios->numdevs; i++) {
|
||||
struct osd_request *or = ios->per_dev[i].or;
|
||||
if (unlikely(!or))
|
||||
continue;
|
||||
|
||||
kref_get(&ios->kref);
|
||||
osd_execute_request_async(or, _done_io, ios);
|
||||
}
|
||||
|
||||
kref_put(&ios->kref, _last_io);
|
||||
ret = 0;
|
||||
|
||||
if (sync) {
|
||||
wait_for_completion(&wait);
|
||||
ret = exofs_check_io(ios, NULL);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr)
|
||||
int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
|
||||
{
|
||||
enum osd_err_priority acumulated_osd_err = 0;
|
||||
int acumulated_lin_err = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ios->numdevs; i++) {
|
||||
struct osd_sense_info osi;
|
||||
int ret = osd_req_decode_sense(ios->per_dev[i].or, &osi);
|
||||
|
||||
if (likely(!ret))
|
||||
continue;
|
||||
|
||||
if (unlikely(ret == -EFAULT)) {
|
||||
EXOFS_DBGMSG("%s: EFAULT Need page clear\n", __func__);
|
||||
/*FIXME: All the pages in this device range should:
|
||||
* clear_highpage(page);
|
||||
*/
|
||||
}
|
||||
|
||||
if (osi.osd_err_pri >= acumulated_osd_err) {
|
||||
acumulated_osd_err = osi.osd_err_pri;
|
||||
acumulated_lin_err = ret;
|
||||
}
|
||||
}
|
||||
|
||||
/* TODO: raid specific residual calculations */
|
||||
if (resid) {
|
||||
if (likely(!acumulated_lin_err))
|
||||
*resid = 0;
|
||||
else
|
||||
*resid = ios->length;
|
||||
}
|
||||
|
||||
return acumulated_lin_err;
|
||||
}
|
||||
|
||||
int exofs_sbi_create(struct exofs_io_state *ios)
|
||||
{
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < 1; i++) {
|
||||
struct osd_request *or;
|
||||
|
||||
or = osd_start_request(ios->sbi->s_dev, GFP_KERNEL);
|
||||
if (unlikely(!or)) {
|
||||
EXOFS_ERR("%s: osd_start_request failed\n", __func__);
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
ios->per_dev[i].or = or;
|
||||
ios->numdevs++;
|
||||
|
||||
osd_req_create_object(or, &ios->obj);
|
||||
}
|
||||
ret = exofs_io_execute(ios);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int exofs_sbi_remove(struct exofs_io_state *ios)
|
||||
{
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < 1; i++) {
|
||||
struct osd_request *or;
|
||||
|
||||
or = osd_start_request(ios->sbi->s_dev, GFP_KERNEL);
|
||||
if (unlikely(!or)) {
|
||||
EXOFS_ERR("%s: osd_start_request failed\n", __func__);
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
ios->per_dev[i].or = or;
|
||||
ios->numdevs++;
|
||||
|
||||
osd_req_remove_object(or, &ios->obj);
|
||||
}
|
||||
ret = exofs_io_execute(ios);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int exofs_sbi_write(struct exofs_io_state *ios)
|
||||
{
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < 1; i++) {
|
||||
struct osd_request *or;
|
||||
|
||||
or = osd_start_request(ios->sbi->s_dev, GFP_KERNEL);
|
||||
if (unlikely(!or)) {
|
||||
EXOFS_ERR("%s: osd_start_request failed\n", __func__);
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
ios->per_dev[i].or = or;
|
||||
ios->numdevs++;
|
||||
|
||||
if (ios->bio) {
|
||||
struct bio *bio;
|
||||
|
||||
bio = ios->bio;
|
||||
|
||||
osd_req_write(or, &ios->obj, ios->offset, bio,
|
||||
ios->length);
|
||||
/* EXOFS_DBGMSG("write sync=%d\n", sync);*/
|
||||
} else if (ios->kern_buff) {
|
||||
osd_req_write_kern(or, &ios->obj, ios->offset,
|
||||
ios->kern_buff, ios->length);
|
||||
/* EXOFS_DBGMSG("write_kern sync=%d\n", sync);*/
|
||||
} else {
|
||||
osd_req_set_attributes(or, &ios->obj);
|
||||
/* EXOFS_DBGMSG("set_attributes sync=%d\n", sync);*/
|
||||
}
|
||||
|
||||
if (ios->out_attr)
|
||||
osd_req_add_set_attr_list(or, ios->out_attr,
|
||||
ios->out_attr_len);
|
||||
|
||||
if (ios->in_attr)
|
||||
osd_req_add_get_attr_list(or, ios->in_attr,
|
||||
ios->in_attr_len);
|
||||
}
|
||||
ret = exofs_io_execute(ios);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int exofs_sbi_read(struct exofs_io_state *ios)
|
||||
{
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < 1; i++) {
|
||||
struct osd_request *or;
|
||||
|
||||
or = osd_start_request(ios->sbi->s_dev, GFP_KERNEL);
|
||||
if (unlikely(!or)) {
|
||||
EXOFS_ERR("%s: osd_start_request failed\n", __func__);
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
ios->per_dev[i].or = or;
|
||||
ios->numdevs++;
|
||||
|
||||
if (ios->bio) {
|
||||
osd_req_read(or, &ios->obj, ios->offset, ios->bio,
|
||||
ios->length);
|
||||
/* EXOFS_DBGMSG("read sync=%d\n", sync);*/
|
||||
} else if (ios->kern_buff) {
|
||||
osd_req_read_kern(or, &ios->obj, ios->offset,
|
||||
ios->kern_buff, ios->length);
|
||||
/* EXOFS_DBGMSG("read_kern sync=%d\n", sync);*/
|
||||
} else {
|
||||
osd_req_get_attributes(or, &ios->obj);
|
||||
/* EXOFS_DBGMSG("get_attributes sync=%d\n", sync);*/
|
||||
}
|
||||
|
||||
if (ios->out_attr)
|
||||
osd_req_add_set_attr_list(or, ios->out_attr,
|
||||
ios->out_attr_len);
|
||||
|
||||
if (ios->in_attr)
|
||||
osd_req_add_get_attr_list(or, ios->in_attr,
|
||||
ios->in_attr_len);
|
||||
}
|
||||
ret = exofs_io_execute(ios);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int extract_attr_from_ios(struct exofs_io_state *ios, struct osd_attr *attr)
|
||||
{
|
||||
struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */
|
||||
void *iter = NULL;
|
||||
|
@ -112,7 +351,8 @@ int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr)
|
|||
|
||||
do {
|
||||
nelem = 1;
|
||||
osd_req_decode_get_attr_list(or, &cur_attr, &nelem, &iter);
|
||||
osd_req_decode_get_attr_list(ios->per_dev[0].or,
|
||||
&cur_attr, &nelem, &iter);
|
||||
if ((cur_attr.attr_page == attr->attr_page) &&
|
||||
(cur_attr.attr_id == attr->attr_id)) {
|
||||
attr->len = cur_attr.len;
|
||||
|
@ -123,3 +363,43 @@ int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr)
|
|||
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
int exofs_oi_truncate(struct exofs_i_info *oi, u64 size)
|
||||
{
|
||||
struct exofs_sb_info *sbi = oi->vfs_inode.i_sb->s_fs_info;
|
||||
struct exofs_io_state *ios;
|
||||
struct osd_attr attr;
|
||||
__be64 newsize;
|
||||
int i, ret;
|
||||
|
||||
if (exofs_get_io_state(sbi, &ios))
|
||||
return -ENOMEM;
|
||||
|
||||
ios->obj.id = exofs_oi_objno(oi);
|
||||
ios->cred = oi->i_cred;
|
||||
|
||||
newsize = cpu_to_be64(size);
|
||||
attr = g_attr_logical_length;
|
||||
attr.val_ptr = &newsize;
|
||||
|
||||
for (i = 0; i < 1; i++) {
|
||||
struct osd_request *or;
|
||||
|
||||
or = osd_start_request(sbi->s_dev, GFP_KERNEL);
|
||||
if (unlikely(!or)) {
|
||||
EXOFS_ERR("%s: osd_start_request failed\n", __func__);
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
ios->per_dev[i].or = or;
|
||||
ios->numdevs++;
|
||||
|
||||
osd_req_set_attributes(or, &ios->obj);
|
||||
osd_req_add_set_attr_list(or, &attr, 1);
|
||||
}
|
||||
ret = exofs_io_execute(ios);
|
||||
|
||||
out:
|
||||
exofs_put_io_state(ios);
|
||||
return ret;
|
||||
}
|
||||
|
|
fs/exofs/super.c | 120
|
@ -203,49 +203,40 @@ int exofs_sync_fs(struct super_block *sb, int wait)
|
|||
{
|
||||
struct exofs_sb_info *sbi;
|
||||
struct exofs_fscb *fscb;
|
||||
struct osd_request *or;
|
||||
struct osd_obj_id obj;
|
||||
struct exofs_io_state *ios;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
fscb = kzalloc(sizeof(struct exofs_fscb), GFP_KERNEL);
|
||||
if (!fscb) {
|
||||
EXOFS_ERR("exofs_write_super: memory allocation failed.\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
lock_super(sb);
|
||||
sbi = sb->s_fs_info;
|
||||
fscb = &sbi->s_fscb;
|
||||
|
||||
ret = exofs_get_io_state(sbi, &ios);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ios->length = sizeof(*fscb);
|
||||
memset(fscb, 0, ios->length);
|
||||
fscb->s_nextid = cpu_to_le64(sbi->s_nextid);
|
||||
fscb->s_numfiles = cpu_to_le32(sbi->s_numfiles);
|
||||
fscb->s_magic = cpu_to_le16(sb->s_magic);
|
||||
fscb->s_newfs = 0;
|
||||
|
||||
or = osd_start_request(sbi->s_dev, GFP_KERNEL);
|
||||
if (unlikely(!or)) {
|
||||
EXOFS_ERR("exofs_write_super: osd_start_request failed.\n");
|
||||
goto out;
|
||||
}
|
||||
ios->obj.id = EXOFS_SUPER_ID;
|
||||
ios->offset = 0;
|
||||
ios->kern_buff = fscb;
|
||||
ios->cred = sbi->s_cred;
|
||||
|
||||
obj.partition = sbi->s_pid;
|
||||
obj.id = EXOFS_SUPER_ID;
|
||||
ret = osd_req_write_kern(or, &obj, 0, fscb, sizeof(*fscb));
|
||||
ret = exofs_sbi_write(ios);
|
||||
if (unlikely(ret)) {
|
||||
EXOFS_ERR("exofs_write_super: osd_req_write_kern failed.\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = exofs_sync_op(or, sbi->s_timeout, sbi->s_cred);
|
||||
if (unlikely(ret)) {
|
||||
EXOFS_ERR("exofs_write_super: exofs_sync_op failed.\n");
|
||||
EXOFS_ERR("%s: exofs_sbi_write failed.\n", __func__);
|
||||
goto out;
|
||||
}
|
||||
sb->s_dirt = 0;
|
||||
|
||||
out:
|
||||
if (or)
|
||||
osd_end_request(or);
|
||||
EXOFS_DBGMSG("s_nextid=0x%llx ret=%d\n", _LLU(sbi->s_nextid), ret);
|
||||
exofs_put_io_state(ios);
|
||||
unlock_super(sb);
|
||||
kfree(fscb);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -302,24 +293,23 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
|
|||
struct inode *root;
|
||||
struct exofs_mountopt *opts = data;
|
||||
struct exofs_sb_info *sbi; /*extended info */
|
||||
struct osd_dev *od; /* Master device */
|
||||
struct exofs_fscb fscb; /*on-disk superblock info */
|
||||
struct osd_request *or = NULL;
|
||||
struct osd_obj_id obj;
|
||||
int ret;
|
||||
|
||||
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
|
||||
if (!sbi)
|
||||
return -ENOMEM;
|
||||
sb->s_fs_info = sbi;
|
||||
|
||||
/* use mount options to fill superblock */
|
||||
sbi->s_dev = osduld_path_lookup(opts->dev_name);
|
||||
if (IS_ERR(sbi->s_dev)) {
|
||||
ret = PTR_ERR(sbi->s_dev);
|
||||
sbi->s_dev = NULL;
|
||||
od = osduld_path_lookup(opts->dev_name);
|
||||
if (IS_ERR(od)) {
|
||||
ret = PTR_ERR(od);
|
||||
goto free_sbi;
|
||||
}
|
||||
|
||||
sbi->s_dev = od;
|
||||
sbi->s_pid = opts->pid;
|
||||
sbi->s_timeout = opts->timeout;
|
||||
|
||||
|
@ -333,35 +323,13 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
|
|||
sb->s_bdev = NULL;
|
||||
sb->s_dev = 0;
|
||||
|
||||
/* read data from on-disk superblock object */
|
||||
obj.partition = sbi->s_pid;
|
||||
obj.id = EXOFS_SUPER_ID;
|
||||
exofs_make_credential(sbi->s_cred, &obj);
|
||||
|
||||
or = osd_start_request(sbi->s_dev, GFP_KERNEL);
|
||||
if (unlikely(!or)) {
|
||||
if (!silent)
|
||||
EXOFS_ERR(
|
||||
"exofs_fill_super: osd_start_request failed.\n");
|
||||
ret = -ENOMEM;
|
||||
ret = exofs_read_kern(od, sbi->s_cred, &obj, 0, &fscb, sizeof(fscb));
|
||||
if (unlikely(ret))
|
||||
goto free_sbi;
|
||||
}
|
||||
ret = osd_req_read_kern(or, &obj, 0, &fscb, sizeof(fscb));
|
||||
if (unlikely(ret)) {
|
||||
if (!silent)
|
||||
EXOFS_ERR(
|
||||
"exofs_fill_super: osd_req_read_kern failed.\n");
|
||||
ret = -ENOMEM;
|
||||
goto free_sbi;
|
||||
}
|
||||
|
||||
ret = exofs_sync_op(or, sbi->s_timeout, sbi->s_cred);
|
||||
if (unlikely(ret)) {
|
||||
if (!silent)
|
||||
EXOFS_ERR("exofs_fill_super: exofs_sync_op failed.\n");
|
||||
ret = -EIO;
|
||||
goto free_sbi;
|
||||
}
|
||||
|
||||
sb->s_magic = le16_to_cpu(fscb.s_magic);
|
||||
sbi->s_nextid = le64_to_cpu(fscb.s_nextid);
|
||||
|
@ -380,6 +348,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
|
|||
spin_lock_init(&sbi->s_next_gen_lock);
|
||||
|
||||
/* set up operation vectors */
|
||||
sb->s_fs_info = sbi;
|
||||
sb->s_op = &exofs_sops;
|
||||
sb->s_export_op = &exofs_export_ops;
|
||||
root = exofs_iget(sb, EXOFS_ROOT_ID - EXOFS_OBJ_OFF);
|
||||
|
@ -406,16 +375,14 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
|
|||
}
|
||||
|
||||
_exofs_print_device("Mounting", opts->dev_name, sbi->s_dev, sbi->s_pid);
|
||||
ret = 0;
|
||||
out:
|
||||
if (or)
|
||||
osd_end_request(or);
|
||||
return ret;
|
||||
return 0;
|
||||
|
||||
free_sbi:
|
||||
EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n",
|
||||
opts->dev_name, sbi->s_pid, ret);
|
||||
osduld_put_device(sbi->s_dev); /* NULL safe */
|
||||
kfree(sbi);
|
||||
goto out;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -444,7 +411,7 @@ static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
|||
{
|
||||
struct super_block *sb = dentry->d_sb;
|
||||
struct exofs_sb_info *sbi = sb->s_fs_info;
|
||||
struct osd_obj_id obj = {sbi->s_pid, 0};
|
||||
struct exofs_io_state *ios;
|
||||
struct osd_attr attrs[] = {
|
||||
ATTR_DEF(OSD_APAGE_PARTITION_QUOTAS,
|
||||
OSD_ATTR_PQ_CAPACITY_QUOTA, sizeof(__be64)),
|
||||
|
@ -453,26 +420,25 @@ static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
|||
};
|
||||
uint64_t capacity = ULLONG_MAX;
|
||||
uint64_t used = ULLONG_MAX;
|
||||
struct osd_request *or;
|
||||
uint8_t cred_a[OSD_CAP_LEN];
|
||||
int ret;
|
||||
|
||||
/* get used/capacity attributes */
|
||||
exofs_make_credential(cred_a, &obj);
|
||||
|
||||
or = osd_start_request(sbi->s_dev, GFP_KERNEL);
|
||||
if (unlikely(!or)) {
|
||||
EXOFS_DBGMSG("exofs_statfs: osd_start_request failed.\n");
|
||||
return -ENOMEM;
|
||||
ret = exofs_get_io_state(sbi, &ios);
|
||||
if (ret) {
|
||||
EXOFS_DBGMSG("exofs_get_io_state failed.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
osd_req_get_attributes(or, &obj);
|
||||
osd_req_add_get_attr_list(or, attrs, ARRAY_SIZE(attrs));
|
||||
ret = exofs_sync_op(or, sbi->s_timeout, cred_a);
|
||||
exofs_make_credential(cred_a, &ios->obj);
|
||||
ios->cred = sbi->s_cred;
|
||||
ios->in_attr = attrs;
|
||||
ios->in_attr_len = ARRAY_SIZE(attrs);
|
||||
|
||||
ret = exofs_sbi_read(ios);
|
||||
if (unlikely(ret))
|
||||
goto out;
|
||||
|
||||
ret = extract_attr_from_req(or, &attrs[0]);
|
||||
ret = extract_attr_from_ios(ios, &attrs[0]);
|
||||
if (likely(!ret)) {
|
||||
capacity = get_unaligned_be64(attrs[0].val_ptr);
|
||||
if (unlikely(!capacity))
|
||||
|
@ -480,7 +446,7 @@ static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
|||
} else
|
||||
EXOFS_DBGMSG("exofs_statfs: get capacity failed.\n");
|
||||
|
||||
ret = extract_attr_from_req(or, &attrs[1]);
|
||||
ret = extract_attr_from_ios(ios, &attrs[1]);
|
||||
if (likely(!ret))
|
||||
used = get_unaligned_be64(attrs[1].val_ptr);
|
||||
else
|
||||
|
@ -497,7 +463,7 @@ static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
|||
buf->f_namelen = EXOFS_NAME_LEN;
|
||||
|
||||
out:
|
||||
osd_end_request(or);
|
||||
exofs_put_io_state(ios);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|