ceph: cache layout in parent dir on first sync create
If a create is done, then typically we'll end up writing to the file soon afterward. We don't want to wait for the reply before doing that when doing an async create, so that means we need the layout for the new file before we've gotten the response from the MDS. All files created in a directory will initially inherit the same layout, so copy off the requisite info from the first synchronous create in the directory, and save it in a new i_cached_layout field. Zero out the layout when we lose Dc caps in the dir. Signed-off-by: Jeff Layton <jlayton@kernel.org> Reviewed-by: "Yan, Zheng" <zyan@redhat.com> Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
This commit is contained in:
Parent
6deb8008a8
Commit
785892fe88
|
@ -561,14 +561,14 @@ static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
|
|||
spin_unlock(&mdsc->cap_delay_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Common issue checks for add_cap, handle_cap_grant.
|
||||
*/
|
||||
/* Common issue checks for add_cap, handle_cap_grant. */
|
||||
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
|
||||
unsigned issued)
|
||||
{
|
||||
unsigned had = __ceph_caps_issued(ci, NULL);
|
||||
|
||||
lockdep_assert_held(&ci->i_ceph_lock);
|
||||
|
||||
/*
|
||||
* Each time we receive FILE_CACHE anew, we increment
|
||||
* i_rdcache_gen.
|
||||
|
@ -593,6 +593,13 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
|
|||
__ceph_dir_clear_complete(ci);
|
||||
}
|
||||
}
|
||||
|
||||
/* Wipe saved layout if we're losing DIR_CREATE caps */
|
||||
if (S_ISDIR(ci->vfs_inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) &&
|
||||
!(issued & CEPH_CAP_DIR_CREATE)) {
|
||||
ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
|
||||
memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -430,6 +430,23 @@ out:
|
|||
return err;
|
||||
}
|
||||
|
||||
/* Clone the layout from a synchronous create, if the dir now has Dc caps */
static void
cache_file_layout(struct inode *dst, struct inode *src)
{
	/*
	 * dst is the parent directory, src is the file that was just
	 * created synchronously in it (see the ceph_atomic_open caller:
	 * cache_file_layout(dir, newino)).  Files created in a directory
	 * initially inherit the same layout, so stash src's layout in the
	 * directory's i_cached_layout for use by later async creates.
	 */
	struct ceph_inode_info *cdst = ceph_inode(dst);
	struct ceph_inode_info *csrc = ceph_inode(src);

	spin_lock(&cdst->i_ceph_lock);
	/*
	 * Only cache when the directory currently holds Dc
	 * (CEPH_CAP_DIR_CREATE) caps and nothing valid is cached yet --
	 * an already-valid i_cached_layout is left untouched.
	 */
	if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
	    !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
		/*
		 * Copy the layout first, then publish a properly
		 * refcounted pool_ns string via rcu_assign_pointer,
		 * overwriting the raw pointer value the memcpy brought
		 * over.  ceph_try_get_string may return NULL if the ref
		 * can't be taken; the cached pool_ns is then NULL.
		 */
		memcpy(&cdst->i_cached_layout, &csrc->i_layout,
			sizeof(cdst->i_cached_layout));
		rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
				   ceph_try_get_string(csrc->i_layout.pool_ns));
	}
	spin_unlock(&cdst->i_ceph_lock);
}
|
||||
|
||||
/*
|
||||
* Do a lookup + open with a single request. If we get a non-existent
|
||||
|
@ -518,7 +535,10 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
|
|||
} else {
|
||||
dout("atomic_open finish_open on dn %p\n", dn);
|
||||
if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
|
||||
ceph_init_inode_acls(d_inode(dentry), &as_ctx);
|
||||
struct inode *newino = d_inode(dentry);
|
||||
|
||||
cache_file_layout(dir, newino);
|
||||
ceph_init_inode_acls(newino, &as_ctx);
|
||||
file->f_mode |= FMODE_CREATED;
|
||||
}
|
||||
err = finish_open(file, dentry, ceph_open);
|
||||
|
|
|
@ -447,6 +447,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
|
|||
ci->i_max_files = 0;
|
||||
|
||||
memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
|
||||
memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
|
||||
RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
|
||||
|
||||
ci->i_fragtree = RB_ROOT;
|
||||
|
@ -587,6 +588,7 @@ void ceph_evict_inode(struct inode *inode)
|
|||
ceph_buffer_put(ci->i_xattrs.prealloc_blob);
|
||||
|
||||
ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
|
||||
ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
|
||||
}
|
||||
|
||||
static inline blkcnt_t calc_inode_blocks(u64 size)
|
||||
|
|
|
@ -3535,8 +3535,13 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
|
|||
cap->cap_gen = cap->session->s_cap_gen;
|
||||
|
||||
/* These are lost when the session goes away */
|
||||
if (S_ISDIR(inode->i_mode))
|
||||
if (S_ISDIR(inode->i_mode)) {
|
||||
if (cap->issued & CEPH_CAP_DIR_CREATE) {
|
||||
ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
|
||||
memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
|
||||
}
|
||||
cap->issued &= ~CEPH_CAP_ANY_DIR_OPS;
|
||||
}
|
||||
|
||||
if (recon_state->msg_version >= 2) {
|
||||
rec.v2.cap_id = cpu_to_le64(cap->cap_id);
|
||||
|
|
|
@ -326,6 +326,7 @@ struct ceph_inode_info {
|
|||
|
||||
struct ceph_dir_layout i_dir_layout;
|
||||
struct ceph_file_layout i_layout;
|
||||
struct ceph_file_layout i_cached_layout; // for async creates
|
||||
char *i_symlink;
|
||||
|
||||
/* for dirs */
|
||||
|
|
Loading…
Reference in new issue