Merge branch 'testing' of github.com:ceph/ceph-client into v3.8-rc5-testing

Alex Elder 2013-01-30 07:54:34 -06:00
Parents: 949db153b6 1ec3911dbd
Commit: 969e5aa3b0
14 changed files with 647 additions and 606 deletions

The diff for one file is not shown because it is too large.

View file

@@ -611,8 +611,16 @@ retry:
 if (flags & CEPH_CAP_FLAG_AUTH)
 ci->i_auth_cap = cap;
-else if (ci->i_auth_cap == cap)
+else if (ci->i_auth_cap == cap) {
 ci->i_auth_cap = NULL;
+spin_lock(&mdsc->cap_dirty_lock);
+if (!list_empty(&ci->i_dirty_item)) {
+dout(" moving %p to cap_dirty_migrating\n", inode);
+list_move(&ci->i_dirty_item,
+&mdsc->cap_dirty_migrating);
+}
+spin_unlock(&mdsc->cap_dirty_lock);
+}
 dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
 inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
@@ -1460,7 +1468,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
 struct ceph_mds_client *mdsc = fsc->mdsc;
 struct inode *inode = &ci->vfs_inode;
 struct ceph_cap *cap;
-int file_wanted, used;
+int file_wanted, used, cap_used;
 int took_snap_rwsem = 0;   /* true if mdsc->snap_rwsem held */
 int issued, implemented, want, retain, revoking, flushing = 0;
 int mds = -1;   /* keep track of how far we've gone through i_caps list
@@ -1563,9 +1571,14 @@ retry_locked:
 /* NOTE: no side-effects allowed, until we take s_mutex */
+cap_used = used;
+if (ci->i_auth_cap && cap != ci->i_auth_cap)
+cap_used &= ~ci->i_auth_cap->issued;
+
 revoking = cap->implemented & ~cap->issued;
-dout(" mds%d cap %p issued %s implemented %s revoking %s\n",
+dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
 cap->mds, cap, ceph_cap_string(cap->issued),
+ceph_cap_string(cap_used),
 ceph_cap_string(cap->implemented),
 ceph_cap_string(revoking));
@@ -1593,7 +1606,7 @@ retry_locked:
 }
 /* completed revocation? going down and there are no caps? */
-if (revoking && (revoking & used) == 0) {
+if (revoking && (revoking & cap_used) == 0) {
 dout("completed revocation of %s\n",
 ceph_cap_string(cap->implemented & ~cap->issued));
 goto ack;
@@ -1670,8 +1683,8 @@ ack:
 sent++;
 /* __send_cap drops i_ceph_lock */
-delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
-retain, flushing, NULL);
+delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used,
+want, retain, flushing, NULL);
 goto retry; /* retake i_ceph_lock and restart our cap scan. */
 }
@@ -2416,7 +2429,9 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
 dout("mds wanted %s -> %s\n",
 ceph_cap_string(le32_to_cpu(grant->wanted)),
 ceph_cap_string(wanted));
-grant->wanted = cpu_to_le32(wanted);
+/* imported cap may not have correct mds_wanted */
+if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT)
+check_caps = 1;
 }
 cap->seq = seq;
@@ -2820,6 +2835,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
 (unsigned)seq);
+if (op == CEPH_CAP_OP_IMPORT)
+ceph_add_cap_releases(mdsc, session);
+
 /* lookup ino */
 inode = ceph_find_inode(sb, vino);
 ci = ceph_inode(inode);
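
The ceph_check_caps() hunks above mask out of `used`, for a non-auth cap, any bits that the auth cap also has issued, and then test revocation against that `cap_used` value. The standalone sketch below (plain userspace C, with a made-up bit value rather than the real CEPH_CAP_* encoding) illustrates why the old `revoking & used` test could stall a revocation that the new `revoking & cap_used` test lets complete:

#include <stdio.h>

/* illustrative bit only; not the kernel's CEPH_CAP_* encoding */
#define CAP_FILE_RD 0x1

int main(void)
{
    int used = CAP_FILE_RD;        /* bits this client is actively using        */
    int auth_issued = CAP_FILE_RD; /* ...and all of them come from the auth cap */
    int revoking = CAP_FILE_RD;    /* a non-auth MDS wants its copy back        */

    int cap_used = used & ~auth_issued;    /* the masking added above */

    printf("old test: %s\n", (revoking & used) == 0 ? "revocation completes" : "stalls");
    printf("new test: %s\n", (revoking & cap_used) == 0 ? "revocation completes" : "stalls");
    return 0;
}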

View file

@@ -243,6 +243,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 err = ceph_mdsc_do_request(mdsc,
 (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
 req);
+if (err)
+goto out_err;
+
 err = ceph_handle_snapdir(req, dentry, err);
 if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
 err = ceph_handle_notrace_create(dir, dentry);
@@ -263,6 +266,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 err = finish_no_open(file, dn);
 } else {
 dout("atomic_open finish_open on dn %p\n", dn);
+if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
+*opened |= FILE_CREATED;
+}
 err = finish_open(file, dentry, ceph_open, opened);
 }

View file

@@ -194,7 +194,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
 return -EFAULT;
 down_read(&osdc->map_sem);
-r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, &len,
+r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, len,
 &dl.object_no, &dl.object_offset,
 &olen);
 if (r < 0)

View file

@@ -232,6 +232,30 @@ bad:
 return -EIO;
 }
+/*
+ * parse create results
+ */
+static int parse_reply_info_create(void **p, void *end,
+struct ceph_mds_reply_info_parsed *info,
+int features)
+{
+if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
+if (*p == end) {
+info->has_create_ino = false;
+} else {
+info->has_create_ino = true;
+info->ino = ceph_decode_64(p);
+}
+}
+
+if (unlikely(*p != end))
+goto bad;
+return 0;
+
+bad:
+return -EIO;
+}
+
 /*
 * parse extra results
 */
@@ -241,8 +265,12 @@ static int parse_reply_info_extra(void **p, void *end,
 {
 if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
 return parse_reply_info_filelock(p, end, info, features);
-else
+else if (info->head->op == CEPH_MDS_OP_READDIR)
 return parse_reply_info_dir(p, end, info, features);
+else if (info->head->op == CEPH_MDS_OP_CREATE)
+return parse_reply_info_create(p, end, info, features);
+else
+return -EIO;
 }
@@ -2170,7 +2198,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 mutex_lock(&req->r_fill_mutex);
 err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
 if (err == 0) {
-if (result == 0 && req->r_op != CEPH_MDS_OP_GETFILELOCK &&
+if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
+req->r_op == CEPH_MDS_OP_LSSNAP) &&
 rinfo->dir_nr)
 ceph_readdir_prepopulate(req, req->r_session);
 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
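
parse_reply_info_create() above decodes an optional trailing field: if the peer advertised CEPH_FEATURE_REPLY_CREATE_INODE and bytes remain before `end`, a 64-bit created-inode number follows, while an empty remainder means the feature was negotiated but no ino was sent. A hedged userspace model of that pattern follows (the decode helper is a stand-in for ceph_decode_64() and ignores endianness):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t decode_64(void **p)    /* stand-in for ceph_decode_64() */
{
    uint64_t v;

    memcpy(&v, *p, sizeof(v));
    *p = (char *)*p + sizeof(v);
    return v;
}

int main(void)
{
    uint64_t payload = 0x10000000123ULL;    /* pretend reply body */
    void *p = &payload;
    void *end = (char *)&payload + sizeof(payload);
    bool has_create_ino;
    uint64_t ino = 0;

    if (p == end) {
        has_create_ino = false;    /* old-style empty "create" blob */
    } else {
        has_create_ino = true;
        ino = decode_64(&p);
    }
    if (p != end)
        return 1;                  /* trailing junk: the kernel returns -EIO */

    printf("has_create_ino=%d ino=0x%llx\n", has_create_ino,
           (unsigned long long)ino);
    return 0;
}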

View file

@@ -74,6 +74,12 @@ struct ceph_mds_reply_info_parsed {
 struct ceph_mds_reply_info_in *dir_in;
 u8 dir_complete, dir_end;
 };
+
+/* for create results */
+struct {
+bool has_create_ino;
+u64 ino;
+};
 };
 /* encoded blob describing snapshot contexts for certain

View file

@@ -14,13 +14,19 @@
 #define CEPH_FEATURE_DIRLAYOUTHASH (1<<7)
 /* bits 8-17 defined by user-space; not supported yet here */
 #define CEPH_FEATURE_CRUSH_TUNABLES (1<<18)
+/* bits 19-24 defined by user-space; not supported yet here */
+#define CEPH_FEATURE_CRUSH_TUNABLES2 (1<<25)
+/* bit 26 defined by user-space; not supported yet here */
+#define CEPH_FEATURE_REPLY_CREATE_INODE (1<<27)
 /*
 * Features supported.
 */
 #define CEPH_FEATURES_SUPPORTED_DEFAULT \
 (CEPH_FEATURE_NOSRCADDR | \
-CEPH_FEATURE_CRUSH_TUNABLES)
+CEPH_FEATURE_CRUSH_TUNABLES | \
+CEPH_FEATURE_CRUSH_TUNABLES2 | \
+CEPH_FEATURE_REPLY_CREATE_INODE)
 #define CEPH_FEATURES_REQUIRED_DEFAULT \
 (CEPH_FEATURE_NOSRCADDR)
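
The two new feature bits follow the existing pattern: the client advertises CEPH_FEATURES_SUPPORTED_DEFAULT and tests the peer's mask before relying on a behaviour. A small sketch of that test, using the same shift values as the header (macro names shortened for this standalone example):

#include <stdio.h>

#define FEATURE_CRUSH_TUNABLES      (1 << 18)
#define FEATURE_CRUSH_TUNABLES2     (1 << 25)
#define FEATURE_REPLY_CREATE_INODE  (1 << 27)

int main(void)
{
    /* pretend this is the feature mask negotiated with the peer */
    unsigned long peer_features = FEATURE_CRUSH_TUNABLES |
                                  FEATURE_REPLY_CREATE_INODE;

    if (peer_features & FEATURE_REPLY_CREATE_INODE)
        printf("MDS replies may carry the created inode number\n");
    if (!(peer_features & FEATURE_CRUSH_TUNABLES2))
        printf("peer predates CRUSH_TUNABLES2; chooseleaf_descend_once stays 0\n");
    return 0;
}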

View file

@@ -52,10 +52,10 @@ static inline int ceph_has_room(void **p, void *end, size_t n)
 return end >= *p && n <= end - *p;
 }
 #define ceph_decode_need(p, end, n, bad) \
 do { \
 if (!likely(ceph_has_room(p, end, n))) \
 goto bad; \
 } while (0)
 #define ceph_decode_64_safe(p, end, v, bad) \
@@ -99,8 +99,8 @@ static inline int ceph_has_room(void **p, void *end, size_t n)
 *
 * There are two possible failures:
 *   - converting the string would require accessing memory at or
-*     beyond the "end" pointer provided (-E
-*   - memory could not be allocated for the result
+*     beyond the "end" pointer provided (-ERANGE)
+*   - memory could not be allocated for the result (-ENOMEM)
 */
 static inline char *ceph_extract_encoded_string(void **p, void *end,
 size_t *lenp, gfp_t gfp)
@@ -217,10 +217,10 @@ static inline void ceph_encode_string(void **p, void *end,
 *p += len;
 }
 #define ceph_encode_need(p, end, n, bad) \
 do { \
 if (!likely(ceph_has_room(p, end, n))) \
 goto bad; \
 } while (0)
 #define ceph_encode_64_safe(p, end, v, bad) \
@@ -231,12 +231,17 @@ static inline void ceph_encode_string(void **p, void *end,
 #define ceph_encode_32_safe(p, end, v, bad) \
 do { \
 ceph_encode_need(p, end, sizeof(u32), bad); \
 ceph_encode_32(p, v); \
 } while (0)
 #define ceph_encode_16_safe(p, end, v, bad) \
 do { \
 ceph_encode_need(p, end, sizeof(u16), bad); \
 ceph_encode_16(p, v); \
+} while (0)
+#define ceph_encode_8_safe(p, end, v, bad) \
+do { \
+ceph_encode_need(p, end, sizeof(u8), bad); \
+ceph_encode_8(p, v); \
 } while (0)
 #define ceph_encode_copy_safe(p, end, pv, n, bad) \
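
ceph_encode_8_safe() completes the family of bounds-checked encoders: check that the buffer has room, emit the value, advance the cursor, or jump to the caller's error label. A userspace model of the same pattern (macro and helper names here are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define encode_need(p, end, n, bad)                              \
    do {                                                         \
        if ((char *)(end) - (char *)*(p) < (long)(n))            \
            goto bad;                                            \
    } while (0)

#define encode_8_safe(p, end, v, bad)                            \
    do {                                                         \
        encode_need(p, end, sizeof(uint8_t), bad);               \
        *(uint8_t *)*(p) = (v);                                  \
        *(p) = (char *)*(p) + sizeof(uint8_t);                   \
    } while (0)

int main(void)
{
    char buf[1];
    void *p = buf;
    void *end = buf + sizeof(buf);

    encode_8_safe(&p, end, 0x2a, bad);  /* fits */
    encode_8_safe(&p, end, 0x2b, bad);  /* no room left: jumps to bad */
    return 0;
bad:
    printf("stopped after %ld byte(s), buffer full\n", (long)((char *)p - buf));
    return 1;
}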

View file

@@ -10,6 +10,7 @@
 #include <linux/ceph/osdmap.h>
 #include <linux/ceph/messenger.h>
 #include <linux/ceph/auth.h>
+#include <linux/ceph/pagelist.h>
 /*
 * Maximum object name size
@@ -22,7 +23,6 @@ struct ceph_snap_context;
 struct ceph_osd_request;
 struct ceph_osd_client;
 struct ceph_authorizer;
-struct ceph_pagelist;
 /*
 * completion callback for async writepages
@@ -95,7 +95,7 @@ struct ceph_osd_request {
 struct bio *r_bio;	/* instead of pages */
 #endif
-struct ceph_pagelist *r_trail;	/* trailing part of the data */
+struct ceph_pagelist r_trail;	/* trailing part of the data */
 };
 struct ceph_osd_event {
@@ -157,7 +157,6 @@ struct ceph_osd_client {
 struct ceph_osd_req_op {
 u16 op;           /* CEPH_OSD_OP_* */
-u32 flags;        /* CEPH_OSD_FLAG_* */
 union {
 struct {
 u64 offset, length;
@@ -207,29 +206,24 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
 extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
 struct ceph_msg *msg);
-extern int ceph_calc_raw_layout(struct ceph_osd_client *osdc,
-struct ceph_file_layout *layout,
-u64 snapid,
+extern int ceph_calc_raw_layout(struct ceph_file_layout *layout,
 u64 off, u64 *plen, u64 *bno,
 struct ceph_osd_request *req,
 struct ceph_osd_req_op *op);
 extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
-int flags,
 struct ceph_snap_context *snapc,
-struct ceph_osd_req_op *ops,
+unsigned int num_op,
 bool use_mempool,
-gfp_t gfp_flags,
-struct page **pages,
-struct bio *bio);
+gfp_t gfp_flags);
 extern void ceph_osdc_build_request(struct ceph_osd_request *req,
-u64 off, u64 *plen,
+u64 off, u64 len,
+unsigned int num_op,
 struct ceph_osd_req_op *src_ops,
 struct ceph_snap_context *snapc,
-struct timespec *mtime,
-const char *oid,
-int oid_len);
+u64 snap_id,
+struct timespec *mtime);
 extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
 struct ceph_file_layout *layout,
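
Much of the churn in this header follows from embedding the trailing-data pagelist directly in struct ceph_osd_request instead of pointing at a separately allocated one. A toy illustration of the difference (plain C with stand-in types, not the real ceph structures):

#include <stdio.h>

struct pagelist { unsigned long length; };

/* old layout: trail allocated on demand, NULL-checked and freed separately */
struct request_old { struct pagelist *trail; };

/* new layout: trail always present, initialized in place */
struct request_new { struct pagelist trail; };

int main(void)
{
    struct request_new req = { .trail = { .length = 0 } };

    req.trail.length += 16;    /* e.g. an appended xattr name/value pair */
    printf("trail holds %lu bytes; nothing separate to free\n",
           req.trail.length);
    return 0;
}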

View file

@@ -110,7 +110,7 @@ extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
 /* calculate mapping of a file extent to an object */
 extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
-u64 off, u64 *plen,
+u64 off, u64 len,
 u64 *bno, u64 *oxoff, u64 *oxlen);
 /* calculate mapping of object to a placement group */

View file

@@ -162,6 +162,8 @@ struct crush_map {
 __u32 choose_local_fallback_tries;
 /* choose attempts before giving up */
 __u32 choose_total_tries;
+/* attempt chooseleaf inner descent once; on failure retry outer descent */
+__u32 chooseleaf_descend_once;
 };

View file

@@ -287,6 +287,7 @@ static int is_out(const struct crush_map *map, const __u32 *weight, int item, in
 * @outpos: our position in that vector
 * @firstn: true if choosing "first n" items, false if choosing "indep"
 * @recurse_to_leaf: true if we want one device under each item of given type
+ * @descend_once: true if we should only try one descent before giving up
 * @out2: second output vector for leaf items (if @recurse_to_leaf)
 */
 static int crush_choose(const struct crush_map *map,
@@ -295,7 +296,7 @@ static int crush_choose(const struct crush_map *map,
 int x, int numrep, int type,
 int *out, int outpos,
 int firstn, int recurse_to_leaf,
-int *out2)
+int descend_once, int *out2)
 {
 int rep;
 unsigned int ftotal, flocal;
@@ -391,7 +392,7 @@ static int crush_choose(const struct crush_map *map,
 }
 reject = 0;
-if (recurse_to_leaf) {
+if (!collide && recurse_to_leaf) {
 if (item < 0) {
 if (crush_choose(map,
 map->buckets[-1-item],
@@ -399,6 +400,7 @@ static int crush_choose(const struct crush_map *map,
 x, outpos+1, 0,
 out2, outpos,
 firstn, 0,
+map->chooseleaf_descend_once,
 NULL) <= outpos)
 /* didn't get leaf */
 reject = 1;
@@ -422,7 +424,10 @@ reject:
 ftotal++;
 flocal++;
-if (collide && flocal <= map->choose_local_tries)
+if (reject && descend_once)
+/* let outer call try again */
+skip_rep = 1;
+else if (collide && flocal <= map->choose_local_tries)
 /* retry locally a few times */
 retry_bucket = 1;
 else if (map->choose_local_fallback_tries > 0 &&
@@ -485,6 +490,7 @@ int crush_do_rule(const struct crush_map *map,
 int i, j;
 int numrep;
 int firstn;
+const int descend_once = 0;
 if ((__u32)ruleno >= map->max_rules) {
 dprintk(" bad ruleno %d\n", ruleno);
@@ -544,7 +550,8 @@ int crush_do_rule(const struct crush_map *map,
 curstep->arg2,
 o+osize, j,
 firstn,
-recurse_to_leaf, c+osize);
+recurse_to_leaf,
+descend_once, c+osize);
 }
 }
 if (recurse_to_leaf)
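
The chooseleaf_descend_once tunable changes how a failed leaf descent is retried: instead of retrying within the same inner subtree up to the local-try limits, the inner pass gives up at once and the outer descent picks a different item. The toy model below (ordinary C, not the real CRUSH mapper) only illustrates that control-flow difference:

#include <stdio.h>

/* stand-in for the recursive crush_choose() leaf descent; item 0 is "bad" */
static int descend_ok(int item)
{
    return item != 0;
}

static int choose(int descend_once, int local_tries)
{
    for (int item = 0, tries = 0; tries < local_tries; tries++, item++) {
        if (descend_ok(item))
            return item;        /* got a leaf under this item */
        if (descend_once)
            return -1;          /* skip_rep: let the outer call try again */
        /* otherwise retry locally with the next item */
    }
    return -1;
}

int main(void)
{
    printf("legacy tunables:      chose item %d\n", choose(0, 3));
    printf("descend_once enabled: chose item %d\n", choose(1, 3));
    return 0;
}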

View file

@@ -32,52 +32,43 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
 static void __send_request(struct ceph_osd_client *osdc,
 struct ceph_osd_request *req);
-static int op_needs_trail(int op)
-{
-switch (op) {
-case CEPH_OSD_OP_GETXATTR:
-case CEPH_OSD_OP_SETXATTR:
-case CEPH_OSD_OP_CMPXATTR:
-case CEPH_OSD_OP_CALL:
-case CEPH_OSD_OP_NOTIFY:
-return 1;
-default:
-return 0;
-}
-}
-
 static int op_has_extent(int op)
 {
 return (op == CEPH_OSD_OP_READ ||
 op == CEPH_OSD_OP_WRITE);
 }
-int ceph_calc_raw_layout(struct ceph_osd_client *osdc,
-struct ceph_file_layout *layout,
-u64 snapid,
+int ceph_calc_raw_layout(struct ceph_file_layout *layout,
 u64 off, u64 *plen, u64 *bno,
 struct ceph_osd_request *req,
 struct ceph_osd_req_op *op)
 {
-struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
 u64 orig_len = *plen;
 u64 objoff, objlen;	/* extent in object */
 int r;
-reqhead->snapid = cpu_to_le64(snapid);
-
 /* object extent? */
-r = ceph_calc_file_object_mapping(layout, off, plen, bno,
+r = ceph_calc_file_object_mapping(layout, off, orig_len, bno,
 &objoff, &objlen);
 if (r < 0)
 return r;
-if (*plen < orig_len)
+if (objlen < orig_len) {
+*plen = objlen;
 dout(" skipping last %llu, final file extent %llu~%llu\n",
 orig_len - *plen, off, *plen);
+}
 if (op_has_extent(op->op)) {
+u32 osize = le32_to_cpu(layout->fl_object_size);
 op->extent.offset = objoff;
 op->extent.length = objlen;
+if (op->extent.truncate_size <= off - objoff) {
+op->extent.truncate_size = 0;
+} else {
+op->extent.truncate_size -= off - objoff;
+if (op->extent.truncate_size > osize)
+op->extent.truncate_size = osize;
+}
 }
 req->r_num_pages = calc_pages_for(off, *plen);
 req->r_page_alignment = off & ~PAGE_MASK;
@@ -115,8 +106,7 @@ EXPORT_SYMBOL(ceph_calc_raw_layout);
 *
 * fill osd op in request message.
 */
-static int calc_layout(struct ceph_osd_client *osdc,
-struct ceph_vino vino,
+static int calc_layout(struct ceph_vino vino,
 struct ceph_file_layout *layout,
 u64 off, u64 *plen,
 struct ceph_osd_request *req,
@@ -125,8 +115,7 @@ static int calc_layout(struct ceph_osd_client *osdc,
 u64 bno;
 int r;
-r = ceph_calc_raw_layout(osdc, layout, vino.snap, off,
-plen, &bno, req, op);
+r = ceph_calc_raw_layout(layout, off, plen, &bno, req, op);
 if (r < 0)
 return r;
@@ -163,10 +152,7 @@ void ceph_osdc_release_request(struct kref *kref)
 bio_put(req->r_bio);
 #endif
 ceph_put_snap_context(req->r_snapc);
-if (req->r_trail) {
-ceph_pagelist_release(req->r_trail);
-kfree(req->r_trail);
-}
+ceph_pagelist_release(&req->r_trail);
 if (req->r_mempool)
 mempool_free(req, req->r_osdc->req_mempool);
 else
@@ -174,34 +160,14 @@ void ceph_osdc_release_request(struct kref *kref)
 }
 EXPORT_SYMBOL(ceph_osdc_release_request);
-static int get_num_ops(struct ceph_osd_req_op *ops, int *needs_trail)
-{
-int i = 0;
-
-if (needs_trail)
-*needs_trail = 0;
-while (ops[i].op) {
-if (needs_trail && op_needs_trail(ops[i].op))
-*needs_trail = 1;
-i++;
-}
-
-return i;
-}
-
 struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
-int flags,
 struct ceph_snap_context *snapc,
-struct ceph_osd_req_op *ops,
+unsigned int num_op,
 bool use_mempool,
-gfp_t gfp_flags,
-struct page **pages,
-struct bio *bio)
+gfp_t gfp_flags)
 {
 struct ceph_osd_request *req;
 struct ceph_msg *msg;
-int needs_trail;
-int num_op = get_num_ops(ops, &needs_trail);
 size_t msg_size = sizeof(struct ceph_osd_request_head);
 msg_size += num_op*sizeof(struct ceph_osd_op);
@@ -228,10 +194,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 INIT_LIST_HEAD(&req->r_req_lru_item);
 INIT_LIST_HEAD(&req->r_osd_item);
-req->r_flags = flags;
-
-WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
-
 /* create reply message */
 if (use_mempool)
 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
@@ -244,15 +206,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 }
 req->r_reply = msg;
-/* allocate space for the trailing data */
-if (needs_trail) {
-req->r_trail = kmalloc(sizeof(struct ceph_pagelist), gfp_flags);
-if (!req->r_trail) {
-ceph_osdc_put_request(req);
-return NULL;
-}
-ceph_pagelist_init(req->r_trail);
-}
+ceph_pagelist_init(&req->r_trail);
 /* create request message; allow space for oid */
 msg_size += MAX_OBJ_NAME_SIZE;
@@ -270,13 +224,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 memset(msg->front.iov_base, 0, msg->front.iov_len);
 req->r_request = msg;
-req->r_pages = pages;
-#ifdef CONFIG_BLOCK
-if (bio) {
-req->r_bio = bio;
-bio_get(req->r_bio);
-}
-#endif
 return req;
 }
@@ -304,29 +251,25 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
 case CEPH_OSD_OP_GETXATTR:
 case CEPH_OSD_OP_SETXATTR:
 case CEPH_OSD_OP_CMPXATTR:
-BUG_ON(!req->r_trail);
-
 dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
 dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
 dst->xattr.cmp_op = src->xattr.cmp_op;
 dst->xattr.cmp_mode = src->xattr.cmp_mode;
-ceph_pagelist_append(req->r_trail, src->xattr.name,
+ceph_pagelist_append(&req->r_trail, src->xattr.name,
 src->xattr.name_len);
-ceph_pagelist_append(req->r_trail, src->xattr.val,
+ceph_pagelist_append(&req->r_trail, src->xattr.val,
 src->xattr.value_len);
 break;
 case CEPH_OSD_OP_CALL:
-BUG_ON(!req->r_trail);
-
 dst->cls.class_len = src->cls.class_len;
 dst->cls.method_len = src->cls.method_len;
 dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
-ceph_pagelist_append(req->r_trail, src->cls.class_name,
+ceph_pagelist_append(&req->r_trail, src->cls.class_name,
 src->cls.class_len);
-ceph_pagelist_append(req->r_trail, src->cls.method_name,
+ceph_pagelist_append(&req->r_trail, src->cls.method_name,
 src->cls.method_len);
-ceph_pagelist_append(req->r_trail, src->cls.indata,
+ceph_pagelist_append(&req->r_trail, src->cls.indata,
 src->cls.indata_len);
 break;
 case CEPH_OSD_OP_ROLLBACK:
@@ -339,11 +282,9 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
 __le32 prot_ver = cpu_to_le32(src->watch.prot_ver);
 __le32 timeout = cpu_to_le32(src->watch.timeout);
-BUG_ON(!req->r_trail);
-ceph_pagelist_append(req->r_trail,
+ceph_pagelist_append(&req->r_trail,
 &prot_ver, sizeof(prot_ver));
-ceph_pagelist_append(req->r_trail,
+ceph_pagelist_append(&req->r_trail,
 &timeout, sizeof(timeout));
 }
 case CEPH_OSD_OP_NOTIFY_ACK:
@@ -365,25 +306,25 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
 *
 */
 void ceph_osdc_build_request(struct ceph_osd_request *req,
-u64 off, u64 *plen,
+u64 off, u64 len, unsigned int num_op,
 struct ceph_osd_req_op *src_ops,
-struct ceph_snap_context *snapc,
-struct timespec *mtime,
-const char *oid,
-int oid_len)
+struct ceph_snap_context *snapc, u64 snap_id,
+struct timespec *mtime)
 {
 struct ceph_msg *msg = req->r_request;
 struct ceph_osd_request_head *head;
 struct ceph_osd_req_op *src_op;
 struct ceph_osd_op *op;
 void *p;
-int num_op = get_num_ops(src_ops, NULL);
 size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
 int flags = req->r_flags;
 u64 data_len = 0;
 int i;
+WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
+
 head = msg->front.iov_base;
+head->snapid = cpu_to_le64(snap_id);
 op = (void *)(head + 1);
 p = (void *)(op + num_op);
@@ -393,23 +334,19 @@ void ceph_osdc_build_request(struct ceph_osd_request *req,
 head->flags = cpu_to_le32(flags);
 if (flags & CEPH_OSD_FLAG_WRITE)
 ceph_encode_timespec(&head->mtime, mtime);
+BUG_ON(num_op > (unsigned int) ((u16) -1));
 head->num_ops = cpu_to_le16(num_op);
 /* fill in oid */
-head->object_len = cpu_to_le32(oid_len);
-memcpy(p, oid, oid_len);
-p += oid_len;
+head->object_len = cpu_to_le32(req->r_oid_len);
+memcpy(p, req->r_oid, req->r_oid_len);
+p += req->r_oid_len;
 src_op = src_ops;
-while (src_op->op) {
-osd_req_encode_op(req, op, src_op);
-src_op++;
-op++;
-}
+while (num_op--)
+osd_req_encode_op(req, op++, src_op++);
-if (req->r_trail)
-data_len += req->r_trail->length;
+data_len += req->r_trail.length;
 if (snapc) {
 head->snap_seq = cpu_to_le64(snapc->seq);
@@ -422,7 +359,7 @@ void ceph_osdc_build_request(struct ceph_osd_request *req,
 if (flags & CEPH_OSD_FLAG_WRITE) {
 req->r_request->hdr.data_off = cpu_to_le16(off);
-req->r_request->hdr.data_len = cpu_to_le32(*plen + data_len);
+req->r_request->hdr.data_len = cpu_to_le32(len + data_len);
 } else if (data_len) {
 req->r_request->hdr.data_off = 0;
 req->r_request->hdr.data_len = cpu_to_le32(data_len);
@@ -462,31 +399,30 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 bool use_mempool, int num_reply,
 int page_align)
 {
-struct ceph_osd_req_op ops[3];
+struct ceph_osd_req_op ops[2];
 struct ceph_osd_request *req;
+unsigned int num_op = 1;
 int r;
+memset(&ops, 0, sizeof ops);
+
 ops[0].op = opcode;
 ops[0].extent.truncate_seq = truncate_seq;
 ops[0].extent.truncate_size = truncate_size;
-ops[0].payload_len = 0;
 if (do_sync) {
 ops[1].op = CEPH_OSD_OP_STARTSYNC;
-ops[1].payload_len = 0;
-ops[2].op = 0;
-} else
-ops[1].op = 0;
+num_op++;
+}
-req = ceph_osdc_alloc_request(osdc, flags,
-snapc, ops,
-use_mempool,
-GFP_NOFS, NULL, NULL);
+req = ceph_osdc_alloc_request(osdc, snapc, num_op, use_mempool,
+GFP_NOFS);
 if (!req)
 return ERR_PTR(-ENOMEM);
+req->r_flags = flags;
+
 /* calculate max write size */
-r = calc_layout(osdc, vino, layout, off, plen, req, ops);
+r = calc_layout(vino, layout, off, plen, req, ops);
 if (r < 0)
 return ERR_PTR(r);
 req->r_file_layout = *layout;  /* keep a copy */
@@ -496,10 +432,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 req->r_num_pages = calc_pages_for(page_align, *plen);
 req->r_page_alignment = page_align;
-ceph_osdc_build_request(req, off, plen, ops,
-snapc,
-mtime,
-req->r_oid, req->r_oid_len);
+ceph_osdc_build_request(req, off, *plen, num_op, ops,
+snapc, vino.snap, mtime);
 return req;
 }
@@ -739,31 +673,35 @@ static void remove_old_osds(struct ceph_osd_client *osdc)
 */
 static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
 {
-struct ceph_osd_request *req;
-int ret = 0;
+struct ceph_entity_addr *peer_addr;
 dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
 if (list_empty(&osd->o_requests) &&
 list_empty(&osd->o_linger_requests)) {
 __remove_osd(osdc, osd);
-ret = -ENODEV;
-} else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
-&osd->o_con.peer_addr,
-sizeof(osd->o_con.peer_addr)) == 0 &&
-!ceph_con_opened(&osd->o_con)) {
+
+return -ENODEV;
+}
+
+peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
+if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
+!ceph_con_opened(&osd->o_con)) {
+struct ceph_osd_request *req;
+
 dout(" osd addr hasn't changed and connection never opened,"
 " letting msgr retry");
 /* touch each r_stamp for handle_timeout()'s benfit */
 list_for_each_entry(req, &osd->o_requests, r_osd_item)
 req->r_stamp = jiffies;
-ret = -EAGAIN;
-} else {
-ceph_con_close(&osd->o_con);
-ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
-&osdc->osdmap->osd_addr[osd->o_osd]);
-osd->o_incarnation++;
-}
-return ret;
+
+return -EAGAIN;
+}
+
+ceph_con_close(&osd->o_con);
+ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
+osd->o_incarnation++;
+
+return 0;
 }
 static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
@@ -1706,7 +1644,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
 #ifdef CONFIG_BLOCK
 req->r_request->bio = req->r_bio;
 #endif
-req->r_request->trail = req->r_trail;
+req->r_request->trail = &req->r_trail;
 register_request(osdc, req);
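
One behavioural addition buried in the first hunk of this file is that ceph_calc_raw_layout() now converts the file-relative truncate_size into object-relative terms and clamps it to the object size. A worked example of that arithmetic (standalone C, with made-up sizes):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t osize = 4u << 20;            /* 4 MiB objects (example)      */
    uint64_t off = 9u << 20;              /* file offset of this extent   */
    uint64_t objoff = 1u << 20;           /* offset within the object     */
    uint64_t truncate_size = 10u << 20;   /* file truncated at 10 MiB     */

    uint64_t obj_start = off - objoff;    /* file offset where the object begins */

    if (truncate_size <= obj_start) {
        truncate_size = 0;                /* object lies wholly beyond the
                                           * truncation point */
    } else {
        truncate_size -= obj_start;       /* make it object-relative */
        if (truncate_size > osize)
            truncate_size = osize;        /* clamp to the object size */
    }

    printf("object-relative truncate_size: %llu bytes\n",
           (unsigned long long)truncate_size);
    return 0;
}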

View file

@@ -13,26 +13,18 @@
 char *ceph_osdmap_state_str(char *str, int len, int state)
 {
-int flag = 0;
-
 if (!len)
-goto done;
+return str;
-*str = '\0';
-if (state) {
-if (state & CEPH_OSD_EXISTS) {
+if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
+snprintf(str, len, "exists, up");
+else if (state & CEPH_OSD_EXISTS)
 snprintf(str, len, "exists");
-flag = 1;
-}
-if (state & CEPH_OSD_UP) {
-snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""),
-"up");
-flag = 1;
-}
-} else {
+else if (state & CEPH_OSD_UP)
+snprintf(str, len, "up");
+else
 snprintf(str, len, "doesn't exist");
-}
-done:
+
 return str;
 }
@@ -170,6 +162,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 c->choose_local_tries = 2;
 c->choose_local_fallback_tries = 5;
 c->choose_total_tries = 19;
+c->chooseleaf_descend_once = 0;
 ceph_decode_need(p, end, 4*sizeof(u32), bad);
 magic = ceph_decode_32(p);
@@ -336,6 +329,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 dout("crush decode tunable choose_total_tries = %d",
 c->choose_total_tries);
+ceph_decode_need(p, end, sizeof(u32), done);
+c->chooseleaf_descend_once = ceph_decode_32(p);
+dout("crush decode tunable chooseleaf_descend_once = %d",
+c->chooseleaf_descend_once);
+
 done:
 dout("crush_decode success\n");
 return c;
@@ -1010,7 +1008,7 @@ bad:
 * pass a stride back to the caller.
 */
 int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
-u64 off, u64 *plen,
+u64 off, u64 len,
 u64 *ono,
 u64 *oxoff, u64 *oxlen)
 {
@@ -1021,7 +1019,7 @@ int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
 u32 su_per_object;
 u64 t, su_offset;
-dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
+dout("mapping %llu~%llu osize %u fl_su %u\n", off, len,
 osize, su);
 if (su == 0 || sc == 0)
 goto invalid;
@@ -1054,11 +1052,10 @@ int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
 /*
 * Calculate the length of the extent being written to the selected
- * object. This is the minimum of the full length requested (plen) or
+ * object. This is the minimum of the full length requested (len) or
 * the remainder of the current stripe being written to.
 */
-*oxlen = min_t(u64, *plen, su - su_offset);
-*plen = *oxlen;
+*oxlen = min_t(u64, len, su - su_offset);
 dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
 return 0;
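
The ceph_calc_file_object_mapping() change above makes the length a pass-by-value input and leaves it untouched; only *oxlen reports how much of the request lands in the first stripe unit. A small worked example of that calculation (standalone C, with an illustrative stripe geometry):

#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b)
{
    return a < b ? a : b;
}

int main(void)
{
    uint64_t su = 1u << 20;                    /* stripe unit: 1 MiB        */
    uint64_t off = (3u << 20) + (512u << 10);  /* request offset: 3.5 MiB   */
    uint64_t len = 2u << 20;                   /* request length: 2 MiB     */

    uint64_t su_offset = off % su;             /* position inside the stripe unit */
    uint64_t oxlen = min_u64(len, su - su_offset);

    printf("first object extent: %llu of %llu requested bytes\n",
           (unsigned long long)oxlen, (unsigned long long)len);
    return 0;
}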