2013-04-05 10:27:12 +04:00
|
|
|
|
2010-04-07 02:14:15 +04:00
|
|
|
#include <linux/ceph/ceph_debug.h>
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2010-04-07 02:14:15 +04:00
|
|
|
#include <linux/module.h>
|
2009-10-06 22:31:10 +04:00
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/highmem.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/pagemap.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/uaccess.h>
|
2010-04-07 02:01:27 +04:00
|
|
|
#ifdef CONFIG_BLOCK
|
|
|
|
#include <linux/bio.h>
|
|
|
|
#endif
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2010-04-07 02:14:15 +04:00
|
|
|
#include <linux/ceph/libceph.h>
|
|
|
|
#include <linux/ceph/osd_client.h>
|
|
|
|
#include <linux/ceph/messenger.h>
|
|
|
|
#include <linux/ceph/decode.h>
|
|
|
|
#include <linux/ceph/auth.h>
|
|
|
|
#include <linux/ceph/pagelist.h>
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2010-03-02 00:02:00 +03:00
|
|
|
#define OSD_OP_FRONT_LEN 4096
|
|
|
|
#define OSD_OPREPLY_FRONT_LEN 512
|
2010-01-14 04:03:23 +03:00
|
|
|
|
2013-05-01 21:43:04 +04:00
|
|
|
static struct kmem_cache *ceph_osd_request_cache;
|
|
|
|
|
2010-05-20 12:40:19 +04:00
|
|
|
static const struct ceph_connection_operations osd_con_ops;
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2013-02-15 21:42:29 +04:00
|
|
|
static void __send_queued(struct ceph_osd_client *osdc);
|
2011-01-18 07:34:08 +03:00
|
|
|
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
|
2011-03-22 01:07:16 +03:00
|
|
|
static void __register_request(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_request *req);
|
2014-09-03 14:41:45 +04:00
|
|
|
static void __unregister_request(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_request *req);
|
2011-03-22 01:07:16 +03:00
|
|
|
static void __unregister_linger_request(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_request *req);
|
2014-09-03 14:41:45 +04:00
|
|
|
static void __enqueue_request(struct ceph_osd_request *req);
|
2012-01-04 00:34:34 +04:00
|
|
|
static void __send_request(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_request *req);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Implement client access to distributed object storage cluster.
|
|
|
|
*
|
|
|
|
* All data objects are stored within a cluster/cloud of OSDs, or
|
|
|
|
* "object storage devices." (Note that Ceph OSDs have _nothing_ to
|
|
|
|
* do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply
|
|
|
|
* remote daemons serving up and coordinating consistent and safe
|
|
|
|
* access to storage.
|
|
|
|
*
|
|
|
|
* Cluster membership and the mapping of data objects onto storage devices
|
|
|
|
* are described by the osd map.
|
|
|
|
*
|
|
|
|
* We keep track of pending OSD requests (read, write), resubmit
|
|
|
|
* requests to different OSDs when the cluster topology/data layout
|
|
|
|
* change, or retry the affected requests when the communications
|
|
|
|
* channel with an OSD is reset.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* calculate the mapping of a file extent onto an object, and fill out the
|
|
|
|
* request accordingly. shorten extent as necessary if it crosses an
|
|
|
|
* object boundary.
|
|
|
|
*
|
|
|
|
* fill osd op in request message.
|
|
|
|
*/
|
2013-02-16 08:10:17 +04:00
|
|
|
/*
 * Calculate the mapping of a file extent (off~*plen) onto a single
 * object, shortening the extent if it crosses an object boundary.
 *
 * On success, *objnum/*objoff/*objlen describe the resulting object
 * extent and *plen is trimmed to the portion that fits in that
 * object.  Returns 0 on success or a negative errno from
 * ceph_calc_file_object_mapping().
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (r < 0)
		return r;
	if (*objlen < orig_len) {
		/* extent crossed an object boundary; shorten to fit */
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

	return 0;
}
|
|
|
|
|
2013-04-03 10:28:57 +04:00
|
|
|
/* Reset an osd_data descriptor to a zeroed "carries no data" state. */
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}
|
|
|
|
|
2013-04-05 10:27:12 +04:00
|
|
|
/*
 * Set up an osd_data descriptor to reference a page vector.
 * @own_pages: if true, the pages are released when the descriptor is
 * torn down (see ceph_osd_data_release()).
 */
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}
|
|
|
|
|
2013-04-05 10:27:12 +04:00
|
|
|
/* Set up an osd_data descriptor to reference a pagelist. */
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_BLOCK
|
2013-04-05 10:27:12 +04:00
|
|
|
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
|
2013-04-03 10:28:57 +04:00
|
|
|
struct bio *bio, size_t bio_length)
|
|
|
|
{
|
|
|
|
osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
|
|
|
|
osd_data->bio = bio;
|
|
|
|
osd_data->bio_length = bio_length;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_BLOCK */
|
|
|
|
|
2013-04-15 23:50:36 +04:00
|
|
|
/*
 * Return the address of the given osd_data field (typ.fld) of op
 * number 'whch' within request 'oreq'.  BUGs if the op index is out
 * of range.
 */
#define osd_req_op_data(oreq, whch, typ, fld)	\
	({					\
		BUG_ON(whch >= (oreq)->r_num_ops);	\
		&(oreq)->r_ops[whch].typ.fld;	\
	})
|
|
|
|
|
2013-02-11 22:33:24 +04:00
|
|
|
/* Return the raw_data_in descriptor of op 'which' of the request. */
static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}
|
|
|
|
|
2013-04-05 10:27:12 +04:00
|
|
|
/* Return the extent osd_data descriptor of op 'which' of the request. */
struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);
|
|
|
|
|
|
|
|
/* Return the class-call response_data descriptor of op 'which'. */
struct ceph_osd_data *
osd_req_op_cls_response_data(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	return osd_req_op_data(osd_req, which, cls, response_data);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data);	/* ??? */
|
|
|
|
|
2013-02-11 22:33:24 +04:00
|
|
|
/* Attach a page vector as the raw incoming data of op 'which'. */
void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);
|
|
|
|
|
2013-04-05 10:27:12 +04:00
|
|
|
/* Attach a page vector as the extent data of op 'which'. */
void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
|
|
|
|
|
|
|
|
/* Attach a pagelist as the extent data of op 'which'. */
void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
|
|
|
|
|
|
|
|
#ifdef CONFIG_BLOCK
|
|
|
|
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
|
2013-04-15 23:50:36 +04:00
|
|
|
unsigned int which, struct bio *bio, size_t bio_length)
|
2013-04-05 10:27:12 +04:00
|
|
|
{
|
|
|
|
struct ceph_osd_data *osd_data;
|
2013-04-15 23:50:36 +04:00
|
|
|
|
|
|
|
osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
|
2013-04-05 10:27:12 +04:00
|
|
|
ceph_osd_data_bio_init(osd_data, bio, bio_length);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
|
|
|
|
#endif /* CONFIG_BLOCK */
|
|
|
|
|
|
|
|
/* Attach a pagelist as the class-call request_info of op 'which'. */
static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
|
|
|
|
|
2013-04-05 23:46:02 +04:00
|
|
|
/* Attach a pagelist as the class-call request_data of op 'which'. */
void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
|
|
|
|
|
2013-04-20 00:34:49 +04:00
|
|
|
/* Attach a page vector as the class-call request_data of op 'which'. */
void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);
|
|
|
|
|
2013-04-05 10:27:12 +04:00
|
|
|
/* Attach a page vector as the class-call response_data of op 'which'. */
void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
|
|
|
|
|
2013-04-03 10:28:58 +04:00
|
|
|
/*
 * Return the byte length carried by an osd_data descriptor,
 * regardless of its underlying representation.  Unrecognized types
 * warn and report zero.
 */
static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}
|
|
|
|
|
2013-04-03 10:28:57 +04:00
|
|
|
/*
 * Release an osd_data descriptor: drop owned pages if any, then reset
 * it to the "no data" state.
 */
static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	}
	ceph_osd_data_init(osd_data);
}
|
|
|
|
|
|
|
|
/*
 * Release all osd_data descriptors attached to op 'which', dispatching
 * on the opcode to find which descriptors that op type carries.
 */
static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
				unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		/* class calls carry three separate data items */
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	default:
		/* other op types carry no osd_data */
		break;
	}
}
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
/*
|
|
|
|
* requests
|
|
|
|
*/
|
2014-06-20 14:14:42 +04:00
|
|
|
/*
 * kref release callback: tear down a request once its last reference
 * is dropped.  The request must already be unlinked from all osd
 * client lists and trees (the WARN_ONs check this).
 */
static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!list_empty(&req->r_req_lru_item));
	WARN_ON(!list_empty(&req->r_osd_item));
	WARN_ON(!list_empty(&req->r_linger_item));
	WARN_ON(!list_empty(&req->r_linger_osd_item));
	WARN_ON(req->r_osd);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply) {
		/* stop the messenger from delivering into r_reply first */
		ceph_msg_revoke_incoming(req->r_reply);
		ceph_msg_put(req->r_reply);
	}

	/* release per-op data before freeing the request itself */
	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	ceph_put_snap_context(req->r_snapc);
	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else
		kmem_cache_free(ceph_osd_request_cache, req);

}
|
2014-06-20 14:14:42 +04:00
|
|
|
|
|
|
|
/* Take a reference on an osd request. */
void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     atomic_read(&req->r_kref.refcount));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);
|
|
|
|
|
|
|
|
/* Drop a reference on an osd request; frees it on the last put. */
void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     atomic_read(&req->r_kref.refcount));
	kref_put(&req->r_kref, ceph_osdc_release_request);
}
EXPORT_SYMBOL(ceph_osdc_put_request);
|
2010-04-07 02:01:27 +04:00
|
|
|
|
2010-04-07 01:51:47 +04:00
|
|
|
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
|
2009-10-06 22:31:10 +04:00
|
|
|
struct ceph_snap_context *snapc,
|
2013-02-26 04:11:12 +04:00
|
|
|
unsigned int num_ops,
|
2010-04-07 01:51:47 +04:00
|
|
|
bool use_mempool,
|
2012-11-14 07:11:15 +04:00
|
|
|
gfp_t gfp_flags)
|
2009-10-06 22:31:10 +04:00
|
|
|
{
|
|
|
|
struct ceph_osd_request *req;
|
|
|
|
struct ceph_msg *msg;
|
2013-02-26 04:11:12 +04:00
|
|
|
size_t msg_size;
|
|
|
|
|
2013-04-04 06:32:51 +04:00
|
|
|
BUILD_BUG_ON(CEPH_OSD_MAX_OP > U16_MAX);
|
|
|
|
BUG_ON(num_ops > CEPH_OSD_MAX_OP);
|
|
|
|
|
2013-02-26 04:11:12 +04:00
|
|
|
msg_size = 4 + 4 + 8 + 8 + 4+8;
|
|
|
|
msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
|
|
|
|
msg_size += 1 + 8 + 4 + 4; /* pg_t */
|
2014-01-27 19:40:18 +04:00
|
|
|
msg_size += 4 + CEPH_MAX_OID_NAME_LEN; /* oid */
|
2013-02-26 04:11:12 +04:00
|
|
|
msg_size += 2 + num_ops*sizeof(struct ceph_osd_op);
|
|
|
|
msg_size += 8; /* snapid */
|
|
|
|
msg_size += 8; /* snap_seq */
|
|
|
|
msg_size += 8 * (snapc ? snapc->num_snaps : 0); /* snaps */
|
|
|
|
msg_size += 4;
|
2009-10-06 22:31:10 +04:00
|
|
|
|
|
|
|
if (use_mempool) {
|
2010-04-07 01:51:47 +04:00
|
|
|
req = mempool_alloc(osdc->req_mempool, gfp_flags);
|
2009-10-06 22:31:10 +04:00
|
|
|
memset(req, 0, sizeof(*req));
|
|
|
|
} else {
|
2013-05-01 21:43:04 +04:00
|
|
|
req = kmem_cache_zalloc(ceph_osd_request_cache, gfp_flags);
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
|
|
|
if (req == NULL)
|
2010-04-02 03:06:19 +04:00
|
|
|
return NULL;
|
2009-10-06 22:31:10 +04:00
|
|
|
|
|
|
|
req->r_osdc = osdc;
|
|
|
|
req->r_mempool = use_mempool;
|
2013-04-04 06:32:51 +04:00
|
|
|
req->r_num_ops = num_ops;
|
2010-04-07 02:01:27 +04:00
|
|
|
|
2009-12-08 00:37:03 +03:00
|
|
|
kref_init(&req->r_kref);
|
2009-10-06 22:31:10 +04:00
|
|
|
init_completion(&req->r_completion);
|
|
|
|
init_completion(&req->r_safe_completion);
|
2012-12-17 22:23:48 +04:00
|
|
|
RB_CLEAR_NODE(&req->r_node);
|
2009-10-06 22:31:10 +04:00
|
|
|
INIT_LIST_HEAD(&req->r_unsafe_item);
|
2011-03-22 01:07:16 +03:00
|
|
|
INIT_LIST_HEAD(&req->r_linger_item);
|
2014-06-20 14:14:41 +04:00
|
|
|
INIT_LIST_HEAD(&req->r_linger_osd_item);
|
2011-09-16 22:13:17 +04:00
|
|
|
INIT_LIST_HEAD(&req->r_req_lru_item);
|
2012-07-10 01:31:41 +04:00
|
|
|
INIT_LIST_HEAD(&req->r_osd_item);
|
|
|
|
|
2014-01-27 19:40:20 +04:00
|
|
|
req->r_base_oloc.pool = -1;
|
libceph: follow redirect replies from osds
Follow redirect replies from osds, for details see ceph.git commit
fbbe3ad1220799b7bb00ea30fce581c5eadaf034.
v1 (current) version of redirect reply consists of oloc and oid, which
expands to pool, key, nspace, hash and oid. However, server-side code
that would populate anything other than pool doesn't exist yet, and
hence this commit adds support for pool redirects only. To make sure
that future server-side updates don't break us, we decode all fields
and, if any of key, nspace, hash or oid have a non-default value, error
out with "corrupt osd_op_reply ..." message.
Signed-off-by: Ilya Dryomov <ilya.dryomov@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2014-01-27 19:40:20 +04:00
|
|
|
req->r_target_oloc.pool = -1;
|
2014-01-27 19:40:18 +04:00
|
|
|
|
2010-03-02 00:02:00 +03:00
|
|
|
/* create reply message */
|
|
|
|
if (use_mempool)
|
|
|
|
msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
|
|
|
|
else
|
|
|
|
msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
|
2011-08-10 02:03:46 +04:00
|
|
|
OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
|
2010-04-02 03:06:19 +04:00
|
|
|
if (!msg) {
|
2010-03-02 00:02:00 +03:00
|
|
|
ceph_osdc_put_request(req);
|
2010-04-02 03:06:19 +04:00
|
|
|
return NULL;
|
2010-03-02 00:02:00 +03:00
|
|
|
}
|
|
|
|
req->r_reply = msg;
|
|
|
|
|
|
|
|
/* create request message; allow space for oid */
|
2009-10-06 22:31:10 +04:00
|
|
|
if (use_mempool)
|
2009-10-15 04:36:07 +04:00
|
|
|
msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
|
2009-10-06 22:31:10 +04:00
|
|
|
else
|
2011-08-10 02:03:46 +04:00
|
|
|
msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
|
2010-04-02 03:06:19 +04:00
|
|
|
if (!msg) {
|
2009-10-06 22:31:10 +04:00
|
|
|
ceph_osdc_put_request(req);
|
2010-04-02 03:06:19 +04:00
|
|
|
return NULL;
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
2010-04-07 02:01:27 +04:00
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
memset(msg->front.iov_base, 0, msg->front.iov_len);
|
2010-04-07 01:51:47 +04:00
|
|
|
|
|
|
|
req->r_request = msg;
|
|
|
|
|
|
|
|
return req;
|
|
|
|
}
|
2010-04-07 02:14:15 +04:00
|
|
|
EXPORT_SYMBOL(ceph_osdc_alloc_request);
|
2010-04-07 01:51:47 +04:00
|
|
|
|
2013-03-14 05:50:00 +04:00
|
|
|
/*
 * Return true iff @opcode is one of the known OSD op codes.  The case
 * list is generated from the canonical __CEPH_FORALL_OSD_OPS table so
 * it stays in sync with the protocol definition.
 */
static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}
|
|
|
|
|
libceph: define source request op functions
The rbd code has a function that allocates and populates a
ceph_osd_req_op structure (the in-core version of an osd request
operation). When reviewed, Josh suggested two things: that the
big varargs function might be better split into type-specific
functions; and that this functionality really belongs in the osd
client rather than rbd.
This patch implements both of Josh's suggestions. It breaks
up the rbd function into separate functions and defines them
in the osd client module as exported interfaces. Unlike the
rbd version, however, the functions don't allocate an osd_req_op
structure; they are provided the address of one and that is
initialized instead.
The rbd function has been eliminated and calls to it have been
replaced by calls to the new routines. The rbd code now now use a
stack (struct) variable to hold the op rather than allocating and
freeing it each time.
For now only the capabilities used by rbd are implemented.
Implementing all the other osd op types, and making the rest of the
code use it will be done separately, in the next few patches.
Note that only the extent, cls, and watch portions of the
ceph_osd_req_op structure are currently used. Delete the others
(xattr, pgls, and snap) from its definition so nobody thinks it's
actually implemented or needed. We can add it back again later
if needed, when we know it's been tested.
This (and a few follow-on patches) resolves:
http://tracker.ceph.com/issues/3861
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
2013-03-14 05:50:00 +04:00
|
|
|
/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 *
 * Zeroes op slot 'which' of the request, records the opcode and
 * flags, and returns a pointer to the slot for further setup by the
 * type-specific init functions.  BUGs on an out-of-range index or an
 * unknown opcode.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}
|
|
|
|
|
2013-02-11 22:33:24 +04:00
|
|
|
/* Public wrapper for _osd_req_op_init(); discards the op pointer. */
void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);
|
|
|
|
|
2013-04-05 10:27:11 +04:00
|
|
|
/*
 * Set up an extent-style op (read/write/zero/truncate) in slot @which
 * of @osd_req.  Only a write carries outgoing data, so only then does
 * the op's payload length reflect the extent length.
 */
void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);

	/* This initializer is only valid for the extent opcodes. */
	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;

	/* Only writes ship data with the request. */
	op->payload_len = (opcode == CEPH_OSD_OP_WRITE) ? length : 0;
}
EXPORT_SYMBOL(osd_req_op_extent_init);
|
|
|
|
|
2013-04-05 10:27:11 +04:00
|
|
|
/*
 * Shrink the extent length of a previously initialized extent op and
 * adjust the op's payload length accordingly.  Growing an extent is
 * not supported.
 */
void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 old_length;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	old_length = op->extent.length;

	if (length == old_length)
		return;		/* nothing to do */

	/* Extents may only shrink after setup. */
	BUG_ON(length > old_length);

	op->extent.length = length;
	op->payload_len -= old_length - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);
|
|
|
|
|
2013-04-05 10:27:11 +04:00
|
|
|
void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
|
2013-04-05 23:46:02 +04:00
|
|
|
u16 opcode, const char *class, const char *method)
|
libceph: define source request op functions
The rbd code has a function that allocates and populates a
ceph_osd_req_op structure (the in-core version of an osd request
operation). When reviewed, Josh suggested two things: that the
big varargs function might be better split into type-specific
functions; and that this functionality really belongs in the osd
client rather than rbd.
This patch implements both of Josh's suggestions. It breaks
up the rbd function into separate functions and defines them
in the osd client module as exported interfaces. Unlike the
rbd version, however, the functions don't allocate an osd_req_op
structure; they are provided the address of one and that is
initialized instead.
The rbd function has been eliminated and calls to it have been
replaced by calls to the new routines. The rbd code now now use a
stack (struct) variable to hold the op rather than allocating and
freeing it each time.
For now only the capabilities used by rbd are implemented.
Implementing all the other osd op types, and making the rest of the
code use it will be done separately, in the next few patches.
Note that only the extent, cls, and watch portions of the
ceph_osd_req_op structure are currently used. Delete the others
(xattr, pgls, and snap) from its definition so nobody thinks it's
actually implemented or needed. We can add it back again later
if needed, when we know it's been tested.
This (and a few follow-on patches) resolves:
http://tracker.ceph.com/issues/3861
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
2013-03-14 05:50:00 +04:00
|
|
|
{
|
2015-04-27 06:09:54 +03:00
|
|
|
struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
|
|
|
|
opcode, 0);
|
2013-04-05 10:27:12 +04:00
|
|
|
struct ceph_pagelist *pagelist;
|
libceph: define source request op functions
The rbd code has a function that allocates and populates a
ceph_osd_req_op structure (the in-core version of an osd request
operation). When reviewed, Josh suggested two things: that the
big varargs function might be better split into type-specific
functions; and that this functionality really belongs in the osd
client rather than rbd.
This patch implements both of Josh's suggestions. It breaks
up the rbd function into separate functions and defines them
in the osd client module as exported interfaces. Unlike the
rbd version, however, the functions don't allocate an osd_req_op
structure; they are provided the address of one and that is
initialized instead.
The rbd function has been eliminated and calls to it have been
replaced by calls to the new routines. The rbd code now now use a
stack (struct) variable to hold the op rather than allocating and
freeing it each time.
For now only the capabilities used by rbd are implemented.
Implementing all the other osd op types, and making the rest of the
code use it will be done separately, in the next few patches.
Note that only the extent, cls, and watch portions of the
ceph_osd_req_op structure are currently used. Delete the others
(xattr, pgls, and snap) from its definition so nobody thinks it's
actually implemented or needed. We can add it back again later
if needed, when we know it's been tested.
This (and a few follow-on patches) resolves:
http://tracker.ceph.com/issues/3861
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
2013-03-14 05:50:00 +04:00
|
|
|
size_t payload_len = 0;
|
|
|
|
size_t size;
|
|
|
|
|
|
|
|
BUG_ON(opcode != CEPH_OSD_OP_CALL);
|
|
|
|
|
2013-04-05 10:27:12 +04:00
|
|
|
pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
|
|
|
|
BUG_ON(!pagelist);
|
|
|
|
ceph_pagelist_init(pagelist);
|
|
|
|
|
libceph: define source request op functions
The rbd code has a function that allocates and populates a
ceph_osd_req_op structure (the in-core version of an osd request
operation). When reviewed, Josh suggested two things: that the
big varargs function might be better split into type-specific
functions; and that this functionality really belongs in the osd
client rather than rbd.
This patch implements both of Josh's suggestions. It breaks
up the rbd function into separate functions and defines them
in the osd client module as exported interfaces. Unlike the
rbd version, however, the functions don't allocate an osd_req_op
structure; they are provided the address of one and that is
initialized instead.
The rbd function has been eliminated and calls to it have been
replaced by calls to the new routines. The rbd code now now use a
stack (struct) variable to hold the op rather than allocating and
freeing it each time.
For now only the capabilities used by rbd are implemented.
Implementing all the other osd op types, and making the rest of the
code use it will be done separately, in the next few patches.
Note that only the extent, cls, and watch portions of the
ceph_osd_req_op structure are currently used. Delete the others
(xattr, pgls, and snap) from its definition so nobody thinks it's
actually implemented or needed. We can add it back again later
if needed, when we know it's been tested.
This (and a few follow-on patches) resolves:
http://tracker.ceph.com/issues/3861
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
2013-03-14 05:50:00 +04:00
|
|
|
op->cls.class_name = class;
|
|
|
|
size = strlen(class);
|
|
|
|
BUG_ON(size > (size_t) U8_MAX);
|
|
|
|
op->cls.class_len = size;
|
2013-04-05 10:27:12 +04:00
|
|
|
ceph_pagelist_append(pagelist, class, size);
|
libceph: define source request op functions
The rbd code has a function that allocates and populates a
ceph_osd_req_op structure (the in-core version of an osd request
operation). When reviewed, Josh suggested two things: that the
big varargs function might be better split into type-specific
functions; and that this functionality really belongs in the osd
client rather than rbd.
This patch implements both of Josh's suggestions. It breaks
up the rbd function into separate functions and defines them
in the osd client module as exported interfaces. Unlike the
rbd version, however, the functions don't allocate an osd_req_op
structure; they are provided the address of one and that is
initialized instead.
The rbd function has been eliminated and calls to it have been
replaced by calls to the new routines. The rbd code now now use a
stack (struct) variable to hold the op rather than allocating and
freeing it each time.
For now only the capabilities used by rbd are implemented.
Implementing all the other osd op types, and making the rest of the
code use it will be done separately, in the next few patches.
Note that only the extent, cls, and watch portions of the
ceph_osd_req_op structure are currently used. Delete the others
(xattr, pgls, and snap) from its definition so nobody thinks it's
actually implemented or needed. We can add it back again later
if needed, when we know it's been tested.
This (and a few follow-on patches) resolves:
http://tracker.ceph.com/issues/3861
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
2013-03-14 05:50:00 +04:00
|
|
|
payload_len += size;
|
|
|
|
|
|
|
|
op->cls.method_name = method;
|
|
|
|
size = strlen(method);
|
|
|
|
BUG_ON(size > (size_t) U8_MAX);
|
|
|
|
op->cls.method_len = size;
|
2013-04-05 10:27:12 +04:00
|
|
|
ceph_pagelist_append(pagelist, method, size);
|
libceph: define source request op functions
The rbd code has a function that allocates and populates a
ceph_osd_req_op structure (the in-core version of an osd request
operation). When reviewed, Josh suggested two things: that the
big varargs function might be better split into type-specific
functions; and that this functionality really belongs in the osd
client rather than rbd.
This patch implements both of Josh's suggestions. It breaks
up the rbd function into separate functions and defines them
in the osd client module as exported interfaces. Unlike the
rbd version, however, the functions don't allocate an osd_req_op
structure; they are provided the address of one and that is
initialized instead.
The rbd function has been eliminated and calls to it have been
replaced by calls to the new routines. The rbd code now now use a
stack (struct) variable to hold the op rather than allocating and
freeing it each time.
For now only the capabilities used by rbd are implemented.
Implementing all the other osd op types, and making the rest of the
code use it will be done separately, in the next few patches.
Note that only the extent, cls, and watch portions of the
ceph_osd_req_op structure are currently used. Delete the others
(xattr, pgls, and snap) from its definition so nobody thinks it's
actually implemented or needed. We can add it back again later
if needed, when we know it's been tested.
This (and a few follow-on patches) resolves:
http://tracker.ceph.com/issues/3861
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
2013-03-14 05:50:00 +04:00
|
|
|
payload_len += size;
|
|
|
|
|
2013-04-05 10:27:12 +04:00
|
|
|
osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
|
2013-04-05 10:27:12 +04:00
|
|
|
|
libceph: define source request op functions
The rbd code has a function that allocates and populates a
ceph_osd_req_op structure (the in-core version of an osd request
operation). When reviewed, Josh suggested two things: that the
big varargs function might be better split into type-specific
functions; and that this functionality really belongs in the osd
client rather than rbd.
This patch implements both of Josh's suggestions. It breaks
up the rbd function into separate functions and defines them
in the osd client module as exported interfaces. Unlike the
rbd version, however, the functions don't allocate an osd_req_op
structure; they are provided the address of one and that is
initialized instead.
The rbd function has been eliminated and calls to it have been
replaced by calls to the new routines. The rbd code now now use a
stack (struct) variable to hold the op rather than allocating and
freeing it each time.
For now only the capabilities used by rbd are implemented.
Implementing all the other osd op types, and making the rest of the
code use it will be done separately, in the next few patches.
Note that only the extent, cls, and watch portions of the
ceph_osd_req_op structure are currently used. Delete the others
(xattr, pgls, and snap) from its definition so nobody thinks it's
actually implemented or needed. We can add it back again later
if needed, when we know it's been tested.
This (and a few follow-on patches) resolves:
http://tracker.ceph.com/issues/3861
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
2013-03-14 05:50:00 +04:00
|
|
|
op->cls.argc = 0; /* currently unused */
|
|
|
|
|
|
|
|
op->payload_len = payload_len;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(osd_req_op_cls_init);
|
2013-04-03 10:28:58 +04:00
|
|
|
|
2014-11-12 09:00:43 +03:00
|
|
|
int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
|
|
|
|
u16 opcode, const char *name, const void *value,
|
|
|
|
size_t size, u8 cmp_op, u8 cmp_mode)
|
|
|
|
{
|
2015-04-27 06:09:54 +03:00
|
|
|
struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
|
|
|
|
opcode, 0);
|
2014-11-12 09:00:43 +03:00
|
|
|
struct ceph_pagelist *pagelist;
|
|
|
|
size_t payload_len;
|
|
|
|
|
|
|
|
BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
|
|
|
|
|
|
|
|
pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
|
|
|
|
if (!pagelist)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
ceph_pagelist_init(pagelist);
|
|
|
|
|
|
|
|
payload_len = strlen(name);
|
|
|
|
op->xattr.name_len = payload_len;
|
|
|
|
ceph_pagelist_append(pagelist, name, payload_len);
|
|
|
|
|
|
|
|
op->xattr.value_len = size;
|
|
|
|
ceph_pagelist_append(pagelist, value, size);
|
|
|
|
payload_len += size;
|
|
|
|
|
|
|
|
op->xattr.cmp_op = cmp_op;
|
|
|
|
op->xattr.cmp_mode = cmp_mode;
|
|
|
|
|
|
|
|
ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
|
|
|
|
op->payload_len = payload_len;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(osd_req_op_xattr_init);
|
|
|
|
|
2013-04-05 10:27:11 +04:00
|
|
|
/*
 * Set up a watch/notify-ack op (CEPH_OSD_OP_WATCH or
 * CEPH_OSD_OP_NOTIFY_ACK) in slot @which of @osd_req.
 */
void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
			   unsigned int which, u16 opcode,
			   u64 cookie, u64 version, int flag)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);

	BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH);

	op->watch.cookie = cookie;
	op->watch.ver = version;
	/* The flag is only meaningful for an actual watch registration. */
	if (flag && opcode == CEPH_OSD_OP_WATCH)
		op->watch.flag = (u8)1;
}
EXPORT_SYMBOL(osd_req_op_watch_init);
|
|
|
|
|
2014-02-25 18:22:27 +04:00
|
|
|
void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
|
|
|
|
unsigned int which,
|
|
|
|
u64 expected_object_size,
|
|
|
|
u64 expected_write_size)
|
|
|
|
{
|
|
|
|
struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
|
2015-04-27 06:09:54 +03:00
|
|
|
CEPH_OSD_OP_SETALLOCHINT,
|
|
|
|
0);
|
2014-02-25 18:22:27 +04:00
|
|
|
|
|
|
|
op->alloc_hint.expected_object_size = expected_object_size;
|
|
|
|
op->alloc_hint.expected_write_size = expected_write_size;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
|
|
|
|
* not worth a feature bit. Set FAILOK per-op flag to make
|
|
|
|
* sure older osds don't trip over an unsupported opcode.
|
|
|
|
*/
|
|
|
|
op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
|
|
|
|
|
2013-04-05 23:46:01 +04:00
|
|
|
static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
|
2013-04-05 10:27:12 +04:00
|
|
|
struct ceph_osd_data *osd_data)
|
|
|
|
{
|
|
|
|
u64 length = ceph_osd_data_length(osd_data);
|
|
|
|
|
|
|
|
if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
|
|
|
|
BUG_ON(length > (u64) SIZE_MAX);
|
|
|
|
if (length)
|
2013-04-05 23:46:01 +04:00
|
|
|
ceph_msg_data_add_pages(msg, osd_data->pages,
|
2013-04-05 10:27:12 +04:00
|
|
|
length, osd_data->alignment);
|
|
|
|
} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
|
|
|
|
BUG_ON(!length);
|
2013-04-05 23:46:01 +04:00
|
|
|
ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
|
2013-04-05 10:27:12 +04:00
|
|
|
#ifdef CONFIG_BLOCK
|
|
|
|
} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
|
2013-04-05 23:46:01 +04:00
|
|
|
ceph_msg_data_add_bio(msg, osd_data->bio, length);
|
2013-04-05 10:27:12 +04:00
|
|
|
#endif
|
|
|
|
} else {
|
|
|
|
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-03-14 05:50:00 +04:00
|
|
|
static u64 osd_req_encode_op(struct ceph_osd_request *req,
|
2013-04-04 06:32:51 +04:00
|
|
|
struct ceph_osd_op *dst, unsigned int which)
|
2013-03-14 05:50:00 +04:00
|
|
|
{
|
2013-04-04 06:32:51 +04:00
|
|
|
struct ceph_osd_req_op *src;
|
2013-04-05 23:46:02 +04:00
|
|
|
struct ceph_osd_data *osd_data;
|
2013-04-03 10:28:58 +04:00
|
|
|
u64 request_data_len = 0;
|
2013-04-05 23:46:02 +04:00
|
|
|
u64 data_length;
|
2013-03-14 05:50:00 +04:00
|
|
|
|
2013-04-04 06:32:51 +04:00
|
|
|
BUG_ON(which >= req->r_num_ops);
|
|
|
|
src = &req->r_ops[which];
|
2013-03-14 05:50:00 +04:00
|
|
|
if (WARN_ON(!osd_req_opcode_valid(src->op))) {
|
|
|
|
pr_err("unrecognized osd opcode %d\n", src->op);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (src->op) {
|
|
|
|
case CEPH_OSD_OP_STAT:
|
2013-02-11 22:33:24 +04:00
|
|
|
osd_data = &src->raw_data_in;
|
|
|
|
ceph_osdc_msg_data_add(req->r_reply, osd_data);
|
2013-03-14 05:50:00 +04:00
|
|
|
break;
|
|
|
|
case CEPH_OSD_OP_READ:
|
|
|
|
case CEPH_OSD_OP_WRITE:
|
2013-08-15 07:51:44 +04:00
|
|
|
case CEPH_OSD_OP_ZERO:
|
|
|
|
case CEPH_OSD_OP_TRUNCATE:
|
2013-03-14 05:50:00 +04:00
|
|
|
if (src->op == CEPH_OSD_OP_WRITE)
|
2013-04-03 10:28:58 +04:00
|
|
|
request_data_len = src->extent.length;
|
2013-03-14 05:50:00 +04:00
|
|
|
dst->extent.offset = cpu_to_le64(src->extent.offset);
|
|
|
|
dst->extent.length = cpu_to_le64(src->extent.length);
|
|
|
|
dst->extent.truncate_size =
|
|
|
|
cpu_to_le64(src->extent.truncate_size);
|
|
|
|
dst->extent.truncate_seq =
|
|
|
|
cpu_to_le32(src->extent.truncate_seq);
|
2013-04-05 23:46:02 +04:00
|
|
|
osd_data = &src->extent.osd_data;
|
2013-04-05 10:27:12 +04:00
|
|
|
if (src->op == CEPH_OSD_OP_WRITE)
|
2013-04-05 23:46:02 +04:00
|
|
|
ceph_osdc_msg_data_add(req->r_request, osd_data);
|
2013-04-05 10:27:12 +04:00
|
|
|
else
|
2013-04-05 23:46:02 +04:00
|
|
|
ceph_osdc_msg_data_add(req->r_reply, osd_data);
|
2013-03-14 05:50:00 +04:00
|
|
|
break;
|
|
|
|
case CEPH_OSD_OP_CALL:
|
|
|
|
dst->cls.class_len = src->cls.class_len;
|
|
|
|
dst->cls.method_len = src->cls.method_len;
|
2013-04-05 23:46:02 +04:00
|
|
|
osd_data = &src->cls.request_info;
|
|
|
|
ceph_osdc_msg_data_add(req->r_request, osd_data);
|
|
|
|
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGELIST);
|
|
|
|
request_data_len = osd_data->pagelist->length;
|
|
|
|
|
|
|
|
osd_data = &src->cls.request_data;
|
|
|
|
data_length = ceph_osd_data_length(osd_data);
|
|
|
|
if (data_length) {
|
|
|
|
BUG_ON(osd_data->type == CEPH_OSD_DATA_TYPE_NONE);
|
|
|
|
dst->cls.indata_len = cpu_to_le32(data_length);
|
|
|
|
ceph_osdc_msg_data_add(req->r_request, osd_data);
|
|
|
|
src->payload_len += data_length;
|
|
|
|
request_data_len += data_length;
|
|
|
|
}
|
|
|
|
osd_data = &src->cls.response_data;
|
|
|
|
ceph_osdc_msg_data_add(req->r_reply, osd_data);
|
2013-03-14 05:50:00 +04:00
|
|
|
break;
|
|
|
|
case CEPH_OSD_OP_STARTSYNC:
|
|
|
|
break;
|
|
|
|
case CEPH_OSD_OP_NOTIFY_ACK:
|
|
|
|
case CEPH_OSD_OP_WATCH:
|
|
|
|
dst->watch.cookie = cpu_to_le64(src->watch.cookie);
|
|
|
|
dst->watch.ver = cpu_to_le64(src->watch.ver);
|
|
|
|
dst->watch.flag = src->watch.flag;
|
|
|
|
break;
|
2014-02-25 18:22:27 +04:00
|
|
|
case CEPH_OSD_OP_SETALLOCHINT:
|
|
|
|
dst->alloc_hint.expected_object_size =
|
|
|
|
cpu_to_le64(src->alloc_hint.expected_object_size);
|
|
|
|
dst->alloc_hint.expected_write_size =
|
|
|
|
cpu_to_le64(src->alloc_hint.expected_write_size);
|
|
|
|
break;
|
2014-11-12 09:00:43 +03:00
|
|
|
case CEPH_OSD_OP_SETXATTR:
|
|
|
|
case CEPH_OSD_OP_CMPXATTR:
|
|
|
|
dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
|
|
|
|
dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
|
|
|
|
dst->xattr.cmp_op = src->xattr.cmp_op;
|
|
|
|
dst->xattr.cmp_mode = src->xattr.cmp_mode;
|
|
|
|
osd_data = &src->xattr.osd_data;
|
|
|
|
ceph_osdc_msg_data_add(req->r_request, osd_data);
|
|
|
|
request_data_len = osd_data->pagelist->length;
|
|
|
|
break;
|
2014-11-13 05:47:25 +03:00
|
|
|
case CEPH_OSD_OP_CREATE:
|
|
|
|
case CEPH_OSD_OP_DELETE:
|
|
|
|
break;
|
2013-03-14 05:50:00 +04:00
|
|
|
default:
|
2013-02-15 21:42:30 +04:00
|
|
|
pr_err("unsupported osd opcode %s\n",
|
2013-03-04 21:08:29 +04:00
|
|
|
ceph_osd_op_name(src->op));
|
2013-02-15 21:42:30 +04:00
|
|
|
WARN_ON(1);
|
2013-03-14 05:50:00 +04:00
|
|
|
|
|
|
|
return 0;
|
2010-04-07 02:01:27 +04:00
|
|
|
}
|
2014-02-25 18:22:26 +04:00
|
|
|
|
2013-03-14 05:50:00 +04:00
|
|
|
dst->op = cpu_to_le16(src->op);
|
2014-02-25 18:22:26 +04:00
|
|
|
dst->flags = cpu_to_le32(src->flags);
|
2010-04-07 02:01:27 +04:00
|
|
|
dst->payload_len = cpu_to_le32(src->payload_len);
|
2013-03-08 23:35:36 +04:00
|
|
|
|
2013-04-03 10:28:58 +04:00
|
|
|
return request_data_len;
|
2010-04-07 02:01:27 +04:00
|
|
|
}
|
|
|
|
|
2010-04-07 01:51:47 +04:00
|
|
|
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * NOTE(review): a stale paragraph describing a @do_sync/'startsync'
 * parameter was removed -- the function no longer takes one.
 *
 * Returns the new request on success, or an ERR_PTR() on allocation
 * (-ENOMEM) or layout-calculation failure.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	/* only these opcodes are supported by this constructor */
	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
					GFP_NOFS);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->r_flags = flags;

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r < 0) {
		ceph_osdc_put_request(req);
		return ERR_PTR(r);
	}

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		/* object-level ops carry no extent */
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = le32_to_cpu(layout->fl_object_size);
		u32 object_base = off - objoff;
		/*
		 * translate the file-relative truncate_size into an
		 * object-relative one, clamped to [0, object_size].
		 * NOTE(review): truncate_seq == 1 && truncate_size == -1ULL
		 * appears to mean "no truncation info" -- confirm with callers.
		 */
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout);

	/* object name is "<inode hex>.<object number hex>" */
	snprintf(req->r_base_oid.name, sizeof(req->r_base_oid.name),
		 "%llx.%08llx", vino.ino, objnum);
	req->r_base_oid.name_len = strlen(req->r_base_oid.name);

	return req;
}
EXPORT_SYMBOL(ceph_osdc_new_request);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We keep osd requests in an rbtree, sorted by ->r_tid.
|
|
|
|
*/
|
|
|
|
static void __insert_request(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_request *new)
|
|
|
|
{
|
|
|
|
struct rb_node **p = &osdc->requests.rb_node;
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct ceph_osd_request *req = NULL;
|
|
|
|
|
|
|
|
while (*p) {
|
|
|
|
parent = *p;
|
|
|
|
req = rb_entry(parent, struct ceph_osd_request, r_node);
|
|
|
|
if (new->r_tid < req->r_tid)
|
|
|
|
p = &(*p)->rb_left;
|
|
|
|
else if (new->r_tid > req->r_tid)
|
|
|
|
p = &(*p)->rb_right;
|
|
|
|
else
|
|
|
|
BUG();
|
|
|
|
}
|
|
|
|
|
|
|
|
rb_link_node(&new->r_node, parent, p);
|
|
|
|
rb_insert_color(&new->r_node, &osdc->requests);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
|
|
|
|
u64 tid)
|
|
|
|
{
|
|
|
|
struct ceph_osd_request *req;
|
|
|
|
struct rb_node *n = osdc->requests.rb_node;
|
|
|
|
|
|
|
|
while (n) {
|
|
|
|
req = rb_entry(n, struct ceph_osd_request, r_node);
|
|
|
|
if (tid < req->r_tid)
|
|
|
|
n = n->rb_left;
|
|
|
|
else if (tid > req->r_tid)
|
|
|
|
n = n->rb_right;
|
|
|
|
else
|
|
|
|
return req;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct ceph_osd_request *
|
|
|
|
__lookup_request_ge(struct ceph_osd_client *osdc,
|
|
|
|
u64 tid)
|
|
|
|
{
|
|
|
|
struct ceph_osd_request *req;
|
|
|
|
struct rb_node *n = osdc->requests.rb_node;
|
|
|
|
|
|
|
|
while (n) {
|
|
|
|
req = rb_entry(n, struct ceph_osd_request, r_node);
|
|
|
|
if (tid < req->r_tid) {
|
|
|
|
if (!n->rb_left)
|
|
|
|
return req;
|
|
|
|
n = n->rb_left;
|
|
|
|
} else if (tid > req->r_tid) {
|
|
|
|
n = n->rb_right;
|
|
|
|
} else {
|
|
|
|
return req;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2014-09-03 14:41:45 +04:00
|
|
|
/*
 * Requeue a lingering request on the same OSD under a fresh tid.
 *
 * The unregister/register pair below assigns the new tid; the explicit
 * r_osd save/restore keeps the request pinned to its original OSD.
 */
static void __kick_linger_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd *osd = req->r_osd;

	/*
	 * Linger requests need to be resent with a new tid to avoid
	 * the dup op detection logic on the OSDs.  Achieve this with
	 * a re-register dance instead of open-coding.
	 */
	ceph_osdc_get_request(req);
	if (!list_empty(&req->r_linger_item))
		__unregister_linger_request(osdc, req);
	else
		__unregister_request(osdc, req);
	__register_request(osdc, req);
	ceph_osdc_put_request(req);

	/*
	 * Unless request has been registered as both normal and
	 * lingering, __unregister{,_linger}_request clears r_osd.
	 * However, here we need to preserve r_osd to make sure we
	 * requeue on the same OSD.
	 */
	WARN_ON(req->r_osd || !osd);
	req->r_osd = osd;

	dout("%s requeueing %p tid %llu\n", __func__, req, req->r_tid);
	__enqueue_request(req);
}
|
|
|
|
|
2011-01-18 07:34:08 +03:00
|
|
|
/*
 * Resubmit requests pending on the given osd.
 *
 * Non-linger requests that were already sent are moved back to the
 * front of the osd client's unsent list (preserving tid order); linger
 * requests are re-registered with a new tid via __kick_linger_request().
 */
static void __kick_osd_requests(struct ceph_osd_client *osdc,
				struct ceph_osd *osd)
{
	struct ceph_osd_request *req, *nreq;
	LIST_HEAD(resend);
	LIST_HEAD(resend_linger);
	int err;

	dout("%s osd%d\n", __func__, osd->o_osd);
	err = __reset_osd(osdc, osd);
	/* osd was idle and removed (-ENODEV), or msgr will retry on its
	 * own (-EAGAIN) -- either way there is nothing to requeue here */
	if (err)
		return;

	/*
	 * Build up a list of requests to resend by traversing the
	 * osd's list of requests.  Requests for a given object are
	 * sent in tid order, and that is also the order they're
	 * kept on this list.  Therefore all requests that are in
	 * flight will be found first, followed by all requests that
	 * have not yet been sent.  And to resend requests while
	 * preserving this order we will want to put any sent
	 * requests back on the front of the osd client's unsent
	 * list.
	 *
	 * So we build a separate ordered list of already-sent
	 * requests for the affected osd and splice it onto the
	 * front of the osd client's unsent list.  Once we've seen a
	 * request that has not yet been sent we're done.  Those
	 * requests are already sitting right where they belong.
	 */
	list_for_each_entry(req, &osd->o_requests, r_osd_item) {
		if (!req->r_sent)
			break;

		if (!req->r_linger) {
			dout("%s requeueing %p tid %llu\n", __func__, req,
			     req->r_tid);
			list_move_tail(&req->r_req_lru_item, &resend);
			req->r_flags |= CEPH_OSD_FLAG_RETRY;
		} else {
			list_move_tail(&req->r_req_lru_item, &resend_linger);
		}
	}
	list_splice(&resend, &osdc->req_unsent);

	/*
	 * Both registered and not yet registered linger requests are
	 * enqueued with a new tid on the same OSD.  We add/move them
	 * to req_unsent/o_requests at the end to keep things in tid
	 * order.
	 */
	list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
				 r_linger_osd_item) {
		WARN_ON(!list_empty(&req->r_req_lru_item));
		__kick_linger_request(req);
	}

	list_for_each_entry_safe(req, nreq, &resend_linger, r_req_lru_item)
		__kick_linger_request(req);
}
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
/*
 * If the osd connection drops, we need to resubmit all requests.
 *
 * Connection fault callback: requeues this osd's outstanding requests
 * and flushes the unsent queue.  map_sem is taken (read) before
 * request_mutex; both are held across the kick and send.
 */
static void osd_reset(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;

	if (!osd)
		return;
	dout("osd_reset osd%d\n", osd->o_osd);
	osdc = osd->o_osdc;
	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	__kick_osd_requests(osdc, osd);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Track open sessions with osds.
|
|
|
|
*/
|
2012-05-27 08:26:43 +04:00
|
|
|
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
|
2009-10-06 22:31:10 +04:00
|
|
|
{
|
|
|
|
struct ceph_osd *osd;
|
|
|
|
|
|
|
|
osd = kzalloc(sizeof(*osd), GFP_NOFS);
|
|
|
|
if (!osd)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
atomic_set(&osd->o_ref, 1);
|
|
|
|
osd->o_osdc = osdc;
|
2012-05-27 08:26:43 +04:00
|
|
|
osd->o_osd = onum;
|
2012-12-06 17:22:04 +04:00
|
|
|
RB_CLEAR_NODE(&osd->o_node);
|
2009-10-06 22:31:10 +04:00
|
|
|
INIT_LIST_HEAD(&osd->o_requests);
|
2011-03-22 01:07:16 +03:00
|
|
|
INIT_LIST_HEAD(&osd->o_linger_requests);
|
2010-02-03 22:00:26 +03:00
|
|
|
INIT_LIST_HEAD(&osd->o_osd_lru);
|
2009-10-06 22:31:10 +04:00
|
|
|
osd->o_incarnation = 1;
|
|
|
|
|
2012-06-27 23:24:08 +04:00
|
|
|
ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
|
2009-11-19 03:19:57 +03:00
|
|
|
|
2010-02-27 02:32:31 +03:00
|
|
|
INIT_LIST_HEAD(&osd->o_keepalive_item);
|
2009-10-06 22:31:10 +04:00
|
|
|
return osd;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct ceph_osd *get_osd(struct ceph_osd *osd)
|
|
|
|
{
|
|
|
|
if (atomic_inc_not_zero(&osd->o_ref)) {
|
|
|
|
dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
|
|
|
|
atomic_read(&osd->o_ref));
|
|
|
|
return osd;
|
|
|
|
} else {
|
|
|
|
dout("get_osd %p FAIL\n", osd);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void put_osd(struct ceph_osd *osd)
|
|
|
|
{
|
|
|
|
dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
|
|
|
|
atomic_read(&osd->o_ref) - 1);
|
2015-02-16 11:49:42 +03:00
|
|
|
if (atomic_dec_and_test(&osd->o_ref)) {
|
2010-05-28 01:15:49 +04:00
|
|
|
struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
|
|
|
|
|
2015-02-16 11:49:42 +03:00
|
|
|
if (osd->o_auth.authorizer)
|
|
|
|
ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
|
2009-10-06 22:31:10 +04:00
|
|
|
kfree(osd);
|
2010-05-28 01:15:49 +04:00
|
|
|
}
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* remove an osd from our map
|
|
|
|
*/
|
2010-02-03 22:00:26 +03:00
|
|
|
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
|
2009-10-06 22:31:10 +04:00
|
|
|
{
|
2015-02-17 19:37:15 +03:00
|
|
|
dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
|
2014-11-05 19:33:44 +03:00
|
|
|
WARN_ON(!list_empty(&osd->o_requests));
|
|
|
|
WARN_ON(!list_empty(&osd->o_linger_requests));
|
2014-06-18 13:02:12 +04:00
|
|
|
|
2010-02-03 22:00:26 +03:00
|
|
|
list_del_init(&osd->o_osd_lru);
|
2015-02-17 19:37:15 +03:00
|
|
|
rb_erase(&osd->o_node, &osdc->osds);
|
|
|
|
RB_CLEAR_NODE(&osd->o_node);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
|
|
|
|
{
|
|
|
|
dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
|
|
|
|
|
|
|
|
if (!RB_EMPTY_NODE(&osd->o_node)) {
|
|
|
|
ceph_con_close(&osd->o_con);
|
|
|
|
__remove_osd(osdc, osd);
|
|
|
|
put_osd(osd);
|
|
|
|
}
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
|
|
|
|
2011-09-01 01:45:53 +04:00
|
|
|
static void remove_all_osds(struct ceph_osd_client *osdc)
|
|
|
|
{
|
2012-07-20 17:18:36 +04:00
|
|
|
dout("%s %p\n", __func__, osdc);
|
2011-09-01 01:45:53 +04:00
|
|
|
mutex_lock(&osdc->request_mutex);
|
|
|
|
while (!RB_EMPTY_ROOT(&osdc->osds)) {
|
|
|
|
struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
|
|
|
|
struct ceph_osd, o_node);
|
2015-02-17 19:37:15 +03:00
|
|
|
remove_osd(osdc, osd);
|
2011-09-01 01:45:53 +04:00
|
|
|
}
|
|
|
|
mutex_unlock(&osdc->request_mutex);
|
|
|
|
}
|
|
|
|
|
2010-02-03 22:00:26 +03:00
|
|
|
static void __move_osd_to_lru(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd *osd)
|
|
|
|
{
|
2014-06-20 14:14:41 +04:00
|
|
|
dout("%s %p\n", __func__, osd);
|
2010-02-03 22:00:26 +03:00
|
|
|
BUG_ON(!list_empty(&osd->o_osd_lru));
|
2014-06-20 14:14:41 +04:00
|
|
|
|
2010-02-03 22:00:26 +03:00
|
|
|
list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
|
2010-04-07 02:14:15 +04:00
|
|
|
osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
|
2010-02-03 22:00:26 +03:00
|
|
|
}
|
|
|
|
|
2014-06-20 14:14:41 +04:00
|
|
|
static void maybe_move_osd_to_lru(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd *osd)
|
|
|
|
{
|
|
|
|
dout("%s %p\n", __func__, osd);
|
|
|
|
|
|
|
|
if (list_empty(&osd->o_requests) &&
|
|
|
|
list_empty(&osd->o_linger_requests))
|
|
|
|
__move_osd_to_lru(osdc, osd);
|
|
|
|
}
|
|
|
|
|
2010-02-03 22:00:26 +03:00
|
|
|
static void __remove_osd_from_lru(struct ceph_osd *osd)
|
|
|
|
{
|
|
|
|
dout("__remove_osd_from_lru %p\n", osd);
|
|
|
|
if (!list_empty(&osd->o_osd_lru))
|
|
|
|
list_del_init(&osd->o_osd_lru);
|
|
|
|
}
|
|
|
|
|
2011-09-01 01:45:53 +04:00
|
|
|
static void remove_old_osds(struct ceph_osd_client *osdc)
|
2010-02-03 22:00:26 +03:00
|
|
|
{
|
|
|
|
struct ceph_osd *osd, *nosd;
|
|
|
|
|
|
|
|
dout("__remove_old_osds %p\n", osdc);
|
|
|
|
mutex_lock(&osdc->request_mutex);
|
|
|
|
list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
|
2011-09-01 01:45:53 +04:00
|
|
|
if (time_before(jiffies, osd->lru_ttl))
|
2010-02-03 22:00:26 +03:00
|
|
|
break;
|
2015-02-17 19:37:15 +03:00
|
|
|
remove_osd(osdc, osd);
|
2010-02-03 22:00:26 +03:00
|
|
|
}
|
|
|
|
mutex_unlock(&osdc->request_mutex);
|
|
|
|
}
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
/*
 * reset osd connect
 *
 * Returns:
 *   -ENODEV  osd was idle and has been removed entirely
 *   -EAGAIN  address unchanged and connection never opened; let the
 *            messenger's own backoff retry (r_stamp refreshed so
 *            handle_timeout() sees the requests as alive)
 *    0       connection was closed and reopened (new incarnation)
 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
	if (list_empty(&osd->o_requests) &&
	    list_empty(&osd->o_linger_requests)) {
		/* nothing outstanding -- drop the session instead */
		remove_osd(osdc, osd);
		return -ENODEV;
	}

	peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct ceph_osd_request *req;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		list_for_each_entry(req, &osd->o_requests, r_osd_item)
			req->r_stamp = jiffies;

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}
|
|
|
|
|
|
|
|
static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
|
|
|
|
{
|
|
|
|
struct rb_node **p = &osdc->osds.rb_node;
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct ceph_osd *osd = NULL;
|
|
|
|
|
2011-09-01 01:45:53 +04:00
|
|
|
dout("__insert_osd %p osd%d\n", new, new->o_osd);
|
2009-10-06 22:31:10 +04:00
|
|
|
while (*p) {
|
|
|
|
parent = *p;
|
|
|
|
osd = rb_entry(parent, struct ceph_osd, o_node);
|
|
|
|
if (new->o_osd < osd->o_osd)
|
|
|
|
p = &(*p)->rb_left;
|
|
|
|
else if (new->o_osd > osd->o_osd)
|
|
|
|
p = &(*p)->rb_right;
|
|
|
|
else
|
|
|
|
BUG();
|
|
|
|
}
|
|
|
|
|
|
|
|
rb_link_node(&new->o_node, parent, p);
|
|
|
|
rb_insert_color(&new->o_node, &osdc->osds);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
|
|
|
|
{
|
|
|
|
struct ceph_osd *osd;
|
|
|
|
struct rb_node *n = osdc->osds.rb_node;
|
|
|
|
|
|
|
|
while (n) {
|
|
|
|
osd = rb_entry(n, struct ceph_osd, o_node);
|
|
|
|
if (o < osd->o_osd)
|
|
|
|
n = n->rb_left;
|
|
|
|
else if (o > osd->o_osd)
|
|
|
|
n = n->rb_right;
|
|
|
|
else
|
|
|
|
return osd;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2010-02-27 02:32:31 +03:00
|
|
|
static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
|
|
|
|
{
|
|
|
|
schedule_delayed_work(&osdc->timeout_work,
|
2010-04-07 02:14:15 +04:00
|
|
|
osdc->client->options->osd_keepalive_timeout * HZ);
|
2010-02-27 02:32:31 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
|
|
|
|
{
|
|
|
|
cancel_delayed_work(&osdc->timeout_work);
|
|
|
|
}
|
2009-10-06 22:31:10 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Register request, assign tid. If this is the first request, set up
|
|
|
|
* the timeout event.
|
|
|
|
*/
|
2011-03-22 01:07:16 +03:00
|
|
|
static void __register_request(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_request *req)
|
2009-10-06 22:31:10 +04:00
|
|
|
{
|
|
|
|
req->r_tid = ++osdc->last_tid;
|
2009-12-22 22:24:33 +03:00
|
|
|
req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
|
2011-04-06 20:09:16 +04:00
|
|
|
dout("__register_request %p tid %lld\n", req, req->r_tid);
|
2009-10-06 22:31:10 +04:00
|
|
|
__insert_request(osdc, req);
|
|
|
|
ceph_osdc_get_request(req);
|
|
|
|
osdc->num_requests++;
|
|
|
|
if (osdc->num_requests == 1) {
|
2010-02-27 02:32:31 +03:00
|
|
|
dout(" first request, scheduling timeout\n");
|
|
|
|
__schedule_osd_timeout(osdc);
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
2011-03-22 01:07:16 +03:00
|
|
|
}
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
/*
|
|
|
|
* called under osdc->request_mutex
|
|
|
|
*/
|
|
|
|
static void __unregister_request(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_request *req)
|
|
|
|
{
|
2012-05-17 00:16:38 +04:00
|
|
|
if (RB_EMPTY_NODE(&req->r_node)) {
|
|
|
|
dout("__unregister_request %p tid %lld not registered\n",
|
|
|
|
req, req->r_tid);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
dout("__unregister_request %p tid %lld\n", req, req->r_tid);
|
|
|
|
rb_erase(&req->r_node, &osdc->requests);
|
2014-06-20 14:14:42 +04:00
|
|
|
RB_CLEAR_NODE(&req->r_node);
|
2009-10-06 22:31:10 +04:00
|
|
|
osdc->num_requests--;
|
|
|
|
|
2009-10-09 03:57:16 +04:00
|
|
|
if (req->r_osd) {
|
|
|
|
/* make sure the original request isn't in flight. */
|
2012-06-01 23:56:43 +04:00
|
|
|
ceph_msg_revoke(req->r_request);
|
2009-10-09 03:57:16 +04:00
|
|
|
|
|
|
|
list_del_init(&req->r_osd_item);
|
2014-06-20 14:14:41 +04:00
|
|
|
maybe_move_osd_to_lru(osdc, req->r_osd);
|
2014-06-20 18:29:20 +04:00
|
|
|
if (list_empty(&req->r_linger_osd_item))
|
2011-03-22 01:07:16 +03:00
|
|
|
req->r_osd = NULL;
|
2009-10-09 03:57:16 +04:00
|
|
|
}
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2012-11-29 18:37:03 +04:00
|
|
|
list_del_init(&req->r_req_lru_item);
|
2009-10-06 22:31:10 +04:00
|
|
|
ceph_osdc_put_request(req);
|
|
|
|
|
2010-02-27 02:32:31 +03:00
|
|
|
if (osdc->num_requests == 0) {
|
|
|
|
dout(" no requests, canceling timeout\n");
|
|
|
|
__cancel_osd_timeout(osdc);
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Cancel a previously queued request message
|
|
|
|
*/
|
|
|
|
static void __cancel_request(struct ceph_osd_request *req)
|
|
|
|
{
|
2010-09-27 21:18:52 +04:00
|
|
|
if (req->r_sent && req->r_osd) {
|
2012-06-01 23:56:43 +04:00
|
|
|
ceph_msg_revoke(req->r_request);
|
2009-10-06 22:31:10 +04:00
|
|
|
req->r_sent = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-03-22 01:07:16 +03:00
|
|
|
static void __register_linger_request(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_request *req)
|
|
|
|
{
|
2014-06-20 18:29:20 +04:00
|
|
|
dout("%s %p tid %llu\n", __func__, req, req->r_tid);
|
|
|
|
WARN_ON(!req->r_linger);
|
|
|
|
|
2013-05-23 05:54:25 +04:00
|
|
|
ceph_osdc_get_request(req);
|
2011-03-22 01:07:16 +03:00
|
|
|
list_add_tail(&req->r_linger_item, &osdc->req_linger);
|
2012-07-31 03:19:28 +04:00
|
|
|
if (req->r_osd)
|
2014-06-20 14:14:41 +04:00
|
|
|
list_add_tail(&req->r_linger_osd_item,
|
2012-07-31 03:19:28 +04:00
|
|
|
&req->r_osd->o_linger_requests);
|
2011-03-22 01:07:16 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void __unregister_linger_request(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_request *req)
|
|
|
|
{
|
2014-06-20 18:29:20 +04:00
|
|
|
WARN_ON(!req->r_linger);
|
|
|
|
|
|
|
|
if (list_empty(&req->r_linger_item)) {
|
|
|
|
dout("%s %p tid %llu not registered\n", __func__, req,
|
|
|
|
req->r_tid);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
dout("%s %p tid %llu\n", __func__, req, req->r_tid);
|
2012-12-06 19:37:23 +04:00
|
|
|
list_del_init(&req->r_linger_item);
|
2014-06-20 18:29:20 +04:00
|
|
|
|
2011-03-22 01:07:16 +03:00
|
|
|
if (req->r_osd) {
|
2014-06-20 14:14:41 +04:00
|
|
|
list_del_init(&req->r_linger_osd_item);
|
2014-06-20 14:14:41 +04:00
|
|
|
maybe_move_osd_to_lru(osdc, req->r_osd);
|
2011-03-29 23:11:06 +04:00
|
|
|
if (list_empty(&req->r_osd_item))
|
|
|
|
req->r_osd = NULL;
|
2011-03-22 01:07:16 +03:00
|
|
|
}
|
2013-05-23 05:54:25 +04:00
|
|
|
ceph_osdc_put_request(req);
|
2011-03-22 01:07:16 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_request *req)
|
|
|
|
{
|
|
|
|
if (!req->r_linger) {
|
|
|
|
dout("set_request_linger %p\n", req);
|
|
|
|
req->r_linger = 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ceph_osdc_set_request_linger);
|
|
|
|
|
libceph: block I/O when PAUSE or FULL osd map flags are set
The PAUSEWR and PAUSERD flags are meant to stop the cluster from
processing writes and reads, respectively. The FULL flag is set when
the cluster determines that it is out of space, and will no longer
process writes. PAUSEWR and PAUSERD are purely client-side settings
already implemented in userspace clients. The osd does nothing special
with these flags.
When the FULL flag is set, however, the osd responds to all writes
with -ENOSPC. For cephfs, this makes sense, but for rbd the block
layer translates this into EIO. If a cluster goes from full to
non-full quickly, a filesystem on top of rbd will not behave well,
since some writes succeed while others get EIO.
Fix this by blocking any writes when the FULL flag is set in the osd
client. This is the same strategy used by userspace, so apply it by
default. A follow-on patch makes this configurable.
__map_request() is called to re-target osd requests in case the
available osds changed. Add a paused field to a ceph_osd_request, and
set it whenever an appropriate osd map flag is set. Avoid queueing
paused requests in __map_request(), but force them to be resent if
they become unpaused.
Also subscribe to the next osd map from the monitor if any of these
flags are set, so paused requests can be unblocked as soon as
possible.
Fixes: http://tracker.ceph.com/issues/6079
Reviewed-by: Sage Weil <sage@inktank.com>
Signed-off-by: Josh Durgin <josh.durgin@inktank.com>
2013-12-03 07:11:48 +04:00
|
|
|
/*
|
|
|
|
* Returns whether a request should be blocked from being sent
|
|
|
|
* based on the current osdmap and osd_client settings.
|
|
|
|
*
|
|
|
|
* Caller should hold map_sem for read.
|
|
|
|
*/
|
|
|
|
static bool __req_should_be_paused(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_request *req)
|
|
|
|
{
|
|
|
|
bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
|
|
|
|
bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
|
|
|
|
ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
|
|
|
|
return (req->r_flags & CEPH_OSD_FLAG_READ && pauserd) ||
|
|
|
|
(req->r_flags & CEPH_OSD_FLAG_WRITE && pausewr);
|
|
|
|
}
|
|
|
|
|
2014-01-27 19:40:19 +04:00
|
|
|
/*
|
|
|
|
* Calculate mapping of a request to a PG. Takes tiering into account.
|
|
|
|
*/
|
|
|
|
static int __calc_request_pg(struct ceph_osdmap *osdmap,
|
|
|
|
struct ceph_osd_request *req,
|
|
|
|
struct ceph_pg *pg_out)
|
|
|
|
{
|
libceph: follow redirect replies from osds
Follow redirect replies from osds, for details see ceph.git commit
fbbe3ad1220799b7bb00ea30fce581c5eadaf034.
v1 (current) version of redirect reply consists of oloc and oid, which
expands to pool, key, nspace, hash and oid. However, server-side code
that would populate anything other than pool doesn't exist yet, and
hence this commit adds support for pool redirects only. To make sure
that future server-side updates don't break us, we decode all fields
and, if any of key, nspace, hash or oid have a non-default value, error
out with "corrupt osd_op_reply ..." message.
Signed-off-by: Ilya Dryomov <ilya.dryomov@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2014-01-27 19:40:20 +04:00
|
|
|
bool need_check_tiering;
|
|
|
|
|
|
|
|
need_check_tiering = false;
|
|
|
|
if (req->r_target_oloc.pool == -1) {
|
|
|
|
req->r_target_oloc = req->r_base_oloc; /* struct */
|
|
|
|
need_check_tiering = true;
|
|
|
|
}
|
|
|
|
if (req->r_target_oid.name_len == 0) {
|
|
|
|
ceph_oid_copy(&req->r_target_oid, &req->r_base_oid);
|
|
|
|
need_check_tiering = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (need_check_tiering &&
|
|
|
|
(req->r_flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
|
2014-01-27 19:40:19 +04:00
|
|
|
struct ceph_pg_pool_info *pi;
|
|
|
|
|
libceph: follow redirect replies from osds
Follow redirect replies from osds, for details see ceph.git commit
fbbe3ad1220799b7bb00ea30fce581c5eadaf034.
v1 (current) version of redirect reply consists of oloc and oid, which
expands to pool, key, nspace, hash and oid. However, server-side code
that would populate anything other than pool doesn't exist yet, and
hence this commit adds support for pool redirects only. To make sure
that future server-side updates don't break us, we decode all fields
and, if any of key, nspace, hash or oid have a non-default value, error
out with "corrupt osd_op_reply ..." message.
Signed-off-by: Ilya Dryomov <ilya.dryomov@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2014-01-27 19:40:20 +04:00
|
|
|
pi = ceph_pg_pool_by_id(osdmap, req->r_target_oloc.pool);
|
2014-01-27 19:40:19 +04:00
|
|
|
if (pi) {
|
|
|
|
if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
|
|
|
|
pi->read_tier >= 0)
|
libceph: follow redirect replies from osds
Follow redirect replies from osds, for details see ceph.git commit
fbbe3ad1220799b7bb00ea30fce581c5eadaf034.
v1 (current) version of redirect reply consists of oloc and oid, which
expands to pool, key, nspace, hash and oid. However, server-side code
that would populate anything other than pool doesn't exist yet, and
hence this commit adds support for pool redirects only. To make sure
that future server-side updates don't break us, we decode all fields
and, if any of key, nspace, hash or oid have a non-default value, error
out with "corrupt osd_op_reply ..." message.
Signed-off-by: Ilya Dryomov <ilya.dryomov@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2014-01-27 19:40:20 +04:00
|
|
|
req->r_target_oloc.pool = pi->read_tier;
|
2014-01-27 19:40:19 +04:00
|
|
|
if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
|
|
|
|
pi->write_tier >= 0)
|
libceph: follow redirect replies from osds
Follow redirect replies from osds, for details see ceph.git commit
fbbe3ad1220799b7bb00ea30fce581c5eadaf034.
v1 (current) version of redirect reply consists of oloc and oid, which
expands to pool, key, nspace, hash and oid. However, server-side code
that would populate anything other than pool doesn't exist yet, and
hence this commit adds support for pool redirects only. To make sure
that future server-side updates don't break us, we decode all fields
and, if any of key, nspace, hash or oid have a non-default value, error
out with "corrupt osd_op_reply ..." message.
Signed-off-by: Ilya Dryomov <ilya.dryomov@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2014-01-27 19:40:20 +04:00
|
|
|
req->r_target_oloc.pool = pi->write_tier;
|
2014-01-27 19:40:19 +04:00
|
|
|
}
|
|
|
|
/* !pi is caught in ceph_oloc_oid_to_pg() */
|
|
|
|
}
|
|
|
|
|
libceph: follow redirect replies from osds
Follow redirect replies from osds, for details see ceph.git commit
fbbe3ad1220799b7bb00ea30fce581c5eadaf034.
v1 (current) version of redirect reply consists of oloc and oid, which
expands to pool, key, nspace, hash and oid. However, server-side code
that would populate anything other than pool doesn't exist yet, and
hence this commit adds support for pool redirects only. To make sure
that future server-side updates don't break us, we decode all fields
and, if any of key, nspace, hash or oid have a non-default value, error
out with "corrupt osd_op_reply ..." message.
Signed-off-by: Ilya Dryomov <ilya.dryomov@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2014-01-27 19:40:20 +04:00
|
|
|
return ceph_oloc_oid_to_pg(osdmap, &req->r_target_oloc,
|
|
|
|
&req->r_target_oid, pg_out);
|
2014-01-27 19:40:19 +04:00
|
|
|
}
|
|
|
|
|
2014-09-02 13:40:33 +04:00
|
|
|
static void __enqueue_request(struct ceph_osd_request *req)
|
|
|
|
{
|
|
|
|
struct ceph_osd_client *osdc = req->r_osdc;
|
|
|
|
|
|
|
|
dout("%s %p tid %llu to osd%d\n", __func__, req, req->r_tid,
|
|
|
|
req->r_osd ? req->r_osd->o_osd : -1);
|
|
|
|
|
|
|
|
if (req->r_osd) {
|
|
|
|
__remove_osd_from_lru(req->r_osd);
|
|
|
|
list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
|
|
|
|
list_move_tail(&req->r_req_lru_item, &osdc->req_unsent);
|
|
|
|
} else {
|
|
|
|
list_move_tail(&req->r_req_lru_item, &osdc->req_notarget);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
/*
|
|
|
|
* Pick an osd (the first 'up' osd in the pg), allocate the osd struct
|
|
|
|
* (as needed), and set the request r_osd appropriately. If there is
|
2011-03-31 05:57:33 +04:00
|
|
|
* no up osd, set r_osd to NULL. Move the request to the appropriate list
|
2011-01-18 07:34:08 +03:00
|
|
|
* (unsent, homeless) or leave on in-flight lru.
|
2009-10-06 22:31:10 +04:00
|
|
|
*
|
|
|
|
* Return 0 if unchanged, 1 if changed, or negative on error.
|
|
|
|
*
|
|
|
|
* Caller should hold map_sem for read and request_mutex.
|
|
|
|
*/
|
2011-01-18 07:34:08 +03:00
|
|
|
static int __map_request(struct ceph_osd_client *osdc,
|
2011-10-15 00:33:55 +04:00
|
|
|
struct ceph_osd_request *req, int force_resend)
|
2009-10-06 22:31:10 +04:00
|
|
|
{
|
2013-02-23 22:38:16 +04:00
|
|
|
struct ceph_pg pgid;
|
2010-05-10 21:24:48 +04:00
|
|
|
int acting[CEPH_PG_MAX_SIZE];
|
2014-03-24 19:12:48 +04:00
|
|
|
int num, o;
|
2009-10-06 22:31:10 +04:00
|
|
|
int err;
|
libceph: block I/O when PAUSE or FULL osd map flags are set
The PAUSEWR and PAUSERD flags are meant to stop the cluster from
processing writes and reads, respectively. The FULL flag is set when
the cluster determines that it is out of space, and will no longer
process writes. PAUSEWR and PAUSERD are purely client-side settings
already implemented in userspace clients. The osd does nothing special
with these flags.
When the FULL flag is set, however, the osd responds to all writes
with -ENOSPC. For cephfs, this makes sense, but for rbd the block
layer translates this into EIO. If a cluster goes from full to
non-full quickly, a filesystem on top of rbd will not behave well,
since some writes succeed while others get EIO.
Fix this by blocking any writes when the FULL flag is set in the osd
client. This is the same strategy used by userspace, so apply it by
default. A follow-on patch makes this configurable.
__map_request() is called to re-target osd requests in case the
available osds changed. Add a paused field to a ceph_osd_request, and
set it whenever an appropriate osd map flag is set. Avoid queueing
paused requests in __map_request(), but force them to be resent if
they become unpaused.
Also subscribe to the next osd map from the monitor if any of these
flags are set, so paused requests can be unblocked as soon as
possible.
Fixes: http://tracker.ceph.com/issues/6079
Reviewed-by: Sage Weil <sage@inktank.com>
Signed-off-by: Josh Durgin <josh.durgin@inktank.com>
2013-12-03 07:11:48 +04:00
|
|
|
bool was_paused;
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2011-01-18 07:34:08 +03:00
|
|
|
dout("map_request %p tid %lld\n", req, req->r_tid);
|
2014-01-27 19:40:19 +04:00
|
|
|
|
|
|
|
err = __calc_request_pg(osdc->osdmap, req, &pgid);
|
2011-01-18 07:34:08 +03:00
|
|
|
if (err) {
|
|
|
|
list_move(&req->r_req_lru_item, &osdc->req_notarget);
|
2009-10-06 22:31:10 +04:00
|
|
|
return err;
|
2011-01-18 07:34:08 +03:00
|
|
|
}
|
2010-01-09 02:58:25 +03:00
|
|
|
req->r_pgid = pgid;
|
|
|
|
|
2014-03-24 19:12:48 +04:00
|
|
|
num = ceph_calc_pg_acting(osdc->osdmap, pgid, acting, &o);
|
|
|
|
if (num < 0)
|
|
|
|
num = 0;
|
2009-10-06 22:31:10 +04:00
|
|
|
|
libceph: block I/O when PAUSE or FULL osd map flags are set
The PAUSEWR and PAUSERD flags are meant to stop the cluster from
processing writes and reads, respectively. The FULL flag is set when
the cluster determines that it is out of space, and will no longer
process writes. PAUSEWR and PAUSERD are purely client-side settings
already implemented in userspace clients. The osd does nothing special
with these flags.
When the FULL flag is set, however, the osd responds to all writes
with -ENOSPC. For cephfs, this makes sense, but for rbd the block
layer translates this into EIO. If a cluster goes from full to
non-full quickly, a filesystem on top of rbd will not behave well,
since some writes succeed while others get EIO.
Fix this by blocking any writes when the FULL flag is set in the osd
client. This is the same strategy used by userspace, so apply it by
default. A follow-on patch makes this configurable.
__map_request() is called to re-target osd requests in case the
available osds changed. Add a paused field to a ceph_osd_request, and
set it whenever an appropriate osd map flag is set. Avoid queueing
paused requests in __map_request(), but force them to be resent if
they become unpaused.
Also subscribe to the next osd map from the monitor if any of these
flags are set, so paused requests can be unblocked as soon as
possible.
Fixes: http://tracker.ceph.com/issues/6079
Reviewed-by: Sage Weil <sage@inktank.com>
Signed-off-by: Josh Durgin <josh.durgin@inktank.com>
2013-12-03 07:11:48 +04:00
|
|
|
was_paused = req->r_paused;
|
|
|
|
req->r_paused = __req_should_be_paused(osdc, req);
|
|
|
|
if (was_paused && !req->r_paused)
|
|
|
|
force_resend = 1;
|
|
|
|
|
2011-10-15 00:33:55 +04:00
|
|
|
if ((!force_resend &&
|
|
|
|
req->r_osd && req->r_osd->o_osd == o &&
|
2010-05-10 21:24:48 +04:00
|
|
|
req->r_sent >= req->r_osd->o_incarnation &&
|
|
|
|
req->r_num_pg_osds == num &&
|
|
|
|
memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
|
libceph: block I/O when PAUSE or FULL osd map flags are set
The PAUSEWR and PAUSERD flags are meant to stop the cluster from
processing writes and reads, respectively. The FULL flag is set when
the cluster determines that it is out of space, and will no longer
process writes. PAUSEWR and PAUSERD are purely client-side settings
already implemented in userspace clients. The osd does nothing special
with these flags.
When the FULL flag is set, however, the osd responds to all writes
with -ENOSPC. For cephfs, this makes sense, but for rbd the block
layer translates this into EIO. If a cluster goes from full to
non-full quickly, a filesystem on top of rbd will not behave well,
since some writes succeed while others get EIO.
Fix this by blocking any writes when the FULL flag is set in the osd
client. This is the same strategy used by userspace, so apply it by
default. A follow-on patch makes this configurable.
__map_request() is called to re-target osd requests in case the
available osds changed. Add a paused field to a ceph_osd_request, and
set it whenever an appropriate osd map flag is set. Avoid queueing
paused requests in __map_request(), but force them to be resent if
they become unpaused.
Also subscribe to the next osd map from the monitor if any of these
flags are set, so paused requests can be unblocked as soon as
possible.
Fixes: http://tracker.ceph.com/issues/6079
Reviewed-by: Sage Weil <sage@inktank.com>
Signed-off-by: Josh Durgin <josh.durgin@inktank.com>
2013-12-03 07:11:48 +04:00
|
|
|
(req->r_osd == NULL && o == -1) ||
|
|
|
|
req->r_paused)
|
2009-10-06 22:31:10 +04:00
|
|
|
return 0; /* no change */
|
|
|
|
|
2013-02-23 22:38:16 +04:00
|
|
|
dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
|
|
|
|
req->r_tid, pgid.pool, pgid.seed, o,
|
2009-10-06 22:31:10 +04:00
|
|
|
req->r_osd ? req->r_osd->o_osd : -1);
|
|
|
|
|
2010-05-10 21:24:48 +04:00
|
|
|
/* record full pg acting set */
|
|
|
|
memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
|
|
|
|
req->r_num_pg_osds = num;
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
if (req->r_osd) {
|
|
|
|
__cancel_request(req);
|
|
|
|
list_del_init(&req->r_osd_item);
|
2014-11-04 18:32:14 +03:00
|
|
|
list_del_init(&req->r_linger_osd_item);
|
2009-10-06 22:31:10 +04:00
|
|
|
req->r_osd = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
req->r_osd = __lookup_osd(osdc, o);
|
|
|
|
if (!req->r_osd && o >= 0) {
|
2010-02-26 20:37:33 +03:00
|
|
|
err = -ENOMEM;
|
2012-05-27 08:26:43 +04:00
|
|
|
req->r_osd = create_osd(osdc, o);
|
2011-01-18 07:34:08 +03:00
|
|
|
if (!req->r_osd) {
|
|
|
|
list_move(&req->r_req_lru_item, &osdc->req_notarget);
|
2010-02-26 20:37:33 +03:00
|
|
|
goto out;
|
2011-01-18 07:34:08 +03:00
|
|
|
}
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2011-01-18 07:34:08 +03:00
|
|
|
dout("map_request osd %p is osd%d\n", req->r_osd, o);
|
2009-10-06 22:31:10 +04:00
|
|
|
__insert_osd(osdc, req->r_osd);
|
|
|
|
|
2012-06-27 23:24:08 +04:00
|
|
|
ceph_con_open(&req->r_osd->o_con,
|
|
|
|
CEPH_ENTITY_TYPE_OSD, o,
|
|
|
|
&osdc->osdmap->osd_addr[o]);
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
|
|
|
|
2014-09-02 13:40:33 +04:00
|
|
|
__enqueue_request(req);
|
2010-05-10 21:24:48 +04:00
|
|
|
err = 1; /* osd or pg changed */
|
2009-10-06 22:31:10 +04:00
|
|
|
|
|
|
|
out:
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* caller should hold map_sem (for read) and request_mutex
|
|
|
|
*/
|
2012-01-04 00:34:34 +04:00
|
|
|
static void __send_request(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_request *req)
|
2009-10-06 22:31:10 +04:00
|
|
|
{
|
2013-02-26 04:11:12 +04:00
|
|
|
void *p;
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2013-02-26 04:11:12 +04:00
|
|
|
dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
|
|
|
|
req, req->r_tid, req->r_osd->o_osd, req->r_flags,
|
|
|
|
(unsigned long long)req->r_pgid.pool, req->r_pgid.seed);
|
|
|
|
|
|
|
|
/* fill in message content that changes each time we send it */
|
|
|
|
put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
|
|
|
|
put_unaligned_le32(req->r_flags, req->r_request_flags);
|
libceph: follow redirect replies from osds
Follow redirect replies from osds, for details see ceph.git commit
fbbe3ad1220799b7bb00ea30fce581c5eadaf034.
v1 (current) version of redirect reply consists of oloc and oid, which
expands to pool, key, nspace, hash and oid. However, server-side code
that would populate anything other than pool doesn't exist yet, and
hence this commit adds support for pool redirects only. To make sure
that future server-side updates don't break us, we decode all fields
and, if any of key, nspace, hash or oid have a non-default value, error
out with "corrupt osd_op_reply ..." message.
Signed-off-by: Ilya Dryomov <ilya.dryomov@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2014-01-27 19:40:20 +04:00
|
|
|
put_unaligned_le64(req->r_target_oloc.pool, req->r_request_pool);
|
2013-02-26 04:11:12 +04:00
|
|
|
p = req->r_request_pgid;
|
|
|
|
ceph_encode_64(&p, req->r_pgid.pool);
|
|
|
|
ceph_encode_32(&p, req->r_pgid.seed);
|
|
|
|
put_unaligned_le64(1, req->r_request_attempts); /* FIXME */
|
|
|
|
memcpy(req->r_request_reassert_version, &req->r_reassert_version,
|
|
|
|
sizeof(req->r_reassert_version));
|
2013-02-26 04:13:08 +04:00
|
|
|
|
2010-03-23 00:42:30 +03:00
|
|
|
req->r_stamp = jiffies;
|
2010-08-23 08:34:27 +04:00
|
|
|
list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
|
|
|
ceph_msg_get(req->r_request); /* send consumes a ref */
|
libceph: change how "safe" callback is used
An osd request currently has two callbacks. They inform the
initiator of the request when we've received confirmation for the
target osd that a request was received, and when the osd indicates
all changes described by the request are durable.
The only time the second callback is used is in the ceph file system
for a synchronous write. There's a race that makes some handling of
this case unsafe. This patch addresses this problem. The error
handling for this callback is also kind of gross, and this patch
changes that as well.
In ceph_sync_write(), if a safe callback is requested we want to add
the request on the ceph inode's unsafe items list. Because items on
this list must have their tid set (by ceph_osd_start_request()), the
request added *after* the call to that function returns. The
problem with this is that there's a race between starting the
request and adding it to the unsafe items list; the request may
already be complete before ceph_sync_write() even begins to put it
on the list.
To address this, we change the way the "safe" callback is used.
Rather than just calling it when the request is "safe", we use it to
notify the initiator the bounds (start and end) of the period during
which the request is *unsafe*. So the initiator gets notified just
before the request gets sent to the osd (when it is "unsafe"), and
again when it's known the results are durable (it's no longer
unsafe). The first call will get made in __send_request(), just
before the request message gets sent to the messenger for the first
time. That function is only called by __send_queued(), which is
always called with the osd client's request mutex held.
We then have this callback function insert the request on the ceph
inode's unsafe list when we're told the request is unsafe. This
will avoid the race because this call will be made under protection
of the osd client's request mutex. It also nicely groups the setup
and cleanup of the state associated with managing unsafe requests.
The name of the "safe" callback field is changed to "unsafe" to
better reflect its new purpose. It has a Boolean "unsafe" parameter
to indicate whether the request is becoming unsafe or is now safe.
Because the "msg" parameter wasn't used, we drop that.
This resolves the original problem reportedin:
http://tracker.ceph.com/issues/4706
Reported-by: Yan, Zheng <zheng.z.yan@intel.com>
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Yan, Zheng <zheng.z.yan@intel.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2013-04-15 20:20:42 +04:00
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
req->r_sent = req->r_osd->o_incarnation;
|
libceph: change how "safe" callback is used
An osd request currently has two callbacks. They inform the
initiator of the request when we've received confirmation for the
target osd that a request was received, and when the osd indicates
all changes described by the request are durable.
The only time the second callback is used is in the ceph file system
for a synchronous write. There's a race that makes some handling of
this case unsafe. This patch addresses this problem. The error
handling for this callback is also kind of gross, and this patch
changes that as well.
In ceph_sync_write(), if a safe callback is requested we want to add
the request on the ceph inode's unsafe items list. Because items on
this list must have their tid set (by ceph_osd_start_request()), the
request added *after* the call to that function returns. The
problem with this is that there's a race between starting the
request and adding it to the unsafe items list; the request may
already be complete before ceph_sync_write() even begins to put it
on the list.
To address this, we change the way the "safe" callback is used.
Rather than just calling it when the request is "safe", we use it to
notify the initiator the bounds (start and end) of the period during
which the request is *unsafe*. So the initiator gets notified just
before the request gets sent to the osd (when it is "unsafe"), and
again when it's known the results are durable (it's no longer
unsafe). The first call will get made in __send_request(), just
before the request message gets sent to the messenger for the first
time. That function is only called by __send_queued(), which is
always called with the osd client's request mutex held.
We then have this callback function insert the request on the ceph
inode's unsafe list when we're told the request is unsafe. This
will avoid the race because this call will be made under protection
of the osd client's request mutex. It also nicely groups the setup
and cleanup of the state associated with managing unsafe requests.
The name of the "safe" callback field is changed to "unsafe" to
better reflect its new purpose. It has a Boolean "unsafe" parameter
to indicate whether the request is becoming unsafe or is now safe.
Because the "msg" parameter wasn't used, we drop that.
This resolves the original problem reportedin:
http://tracker.ceph.com/issues/4706
Reported-by: Yan, Zheng <zheng.z.yan@intel.com>
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Yan, Zheng <zheng.z.yan@intel.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2013-04-15 20:20:42 +04:00
|
|
|
|
|
|
|
ceph_con_send(&req->r_osd->o_con, req->r_request);
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
|
|
|
|
2011-01-18 07:34:08 +03:00
|
|
|
/*
|
|
|
|
* Send any requests in the queue (req_unsent).
|
|
|
|
*/
|
2013-02-15 21:42:29 +04:00
|
|
|
static void __send_queued(struct ceph_osd_client *osdc)
|
2011-01-18 07:34:08 +03:00
|
|
|
{
|
|
|
|
struct ceph_osd_request *req, *tmp;
|
|
|
|
|
2013-02-15 21:42:29 +04:00
|
|
|
dout("__send_queued\n");
|
|
|
|
list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
|
2011-01-18 07:34:08 +03:00
|
|
|
__send_request(osdc, req);
|
|
|
|
}
|
|
|
|
|
2014-01-31 21:33:39 +04:00
|
|
|
/*
|
|
|
|
* Caller should hold map_sem for read and request_mutex.
|
|
|
|
*/
|
|
|
|
static int __ceph_osdc_start_request(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_request *req,
|
|
|
|
bool nofail)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
__register_request(osdc, req);
|
|
|
|
req->r_sent = 0;
|
|
|
|
req->r_got_reply = 0;
|
|
|
|
rc = __map_request(osdc, req, 0);
|
|
|
|
if (rc < 0) {
|
|
|
|
if (nofail) {
|
|
|
|
dout("osdc_start_request failed map, "
|
|
|
|
" will retry %lld\n", req->r_tid);
|
|
|
|
rc = 0;
|
|
|
|
} else {
|
|
|
|
__unregister_request(osdc, req);
|
|
|
|
}
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (req->r_osd == NULL) {
|
|
|
|
dout("send_request %p no up osds in pg\n", req);
|
|
|
|
ceph_monc_request_next_osdmap(&osdc->client->monc);
|
|
|
|
} else {
|
|
|
|
__send_queued(osdc);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
/*
|
|
|
|
* Timeout callback, called every N seconds when 1 or more osd
|
|
|
|
* requests has been active for more than N seconds. When this
|
|
|
|
* happens, we ping all OSDs with requests who have timed out to
|
|
|
|
* ensure any communications channel reset is detected. Reset the
|
|
|
|
* request timeouts another N seconds in the future as we go.
|
|
|
|
* Reschedule the timeout event another N seconds in future (unless
|
|
|
|
* there are no open requests).
|
|
|
|
*/
|
|
|
|
static void handle_timeout(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct ceph_osd_client *osdc =
|
|
|
|
container_of(work, struct ceph_osd_client, timeout_work.work);
|
2012-11-29 00:28:24 +04:00
|
|
|
struct ceph_osd_request *req;
|
2009-10-06 22:31:10 +04:00
|
|
|
struct ceph_osd *osd;
|
2010-02-27 02:32:31 +03:00
|
|
|
unsigned long keepalive =
|
2010-04-07 02:14:15 +04:00
|
|
|
osdc->client->options->osd_keepalive_timeout * HZ;
|
2010-02-27 02:32:31 +03:00
|
|
|
struct list_head slow_osds;
|
2009-10-06 22:31:10 +04:00
|
|
|
dout("timeout\n");
|
|
|
|
down_read(&osdc->map_sem);
|
|
|
|
|
|
|
|
ceph_monc_request_next_osdmap(&osdc->client->monc);
|
|
|
|
|
|
|
|
mutex_lock(&osdc->request_mutex);
|
|
|
|
|
2010-02-27 02:32:31 +03:00
|
|
|
/*
|
|
|
|
* ping osds that are a bit slow. this ensures that if there
|
|
|
|
* is a break in the TCP connection we will notice, and reopen
|
|
|
|
* a connection with that osd (from the fault callback).
|
|
|
|
*/
|
|
|
|
INIT_LIST_HEAD(&slow_osds);
|
|
|
|
list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
|
2010-03-23 00:42:30 +03:00
|
|
|
if (time_before(jiffies, req->r_stamp + keepalive))
|
2010-02-27 02:32:31 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
osd = req->r_osd;
|
|
|
|
BUG_ON(!osd);
|
|
|
|
dout(" tid %llu is slow, will send keepalive on osd%d\n",
|
2009-10-06 22:31:10 +04:00
|
|
|
req->r_tid, osd->o_osd);
|
2010-02-27 02:32:31 +03:00
|
|
|
list_move_tail(&osd->o_keepalive_item, &slow_osds);
|
|
|
|
}
|
|
|
|
while (!list_empty(&slow_osds)) {
|
|
|
|
osd = list_entry(slow_osds.next, struct ceph_osd,
|
|
|
|
o_keepalive_item);
|
|
|
|
list_del_init(&osd->o_keepalive_item);
|
2009-10-06 22:31:10 +04:00
|
|
|
ceph_con_keepalive(&osd->o_con);
|
|
|
|
}
|
|
|
|
|
2010-02-27 02:32:31 +03:00
|
|
|
__schedule_osd_timeout(osdc);
|
2013-02-15 21:42:29 +04:00
|
|
|
__send_queued(osdc);
|
2009-10-06 22:31:10 +04:00
|
|
|
mutex_unlock(&osdc->request_mutex);
|
|
|
|
up_read(&osdc->map_sem);
|
|
|
|
}
|
|
|
|
|
2010-02-03 22:00:26 +03:00
|
|
|
static void handle_osds_timeout(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct ceph_osd_client *osdc =
|
|
|
|
container_of(work, struct ceph_osd_client,
|
|
|
|
osds_timeout_work.work);
|
|
|
|
unsigned long delay =
|
2010-04-07 02:14:15 +04:00
|
|
|
osdc->client->options->osd_idle_ttl * HZ >> 2;
|
2010-02-03 22:00:26 +03:00
|
|
|
|
|
|
|
dout("osds timeout\n");
|
|
|
|
down_read(&osdc->map_sem);
|
2011-09-01 01:45:53 +04:00
|
|
|
remove_old_osds(osdc);
|
2010-02-03 22:00:26 +03:00
|
|
|
up_read(&osdc->map_sem);
|
|
|
|
|
|
|
|
schedule_delayed_work(&osdc->osds_timeout_work,
|
|
|
|
round_jiffies_relative(delay));
|
|
|
|
}
|
|
|
|
|
libceph: follow redirect replies from osds
Follow redirect replies from osds, for details see ceph.git commit
fbbe3ad1220799b7bb00ea30fce581c5eadaf034.
v1 (current) version of redirect reply consists of oloc and oid, which
expands to pool, key, nspace, hash and oid. However, server-side code
that would populate anything other than pool doesn't exist yet, and
hence this commit adds support for pool redirects only. To make sure
that future server-side updates don't break us, we decode all fields
and, if any of key, nspace, hash or oid have a non-default value, error
out with "corrupt osd_op_reply ..." message.
Signed-off-by: Ilya Dryomov <ilya.dryomov@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2014-01-27 19:40:20 +04:00
|
|
|
static int ceph_oloc_decode(void **p, void *end,
|
|
|
|
struct ceph_object_locator *oloc)
|
|
|
|
{
|
|
|
|
u8 struct_v, struct_cv;
|
|
|
|
u32 len;
|
|
|
|
void *struct_end;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
|
|
|
|
struct_v = ceph_decode_8(p);
|
|
|
|
struct_cv = ceph_decode_8(p);
|
|
|
|
if (struct_v < 3) {
|
|
|
|
pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
|
|
|
|
struct_v, struct_cv);
|
|
|
|
goto e_inval;
|
|
|
|
}
|
|
|
|
if (struct_cv > 6) {
|
|
|
|
pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
|
|
|
|
struct_v, struct_cv);
|
|
|
|
goto e_inval;
|
|
|
|
}
|
|
|
|
len = ceph_decode_32(p);
|
|
|
|
ceph_decode_need(p, end, len, e_inval);
|
|
|
|
struct_end = *p + len;
|
|
|
|
|
|
|
|
oloc->pool = ceph_decode_64(p);
|
|
|
|
*p += 4; /* skip preferred */
|
|
|
|
|
|
|
|
len = ceph_decode_32(p);
|
|
|
|
if (len > 0) {
|
|
|
|
pr_warn("ceph_object_locator::key is set\n");
|
|
|
|
goto e_inval;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (struct_v >= 5) {
|
|
|
|
len = ceph_decode_32(p);
|
|
|
|
if (len > 0) {
|
|
|
|
pr_warn("ceph_object_locator::nspace is set\n");
|
|
|
|
goto e_inval;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (struct_v >= 6) {
|
|
|
|
s64 hash = ceph_decode_64(p);
|
|
|
|
if (hash != -1) {
|
|
|
|
pr_warn("ceph_object_locator::hash is set\n");
|
|
|
|
goto e_inval;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* skip the rest */
|
|
|
|
*p = struct_end;
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
e_inval:
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ceph_redirect_decode(void **p, void *end,
|
|
|
|
struct ceph_request_redirect *redir)
|
|
|
|
{
|
|
|
|
u8 struct_v, struct_cv;
|
|
|
|
u32 len;
|
|
|
|
void *struct_end;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
|
|
|
|
struct_v = ceph_decode_8(p);
|
|
|
|
struct_cv = ceph_decode_8(p);
|
|
|
|
if (struct_cv > 1) {
|
|
|
|
pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
|
|
|
|
struct_v, struct_cv);
|
|
|
|
goto e_inval;
|
|
|
|
}
|
|
|
|
len = ceph_decode_32(p);
|
|
|
|
ceph_decode_need(p, end, len, e_inval);
|
|
|
|
struct_end = *p + len;
|
|
|
|
|
|
|
|
ret = ceph_oloc_decode(p, end, &redir->oloc);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
len = ceph_decode_32(p);
|
|
|
|
if (len > 0) {
|
|
|
|
pr_warn("ceph_request_redirect::object_name is set\n");
|
|
|
|
goto e_inval;
|
|
|
|
}
|
|
|
|
|
|
|
|
len = ceph_decode_32(p);
|
|
|
|
*p += len; /* skip osd_instructions */
|
|
|
|
|
|
|
|
/* skip the rest */
|
|
|
|
*p = struct_end;
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
e_inval:
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2011-06-03 20:37:09 +04:00
|
|
|
static void complete_request(struct ceph_osd_request *req)
|
|
|
|
{
|
|
|
|
complete_all(&req->r_safe_completion); /* fsync waiter */
|
|
|
|
}
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
/*
|
|
|
|
* handle osd op reply. either call the callback if it is specified,
|
|
|
|
* or do the completion to wake up the waiting thread.
|
|
|
|
*/
|
2009-12-22 21:45:45 +03:00
|
|
|
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
|
|
|
|
struct ceph_connection *con)
|
2009-10-06 22:31:10 +04:00
|
|
|
{
|
2013-02-26 04:11:12 +04:00
|
|
|
void *p, *end;
|
2009-10-06 22:31:10 +04:00
|
|
|
struct ceph_osd_request *req;
|
libceph: follow redirect replies from osds
Follow redirect replies from osds, for details see ceph.git commit
fbbe3ad1220799b7bb00ea30fce581c5eadaf034.
v1 (current) version of redirect reply consists of oloc and oid, which
expands to pool, key, nspace, hash and oid. However, server-side code
that would populate anything other than pool doesn't exist yet, and
hence this commit adds support for pool redirects only. To make sure
that future server-side updates don't break us, we decode all fields
and, if any of key, nspace, hash or oid have a non-default value, error
out with "corrupt osd_op_reply ..." message.
Signed-off-by: Ilya Dryomov <ilya.dryomov@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2014-01-27 19:40:20 +04:00
|
|
|
struct ceph_request_redirect redir;
|
2009-10-06 22:31:10 +04:00
|
|
|
u64 tid;
|
2013-02-26 04:11:12 +04:00
|
|
|
int object_len;
|
2013-04-04 06:32:51 +04:00
|
|
|
unsigned int numops;
|
|
|
|
int payload_len, flags;
|
2010-05-11 20:53:18 +04:00
|
|
|
s32 result;
|
2013-02-26 04:11:12 +04:00
|
|
|
s32 retry_attempt;
|
|
|
|
struct ceph_pg pg;
|
|
|
|
int err;
|
|
|
|
u32 reassert_epoch;
|
|
|
|
u64 reassert_version;
|
|
|
|
u32 osdmap_epoch;
|
2013-02-27 20:26:25 +04:00
|
|
|
int already_completed;
|
2013-04-03 10:28:57 +04:00
|
|
|
u32 bytes;
|
2013-04-04 06:32:51 +04:00
|
|
|
unsigned int i;
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2009-12-22 22:24:33 +03:00
|
|
|
tid = le64_to_cpu(msg->hdr.tid);
|
2013-02-26 04:11:12 +04:00
|
|
|
dout("handle_reply %p tid %llu\n", msg, tid);
|
|
|
|
|
|
|
|
p = msg->front.iov_base;
|
|
|
|
end = p + msg->front.iov_len;
|
|
|
|
|
|
|
|
ceph_decode_need(&p, end, 4, bad);
|
|
|
|
object_len = ceph_decode_32(&p);
|
|
|
|
ceph_decode_need(&p, end, object_len, bad);
|
|
|
|
p += object_len;
|
|
|
|
|
2013-04-02 03:58:26 +04:00
|
|
|
err = ceph_decode_pgid(&p, end, &pg);
|
2013-02-26 04:11:12 +04:00
|
|
|
if (err)
|
2009-10-06 22:31:10 +04:00
|
|
|
goto bad;
|
2013-02-26 04:11:12 +04:00
|
|
|
|
|
|
|
ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad);
|
|
|
|
flags = ceph_decode_64(&p);
|
|
|
|
result = ceph_decode_32(&p);
|
|
|
|
reassert_epoch = ceph_decode_32(&p);
|
|
|
|
reassert_version = ceph_decode_64(&p);
|
|
|
|
osdmap_epoch = ceph_decode_32(&p);
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
/* lookup */
|
2014-02-03 15:56:33 +04:00
|
|
|
down_read(&osdc->map_sem);
|
2009-10-06 22:31:10 +04:00
|
|
|
mutex_lock(&osdc->request_mutex);
|
|
|
|
req = __lookup_request(osdc, tid);
|
|
|
|
if (req == NULL) {
|
|
|
|
dout("handle_reply tid %llu dne\n", tid);
|
2013-04-02 03:58:26 +04:00
|
|
|
goto bad_mutex;
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
|
|
|
ceph_osdc_get_request(req);
|
2013-02-26 04:11:12 +04:00
|
|
|
|
|
|
|
dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
|
|
|
|
req, result);
|
|
|
|
|
2013-08-15 09:51:58 +04:00
|
|
|
ceph_decode_need(&p, end, 4, bad_put);
|
2013-02-26 04:11:12 +04:00
|
|
|
numops = ceph_decode_32(&p);
|
|
|
|
if (numops > CEPH_OSD_MAX_OP)
|
|
|
|
goto bad_put;
|
|
|
|
if (numops != req->r_num_ops)
|
|
|
|
goto bad_put;
|
|
|
|
payload_len = 0;
|
2013-08-15 09:51:58 +04:00
|
|
|
ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad_put);
|
2013-02-26 04:11:12 +04:00
|
|
|
for (i = 0; i < numops; i++) {
|
|
|
|
struct ceph_osd_op *op = p;
|
|
|
|
int len;
|
|
|
|
|
|
|
|
len = le32_to_cpu(op->payload_len);
|
|
|
|
req->r_reply_op_len[i] = len;
|
|
|
|
dout(" op %d has %d bytes\n", i, len);
|
|
|
|
payload_len += len;
|
|
|
|
p += sizeof(*op);
|
|
|
|
}
|
2013-04-03 10:28:57 +04:00
|
|
|
bytes = le32_to_cpu(msg->hdr.data_len);
|
|
|
|
if (payload_len != bytes) {
|
2014-09-10 08:17:29 +04:00
|
|
|
pr_warn("sum of op payload lens %d != data_len %d\n",
|
|
|
|
payload_len, bytes);
|
2013-02-26 04:11:12 +04:00
|
|
|
goto bad_put;
|
|
|
|
}
|
|
|
|
|
2013-08-15 09:51:58 +04:00
|
|
|
ceph_decode_need(&p, end, 4 + numops * 4, bad_put);
|
2013-02-26 04:11:12 +04:00
|
|
|
retry_attempt = ceph_decode_32(&p);
|
|
|
|
for (i = 0; i < numops; i++)
|
|
|
|
req->r_reply_op_result[i] = ceph_decode_32(&p);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
libceph: follow redirect replies from osds
Follow redirect replies from osds, for details see ceph.git commit
fbbe3ad1220799b7bb00ea30fce581c5eadaf034.
v1 (current) version of redirect reply consists of oloc and oid, which
expands to pool, key, nspace, hash and oid. However, server-side code
that would populate anything other than pool doesn't exist yet, and
hence this commit adds support for pool redirects only. To make sure
that future server-side updates don't break us, we decode all fields
and, if any of key, nspace, hash or oid have a non-default value, error
out with "corrupt osd_op_reply ..." message.
Signed-off-by: Ilya Dryomov <ilya.dryomov@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2014-01-27 19:40:20 +04:00
|
|
|
if (le16_to_cpu(msg->hdr.version) >= 6) {
|
|
|
|
p += 8 + 4; /* skip replay_version */
|
|
|
|
p += 8; /* skip user_version */
|
2013-05-31 11:54:44 +04:00
|
|
|
|
libceph: follow redirect replies from osds
Follow redirect replies from osds, for details see ceph.git commit
fbbe3ad1220799b7bb00ea30fce581c5eadaf034.
v1 (current) version of redirect reply consists of oloc and oid, which
expands to pool, key, nspace, hash and oid. However, server-side code
that would populate anything other than pool doesn't exist yet, and
hence this commit adds support for pool redirects only. To make sure
that future server-side updates don't break us, we decode all fields
and, if any of key, nspace, hash or oid have a non-default value, error
out with "corrupt osd_op_reply ..." message.
Signed-off-by: Ilya Dryomov <ilya.dryomov@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2014-01-27 19:40:20 +04:00
|
|
|
err = ceph_redirect_decode(&p, end, &redir);
|
|
|
|
if (err)
|
|
|
|
goto bad_put;
|
|
|
|
} else {
|
|
|
|
redir.oloc.pool = -1;
|
|
|
|
}
|
2009-10-06 22:31:10 +04:00
|
|
|
|
libceph: follow redirect replies from osds
Follow redirect replies from osds, for details see ceph.git commit
fbbe3ad1220799b7bb00ea30fce581c5eadaf034.
v1 (current) version of redirect reply consists of oloc and oid, which
expands to pool, key, nspace, hash and oid. However, server-side code
that would populate anything other than pool doesn't exist yet, and
hence this commit adds support for pool redirects only. To make sure
that future server-side updates don't break us, we decode all fields
and, if any of key, nspace, hash or oid have a non-default value, error
out with "corrupt osd_op_reply ..." message.
Signed-off-by: Ilya Dryomov <ilya.dryomov@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2014-01-27 19:40:20 +04:00
|
|
|
if (redir.oloc.pool != -1) {
|
|
|
|
dout("redirect pool %lld\n", redir.oloc.pool);
|
|
|
|
|
|
|
|
__unregister_request(osdc, req);
|
|
|
|
|
|
|
|
req->r_target_oloc = redir.oloc; /* struct */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Start redirect requests with nofail=true. If
|
|
|
|
* mapping fails, request will end up on the notarget
|
|
|
|
* list, waiting for the new osdmap (which can take
|
|
|
|
* a while), even though the original request mapped
|
|
|
|
* successfully. In the future we might want to follow
|
|
|
|
* original request's nofail setting here.
|
|
|
|
*/
|
2014-02-03 15:56:33 +04:00
|
|
|
err = __ceph_osdc_start_request(osdc, req, true);
|
libceph: follow redirect replies from osds
Follow redirect replies from osds, for details see ceph.git commit
fbbe3ad1220799b7bb00ea30fce581c5eadaf034.
v1 (current) version of redirect reply consists of oloc and oid, which
expands to pool, key, nspace, hash and oid. However, server-side code
that would populate anything other than pool doesn't exist yet, and
hence this commit adds support for pool redirects only. To make sure
that future server-side updates don't break us, we decode all fields
and, if any of key, nspace, hash or oid have a non-default value, error
out with "corrupt osd_op_reply ..." message.
Signed-off-by: Ilya Dryomov <ilya.dryomov@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2014-01-27 19:40:20 +04:00
|
|
|
BUG_ON(err);
|
|
|
|
|
2014-02-03 15:56:33 +04:00
|
|
|
goto out_unlock;
|
libceph: follow redirect replies from osds
Follow redirect replies from osds, for details see ceph.git commit
fbbe3ad1220799b7bb00ea30fce581c5eadaf034.
v1 (current) version of redirect reply consists of oloc and oid, which
expands to pool, key, nspace, hash and oid. However, server-side code
that would populate anything other than pool doesn't exist yet, and
hence this commit adds support for pool redirects only. To make sure
that future server-side updates don't break us, we decode all fields
and, if any of key, nspace, hash or oid have a non-default value, error
out with "corrupt osd_op_reply ..." message.
Signed-off-by: Ilya Dryomov <ilya.dryomov@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2014-01-27 19:40:20 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
already_completed = req->r_got_reply;
|
|
|
|
if (!req->r_got_reply) {
|
2013-02-26 04:11:12 +04:00
|
|
|
req->r_result = result;
|
2009-10-06 22:31:10 +04:00
|
|
|
dout("handle_reply result %d bytes %d\n", req->r_result,
|
|
|
|
bytes);
|
|
|
|
if (req->r_result == 0)
|
|
|
|
req->r_result = bytes;
|
|
|
|
|
|
|
|
/* in case this is a write and we need to replay, */
|
2013-02-26 04:11:12 +04:00
|
|
|
req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch);
|
|
|
|
req->r_reassert_version.version = cpu_to_le64(reassert_version);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
|
|
|
req->r_got_reply = 1;
|
|
|
|
} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
|
|
|
|
dout("handle_reply tid %llu dup ack\n", tid);
|
2014-02-03 15:56:33 +04:00
|
|
|
goto out_unlock;
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
dout("handle_reply tid %llu flags %d\n", tid, flags);
|
|
|
|
|
2011-03-22 01:07:16 +03:00
|
|
|
if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
|
|
|
|
__register_linger_request(osdc, req);
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
/* either this is a read, or we got the safe response */
|
2010-05-11 20:53:18 +04:00
|
|
|
if (result < 0 ||
|
|
|
|
(flags & CEPH_OSD_FLAG_ONDISK) ||
|
2009-10-06 22:31:10 +04:00
|
|
|
((flags & CEPH_OSD_FLAG_WRITE) == 0))
|
|
|
|
__unregister_request(osdc, req);
|
|
|
|
|
|
|
|
mutex_unlock(&osdc->request_mutex);
|
2014-02-03 15:56:33 +04:00
|
|
|
up_read(&osdc->map_sem);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2013-05-31 11:54:44 +04:00
|
|
|
if (!already_completed) {
|
2013-06-24 10:41:27 +04:00
|
|
|
if (req->r_unsafe_callback &&
|
|
|
|
result >= 0 && !(flags & CEPH_OSD_FLAG_ONDISK))
|
|
|
|
req->r_unsafe_callback(req, true);
|
2013-05-31 11:54:44 +04:00
|
|
|
if (req->r_callback)
|
|
|
|
req->r_callback(req, msg);
|
|
|
|
else
|
|
|
|
complete_all(&req->r_completion);
|
|
|
|
}
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2013-06-24 10:41:27 +04:00
|
|
|
if (flags & CEPH_OSD_FLAG_ONDISK) {
|
|
|
|
if (req->r_unsafe_callback && already_completed)
|
|
|
|
req->r_unsafe_callback(req, false);
|
2011-06-03 20:37:09 +04:00
|
|
|
complete_request(req);
|
2013-06-24 10:41:27 +04:00
|
|
|
}
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2014-02-03 15:56:33 +04:00
|
|
|
out:
|
2011-03-22 01:07:16 +03:00
|
|
|
dout("req=%p req->r_linger=%d\n", req, req->r_linger);
|
2009-10-06 22:31:10 +04:00
|
|
|
ceph_osdc_put_request(req);
|
|
|
|
return;
|
2014-02-03 15:56:33 +04:00
|
|
|
out_unlock:
|
|
|
|
mutex_unlock(&osdc->request_mutex);
|
|
|
|
up_read(&osdc->map_sem);
|
|
|
|
goto out;
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2013-02-26 04:11:12 +04:00
|
|
|
bad_put:
|
2013-11-27 18:28:14 +04:00
|
|
|
req->r_result = -EIO;
|
|
|
|
__unregister_request(osdc, req);
|
|
|
|
if (req->r_callback)
|
|
|
|
req->r_callback(req, msg);
|
|
|
|
else
|
|
|
|
complete_all(&req->r_completion);
|
|
|
|
complete_request(req);
|
2013-02-26 04:11:12 +04:00
|
|
|
ceph_osdc_put_request(req);
|
2013-04-02 03:58:26 +04:00
|
|
|
bad_mutex:
|
|
|
|
mutex_unlock(&osdc->request_mutex);
|
2014-02-03 15:56:33 +04:00
|
|
|
up_read(&osdc->map_sem);
|
2009-10-06 22:31:10 +04:00
|
|
|
bad:
|
2013-02-26 04:11:12 +04:00
|
|
|
pr_err("corrupt osd_op_reply got %d %d\n",
|
|
|
|
(int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
|
2009-12-15 02:13:47 +03:00
|
|
|
ceph_msg_dump(msg);
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
|
|
|
|
2011-01-18 07:34:08 +03:00
|
|
|
static void reset_changed_osds(struct ceph_osd_client *osdc)
|
2009-10-06 22:31:10 +04:00
|
|
|
{
|
|
|
|
struct rb_node *p, *n;
|
|
|
|
|
2015-02-17 19:37:15 +03:00
|
|
|
dout("%s %p\n", __func__, osdc);
|
2011-01-18 07:34:08 +03:00
|
|
|
for (p = rb_first(&osdc->osds); p; p = n) {
|
|
|
|
struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2011-01-18 07:34:08 +03:00
|
|
|
n = rb_next(p);
|
|
|
|
if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
|
|
|
|
memcmp(&osd->o_con.peer_addr,
|
|
|
|
ceph_osd_addr(osdc->osdmap,
|
|
|
|
osd->o_osd),
|
|
|
|
sizeof(struct ceph_entity_addr)) != 0)
|
|
|
|
__reset_osd(osdc, osd);
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
2010-02-27 02:32:31 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2011-01-18 07:34:08 +03:00
|
|
|
* Requeue requests whose mapping to an OSD has changed. If requests map to
|
|
|
|
* no osd, request a new map.
|
2010-02-27 02:32:31 +03:00
|
|
|
*
|
2012-12-27 00:31:40 +04:00
|
|
|
* Caller should hold map_sem for read.
|
2010-02-27 02:32:31 +03:00
|
|
|
*/
|
2013-12-10 21:35:13 +04:00
|
|
|
static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
|
|
|
|
bool force_resend_writes)
|
2010-02-27 02:32:31 +03:00
|
|
|
{
|
2011-03-22 01:07:16 +03:00
|
|
|
struct ceph_osd_request *req, *nreq;
|
2011-01-18 07:34:08 +03:00
|
|
|
struct rb_node *p;
|
|
|
|
int needmap = 0;
|
|
|
|
int err;
|
2013-12-10 21:35:13 +04:00
|
|
|
bool force_resend_req;
|
2010-02-27 02:32:31 +03:00
|
|
|
|
2013-12-10 21:35:13 +04:00
|
|
|
dout("kick_requests %s %s\n", force_resend ? " (force resend)" : "",
|
|
|
|
force_resend_writes ? " (force resend writes)" : "");
|
2010-02-27 02:32:31 +03:00
|
|
|
mutex_lock(&osdc->request_mutex);
|
2012-07-31 03:19:28 +04:00
|
|
|
for (p = rb_first(&osdc->requests); p; ) {
|
2011-01-18 07:34:08 +03:00
|
|
|
req = rb_entry(p, struct ceph_osd_request, r_node);
|
2012-07-31 03:19:28 +04:00
|
|
|
p = rb_next(p);
|
libceph: move linger requests sooner in kick_requests()
The kick_requests() function is called by ceph_osdc_handle_map()
when an osd map change has been indicated. Its purpose is to
re-queue any request whose target osd is different from what it
was when it was originally sent.
It is structured as two loops, one for incomplete but registered
requests, and a second for handling completed linger requests.
As a special case, in the first loop if a request marked to linger
has not yet completed, it is moved from the request list to the
linger list. This is as a quick and dirty way to have the second
loop handle sending the request along with all the other linger
requests.
Because of the way it's done now, however, this quick and dirty
solution can result in these incomplete linger requests never
getting re-sent as desired. The problem lies in the fact that
the second loop only arranges for a linger request to be sent
if it appears its target osd has changed. This is the proper
handling for *completed* linger requests (it avoids issuing
the same linger request twice to the same osd).
But although the linger requests added to the list in the first loop
may have been sent, they have not yet completed, so they need to be
re-sent regardless of whether their target osd has changed.
The first required fix is we need to avoid calling __map_request()
on any incomplete linger request. Otherwise the subsequent
__map_request() call in the second loop will find the target osd
has not changed and will therefore not re-send the request.
Second, we need to be sure that a sent but incomplete linger request
gets re-sent. If the target osd is the same with the new osd map as
it was when the request was originally sent, this won't happen.
This can be fixed through careful handling when we move these
requests from the request list to the linger list, by unregistering
the request *before* it is registered as a linger request. This
works because a side-effect of unregistering the request is to make
the request's r_osd pointer be NULL, and *that* will ensure the
second loop actually re-sends the linger request.
Processing of such a request is done at that point, so continue with
the next one once it's been moved.
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2012-12-20 01:52:36 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* For linger requests that have not yet been
|
|
|
|
* registered, move them to the linger list; they'll
|
|
|
|
* be sent to the osd in the loop below. Unregister
|
|
|
|
* the request before re-registering it as a linger
|
|
|
|
* request to ensure the __map_request() below
|
|
|
|
* will decide it needs to be sent.
|
|
|
|
*/
|
|
|
|
if (req->r_linger && list_empty(&req->r_linger_item)) {
|
|
|
|
dout("%p tid %llu restart on osd%d\n",
|
|
|
|
req, req->r_tid,
|
|
|
|
req->r_osd ? req->r_osd->o_osd : -1);
|
2013-05-23 05:54:25 +04:00
|
|
|
ceph_osdc_get_request(req);
|
libceph: move linger requests sooner in kick_requests()
The kick_requests() function is called by ceph_osdc_handle_map()
when an osd map change has been indicated. Its purpose is to
re-queue any request whose target osd is different from what it
was when it was originally sent.
It is structured as two loops, one for incomplete but registered
requests, and a second for handling completed linger requests.
As a special case, in the first loop if a request marked to linger
has not yet completed, it is moved from the request list to the
linger list. This is a quick and dirty way to have the second
loop handle sending the request along with all the other linger
requests.
Because of the way it's done now, however, this quick and dirty
solution can result in these incomplete linger requests never
getting re-sent as desired. The problem lies in the fact that
the second loop only arranges for a linger request to be sent
if it appears its target osd has changed. This is the proper
handling for *completed* linger requests (it avoids issuing
the same linger request twice to the same osd).
But although the linger requests added to the list in the first loop
may have been sent, they have not yet completed, so they need to be
re-sent regardless of whether their target osd has changed.
The first required fix is we need to avoid calling __map_request()
on any incomplete linger request. Otherwise the subsequent
__map_request() call in the second loop will find the target osd
has not changed and will therefore not re-send the request.
Second, we need to be sure that a sent but incomplete linger request
gets re-sent. If the target osd is the same with the new osd map as
it was when the request was originally sent, this won't happen.
This can be fixed through careful handling when we move these
requests from the request list to the linger list, by unregistering
the request *before* it is registered as a linger request. This
works because a side-effect of unregistering the request is to make
the request's r_osd pointer be NULL, and *that* will ensure the
second loop actually re-sends the linger request.
Processing of such a request is done at that point, so continue with
the next one once it's been moved.
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2012-12-20 01:52:36 +04:00
|
|
|
__unregister_request(osdc, req);
|
|
|
|
__register_linger_request(osdc, req);
|
2013-05-23 05:54:25 +04:00
|
|
|
ceph_osdc_put_request(req);
|
libceph: move linger requests sooner in kick_requests()
The kick_requests() function is called by ceph_osdc_handle_map()
when an osd map change has been indicated. Its purpose is to
re-queue any request whose target osd is different from what it
was when it was originally sent.
It is structured as two loops, one for incomplete but registered
requests, and a second for handling completed linger requests.
As a special case, in the first loop if a request marked to linger
has not yet completed, it is moved from the request list to the
linger list. This is as a quick and dirty way to have the second
loop handle sending the request along with all the other linger
requests.
Because of the way it's done now, however, this quick and dirty
solution can result in these incomplete linger requests never
getting re-sent as desired. The problem lies in the fact that
the second loop only arranges for a linger request to be sent
if it appears its target osd has changed. This is the proper
handling for *completed* linger requests (it avoids issuing
the same linger request twice to the same osd).
But although the linger requests added to the list in the first loop
may have been sent, they have not yet completed, so they need to be
re-sent regardless of whether their target osd has changed.
The first required fix is we need to avoid calling __map_request()
on any incomplete linger request. Otherwise the subsequent
__map_request() call in the second loop will find the target osd
has not changed and will therefore not re-send the request.
Second, we need to be sure that a sent but incomplete linger request
gets re-sent. If the target osd is the same with the new osd map as
it was when the request was originally sent, this won't happen.
This can be fixed through careful handling when we move these
requests from the request list to the linger list, by unregistering
the request *before* it is registered as a linger request. This
works because a side-effect of unregistering the request is to make
the request's r_osd pointer be NULL, and *that* will ensure the
second loop actually re-sends the linger request.
Processing of such a request is done at that point, so continue with
the next one once it's been moved.
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2012-12-20 01:52:36 +04:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2013-12-10 21:35:13 +04:00
|
|
|
force_resend_req = force_resend ||
|
|
|
|
(force_resend_writes &&
|
|
|
|
req->r_flags & CEPH_OSD_FLAG_WRITE);
|
|
|
|
err = __map_request(osdc, req, force_resend_req);
|
2011-01-18 07:34:08 +03:00
|
|
|
if (err < 0)
|
|
|
|
continue; /* error */
|
|
|
|
if (req->r_osd == NULL) {
|
|
|
|
dout("%p tid %llu maps to no osd\n", req, req->r_tid);
|
|
|
|
needmap++; /* request a newer map */
|
|
|
|
} else if (err > 0) {
|
2012-07-31 03:19:28 +04:00
|
|
|
if (!req->r_linger) {
|
|
|
|
dout("%p tid %llu requeued on osd%d\n", req,
|
|
|
|
req->r_tid,
|
|
|
|
req->r_osd ? req->r_osd->o_osd : -1);
|
2011-03-22 01:07:16 +03:00
|
|
|
req->r_flags |= CEPH_OSD_FLAG_RETRY;
|
2012-07-31 03:19:28 +04:00
|
|
|
}
|
|
|
|
}
|
2011-03-22 01:07:16 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
list_for_each_entry_safe(req, nreq, &osdc->req_linger,
|
|
|
|
r_linger_item) {
|
|
|
|
dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
|
|
|
|
|
2013-12-10 21:35:13 +04:00
|
|
|
err = __map_request(osdc, req,
|
|
|
|
force_resend || force_resend_writes);
|
libceph: move linger requests sooner in kick_requests()
The kick_requests() function is called by ceph_osdc_handle_map()
when an osd map change has been indicated. Its purpose is to
re-queue any request whose target osd is different from what it
was when it was originally sent.
It is structured as two loops, one for incomplete but registered
requests, and a second for handling completed linger requests.
As a special case, in the first loop if a request marked to linger
has not yet completed, it is moved from the request list to the
linger list. This is a quick and dirty way to have the second
loop handle sending the request along with all the other linger
requests.
Because of the way it's done now, however, this quick and dirty
solution can result in these incomplete linger requests never
getting re-sent as desired. The problem lies in the fact that
the second loop only arranges for a linger request to be sent
if it appears its target osd has changed. This is the proper
handling for *completed* linger requests (it avoids issuing
the same linger request twice to the same osd).
But although the linger requests added to the list in the first loop
may have been sent, they have not yet completed, so they need to be
re-sent regardless of whether their target osd has changed.
The first required fix is we need to avoid calling __map_request()
on any incomplete linger request. Otherwise the subsequent
__map_request() call in the second loop will find the target osd
has not changed and will therefore not re-send the request.
Second, we need to be sure that a sent but incomplete linger request
gets re-sent. If the target osd is the same with the new osd map as
it was when the request was originally sent, this won't happen.
This can be fixed through careful handling when we move these
requests from the request list to the linger list, by unregistering
the request *before* it is registered as a linger request. This
works because a side-effect of unregistering the request is to make
the request's r_osd pointer be NULL, and *that* will ensure the
second loop actually re-sends the linger request.
Processing of such a request is done at that point, so continue with
the next one once it's been moved.
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
2012-12-20 01:52:36 +04:00
|
|
|
dout("__map_request returned %d\n", err);
|
2011-03-22 01:07:16 +03:00
|
|
|
if (err < 0)
|
|
|
|
continue; /* hrm! */
|
2015-05-11 17:53:10 +03:00
|
|
|
if (req->r_osd == NULL || err > 0) {
|
|
|
|
if (req->r_osd == NULL) {
|
|
|
|
dout("lingering %p tid %llu maps to no osd\n",
|
|
|
|
req, req->r_tid);
|
|
|
|
/*
|
|
|
|
* A homeless lingering request makes
|
|
|
|
* no sense, as its job is to keep
|
|
|
|
* a particular OSD connection open.
|
|
|
|
* Request a newer map and kick the
|
|
|
|
* request, knowing that it won't be
|
|
|
|
* resent until we actually get a map
|
|
|
|
* that can tell us where to send it.
|
|
|
|
*/
|
|
|
|
needmap++;
|
|
|
|
}
|
2011-03-22 01:07:16 +03:00
|
|
|
|
2015-05-11 17:53:10 +03:00
|
|
|
dout("kicking lingering %p tid %llu osd%d\n", req,
|
|
|
|
req->r_tid, req->r_osd ? req->r_osd->o_osd : -1);
|
|
|
|
__register_request(osdc, req);
|
|
|
|
__unregister_linger_request(osdc, req);
|
|
|
|
}
|
2011-01-18 07:34:08 +03:00
|
|
|
}
|
2013-05-16 01:28:33 +04:00
|
|
|
reset_changed_osds(osdc);
|
2009-10-06 22:31:10 +04:00
|
|
|
mutex_unlock(&osdc->request_mutex);
|
|
|
|
|
|
|
|
if (needmap) {
|
|
|
|
dout("%d requests for down osds, need new map\n", needmap);
|
|
|
|
ceph_monc_request_next_osdmap(&osdc->client->monc);
|
|
|
|
}
|
2010-02-27 02:32:31 +03:00
|
|
|
}
|
2011-01-18 07:34:08 +03:00
|
|
|
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
/*
|
|
|
|
* Process updated osd map.
|
|
|
|
*
|
|
|
|
* The message contains any number of incremental and full maps, normally
|
|
|
|
* indicating some sort of topology change in the cluster. Kick requests
|
|
|
|
* off to different OSDs as needed.
|
|
|
|
*/
|
|
|
|
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
|
|
|
|
{
|
|
|
|
void *p, *end, *next;
|
|
|
|
u32 nr_maps, maplen;
|
|
|
|
u32 epoch;
|
|
|
|
struct ceph_osdmap *newmap = NULL, *oldmap;
|
|
|
|
int err;
|
|
|
|
struct ceph_fsid fsid;
|
2013-12-10 21:35:13 +04:00
|
|
|
bool was_full;
|
2009-10-06 22:31:10 +04:00
|
|
|
|
|
|
|
dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
|
|
|
|
p = msg->front.iov_base;
|
|
|
|
end = p + msg->front.iov_len;
|
|
|
|
|
|
|
|
/* verify fsid */
|
|
|
|
ceph_decode_need(&p, end, sizeof(fsid), bad);
|
|
|
|
ceph_decode_copy(&p, &fsid, sizeof(fsid));
|
2009-11-19 03:50:41 +03:00
|
|
|
if (ceph_check_fsid(osdc->client, &fsid) < 0)
|
|
|
|
return;
|
2009-10-06 22:31:10 +04:00
|
|
|
|
|
|
|
down_write(&osdc->map_sem);
|
|
|
|
|
2013-12-10 21:35:13 +04:00
|
|
|
was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
/* incremental maps */
|
|
|
|
ceph_decode_32_safe(&p, end, nr_maps, bad);
|
|
|
|
dout(" %d inc maps\n", nr_maps);
|
|
|
|
while (nr_maps > 0) {
|
|
|
|
ceph_decode_need(&p, end, 2*sizeof(u32), bad);
|
2009-10-14 20:59:09 +04:00
|
|
|
epoch = ceph_decode_32(&p);
|
|
|
|
maplen = ceph_decode_32(&p);
|
2009-10-06 22:31:10 +04:00
|
|
|
ceph_decode_need(&p, end, maplen, bad);
|
|
|
|
next = p + maplen;
|
|
|
|
if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
|
|
|
|
dout("applying incremental map %u len %d\n",
|
|
|
|
epoch, maplen);
|
|
|
|
newmap = osdmap_apply_incremental(&p, next,
|
|
|
|
osdc->osdmap,
|
2012-05-27 08:26:43 +04:00
|
|
|
&osdc->client->msgr);
|
2009-10-06 22:31:10 +04:00
|
|
|
if (IS_ERR(newmap)) {
|
|
|
|
err = PTR_ERR(newmap);
|
|
|
|
goto bad;
|
|
|
|
}
|
2009-12-22 01:49:37 +03:00
|
|
|
BUG_ON(!newmap);
|
2009-10-06 22:31:10 +04:00
|
|
|
if (newmap != osdc->osdmap) {
|
|
|
|
ceph_osdmap_destroy(osdc->osdmap);
|
|
|
|
osdc->osdmap = newmap;
|
|
|
|
}
|
2013-12-10 21:35:13 +04:00
|
|
|
was_full = was_full ||
|
|
|
|
ceph_osdmap_flag(osdc->osdmap,
|
|
|
|
CEPH_OSDMAP_FULL);
|
|
|
|
kick_requests(osdc, 0, was_full);
|
2009-10-06 22:31:10 +04:00
|
|
|
} else {
|
|
|
|
dout("ignoring incremental map %u len %d\n",
|
|
|
|
epoch, maplen);
|
|
|
|
}
|
|
|
|
p = next;
|
|
|
|
nr_maps--;
|
|
|
|
}
|
|
|
|
if (newmap)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
/* full maps */
|
|
|
|
ceph_decode_32_safe(&p, end, nr_maps, bad);
|
|
|
|
dout(" %d full maps\n", nr_maps);
|
|
|
|
while (nr_maps) {
|
|
|
|
ceph_decode_need(&p, end, 2*sizeof(u32), bad);
|
2009-10-14 20:59:09 +04:00
|
|
|
epoch = ceph_decode_32(&p);
|
|
|
|
maplen = ceph_decode_32(&p);
|
2009-10-06 22:31:10 +04:00
|
|
|
ceph_decode_need(&p, end, maplen, bad);
|
|
|
|
if (nr_maps > 1) {
|
|
|
|
dout("skipping non-latest full map %u len %d\n",
|
|
|
|
epoch, maplen);
|
|
|
|
} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
|
|
|
|
dout("skipping full map %u len %d, "
|
|
|
|
"older than our %u\n", epoch, maplen,
|
|
|
|
osdc->osdmap->epoch);
|
|
|
|
} else {
|
2011-10-15 00:33:55 +04:00
|
|
|
int skipped_map = 0;
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
dout("taking full map %u len %d\n", epoch, maplen);
|
2014-03-13 18:36:13 +04:00
|
|
|
newmap = ceph_osdmap_decode(&p, p+maplen);
|
2009-10-06 22:31:10 +04:00
|
|
|
if (IS_ERR(newmap)) {
|
|
|
|
err = PTR_ERR(newmap);
|
|
|
|
goto bad;
|
|
|
|
}
|
2009-12-22 01:49:37 +03:00
|
|
|
BUG_ON(!newmap);
|
2009-10-06 22:31:10 +04:00
|
|
|
oldmap = osdc->osdmap;
|
|
|
|
osdc->osdmap = newmap;
|
2011-10-15 00:33:55 +04:00
|
|
|
if (oldmap) {
|
|
|
|
if (oldmap->epoch + 1 < newmap->epoch)
|
|
|
|
skipped_map = 1;
|
2009-10-06 22:31:10 +04:00
|
|
|
ceph_osdmap_destroy(oldmap);
|
2011-10-15 00:33:55 +04:00
|
|
|
}
|
2013-12-10 21:35:13 +04:00
|
|
|
was_full = was_full ||
|
|
|
|
ceph_osdmap_flag(osdc->osdmap,
|
|
|
|
CEPH_OSDMAP_FULL);
|
|
|
|
kick_requests(osdc, skipped_map, was_full);
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
|
|
|
p += maplen;
|
|
|
|
nr_maps--;
|
|
|
|
}
|
|
|
|
|
2013-08-15 09:52:48 +04:00
|
|
|
if (!osdc->osdmap)
|
|
|
|
goto bad;
|
2009-10-06 22:31:10 +04:00
|
|
|
done:
|
|
|
|
downgrade_write(&osdc->map_sem);
|
|
|
|
ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
|
2011-05-12 20:29:18 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* subscribe to subsequent osdmap updates if full to ensure
|
|
|
|
* we find out when we are no longer full and stop returning
|
|
|
|
* ENOSPC.
|
|
|
|
*/
|
libceph: block I/O when PAUSE or FULL osd map flags are set
The PAUSEWR and PAUSERD flags are meant to stop the cluster from
processing writes and reads, respectively. The FULL flag is set when
the cluster determines that it is out of space, and will no longer
process writes. PAUSEWR and PAUSERD are purely client-side settings
already implemented in userspace clients. The osd does nothing special
with these flags.
When the FULL flag is set, however, the osd responds to all writes
with -ENOSPC. For cephfs, this makes sense, but for rbd the block
layer translates this into EIO. If a cluster goes from full to
non-full quickly, a filesystem on top of rbd will not behave well,
since some writes succeed while others get EIO.
Fix this by blocking any writes when the FULL flag is set in the osd
client. This is the same strategy used by userspace, so apply it by
default. A follow-on patch makes this configurable.
__map_request() is called to re-target osd requests in case the
available osds changed. Add a paused field to a ceph_osd_request, and
set it whenever an appropriate osd map flag is set. Avoid queueing
paused requests in __map_request(), but force them to be resent if
they become unpaused.
Also subscribe to the next osd map from the monitor if any of these
flags are set, so paused requests can be unblocked as soon as
possible.
Fixes: http://tracker.ceph.com/issues/6079
Reviewed-by: Sage Weil <sage@inktank.com>
Signed-off-by: Josh Durgin <josh.durgin@inktank.com>
2013-12-03 07:11:48 +04:00
|
|
|
if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
|
|
|
|
ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
|
|
|
|
ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR))
|
2011-05-12 20:29:18 +04:00
|
|
|
ceph_monc_request_next_osdmap(&osdc->client->monc);
|
|
|
|
|
2013-02-15 21:42:29 +04:00
|
|
|
mutex_lock(&osdc->request_mutex);
|
|
|
|
__send_queued(osdc);
|
|
|
|
mutex_unlock(&osdc->request_mutex);
|
2009-10-06 22:31:10 +04:00
|
|
|
up_read(&osdc->map_sem);
|
2010-07-28 00:11:08 +04:00
|
|
|
wake_up_all(&osdc->client->auth_wq);
|
2009-10-06 22:31:10 +04:00
|
|
|
return;
|
|
|
|
|
|
|
|
bad:
|
|
|
|
pr_err("osdc handle_map corrupt msg\n");
|
2009-12-15 02:13:47 +03:00
|
|
|
ceph_msg_dump(msg);
|
2009-10-06 22:31:10 +04:00
|
|
|
up_write(&osdc->map_sem);
|
|
|
|
}
|
|
|
|
|
2011-03-22 01:07:16 +03:00
|
|
|
/*
|
|
|
|
* watch/notify callback event infrastructure
|
|
|
|
*
|
|
|
|
* These callbacks are used both for watch and notify operations.
|
|
|
|
*/
|
|
|
|
static void __release_event(struct kref *kref)
|
|
|
|
{
|
|
|
|
struct ceph_osd_event *event =
|
|
|
|
container_of(kref, struct ceph_osd_event, kref);
|
|
|
|
|
|
|
|
dout("__release_event %p\n", event);
|
|
|
|
kfree(event);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void get_event(struct ceph_osd_event *event)
|
|
|
|
{
|
|
|
|
kref_get(&event->kref);
|
|
|
|
}
|
|
|
|
|
|
|
|
void ceph_osdc_put_event(struct ceph_osd_event *event)
|
|
|
|
{
|
|
|
|
kref_put(&event->kref, __release_event);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ceph_osdc_put_event);
|
|
|
|
|
|
|
|
static void __insert_event(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_event *new)
|
|
|
|
{
|
|
|
|
struct rb_node **p = &osdc->event_tree.rb_node;
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct ceph_osd_event *event = NULL;
|
|
|
|
|
|
|
|
while (*p) {
|
|
|
|
parent = *p;
|
|
|
|
event = rb_entry(parent, struct ceph_osd_event, node);
|
|
|
|
if (new->cookie < event->cookie)
|
|
|
|
p = &(*p)->rb_left;
|
|
|
|
else if (new->cookie > event->cookie)
|
|
|
|
p = &(*p)->rb_right;
|
|
|
|
else
|
|
|
|
BUG();
|
|
|
|
}
|
|
|
|
|
|
|
|
rb_link_node(&new->node, parent, p);
|
|
|
|
rb_insert_color(&new->node, &osdc->event_tree);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
|
|
|
|
u64 cookie)
|
|
|
|
{
|
|
|
|
struct rb_node **p = &osdc->event_tree.rb_node;
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct ceph_osd_event *event = NULL;
|
|
|
|
|
|
|
|
while (*p) {
|
|
|
|
parent = *p;
|
|
|
|
event = rb_entry(parent, struct ceph_osd_event, node);
|
|
|
|
if (cookie < event->cookie)
|
|
|
|
p = &(*p)->rb_left;
|
|
|
|
else if (cookie > event->cookie)
|
|
|
|
p = &(*p)->rb_right;
|
|
|
|
else
|
|
|
|
return event;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __remove_event(struct ceph_osd_event *event)
|
|
|
|
{
|
|
|
|
struct ceph_osd_client *osdc = event->osdc;
|
|
|
|
|
|
|
|
if (!RB_EMPTY_NODE(&event->node)) {
|
|
|
|
dout("__remove_event removed %p\n", event);
|
|
|
|
rb_erase(&event->node, &osdc->event_tree);
|
|
|
|
ceph_osdc_put_event(event);
|
|
|
|
} else {
|
|
|
|
dout("__remove_event didn't remove %p\n", event);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int ceph_osdc_create_event(struct ceph_osd_client *osdc,
|
|
|
|
void (*event_cb)(u64, u64, u8, void *),
|
2013-02-15 21:42:30 +04:00
|
|
|
void *data, struct ceph_osd_event **pevent)
|
2011-03-22 01:07:16 +03:00
|
|
|
{
|
|
|
|
struct ceph_osd_event *event;
|
|
|
|
|
|
|
|
event = kmalloc(sizeof(*event), GFP_NOIO);
|
|
|
|
if (!event)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
dout("create_event %p\n", event);
|
|
|
|
event->cb = event_cb;
|
2013-02-15 21:42:30 +04:00
|
|
|
event->one_shot = 0;
|
2011-03-22 01:07:16 +03:00
|
|
|
event->data = data;
|
|
|
|
event->osdc = osdc;
|
|
|
|
INIT_LIST_HEAD(&event->osd_node);
|
2012-12-17 22:23:48 +04:00
|
|
|
RB_CLEAR_NODE(&event->node);
|
2011-03-22 01:07:16 +03:00
|
|
|
kref_init(&event->kref); /* one ref for us */
|
|
|
|
kref_get(&event->kref); /* one ref for the caller */
|
|
|
|
|
|
|
|
spin_lock(&osdc->event_lock);
|
|
|
|
event->cookie = ++osdc->event_count;
|
|
|
|
__insert_event(osdc, event);
|
|
|
|
spin_unlock(&osdc->event_lock);
|
|
|
|
|
|
|
|
*pevent = event;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ceph_osdc_create_event);
|
|
|
|
|
|
|
|
void ceph_osdc_cancel_event(struct ceph_osd_event *event)
|
|
|
|
{
|
|
|
|
struct ceph_osd_client *osdc = event->osdc;
|
|
|
|
|
|
|
|
dout("cancel_event %p\n", event);
|
|
|
|
spin_lock(&osdc->event_lock);
|
|
|
|
__remove_event(event);
|
|
|
|
spin_unlock(&osdc->event_lock);
|
|
|
|
ceph_osdc_put_event(event); /* caller's */
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ceph_osdc_cancel_event);
|
|
|
|
|
|
|
|
|
|
|
|
static void do_event_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct ceph_osd_event_work *event_work =
|
|
|
|
container_of(work, struct ceph_osd_event_work, work);
|
|
|
|
struct ceph_osd_event *event = event_work->event;
|
|
|
|
u64 ver = event_work->ver;
|
|
|
|
u64 notify_id = event_work->notify_id;
|
|
|
|
u8 opcode = event_work->opcode;
|
|
|
|
|
|
|
|
dout("do_event_work completing %p\n", event);
|
|
|
|
event->cb(ver, notify_id, opcode, event->data);
|
|
|
|
dout("do_event_work completed %p\n", event);
|
|
|
|
ceph_osdc_put_event(event);
|
|
|
|
kfree(event_work);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Process osd watch notifications
|
|
|
|
*/
|
2013-02-15 21:42:30 +04:00
|
|
|
static void handle_watch_notify(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_msg *msg)
|
2011-03-22 01:07:16 +03:00
|
|
|
{
|
|
|
|
void *p, *end;
|
|
|
|
u8 proto_ver;
|
|
|
|
u64 cookie, ver, notify_id;
|
|
|
|
u8 opcode;
|
|
|
|
struct ceph_osd_event *event;
|
|
|
|
struct ceph_osd_event_work *event_work;
|
|
|
|
|
|
|
|
p = msg->front.iov_base;
|
|
|
|
end = p + msg->front.iov_len;
|
|
|
|
|
|
|
|
ceph_decode_8_safe(&p, end, proto_ver, bad);
|
|
|
|
ceph_decode_8_safe(&p, end, opcode, bad);
|
|
|
|
ceph_decode_64_safe(&p, end, cookie, bad);
|
|
|
|
ceph_decode_64_safe(&p, end, ver, bad);
|
|
|
|
ceph_decode_64_safe(&p, end, notify_id, bad);
|
|
|
|
|
|
|
|
spin_lock(&osdc->event_lock);
|
|
|
|
event = __find_event(osdc, cookie);
|
|
|
|
if (event) {
|
2013-02-15 21:42:30 +04:00
|
|
|
BUG_ON(event->one_shot);
|
2011-03-22 01:07:16 +03:00
|
|
|
get_event(event);
|
|
|
|
}
|
|
|
|
spin_unlock(&osdc->event_lock);
|
|
|
|
dout("handle_watch_notify cookie %lld ver %lld event %p\n",
|
|
|
|
cookie, ver, event);
|
|
|
|
if (event) {
|
|
|
|
event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
|
|
|
|
if (!event_work) {
|
2014-09-11 12:18:53 +04:00
|
|
|
pr_err("couldn't allocate event_work\n");
|
|
|
|
ceph_osdc_put_event(event);
|
|
|
|
return;
|
2011-03-22 01:07:16 +03:00
|
|
|
}
|
2011-03-26 21:29:34 +03:00
|
|
|
INIT_WORK(&event_work->work, do_event_work);
|
2011-03-22 01:07:16 +03:00
|
|
|
event_work->event = event;
|
|
|
|
event_work->ver = ver;
|
|
|
|
event_work->notify_id = notify_id;
|
|
|
|
event_work->opcode = opcode;
|
|
|
|
|
2014-09-11 12:18:53 +04:00
|
|
|
queue_work(osdc->notify_wq, &event_work->work);
|
|
|
|
}
|
2011-03-22 01:07:16 +03:00
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
bad:
|
|
|
|
pr_err("osdc handle_watch_notify corrupt msg\n");
|
|
|
|
}
|
|
|
|
|
2013-04-05 10:27:12 +04:00
|
|
|
/*
|
|
|
|
* build new request AND message
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
|
|
|
|
struct ceph_snap_context *snapc, u64 snap_id,
|
|
|
|
struct timespec *mtime)
|
|
|
|
{
|
|
|
|
struct ceph_msg *msg = req->r_request;
|
|
|
|
void *p;
|
|
|
|
size_t msg_size;
|
|
|
|
int flags = req->r_flags;
|
|
|
|
u64 data_len;
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
req->r_snapid = snap_id;
|
|
|
|
req->r_snapc = ceph_get_snap_context(snapc);
|
|
|
|
|
|
|
|
/* encode request */
|
|
|
|
msg->hdr.version = cpu_to_le16(4);
|
|
|
|
|
|
|
|
p = msg->front.iov_base;
|
|
|
|
ceph_encode_32(&p, 1); /* client_inc is always 1 */
|
|
|
|
req->r_request_osdmap_epoch = p;
|
|
|
|
p += 4;
|
|
|
|
req->r_request_flags = p;
|
|
|
|
p += 4;
|
|
|
|
if (req->r_flags & CEPH_OSD_FLAG_WRITE)
|
|
|
|
ceph_encode_timespec(p, mtime);
|
|
|
|
p += sizeof(struct ceph_timespec);
|
|
|
|
req->r_request_reassert_version = p;
|
|
|
|
p += sizeof(struct ceph_eversion); /* will get filled in */
|
|
|
|
|
|
|
|
/* oloc */
|
|
|
|
ceph_encode_8(&p, 4);
|
|
|
|
ceph_encode_8(&p, 4);
|
|
|
|
ceph_encode_32(&p, 8 + 4 + 4);
|
|
|
|
req->r_request_pool = p;
|
|
|
|
p += 8;
|
|
|
|
ceph_encode_32(&p, -1); /* preferred */
|
|
|
|
ceph_encode_32(&p, 0); /* key len */
|
|
|
|
|
|
|
|
ceph_encode_8(&p, 1);
|
|
|
|
req->r_request_pgid = p;
|
|
|
|
p += 8 + 4;
|
|
|
|
ceph_encode_32(&p, -1); /* preferred */
|
|
|
|
|
|
|
|
/* oid */
|
2014-01-27 19:40:20 +04:00
|
|
|
ceph_encode_32(&p, req->r_base_oid.name_len);
|
|
|
|
memcpy(p, req->r_base_oid.name, req->r_base_oid.name_len);
|
|
|
|
dout("oid '%.*s' len %d\n", req->r_base_oid.name_len,
|
|
|
|
req->r_base_oid.name, req->r_base_oid.name_len);
|
|
|
|
p += req->r_base_oid.name_len;
|
2013-04-05 10:27:12 +04:00
|
|
|
|
|
|
|
/* ops--can imply data */
|
|
|
|
ceph_encode_16(&p, (u16)req->r_num_ops);
|
|
|
|
data_len = 0;
|
|
|
|
for (i = 0; i < req->r_num_ops; i++) {
|
|
|
|
data_len += osd_req_encode_op(req, p, i);
|
|
|
|
p += sizeof(struct ceph_osd_op);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* snaps */
|
|
|
|
ceph_encode_64(&p, req->r_snapid);
|
|
|
|
ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
|
|
|
|
ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
|
|
|
|
if (req->r_snapc) {
|
|
|
|
for (i = 0; i < snapc->num_snaps; i++) {
|
|
|
|
ceph_encode_64(&p, req->r_snapc->snaps[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
req->r_request_attempts = p;
|
|
|
|
p += 4;
|
|
|
|
|
|
|
|
/* data */
|
|
|
|
if (flags & CEPH_OSD_FLAG_WRITE) {
|
|
|
|
u16 data_off;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The header "data_off" is a hint to the receiver
|
|
|
|
* allowing it to align received data into its
|
|
|
|
* buffers such that there's no need to re-copy
|
|
|
|
* it before writing it to disk (direct I/O).
|
|
|
|
*/
|
|
|
|
data_off = (u16) (off & 0xffff);
|
|
|
|
req->r_request->hdr.data_off = cpu_to_le16(data_off);
|
|
|
|
}
|
|
|
|
req->r_request->hdr.data_len = cpu_to_le32(data_len);
|
|
|
|
|
|
|
|
BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
|
|
|
|
msg_size = p - msg->front.iov_base;
|
|
|
|
msg->front.iov_len = msg_size;
|
|
|
|
msg->hdr.front_len = cpu_to_le32(msg_size);
|
|
|
|
|
|
|
|
dout("build_request msg_size was %d\n", (int)msg_size);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ceph_osdc_build_request);
|
|
|
|
|
2013-03-05 04:29:06 +04:00
|
|
|
/*
|
|
|
|
* Register request, send initial attempt.
|
|
|
|
*/
|
|
|
|
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_request *req,
|
|
|
|
bool nofail)
|
|
|
|
{
|
2014-01-31 21:33:39 +04:00
|
|
|
int rc;
|
2013-03-05 04:29:06 +04:00
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
down_read(&osdc->map_sem);
|
|
|
|
mutex_lock(&osdc->request_mutex);
|
2014-01-31 21:33:39 +04:00
|
|
|
|
|
|
|
rc = __ceph_osdc_start_request(osdc, req, nofail);
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
mutex_unlock(&osdc->request_mutex);
|
|
|
|
up_read(&osdc->map_sem);
|
2014-01-31 21:33:39 +04:00
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
return rc;
|
|
|
|
}
|
2010-04-07 02:14:15 +04:00
|
|
|
EXPORT_SYMBOL(ceph_osdc_start_request);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2014-06-19 11:38:13 +04:00
|
|
|
/*
|
|
|
|
* Unregister a registered request. The request is not completed (i.e.
|
|
|
|
* no callbacks or wakeups) - higher layers are supposed to know what
|
|
|
|
* they are canceling.
|
|
|
|
*/
|
|
|
|
void ceph_osdc_cancel_request(struct ceph_osd_request *req)
|
|
|
|
{
|
|
|
|
struct ceph_osd_client *osdc = req->r_osdc;
|
|
|
|
|
|
|
|
mutex_lock(&osdc->request_mutex);
|
|
|
|
if (req->r_linger)
|
|
|
|
__unregister_linger_request(osdc, req);
|
|
|
|
__unregister_request(osdc, req);
|
|
|
|
mutex_unlock(&osdc->request_mutex);
|
|
|
|
|
|
|
|
dout("%s %p tid %llu canceled\n", __func__, req, req->r_tid);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ceph_osdc_cancel_request);
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
/*
|
|
|
|
* wait for a request to complete
|
|
|
|
*/
|
|
|
|
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_osd_request *req)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
2014-06-19 11:38:13 +04:00
|
|
|
dout("%s %p tid %llu\n", __func__, req, req->r_tid);
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
rc = wait_for_completion_interruptible(&req->r_completion);
|
|
|
|
if (rc < 0) {
|
2014-06-19 11:38:13 +04:00
|
|
|
dout("%s %p tid %llu interrupted\n", __func__, req, req->r_tid);
|
|
|
|
ceph_osdc_cancel_request(req);
|
2011-06-03 20:37:09 +04:00
|
|
|
complete_request(req);
|
2009-10-06 22:31:10 +04:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2014-06-19 11:38:13 +04:00
|
|
|
dout("%s %p tid %llu result %d\n", __func__, req, req->r_tid,
|
|
|
|
req->r_result);
|
2009-10-06 22:31:10 +04:00
|
|
|
return req->r_result;
|
|
|
|
}
|
2010-04-07 02:14:15 +04:00
|
|
|
EXPORT_SYMBOL(ceph_osdc_wait_request);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* sync - wait for all in-flight requests to flush. avoid starvation.
|
|
|
|
*/
|
|
|
|
void ceph_osdc_sync(struct ceph_osd_client *osdc)
|
|
|
|
{
|
|
|
|
struct ceph_osd_request *req;
|
|
|
|
u64 last_tid, next_tid = 0;
|
|
|
|
|
|
|
|
mutex_lock(&osdc->request_mutex);
|
|
|
|
last_tid = osdc->last_tid;
|
|
|
|
while (1) {
|
|
|
|
req = __lookup_request_ge(osdc, next_tid);
|
|
|
|
if (!req)
|
|
|
|
break;
|
|
|
|
if (req->r_tid > last_tid)
|
|
|
|
break;
|
|
|
|
|
|
|
|
next_tid = req->r_tid + 1;
|
|
|
|
if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ceph_osdc_get_request(req);
|
|
|
|
mutex_unlock(&osdc->request_mutex);
|
|
|
|
dout("sync waiting on tid %llu (last is %llu)\n",
|
|
|
|
req->r_tid, last_tid);
|
|
|
|
wait_for_completion(&req->r_safe_completion);
|
|
|
|
mutex_lock(&osdc->request_mutex);
|
|
|
|
ceph_osdc_put_request(req);
|
|
|
|
}
|
|
|
|
mutex_unlock(&osdc->request_mutex);
|
|
|
|
dout("sync done (thru tid %llu)\n", last_tid);
|
|
|
|
}
|
2010-04-07 02:14:15 +04:00
|
|
|
EXPORT_SYMBOL(ceph_osdc_sync);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2013-08-29 08:43:09 +04:00
|
|
|
/*
|
|
|
|
* Call all pending notify callbacks - for use after a watch is
|
|
|
|
* unregistered, to make sure no more callbacks for it will be invoked
|
|
|
|
*/
|
2014-06-11 07:30:13 +04:00
|
|
|
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
|
2013-08-29 08:43:09 +04:00
|
|
|
{
|
|
|
|
flush_workqueue(osdc->notify_wq);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ceph_osdc_flush_notifies);
|
|
|
|
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
/*
|
|
|
|
* init, shutdown
|
|
|
|
*/
|
|
|
|
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
dout("init\n");
|
|
|
|
osdc->client = client;
|
|
|
|
osdc->osdmap = NULL;
|
|
|
|
init_rwsem(&osdc->map_sem);
|
|
|
|
init_completion(&osdc->map_waiters);
|
|
|
|
osdc->last_requested_map = 0;
|
|
|
|
mutex_init(&osdc->request_mutex);
|
|
|
|
osdc->last_tid = 0;
|
|
|
|
osdc->osds = RB_ROOT;
|
2010-02-03 22:00:26 +03:00
|
|
|
INIT_LIST_HEAD(&osdc->osd_lru);
|
2009-10-06 22:31:10 +04:00
|
|
|
osdc->requests = RB_ROOT;
|
2010-02-27 02:32:31 +03:00
|
|
|
INIT_LIST_HEAD(&osdc->req_lru);
|
2011-01-18 07:34:08 +03:00
|
|
|
INIT_LIST_HEAD(&osdc->req_unsent);
|
|
|
|
INIT_LIST_HEAD(&osdc->req_notarget);
|
2011-03-22 01:07:16 +03:00
|
|
|
INIT_LIST_HEAD(&osdc->req_linger);
|
2009-10-06 22:31:10 +04:00
|
|
|
osdc->num_requests = 0;
|
|
|
|
INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
|
2010-02-03 22:00:26 +03:00
|
|
|
INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
|
2011-03-22 01:07:16 +03:00
|
|
|
spin_lock_init(&osdc->event_lock);
|
|
|
|
osdc->event_tree = RB_ROOT;
|
|
|
|
osdc->event_count = 0;
|
2010-02-03 22:00:26 +03:00
|
|
|
|
|
|
|
schedule_delayed_work(&osdc->osds_timeout_work,
|
2010-04-07 02:14:15 +04:00
|
|
|
round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2009-11-19 01:52:18 +03:00
|
|
|
err = -ENOMEM;
|
2009-10-06 22:31:10 +04:00
|
|
|
osdc->req_mempool = mempool_create_kmalloc_pool(10,
|
|
|
|
sizeof(struct ceph_osd_request));
|
|
|
|
if (!osdc->req_mempool)
|
2009-11-19 01:52:18 +03:00
|
|
|
goto out;
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2012-07-10 01:22:34 +04:00
|
|
|
err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
|
|
|
|
OSD_OP_FRONT_LEN, 10, true,
|
2010-04-24 20:56:35 +04:00
|
|
|
"osd_op");
|
2009-10-06 22:31:10 +04:00
|
|
|
if (err < 0)
|
2009-11-19 01:52:18 +03:00
|
|
|
goto out_mempool;
|
2012-07-10 01:22:34 +04:00
|
|
|
err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
|
2010-04-24 20:56:35 +04:00
|
|
|
OSD_OPREPLY_FRONT_LEN, 10, true,
|
|
|
|
"osd_op_reply");
|
2010-03-02 00:02:00 +03:00
|
|
|
if (err < 0)
|
|
|
|
goto out_msgpool;
|
2011-03-22 01:07:16 +03:00
|
|
|
|
2013-08-15 09:58:59 +04:00
|
|
|
err = -ENOMEM;
|
2011-03-22 01:07:16 +03:00
|
|
|
osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
|
2013-08-15 09:58:59 +04:00
|
|
|
if (!osdc->notify_wq)
|
2014-01-31 19:49:22 +04:00
|
|
|
goto out_msgpool_reply;
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
return 0;
|
2009-11-19 01:52:18 +03:00
|
|
|
|
2014-01-31 19:49:22 +04:00
|
|
|
out_msgpool_reply:
|
|
|
|
ceph_msgpool_destroy(&osdc->msgpool_op_reply);
|
2010-03-02 00:02:00 +03:00
|
|
|
out_msgpool:
|
|
|
|
ceph_msgpool_destroy(&osdc->msgpool_op);
|
2009-11-19 01:52:18 +03:00
|
|
|
out_mempool:
|
|
|
|
mempool_destroy(osdc->req_mempool);
|
|
|
|
out:
|
|
|
|
return err;
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
void ceph_osdc_stop(struct ceph_osd_client *osdc)
|
|
|
|
{
|
2011-03-22 01:07:16 +03:00
|
|
|
flush_workqueue(osdc->notify_wq);
|
|
|
|
destroy_workqueue(osdc->notify_wq);
|
2009-10-06 22:31:10 +04:00
|
|
|
cancel_delayed_work_sync(&osdc->timeout_work);
|
2010-02-03 22:00:26 +03:00
|
|
|
cancel_delayed_work_sync(&osdc->osds_timeout_work);
|
2009-10-06 22:31:10 +04:00
|
|
|
if (osdc->osdmap) {
|
|
|
|
ceph_osdmap_destroy(osdc->osdmap);
|
|
|
|
osdc->osdmap = NULL;
|
|
|
|
}
|
2011-09-01 01:45:53 +04:00
|
|
|
remove_all_osds(osdc);
|
2009-10-06 22:31:10 +04:00
|
|
|
mempool_destroy(osdc->req_mempool);
|
|
|
|
ceph_msgpool_destroy(&osdc->msgpool_op);
|
2010-03-02 00:02:00 +03:00
|
|
|
ceph_msgpool_destroy(&osdc->msgpool_op_reply);
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read some contiguous pages. If we cross a stripe boundary, shorten
|
|
|
|
* *plen. Return number of bytes read, or error.
|
|
|
|
*/
|
|
|
|
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
|
|
|
|
struct ceph_vino vino, struct ceph_file_layout *layout,
|
|
|
|
u64 off, u64 *plen,
|
|
|
|
u32 truncate_seq, u64 truncate_size,
|
2010-11-09 23:43:12 +03:00
|
|
|
struct page **pages, int num_pages, int page_align)
|
2009-10-06 22:31:10 +04:00
|
|
|
{
|
|
|
|
struct ceph_osd_request *req;
|
|
|
|
int rc = 0;
|
|
|
|
|
|
|
|
dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
|
|
|
|
vino.snap, off, *plen);
|
2014-11-13 09:40:37 +03:00
|
|
|
req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
|
2009-10-06 22:31:10 +04:00
|
|
|
CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
|
2013-03-14 23:09:05 +04:00
|
|
|
NULL, truncate_seq, truncate_size,
|
libceph: don't assign page info in ceph_osdc_new_request()
Currently ceph_osdc_new_request() assigns an osd request's
r_num_pages and r_alignment fields. The only thing it does
after that is call ceph_osdc_build_request(), and that doesn't
need those fields to be assigned.
Move the assignment of those fields out of ceph_osdc_new_request()
and into its caller. As a result, the page_align parameter is no
longer used, so get rid of it.
Note that in ceph_sync_write(), the value for req->r_num_pages had
already been calculated earlier (as num_pages, and fortunately
it was computed the same way). So don't bother recomputing it,
but because it's not needed earlier, move that calculation after the
call to ceph_osdc_new_request(). Hold off making the assignment to
r_alignment, doing it instead r_pages and r_num_pages are
getting set.
Similarly, in start_read(), nr_pages already holds the number of
pages in the array (and is calculated the same way), so there's no
need to recompute it. Move the assignment of the page alignment
down with the others there as well.
This and the next few patches are preparation work for:
http://tracker.ceph.com/issues/4127
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
2013-03-02 04:00:15 +04:00
|
|
|
false);
|
2012-09-25 08:01:02 +04:00
|
|
|
if (IS_ERR(req))
|
|
|
|
return PTR_ERR(req);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
|
|
|
/* it may be a short read due to an object boundary */
|
2013-02-14 22:16:43 +04:00
|
|
|
|
2013-04-15 23:50:36 +04:00
|
|
|
osd_req_op_extent_osd_data_pages(req, 0,
|
2013-04-05 10:27:12 +04:00
|
|
|
pages, *plen, page_align, false, false);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2013-03-08 01:38:25 +04:00
|
|
|
dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
|
2013-04-03 10:28:57 +04:00
|
|
|
off, *plen, *plen, page_align);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2013-04-04 06:32:51 +04:00
|
|
|
ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);
|
2013-03-14 23:09:06 +04:00
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
rc = ceph_osdc_start_request(osdc, req, false);
|
|
|
|
if (!rc)
|
|
|
|
rc = ceph_osdc_wait_request(osdc, req);
|
|
|
|
|
|
|
|
ceph_osdc_put_request(req);
|
|
|
|
dout("readpages result %d\n", rc);
|
|
|
|
return rc;
|
|
|
|
}
|
2010-04-07 02:14:15 +04:00
|
|
|
EXPORT_SYMBOL(ceph_osdc_readpages);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* do a synchronous write on N pages
|
|
|
|
*/
|
|
|
|
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
|
|
|
|
struct ceph_file_layout *layout,
|
|
|
|
struct ceph_snap_context *snapc,
|
|
|
|
u64 off, u64 len,
|
|
|
|
u32 truncate_seq, u64 truncate_size,
|
|
|
|
struct timespec *mtime,
|
2013-02-15 21:42:29 +04:00
|
|
|
struct page **pages, int num_pages)
|
2009-10-06 22:31:10 +04:00
|
|
|
{
|
|
|
|
struct ceph_osd_request *req;
|
|
|
|
int rc = 0;
|
2010-11-09 23:43:12 +03:00
|
|
|
int page_align = off & ~PAGE_MASK;
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2013-03-14 23:09:05 +04:00
|
|
|
BUG_ON(vino.snap != CEPH_NOSNAP); /* snapshots aren't writeable */
|
2014-11-13 09:40:37 +03:00
|
|
|
req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
|
2009-10-06 22:31:10 +04:00
|
|
|
CEPH_OSD_OP_WRITE,
|
2013-02-15 21:42:29 +04:00
|
|
|
CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
|
2013-03-14 23:09:05 +04:00
|
|
|
snapc, truncate_seq, truncate_size,
|
libceph: don't assign page info in ceph_osdc_new_request()
Currently ceph_osdc_new_request() assigns an osd request's
r_num_pages and r_alignment fields. The only thing it does
after that is call ceph_osdc_build_request(), and that doesn't
need those fields to be assigned.
Move the assignment of those fields out of ceph_osdc_new_request()
and into its caller. As a result, the page_align parameter is no
longer used, so get rid of it.
Note that in ceph_sync_write(), the value for req->r_num_pages had
already been calculated earlier (as num_pages, and fortunately
it was computed the same way). So don't bother recomputing it,
but because it's not needed earlier, move that calculation after the
call to ceph_osdc_new_request(). Hold off making the assignment to
r_alignment, doing it instead r_pages and r_num_pages are
getting set.
Similarly, in start_read(), nr_pages already holds the number of
pages in the array (and is calculated the same way), so there's no
need to recompute it. Move the assignment of the page alignment
down with the others there as well.
This and the next few patches are preparation work for:
http://tracker.ceph.com/issues/4127
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
2013-03-02 04:00:15 +04:00
|
|
|
true);
|
2012-09-25 08:01:02 +04:00
|
|
|
if (IS_ERR(req))
|
|
|
|
return PTR_ERR(req);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
|
|
|
/* it may be a short write due to an object boundary */
|
2013-04-15 23:50:36 +04:00
|
|
|
osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
|
2013-04-03 10:28:57 +04:00
|
|
|
false, false);
|
|
|
|
dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2013-04-04 06:32:51 +04:00
|
|
|
ceph_osdc_build_request(req, off, snapc, CEPH_NOSNAP, mtime);
|
2013-03-14 23:09:06 +04:00
|
|
|
|
2013-02-15 21:42:29 +04:00
|
|
|
rc = ceph_osdc_start_request(osdc, req, true);
|
2009-10-06 22:31:10 +04:00
|
|
|
if (!rc)
|
|
|
|
rc = ceph_osdc_wait_request(osdc, req);
|
|
|
|
|
|
|
|
ceph_osdc_put_request(req);
|
|
|
|
if (rc == 0)
|
|
|
|
rc = len;
|
|
|
|
dout("writepages result %d\n", rc);
|
|
|
|
return rc;
|
|
|
|
}
|
2010-04-07 02:14:15 +04:00
|
|
|
EXPORT_SYMBOL(ceph_osdc_writepages);
|
2009-10-06 22:31:10 +04:00
|
|
|
|
2013-05-01 21:43:04 +04:00
|
|
|
int ceph_osdc_setup(void)
|
|
|
|
{
|
|
|
|
BUG_ON(ceph_osd_request_cache);
|
|
|
|
ceph_osd_request_cache = kmem_cache_create("ceph_osd_request",
|
|
|
|
sizeof (struct ceph_osd_request),
|
|
|
|
__alignof__(struct ceph_osd_request),
|
|
|
|
0, NULL);
|
|
|
|
|
|
|
|
return ceph_osd_request_cache ? 0 : -ENOMEM;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ceph_osdc_setup);
|
|
|
|
|
|
|
|
void ceph_osdc_cleanup(void)
|
|
|
|
{
|
|
|
|
BUG_ON(!ceph_osd_request_cache);
|
|
|
|
kmem_cache_destroy(ceph_osd_request_cache);
|
|
|
|
ceph_osd_request_cache = NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ceph_osdc_cleanup);
|
|
|
|
|
2009-10-06 22:31:10 +04:00
|
|
|
/*
 * handle incoming message
 *
 * Messenger callback: route an incoming message from an OSD to the
 * appropriate handler based on its type.  Consumes the caller's
 * reference on @msg (ceph_msg_put() on every path).
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;
	int type = le16_to_cpu(msg->hdr.type);

	/* Connection may no longer be attached to an osd; just drop msg. */
	if (!osd)
		goto out;
	osdc = osd->o_osdc;

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		/* reply handler needs @con to match the reply to its osd */
		handle_reply(osdc, msg, con);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		/* unexpected type: log it; msg is still put below */
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}
|
|
|
|
|
2010-02-20 08:43:23 +03:00
|
|
|
/*
 * lookup and return message for incoming reply. set up reply message
 * pages.
 *
 * Looks up the pending request by tid and returns (a reference to) its
 * preallocated reply message for the messenger to read into.  Sets
 * *skip = 1 and returns NULL when the reply should be discarded (no
 * matching request, or the request's data buffer is too small).
 * Returns NULL with *skip left at 0 on allocation failure.
 *
 * Takes and releases osdc->request_mutex around the request lookup and
 * reply-message manipulation.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m;
	struct ceph_osd_request *req;
	int front_len = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid;

	tid = le64_to_cpu(hdr->tid);
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (!req) {
		/* request was already completed/canceled; drop the reply */
		*skip = 1;
		m = NULL;
		dout("get_reply unknown tid %llu from osd%d\n", tid,
		     osd->o_osd);
		goto out;
	}

	/*
	 * The preallocated reply may still be claimed by an older
	 * connection (e.g. after a reconnect); revoke it first.
	 */
	if (req->r_reply->con)
		dout("%s revoking msg %p from old con %p\n", __func__,
		     req->r_reply, req->r_reply->con);
	ceph_msg_revoke_incoming(req->r_reply);

	if (front_len > req->r_reply->front_alloc_len) {
		/* front is larger than expected; allocate a bigger msg */
		pr_warn("get_reply front %d > preallocated %d (%u#%llu)\n",
			front_len, req->r_reply->front_alloc_len,
			(unsigned int)con->peer_name.type,
			le64_to_cpu(con->peer_name.num));
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
				 false);
		if (!m)
			goto out;
		/* replace the old preallocated reply with the new one */
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}
	/* caller's reference on the reply message */
	m = ceph_msg_get(req->r_reply);

	if (data_len > 0) {
		struct ceph_osd_data *osd_data;

		/*
		 * XXX This is assuming there is only one op containing
		 * XXX page data.  Probably OK for reads, but this
		 * XXX ought to be done more generally.
		 */
		osd_data = osd_req_op_extent_osd_data(req, 0);
		if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
			if (osd_data->pages &&
				unlikely(osd_data->length < data_len)) {

				/* reply payload won't fit; skip it */
				pr_warn("tid %lld reply has %d bytes we had only %llu bytes ready\n",
					tid, data_len, osd_data->length);
				*skip = 1;
				ceph_msg_put(m);
				m = NULL;
				goto out;
			}
		}
	}
	*skip = 0;
	dout("get_reply tid %lld %p\n", tid, m);

out:
	mutex_unlock(&osdc->request_mutex);
	return m;
}
|
|
|
|
|
|
|
|
static struct ceph_msg *alloc_msg(struct ceph_connection *con,
|
|
|
|
struct ceph_msg_header *hdr,
|
|
|
|
int *skip)
|
|
|
|
{
|
|
|
|
struct ceph_osd *osd = con->private;
|
|
|
|
int type = le16_to_cpu(hdr->type);
|
|
|
|
int front = le32_to_cpu(hdr->front_len);
|
|
|
|
|
2012-06-04 23:43:32 +04:00
|
|
|
*skip = 0;
|
2010-02-20 08:43:23 +03:00
|
|
|
switch (type) {
|
|
|
|
case CEPH_MSG_OSD_MAP:
|
2011-03-22 01:07:16 +03:00
|
|
|
case CEPH_MSG_WATCH_NOTIFY:
|
2011-08-10 02:03:46 +04:00
|
|
|
return ceph_msg_new(type, front, GFP_NOFS, false);
|
2010-02-20 08:43:23 +03:00
|
|
|
case CEPH_MSG_OSD_OPREPLY:
|
|
|
|
return get_reply(con, hdr, skip);
|
|
|
|
default:
|
|
|
|
pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
|
|
|
|
osd->o_osd);
|
|
|
|
*skip = 1;
|
|
|
|
return NULL;
|
|
|
|
}
|
2009-10-06 22:31:10 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wrappers to refcount containing ceph_osd struct
|
|
|
|
*/
|
|
|
|
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
|
|
|
|
{
|
|
|
|
struct ceph_osd *osd = con->private;
|
|
|
|
if (get_osd(osd))
|
|
|
|
return con;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void put_osd_con(struct ceph_connection *con)
|
|
|
|
{
|
|
|
|
struct ceph_osd *osd = con->private;
|
|
|
|
put_osd(osd);
|
|
|
|
}
|
|
|
|
|
2009-11-19 03:19:57 +03:00
|
|
|
/*
 * authentication
 */
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 *
 * Build (or refresh) the auth handshake for this OSD connection.
 * If @force_new, any existing authorizer is destroyed and rebuilt;
 * otherwise an existing one is updated in place.  On success *proto
 * is set to the auth protocol in use and the per-osd handshake is
 * returned; on failure an ERR_PTR is returned.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	/* caller wants a brand-new authorizer: throw away the old one */
	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(ac, auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		/* reuse the existing authorizer, refreshing its ticket */
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}
|
|
|
|
|
|
|
|
|
|
|
|
static int verify_authorizer_reply(struct ceph_connection *con, int len)
|
|
|
|
{
|
|
|
|
struct ceph_osd *o = con->private;
|
|
|
|
struct ceph_osd_client *osdc = o->o_osdc;
|
|
|
|
struct ceph_auth_client *ac = osdc->client->monc.auth;
|
|
|
|
|
2013-03-25 21:26:14 +04:00
|
|
|
return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
|
2009-11-19 03:19:57 +03:00
|
|
|
}
|
|
|
|
|
2010-02-03 03:21:06 +03:00
|
|
|
static int invalidate_authorizer(struct ceph_connection *con)
|
|
|
|
{
|
|
|
|
struct ceph_osd *o = con->private;
|
|
|
|
struct ceph_osd_client *osdc = o->o_osdc;
|
|
|
|
struct ceph_auth_client *ac = osdc->client->monc.auth;
|
|
|
|
|
2013-03-25 21:26:14 +04:00
|
|
|
ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
|
2010-02-03 03:21:06 +03:00
|
|
|
return ceph_monc_validate_auth(&osdc->client->monc);
|
|
|
|
}
|
2009-11-19 03:19:57 +03:00
|
|
|
|
2014-11-04 11:33:37 +03:00
|
|
|
static int sign_message(struct ceph_connection *con, struct ceph_msg *msg)
|
|
|
|
{
|
|
|
|
struct ceph_osd *o = con->private;
|
|
|
|
struct ceph_auth_handshake *auth = &o->o_auth;
|
|
|
|
return ceph_auth_sign_message(auth, msg);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int check_message_signature(struct ceph_connection *con, struct ceph_msg *msg)
|
|
|
|
{
|
|
|
|
struct ceph_osd *o = con->private;
|
|
|
|
struct ceph_auth_handshake *auth = &o->o_auth;
|
|
|
|
return ceph_auth_check_message_signature(auth, msg);
|
|
|
|
}
|
|
|
|
|
2010-05-20 12:40:19 +04:00
|
|
|
/*
 * Messenger operations for OSD connections: refcounting of the owning
 * ceph_osd, message allocation/dispatch, authentication, message
 * signing, and fault (connection reset) handling.
 */
static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.sign_message = sign_message,
	.check_message_signature = check_message_signature,
	.fault = osd_reset,
};
|