block: convert REQ_ATOM_COMPLETE to stealing rq->__deadline bit
We only have one atomic flag left. Instead of using an entire unsigned
long for that, steal the bottom bit of the deadline field that we
already reserved.

Remove ->atomic_flags, since it's now unused.

Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: 0a72e7f449
Commit: e14575b3d4
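For context, a minimal sketch of the bit-stealing idea, using illustrative names rather than the kernel's exact helpers. It assumes, as the message notes, that bit 0 of __deadline was already reserved by masking it off in the deadline set/get accessors; a jiffies deadline is coarse enough that giving up that bit costs nothing.

	#include <linux/bitops.h>	/* test_and_set_bit(), test_bit() */

	/* Illustrative packing of a deadline and a completion marker into one
	 * unsigned long. Bit 0 is the marker; the deadline accessors mask it
	 * off, so storing a new deadline may clear the marker and callers
	 * have to be fine with that.
	 */
	struct demo_rq {
		unsigned long __deadline;	/* bit 0: complete, bits 1..: time */
	};

	static inline void demo_set_deadline(struct demo_rq *rq, unsigned long time)
	{
		rq->__deadline = time & ~1UL;	/* keep the marker bit out of the time */
	}

	static inline unsigned long demo_deadline(struct demo_rq *rq)
	{
		return rq->__deadline & ~1UL;
	}

	static inline int demo_mark_complete(struct demo_rq *rq)
	{
		/* non-zero return means someone else already claimed the request */
		return test_and_set_bit(0, &rq->__deadline);
	}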
block/blk-core.c

@@ -2853,7 +2853,7 @@ void blk_start_request(struct request *req)
 		wbt_issue(req->q->rq_wb, &req->issue_stat);
 	}
 
-	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
+	BUG_ON(blk_rq_is_complete(req));
 	blk_add_timer(req);
 }
 EXPORT_SYMBOL(blk_start_request);
block/blk-mq-debugfs.c

@@ -294,12 +294,6 @@ static const char *const rqf_name[] = {
 };
 #undef RQF_NAME
 
-#define RQAF_NAME(name) [REQ_ATOM_##name] = #name
-static const char *const rqaf_name[] = {
-	RQAF_NAME(COMPLETE),
-};
-#undef RQAF_NAME
-
 int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
 {
 	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
@@ -316,8 +310,7 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
 	seq_puts(m, ", .rq_flags=");
 	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
 		       ARRAY_SIZE(rqf_name));
-	seq_puts(m, ", .atomic_flags=");
-	blk_flags_show(m, rq->atomic_flags, rqaf_name, ARRAY_SIZE(rqaf_name));
+	seq_printf(m, ", complete=%d", blk_rq_is_complete(rq));
 	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
 		   rq->internal_tag);
 	if (mq_ops->show_rq)
block/blk-mq.c

@@ -294,7 +294,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 		rq->rq_flags |= RQF_PREEMPT;
 	if (blk_queue_io_stat(data->q))
 		rq->rq_flags |= RQF_IO_STAT;
-	/* do not touch atomic flags, it needs atomic ops against the timer */
 	rq->cpu = -1;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
@@ -313,6 +312,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	rq->special = NULL;
 	/* tag was already set */
 	rq->extra_len = 0;
+	rq->__deadline = 0;
 
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->timeout = 0;
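Zeroing __deadline at allocation now does double duty: it resets the deadline and clears the stolen bit, so a recycled request never starts out looking complete. A hedged sketch of the invariant this protects, with a hypothetical helper that is not part of the patch:

	/* Hypothetical sanity check: right after blk_mq_rq_ctx_init() the
	 * stolen bit must be clear, otherwise blk_mark_rq_complete() would
	 * report the fresh request as already claimed.
	 */
	static inline void demo_assert_fresh(struct request *rq)
	{
		WARN_ON_ONCE(test_bit(0, &rq->__deadline));
	}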
block/blk.h

@@ -119,25 +119,24 @@ void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);
 void blk_account_io_done(struct request *req);
 
-/*
- * Internal atomic flags for request handling
- */
-enum rq_atomic_flags {
-	REQ_ATOM_COMPLETE = 0,
-};
-
 /*
  * EH timer and IO completion will both attempt to 'grab' the request, make
- * sure that only one of them succeeds
+ * sure that only one of them succeeds. Steal the bottom bit of the
+ * __deadline field for this.
  */
 static inline int blk_mark_rq_complete(struct request *rq)
 {
-	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+	return test_and_set_bit(0, &rq->__deadline);
 }
 
 static inline void blk_clear_rq_complete(struct request *rq)
 {
-	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+	clear_bit(0, &rq->__deadline);
+}
+
+static inline bool blk_rq_is_complete(struct request *rq)
+{
+	return test_bit(0, &rq->__deadline);
 }
 
 /*
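These helpers are what keep the timeout path and the normal completion path mutually exclusive. A simplified sketch of the usage pattern follows; the function names are illustrative, not the actual call sites:

	/*
	 * Both paths try to claim the request; the atomic test-and-set on
	 * __deadline bit 0 guarantees exactly one of them proceeds.
	 */
	static void demo_complete_path(struct request *rq)
	{
		if (blk_mark_rq_complete(rq))
			return;		/* the timeout handler won the race */
		/* ... perform the real completion ... */
	}

	static void demo_timeout_path(struct request *rq)
	{
		if (blk_mark_rq_complete(rq))
			return;		/* normal completion won the race */
		/* ... handle the timeout; blk_clear_rq_complete(rq) if retrying ... */
	}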
include/linux/blkdev.h

@@ -156,8 +156,6 @@ struct request {
 
 	int internal_tag;
 
-	unsigned long atomic_flags;
-
 	/* the following two fields are internal, NEVER access directly */
 	unsigned int __data_len;	/* total data len */
 	int tag;