block: remove elv_abort_queue and blk_abort_flushes

elv_abort_queue has no callers, and blk_abort_flushes is only called
by elv_abort_queue.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent: 2dc24b0d06
Commit: 2940474af7
block/blk-flush.c
@@ -421,44 +421,6 @@ void blk_insert_flush(struct request *rq)
 	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
 }
 
-/**
- * blk_abort_flushes - @q is being aborted, abort flush requests
- * @q: request_queue being aborted
- *
- * To be called from elv_abort_queue().  @q is being aborted.  Prepare all
- * FLUSH/FUA requests for abortion.
- *
- * CONTEXT:
- * spin_lock_irq(q->queue_lock)
- */
-void blk_abort_flushes(struct request_queue *q)
-{
-	struct request *rq, *n;
-	int i;
-
-	/*
-	 * Requests in flight for data are already owned by the dispatch
-	 * queue or the device driver. Just restore for normal completion.
-	 */
-	list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
-		list_del_init(&rq->flush.list);
-		blk_flush_restore_request(rq);
-	}
-
-	/*
-	 * We need to give away requests on flush queues. Restore for
-	 * normal completion and put them on the dispatch queue.
-	 */
-	for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
-		list_for_each_entry_safe(rq, n, &q->flush_queue[i],
-					 flush.list) {
-			list_del_init(&rq->flush.list);
-			blk_flush_restore_request(rq);
-			list_add_tail(&rq->queuelist, &q->queue_head);
-		}
-	}
-}
-
 /**
  * blkdev_issue_flush - queue a flush
  * @bdev:	blockdev to issue flush for
block/blk.h
@@ -84,7 +84,6 @@ static inline void blk_clear_rq_complete(struct request *rq)
 #define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
 
 void blk_insert_flush(struct request *rq);
-void blk_abort_flushes(struct request_queue *q);
 
 static inline struct request *__elv_next_request(struct request_queue *q)
 {
block/elevator.c
@@ -729,26 +729,6 @@ int elv_may_queue(struct request_queue *q, int rw)
 	return ELV_MQUEUE_MAY;
 }
 
-void elv_abort_queue(struct request_queue *q)
-{
-	struct request *rq;
-
-	blk_abort_flushes(q);
-
-	while (!list_empty(&q->queue_head)) {
-		rq = list_entry_rq(q->queue_head.next);
-		rq->cmd_flags |= REQ_QUIET;
-		trace_block_rq_abort(q, rq);
-		/*
-		 * Mark this request as started so we don't trigger
-		 * any debug logic in the end I/O path.
-		 */
-		blk_start_request(rq);
-		__blk_end_request_all(rq, -EIO);
-	}
-}
-EXPORT_SYMBOL(elv_abort_queue);
-
 void elv_completed_request(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
include/linux/elevator.h
@@ -133,7 +133,6 @@ extern struct request *elv_latter_request(struct request_queue *, struct request
 extern int elv_register_queue(struct request_queue *q);
 extern void elv_unregister_queue(struct request_queue *q);
 extern int elv_may_queue(struct request_queue *, int);
-extern void elv_abort_queue(struct request_queue *);
 extern void elv_completed_request(struct request_queue *, struct request *);
 extern int elv_set_request(struct request_queue *q, struct request *rq,
 			struct bio *bio, gfp_t gfp_mask);