block: pass an explicit gfp_t to get_request
blk_old_get_request already has it at hand, and in blk_queue_bio, which is the fast path, it is constant.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Parent
ff005a0662
Commit
4accf5fc79
|
@ -1332,6 +1332,7 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
|
|||
* @op: operation and flags
|
||||
* @bio: bio to allocate request for (can be %NULL)
|
||||
* @flags: BLQ_MQ_REQ_* flags
|
||||
* @gfp_mask: allocator flags
|
||||
*
|
||||
* Get a free request from @q. This function may fail under memory
|
||||
* pressure or if @q is dead.
|
||||
|
@ -1341,7 +1342,7 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
|
|||
* Returns request pointer on success, with @q->queue_lock *not held*.
|
||||
*/
|
||||
static struct request *__get_request(struct request_list *rl, unsigned int op,
|
||||
struct bio *bio, blk_mq_req_flags_t flags)
|
||||
struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp_mask)
|
||||
{
|
||||
struct request_queue *q = rl->q;
|
||||
struct request *rq;
|
||||
|
@ -1350,8 +1351,6 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
|
|||
struct io_cq *icq = NULL;
|
||||
const bool is_sync = op_is_sync(op);
|
||||
int may_queue;
|
||||
gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
|
||||
__GFP_DIRECT_RECLAIM;
|
||||
req_flags_t rq_flags = RQF_ALLOCED;
|
||||
|
||||
lockdep_assert_held(q->queue_lock);
|
||||
|
@ -1515,6 +1514,7 @@ rq_starved:
|
|||
* @op: operation and flags
|
||||
* @bio: bio to allocate request for (can be %NULL)
|
||||
* @flags: BLK_MQ_REQ_* flags.
|
||||
* @gfp: allocator flags
|
||||
*
|
||||
* Get a free request from @q. If %BLK_MQ_REQ_NOWAIT is set in @flags,
|
||||
* this function keeps retrying under memory pressure and fails iff @q is dead.
|
||||
|
@ -1524,7 +1524,7 @@ rq_starved:
|
|||
* Returns request pointer on success, with @q->queue_lock *not held*.
|
||||
*/
|
||||
static struct request *get_request(struct request_queue *q, unsigned int op,
|
||||
struct bio *bio, blk_mq_req_flags_t flags)
|
||||
struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp)
|
||||
{
|
||||
const bool is_sync = op_is_sync(op);
|
||||
DEFINE_WAIT(wait);
|
||||
|
@ -1536,7 +1536,7 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
|
|||
|
||||
rl = blk_get_rl(q, bio); /* transferred to @rq on success */
|
||||
retry:
|
||||
rq = __get_request(rl, op, bio, flags);
|
||||
rq = __get_request(rl, op, bio, flags, gfp);
|
||||
if (!IS_ERR(rq))
|
||||
return rq;
|
||||
|
||||
|
@ -1590,7 +1590,7 @@ static struct request *blk_old_get_request(struct request_queue *q,
|
|||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
spin_lock_irq(q->queue_lock);
|
||||
rq = get_request(q, op, NULL, flags);
|
||||
rq = get_request(q, op, NULL, flags, gfp_mask);
|
||||
if (IS_ERR(rq)) {
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
blk_queue_exit(q);
|
||||
|
@ -2056,7 +2056,7 @@ get_rq:
|
|||
* Returns with the queue unlocked.
|
||||
*/
|
||||
blk_queue_enter_live(q);
|
||||
req = get_request(q, bio->bi_opf, bio, 0);
|
||||
req = get_request(q, bio->bi_opf, bio, 0, __GFP_DIRECT_RECLAIM);
|
||||
if (IS_ERR(req)) {
|
||||
blk_queue_exit(q);
|
||||
__wbt_done(q->rq_wb, wb_acct);
|
||||
|
|
|
@ -1933,10 +1933,6 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
|
|||
struct request *req;
|
||||
struct scsi_request *rq;
|
||||
|
||||
/*
|
||||
* blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a
|
||||
* request becomes available
|
||||
*/
|
||||
req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, 0);
|
||||
if (IS_ERR(req))
|
||||
return;
|
||||
|
|
Loading…
Reference in a new issue