/*
 * Deadline i/o scheduler.
 *
 * Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
 */
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/blkdev.h>
|
|
|
|
#include <linux/elevator.h>
|
|
|
|
#include <linux/bio.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/compiler.h>
|
|
|
|
#include <linux/rbtree.h>
|
|
|
|
|
|
|
|
/*
|
|
|
|
* See Documentation/block/deadline-iosched.txt
|
|
|
|
*/
|
2006-01-06 11:46:02 +03:00
|
|
|
/* Default tunables; each is runtime-adjustable per queue via sysfs. */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
					   by the above parameters. For throughput. */
|
|
|
|
|
|
|
|
struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];	/* sector-sorted rbtrees, indexed by READ/WRITE */
	struct list_head fifo_list[2];	/* arrival-ordered fifos, indexed by READ/WRITE */

	/*
	 * next in sort order. read, write or both are NULL
	 */
	struct request *next_rq[2];	/* cached batch continuation per direction */
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];		/* fifo deadline (jiffies) per direction */
	int fifo_batch;			/* max requests dispatched per batch */
	int writes_starved;		/* reads served before a write must go */
	int front_merges;		/* bool: try front merges at all */
};
|
|
|
|
|
2008-08-14 12:17:14 +04:00
|
|
|
static inline struct rb_root *
|
|
|
|
deadline_rb_root(struct deadline_data *dd, struct request *rq)
|
|
|
|
{
|
|
|
|
return &dd->sort_list[rq_data_dir(rq)];
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-10-30 12:40:12 +03:00
|
|
|
/*
|
|
|
|
* get the request after `rq' in sector-sorted order
|
|
|
|
*/
|
|
|
|
static inline struct request *
|
|
|
|
deadline_latter_request(struct request *rq)
|
|
|
|
{
|
|
|
|
struct rb_node *node = rb_next(&rq->rb_node);
|
|
|
|
|
|
|
|
if (node)
|
|
|
|
return rb_entry_rq(node);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
	/*
	 * Insert into the sector-sorted tree for rq's direction. Aliased
	 * (same-sector) requests are simply added as well; their relative
	 * order in the tree is not guaranteed, which is fine.
	 */
	elv_rb_add(deadline_rb_root(dd, rq), rq);
}
|
|
|
|
|
|
|
|
static inline void
|
2006-07-13 14:36:41 +04:00
|
|
|
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2006-07-13 14:34:24 +04:00
|
|
|
const int data_dir = rq_data_dir(rq);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-10-30 12:40:12 +03:00
|
|
|
if (dd->next_rq[data_dir] == rq)
|
|
|
|
dd->next_rq[data_dir] = deadline_latter_request(rq);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2008-08-14 12:17:14 +04:00
|
|
|
elv_rb_del(deadline_rb_root(dd, rq), rq);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2006-07-13 14:36:41 +04:00
|
|
|
* add rq to rbtree and fifo
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2005-10-20 18:42:29 +04:00
|
|
|
static void
|
2005-04-17 02:20:36 +04:00
|
|
|
deadline_add_request(struct request_queue *q, struct request *rq)
|
|
|
|
{
|
|
|
|
struct deadline_data *dd = q->elevator->elevator_data;
|
2006-07-13 14:36:41 +04:00
|
|
|
const int data_dir = rq_data_dir(rq);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2017-12-21 09:43:42 +03:00
|
|
|
/*
|
|
|
|
* This may be a requeue of a write request that has locked its
|
|
|
|
* target zone. If it is the case, this releases the zone lock.
|
|
|
|
*/
|
|
|
|
blk_req_zone_write_unlock(rq);
|
|
|
|
|
2006-07-13 14:36:41 +04:00
|
|
|
deadline_add_rq_rb(dd, rq);
|
2006-07-28 11:23:08 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
2008-08-14 12:17:14 +04:00
|
|
|
* set expire time and add to fifo list
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2014-02-24 19:39:52 +04:00
|
|
|
rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
|
2006-07-13 14:36:41 +04:00
|
|
|
list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2006-07-28 11:23:08 +04:00
|
|
|
* remove rq from rbtree and fifo.
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2007-07-24 11:28:11 +04:00
|
|
|
static void deadline_remove_request(struct request_queue *q, struct request *rq)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2005-10-20 18:42:29 +04:00
|
|
|
struct deadline_data *dd = q->elevator->elevator_data;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2006-07-13 14:36:41 +04:00
|
|
|
rq_fifo_clear(rq);
|
|
|
|
deadline_del_rq_rb(dd, rq);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2017-02-08 16:46:48 +03:00
|
|
|
static enum elv_merge
|
2007-07-24 11:28:11 +04:00
|
|
|
deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct deadline_data *dd = q->elevator->elevator_data;
|
|
|
|
struct request *__rq;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* check for front merge
|
|
|
|
*/
|
|
|
|
if (dd->front_merges) {
|
2012-09-26 02:05:12 +04:00
|
|
|
sector_t sector = bio_end_sector(bio);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2006-07-13 14:34:24 +04:00
|
|
|
__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (__rq) {
|
2009-05-07 17:24:39 +04:00
|
|
|
BUG_ON(sector != blk_rq_pos(__rq));
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2016-07-07 21:48:22 +03:00
|
|
|
if (elv_bio_merge_ok(__rq, bio)) {
|
2017-02-08 16:46:48 +03:00
|
|
|
*req = __rq;
|
|
|
|
return ELEVATOR_FRONT_MERGE;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return ELEVATOR_NO_MERGE;
|
|
|
|
}
|
|
|
|
|
2007-07-24 11:28:11 +04:00
|
|
|
static void deadline_merged_request(struct request_queue *q,
|
2017-02-08 16:46:48 +03:00
|
|
|
struct request *req, enum elv_merge type)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct deadline_data *dd = q->elevator->elevator_data;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if the merge was a front merge, we need to reposition request
|
|
|
|
*/
|
2006-07-13 14:34:24 +04:00
|
|
|
if (type == ELEVATOR_FRONT_MERGE) {
|
2008-08-14 12:17:14 +04:00
|
|
|
elv_rb_del(deadline_rb_root(dd, req), req);
|
2006-07-13 14:36:41 +04:00
|
|
|
deadline_add_rq_rb(dd, req);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2007-07-24 11:28:11 +04:00
|
|
|
deadline_merged_requests(struct request_queue *q, struct request *req,
|
2005-04-17 02:20:36 +04:00
|
|
|
struct request *next)
|
|
|
|
{
|
|
|
|
/*
|
2006-07-13 14:36:41 +04:00
|
|
|
* if next expires before rq, assign its expire time to rq
|
|
|
|
* and move into next position (next will be deleted) in fifo
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2006-07-13 14:36:41 +04:00
|
|
|
if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
|
2016-06-28 10:03:59 +03:00
|
|
|
if (time_before((unsigned long)next->fifo_time,
|
|
|
|
(unsigned long)req->fifo_time)) {
|
2006-07-13 14:36:41 +04:00
|
|
|
list_move(&req->queuelist, &next->queuelist);
|
2014-02-24 19:39:52 +04:00
|
|
|
req->fifo_time = next->fifo_time;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* kill knowledge of next, this one is a goner
|
|
|
|
*/
|
|
|
|
deadline_remove_request(q, next);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* move request from sort list to dispatch queue.
|
|
|
|
*/
|
|
|
|
static inline void
|
2006-07-13 14:36:41 +04:00
|
|
|
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2007-07-24 11:28:11 +04:00
|
|
|
struct request_queue *q = rq->q;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2017-12-21 09:43:42 +03:00
|
|
|
/*
|
|
|
|
* For a zoned block device, write requests must write lock their
|
|
|
|
* target zone.
|
|
|
|
*/
|
|
|
|
blk_req_zone_write_lock(rq);
|
|
|
|
|
2006-07-13 14:36:41 +04:00
|
|
|
deadline_remove_request(q, rq);
|
|
|
|
elv_dispatch_add_tail(q, rq);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* move an entry to dispatch queue
|
|
|
|
*/
|
|
|
|
static void
|
2006-07-13 14:36:41 +04:00
|
|
|
deadline_move_request(struct deadline_data *dd, struct request *rq)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2006-07-13 14:34:24 +04:00
|
|
|
const int data_dir = rq_data_dir(rq);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2006-07-13 14:36:41 +04:00
|
|
|
dd->next_rq[READ] = NULL;
|
|
|
|
dd->next_rq[WRITE] = NULL;
|
2007-10-30 12:40:12 +03:00
|
|
|
dd->next_rq[data_dir] = deadline_latter_request(rq);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* take it off the sort and fifo list, move
|
|
|
|
* to dispatch queue
|
|
|
|
*/
|
2006-07-13 14:36:41 +04:00
|
|
|
deadline_move_to_dispatch(dd, rq);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2008-08-14 12:17:14 +04:00
|
|
|
* deadline_check_fifo returns 0 if there are no expired requests on the fifo,
|
2005-04-17 02:20:36 +04:00
|
|
|
* 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
|
|
|
|
*/
|
|
|
|
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
|
|
|
|
{
|
2006-07-13 14:36:41 +04:00
|
|
|
struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/*
|
2006-07-13 14:36:41 +04:00
|
|
|
* rq is expired!
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2016-06-28 10:03:59 +03:00
|
|
|
if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
|
2005-04-17 02:20:36 +04:00
|
|
|
return 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-12-21 09:43:41 +03:00
|
|
|
/*
|
|
|
|
* For the specified data direction, return the next request to dispatch using
|
|
|
|
* arrival ordered lists.
|
|
|
|
*/
|
|
|
|
static struct request *
|
|
|
|
deadline_fifo_request(struct deadline_data *dd, int data_dir)
|
|
|
|
{
|
2017-12-21 09:43:42 +03:00
|
|
|
struct request *rq;
|
|
|
|
|
2017-12-21 09:43:41 +03:00
|
|
|
if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (list_empty(&dd->fifo_list[data_dir]))
|
|
|
|
return NULL;
|
|
|
|
|
2017-12-21 09:43:42 +03:00
|
|
|
rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
|
|
|
|
if (data_dir == READ || !blk_queue_is_zoned(rq->q))
|
|
|
|
return rq;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Look for a write request that can be dispatched, that is one with
|
|
|
|
* an unlocked target zone.
|
|
|
|
*/
|
|
|
|
list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
|
|
|
|
if (blk_req_can_dispatch_to_zone(rq))
|
|
|
|
return rq;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
2017-12-21 09:43:41 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For the specified data direction, return the next request to dispatch using
|
|
|
|
* sector position sorted lists.
|
|
|
|
*/
|
|
|
|
static struct request *
|
|
|
|
deadline_next_request(struct deadline_data *dd, int data_dir)
|
|
|
|
{
|
2017-12-21 09:43:42 +03:00
|
|
|
struct request *rq;
|
|
|
|
|
2017-12-21 09:43:41 +03:00
|
|
|
if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
|
|
|
|
return NULL;
|
|
|
|
|
2017-12-21 09:43:42 +03:00
|
|
|
rq = dd->next_rq[data_dir];
|
|
|
|
if (!rq)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (data_dir == READ || !blk_queue_is_zoned(rq->q))
|
|
|
|
return rq;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Look for a write request that can be dispatched, that is one with
|
|
|
|
* an unlocked target zone.
|
|
|
|
*/
|
|
|
|
while (rq) {
|
|
|
|
if (blk_req_can_dispatch_to_zone(rq))
|
|
|
|
return rq;
|
|
|
|
rq = deadline_latter_request(rq);
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
2017-12-21 09:43:41 +03:00
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc, and moves it to the dispatch
 * queue. Returns 1 if a request was dispatched, 0 otherwise.
 */
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct request *rq, *next_rq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes: prefer continuing the
	 * running batch, whichever direction it is in
	 */
	rq = deadline_next_request(dd, WRITE);
	if (!rq)
		rq = deadline_next_request(dd, READ);

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		/*
		 * only yield to writes once they have been starved more
		 * than writes_starved times in a row
		 */
		if (deadline_fifo_request(dd, WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	/* nothing queued in either direction */
	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, data_dir);
	if (deadline_check_fifo(dd, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return 0;

	/* starting a fresh batch */
	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);

	return 1;
}
|
|
|
|
|
2017-12-21 09:43:42 +03:00
|
|
|
/*
 * For zoned block devices, write unlock the target zone of completed
 * write requests.
 */
static void
deadline_completed_request(struct request_queue *q, struct request *rq)
{
	/* no-op for requests that do not hold a zone write lock */
	blk_req_zone_write_unlock(rq);
}
|
|
|
|
|
2008-10-31 12:05:07 +03:00
|
|
|
static void deadline_exit_queue(struct elevator_queue *e)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct deadline_data *dd = e->elevator_data;
|
|
|
|
|
|
|
|
BUG_ON(!list_empty(&dd->fifo_list[READ]));
|
|
|
|
BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
|
|
|
|
|
|
|
|
kfree(dd);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * initialize elevator private data (deadline_data).
 * Returns 0 on success, -ENOMEM if either allocation fails.
 */
static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	/* allocate on the queue's home NUMA node */
	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd) {
		/* drops the elevator_alloc reference */
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = dd;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	/* tunables start from the module-wide defaults */
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;

	/* publish the elevator under the queue lock */
	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* sysfs parts below
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * Format one integer tunable into a sysfs page buffer; returns the
 * number of characters written.
 */
static ssize_t
deadline_var_show(int var, char *page)
{
	int len = sprintf(page, "%d\n", var);

	return len;
}
|
|
|
|
|
2017-08-24 20:11:33 +03:00
|
|
|
/*
 * Parse a decimal integer from a sysfs store buffer into *var.
 * NOTE(review): no error reporting — malformed input yields 0, matching
 * simple_strtol semantics.
 */
static void
deadline_var_store(int *var, const char *page)
{
	char *end = (char *) page;

	*var = simple_strtol(end, &end, 10);
}
|
|
|
|
|
|
|
|
/*
 * Generate one sysfs show handler per tunable; when __CONV is set the
 * internal jiffies value is converted to milliseconds for userspace.
 */
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION
|
|
|
|
|
|
|
|
/*
 * Generate one sysfs store handler per tunable: parse the value, clamp
 * it to [MIN, MAX], and (when __CONV) convert milliseconds to jiffies
 * before storing. Always consumes the whole input (returns count).
 */
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data;							\
	deadline_var_store(&__data, (page));				\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return count;							\
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION
|
|
|
|
|
2006-03-19 06:27:18 +03:00
|
|
|
/* One user/group-readable, owner-writable sysfs attribute per tunable. */
#define DD_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
				      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL	/* sentinel */
};
|
|
|
|
|
|
|
|
/* Elevator descriptor registered with the (single-queue) block core. */
static struct elevator_type iosched_deadline = {
	.ops.sq = {
		.elevator_merge_fn = 		deadline_merge,
		.elevator_merged_fn =		deadline_merged_request,
		.elevator_merge_req_fn =	deadline_merged_requests,
		.elevator_dispatch_fn =		deadline_dispatch_requests,
		.elevator_completed_req_fn =	deadline_completed_request,
		.elevator_add_req_fn =		deadline_add_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_init_fn =		deadline_init_queue,
		.elevator_exit_fn =		deadline_exit_queue,
	},

	.elevator_attrs = deadline_attrs,
	.elevator_name = "deadline",
	.elevator_owner = THIS_MODULE,
};
|
|
|
|
|
|
|
|
static int __init deadline_init(void)
{
	/* register with the elevator core; 0 on success, -errno on failure */
	return elv_register(&iosched_deadline);
}
|
|
|
|
|
|
|
|
static void __exit deadline_exit(void)
{
	/* unregister from the elevator core on module unload */
	elv_unregister(&iosched_deadline);
}
|
|
|
|
|
|
|
|
/* Module entry/exit points and metadata. */
module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");
|