block: Deprecate QUEUE_FLAG_CLUSTER and use queue_limits instead
When stacking devices, a request_queue is not always available. This forced us to have a no_cluster flag in the queue_limits that could be used as a carrier until the request_queue had been set up for a metadevice.

There were several problems with that approach. First of all, it was up to the stacking device to remember to set the queue flag after stacking had completed. Also, the queue flag and the queue limits had to be kept in sync at all times. We got that wrong, which could lead to us issuing commands that went beyond the max scatterlist limit set by the driver.

The proper fix is to avoid having two flags for tracking the same thing. We deprecate QUEUE_FLAG_CLUSTER and use the queue limit directly in the block layer merging functions. The queue limit 'no_cluster' is turned into 'cluster' to avoid double negatives and to ease stacking. Clustering defaults to being enabled, as before. The queue flag logic is removed from the stacking function, and explicitly setting the cluster flag is no longer necessary in DM and MD.

Reported-by: Ed Lin <ed.lin@promise.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@kernel.org
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Parent: 04a6b516cd
Commit: e692cb668f
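The gist of the fix can be illustrated outside the kernel. Below is a minimal userspace C sketch (not kernel code; the struct and helper names merely mirror their kernel counterparts) of the AND-based limit stacking the patch introduces in blk_stack_limits(): the top device may cluster only if every bottom device can, so there is no separate queue flag left to forget or to drift out of sync.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct queue_limits. */
struct queue_limits {
	unsigned char cluster;	/* 1 = adjacent segments may be merged */
};

/* Clustering defaults to enabled, as in blk_set_default_limits(). */
static void set_default_limits(struct queue_limits *lim)
{
	lim->cluster = 1;
}

/*
 * Stacking: AND the bottom device's limit into the top's, mirroring
 * "t->cluster &= b->cluster" in blk_stack_limits().
 */
static void stack_limits(struct queue_limits *t, const struct queue_limits *b)
{
	t->cluster &= b->cluster;
}

int main(void)
{
	struct queue_limits md, sda, sdb;

	set_default_limits(&md);
	set_default_limits(&sda);
	set_default_limits(&sdb);

	/* One bottom device cannot cluster, e.g. a SCSI host that
	 * does not set use_clustering. */
	sdb.cluster = 0;

	stack_limits(&md, &sda);
	stack_limits(&md, &sdb);

	/* The metadevice inherits the most restrictive setting: 0. */
	printf("md cluster = %d\n", md.cluster);
	return 0;
}

With the old scheme this state lived in both queue_limits.no_cluster and QUEUE_FLAG_CLUSTER and had to be copied between them by hand; with a single field, the merge code can simply read q->limits.cluster through the new blk_queue_cluster() helper, as the diff below shows.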
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -21,7 +21,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		return 0;
 
 	fbio = bio;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
@@ -87,7 +87,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
-	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+	if (!blk_queue_cluster(q))
 		return 0;
 
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
@@ -123,7 +123,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	int nsegs, cluster;
 
 	nsegs = 0;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 
 	/*
 	 * for each bio in rq

--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -126,7 +126,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->alignment_offset = 0;
 	lim->io_opt = 0;
 	lim->misaligned = 0;
-	lim->no_cluster = 0;
+	lim->cluster = 1;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -464,15 +464,6 @@ EXPORT_SYMBOL(blk_queue_io_opt);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	blk_stack_limits(&t->limits, &b->limits, 0);
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(t->queue_lock, flags);
-		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
-	}
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
@@ -545,7 +536,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm(t->io_opt, b->io_opt);
 
-	t->no_cluster |= b->no_cluster;
+	t->cluster &= b->cluster;
 	t->discard_zeroes_data &= b->discard_zeroes_data;
 
 	/* Physical block size a multiple of the logical block size? */
@@ -641,7 +632,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 		       sector_t offset)
 {
 	struct request_queue *t = disk->queue;
-	struct request_queue *b = bdev_get_queue(bdev);
 
 	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
 		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
@@ -652,17 +642,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
 		       top, bottom);
 	}
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(t->queue_lock, flags);
-		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
-	}
 }
 EXPORT_SYMBOL(disk_stack_limits);

--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -119,7 +119,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *
 
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
-	if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+	if (blk_queue_cluster(q))
 		return queue_var_show(queue_max_segment_size(q), (page));
 
 	return queue_var_show(PAGE_CACHE_SIZE, (page));

--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1131,11 +1131,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 */
 	q->limits = *limits;
 
-	if (limits->no_cluster)
-		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
-	else
-		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
-
 	if (!dm_table_supports_discards(t))
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
 	else

--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4296,9 +4296,6 @@ static int md_alloc(dev_t dev, char *name)
 		goto abort;
 	mddev->queue->queuedata = mddev;
 
-	/* Can be unlocked because the queue is new: no concurrency */
-	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
-
 	blk_queue_make_request(mddev->queue, md_make_request);
 
 	disk = alloc_disk(1 << shift);

--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1642,9 +1642,8 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 
 	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
 
-	/* New queue, no concurrency on queue_flags */
 	if (!shost->use_clustering)
-		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
+		q->limits.cluster = 0;
 
 	/*
 	 * set a reasonable default alignment on word boundaries: the

--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -250,7 +250,7 @@ struct queue_limits {
 
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
-	unsigned char		no_cluster;
+	unsigned char		cluster;
 	signed char		discard_zeroes_data;
 };
 
@@ -380,7 +380,6 @@ struct request_queue
 #endif
 };
 
-#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
@@ -403,7 +402,6 @@ struct request_queue
 #define QUEUE_FLAG_SECDISCARD	19	/* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
-				 (1 << QUEUE_FLAG_CLUSTER) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
@@ -510,6 +508,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
+static inline unsigned int blk_queue_cluster(struct request_queue *q)
+{
+	return q->limits.cluster;
+}
+
 /*
  * We regard a request as sync, if either a read or a sync write
  */