Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
 "A small collection of fixes for the current series.  This contains:

   - Two fixes for xen-blkfront, from Bob Liu.

   - A bug fix for NVMe, releasing only the specific resources we
     requested.

   - Fix for a debugfs flags entry for nbd, from Josef.

   - Plug fix from Omar, fixing up a case of code being switched between
     two functions.

   - A missing bio_put() for the new discard callers of
     submit_bio_wait(), fixing a regression causing a leak of the bio.
     From Shaun.

   - Improve dirty limit calculation precision in the writeback code,
     fixing a case where setting a limit lower than 1% of memory would
     end up being zero.  From Tejun"

* 'for-linus' of git://git.kernel.dk/linux-block:
  NVMe: Only release requested regions
  xen-blkfront: fix resume issues after a migration
  xen-blkfront: don't call talk_to_blkback when already connected to blkback
  nbd: pass the nbd pointer for flags debugfs
  block: missing bio_put following submit_bio_wait
  blk-mq: really fix plug list flushing for nomerge queues
  writeback: use higher precision calculation in domain_dirty_limits()
This commit is contained in:
Linus Torvalds 2016-06-11 18:42:59 -07:00
Parents 3a7c114d35 edb50a5403
Commit 8714f8f5fe
6 changed files with 59 additions and 37 deletions

block/blk-lib.c

@@ -113,6 +113,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		ret = submit_bio_wait(type, bio);
 		if (ret == -EOPNOTSUPP)
 			ret = 0;
+		bio_put(bio);
 	}
 	blk_finish_plug(&plug);
@@ -165,8 +166,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		}
 	}

-	if (bio)
+	if (bio) {
 		ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+		bio_put(bio);
+	}
 	return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -206,8 +209,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		}
 	}

-	if (bio)
-		return submit_bio_wait(WRITE, bio);
+	if (bio) {
+		ret = submit_bio_wait(WRITE, bio);
+		bio_put(bio);
+		return ret;
+	}
 	return 0;
 }
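The pattern being restored above: submit_bio_wait() blocks until the bio completes, but completion does not consume the caller's reference, so the caller must still drop it. A minimal sketch of the ownership rule (hypothetical helper, not code from the patch; uses the 4.7-era two-argument submit_bio_wait()):

/* submit_bio_wait() sleeps until bi_end_io fires, yet the reference the
 * caller allocated remains held afterwards; dropping it is the caller's
 * job. Skipping bio_put() leaks one struct bio per call. */
static int submit_bio_wait_and_put(int rw, struct bio *bio)
{
	int ret = submit_bio_wait(rw, bio);

	bio_put(bio);
	return ret;
}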

block/blk-mq.c

@@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)

 	blk_queue_split(q, &bio, q->bio_split);

-	if (!is_flush_fua && !blk_queue_nomerges(q)) {
-		if (blk_attempt_plug_merge(q, bio, &request_count,
-					   &same_queue_rq))
-			return BLK_QC_T_NONE;
-	} else
-		request_count = blk_plug_queued_count(q);
+	if (!is_flush_fua && !blk_queue_nomerges(q) &&
+	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+		return BLK_QC_T_NONE;

 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
@@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)

 	blk_queue_split(q, &bio, q->bio_split);

-	if (!is_flush_fua && !blk_queue_nomerges(q) &&
-	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
-		return BLK_QC_T_NONE;
+	if (!is_flush_fua && !blk_queue_nomerges(q)) {
+		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+			return BLK_QC_T_NONE;
+	} else
+		request_count = blk_plug_queued_count(q);

 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))

drivers/block/nbd.c

@@ -941,7 +941,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
 	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
 	debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
 	debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
-	debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
+	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

 	return 0;
 }
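The one-character bug: debugfs_create_file() stores its data argument verbatim and hands it back to the file's callbacks, so passing &nbd recorded the address of a function parameter that is gone once nbd_dev_dbg_init() returns. A sketch of how the pointer comes back (the callback body and the flags field are assumed from context, not copied from the driver):

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	/* s->private carries the 'data' pointer registered with debugfs;
	 * with &nbd it would be a dangling pointer-to-pointer into a dead
	 * stack frame rather than the nbd_device itself. */
	struct nbd_device *nbd = s->private;

	seq_printf(s, "0x%08x\n", nbd->flags);
	return 0;
}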

drivers/block/xen-blkfront.c

@@ -874,8 +874,12 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 			  const struct blk_mq_queue_data *qd)
 {
 	unsigned long flags;
-	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
+	int qid = hctx->queue_num;
+	struct blkfront_info *info = hctx->queue->queuedata;
+	struct blkfront_ring_info *rinfo = NULL;

+	BUG_ON(info->nr_rings <= qid);
+	rinfo = &info->rinfo[qid];
 	blk_mq_start_request(qd->rq);
 	spin_lock_irqsave(&rinfo->ring_lock, flags);
 	if (RING_FULL(&rinfo->ring))
@@ -901,20 +905,9 @@ out_busy:
 	return BLK_MQ_RQ_QUEUE_BUSY;
 }

-static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-			    unsigned int index)
-{
-	struct blkfront_info *info = (struct blkfront_info *)data;
-
-	BUG_ON(info->nr_rings <= index);
-	hctx->driver_data = &info->rinfo[index];
-	return 0;
-}
-
 static struct blk_mq_ops blkfront_mq_ops = {
 	.queue_rq = blkif_queue_rq,
 	.map_queue = blk_mq_map_queue,
-	.init_hctx = blk_mq_init_hctx,
 };

 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
@@ -950,6 +943,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 		return PTR_ERR(rq);
 	}

+	rq->queuedata = info;
 	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

 	if (info->feature_discard) {
@@ -2149,6 +2143,8 @@ static int blkfront_resume(struct xenbus_device *dev)
 		return err;

 	err = talk_to_blkback(dev, info);
+	if (!err)
+		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);

 	/*
 	 * We have to wait for the backend to switch to
@@ -2485,10 +2481,23 @@ static void blkback_changed(struct xenbus_device *dev,
 		break;

 	case XenbusStateConnected:
-		if (dev->state != XenbusStateInitialised) {
+		/*
+		 * talk_to_blkback sets state to XenbusStateInitialised
+		 * and blkfront_connect sets it to XenbusStateConnected
+		 * (if connection went OK).
+		 *
+		 * If the backend (or toolstack) decides to poke at backend
+		 * state (and re-trigger the watch by setting the state repeatedly
+		 * to XenbusStateConnected (4)) we need to deal with this.
+		 * This is allowed as this is used to communicate to the guest
+		 * that the size of disk has changed!
+		 */
+		if ((dev->state != XenbusStateInitialised) &&
+		    (dev->state != XenbusStateConnected)) {
 			if (talk_to_blkback(dev, info))
 				break;
 		}
+
 		blkfront_connect(info);
 		break;

drivers/nvme/host/pci.c

@@ -1679,9 +1679,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)

 static void nvme_dev_unmap(struct nvme_dev *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+	int bars;
+
 	if (dev->bar)
 		iounmap(dev->bar);
-	pci_release_regions(to_pci_dev(dev->dev));
+
+	bars = pci_select_bars(pdev, IORESOURCE_MEM);
+	pci_release_selected_regions(pdev, bars);
 }

 static void nvme_pci_disable(struct nvme_dev *dev)
@@ -1924,7 +1929,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
 	return 0;

  release:
-	pci_release_regions(pdev);
+	pci_release_selected_regions(pdev, bars);
 	return -ENODEV;
 }
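The driver only ever requests the memory BARs it picks with pci_select_bars(), so tearing down with pci_release_regions(), which releases every BAR, released resources it never owned. A sketch of the symmetric request/release pairing the fix restores (hypothetical helper names, standalone illustration rather than the driver code verbatim):

static int nvme_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);	/* mask of MEM BARs */

	/* claim just those BARs; any I/O-port BARs stay untouched */
	return pci_request_selected_regions(pdev, bars, "nvme");
}

static void nvme_release_mem_bars(struct pci_dev *pdev)
{
	/* release exactly the set requested above, nothing more */
	pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM));
}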

mm/page-writeback.c

@@ -373,8 +373,9 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
 	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
 	unsigned long bytes = vm_dirty_bytes;
 	unsigned long bg_bytes = dirty_background_bytes;
-	unsigned long ratio = vm_dirty_ratio;
-	unsigned long bg_ratio = dirty_background_ratio;
+	/* convert ratios to per-PAGE_SIZE for higher precision */
+	unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
+	unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
 	unsigned long thresh;
 	unsigned long bg_thresh;
 	struct task_struct *tsk;
@@ -386,26 +387,28 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
 		/*
 		 * The byte settings can't be applied directly to memcg
 		 * domains.  Convert them to ratios by scaling against
-		 * globally available memory.
+		 * globally available memory.  As the ratios are in
+		 * per-PAGE_SIZE, they can be obtained by dividing bytes by
+		 * number of pages.
 		 */
 		if (bytes)
-			ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
-				    global_avail, 100UL);
+			ratio = min(DIV_ROUND_UP(bytes, global_avail),
+				    PAGE_SIZE);
 		if (bg_bytes)
-			bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 /
-				       global_avail, 100UL);
+			bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
+				       PAGE_SIZE);
 		bytes = bg_bytes = 0;
 	}

 	if (bytes)
 		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
 	else
-		thresh = (ratio * available_memory) / 100;
+		thresh = (ratio * available_memory) / PAGE_SIZE;

 	if (bg_bytes)
 		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
 	else
-		bg_thresh = (bg_ratio * available_memory) / 100;
+		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;

 	if (bg_thresh >= thresh)
 		bg_thresh = thresh / 2;
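The precision problem is easiest to see with numbers. The standalone demo below uses hypothetical figures (a 4 GiB domain and a 16 MiB limit, i.e. about 0.39% of memory) and, for brevity, drops the min() clamps and treats available_memory as equal to global_avail: in whole-percent units the limit truncates to 0%, and the threshold collapses to zero pages, while in units of 1/PAGE_SIZE it survives as 16/4096.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long global_avail = 1UL << 20;	/* pages: 4 GiB / 4 KiB */
	unsigned long bytes = 16UL << 20;	/* 16 MiB dirty limit */

	/* old scheme: ratio in whole percent -- anything below 1% becomes 0 */
	unsigned long old_ratio = DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 / global_avail;
	unsigned long old_thresh = old_ratio * global_avail / 100;

	/* new scheme: ratio in units of 1/PAGE_SIZE -- granularity ~0.024% */
	unsigned long new_ratio = DIV_ROUND_UP(bytes, global_avail);
	unsigned long new_thresh = new_ratio * global_avail / PAGE_SIZE;

	printf("old: ratio=%lu%%, thresh=%lu pages\n", old_ratio, old_thresh);
	printf("new: ratio=%lu/4096, thresh=%lu pages (16 MiB)\n",
	       new_ratio, new_thresh);
	return 0;	/* old thresh is 0; new thresh is 4096 pages */
}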