block: store a block_device pointer in struct bio

Replace the gendisk pointer in struct bio with a pointer to the newly
improved struct block_device.  From that, the gendisk can still be
reached trivially with one extra indirection, and all information
related to partition remapping can now be looked up directly.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Christoph Hellwig 2021-01-24 11:02:34 +01:00, committed by Jens Axboe
Parent: cf9a978f97
Commit: 309dca309f
55 changed files, 154 additions and 184 deletions
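
The practical effect shows up in nearly every hunk below: code that used
to chase bio->bi_disk (and carry bio->bi_partno for RCU-protected
partition lookups) now simply dereferences bio->bi_bdev.  A minimal
sketch of the new access patterns, not part of the patch; it assumes only
fields the patch itself relies on (bd_disk, bd_start_sect), and the
helper names are illustrative:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* The gendisk is now one indirection away ... */
static inline struct request_queue *example_bio_queue(struct bio *bio)
{
	return bio->bi_bdev->bd_disk->queue;
}

/* ... and partition-remapping data sits directly in the block_device,
 * with no __disk_get_part() lookup under rcu_read_lock() required.
 */
static inline sector_t example_bio_part_start(struct bio *bio)
{
	return bio->bi_bdev->bd_start_sect;	/* 0 for a whole device */
}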


@ -61,7 +61,7 @@ struct nfhd_device {
static blk_qc_t nfhd_submit_bio(struct bio *bio)
{
struct nfhd_device *dev = bio->bi_disk->private_data;
struct nfhd_device *dev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec bvec;
struct bvec_iter iter;
int dir, len, shift;


@ -103,7 +103,7 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
static blk_qc_t simdisk_submit_bio(struct bio *bio)
{
struct simdisk *dev = bio->bi_disk->private_data;
struct simdisk *dev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec bvec;
struct bvec_iter iter;
sector_t sector = bio->bi_iter.bi_sector;


@ -140,7 +140,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
iv = bip->bip_vec + bip->bip_vcnt;
if (bip->bip_vcnt &&
bvec_gap_to_prev(bio->bi_disk->queue,
bvec_gap_to_prev(bio->bi_bdev->bd_disk->queue,
&bip->bip_vec[bip->bip_vcnt - 1], offset))
return 0;
@ -162,7 +162,7 @@ EXPORT_SYMBOL(bio_integrity_add_page);
static blk_status_t bio_integrity_process(struct bio *bio,
struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn)
{
struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
struct blk_integrity_iter iter;
struct bvec_iter bviter;
struct bio_vec bv;
@ -171,7 +171,7 @@ static blk_status_t bio_integrity_process(struct bio *bio,
void *prot_buf = page_address(bip->bip_vec->bv_page) +
bip->bip_vec->bv_offset;
iter.disk_name = bio->bi_disk->disk_name;
iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
iter.interval = 1 << bi->interval_exp;
iter.seed = proc_iter->bi_sector;
iter.prot_buf = prot_buf;
@ -208,8 +208,8 @@ static blk_status_t bio_integrity_process(struct bio *bio,
bool bio_integrity_prep(struct bio *bio)
{
struct bio_integrity_payload *bip;
struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
struct request_queue *q = bio->bi_disk->queue;
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
void *buf;
unsigned long start, end;
unsigned int len, nr_pages;
@ -329,7 +329,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
struct bio_integrity_payload *bip =
container_of(work, struct bio_integrity_payload, bip_work);
struct bio *bio = bip->bip_bio;
struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
/*
* At the moment verify is called bio's iterator was advanced
@ -355,7 +355,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
*/
bool __bio_integrity_endio(struct bio *bio)
{
struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
struct bio_integrity_payload *bip = bio_integrity(bio);
if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
@ -381,7 +381,7 @@ bool __bio_integrity_endio(struct bio *bio)
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
bip->bip_iter.bi_sector += bytes_done >> 9;
@ -397,7 +397,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
void bio_integrity_trim(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
}


@ -607,16 +607,7 @@ void bio_truncate(struct bio *bio, unsigned new_size)
*/
void guard_bio_eod(struct bio *bio)
{
sector_t maxsector;
struct block_device *part;
rcu_read_lock();
part = __disk_get_part(bio->bi_disk, bio->bi_partno);
if (part)
maxsector = bdev_nr_sectors(part);
else
maxsector = get_capacity(bio->bi_disk);
rcu_read_unlock();
sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
if (!maxsector)
return;
@ -676,11 +667,10 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
/*
* most users will be overriding ->bi_disk with a new target,
* most users will be overriding ->bi_bdev with a new target,
* so we don't set nor calculate new physical/hw segment counts here
*/
bio->bi_disk = bio_src->bi_disk;
bio->bi_partno = bio_src->bi_partno;
bio->bi_bdev = bio_src->bi_bdev;
bio_set_flag(bio, BIO_CLONED);
if (bio_flagged(bio_src, BIO_THROTTLED))
bio_set_flag(bio, BIO_THROTTLED);
@ -730,7 +720,7 @@ EXPORT_SYMBOL(bio_clone_fast);
const char *bio_devname(struct bio *bio, char *buf)
{
return disk_name(bio->bi_disk, bio->bi_partno, buf);
return bdevname(bio->bi_bdev, buf);
}
EXPORT_SYMBOL(bio_devname);
@ -1037,7 +1027,7 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
{
unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
struct request_queue *q = bio->bi_disk->queue;
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
@ -1145,7 +1135,8 @@ static void submit_bio_wait_endio(struct bio *bio)
*/
int submit_bio_wait(struct bio *bio)
{
DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
DECLARE_COMPLETION_ONSTACK_MAP(done,
bio->bi_bdev->bd_disk->lockdep_map);
unsigned long hang_check;
bio->bi_private = &done;
@ -1422,8 +1413,8 @@ again:
if (!bio_integrity_endio(bio))
return;
if (bio->bi_disk)
rq_qos_done_bio(bio->bi_disk->queue, bio);
if (bio->bi_bdev)
rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
/*
* Need to have a real endio function for chained bios, otherwise
@ -1438,8 +1429,8 @@ again:
goto again;
}
if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
trace_block_bio_complete(bio->bi_disk->queue, bio);
if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio);
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
}


@ -1800,7 +1800,8 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
struct blkcg_gq *blkg, *ret_blkg = NULL;
rcu_read_lock();
blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_disk->queue);
blkg = blkg_lookup_create(css_to_blkcg(css),
bio->bi_bdev->bd_disk->queue);
while (blkg) {
if (blkg_tryget(blkg)) {
ret_blkg = blkg;
@ -1836,8 +1837,8 @@ void bio_associate_blkg_from_css(struct bio *bio,
if (css && css->parent) {
bio->bi_blkg = blkg_tryget_closest(bio, css);
} else {
blkg_get(bio->bi_disk->queue->root_blkg);
bio->bi_blkg = bio->bi_disk->queue->root_blkg;
blkg_get(bio->bi_bdev->bd_disk->queue->root_blkg);
bio->bi_blkg = bio->bi_bdev->bd_disk->queue->root_blkg;
}
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);


@ -476,7 +476,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
static inline int bio_queue_enter(struct bio *bio)
{
struct request_queue *q = bio->bi_disk->queue;
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
bool nowait = bio->bi_opf & REQ_NOWAIT;
int ret;
@ -712,7 +712,7 @@ static inline bool bio_check_ro(struct bio *bio, struct block_device *part)
static noinline int should_fail_bio(struct bio *bio)
{
if (should_fail_request(bio->bi_disk->part0, bio->bi_iter.bi_size))
if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
return -EIO;
return 0;
}
@ -741,13 +741,9 @@ static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
*/
static inline int blk_partition_remap(struct bio *bio)
{
struct block_device *p;
struct block_device *p = bio->bi_bdev;
int ret = -EIO;
rcu_read_lock();
p = __disk_get_part(bio->bi_disk, bio->bi_partno);
if (unlikely(!p))
goto out;
if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
goto out;
if (unlikely(bio_check_ro(bio, p)))
@ -761,10 +757,9 @@ static inline int blk_partition_remap(struct bio *bio)
bio->bi_iter.bi_sector -
p->bd_start_sect);
}
bio->bi_partno = 0;
bio->bi_bdev = bdev_whole(p);
ret = 0;
out:
rcu_read_unlock();
return ret;
}
@ -805,7 +800,8 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
static noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
struct request_queue *q = bio->bi_disk->queue;
struct block_device *bdev = bio->bi_bdev;
struct request_queue *q = bdev->bd_disk->queue;
blk_status_t status = BLK_STS_IOERR;
struct blk_plug *plug;
@ -825,13 +821,13 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
if (should_fail_bio(bio))
goto end_io;
if (bio->bi_partno) {
if (bio->bi_bdev->bd_partno) {
if (unlikely(blk_partition_remap(bio)))
goto end_io;
} else {
if (unlikely(bio_check_ro(bio, bio->bi_disk->part0)))
if (unlikely(bio_check_ro(bio, bdev_whole(bdev))))
goto end_io;
if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
if (unlikely(bio_check_eod(bio, get_capacity(bdev->bd_disk))))
goto end_io;
}
@ -924,7 +920,7 @@ end_io:
static blk_qc_t __submit_bio(struct bio *bio)
{
struct gendisk *disk = bio->bi_disk;
struct gendisk *disk = bio->bi_bdev->bd_disk;
blk_qc_t ret = BLK_QC_T_NONE;
if (blk_crypto_bio_prep(&bio)) {
@ -966,7 +962,7 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
current->bio_list = bio_list_on_stack;
do {
struct request_queue *q = bio->bi_disk->queue;
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
struct bio_list lower, same;
if (unlikely(bio_queue_enter(bio) != 0))
@ -987,7 +983,7 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
bio_list_init(&lower);
bio_list_init(&same);
while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
if (q == bio->bi_disk->queue)
if (q == bio->bi_bdev->bd_disk->queue)
bio_list_add(&same, bio);
else
bio_list_add(&lower, bio);
@ -1012,7 +1008,7 @@ static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
current->bio_list = bio_list;
do {
struct gendisk *disk = bio->bi_disk;
struct gendisk *disk = bio->bi_bdev->bd_disk;
if (unlikely(bio_queue_enter(bio) != 0))
continue;
@ -1055,7 +1051,7 @@ blk_qc_t submit_bio_noacct(struct bio *bio)
return BLK_QC_T_NONE;
}
if (!bio->bi_disk->fops->submit_bio)
if (!bio->bi_bdev->bd_disk->fops->submit_bio)
return __submit_bio_noacct_mq(bio);
return __submit_bio_noacct(bio);
}
@ -1067,7 +1063,7 @@ EXPORT_SYMBOL(submit_bio_noacct);
*
* submit_bio() is used to submit I/O requests to block devices. It is passed a
* fully set up &struct bio that describes the I/O that needs to be done. The
* bio will be send to the device described by the bi_disk and bi_partno fields.
* bio will be send to the device described by the bi_bdev field.
*
* The success/failure status of the request, along with notification of
* completion, is delivered asynchronously through the ->bi_end_io() callback
@ -1087,7 +1083,8 @@ blk_qc_t submit_bio(struct bio *bio)
unsigned int count;
if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
count = queue_logical_block_size(
bio->bi_bdev->bd_disk->queue) >> 9;
else
count = bio_sectors(bio);
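
Since submit_bio() now routes purely via bi_bdev, a submitter only has to
point the bio at a block_device before submission.  A hedged sketch of
the caller side, using bio_set_dev() as reworked later in this diff plus
the long-standing bio API; function names and the sector choice are
illustrative:

static void example_end_io(struct bio *bio)
{
	bio_put(bio);
}

static void example_read_first_page(struct block_device *bdev,
				    struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio_set_dev(bio, bdev);			/* stores bio->bi_bdev */
	bio->bi_opf = REQ_OP_READ;
	bio->bi_iter.bi_sector = 0;
	bio_add_page(bio, page, PAGE_SIZE, 0);	/* 1 vec: cannot fail here */
	bio->bi_end_io = example_end_io;
	submit_bio(bio);
}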


@ -167,7 +167,7 @@ static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL);
if (!bio)
return NULL;
bio->bi_disk = bio_src->bi_disk;
bio->bi_bdev = bio_src->bi_bdev;
bio->bi_opf = bio_src->bi_opf;
bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_write_hint = bio_src->bi_write_hint;


@ -280,7 +280,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
* Success if device supports the encryption context, or if we succeeded
* in falling back to the crypto API.
*/
if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
if (blk_ksm_crypto_cfg_supported(bio->bi_bdev->bd_disk->queue->ksm,
&bc_key->crypto_cfg))
return true;


@ -298,14 +298,13 @@ split:
* Split a bio into two bios, chain the two bios, submit the second half and
* store a pointer to the first half in *@bio. If the second bio is still too
* big it will be split by a recursive call to this function. Since this
* function may allocate a new bio from @bio->bi_disk->queue->bio_split, it is
* the responsibility of the caller to ensure that
* @bio->bi_disk->queue->bio_split is only released after processing of the
* split bio has finished.
* function may allocate a new bio from q->bio_split, it is the responsibility
* of the caller to ensure that q->bio_split is only released after processing
* of the split bio has finished.
*/
void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
{
struct request_queue *q = (*bio)->bi_disk->queue;
struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue;
struct bio *split = NULL;
switch (bio_op(*bio)) {
@ -358,9 +357,9 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
*
* Split a bio into two bios, chains the two bios, submit the second half and
* store a pointer to the first half in *@bio. Since this function may allocate
* a new bio from @bio->bi_disk->queue->bio_split, it is the responsibility of
* the caller to ensure that @bio->bi_disk->queue->bio_split is only released
* after processing of the split bio has finished.
* a new bio from q->bio_split, it is the responsibility of the caller to ensure
* that q->bio_split is only released after processing of the split bio has
* finished.
*/
void blk_queue_split(struct bio **bio)
{
@ -866,7 +865,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
return false;
/* must be same device */
if (rq->rq_disk != bio->bi_disk)
if (rq->rq_disk != bio->bi_bdev->bd_disk)
return false;
/* only merge integrity protected bio into ditto rq */


@ -2128,7 +2128,7 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
*/
blk_qc_t blk_mq_submit_bio(struct bio *bio)
{
struct request_queue *q = bio->bi_disk->queue;
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = op_is_flush(bio->bi_opf);
struct blk_mq_alloc_data data = {


@ -2178,7 +2178,7 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
bool blk_throtl_bio(struct bio *bio)
{
struct request_queue *q = bio->bi_disk->queue;
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
struct blkcg_gq *blkg = bio->bi_blkg;
struct throtl_qnode *qn = NULL;
struct throtl_grp *tg = blkg_to_tg(blkg);


@ -202,8 +202,6 @@ static inline void elevator_exit(struct request_queue *q,
__elevator_exit(q, e);
}
struct block_device *__disk_get_part(struct gendisk *disk, int partno);
ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,


@ -246,7 +246,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
if (!bio)
return NULL;
bio->bi_disk = bio_src->bi_disk;
bio->bi_bdev = bio_src->bi_bdev;
bio->bi_opf = bio_src->bi_opf;
bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_write_hint = bio_src->bi_write_hint;


@ -161,7 +161,7 @@ static void part_in_flight_rw(struct block_device *part,
inflight[1] = 0;
}
struct block_device *__disk_get_part(struct gendisk *disk, int partno)
static struct block_device *__disk_get_part(struct gendisk *disk, int partno)
{
struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);


@ -284,7 +284,7 @@ out:
static blk_qc_t brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_disk->private_data;
struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
sector_t sector = bio->bi_iter.bi_sector;
struct bio_vec bvec;
struct bvec_iter iter;


@ -1579,8 +1579,8 @@ static inline void drbd_submit_bio_noacct(struct drbd_device *device,
int fault_type, struct bio *bio)
{
__release(local);
if (!bio->bi_disk) {
drbd_err(device, "drbd_submit_bio_noacct: bio->bi_disk == NULL\n");
if (!bio->bi_bdev) {
drbd_err(device, "drbd_submit_bio_noacct: bio->bi_bdev == NULL\n");
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return;


@ -1595,7 +1595,7 @@ void do_submit(struct work_struct *ws)
blk_qc_t drbd_submit_bio(struct bio *bio)
{
struct drbd_device *device = bio->bi_disk->private_data;
struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
unsigned long start_jif;
blk_queue_split(&bio);


@ -1420,7 +1420,7 @@ static blk_qc_t null_submit_bio(struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector;
sector_t nr_sectors = bio_sectors(bio);
struct nullb *nullb = bio->bi_disk->private_data;
struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
struct nullb_queue *nq = nullb_to_queue(nullb);
struct nullb_cmd *cmd;


@ -2374,7 +2374,7 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
blk_queue_split(&bio);
pd = bio->bi_disk->queue->queuedata;
pd = bio->bi_bdev->bd_disk->queue->queuedata;
if (!pd) {
pr_err("%s incorrect request queue\n", bio_devname(bio, b));
goto end_io;
@ -2418,7 +2418,7 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
split = bio;
}
pkt_make_request_write(bio->bi_disk->queue, split);
pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
} while (split != bio);
return BLK_QC_T_NONE;


@ -581,7 +581,7 @@ out:
static blk_qc_t ps3vram_submit_bio(struct bio *bio)
{
struct ps3_system_bus_device *dev = bio->bi_disk->private_data;
struct ps3_system_bus_device *dev = bio->bi_bdev->bd_disk->private_data;
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
int busy;


@ -122,7 +122,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
static blk_qc_t rsxx_submit_bio(struct bio *bio)
{
struct rsxx_cardinfo *card = bio->bi_disk->private_data;
struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data;
struct rsxx_bio_meta *bio_meta;
blk_status_t st = BLK_STS_IOERR;


@ -521,7 +521,7 @@ static int mm_check_plugged(struct cardinfo *card)
static blk_qc_t mm_submit_bio(struct bio *bio)
{
struct cardinfo *card = bio->bi_disk->private_data;
struct cardinfo *card = bio->bi_bdev->bd_disk->private_data;
pr_debug("mm_make_request %llu %u\n",
(unsigned long long)bio->bi_iter.bi_sector,


@ -1596,7 +1596,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
*/
static blk_qc_t zram_submit_bio(struct bio *bio)
{
struct zram *zram = bio->bi_disk->private_data;
struct zram *zram = bio->bi_bdev->bd_disk->private_data;
if (!valid_io_request(zram, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size)) {


@ -49,7 +49,7 @@ struct bio_set pblk_bio_set;
static blk_qc_t pblk_submit_bio(struct bio *bio)
{
struct pblk *pblk = bio->bi_disk->queue->queuedata;
struct pblk *pblk = bio->bi_bdev->bd_disk->queue->queuedata;
if (bio_op(bio) == REQ_OP_DISCARD) {
pblk_discard(pblk, bio);


@ -114,7 +114,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
check = bio_kmalloc(GFP_NOIO, bio_segments(bio));
if (!check)
return;
check->bi_disk = bio->bi_disk;
check->bi_bdev = bio->bi_bdev;
check->bi_opf = REQ_OP_READ;
check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
check->bi_iter.bi_size = bio->bi_iter.bi_size;


@ -894,7 +894,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
reada = min_t(sector_t, dc->readahead >> 9,
get_capacity(bio->bi_disk) - bio_end_sector(bio));
get_capacity(bio->bi_bdev->bd_disk) -
bio_end_sector(bio));
s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
@ -1167,7 +1168,7 @@ static void quit_max_writeback_rate(struct cache_set *c,
blk_qc_t cached_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct bcache_device *d = bio->bi_disk->private_data;
struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
int rw = bio_data_dir(bio);
@ -1274,7 +1275,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct closure *cl;
struct bcache_device *d = bio->bi_disk->private_data;
struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
bio->bi_status = BLK_STS_IOERR;


@ -18,8 +18,7 @@
*/
struct dm_bio_details {
struct gendisk *bi_disk;
u8 bi_partno;
struct block_device *bi_bdev;
int __bi_remaining;
unsigned long bi_flags;
struct bvec_iter bi_iter;
@ -31,8 +30,7 @@ struct dm_bio_details {
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
{
bd->bi_disk = bio->bi_disk;
bd->bi_partno = bio->bi_partno;
bd->bi_bdev = bio->bi_bdev;
bd->bi_flags = bio->bi_flags;
bd->bi_iter = bio->bi_iter;
bd->__bi_remaining = atomic_read(&bio->__bi_remaining);
@ -44,8 +42,7 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
{
bio->bi_disk = bd->bi_disk;
bio->bi_partno = bd->bi_partno;
bio->bi_bdev = bd->bi_bdev;
bio->bi_flags = bd->bi_flags;
bio->bi_iter = bd->bi_iter;
atomic_set(&bio->__bi_remaining, bd->__bi_remaining);


@ -145,7 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
struct dm_raid1_bio_record {
struct mirror *m;
/* if details->bi_disk == NULL, details were not saved */
/* if details->bi_bdev == NULL, details were not saved */
struct dm_bio_details details;
region_t write_region;
};
@ -1190,7 +1190,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
struct dm_raid1_bio_record *bio_record =
dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
bio_record->details.bi_disk = NULL;
bio_record->details.bi_bdev = NULL;
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
@ -1257,7 +1257,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
goto out;
if (unlikely(*error)) {
if (!bio_record->details.bi_disk) {
if (!bio_record->details.bi_bdev) {
/*
* There wasn't enough memory to record necessary
* information for a retry or there was no other
@ -1282,7 +1282,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
bd = &bio_record->details;
dm_bio_restore(bd, bio);
bio_record->details.bi_disk = NULL;
bio_record->details.bi_bdev = NULL;
bio->bi_status = 0;
queue_bio(ms, bio, rw);
@ -1292,7 +1292,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
}
out:
bio_record->details.bi_disk = NULL;
bio_record->details.bi_bdev = NULL;
return DM_ENDIO_DONE;
}


@ -977,16 +977,17 @@ static void clone_endio(struct bio *bio)
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
struct bio *orig_bio = io->orig_bio;
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD &&
!bio->bi_disk->queue->limits.max_discard_sectors)
!q->limits.max_discard_sectors)
disable_discard(md);
else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
!bio->bi_disk->queue->limits.max_write_same_sectors)
!q->limits.max_write_same_sectors)
disable_write_same(md);
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!bio->bi_disk->queue->limits.max_write_zeroes_sectors)
!q->limits.max_write_zeroes_sectors)
disable_write_zeroes(md);
}
@ -996,7 +997,7 @@ static void clone_endio(struct bio *bio)
*/
if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
sector_t written_sector = bio->bi_iter.bi_sector;
struct request_queue *q = orig_bio->bi_disk->queue;
struct request_queue *q = orig_bio->bi_bdev->bd_disk->queue;
u64 mask = (u64)blk_queue_zone_sectors(q) - 1;
orig_bio->bi_iter.bi_sector += written_sector & mask;
@ -1422,8 +1423,7 @@ static int __send_empty_flush(struct clone_info *ci)
*/
bio_init(&flush_bio, NULL, 0);
flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
flush_bio.bi_disk = ci->io->md->disk;
bio_associate_blkg(&flush_bio);
bio_set_dev(&flush_bio, ci->io->md->disk->part0);
ci->bio = &flush_bio;
ci->sector_count = 0;
@ -1626,7 +1626,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
static blk_qc_t dm_submit_bio(struct bio *bio)
{
struct mapped_device *md = bio->bi_disk->private_data;
struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
struct dm_table *map;


@ -252,7 +252,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
start_sector + data_offset;
if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bio->bi_disk->queue))) {
!blk_queue_discard(bio->bi_bdev->bd_disk->queue))) {
/* Just ignore it */
bio_endio(bio);
} else {


@ -486,7 +486,7 @@ static void md_end_io(struct bio *bio)
static blk_qc_t md_submit_bio(struct bio *bio)
{
const int rw = bio_data_dir(bio);
struct mddev *mddev = bio->bi_disk->private_data;
struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
if (mddev == NULL || mddev->pers == NULL) {
bio_io_error(bio);


@ -556,7 +556,7 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect
static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
{
atomic_add(nr_sectors, &bio->bi_disk->sync_io);
md_sync_acct(bio->bi_bdev, nr_sectors);
}
struct md_personality
@ -793,14 +793,14 @@ static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
{
if (bio_op(bio) == REQ_OP_WRITE_SAME &&
!bio->bi_disk->queue->limits.max_write_same_sectors)
!bio->bi_bdev->bd_disk->queue->limits.max_write_same_sectors)
mddev->queue->limits.max_write_same_sectors = 0;
}
static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!bio->bi_disk->queue->limits.max_write_zeroes_sectors)
!bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors)
mddev->queue->limits.max_write_zeroes_sectors = 0;
}


@ -794,13 +794,13 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
struct md_rdev *rdev = (void *)bio->bi_disk;
struct md_rdev *rdev = (void *)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bio->bi_disk->queue)))
!blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@ -1520,7 +1520,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
mbio->bi_disk = (void *)conf->mirrors[i].rdev;
mbio->bi_bdev = (void *)conf->mirrors[i].rdev;
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb)


@ -882,13 +882,13 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
struct md_rdev *rdev = (void*)bio->bi_disk;
struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bio->bi_disk->queue)))
!blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@ -1075,13 +1075,13 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
struct md_rdev *rdev = (void*)bio->bi_disk;
struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bio->bi_disk->queue)))
!blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@ -1253,7 +1253,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk),
r10_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
mbio->bi_disk = (void *)rdev;
mbio->bi_bdev = (void *)rdev;
atomic_inc(&r10_bio->remaining);
@ -3003,7 +3003,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
* have bi_end_io, bi_sector, bi_disk set,
* have bi_end_io, bi_sector, bi_bdev set,
* and bi_private set to the r10bio.
* For recovery, we may actually create several r10bios
* with 2 bios in each, that correspond to the bios in the main one.


@ -5310,7 +5310,7 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
unsigned int chunk_sectors;
unsigned int bio_sectors = bio_sectors(bio);
WARN_ON_ONCE(bio->bi_partno);
WARN_ON_ONCE(bio->bi_bdev->bd_partno);
chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
return chunk_sectors >=


@ -165,7 +165,7 @@ static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
static blk_qc_t nd_blk_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip;
struct nd_namespace_blk *nsblk = bio->bi_disk->private_data;
struct nd_namespace_blk *nsblk = bio->bi_bdev->bd_disk->private_data;
struct bvec_iter iter;
unsigned long start;
struct bio_vec bvec;
@ -177,7 +177,7 @@ static blk_qc_t nd_blk_submit_bio(struct bio *bio)
bip = bio_integrity(bio);
rw = bio_data_dir(bio);
do_acct = blk_queue_io_stat(bio->bi_disk->queue);
do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
if (do_acct)
start = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {


@ -1442,7 +1442,7 @@ static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
static blk_qc_t btt_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct btt *btt = bio->bi_disk->private_data;
struct btt *btt = bio->bi_bdev->bd_disk->private_data;
struct bvec_iter iter;
unsigned long start;
struct bio_vec bvec;
@ -1452,7 +1452,7 @@ static blk_qc_t btt_submit_bio(struct bio *bio)
if (!bio_integrity_prep(bio))
return BLK_QC_T_NONE;
do_acct = blk_queue_io_stat(bio->bi_disk->queue);
do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
if (do_acct)
start = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {


@ -197,13 +197,13 @@ static blk_qc_t pmem_submit_bio(struct bio *bio)
unsigned long start;
struct bio_vec bvec;
struct bvec_iter iter;
struct pmem_device *pmem = bio->bi_disk->private_data;
struct pmem_device *pmem = bio->bi_bdev->bd_disk->private_data;
struct nd_region *nd_region = to_region(pmem);
if (bio->bi_opf & REQ_PREFLUSH)
ret = nvdimm_flush(nd_region, bio);
do_acct = blk_queue_io_stat(bio->bi_disk->queue);
do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
if (do_acct)
start = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {


@ -1113,7 +1113,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
struct gendisk *disk = ns ? ns->disk : NULL;
struct block_device *bdev = ns ? ns->disk->part0 : NULL;
struct request *req;
struct bio *bio = NULL;
void *meta = NULL;
@ -1133,8 +1133,8 @@ static int nvme_submit_user_cmd(struct request_queue *q,
if (ret)
goto out;
bio = req->bio;
bio->bi_disk = disk;
if (disk && meta_buffer && meta_len) {
bio->bi_bdev = bdev;
if (bdev && meta_buffer && meta_len) {
meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
meta_seed, write);
if (IS_ERR(meta)) {


@ -757,7 +757,6 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
{
bool write = nvme_is_write((struct nvme_command *)vcmd);
struct nvm_dev *dev = ns->ndev;
struct gendisk *disk = ns->disk;
struct request *rq;
struct bio *bio = NULL;
__le64 *ppa_list = NULL;
@ -817,7 +816,7 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
}
bio->bi_disk = disk;
bio->bi_bdev = ns->disk->part0;
}
blk_execute_rq(q, NULL, rq, 0);


@ -296,7 +296,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
{
struct nvme_ns_head *head = bio->bi_disk->private_data;
struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
struct device *dev = disk_to_dev(head->disk);
struct nvme_ns *ns;
blk_qc_t ret = BLK_QC_T_NONE;
@ -312,7 +312,7 @@ blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
if (likely(ns)) {
bio->bi_disk = ns->disk;
bio->bi_bdev = ns->disk->part0;
bio->bi_opf |= REQ_NVME_MPATH;
trace_block_bio_remap(bio, disk_devt(ns->head->disk),
bio->bi_iter.bi_sector);
@ -352,7 +352,7 @@ static void nvme_requeue_work(struct work_struct *work)
* Reset disk to the mpath node and resubmit to select a new
* path.
*/
bio->bi_disk = head->disk;
bio->bi_bdev = head->disk->part0;
submit_bio_noacct(bio);
}
}


@ -1468,7 +1468,7 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
if (unlikely(nr))
goto mr_put;
nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_disk), c,
nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c,
req->mr->sig_attrs, ns->pi_type);
nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);


@ -879,7 +879,7 @@ dcssblk_submit_bio(struct bio *bio)
blk_queue_split(&bio);
bytes_done = 0;
dev_info = bio->bi_disk->private_data;
dev_info = bio->bi_bdev->bd_disk->private_data;
if (dev_info == NULL)
goto fail;
if ((bio->bi_iter.bi_sector & 7) != 0 ||


@ -184,7 +184,7 @@ static unsigned long xpram_highest_page_index(void)
*/
static blk_qc_t xpram_submit_bio(struct bio *bio)
{
xpram_device_t *xdev = bio->bi_disk->private_data;
xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec bvec;
struct bvec_iter iter;
unsigned int index;


@ -2674,7 +2674,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
mutex_lock(&btrfsic_mutex);
/* since btrfsic_submit_bio() is also called before
* btrfsic_mount(), this might return NULL */
dev_state = btrfsic_dev_state_lookup(bio_dev(bio) + bio->bi_partno);
dev_state = btrfsic_dev_state_lookup(bio->bi_bdev->bd_dev);
if (NULL != dev_state &&
(bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
unsigned int i = 0;
@ -2690,9 +2690,9 @@ static void __btrfsic_submit_bio(struct bio *bio)
bio_is_patched = 0;
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_disk=%p)\n",
pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
bio_op(bio), bio->bi_opf, segs,
bio->bi_iter.bi_sector, dev_bytenr, bio->bi_disk);
bio->bi_iter.bi_sector, dev_bytenr, bio->bi_bdev);
mapped_datav = kmalloc_array(segs,
sizeof(*mapped_datav), GFP_NOFS);
@ -2721,8 +2721,8 @@ static void __btrfsic_submit_bio(struct bio *bio)
} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
pr_info("submit_bio(rw=%d,0x%x FLUSH, disk=%p)\n",
bio_op(bio), bio->bi_opf, bio->bi_disk);
pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
bio_op(bio), bio->bi_opf, bio->bi_bdev);
if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
if ((dev_state->state->print_mask &
(BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |


@ -1105,8 +1105,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
* devices or if they are not contiguous
*/
if (last_end == disk_start && !last->bi_status &&
last->bi_disk == stripe->dev->bdev->bd_disk &&
last->bi_partno == stripe->dev->bdev->bd_partno) {
last->bi_bdev == stripe->dev->bdev) {
ret = bio_add_page(last, page, PAGE_SIZE, 0);
if (ret == PAGE_SIZE)
return 0;
@ -1357,9 +1356,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
for (i = 0; i < rbio->bbio->num_stripes; i++) {
stripe = &rbio->bbio->stripes[i];
if (in_range(physical, stripe->physical, rbio->stripe_len) &&
stripe->dev->bdev &&
bio->bi_disk == stripe->dev->bdev->bd_disk &&
bio->bi_partno == stripe->dev->bdev->bd_partno) {
stripe->dev->bdev && bio->bi_bdev == stripe->dev->bdev) {
return i;
}
}


@ -1695,7 +1695,7 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
sbio = sctx->wr_curr_bio;
sctx->wr_curr_bio = NULL;
WARN_ON(!sbio->bio->bi_disk);
WARN_ON(!sbio->bio->bi_bdev);
scrub_pending_bio_inc(sctx);
/* process all writes in a single worker thread. Then the block layer
* orders the requests before sending them to the driver which


@ -434,7 +434,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
bio_set_pages_dirty(bio);
dio->bio_disk = bio->bi_disk;
dio->bio_disk = bio->bi_bdev->bd_disk;
if (sdio->submit_io) {
sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);


@ -427,16 +427,6 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
return 0;
}
/*
* Return true, if pre_bio's bdev is same as its target device.
*/
static bool __same_bdev(struct f2fs_sb_info *sbi,
block_t blk_addr, struct bio *bio)
{
struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}
static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
struct f2fs_sb_info *sbi = fio->sbi;
@ -741,7 +731,7 @@ static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
return false;
if (last_blkaddr + 1 != cur_blkaddr)
return false;
return __same_bdev(sbi, cur_blkaddr, bio);
return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
}
static bool io_type_is_mergeable(struct f2fs_bio_info *io,


@ -483,24 +483,22 @@ extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
extern const char *bio_devname(struct bio *bio, char *buffer);
#define bio_set_dev(bio, bdev) \
do { \
if ((bio)->bi_disk != (bdev)->bd_disk) \
bio_clear_flag(bio, BIO_THROTTLED);\
(bio)->bi_disk = (bdev)->bd_disk; \
(bio)->bi_partno = (bdev)->bd_partno; \
bio_associate_blkg(bio); \
#define bio_set_dev(bio, bdev) \
do { \
if ((bio)->bi_bdev != (bdev)) \
bio_clear_flag(bio, BIO_THROTTLED); \
(bio)->bi_bdev = (bdev); \
bio_associate_blkg(bio); \
} while (0)
#define bio_copy_dev(dst, src) \
do { \
(dst)->bi_disk = (src)->bi_disk; \
(dst)->bi_partno = (src)->bi_partno; \
(dst)->bi_bdev = (src)->bi_bdev; \
bio_clone_blkg_association(dst, src); \
} while (0)
#define bio_dev(bio) \
disk_devt((bio)->bi_disk)
disk_devt((bio)->bi_bdev->bd_disk)
#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
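
The reworked helpers keep their semantics: bio_set_dev() still clears
BIO_THROTTLED, but only when the target block_device actually changes,
and bio_dev() now derives the dev_t through bd_disk.  A small usage
sketch assuming the macros above; the function name is hypothetical:

/* Retarget a clone and log the resulting device name. */
static void example_retarget(struct bio *clone, struct bio *src,
			     struct block_device *new_bdev)
{
	char name[BDEVNAME_SIZE];

	bio_copy_dev(clone, src);	/* copies bi_bdev + blkg association */
	bio_set_dev(clone, new_bdev);	/* BIO_THROTTLED dropped only if
					 * bi_bdev really changed */
	pr_debug("clone now targets %s\n", bio_devname(clone, name));
}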


@ -602,8 +602,8 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
rq->bio = rq->biotail = bio;
rq->ioprio = bio_prio(bio);
if (bio->bi_disk)
rq->rq_disk = bio->bi_disk;
if (bio->bi_bdev)
rq->rq_disk = bio->bi_bdev->bd_disk;
}
blk_qc_t blk_mq_submit_bio(struct bio *bio);


@ -222,7 +222,7 @@ static inline void bio_issue_init(struct bio_issue *issue,
*/
struct bio {
struct bio *bi_next; /* request queue link */
struct gendisk *bi_disk;
struct block_device *bi_bdev;
unsigned int bi_opf; /* bottom bits req flags,
* top bits REQ_OP. Use
* accessors.
@ -231,7 +231,6 @@ struct bio {
unsigned short bi_ioprio;
unsigned short bi_write_hint;
blk_status_t bi_status;
u8 bi_partno;
atomic_t __bi_remaining;
struct bvec_iter bi_iter;


@ -1967,7 +1967,8 @@ void part_end_io_acct(struct block_device *part, struct bio *bio,
*/
static inline unsigned long bio_start_io_acct(struct bio *bio)
{
return disk_start_io_acct(bio->bi_disk, bio_sectors(bio), bio_op(bio));
return disk_start_io_acct(bio->bi_bdev->bd_disk, bio_sectors(bio),
bio_op(bio));
}
/**
@ -1977,7 +1978,7 @@ static inline unsigned long bio_start_io_acct(struct bio *bio)
*/
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time);
return disk_end_io_acct(bio->bi_bdev->bd_disk, bio_op(bio), start_time);
}
int bdev_read_only(struct block_device *bdev);
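
For bio-based drivers the accounting pair is typically wrapped in a
blk_queue_io_stat() check, as the nvdimm ->submit_bio() instances in this
diff do.  A condensed sketch of that pattern under the new field layout
(driver specifics elided, function name illustrative):

static blk_qc_t example_submit_bio(struct bio *bio)
{
	unsigned long start = 0;
	bool do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);

	if (do_acct)
		start = bio_start_io_acct(bio);
	/* ... carry out the transfer described by bio ... */
	if (do_acct)
		bio_end_io_acct(bio, start);
	bio_endio(bio);
	return BLK_QC_T_NONE;
}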


@ -903,7 +903,7 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio)
{
blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_BOUNCE, 0);
blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0);
}
static void blk_add_trace_bio_complete(void *ignore,
@ -915,22 +915,24 @@ static void blk_add_trace_bio_complete(void *ignore,
static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
{
blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_BACKMERGE, 0);
blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,
0);
}
static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
{
blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_FRONTMERGE, 0);
blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,
0);
}
static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
{
blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_QUEUE, 0);
blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);
}
static void blk_add_trace_getrq(void *ignore, struct bio *bio)
{
blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_GETRQ, 0);
blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);
}
static void blk_add_trace_plug(void *ignore, struct request_queue *q)
@ -967,7 +969,7 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
{
struct request_queue *q = bio->bi_disk->queue;
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
struct blk_trace *bt;
rcu_read_lock();
@ -997,7 +999,7 @@ static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
sector_t from)
{
struct request_queue *q = bio->bi_disk->queue;
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
struct blk_trace *bt;
struct blk_io_trace_remap r;


@ -433,7 +433,7 @@ int swap_readpage(struct page *page, bool synchronous)
ret = -ENOMEM;
goto out;
}
disk = bio->bi_disk;
disk = bio->bi_bdev->bd_disk;
/*
* Keep this task valid during swap readpage because the oom killer may
* attempt to access it in the page fault retry time check.