for-linus-20190802
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl1ERCMQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpjr7D/0U8SMu1T9JOge91zXQQUc7XtCX9RvHYhhj
vbwwN9RwpIfrTwuLZUCvt2vEz8WPOVfZbwYGkfFcdI+N5I/dOfT8Swiwy7Zabpi2
KTedn2EdELTizEuWQ3QhaBHWuTGvE04aAzZTBRCQ0tCOYTPpXGRavxhG6UHcQi+z
lohB5Pr/cyX8/jWJj4kq7381QYUUH2bm9uY7qutBsQOt2CsN5prjWxX3JM6EO1wb
VyyI25fWLaS+bZW+crVutcARxccuav4e+LEJbb9Z7+19vjmkc2qE+22F3MBxYCzo
tOjU0RP0IvvVR9t0Hahw/3MnDTDfuSqlqrT12zNtn7FrzOKpkygMyRa+u8YygI6k
2iAp92HkNWpjBxUFNGoRCRfJpApG3vT6/VkI8tixFSw/Re3F1H9Bc9IRZxc3uU4H
5DMRmjZXGg+8Nw+93XzwWnD1paCJcDsHRHUpWFNJvRfJYQzDaziPUBV9a9TZ+HMF
BnCJBCW641tcA5yCRwBF6OpoowtmxOtWce7Lr9wAjU+cYHMEzOQoG+J6gPH3q8Jh
aD9U2FcnE6kReL+MsGj42q1U1n60xngcdzo8Ca4bWfWNpqb4lJatjumkDAiI6U4q
DFDs9bRbB4LLgwkRQ+n1biwAK626KJOp5lGXrEu7XHXSTlO/BiJytISwASjlzKsZ
4uGHc/uUdA==
=P5E/
-----END PGP SIGNATURE-----

Merge tag 'for-linus-20190802' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Here's a small collection of fixes that should go into this series.
  This contains:

   - io_uring potential use-after-free fix (Jackie)
   - loop regression fix (Jan)
   - O_DIRECT fragmented bio regression fix (Damien)
   - Mark Denis as the new floppy maintainer (Denis)
   - ataflop switch fall-through annotation (Gustavo)
   - libata zpodd overflow fix (Kees)
   - libata ahci deferred probe fix (Miquel)
   - nbd invalidation BUG_ON() fix (Munehisa)
   - dasd endless loop fix (Stefan)"

* tag 'for-linus-20190802' of git://git.kernel.dk/linux-block:
  s390/dasd: fix endless loop after read unit address configuration
  block: Fix __blkdev_direct_IO() for bio fragments
  MAINTAINERS: floppy: take over maintainership
  nbd: replace kill_bdev() with __invalidate_device() again
  ata: libahci: do not complain in case of deferred probe
  io_uring: fix KASAN use after free in io_sq_wq_submit_work
  loop: Fix mount(2) failure due to race with LOOP_SET_FD
  libata: zpodd: Fix small read overflow in zpodd_get_mech_type()
  ataflop: Mark expected switch fall-through
Commit 10e5ddd71f
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6322,7 +6322,8 @@ F:	Documentation/devicetree/bindings/counter/ftm-quaddec.txt
 F:	drivers/counter/ftm-quaddec.c
 
 FLOPPY DRIVER
-S:	Orphan
+M:	Denis Efremov <efremov@linux.com>
+S:	Odd Fixes
 L:	linux-block@vger.kernel.org
 F:	drivers/block/floppy.c
 
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -338,6 +338,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
 		hpriv->phys[port] = NULL;
 		rc = 0;
 		break;
+	case -EPROBE_DEFER:
+		/* Do not complain yet */
+		break;
 
 	default:
 		dev_err(dev,
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
 	unsigned int ret;
 	struct rm_feature_desc *desc;
 	struct ata_taskfile tf;
-	static const char cdb[] = { GPCMD_GET_CONFIGURATION,
+	static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION,
 			2,      /* only 1 feature descriptor requested */
 			0, 3,   /* 3, removable medium feature */
 			0, 0, 0,/* reserved */
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1726,6 +1726,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
 		/* MSch: invalidate default_params */
 		default_params[drive].blocks = 0;
 		set_capacity(floppy->disk, MAX_DISK_SIZE * 2);
+		/* Fall through */
 	case FDFMTEND:
 	case FDFLUSH:
 		/* invalidate the buffer track to force a reread */
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -924,6 +924,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	struct file *file;
 	struct inode *inode;
 	struct address_space *mapping;
+	struct block_device *claimed_bdev = NULL;
 	int lo_flags = 0;
 	int error;
 	loff_t size;
@@ -942,10 +943,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	 * here to avoid changing device under exclusive owner.
 	 */
 	if (!(mode & FMODE_EXCL)) {
-		bdgrab(bdev);
-		error = blkdev_get(bdev, mode | FMODE_EXCL, loop_set_fd);
-		if (error)
+		claimed_bdev = bd_start_claiming(bdev, loop_set_fd);
+		if (IS_ERR(claimed_bdev)) {
+			error = PTR_ERR(claimed_bdev);
 			goto out_putf;
+		}
 	}
 
 	error = mutex_lock_killable(&loop_ctl_mutex);
@@ -1015,15 +1017,15 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	mutex_unlock(&loop_ctl_mutex);
 	if (partscan)
 		loop_reread_partitions(lo, bdev);
-	if (!(mode & FMODE_EXCL))
-		blkdev_put(bdev, mode | FMODE_EXCL);
+	if (claimed_bdev)
+		bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
 	return 0;
 
 out_unlock:
 	mutex_unlock(&loop_ctl_mutex);
 out_bdev:
-	if (!(mode & FMODE_EXCL))
-		blkdev_put(bdev, mode | FMODE_EXCL);
+	if (claimed_bdev)
+		bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
 out_putf:
 	fput(file);
 out:
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1231,7 +1231,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
 				 struct block_device *bdev)
 {
 	sock_shutdown(nbd);
-	kill_bdev(bdev);
+	__invalidate_device(bdev, true);
 	nbd_bdev_reset(bdev);
 	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
 			       &nbd->config->runtime_flags))
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -383,6 +383,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
 	char msg_format;
 	char msg_no;
 
+	/*
+	 * intrc values ENODEV, ENOLINK and EPERM
+	 * will be optained from sleep_on to indicate that no
+	 * IO operation can be started
+	 */
+	if (cqr->intrc == -ENODEV)
+		return 1;
+
+	if (cqr->intrc == -ENOLINK)
+		return 1;
+
+	if (cqr->intrc == -EPERM)
+		return 1;
+
 	sense = dasd_get_sense(&cqr->irb);
 	if (!sense)
 		return 0;
@@ -447,12 +461,8 @@ static int read_unit_address_configuration(struct dasd_device *device,
 	lcu->flags &= ~NEED_UAC_UPDATE;
 	spin_unlock_irqrestore(&lcu->lock, flags);
 
-	do {
-		rc = dasd_sleep_on(cqr);
-		if (rc && suborder_not_supported(cqr))
-			return -EOPNOTSUPP;
-	} while (rc && (cqr->retries > 0));
-	if (rc) {
+	rc = dasd_sleep_on(cqr);
+	if (rc && !suborder_not_supported(cqr)) {
 		spin_lock_irqsave(&lcu->lock, flags);
 		lcu->flags |= NEED_UAC_UPDATE;
 		spin_unlock_irqrestore(&lcu->lock, flags);
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -439,6 +439,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 				ret = -EAGAIN;
 				goto error;
 			}
+			ret = dio->size;
 
 			if (polled)
 				WRITE_ONCE(iocb->ki_cookie, qc);
@@ -465,7 +466,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 				ret = -EAGAIN;
 				goto error;
 			}
-			ret += bio->bi_iter.bi_size;
+			ret = dio->size;
 
 			bio = bio_alloc(gfp, nr_pages);
 			if (!bio) {
@@ -1181,8 +1182,7 @@ static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
  * Pointer to the block device containing @bdev on success, ERR_PTR()
  * value on failure.
  */
-static struct block_device *bd_start_claiming(struct block_device *bdev,
-					      void *holder)
+struct block_device *bd_start_claiming(struct block_device *bdev, void *holder)
 {
 	struct gendisk *disk;
 	struct block_device *whole;
@@ -1229,6 +1229,62 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
 		return ERR_PTR(err);
 	}
 }
+EXPORT_SYMBOL(bd_start_claiming);
+
+static void bd_clear_claiming(struct block_device *whole, void *holder)
+{
+	lockdep_assert_held(&bdev_lock);
+	/* tell others that we're done */
+	BUG_ON(whole->bd_claiming != holder);
+	whole->bd_claiming = NULL;
+	wake_up_bit(&whole->bd_claiming, 0);
+}
+
+/**
+ * bd_finish_claiming - finish claiming of a block device
+ * @bdev: block device of interest
+ * @whole: whole block device (returned from bd_start_claiming())
+ * @holder: holder that has claimed @bdev
+ *
+ * Finish exclusive open of a block device. Mark the device as exlusively
+ * open by the holder and wake up all waiters for exclusive open to finish.
+ */
+void bd_finish_claiming(struct block_device *bdev, struct block_device *whole,
+		void *holder)
+{
+	spin_lock(&bdev_lock);
+	BUG_ON(!bd_may_claim(bdev, whole, holder));
+	/*
+	 * Note that for a whole device bd_holders will be incremented twice,
+	 * and bd_holder will be set to bd_may_claim before being set to holder
+	 */
+	whole->bd_holders++;
+	whole->bd_holder = bd_may_claim;
+	bdev->bd_holders++;
+	bdev->bd_holder = holder;
+	bd_clear_claiming(whole, holder);
+	spin_unlock(&bdev_lock);
+}
+EXPORT_SYMBOL(bd_finish_claiming);
+
+/**
+ * bd_abort_claiming - abort claiming of a block device
+ * @bdev: block device of interest
+ * @whole: whole block device (returned from bd_start_claiming())
+ * @holder: holder that has claimed @bdev
+ *
+ * Abort claiming of a block device when the exclusive open failed. This can be
+ * also used when exclusive open is not actually desired and we just needed
+ * to block other exclusive openers for a while.
+ */
+void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
+		       void *holder)
+{
+	spin_lock(&bdev_lock);
+	bd_clear_claiming(whole, holder);
+	spin_unlock(&bdev_lock);
+}
+EXPORT_SYMBOL(bd_abort_claiming);
 
 #ifdef CONFIG_SYSFS
 struct bd_holder_disk {
@@ -1698,29 +1754,7 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
 
 		/* finish claiming */
 		mutex_lock(&bdev->bd_mutex);
-		spin_lock(&bdev_lock);
-
-		if (!res) {
-			BUG_ON(!bd_may_claim(bdev, whole, holder));
-			/*
-			 * Note that for a whole device bd_holders
-			 * will be incremented twice, and bd_holder
-			 * will be set to bd_may_claim before being
-			 * set to holder
-			 */
-			whole->bd_holders++;
-			whole->bd_holder = bd_may_claim;
-			bdev->bd_holders++;
-			bdev->bd_holder = holder;
-		}
-
-		/* tell others that we're done */
-		BUG_ON(whole->bd_claiming != holder);
-		whole->bd_claiming = NULL;
-		wake_up_bit(&whole->bd_claiming, 0);
-
-		spin_unlock(&bdev_lock);
-
+		bd_finish_claiming(bdev, whole, holder);
 		/*
 		 * Block event polling for write claims if requested. Any
 		 * write holder makes the write_holder state stick until
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1838,6 +1838,7 @@ restart:
 	do {
 		struct sqe_submit *s = &req->submit;
 		const struct io_uring_sqe *sqe = s->sqe;
+		unsigned int flags = req->flags;
 
 		/* Ensure we clear previously set non-block flag */
 		req->rw.ki_flags &= ~IOCB_NOWAIT;
@@ -1883,7 +1884,7 @@ restart:
 		kfree(sqe);
 
 		/* req from defer and link list needn't decrease async cnt */
-		if (req->flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
+		if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
 			goto out;
 
 		if (!async_list)
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2598,6 +2598,12 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
 					       void *holder);
 extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
 					      void *holder);
+extern struct block_device *bd_start_claiming(struct block_device *bdev,
+					      void *holder);
+extern void bd_finish_claiming(struct block_device *bdev,
+		struct block_device *whole, void *holder);
+extern void bd_abort_claiming(struct block_device *bdev,
+		struct block_device *whole, void *holder);
 extern void blkdev_put(struct block_device *bdev, fmode_t mode);
 extern int __blkdev_reread_part(struct block_device *bdev);
 extern int blkdev_reread_part(struct block_device *bdev);
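
Note on the newly exported claiming helpers: the sketch below is illustrative only and is not part of this pull request. It shows how a driver could use bd_start_claiming()/bd_abort_claiming() to hold off other exclusive openers while it reconfigures a device, mirroring the loop_set_fd() change above; my_driver_setup is a hypothetical placeholder and error handling is abbreviated.

	#include <linux/fs.h>
	#include <linux/blkdev.h>
	#include <linux/err.h>

	/*
	 * Illustrative sketch (not from this series): temporarily claim a
	 * block device so concurrent exclusive opens are blocked while we
	 * reconfigure it, then abort the claim because no exclusive open is
	 * actually completed here.
	 */
	static int my_driver_setup(struct block_device *bdev, fmode_t mode)
	{
		struct block_device *claimed_bdev = NULL;

		if (!(mode & FMODE_EXCL)) {
			/* Block other exclusive openers (cf. loop_set_fd()). */
			claimed_bdev = bd_start_claiming(bdev, my_driver_setup);
			if (IS_ERR(claimed_bdev))
				return PTR_ERR(claimed_bdev);
		}

		/* ... work that must not race with an exclusive open ... */

		if (claimed_bdev)
			/* We never called bd_finish_claiming(), so abort the claim. */
			bd_abort_claiming(bdev, claimed_bdev, my_driver_setup);
		return 0;
	}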