-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmJHUgMQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpgMOD/9J9mO8CQ3THnyJeZb8hy+k7fPHw+P3OrAR
 umOea3ujoMqzJsd/aRenMMAHsr7Phnb5PmljvryOo59nvwxOZ5MIBzSf2H4qJ8U2
 B4jGwESVW4OFNS6Mu+lgYH7XMyDHvqCSVdIhcnqkseoFyndpnTfsu4cCphqajVaP
 gOmXLBSQAetULxMfbqm7ofKk8F7zA80LFbwVs1VWVnCMeLVDccmJJbfn97jDZaJc
 rl8xmcvmarYLOTxDoOSdmfp4ek7QzRKQuKDlfvn1Xi+lDtkKAnYygMHhqQ0WYYc1
 /jJEd3iLCeV0jYfsDpVq6n2KRAGPCtrP0HkujifMGtuL5N2MAn/Aq3Fqoztas1yK
 p3T3SBIBVeznTtOXxl4Fm8tvim3i3rxn2vPdYnm/8uuNxqCQy78gVf5bPlLY+bzT
 4ytrP7AUpyzFj5E+8F33mZd0Vj2AL1kvgjbfEWqyQdXu7zs98UJL3xWicLbrvt/E
 nmdlZjOOBbEgV3vGQD5wTRvlsBJswtl4mHBpYYLzZtmDr8wZxHD3DCUuM1i4xyHG
 qDxNTKME2KfCXA8DIlcPxOAnNeXtwW3J7KTDuPDwC6XW84hFiC2tkM5M8aWqRos+
 GiRczSArhaomaGq8W+vRqgCnPQgFAIt+oyJ9aqen0xHG8qCOTv3N6mMTJk/uBnMI
 qcfpguHp+g==
 =0Axq
 -----END PGP SIGNATURE-----

Merge tag 'for-5.18/drivers-2022-04-01' of git://git.kernel.dk/linux-block

Pull block driver fixes from Jens Axboe:
 "Followup block driver updates and fixes for the 5.18-rc1 merge window.
  In detail:

   - NVMe pull request
       - fix multipath hang when disk goes live over reconnect (Anton
         Eidelman)
       - fix RCU hole that allowed for endless looping in multipath
         round robin (Chris Leech)
       - remove redundant assignment after left shift (Colin Ian King)
       - add quirks for Samsung X5 SSDs (Monish Kumar R)
       - fix the read-only state for zoned namespaces with unsupported
         features (Pankaj Raghav)
       - use a private workqueue instead of the system workqueue in
         nvmet (Sagi Grimberg)
       - allow duplicate NSIDs for private namespaces (Sungup Moon)
       - expose use_threaded_interrupts read-only in sysfs (Xin Hao)

   - nbd minor allocation fix (Zhang)

   - drbd fixes and maintainer addition (Lars, Jakob, Christoph)

   - n64cart build fix (Jackie)

   - loop compat ioctl fix (Carlos)

   - misc fixes (Colin, Dongli)"

* tag 'for-5.18/drivers-2022-04-01' of git://git.kernel.dk/linux-block:
  drbd: remove check of list iterator against head past the loop body
  drbd: remove usage of list iterator variable after loop
  nbd: fix possible overflow on 'first_minor' in nbd_dev_add()
  MAINTAINERS: add drbd co-maintainer
  drbd: fix potential silent data corruption
  loop: fix ioctl calls using compat_loop_info
  nvme-multipath: fix hang when disk goes live over reconnect
  nvme: fix RCU hole that allowed for endless looping in multipath round robin
  nvme: allow duplicate NSIDs for private namespaces
  nvmet: remove redundant assignment after left shift
  nvmet: use a private workqueue instead of the system workqueue
  nvme-pci: add quirks for Samsung X5 SSDs
  nvme-pci: expose use_threaded_interrupts read-only in sysfs
  nvme: fix the read-only state for zoned namespaces with unsupported features
  n64cart: convert bi_disk to bi_bdev->bd_disk fix build
  xen/blkfront: fix comment for need_copy
  xen-blkback: remove redundant assignment to variable i
Linus Torvalds 2022-04-01 16:26:57 -07:00
Parents: d589ae0d44 2651ee5ae4
Commit: 8467b0ed6c
25 changed files with 186 additions and 92 deletions


@@ -6052,6 +6052,7 @@ F: drivers/scsi/dpt/
DRBD DRIVER
M: Philipp Reisner <philipp.reisner@linbit.com>
M: Lars Ellenberg <lars.ellenberg@linbit.com>
M: Christoph Böhmwalder <christoph.boehmwalder@linbit.com>
L: drbd-dev@lists.linbit.com
S: Supported
W: http://www.drbd.org


@@ -171,7 +171,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
unsigned int set_size)
{
struct drbd_request *r;
struct drbd_request *req = NULL;
struct drbd_request *req = NULL, *tmp = NULL;
int expect_epoch = 0;
int expect_size = 0;
@@ -225,8 +225,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
* to catch requests being barrier-acked "unexpectedly".
* It usually should find the same req again, or some READ preceding it. */
list_for_each_entry(req, &connection->transfer_log, tl_requests)
if (req->epoch == expect_epoch)
if (req->epoch == expect_epoch) {
tmp = req;
break;
}
req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
if (req->epoch != expect_epoch)
break;


@@ -180,7 +180,8 @@ void start_new_tl_epoch(struct drbd_connection *connection)
void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m)
{
m->bio->bi_status = errno_to_blk_status(m->error);
if (unlikely(m->error))
m->bio->bi_status = errno_to_blk_status(m->error);
bio_endio(m->bio);
dec_ap_bio(device);
}
@@ -332,17 +333,21 @@ static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct dr
static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
struct drbd_request *iter = req;
if (!connection)
return;
if (connection->req_next != req)
return;
list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
const unsigned s = req->rq_state;
if (s & RQ_NET_QUEUED)
req = NULL;
list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
const unsigned int s = iter->rq_state;
if (s & RQ_NET_QUEUED) {
req = iter;
break;
}
}
if (&req->tl_requests == &connection->transfer_log)
req = NULL;
connection->req_next = req;
}
@@ -358,17 +363,21 @@ static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, st
static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
struct drbd_request *iter = req;
if (!connection)
return;
if (connection->req_ack_pending != req)
return;
list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
const unsigned s = req->rq_state;
if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
req = NULL;
list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
const unsigned int s = iter->rq_state;
if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING)) {
req = iter;
break;
}
}
if (&req->tl_requests == &connection->transfer_log)
req = NULL;
connection->req_ack_pending = req;
}
@@ -384,17 +393,21 @@ static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, s
static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
struct drbd_request *iter = req;
if (!connection)
return;
if (connection->req_not_net_done != req)
return;
list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
const unsigned s = req->rq_state;
if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
req = NULL;
list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
const unsigned int s = iter->rq_state;
if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE)) {
req = iter;
break;
}
}
if (&req->tl_requests == &connection->transfer_log)
req = NULL;
connection->req_not_net_done = req;
}
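
The drbd hunks above all correct the same anti-pattern: once list_for_each_entry() has walked off the end of the list, the cursor no longer points at a real element, so testing or dereferencing it past the loop is unsafe. The fix records any match in a separate pointer (tmp/iter) and only ever consults that. A minimal userspace sketch of the same shape, with illustrative names (struct node, find_epoch) rather than drbd code:

/*
 * Never rely on the loop cursor after the search loop; record the match
 * in a separate pointer and test that instead.
 */
#include <stdio.h>

struct node {
	int epoch;
	struct node *next;
};

/* Search loop that leaves the cursor alone and reports via 'found'. */
static struct node *find_epoch(struct node *head, int expect_epoch)
{
	struct node *iter, *found = NULL;

	for (iter = head; iter; iter = iter->next) {
		if (iter->epoch == expect_epoch) {
			found = iter;	/* record the hit... */
			break;		/* ...and stop; 'iter' is not reused */
		}
	}
	return found;	/* NULL means "no match", never a stale pointer */
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	printf("epoch 2 %s\n", find_epoch(&a, 2) ? "found" : "not found");
	printf("epoch 9 %s\n", find_epoch(&a, 9) ? "found" : "not found");
	return 0;
}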


@@ -1591,6 +1591,7 @@ struct compat_loop_info {
compat_ulong_t lo_inode; /* ioctl r/o */
compat_dev_t lo_rdevice; /* ioctl r/o */
compat_int_t lo_offset;
compat_int_t lo_encrypt_type; /* obsolete, ignored */
compat_int_t lo_encrypt_key_size; /* ioctl w/o */
compat_int_t lo_flags; /* ioctl r/o */
char lo_name[LO_NAME_SIZE];
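
The compat_loop_info hunk above restores the obsolete lo_encrypt_type slot. The value is ignored, but the field is still part of the layout that 32-bit userspace hands to the compat LOOP_SET_STATUS ioctl, so dropping it shifts every later member and changes the structure size. A purely illustrative userspace sketch of that effect, using simplified stand-in structs rather than the real loop_info definitions:

/* Why a fixed ioctl ABI cannot simply drop an obsolete member. */
#include <stddef.h>
#include <stdio.h>

struct abi_with_field {		/* what 32-bit userspace still passes */
	int lo_number;
	int lo_offset;
	int lo_encrypt_type;	/* obsolete, but still part of the layout */
	int lo_encrypt_key_size;
	int lo_flags;
};

struct abi_without_field {	/* hypothetical struct with the field dropped */
	int lo_number;
	int lo_offset;
	int lo_encrypt_key_size;
	int lo_flags;
};

int main(void)
{
	printf("lo_flags offset: %zu vs %zu\n",
	       offsetof(struct abi_with_field, lo_flags),
	       offsetof(struct abi_without_field, lo_flags));
	printf("sizeof: %zu vs %zu\n",
	       sizeof(struct abi_with_field), sizeof(struct abi_without_field));
	return 0;
}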


@@ -88,7 +88,7 @@ static void n64cart_submit_bio(struct bio *bio)
{
struct bio_vec bvec;
struct bvec_iter iter;
struct device *dev = bio->bi_disk->private_data;
struct device *dev = bio->bi_bdev->bd_disk->private_data;
u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT;
bio_for_each_segment(bvec, bio, iter) {


@@ -1800,17 +1800,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
refcount_set(&nbd->refs, 0);
INIT_LIST_HEAD(&nbd->list);
disk->major = NBD_MAJOR;
/* Too big first_minor can cause duplicate creation of
* sysfs files/links, since index << part_shift might overflow, or
* MKDEV() expect that the max bits of first_minor is 20.
*/
disk->first_minor = index << part_shift;
if (disk->first_minor < index || disk->first_minor > MINORMASK) {
err = -EINVAL;
goto out_free_work;
}
disk->minors = 1 << part_shift;
disk->fops = &nbd_fops;
disk->private_data = nbd;
@@ -1915,8 +1904,19 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
if (!netlink_capable(skb, CAP_SYS_ADMIN))
return -EPERM;
if (info->attrs[NBD_ATTR_INDEX])
if (info->attrs[NBD_ATTR_INDEX]) {
index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
/*
* Too big first_minor can cause duplicate creation of
* sysfs files/links, since index << part_shift might overflow, or
* MKDEV() expect that the max bits of first_minor is 20.
*/
if (index < 0 || index > MINORMASK >> part_shift) {
printk(KERN_ERR "nbd: illegal input index %d\n", index);
return -EINVAL;
}
}
if (!info->attrs[NBD_ATTR_SOCKETS]) {
printk(KERN_ERR "nbd: must specify at least one socket\n");
return -EINVAL;
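
The nbd hunk above moves the minor-number validation into nbd_genl_connect() and phrases it as a bound on the index before the shift (index <= MINORMASK >> part_shift), instead of shifting first and trying to detect the overflow afterwards as the removed nbd_dev_add() code did. A userspace sketch of that check; MINORBITS matches the kernel value, part_shift here is just an example:

/* Reject an over-large index before it is shifted into first_minor. */
#include <stdio.h>

#define MINORBITS	20
#define MINORMASK	((1U << MINORBITS) - 1)

static int index_ok(int index, unsigned int part_shift)
{
	/* same form as the kernel check: bound before index << part_shift */
	return index >= 0 && index <= (int)(MINORMASK >> part_shift);
}

int main(void)
{
	unsigned int part_shift = 5;	/* e.g. 32 partitions per device */

	printf("index 100     -> %s\n", index_ok(100, part_shift) ? "ok" : "rejected");
	printf("index 1 << 16 -> %s\n", index_ok(1 << 16, part_shift) ? "ok" : "rejected");
	return 0;
}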


@@ -931,7 +931,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
if (rc)
goto unmap;
for (n = 0, i = 0; n < nseg; n++) {
for (n = 0; n < nseg; n++) {
uint8_t first_sect, last_sect;
if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {


@@ -576,7 +576,7 @@ struct setup_rw_req {
struct blkif_request *ring_req;
grant_ref_t gref_head;
unsigned int id;
/* Only used when persistent grant is used and it's a read request */
/* Only used when persistent grant is used and it's a write request */
bool need_copy;
unsigned int bvec_off;
char *bvec_data;


@@ -1830,9 +1830,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
nvme_config_discard(disk, ns);
blk_queue_max_write_zeroes_sectors(disk->queue,
ns->ctrl->max_zeroes_sectors);
set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
test_bit(NVME_NS_FORCE_RO, &ns->flags));
}
static inline bool nvme_first_scan(struct gendisk *disk)
@@ -1891,6 +1888,8 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
goto out_unfreeze;
}
set_disk_ro(ns->disk, (id->nsattr & NVME_NS_ATTR_RO) ||
test_bit(NVME_NS_FORCE_RO, &ns->flags));
set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
@@ -1903,6 +1902,9 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
if (nvme_ns_head_multipath(ns->head)) {
blk_mq_freeze_queue(ns->head->disk->queue);
nvme_update_disk_info(ns->head->disk, ns, id);
set_disk_ro(ns->head->disk,
(id->nsattr & NVME_NS_ATTR_RO) ||
test_bit(NVME_NS_FORCE_RO, &ns->flags));
nvme_mpath_revalidate_paths(ns);
blk_stack_limits(&ns->head->disk->queue->limits,
&ns->queue->limits, 0);
@@ -3589,15 +3591,20 @@ static const struct attribute_group *nvme_dev_attr_groups[] = {
NULL,
};
static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
unsigned nsid)
{
struct nvme_ns_head *h;
lockdep_assert_held(&subsys->lock);
lockdep_assert_held(&ctrl->subsys->lock);
list_for_each_entry(h, &subsys->nsheads, entry) {
if (h->ns_id != nsid)
list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
/*
* Private namespaces can share NSIDs under some conditions.
* In that case we can't use the same ns_head for namespaces
* with the same NSID.
*/
if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
continue;
if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
return h;
@@ -3791,7 +3798,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
}
mutex_lock(&ctrl->subsys->lock);
head = nvme_find_ns_head(ctrl->subsys, nsid);
head = nvme_find_ns_head(ctrl, nsid);
if (!head) {
ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
if (ret) {
@@ -3988,6 +3995,16 @@ static void nvme_ns_remove(struct nvme_ns *ns)
set_capacity(ns->disk, 0);
nvme_fault_inject_fini(&ns->fault_inject);
/*
* Ensure that !NVME_NS_READY is seen by other threads to prevent
* this ns going back into current_path.
*/
synchronize_srcu(&ns->head->srcu);
/* wait for concurrent submissions */
if (nvme_mpath_clear_current_path(ns))
synchronize_srcu(&ns->head->srcu);
mutex_lock(&ns->ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
if (list_empty(&ns->head->list)) {
@@ -3999,10 +4016,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
/* guarantee not available in head->list */
synchronize_rcu();
/* wait for concurrent submissions */
if (nvme_mpath_clear_current_path(ns))
synchronize_srcu(&ns->head->srcu);
if (!nvme_ns_head_multipath(ns->head))
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
del_gendisk(ns->disk);
@@ -4480,6 +4493,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
nvme_mpath_update(ctrl);
}
nvme_change_uevent(ctrl, "NVME_EVENT=connected");


@@ -482,10 +482,11 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
/*
* Add a multipath node if the subsystems supports multiple controllers.
* We also do this for private namespaces as the namespace sharing data could
* change after a rescan.
* We also do this for private namespaces as the namespace sharing flag
* could change after a rescan.
*/
if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
!nvme_is_unique_nsid(ctrl, head) || !multipath)
return 0;
head->disk = blk_alloc_disk(ctrl->numa_node);
@@ -612,8 +613,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
ns->ana_grpid = le32_to_cpu(desc->grpid);
ns->ana_state = desc->state;
clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
if (nvme_state_is_live(ns->ana_state))
/*
* nvme_mpath_set_live() will trigger I/O to the multipath path device
* and in turn to this path device. However we cannot accept this I/O
* if the controller is not live. This may deadlock if called from
* nvme_mpath_init_identify() and the ctrl will never complete
* initialization, preventing I/O from completing. For this case we
* will reprocess the ANA log page in nvme_mpath_update() once the
* controller is ready.
*/
if (nvme_state_is_live(ns->ana_state) &&
ns->ctrl->state == NVME_CTRL_LIVE)
nvme_mpath_set_live(ns);
}
@@ -700,6 +710,18 @@ static void nvme_ana_work(struct work_struct *work)
nvme_read_ana_log(ctrl);
}
void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
u32 nr_change_groups = 0;
if (!ctrl->ana_log_buf)
return;
mutex_lock(&ctrl->ana_lock);
nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
mutex_unlock(&ctrl->ana_lock);
}
static void nvme_anatt_timeout(struct timer_list *t)
{
struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);


@@ -723,6 +723,25 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
return queue_live;
return __nvme_check_ready(ctrl, rq, queue_live);
}
/*
* NSID shall be unique for all shared namespaces, or if at least one of the
* following conditions is met:
* 1. Namespace Management is supported by the controller
* 2. ANA is supported by the controller
* 3. NVM Set are supported by the controller
*
* In other case, private namespace are not required to report a unique NSID.
*/
static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
struct nvme_ns_head *head)
{
return head->shared ||
(ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
@@ -782,6 +801,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@@ -853,6 +873,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
return 0;
}
static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
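
nvme_is_unique_nsid() above is the predicate that nvme_find_ns_head() (in the nvme core hunk earlier in this diff) now consults before reusing an existing head, so two private namespaces that report the same NSID end up with separate ns_heads. A toy userspace model of that lookup decision, with the uniqueness test reduced to the shared flag for brevity (the real helper also accepts controllers with Namespace Management, ANA or NVM Sets support):

/* Toy model, not kernel code: reuse a head only when the NSID is unique. */
#include <stdbool.h>
#include <stdio.h>

struct head { unsigned nsid; bool shared; };

/* Simplified stand-in for nvme_is_unique_nsid(): only 'shared' considered. */
static bool is_unique_nsid(const struct head *h) { return h->shared; }

static const struct head *find_head(const struct head *heads, int n, unsigned nsid)
{
	for (int i = 0; i < n; i++) {
		if (heads[i].nsid != nsid || !is_unique_nsid(&heads[i]))
			continue;	/* skip: may be a duplicate private NSID */
		return &heads[i];
	}
	return NULL;	/* caller allocates a new head */
}

int main(void)
{
	struct head heads[] = {
		{ .nsid = 1, .shared = true },	/* shared: NSID is unique */
		{ .nsid = 2, .shared = false },	/* private: NSID may repeat */
	};

	printf("nsid 1: %s\n", find_head(heads, 2, 1) ? "reuse head" : "new head");
	printf("nsid 2: %s\n", find_head(heads, 2, 2) ? "reuse head" : "new head");
	return 0;
}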


@@ -45,7 +45,7 @@
#define NVME_MAX_SEGS 127
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);
module_param(use_threaded_interrupts, int, 0444);
static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0444);
@@ -3467,7 +3467,10 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_128_BYTES_SQES |
NVME_QUIRK_SHARED_TAGS |
NVME_QUIRK_SKIP_CID_GEN },
{ PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY|
NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ 0, }
};


@@ -988,7 +988,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
mutex_unlock(&ctrl->lock);
schedule_work(&ctrl->async_event_work);
queue_work(nvmet_wq, &ctrl->async_event_work);
}
void nvmet_execute_keep_alive(struct nvmet_req *req)


@@ -1555,7 +1555,7 @@ static void nvmet_port_release(struct config_item *item)
struct nvmet_port *port = to_nvmet_port(item);
/* Let inflight controllers teardown complete */
flush_scheduled_work();
flush_workqueue(nvmet_wq);
list_del(&port->global_entry);
kfree(port->ana_state);


@@ -20,6 +20,9 @@ struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
struct workqueue_struct *nvmet_wq;
EXPORT_SYMBOL_GPL(nvmet_wq);
/*
* This read/write semaphore is used to synchronize access to configuration
* information on a target system that will result in discovery log page
@@ -205,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
list_add_tail(&aen->entry, &ctrl->async_events);
mutex_unlock(&ctrl->lock);
schedule_work(&ctrl->async_event_work);
queue_work(nvmet_wq, &ctrl->async_event_work);
}
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -385,7 +388,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
if (reset_tbkas) {
pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
ctrl->cntlid);
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
return;
}
@@ -403,7 +406,7 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
pr_debug("ctrl %d start keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -1120,7 +1123,7 @@ static inline u8 nvmet_cc_iocqes(u32 cc)
static inline bool nvmet_css_supported(u8 cc_css)
{
switch (cc_css <<= NVME_CC_CSS_SHIFT) {
switch (cc_css << NVME_CC_CSS_SHIFT) {
case NVME_CC_CSS_NVM:
case NVME_CC_CSS_CSI:
return true;
@@ -1478,7 +1481,7 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
mutex_lock(&ctrl->lock);
if (!(ctrl->csts & NVME_CSTS_CFS)) {
ctrl->csts |= NVME_CSTS_CFS;
schedule_work(&ctrl->fatal_err_work);
queue_work(nvmet_wq, &ctrl->fatal_err_work);
}
mutex_unlock(&ctrl->lock);
}
@@ -1619,9 +1622,15 @@ static int __init nvmet_init(void)
goto out_free_zbd_work_queue;
}
nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
if (!nvmet_wq) {
error = -ENOMEM;
goto out_free_buffered_work_queue;
}
error = nvmet_init_discovery();
if (error)
goto out_free_work_queue;
goto out_free_nvmet_work_queue;
error = nvmet_init_configfs();
if (error)
@@ -1630,7 +1639,9 @@ static int __init nvmet_init(void)
out_exit_discovery:
nvmet_exit_discovery();
out_free_work_queue:
out_free_nvmet_work_queue:
destroy_workqueue(nvmet_wq);
out_free_buffered_work_queue:
destroy_workqueue(buffered_io_wq);
out_free_zbd_work_queue:
destroy_workqueue(zbd_wq);
@@ -1642,6 +1653,7 @@ static void __exit nvmet_exit(void)
nvmet_exit_configfs();
nvmet_exit_discovery();
ida_destroy(&cntlid_ida);
destroy_workqueue(nvmet_wq);
destroy_workqueue(buffered_io_wq);
destroy_workqueue(zbd_wq);
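
The nvmet core hunks above, and the per-transport hunks that follow, replace schedule_work()/flush_scheduled_work() with a driver-private nvmet_wq, so the target flushes only its own work and the queue can be created with WQ_MEM_RECLAIM. A minimal module-style sketch of that pattern with illustrative names, not the actual nvmet code:

/* Private workqueue pattern: allocate, queue, flush and destroy your own wq. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
	pr_info("example work ran\n");
}

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example-wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_work, example_work_fn);
	queue_work(example_wq, &example_work);	/* was: schedule_work() */
	return 0;
}

static void __exit example_exit(void)
{
	flush_workqueue(example_wq);		/* was: flush_scheduled_work() */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");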


@@ -1491,7 +1491,7 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
if (!schedule_work(&assoc->del_work))
if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
@@ -1546,7 +1546,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
continue;
assoc->hostport->invalid = 1;
noassoc = false;
if (!schedule_work(&assoc->del_work))
if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
@@ -1592,7 +1592,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
if (!schedule_work(&assoc->del_work))
if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
return;
@@ -2060,7 +2060,7 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
iod->rqstdatalen = lsreqbuf_len;
iod->hosthandle = hosthandle;
schedule_work(&iod->work);
queue_work(nvmet_wq, &iod->work);
return 0;
}


@@ -360,7 +360,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
spin_lock(&rport->lock);
list_add_tail(&rport->ls_list, &tls_req->ls_list);
spin_unlock(&rport->lock);
schedule_work(&rport->ls_work);
queue_work(nvmet_wq, &rport->ls_work);
return ret;
}
@@ -393,7 +393,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
spin_lock(&rport->lock);
list_add_tail(&rport->ls_list, &tls_req->ls_list);
spin_unlock(&rport->lock);
schedule_work(&rport->ls_work);
queue_work(nvmet_wq, &rport->ls_work);
}
return 0;
@@ -448,7 +448,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
spin_lock(&tport->lock);
list_add_tail(&tport->ls_list, &tls_req->ls_list);
spin_unlock(&tport->lock);
schedule_work(&tport->ls_work);
queue_work(nvmet_wq, &tport->ls_work);
return ret;
}
@@ -480,7 +480,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
spin_lock(&tport->lock);
list_add_tail(&tport->ls_list, &tls_req->ls_list);
spin_unlock(&tport->lock);
schedule_work(&tport->ls_work);
queue_work(nvmet_wq, &tport->ls_work);
}
return 0;
@@ -520,7 +520,7 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
tgt_rscn->tport = tgtport->private;
INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
schedule_work(&tgt_rscn->work);
queue_work(nvmet_wq, &tgt_rscn->work);
}
static void
@@ -739,7 +739,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
kref_init(&tfcp_req->ref);
schedule_work(&tfcp_req->fcp_rcv_work);
queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
return 0;
}
@@ -921,7 +921,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
{
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
schedule_work(&tfcp_req->tio_done_work);
queue_work(nvmet_wq, &tfcp_req->tio_done_work);
}
static void
@@ -976,7 +976,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
if (abortio)
/* leave the reference while the work item is scheduled */
WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
else {
/*
* as the io has already had the done callback made,


@@ -283,7 +283,7 @@ static void nvmet_file_execute_flush(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, 0))
return;
INIT_WORK(&req->f.work, nvmet_file_flush_work);
schedule_work(&req->f.work);
queue_work(nvmet_wq, &req->f.work);
}
static void nvmet_file_execute_discard(struct nvmet_req *req)
@@ -343,7 +343,7 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
return;
INIT_WORK(&req->f.work, nvmet_file_dsm_work);
schedule_work(&req->f.work);
queue_work(nvmet_wq, &req->f.work);
}
static void nvmet_file_write_zeroes_work(struct work_struct *w)
@@ -373,7 +373,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, 0))
return;
INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
schedule_work(&req->f.work);
queue_work(nvmet_wq, &req->f.work);
}
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)


@@ -166,7 +166,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
iod->req.transfer_len = blk_rq_payload_bytes(req);
}
schedule_work(&iod->work);
queue_work(nvmet_wq, &iod->work);
return BLK_STS_OK;
}
@@ -187,7 +187,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
return;
}
schedule_work(&iod->work);
queue_work(nvmet_wq, &iod->work);
}
static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,


@@ -366,6 +366,7 @@ struct nvmet_req {
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{


@@ -283,7 +283,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
if (req->p.use_workqueue || effects) {
INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
req->p.rq = rq;
schedule_work(&req->p.work);
queue_work(nvmet_wq, &req->p.work);
} else {
rq->end_io_data = req;
blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);


@@ -1584,7 +1584,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
flush_scheduled_work();
flush_workqueue(nvmet_wq);
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1669,7 +1669,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
if (disconnect) {
rdma_disconnect(queue->cm_id);
schedule_work(&queue->release_work);
queue_work(nvmet_wq, &queue->release_work);
}
}
@@ -1699,7 +1699,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
mutex_unlock(&nvmet_rdma_queue_mutex);
pr_err("failed to connect queue %d\n", queue->idx);
schedule_work(&queue->release_work);
queue_work(nvmet_wq, &queue->release_work);
}
/**
@@ -1773,7 +1773,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
if (!queue) {
struct nvmet_rdma_port *port = cm_id->context;
schedule_delayed_work(&port->repair_work, 0);
queue_delayed_work(nvmet_wq, &port->repair_work, 0);
break;
}
fallthrough;
@@ -1903,7 +1903,7 @@ static void nvmet_rdma_repair_port_work(struct work_struct *w)
nvmet_rdma_disable_port(port);
ret = nvmet_rdma_enable_port(port);
if (ret)
schedule_delayed_work(&port->repair_work, 5 * HZ);
queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
}
static int nvmet_rdma_add_port(struct nvmet_port *nport)
@@ -2053,7 +2053,7 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
}
mutex_unlock(&nvmet_rdma_queue_mutex);
flush_scheduled_work();
flush_workqueue(nvmet_wq);
}
static struct ib_client nvmet_rdma_ib_client = {


@@ -1269,7 +1269,7 @@ static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
spin_lock(&queue->state_lock);
if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
queue->state = NVMET_TCP_Q_DISCONNECTING;
schedule_work(&queue->release_work);
queue_work(nvmet_wq, &queue->release_work);
}
spin_unlock(&queue->state_lock);
}
@@ -1684,7 +1684,7 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
goto out;
if (sk->sk_state == TCP_LISTEN)
schedule_work(&port->accept_work);
queue_work(nvmet_wq, &port->accept_work);
out:
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -1815,7 +1815,7 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
if (sq->qid == 0) {
/* Let inflight controller teardown complete */
flush_scheduled_work();
flush_workqueue(nvmet_wq);
}
queue->nr_cmds = sq->size * 2;
@@ -1876,12 +1876,12 @@ static void __exit nvmet_tcp_exit(void)
nvmet_unregister_transport(&nvmet_tcp_ops);
flush_scheduled_work();
flush_workqueue(nvmet_wq);
mutex_lock(&nvmet_tcp_queue_mutex);
list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
mutex_unlock(&nvmet_tcp_queue_mutex);
flush_scheduled_work();
flush_workqueue(nvmet_wq);
destroy_workqueue(nvmet_tcp_wq);
}


@@ -346,6 +346,7 @@ enum {
NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
NVME_CTRL_VWC_PRESENT = 1 << 0,
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3,
NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,


@@ -45,7 +45,7 @@ struct loop_info {
unsigned long lo_inode; /* ioctl r/o */
__kernel_old_dev_t lo_rdevice; /* ioctl r/o */
int lo_offset;
int lo_encrypt_type;
int lo_encrypt_type; /* obsolete, ignored */
int lo_encrypt_key_size; /* ioctl w/o */
int lo_flags;
char lo_name[LO_NAME_SIZE];
@@ -61,7 +61,7 @@ struct loop_info64 {
__u64 lo_offset;
__u64 lo_sizelimit;/* bytes, 0 == max available */
__u32 lo_number; /* ioctl r/o */
__u32 lo_encrypt_type;
__u32 lo_encrypt_type; /* obsolete, ignored */
__u32 lo_encrypt_key_size; /* ioctl w/o */
__u32 lo_flags;
__u8 lo_file_name[LO_NAME_SIZE];