Merge tag 'nvme-6.5-2023-07-13' of git://git.infradead.org/nvme into block-6.5

Pull NVMe fixes from Keith:

"nvme fixes for Linux 6.5

 - Don't require quirk to use duplicate namespace identifiers
   (Christoph, Sagi)
 - One more BOGUS_NID quirk (Pankaj)
 - IO timeout and error handling fixes for PCI (Keith)
 - Enhanced metadata format mask fix (Ankit)
 - Association race condition fix for fibre channel (Michael)
 - Correct debugfs error checks (Minjie)
 - Use PAGE_SECTORS_SHIFT where needed (Damien)
 - Reduce kernel logs for legacy nguid attribute (Keith)
 - Use correct dma direction when unmapping metadata (Ming)"

* tag 'nvme-6.5-2023-07-13' of git://git.infradead.org/nvme:
  nvme-pci: fix DMA direction of unmapping integrity data
  nvme: don't reject probe due to duplicate IDs for single-ported PCIe devices
  nvme: ensure disabling pairs with unquiesce
  nvme-fc: fix race between error recovery and creating association
  nvme-fc: return non-zero status code when fails to create association
  nvme: fix parameter check in nvme_fault_inject_init()
  nvme: warn only once for legacy uuid attribute
  nvme: fix the NVME_ID_NS_NVM_STS_MASK definition
  nvmet: use PAGE_SECTORS_SHIFT
  nvme: add BOGUS_NID quirk for Samsung SM953
Jens Axboe 2023-07-13 14:26:38 -06:00
Parents: 5c17f45e91 b8f6446b68
Commit: 90b4622954
8 changed files with 89 additions and 25 deletions

drivers/nvme/host/core.c

@@ -3431,10 +3431,40 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
 
     ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
     if (ret) {
-        dev_err(ctrl->device,
-            "globally duplicate IDs for nsid %d\n", info->nsid);
+        /*
+         * We've found two different namespaces on two different
+         * subsystems that report the same ID. This is pretty nasty
+         * for anything that actually requires unique device
+         * identification. In the kernel we need this for multipathing,
+         * and in user space the /dev/disk/by-id/ links rely on it.
+         *
+         * If the device also claims to be multi-path capable back off
+         * here now and refuse the probe the second device as this is a
+         * recipe for data corruption. If not this is probably a
+         * cheap consumer device if on the PCIe bus, so let the user
+         * proceed and use the shiny toy, but warn that with changing
+         * probing order (which due to our async probing could just be
+         * device taking longer to startup) the other device could show
+         * up at any time.
+         */
         nvme_print_device_info(ctrl);
-        return ret;
+        if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */
+            ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) &&
+             info->is_shared)) {
+            dev_err(ctrl->device,
+                "ignoring nsid %d because of duplicate IDs\n",
+                info->nsid);
+            return ret;
+        }
+
+        dev_err(ctrl->device,
+            "clearing duplicate IDs for nsid %d\n", info->nsid);
+        dev_err(ctrl->device,
+            "use of /dev/disk/by-id/ may cause data corruption\n");
+        memset(&info->ids.nguid, 0, sizeof(info->ids.nguid));
+        memset(&info->ids.uuid, 0, sizeof(info->ids.uuid));
+        memset(&info->ids.eui64, 0, sizeof(info->ids.eui64));
+        ctrl->quirks |= NVME_QUIRK_BOGUS_NID;
     }
 
     mutex_lock(&ctrl->subsys->lock);
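The long comment in this hunk boils down to a two-way decision. As an illustration only (a hypothetical standalone predicate, not kernel code), the fallback logic reads:

#include <stdbool.h>

/*
 * Sketch of the decision above: duplicate IDs are fatal where unique
 * identification is mandatory, and merely quirked away otherwise.
 */
static bool nvme_must_reject_duplicate_ids(bool is_fabrics, bool multi_ctrl,
                                           bool ns_is_shared)
{
    /* Fabrics controllers or a shared multi-controller namespace:
     * duplicates break multipathing, so refuse the second device. */
    if (is_fabrics || (multi_ctrl && ns_is_shared))
        return true;

    /* Otherwise: assume a single-ported consumer PCIe device, clear
     * the bogus IDs and set NVME_QUIRK_BOGUS_NID instead. */
    return false;
}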

drivers/nvme/host/fault_inject.c

@@ -27,7 +27,7 @@ void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
 
     /* create debugfs directory and attribute */
     parent = debugfs_create_dir(dev_name, NULL);
-    if (!parent) {
+    if (IS_ERR(parent)) {
         pr_warn("%s: failed to create debugfs directory\n", dev_name);
         return;
     }
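Background for this one-liner: debugfs_create_dir() reports failure by returning an ERR_PTR(), never NULL, so the old check could not trigger. A minimal sketch of the corrected pattern (hypothetical caller):

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/printk.h>

/* Hypothetical helper showing the IS_ERR() idiom the fix adopts. */
static struct dentry *example_create_debugfs_dir(const char *name)
{
    struct dentry *dir = debugfs_create_dir(name, NULL);

    if (IS_ERR(dir)) {        /* NULL is never returned here */
        pr_warn("%s: failed to create debugfs directory\n", name);
        return NULL;
    }
    return dir;
}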

drivers/nvme/host/fc.c

@@ -2548,14 +2548,24 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
     * the controller. Abort any ios on the association and let the
     * create_association error path resolve things.
     */
-    if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
-        __nvme_fc_abort_outstanding_ios(ctrl, true);
+    enum nvme_ctrl_state state;
+    unsigned long flags;
+
+    spin_lock_irqsave(&ctrl->lock, flags);
+    state = ctrl->ctrl.state;
+    if (state == NVME_CTRL_CONNECTING) {
         set_bit(ASSOC_FAILED, &ctrl->flags);
+        spin_unlock_irqrestore(&ctrl->lock, flags);
+        __nvme_fc_abort_outstanding_ios(ctrl, true);
+        dev_warn(ctrl->ctrl.device,
+            "NVME-FC{%d}: transport error during (re)connect\n",
+            ctrl->cnum);
         return;
     }
+    spin_unlock_irqrestore(&ctrl->lock, flags);
 
     /* Otherwise, only proceed if in LIVE state - e.g. on first error */
-    if (ctrl->ctrl.state != NVME_CTRL_LIVE)
+    if (state != NVME_CTRL_LIVE)
         return;
 
     dev_warn(ctrl->ctrl.device,
@@ -3110,7 +3120,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
     */
 
     ret = nvme_enable_ctrl(&ctrl->ctrl);
-    if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
+    if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
+        ret = -EIO;
+    if (ret)
         goto out_disconnect_admin_queue;
 
     ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
@@ -3120,7 +3132,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
     nvme_unquiesce_admin_queue(&ctrl->ctrl);
 
     ret = nvme_init_ctrl_finish(&ctrl->ctrl, false);
-    if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
+    if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
+        ret = -EIO;
+    if (ret)
         goto out_disconnect_admin_queue;
 
     /* sanity checks */
@@ -3165,10 +3179,16 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
         else
             ret = nvme_fc_recreate_io_queues(ctrl);
     }
-    if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
-        goto out_term_aen_ops;
+
+    spin_lock_irqsave(&ctrl->lock, flags);
+    if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
+        ret = -EIO;
+    if (ret) {
+        spin_unlock_irqrestore(&ctrl->lock, flags);
+        goto out_term_aen_ops;
+    }
     changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+    spin_unlock_irqrestore(&ctrl->lock, flags);
 
     ctrl->ctrl.nr_reconnects = 0;
@@ -3180,6 +3200,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 out_term_aen_ops:
     nvme_fc_term_aen_ops(ctrl);
 out_disconnect_admin_queue:
+    dev_warn(ctrl->ctrl.device,
+        "NVME-FC{%d}: create_assoc failed, assoc_id %llx ret %d\n",
+        ctrl->cnum, ctrl->association_id, ret);
     /* send a Disconnect(association) LS to fc-nvme target */
     nvme_fc_xmt_disconnect_assoc(ctrl);
     spin_lock_irqsave(&ctrl->lock, flags);
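Condensed, the synchronization these hunks introduce looks as follows (illustrative sketch, not verbatim kernel code): both sides sample the controller state and the ASSOC_FAILED bit under ctrl->lock, so an error arriving during connect is either handled by the recovery path or reliably turned into -EIO before the controller can go LIVE.

/* error-recovery side: flag the failure under the lock */
spin_lock_irqsave(&ctrl->lock, flags);
if (ctrl->ctrl.state == NVME_CTRL_CONNECTING)
    set_bit(ASSOC_FAILED, &ctrl->flags);
spin_unlock_irqrestore(&ctrl->lock, flags);

/* create-association side: re-check under the same lock before
 * committing to NVME_CTRL_LIVE */
spin_lock_irqsave(&ctrl->lock, flags);
if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
    ret = -EIO;
if (!ret)
    changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
spin_unlock_irqrestore(&ctrl->lock, flags);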

drivers/nvme/host/pci.c

@@ -967,7 +967,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req)
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
         dma_unmap_page(dev->dev, iod->meta_dma,
-                   rq_integrity_vec(req)->bv_len, rq_data_dir(req));
+                   rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
     }
 
     if (blk_rq_nr_phys_segments(req))
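This small change matters because dma_unmap_page() takes an enum dma_data_direction, while rq_data_dir() returns the block layer's READ/WRITE value. rq_dma_dir() performs the correct translation; its definition in include/linux/blk-mq.h is:

#define rq_dma_dir(rq) \
    (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)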
@@ -1298,9 +1298,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
     */
     if (nvme_should_reset(dev, csts)) {
         nvme_warn_reset(dev, csts);
-        nvme_dev_disable(dev, false);
-        nvme_reset_ctrl(&dev->ctrl);
-        return BLK_EH_DONE;
+        goto disable;
     }
 
     /*
@@ -1351,10 +1349,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
          "I/O %d QID %d timeout, reset controller\n",
          req->tag, nvmeq->qid);
         nvme_req(req)->flags |= NVME_REQ_CANCELLED;
-        nvme_dev_disable(dev, false);
-        nvme_reset_ctrl(&dev->ctrl);
-        return BLK_EH_DONE;
+        goto disable;
     }
 
     if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
@@ -1391,6 +1386,15 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
     * as the device then is in a faulty state.
     */
     return BLK_EH_RESET_TIMER;
+
+disable:
+    if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+        return BLK_EH_DONE;
+
+    nvme_dev_disable(dev, false);
+    if (nvme_try_sched_reset(&dev->ctrl))
+        nvme_unquiesce_io_queues(&dev->ctrl);
+    return BLK_EH_DONE;
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -3278,6 +3282,10 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
     case pci_channel_io_frozen:
         dev_warn(dev->ctrl.device,
             "frozen state error detected, reset controller\n");
+        if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
+            nvme_dev_disable(dev, true);
+            return PCI_ERS_RESULT_DISCONNECT;
+        }
         nvme_dev_disable(dev, false);
         return PCI_ERS_RESULT_NEED_RESET;
     case pci_channel_io_perm_failure:
@@ -3294,7 +3302,8 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
     dev_info(dev->ctrl.device, "restart after slot reset\n");
     pci_restore_state(pdev);
-    nvme_reset_ctrl(&dev->ctrl);
+    if (!nvme_try_sched_reset(&dev->ctrl))
+        nvme_unquiesce_io_queues(&dev->ctrl);
     return PCI_ERS_RESULT_RECOVERED;
 }
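For orientation, both PCI error hooks patched above are driven by the AER recovery sequence through the driver's error-handler table, which looks roughly like this (sketch; the in-tree table carries a few more callbacks):

static const struct pci_error_handlers nvme_err_handler = {
    .error_detected = nvme_error_detected, /* frozen: claim RESETTING, disable */
    .slot_reset     = nvme_slot_reset,     /* after link reset: reschedule reset */
    .resume         = nvme_error_resume,
};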
@@ -3396,6 +3405,8 @@ static const struct pci_device_id nvme_id_table[] = {
         .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
     { PCI_DEVICE(0x144d, 0xa809),   /* Samsung MZALQ256HBJD 256G */
         .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+    { PCI_DEVICE(0x144d, 0xa802),   /* Samsung SM953 */
+        .driver_data = NVME_QUIRK_BOGUS_NID, },
     { PCI_DEVICE(0x1cc4, 0x6303),   /* UMIS RPJTJ512MGE1QDY 512G */
         .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
     { PCI_DEVICE(0x1cc4, 0x6302),   /* UMIS RPJTJ256MGE1QDY 256G */

drivers/nvme/host/sysfs.c

@@ -92,7 +92,7 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
     * we have no UUID set
     */
     if (uuid_is_null(&ids->uuid)) {
-        dev_warn_ratelimited(dev,
+        dev_warn_once(dev,
             "No UUID available providing old NGUID\n");
         return sysfs_emit(buf, "%pU\n", ids->nguid);
     }

drivers/nvme/target/loop.c

@@ -373,7 +373,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
         goto out_cleanup_tagset;
 
     ctrl->ctrl.max_hw_sectors =
-        (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
+        (NVME_LOOP_MAX_SEGMENTS - 1) << PAGE_SECTORS_SHIFT;
 
     nvme_unquiesce_admin_queue(&ctrl->ctrl);

drivers/nvme/target/passthru.c

@@ -102,14 +102,14 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
     * which depends on the host's memory fragementation. To solve this,
     * ensure mdts is limited to the pages equal to the number of segments.
     */
-    max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
+    max_hw_sectors = min_not_zero(pctrl->max_segments << PAGE_SECTORS_SHIFT,
                       pctrl->max_hw_sectors);
 
     /*
     * nvmet_passthru_map_sg is limitted to using a single bio so limit
     * the mdts based on BIO_MAX_VECS as well
     */
-    max_hw_sectors = min_not_zero(BIO_MAX_VECS << (PAGE_SHIFT - 9),
+    max_hw_sectors = min_not_zero(BIO_MAX_VECS << PAGE_SECTORS_SHIFT,
                       max_hw_sectors);
 
     page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
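This conversion, like the one in loop.c above, leans on the generic definitions from include/linux/blkdev.h (abridged here), which make the open-coded PAGE_SHIFT - 9 self-documenting:

#define SECTOR_SHIFT        9
#define PAGE_SECTORS_SHIFT  (PAGE_SHIFT - SECTOR_SHIFT)

/* Example: with 4 KiB pages (PAGE_SHIFT == 12), one page spans
 * 1 << PAGE_SECTORS_SHIFT == 8 sectors of 512 bytes. */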

include/linux/nvme.h

@@ -473,7 +473,7 @@ struct nvme_id_ns_nvm {
 };
 
 enum {
-    NVME_ID_NS_NVM_STS_MASK     = 0x3f,
+    NVME_ID_NS_NVM_STS_MASK     = 0x7f,
     NVME_ID_NS_NVM_GUARD_SHIFT  = 7,
     NVME_ID_NS_NVM_GUARD_MASK   = 0x3,
 };
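The widened mask matches the Extended LBA Format (ELBAF) field layout in the Identify Namespace, NVM Command Set data structure: Storage Tag Size occupies bits 6:0 (seven bits, hence 0x7f), with the protection information guard type in bits 8:7. A decoding sketch, assuming nvm points at a struct nvme_id_ns_nvm and lbaf is the active format index:

__u32 elbaf = le32_to_cpu(nvm->elbaf[lbaf]);
__u32 sts = elbaf & NVME_ID_NS_NVM_STS_MASK;    /* storage tag size, bits 6:0 */
__u32 guard = (elbaf >> NVME_ID_NS_NVM_GUARD_SHIFT) &
          NVME_ID_NS_NVM_GUARD_MASK;            /* PI guard type, bits 8:7 */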