// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/libnvdimm.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fcntl.h>
#include <linux/async.h>
#include <linux/genhd.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
#include "pfn.h"

int nvdimm_major;
static int nvdimm_bus_major;
struct class *nd_class;
static DEFINE_IDA(nd_ida);

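/*
 * Map a device on the nd bus to the ND_DEVICE_* type used for its
 * MODALIAS uevent and for driver matching.
 */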
static int to_nd_device_type(struct device *dev)
{
        if (is_nvdimm(dev))
                return ND_DEVICE_DIMM;
        else if (is_memory(dev))
                return ND_DEVICE_REGION_PMEM;
        else if (is_nd_blk(dev))
                return ND_DEVICE_REGION_BLK;
        else if (is_nd_dax(dev))
                return ND_DEVICE_DAX_PMEM;
        else if (is_nd_region(dev->parent))
                return nd_region_to_nstype(to_nd_region(dev->parent));

        return 0;
}

static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
                        to_nd_device_type(dev));
}

static struct module *to_bus_provider(struct device *dev)
{
        /* pin bus providers while regions are enabled */
        if (is_nd_region(dev)) {
                struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

                return nvdimm_bus->nd_desc->module;
        }
        return NULL;
}

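/*
 * Track in-flight ->probe() invocations so that label-update paths can
 * wait for the bus to go idle (see wait_nvdimm_bus_probe_idle()).
 */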
static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus)
{
        nvdimm_bus_lock(&nvdimm_bus->dev);
        nvdimm_bus->probe_active++;
        nvdimm_bus_unlock(&nvdimm_bus->dev);
}

static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
{
        nvdimm_bus_lock(&nvdimm_bus->dev);
        if (--nvdimm_bus->probe_active == 0)
                wake_up(&nvdimm_bus->wait);
        nvdimm_bus_unlock(&nvdimm_bus->dev);
}

static int nvdimm_bus_probe(struct device *dev)
{
        struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
        struct module *provider = to_bus_provider(dev);
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        int rc;

        if (!try_module_get(provider))
                return -ENXIO;

        dev_dbg(&nvdimm_bus->dev, "START: %s.probe(%s)\n",
                        dev->driver->name, dev_name(dev));

        nvdimm_bus_probe_start(nvdimm_bus);
        debug_nvdimm_lock(dev);
        rc = nd_drv->probe(dev);
        debug_nvdimm_unlock(dev);

        if ((rc == 0 || rc == -EOPNOTSUPP) &&
                        dev->parent && is_nd_region(dev->parent))
                nd_region_advance_seeds(to_nd_region(dev->parent), dev);
        nvdimm_bus_probe_end(nvdimm_bus);

        dev_dbg(&nvdimm_bus->dev, "END: %s.probe(%s) = %d\n", dev->driver->name,
                        dev_name(dev), rc);

        if (rc != 0)
                module_put(provider);
        return rc;
}

static int nvdimm_bus_remove(struct device *dev)
{
        struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
        struct module *provider = to_bus_provider(dev);
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        int rc = 0;

        if (nd_drv->remove) {
                debug_nvdimm_lock(dev);
                rc = nd_drv->remove(dev);
                debug_nvdimm_unlock(dev);
        }

        dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
                        dev_name(dev), rc);
        module_put(provider);
        return rc;
}

static void nvdimm_bus_shutdown(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct nd_device_driver *nd_drv = NULL;

        if (dev->driver)
                nd_drv = to_nd_device_driver(dev->driver);

        if (nd_drv && nd_drv->shutdown) {
                nd_drv->shutdown(dev);
                dev_dbg(&nvdimm_bus->dev, "%s.shutdown(%s)\n",
                                dev->driver->name, dev_name(dev));
        }
}

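/*
 * Forward an nvdimm_event to the driver bound to @dev, if any, under the
 * device lock so the driver cannot unbind while the notification runs.
 */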
void nd_device_notify(struct device *dev, enum nvdimm_event event)
{
        nd_device_lock(dev);
        if (dev->driver) {
                struct nd_device_driver *nd_drv;

                nd_drv = to_nd_device_driver(dev->driver);
                if (nd_drv->notify)
                        nd_drv->notify(dev, event);
        }
        nd_device_unlock(dev);
}
EXPORT_SYMBOL(nd_device_notify);

void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);

        if (!nvdimm_bus)
                return;

        /* caller is responsible for holding a reference on the device */
        nd_device_notify(&nd_region->dev, event);
}
EXPORT_SYMBOL_GPL(nvdimm_region_notify);

struct clear_badblocks_context {
        resource_size_t phys, cleared;
};

static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
{
        struct clear_badblocks_context *ctx = data;
        struct nd_region *nd_region;
        resource_size_t ndr_end;
        sector_t sector;

        /* make sure device is a region */
        if (!is_memory(dev))
                return 0;

        nd_region = to_nd_region(dev);
        ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;

        /* make sure we are in the region */
        if (ctx->phys < nd_region->ndr_start
                        || (ctx->phys + ctx->cleared) > ndr_end)
                return 0;

        sector = (ctx->phys - nd_region->ndr_start) / 512;
        badblocks_clear(&nd_region->bb, sector, ctx->cleared / 512);

        if (nd_region->bb_state)
                sysfs_notify_dirent(nd_region->bb_state);

        return 0;
}

static void nvdimm_clear_badblocks_regions(struct nvdimm_bus *nvdimm_bus,
                phys_addr_t phys, u64 cleared)
{
        struct clear_badblocks_context ctx = {
                .phys = phys,
                .cleared = cleared,
        };

        device_for_each_child(&nvdimm_bus->dev, &ctx,
                        nvdimm_clear_badblocks_region);
}

static void nvdimm_account_cleared_poison(struct nvdimm_bus *nvdimm_bus,
                phys_addr_t phys, u64 cleared)
{
        if (cleared > 0)
                badrange_forget(&nvdimm_bus->badrange, phys, cleared);

        if (cleared > 0 && cleared / 512)
                nvdimm_clear_badblocks_regions(nvdimm_bus, phys, cleared);
}

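/*
 * Clear media errors in [phys, phys + len): query ARS capabilities to
 * validate alignment against clear_err_unit, issue ND_CMD_CLEAR_ERROR,
 * then update badrange and badblocks accounting. Returns the number of
 * bytes cleared or a negative error code.
 */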
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
                unsigned int len)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc;
        struct nd_cmd_clear_error clear_err;
        struct nd_cmd_ars_cap ars_cap;
        u32 clear_err_unit, mask;
        unsigned int noio_flag;
        int cmd_rc, rc;

        if (!nvdimm_bus)
                return -ENXIO;

        nd_desc = nvdimm_bus->nd_desc;
        /*
         * if ndctl does not exist, it's PMEM_LEGACY and
         * we want to just pretend everything is handled.
         */
        if (!nd_desc->ndctl)
                return len;

        memset(&ars_cap, 0, sizeof(ars_cap));
        ars_cap.address = phys;
        ars_cap.length = len;
        noio_flag = memalloc_noio_save();
        rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, &ars_cap,
                        sizeof(ars_cap), &cmd_rc);
        memalloc_noio_restore(noio_flag);
        if (rc < 0)
                return rc;
        if (cmd_rc < 0)
                return cmd_rc;
        clear_err_unit = ars_cap.clear_err_unit;
        if (!clear_err_unit || !is_power_of_2(clear_err_unit))
                return -ENXIO;

        mask = clear_err_unit - 1;
        if ((phys | len) & mask)
                return -ENXIO;
        memset(&clear_err, 0, sizeof(clear_err));
        clear_err.address = phys;
        clear_err.length = len;
        noio_flag = memalloc_noio_save();
        rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CLEAR_ERROR, &clear_err,
                        sizeof(clear_err), &cmd_rc);
        memalloc_noio_restore(noio_flag);
        if (rc < 0)
                return rc;
        if (cmd_rc < 0)
                return cmd_rc;

        nvdimm_account_cleared_poison(nvdimm_bus, phys, clear_err.cleared);

        return clear_err.cleared;
}
EXPORT_SYMBOL_GPL(nvdimm_clear_poison);

static int nvdimm_bus_match(struct device *dev, struct device_driver *drv);

static struct bus_type nvdimm_bus_type = {
        .name = "nd",
        .uevent = nvdimm_bus_uevent,
        .match = nvdimm_bus_match,
        .probe = nvdimm_bus_probe,
        .remove = nvdimm_bus_remove,
        .shutdown = nvdimm_bus_shutdown,
};

static void nvdimm_bus_release(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus;

        nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
        ida_simple_remove(&nd_ida, nvdimm_bus->id);
        kfree(nvdimm_bus);
}

static const struct device_type nvdimm_bus_dev_type = {
        .release = nvdimm_bus_release,
        .groups = nvdimm_bus_attribute_groups,
};

bool is_nvdimm_bus(struct device *dev)
{
        return dev->type == &nvdimm_bus_dev_type;
}

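/* walk up the parent chain from @nd_dev until the owning nvdimm_bus is found */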
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
{
        struct device *dev;

        for (dev = nd_dev; dev; dev = dev->parent)
                if (is_nvdimm_bus(dev))
                        break;
        dev_WARN_ONCE(nd_dev, !dev, "invalid dev, not on nd bus\n");
        if (dev)
                return to_nvdimm_bus(dev);
        return NULL;
}

struct nvdimm_bus *to_nvdimm_bus(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus;

        nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
        WARN_ON(!is_nvdimm_bus(dev));
        return nvdimm_bus;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus);

struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm)
{
        return to_nvdimm_bus(nvdimm->dev.parent);
}
EXPORT_SYMBOL_GPL(nvdimm_to_bus);

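/*
 * Allocate and register an nvdimm_bus ("ndbusN") for a bus provider
 * described by @nd_desc; on failure the partially initialized bus is
 * released via put_device() and NULL is returned.
 */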
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
                struct nvdimm_bus_descriptor *nd_desc)
{
        struct nvdimm_bus *nvdimm_bus;
        int rc;

        nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL);
        if (!nvdimm_bus)
                return NULL;
        INIT_LIST_HEAD(&nvdimm_bus->list);
        INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
        init_waitqueue_head(&nvdimm_bus->wait);
        nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
        if (nvdimm_bus->id < 0) {
                kfree(nvdimm_bus);
                return NULL;
        }
        mutex_init(&nvdimm_bus->reconfig_mutex);
        badrange_init(&nvdimm_bus->badrange);
        nvdimm_bus->nd_desc = nd_desc;
        nvdimm_bus->dev.parent = parent;
        nvdimm_bus->dev.type = &nvdimm_bus_dev_type;
        nvdimm_bus->dev.groups = nd_desc->attr_groups;
        nvdimm_bus->dev.bus = &nvdimm_bus_type;
        nvdimm_bus->dev.of_node = nd_desc->of_node;
        dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
        rc = device_register(&nvdimm_bus->dev);
        if (rc) {
                dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc);
                goto err;
        }

        return nvdimm_bus;
 err:
        put_device(&nvdimm_bus->dev);
        return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_register);

void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
{
        if (!nvdimm_bus)
                return;
        device_unregister(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_unregister);

static int child_unregister(struct device *dev, void *data)
{
        /*
         * the singular ndctl class device per bus needs to be
         * "device_destroy"ed, so skip it here
         *
         * i.e. remove classless children
         */
        if (dev->class)
                return 0;

        if (is_nvdimm(dev)) {
                struct nvdimm *nvdimm = to_nvdimm(dev);
                bool dev_put = false;

                /* We are shutting down. Make state frozen artificially. */
                nvdimm_bus_lock(dev);
                set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
                if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
                        dev_put = true;
                nvdimm_bus_unlock(dev);
                cancel_delayed_work_sync(&nvdimm->dwork);
                if (dev_put)
                        put_device(dev);
        }
        nd_device_unregister(dev, ND_SYNC);

        return 0;
}

static void free_badrange_list(struct list_head *badrange_list)
{
        struct badrange_entry *bre, *next;

        list_for_each_entry_safe(bre, next, badrange_list, list) {
                list_del(&bre->list);
                kfree(bre);
        }
        list_del_init(badrange_list);
}

static int nd_bus_remove(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

        mutex_lock(&nvdimm_bus_list_mutex);
        list_del_init(&nvdimm_bus->list);
        mutex_unlock(&nvdimm_bus_list_mutex);

        wait_event(nvdimm_bus->wait,
                        atomic_read(&nvdimm_bus->ioctl_active) == 0);

        nd_synchronize();
        device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);

        spin_lock(&nvdimm_bus->badrange.lock);
        free_badrange_list(&nvdimm_bus->badrange.list);
        spin_unlock(&nvdimm_bus->badrange.lock);

        nvdimm_bus_destroy_ndctl(nvdimm_bus);

        return 0;
}

static int nd_bus_probe(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        int rc;

        rc = nvdimm_bus_create_ndctl(nvdimm_bus);
        if (rc)
                return rc;

        mutex_lock(&nvdimm_bus_list_mutex);
        list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list);
        mutex_unlock(&nvdimm_bus_list_mutex);

        /* enable bus provider attributes to look up their local context */
        dev_set_drvdata(dev, nvdimm_bus->nd_desc);

        return 0;
}

static struct nd_device_driver nd_bus_driver = {
        .probe = nd_bus_probe,
        .remove = nd_bus_remove,
        .drv = {
                .name = "nd_bus",
                .suppress_bind_attrs = true,
                .bus = &nvdimm_bus_type,
                .owner = THIS_MODULE,
                .mod_name = KBUILD_MODNAME,
        },
};

static int nvdimm_bus_match(struct device *dev, struct device_driver *drv)
{
        struct nd_device_driver *nd_drv = to_nd_device_driver(drv);

        if (is_nvdimm_bus(dev) && nd_drv == &nd_bus_driver)
                return true;

        return !!test_bit(to_nd_device_type(dev), &nd_drv->type);
}

static ASYNC_DOMAIN_EXCLUSIVE(nd_async_domain);

void nd_synchronize(void)
{
        async_synchronize_full_domain(&nd_async_domain);
}
EXPORT_SYMBOL_GPL(nd_synchronize);

static void nd_async_device_register(void *d, async_cookie_t cookie)
{
        struct device *dev = d;

        if (device_add(dev) != 0) {
                dev_err(dev, "%s: failed\n", __func__);
                put_device(dev);
        }
        put_device(dev);
        if (dev->parent)
                put_device(dev->parent);
}

static void nd_async_device_unregister(void *d, async_cookie_t cookie)
{
        struct device *dev = d;

        /* flush bus operations before delete */
        nvdimm_bus_lock(dev);
        nvdimm_bus_unlock(dev);

        device_unregister(dev);
        put_device(dev);
}

void __nd_device_register(struct device *dev)
{
        if (!dev)
                return;

        /*
         * Ensure that region devices always have their NUMA node set as
         * early as possible. This way we are able to make certain that
         * any memory associated with the creation and the creation
         * itself of the region is associated with the correct node.
         */
        if (is_nd_region(dev))
                set_dev_node(dev, to_nd_region(dev)->numa_node);

        dev->bus = &nvdimm_bus_type;
        if (dev->parent) {
                get_device(dev->parent);
                if (dev_to_node(dev) == NUMA_NO_NODE)
                        set_dev_node(dev, dev_to_node(dev->parent));
        }
        get_device(dev);

        async_schedule_dev_domain(nd_async_device_register, dev,
                        &nd_async_domain);
}

void nd_device_register(struct device *dev)
{
        device_initialize(dev);
        __nd_device_register(dev);
}
EXPORT_SYMBOL(nd_device_register);

void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
{
        bool killed;

        switch (mode) {
        case ND_ASYNC:
                /*
                 * In the async case this is being triggered with the
                 * device lock held and the unregistration work needs to
                 * be moved out of line iff this thread has won the
                 * race to schedule the deletion.
                 */
                if (!kill_device(dev))
                        return;

                get_device(dev);
                async_schedule_domain(nd_async_device_unregister, dev,
                                &nd_async_domain);
                break;
        case ND_SYNC:
                /*
                 * In the sync case the device is being unregistered due
                 * to a state change of the parent. Claim the kill state
                 * to synchronize against other unregistration requests,
                 * or otherwise let the async path handle it if the
                 * unregistration was already queued.
                 */
                nd_device_lock(dev);
                killed = kill_device(dev);
                nd_device_unlock(dev);

                if (!killed)
                        return;

                nd_synchronize();
                device_unregister(dev);
                break;
        }
}
EXPORT_SYMBOL(nd_device_unregister);

/**
 * __nd_driver_register() - register a region or a namespace driver
 * @nd_drv: driver to register
 * @owner: automatically set by nd_driver_register() macro
 * @mod_name: automatically set by nd_driver_register() macro
 */
int __nd_driver_register(struct nd_device_driver *nd_drv, struct module *owner,
                const char *mod_name)
{
        struct device_driver *drv = &nd_drv->drv;

        if (!nd_drv->type) {
                pr_debug("driver type bitmask not set (%ps)\n",
                                __builtin_return_address(0));
                return -EINVAL;
        }

        if (!nd_drv->probe) {
                pr_debug("%s ->probe() must be specified\n", mod_name);
                return -EINVAL;
        }

        drv->bus = &nvdimm_bus_type;
        drv->owner = owner;
        drv->mod_name = mod_name;

        return driver_register(drv);
}
EXPORT_SYMBOL(__nd_driver_register);

int nvdimm_revalidate_disk(struct gendisk *disk)
{
        struct device *dev = disk_to_dev(disk)->parent;
        struct nd_region *nd_region = to_nd_region(dev->parent);
        int disk_ro = get_disk_ro(disk);

        /*
         * Upgrade to read-only if the region is read-only; preserve as
         * read-only if the disk is already read-only.
         */
        if (disk_ro || nd_region->ro == disk_ro)
                return 0;

        dev_info(dev, "%s read-only, marking %s read-only\n",
                        dev_name(&nd_region->dev), disk->disk_name);
        set_disk_ro(disk, 1);

        return 0;
}
EXPORT_SYMBOL(nvdimm_revalidate_disk);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        return sprintf(buf, ND_DEVICE_MODALIAS_FMT "\n",
                        to_nd_device_type(dev));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        return sprintf(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static struct attribute *nd_device_attributes[] = {
        &dev_attr_modalias.attr,
        &dev_attr_devtype.attr,
        NULL,
};

/*
 * nd_device_attribute_group - generic attributes for all devices on an nd bus
 */
const struct attribute_group nd_device_attribute_group = {
        .attrs = nd_device_attributes,
};

static ssize_t numa_node_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static int nvdimm_dev_to_target_node(struct device *dev)
{
        struct device *parent = dev->parent;
        struct nd_region *nd_region = NULL;

        if (is_nd_region(dev))
                nd_region = to_nd_region(dev);
        else if (parent && is_nd_region(parent))
                nd_region = to_nd_region(parent);

        if (!nd_region)
                return NUMA_NO_NODE;
        return nd_region->target_node;
}

static ssize_t target_node_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", nvdimm_dev_to_target_node(dev));
}
static DEVICE_ATTR_RO(target_node);

static struct attribute *nd_numa_attributes[] = {
        &dev_attr_numa_node.attr,
        &dev_attr_target_node.attr,
        NULL,
};

static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
                int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);

        if (!IS_ENABLED(CONFIG_NUMA))
                return 0;

        if (a == &dev_attr_target_node.attr &&
                        nvdimm_dev_to_target_node(dev) == NUMA_NO_NODE)
                return 0;

        return a->mode;
}

/*
 * nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
 */
const struct attribute_group nd_numa_attribute_group = {
        .attrs = nd_numa_attributes,
        .is_visible = nd_numa_attr_visible,
};

int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
{
        dev_t devt = MKDEV(nvdimm_bus_major, nvdimm_bus->id);
        struct device *dev;

        dev = device_create(nd_class, &nvdimm_bus->dev, devt, nvdimm_bus,
                        "ndctl%d", nvdimm_bus->id);

        if (IS_ERR(dev))
                dev_dbg(&nvdimm_bus->dev, "failed to register ndctl%d: %ld\n",
                                nvdimm_bus->id, PTR_ERR(dev));
        return PTR_ERR_OR_ZERO(dev);
}

void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus)
{
        device_destroy(nd_class, MKDEV(nvdimm_bus_major, nvdimm_bus->id));
}

static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
        [ND_CMD_IMPLEMENTED] = { },
        [ND_CMD_SMART] = {
                .out_num = 2,
                .out_sizes = { 4, 128, },
        },
        [ND_CMD_SMART_THRESHOLD] = {
                .out_num = 2,
                .out_sizes = { 4, 8, },
        },
        [ND_CMD_DIMM_FLAGS] = {
                .out_num = 2,
                .out_sizes = { 4, 4 },
        },
        [ND_CMD_GET_CONFIG_SIZE] = {
                .out_num = 3,
                .out_sizes = { 4, 4, 4, },
        },
        [ND_CMD_GET_CONFIG_DATA] = {
                .in_num = 2,
                .in_sizes = { 4, 4, },
                .out_num = 2,
                .out_sizes = { 4, UINT_MAX, },
        },
        [ND_CMD_SET_CONFIG_DATA] = {
                .in_num = 3,
                .in_sizes = { 4, 4, UINT_MAX, },
                .out_num = 1,
                .out_sizes = { 4, },
        },
        [ND_CMD_VENDOR] = {
                .in_num = 3,
                .in_sizes = { 4, 4, UINT_MAX, },
                .out_num = 3,
                .out_sizes = { 4, 4, UINT_MAX, },
        },
        [ND_CMD_CALL] = {
                .in_num = 2,
                .in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
                .out_num = 1,
                .out_sizes = { UINT_MAX, },
        },
};

const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd)
{
        if (cmd < ARRAY_SIZE(__nd_cmd_dimm_descs))
                return &__nd_cmd_dimm_descs[cmd];
        return NULL;
}
EXPORT_SYMBOL_GPL(nd_cmd_dimm_desc);

static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
        [ND_CMD_IMPLEMENTED] = { },
        [ND_CMD_ARS_CAP] = {
                .in_num = 2,
                .in_sizes = { 8, 8, },
                .out_num = 4,
                .out_sizes = { 4, 4, 4, 4, },
        },
        [ND_CMD_ARS_START] = {
                .in_num = 5,
                .in_sizes = { 8, 8, 2, 1, 5, },
                .out_num = 2,
                .out_sizes = { 4, 4, },
        },
        [ND_CMD_ARS_STATUS] = {
                .out_num = 3,
                .out_sizes = { 4, 4, UINT_MAX, },
        },
        [ND_CMD_CLEAR_ERROR] = {
                .in_num = 2,
                .in_sizes = { 8, 8, },
                .out_num = 3,
                .out_sizes = { 4, 4, 8, },
        },
        [ND_CMD_CALL] = {
                .in_num = 2,
                .in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
                .out_num = 1,
                .out_sizes = { UINT_MAX, },
        },
};

const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd)
{
        if (cmd < ARRAY_SIZE(__nd_cmd_bus_descs))
                return &__nd_cmd_bus_descs[cmd];
        return NULL;
}
EXPORT_SYMBOL_GPL(nd_cmd_bus_desc);

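/*
 * Resolve the size of a command payload field. Fixed sizes come straight
 * from the descriptor tables above; UINT_MAX entries are variable-length
 * and are resolved from the command header (or nd_cmd_pkg for ND_CMD_CALL).
 */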
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
                const struct nd_cmd_desc *desc, int idx, void *buf)
{
        if (idx >= desc->in_num)
                return UINT_MAX;

        if (desc->in_sizes[idx] < UINT_MAX)
                return desc->in_sizes[idx];

        if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA && idx == 2) {
                struct nd_cmd_set_config_hdr *hdr = buf;

                return hdr->in_length;
        } else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2) {
                struct nd_cmd_vendor_hdr *hdr = buf;

                return hdr->in_length;
        } else if (cmd == ND_CMD_CALL) {
                struct nd_cmd_pkg *pkg = buf;

                return pkg->nd_size_in;
        }

        return UINT_MAX;
}
EXPORT_SYMBOL_GPL(nd_cmd_in_size);

u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
                const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
                const u32 *out_field, unsigned long remainder)
{
        if (idx >= desc->out_num)
                return UINT_MAX;

        if (desc->out_sizes[idx] < UINT_MAX)
                return desc->out_sizes[idx];

        if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && idx == 1)
                return in_field[1];
        else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
                return out_field[1];
        else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2) {
                /*
                 * Per table 9-276 ARS Data in ACPI 6.1, out_field[1] is
                 * "Size of Output Buffer in bytes, including this
                 * field."
                 */
                if (out_field[1] < 4)
                        return 0;
                /*
                 * ACPI 6.1 is ambiguous if 'status' is included in the
                 * output size. If we encounter an output size that
                 * overshoots the remainder by 4 bytes, assume it was
                 * including 'status'.
                 */
                if (out_field[1] - 4 == remainder)
                        return remainder;
                return out_field[1] - 8;
        } else if (cmd == ND_CMD_CALL) {
                struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;

                return pkg->nd_size_out;
        }

        return UINT_MAX;
}
EXPORT_SYMBOL_GPL(nd_cmd_out_size);

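/*
 * Wait for all in-flight nd bus probes to finish, dropping and
 * re-acquiring the device and bus locks around each sleep.
 */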
void wait_nvdimm_bus_probe_idle(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        do {
                if (nvdimm_bus->probe_active == 0)
                        break;
                nvdimm_bus_unlock(dev);
                nd_device_unlock(dev);
                wait_event(nvdimm_bus->wait,
                                nvdimm_bus->probe_active == 0);
                nd_device_lock(dev);
                nvdimm_bus_lock(dev);
        } while (true);
}

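/*
 * Block a bus-level ND_CMD_CLEAR_ERROR that would span an active
 * (driver-bound) namespace; such clears must go through the pmem driver.
 */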
static int nd_pmem_forget_poison_check(struct device *dev, void *data)
{
        struct nd_cmd_clear_error *clear_err =
                (struct nd_cmd_clear_error *)data;
        struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
        struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
        struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
        struct nd_namespace_common *ndns = NULL;
        struct nd_namespace_io *nsio;
        resource_size_t offset = 0, end_trunc = 0, start, end, pstart, pend;

        if (nd_dax || !dev->driver)
                return 0;

        start = clear_err->address;
        end = clear_err->address + clear_err->cleared - 1;

        if (nd_btt || nd_pfn || nd_dax) {
                if (nd_btt)
                        ndns = nd_btt->ndns;
                else if (nd_pfn)
                        ndns = nd_pfn->ndns;
                else if (nd_dax)
                        ndns = nd_dax->nd_pfn.ndns;

                if (!ndns)
                        return 0;
        } else
                ndns = to_ndns(dev);

        nsio = to_nd_namespace_io(&ndns->dev);
        pstart = nsio->res.start + offset;
        pend = nsio->res.end - end_trunc;

        if ((pstart >= start) && (pend <= end))
                return -EBUSY;

        return 0;
}

static int nd_ns_forget_poison_check(struct device *dev, void *data)
{
        return device_for_each_child(dev, data, nd_pmem_forget_poison_check);
}

/* set_config requires an idle interleave set */
static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
                struct nvdimm *nvdimm, unsigned int cmd, void *data)
{
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        /* ask the bus provider if it would like to block this request */
        if (nd_desc->clear_to_send) {
                int rc = nd_desc->clear_to_send(nd_desc, nvdimm, cmd, data);

                if (rc)
                        return rc;
        }

        /* require clear error to go through the pmem driver */
        if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR)
                return device_for_each_child(&nvdimm_bus->dev, data,
                                nd_ns_forget_poison_check);

        if (!nvdimm || cmd != ND_CMD_SET_CONFIG_DATA)
                return 0;

        /* prevent label manipulation while the kernel owns label updates */
        wait_nvdimm_bus_probe_idle(&nvdimm_bus->dev);
        if (atomic_read(&nvdimm->busy))
                return -EBUSY;
        return 0;
}

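/*
 * Common ioctl worker for bus and dimm command submission: validate the
 * command against the descriptor tables and cmd_mask, size and copy in
 * the user envelope, gate the call via nd_cmd_clear_to_send() under the
 * device and bus locks, and copy the result back to userspace.
 */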
static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
                int read_only, unsigned int ioctl_cmd, unsigned long arg)
{
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        const struct nd_cmd_desc *desc = NULL;
        unsigned int cmd = _IOC_NR(ioctl_cmd);
        struct device *dev = &nvdimm_bus->dev;
        void __user *p = (void __user *) arg;
        char *out_env = NULL, *in_env = NULL;
        const char *cmd_name, *dimm_name;
        u32 in_len = 0, out_len = 0;
        unsigned int func = cmd;
        unsigned long cmd_mask;
        struct nd_cmd_pkg pkg;
        int rc, i, cmd_rc;
        void *buf = NULL;
        u64 buf_len = 0;

        if (nvdimm) {
                desc = nd_cmd_dimm_desc(cmd);
                cmd_name = nvdimm_cmd_name(cmd);
                cmd_mask = nvdimm->cmd_mask;
                dimm_name = dev_name(&nvdimm->dev);
        } else {
                desc = nd_cmd_bus_desc(cmd);
                cmd_name = nvdimm_bus_cmd_name(cmd);
                cmd_mask = nd_desc->cmd_mask;
                dimm_name = "bus";
        }

        if (cmd == ND_CMD_CALL) {
                if (copy_from_user(&pkg, p, sizeof(pkg)))
                        return -EFAULT;
        }

        if (!desc ||
            (desc->out_num + desc->in_num == 0) ||
            cmd > ND_CMD_CALL ||
            !test_bit(cmd, &cmd_mask))
                return -ENOTTY;

        /* fail write commands (when read-only) */
        if (read_only)
                switch (cmd) {
                case ND_CMD_VENDOR:
                case ND_CMD_SET_CONFIG_DATA:
                case ND_CMD_ARS_START:
                case ND_CMD_CLEAR_ERROR:
                case ND_CMD_CALL:
                        dev_dbg(dev, "'%s' command while read-only.\n",
                                        nvdimm ? nvdimm_cmd_name(cmd)
                                        : nvdimm_bus_cmd_name(cmd));
                        return -EPERM;
                default:
                        break;
                }

        /* process an input envelope */
        in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
        if (!in_env)
                return -ENOMEM;
        for (i = 0; i < desc->in_num; i++) {
                u32 in_size, copy;

                in_size = nd_cmd_in_size(nvdimm, cmd, desc, i, in_env);
                if (in_size == UINT_MAX) {
                        dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
                                        __func__, dimm_name, cmd_name, i);
                        rc = -ENXIO;
                        goto out;
                }
                if (in_len < ND_CMD_MAX_ENVELOPE)
                        copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size);
                else
                        copy = 0;
                if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
                        rc = -EFAULT;
                        goto out;
                }
                in_len += in_size;
        }

        if (cmd == ND_CMD_CALL) {
                func = pkg.nd_command;
                dev_dbg(dev, "%s, idx: %llu, in: %u, out: %u, len %llu\n",
                                dimm_name, pkg.nd_command,
                                in_len, out_len, buf_len);
        }

        /* process an output envelope */
        out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
        if (!out_env) {
                rc = -ENOMEM;
                goto out;
        }

        for (i = 0; i < desc->out_num; i++) {
                u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
                                (u32 *) in_env, (u32 *) out_env, 0);
                u32 copy;

                if (out_size == UINT_MAX) {
                        dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
                                        dimm_name, cmd_name, i);
                        rc = -EFAULT;
                        goto out;
                }
                if (out_len < ND_CMD_MAX_ENVELOPE)
                        copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size);
                else
                        copy = 0;
                if (copy && copy_from_user(&out_env[out_len],
                                        p + in_len + out_len, copy)) {
                        rc = -EFAULT;
                        goto out;
                }
                out_len += out_size;
        }

        buf_len = (u64) out_len + (u64) in_len;
        if (buf_len > ND_IOCTL_MAX_BUFLEN) {
                dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
                                cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
                rc = -EINVAL;
                goto out;
        }

        buf = vmalloc(buf_len);
        if (!buf) {
                rc = -ENOMEM;
                goto out;
        }

        if (copy_from_user(buf, p, buf_len)) {
                rc = -EFAULT;
                goto out;
        }

        nd_device_lock(dev);
        nvdimm_bus_lock(dev);
        rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
        if (rc)
                goto out_unlock;

        rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, &cmd_rc);
        if (rc < 0)
                goto out_unlock;

        if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR && cmd_rc >= 0) {
                struct nd_cmd_clear_error *clear_err = buf;

                nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
                                clear_err->cleared);
        }

        if (copy_to_user(p, buf, buf_len))
                rc = -EFAULT;

 out_unlock:
        nvdimm_bus_unlock(dev);
        nd_device_unlock(dev);
 out:
        kfree(in_env);
        kfree(out_env);
        vfree(buf);
        return rc;
}

enum nd_ioctl_mode {
        BUS_IOCTL,
        DIMM_IOCTL,
};

static int match_dimm(struct device *dev, void *data)
{
        long id = (long) data;

        if (is_nvdimm(dev)) {
                struct nvdimm *nvdimm = to_nvdimm(dev);

                return nvdimm->id == id;
        }

        return 0;
}

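/*
 * Dispatch an ioctl to the right nvdimm_bus (or nvdimm for DIMM_IOCTL),
 * holding ioctl_active elevated so bus teardown waits for us to finish.
 */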
static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
                enum nd_ioctl_mode mode)
{
        struct nvdimm_bus *nvdimm_bus, *found = NULL;
        long id = (long) file->private_data;
        struct nvdimm *nvdimm = NULL;
        int rc, ro;

        ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
        mutex_lock(&nvdimm_bus_list_mutex);
        list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
                if (mode == DIMM_IOCTL) {
                        struct device *dev;

                        dev = device_find_child(&nvdimm_bus->dev,
                                        file->private_data, match_dimm);
                        if (!dev)
                                continue;
                        nvdimm = to_nvdimm(dev);
                        found = nvdimm_bus;
                } else if (nvdimm_bus->id == id) {
                        found = nvdimm_bus;
                }

                if (found) {
                        atomic_inc(&nvdimm_bus->ioctl_active);
                        break;
                }
        }
        mutex_unlock(&nvdimm_bus_list_mutex);

        if (!found)
                return -ENXIO;

        nvdimm_bus = found;
        rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);

        if (nvdimm)
                put_device(&nvdimm->dev);
        if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
                wake_up(&nvdimm_bus->wait);

        return rc;
}

static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        return nd_ioctl(file, cmd, arg, BUS_IOCTL);
}

static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        return nd_ioctl(file, cmd, arg, DIMM_IOCTL);
}

static int nd_open(struct inode *inode, struct file *file)
{
        long minor = iminor(inode);

        file->private_data = (void *) minor;
        return 0;
}

static const struct file_operations nvdimm_bus_fops = {
        .owner = THIS_MODULE,
        .open = nd_open,
        .unlocked_ioctl = bus_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .llseek = noop_llseek,
};

static const struct file_operations nvdimm_fops = {
        .owner = THIS_MODULE,
        .open = nd_open,
        .unlocked_ioctl = dimm_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .llseek = noop_llseek,
};

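/*
 * Subsystem init: register the "nd" bus type, the "ndctl" and "dimmctl"
 * character device regions, the "nd" class, and the nd_bus driver,
 * unwinding in reverse order on failure.
 */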
int __init nvdimm_bus_init(void)
{
        int rc;

        rc = bus_register(&nvdimm_bus_type);
        if (rc)
                return rc;

        rc = register_chrdev(0, "ndctl", &nvdimm_bus_fops);
        if (rc < 0)
                goto err_bus_chrdev;
        nvdimm_bus_major = rc;

        rc = register_chrdev(0, "dimmctl", &nvdimm_fops);
        if (rc < 0)
                goto err_dimm_chrdev;
        nvdimm_major = rc;

        nd_class = class_create(THIS_MODULE, "nd");
        if (IS_ERR(nd_class)) {
                rc = PTR_ERR(nd_class);
                goto err_class;
        }

        rc = driver_register(&nd_bus_driver.drv);
        if (rc)
                goto err_nd_bus;

        return 0;

 err_nd_bus:
        class_destroy(nd_class);
 err_class:
        unregister_chrdev(nvdimm_major, "dimmctl");
 err_dimm_chrdev:
        unregister_chrdev(nvdimm_bus_major, "ndctl");
 err_bus_chrdev:
        bus_unregister(&nvdimm_bus_type);

        return rc;
}

void nvdimm_bus_exit(void)
{
        driver_unregister(&nd_bus_driver.drv);
        class_destroy(nd_class);
        unregister_chrdev(nvdimm_bus_major, "ndctl");
        unregister_chrdev(nvdimm_major, "dimmctl");
        bus_unregister(&nvdimm_bus_type);
        ida_destroy(&nd_ida);
}