Merge tag 'dax-for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull dax updates from Dan Williams:
 "The fruits of a bug hunt in the fsdax implementation with Willy and a
  small feature update for device-dax:

   - Fix a hang condition that started triggering after the Xarray
     conversion of fsdax in the v4.20 kernel.

   - Add a 'resource' (root-only physical base address) sysfs attribute
     to device-dax instances to correlate memory-blocks onlined via the
     kmem driver with a given device instance"

* tag 'dax-for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  dax: Fix missed wakeup with PMD faults
  device-dax: Add a 'resource' attribute
Linus Torvalds 2019-07-18 10:58:52 -07:00
Parents: f8c3500cd1 23c84eb783
Commit: 0fe49f70a0
2 changed files with 52 additions and 20 deletions
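
The 'resource' attribute from the second item lands in drivers/dax/bus.c below as a root-only (0400) sysfs file. As a usage sketch only: a minimal privileged reader might look like the following, where the dax0.0 instance name and the /sys/bus/dax/devices path are illustrative assumptions, not something this commit guarantees.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Illustrative path: a hypothetical device-dax instance named dax0.0. */
	const char *path = "/sys/bus/dax/devices/dax0.0/resource";
	unsigned long long base;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);	/* expect EACCES unless root: the attribute is 0400 */
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%llx", &base) != 1) {	/* kernel prints "%#llx", e.g. 0x100000000 */
		fprintf(stderr, "unexpected format in %s\n", path);
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);
	printf("dax0.0 physical base address: %#llx\n", base);
	return EXIT_SUCCESS;
}

The printed base address can then be correlated with the physical ranges of memory blocks onlined by the kmem driver.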

--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c

@@ -295,6 +295,22 @@ static ssize_t target_node_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(target_node);
 
+static unsigned long long dev_dax_resource(struct dev_dax *dev_dax)
+{
+	struct dax_region *dax_region = dev_dax->region;
+
+	return dax_region->res.start;
+}
+
+static ssize_t resource_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct dev_dax *dev_dax = to_dev_dax(dev);
+
+	return sprintf(buf, "%#llx\n", dev_dax_resource(dev_dax));
+}
+static DEVICE_ATTR_RO(resource);
+
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
@@ -313,6 +329,8 @@ static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
 
 	if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
 		return 0;
+	if (a == &dev_attr_resource.attr)
+		return 0400;
 	return a->mode;
 }
 
@@ -320,6 +338,7 @@ static struct attribute *dev_dax_attributes[] = {
 	&dev_attr_modalias.attr,
 	&dev_attr_size.attr,
 	&dev_attr_target_node.attr,
+	&dev_attr_resource.attr,
 	NULL,
 };

--- a/fs/dax.c
+++ b/fs/dax.c

@@ -123,6 +123,15 @@ static int dax_is_empty_entry(void *entry)
 	return xa_to_value(entry) & DAX_EMPTY;
 }
 
+/*
+ * true if the entry that was found is of a smaller order than the entry
+ * we were looking for
+ */
+static bool dax_is_conflict(void *entry)
+{
+	return entry == XA_RETRY_ENTRY;
+}
+
 /*
  * DAX page cache entry locking
  */
@@ -195,11 +204,13 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
  * Look up entry in page cache, wait for it to become unlocked if it
  * is a DAX entry and return it. The caller must subsequently call
  * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
- * if it did.
+ * if it did. The entry returned may have a larger order than @order.
+ * If @order is larger than the order of the entry found in i_pages, this
+ * function returns a dax_is_conflict entry.
  *
  * Must be called with the i_pages lock held.
  */
-static void *get_unlocked_entry(struct xa_state *xas)
+static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
 {
 	void *entry;
 	struct wait_exceptional_entry_queue ewait;
@@ -210,6 +221,8 @@ static void *get_unlocked_entry(struct xa_state *xas)
 
 	for (;;) {
 		entry = xas_find_conflict(xas);
+		if (dax_entry_order(entry) < order)
+			return XA_RETRY_ENTRY;
 		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
 				!dax_is_locked(entry))
 			return entry;
@@ -254,7 +267,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
 static void put_unlocked_entry(struct xa_state *xas, void *entry)
 {
 	/* If we were the only waiter woken, wake the next one */
-	if (entry)
+	if (entry && dax_is_conflict(entry))
 		dax_wake_entry(xas, entry, false);
 }
 
@@ -461,7 +474,7 @@ void dax_unlock_page(struct page *page, dax_entry_t cookie)
  * overlap with xarray value entries.
  */
 static void *grab_mapping_entry(struct xa_state *xas,
-		struct address_space *mapping, unsigned long size_flag)
+		struct address_space *mapping, unsigned int order)
 {
 	unsigned long index = xas->xa_index;
 	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
@@ -469,20 +482,17 @@ static void *grab_mapping_entry(struct xa_state *xas,
 
 retry:
 	xas_lock_irq(xas);
-	entry = get_unlocked_entry(xas);
+	entry = get_unlocked_entry(xas, order);
 
 	if (entry) {
+		if (dax_is_conflict(entry))
+			goto fallback;
 		if (!xa_is_value(entry)) {
 			xas_set_err(xas, EIO);
 			goto out_unlock;
 		}
 
-		if (size_flag & DAX_PMD) {
-			if (dax_is_pte_entry(entry)) {
-				put_unlocked_entry(xas, entry);
-				goto fallback;
-			}
-		} else { /* trying to grab a PTE entry */
+		if (order == 0) {
 			if (dax_is_pmd_entry(entry) &&
 			    (dax_is_zero_entry(entry) ||
 			     dax_is_empty_entry(entry))) {
@@ -523,7 +533,11 @@ retry:
 	if (entry) {
 		dax_lock_entry(xas, entry);
 	} else {
-		entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY);
+		unsigned long flags = DAX_EMPTY;
+
+		if (order > 0)
+			flags |= DAX_PMD;
+		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
 		dax_lock_entry(xas, entry);
 		if (xas_error(xas))
 			goto out_unlock;
@@ -594,7 +608,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 		if (WARN_ON_ONCE(!xa_is_value(entry)))
 			continue;
 		if (unlikely(dax_is_locked(entry)))
-			entry = get_unlocked_entry(&xas);
+			entry = get_unlocked_entry(&xas, 0);
 		if (entry)
 			page = dax_busy_page(entry);
 		put_unlocked_entry(&xas, entry);
@@ -621,7 +635,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
 	void *entry;
 
 	xas_lock_irq(&xas);
-	entry = get_unlocked_entry(&xas);
+	entry = get_unlocked_entry(&xas, 0);
 	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 		goto out;
 	if (!trunc &&
@@ -848,7 +862,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
 	if (unlikely(dax_is_locked(entry))) {
 		void *old_entry = entry;
 
-		entry = get_unlocked_entry(xas);
+		entry = get_unlocked_entry(xas, 0);
 
 		/* Entry got punched out / reallocated? */
 		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
@@ -1509,7 +1523,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	 * entry is already in the array, for instance), it will return
 	 * VM_FAULT_FALLBACK.
 	 */
-	entry = grab_mapping_entry(&xas, mapping, DAX_PMD);
+	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
 	if (xa_is_internal(entry)) {
 		result = xa_to_internal(entry);
 		goto fallback;
@@ -1658,11 +1672,10 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
 	vm_fault_t ret;
 
 	xas_lock_irq(&xas);
-	entry = get_unlocked_entry(&xas);
+	entry = get_unlocked_entry(&xas, order);
 	/* Did we race with someone splitting entry or so? */
-	if (!entry ||
-	    (order == 0 && !dax_is_pte_entry(entry)) ||
-	    (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
+	if (!entry || dax_is_conflict(entry) ||
+	    (order == 0 && !dax_is_pte_entry(entry))) {
 		put_unlocked_entry(&xas, entry);
 		xas_unlock_irq(&xas);
 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
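
For readers tracing the fs/dax.c change above: get_unlocked_entry() now takes the order the caller wants and returns a sentinel when the entry it finds is of a smaller order, so a PMD fault that collides with an existing PTE entry can fall back instead of sleeping on a wakeup that may never arrive. What follows is a stripped-down, out-of-kernel sketch of that sentinel pattern only; the retry token, the entry struct, and lookup() are stand-ins for the XArray internals, not kernel API.

#include <stdio.h>
#include <stdbool.h>

#define PMD_ORDER 9	/* illustrative: 2MB huge pages over 4KB base pages */

static int retry_token;
#define XA_RETRY_ENTRY ((const void *)&retry_token)	/* stand-in sentinel */

struct entry {
	unsigned int order;	/* 0 = PTE entry, PMD_ORDER = PMD entry */
};

/* true if the entry that was found is of a smaller order than requested */
static bool dax_is_conflict(const void *entry)
{
	return entry == XA_RETRY_ENTRY;
}

/* mimics get_unlocked_entry(): refuse to return a smaller-order entry */
static const void *lookup(const struct entry *found, unsigned int order)
{
	if (found && found->order < order)
		return XA_RETRY_ENTRY;
	return found;
}

int main(void)
{
	struct entry pte = { .order = 0 };

	/* A PMD-order lookup that finds a PTE entry yields the sentinel. */
	if (dax_is_conflict(lookup(&pte, PMD_ORDER)))
		puts("conflict: fall back to the PTE fault path");
	return 0;
}

The value of the sentinel is that it is distinguishable both from NULL (no entry) and from a real locked entry, which is exactly what grab_mapping_entry() and dax_insert_pfn_mkwrite() test for in the hunks above.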