ACPI: OSL: Implement deferred unmapping of ACPI memory

The ACPI OS layer in Linux uses RCU to protect the walkers of the
list of ACPI memory mappings from seeing an inconsistent state
while it is being updated.  Among other situations, that list can
be walked in (NMI and non-NMI) interrupt context, so using a
sleeping lock to protect it is not an option.
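
For reference, a reader looks roughly like this (a simplified sketch
modeled on acpi_map_lookup() in drivers/acpi/osl.c; the helper name
below is ours, not the kernel's):

    /*
     * Walk the mappings list under RCU rather than a sleeping lock,
     * so that lookups stay safe in (NMI and non-NMI) interrupt context.
     */
    static void __iomem *acpi_map_vaddr_sketch(acpi_physical_address phys,
                                               acpi_size size)
    {
            struct acpi_ioremap *map;
            void __iomem *virt = NULL;

            rcu_read_lock();
            list_for_each_entry_rcu(map, &acpi_ioremaps, list) {
                    if (map->phys <= phys &&
                        phys + size <= map->phys + map->size) {
                            virt = map->virt + (phys - map->phys);
                            break;
                    }
            }
            rcu_read_unlock();

            return virt;
    }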

However, the RCU usage there leads to performance issues, as
described by Dan Williams:

"Recently a performance problem was reported for a process invoking
a non-trivial ASL program. The method call in this case ends up
repetitively triggering a call path like:

    acpi_ex_store
    acpi_ex_store_object_to_node
    acpi_ex_write_data_to_field
    acpi_ex_insert_into_field
    acpi_ex_write_with_update_rule
    acpi_ex_field_datum_io
    acpi_ex_access_region
    acpi_ev_address_space_dispatch
    acpi_ex_system_memory_space_handler
    acpi_os_map_cleanup.part.14
    _synchronize_rcu_expedited.constprop.89
    schedule

The end result of frequent synchronize_rcu_expedited() invocation is
tiny sub-millisecond spurts of execution where the scheduler freely
migrates this apparently sleepy task. The overhead of frequent
scheduler invocation multiplies the execution time by a factor
of 2-3X."

The source of this is that acpi_ex_system_memory_space_handler()
unmaps the memory mapping cached by it at access time whenever that
mapping does not cover the memory area being accessed.  Consequently,
if a memory opregion contains two fields separated by an unused chunk
of address space large enough that a single mapping cannot cover
both, and the two fields happen to be accessed in an alternating
pattern, the unmapping occurs on every
acpi_ex_system_memory_space_handler() invocation for that opregion,
which leads to significant overhead, as sketched below.
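
The relevant handler logic looks roughly like this (a simplified
sketch of acpi_ex_system_memory_space_handler() from ACPICA's
exregion.c; field names follow struct acpi_mem_space_context, and the
page-aligned window-size computation, stored in map_length here, is
elided):

    /* Drop the cached window if it does not cover the new request. */
    if (address < mem_info->mapped_physical_address ||
        (u64)address + length > (u64)mem_info->mapped_physical_address +
                                mem_info->mapped_length) {
            if (mem_info->mapped_length)
                    /* Synchronous before this patch, hence the repeated
                     * waits in synchronize_rcu_expedited(). */
                    acpi_os_unmap_memory(mem_info->mapped_logical_address,
                                         mem_info->mapped_length);

            mem_info->mapped_logical_address =
                    acpi_os_map_memory(address, map_length);
            mem_info->mapped_physical_address = address;
            mem_info->mapped_length = map_length;
    }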

Moreover, acpi_ex_system_memory_space_handler() carries out the
memory unmapping with the namespace and interpreter mutexes held,
which may lead to additional latency, because all of the tasks
wanting to acquire one of these mutexes need to wait for the
memory unmapping operation to complete.

To address that, rework acpi_os_unmap_memory() so that it does not
release the memory mapping covering the given address range right
away; instead, make it queue up the mapping at hand for removal
via queue_rcu_work().  Note that a mapping is only queued up after
it has been deleted from the mappings list, at which point its
reference counter is not used any more, so the counter can share
storage with the rcu_work item in a union (see the sketch below).
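
The mechanics of the deferral, condensed from the patch below: once
the last reference is gone, the mapping is unhooked from the list and
handed to queue_rcu_work(), which runs the work function only after
an RCU grace period has elapsed, so list walkers can never see the
entry being freed under them:

    /* Runs from the workqueue once a grace period has elapsed. */
    static void acpi_os_map_cleanup_deferred(struct work_struct *work)
    {
            struct acpi_ioremap *map
                    = container_of(to_rcu_work(work), struct acpi_ioremap,
                                   track.rwork);

            acpi_unmap(map->phys, map->virt);
            kfree(map);
    }

    /* Caller holds acpi_ioremap_lock and has already done
       list_del_rcu(&map->list): */
    INIT_RCU_WORK(&map->track.rwork, acpi_os_map_cleanup_deferred);
    queue_rcu_work(system_wq, &map->track.rwork);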

Reported-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Xiang Li <xiang.z.li@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Rafael J. Wysocki, 2020-07-02 13:19:12 +02:00
Parent: 9ebcfadb06
Commit: 1757659d02
1 changed file (drivers/acpi/osl.c), 79 additions and 37 deletions

--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -77,7 +77,10 @@ struct acpi_ioremap {
 	void __iomem *virt;
 	acpi_physical_address phys;
 	acpi_size size;
-	unsigned long refcount;
+	union {
+		unsigned long refcount;
+		struct rcu_work rwork;
+	} track;
 };
 
 static LIST_HEAD(acpi_ioremaps);
@@ -250,7 +253,7 @@ void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
 	map = acpi_map_lookup(phys, size);
 	if (map) {
 		virt = map->virt + (phys - map->phys);
-		map->refcount++;
+		map->track.refcount++;
 	}
 	mutex_unlock(&acpi_ioremap_lock);
 	return virt;
@@ -335,7 +338,7 @@ void __iomem __ref
 	/* Check if there's a suitable mapping already. */
 	map = acpi_map_lookup(phys, size);
 	if (map) {
-		map->refcount++;
+		map->track.refcount++;
 		goto out;
 	}
 
@@ -358,7 +361,7 @@ void __iomem __ref
 	map->virt = virt;
 	map->phys = pg_off;
 	map->size = pg_sz;
-	map->refcount = 1;
+	map->track.refcount = 1;
 
 	list_add_tail_rcu(&map->list, &acpi_ioremaps);
 
@@ -374,21 +377,66 @@ void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 }
 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
 
-/* Must be called with mutex_lock(&acpi_ioremap_lock) */
-static unsigned long acpi_os_drop_map_ref(struct acpi_ioremap *map)
+static void acpi_os_map_remove(struct acpi_ioremap *map)
 {
-	unsigned long refcount = --map->refcount;
+	acpi_unmap(map->phys, map->virt);
+	kfree(map);
+}
 
-	if (!refcount)
-		list_del_rcu(&map->list);
-	return refcount;
+static void acpi_os_map_cleanup_deferred(struct work_struct *work)
+{
+	acpi_os_map_remove(container_of(to_rcu_work(work), struct acpi_ioremap,
+					track.rwork));
+}
+
+/* Must be called with mutex_lock(&acpi_ioremap_lock) */
+static bool acpi_os_drop_map_ref(struct acpi_ioremap *map, bool defer)
+{
+	if (--map->track.refcount)
+		return true;
+
+	list_del_rcu(&map->list);
+
+	if (defer) {
+		INIT_RCU_WORK(&map->track.rwork, acpi_os_map_cleanup_deferred);
+		queue_rcu_work(system_wq, &map->track.rwork);
+	}
+	return defer;
 }
 
 static void acpi_os_map_cleanup(struct acpi_ioremap *map)
 {
+	if (!map)
+		return;
+
 	synchronize_rcu_expedited();
-	acpi_unmap(map->phys, map->virt);
-	kfree(map);
+	acpi_os_map_remove(map);
+}
+
+static void __ref __acpi_os_unmap_iomem(void __iomem *virt, acpi_size size,
+					bool defer)
+{
+	struct acpi_ioremap *map;
+
+	if (!acpi_permanent_mmap) {
+		__acpi_unmap_table(virt, size);
+		return;
+	}
+
+	mutex_lock(&acpi_ioremap_lock);
+
+	map = acpi_map_lookup_virt(virt, size);
+	if (!map) {
+		mutex_unlock(&acpi_ioremap_lock);
+		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
+		return;
+	}
+	if (acpi_os_drop_map_ref(map, defer))
+		map = NULL;
+
+	mutex_unlock(&acpi_ioremap_lock);
+
+	acpi_os_map_cleanup(map);
 }
 
 /**
@@ -407,32 +455,25 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
  */
 void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
 {
-	struct acpi_ioremap *map;
-	unsigned long refcount;
-
-	if (!acpi_permanent_mmap) {
-		__acpi_unmap_table(virt, size);
-		return;
-	}
-
-	mutex_lock(&acpi_ioremap_lock);
-	map = acpi_map_lookup_virt(virt, size);
-	if (!map) {
-		mutex_unlock(&acpi_ioremap_lock);
-		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
-		return;
-	}
-	refcount = acpi_os_drop_map_ref(map);
-	mutex_unlock(&acpi_ioremap_lock);
-
-	if (!refcount)
-		acpi_os_map_cleanup(map);
+	__acpi_os_unmap_iomem(virt, size, false);
 }
 EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
 
+/**
+ * acpi_os_unmap_memory - Drop a memory mapping reference.
+ * @virt: Start of the address range to drop a reference to.
+ * @size: Size of the address range to drop a reference to.
+ *
+ * Look up the given virtual address range in the list of existing ACPI memory
+ * mappings, drop a reference to it and if there are no more active references
+ * to it, queue it up for later removal.
+ *
+ * During early init (when acpi_permanent_mmap has not been set yet) this
+ * routine behaves like acpi_os_unmap_iomem().
+ */
 void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
 {
-	return acpi_os_unmap_iomem((void __iomem *)virt, size);
+	__acpi_os_unmap_iomem((void __iomem *)virt, size, true);
 }
 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
 
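With this in place, the two exported entry points differ only in when
the final teardown of an unused mapping happens.  A hypothetical usage
sketch (not part of the patch; phys and len stand for any valid range):

    void __iomem *p = acpi_os_map_iomem(phys, len);
    /* ... MMIO accesses ... */
    acpi_os_unmap_iomem(p, len);    /* synchronous: may block in
                                       synchronize_rcu_expedited() */

    void *q = acpi_os_map_memory(phys, len);
    /* ... opregion-handler-style accesses ... */
    acpi_os_unmap_memory(q, len);   /* deferred: the final unmap runs from
                                       a workqueue after a grace period */
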
@@ -461,7 +502,6 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
 {
 	u64 addr;
 	struct acpi_ioremap *map;
-	unsigned long refcount;
 
 	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
 		return;
@@ -472,16 +512,18 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
 		return;
 
 	mutex_lock(&acpi_ioremap_lock);
+
 	map = acpi_map_lookup(addr, gas->bit_width / 8);
 	if (!map) {
 		mutex_unlock(&acpi_ioremap_lock);
 		return;
 	}
-	refcount = acpi_os_drop_map_ref(map);
+	if (acpi_os_drop_map_ref(map, false))
+		map = NULL;
+
 	mutex_unlock(&acpi_ioremap_lock);
 
-	if (!refcount)
-		acpi_os_map_cleanup(map);
+	acpi_os_map_cleanup(map);
 }
 EXPORT_SYMBOL(acpi_os_unmap_generic_address);