mm: remove managed_page_count_lock spinlock
Now that totalram_pages and managed_pages are atomic variables, the
managed_page_count spinlock is no longer needed.  The lock only ever
provided a weak consistency guarantee anyway: it was taken around
nothing but the updates, and no reader actually cares whether all of
the values are updated in sync.

Link: http://lkml.kernel.org/r/1542090790-21750-5-git-send-email-arunks@codeaurora.org
Signed-off-by: Arun KS <arunks@codeaurora.org>
Reviewed-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: ca79b0c211
Commit: 476567e873
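The rationale is easy to demonstrate outside the kernel. Below is a minimal
userspace C11 sketch (not kernel code; the names are illustrative stand-ins
for zone->managed_pages and _totalram_pages) of why a lock around two
independently atomic counters buys nothing once no reader demands that the
pair be mutually consistent: each atomic add is already torn-free on its own.

/* Illustrative userspace sketch, not kernel code: two counters updated
 * with single atomic read-modify-writes need no surrounding lock when
 * readers tolerate momentary skew between them. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long managed_pages;   /* stand-in for zone->managed_pages */
static atomic_long totalram_pages;  /* stand-in for _totalram_pages */

/* Mirrors the shape of adjust_managed_page_count() after this commit:
 * two independent atomic adds, no spinlock. Each counter remains
 * individually consistent at every instant. */
static void adjust_count(long count)
{
	atomic_fetch_add(&managed_pages, count);
	atomic_fetch_add(&totalram_pages, count);
}

int main(void)
{
	adjust_count(32);
	adjust_count(-8);
	printf("managed=%ld totalram=%ld\n",
	       atomic_load(&managed_pages), atomic_load(&totalram_pages));
	return 0;
}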
include/linux/mmzone.h
@@ -428,12 +428,6 @@ struct zone {
 	 * Write access to present_pages at runtime should be protected by
 	 * mem_hotplug_begin/end(). Any reader who can't tolerant drift of
 	 * present_pages should get_online_mems() to get a stable value.
-	 *
-	 * Read access to managed_pages should be safe because it's unsigned
-	 * long. Write access to zone->managed_pages and totalram_pages are
-	 * protected by managed_page_count_lock at runtime. Idealy only
-	 * adjust_managed_page_count() should be used instead of directly
-	 * touching zone->managed_pages and totalram_pages.
 	 */
 	atomic_long_t		managed_pages;
 	unsigned long		spanned_pages;
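With the stale locking comment gone, the documented rule reduces to:
reads of managed_pages are safe because every access is a single atomic
operation. A sketch of the reader-side helper (added by the parent
commit ca79b0c211, so it is surrounding context here, not part of this
diff):

/* Reader side: a lock-free snapshot of a zone's managed page count.
 * atomic_long_read() cannot observe a torn value, so no lock is needed. */
static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}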
mm/page_alloc.c
@@ -122,9 +122,6 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
 };
 EXPORT_SYMBOL(node_states);
 
-/* Protect totalram_pages and zone->managed_pages */
-static DEFINE_SPINLOCK(managed_page_count_lock);
-
 atomic_long_t _totalram_pages __read_mostly;
 EXPORT_SYMBOL(_totalram_pages);
 unsigned long totalreserve_pages __read_mostly;
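_totalram_pages itself is only touched through small atomic wrappers,
also introduced by the parent commit and shown here for context rather
than as part of this diff; they look along these lines:

/* Write side for the global counters: one atomic add each, usable
 * without taking any lock. */
static inline void totalram_pages_add(long count)
{
	atomic_long_add(count, &_totalram_pages);
}

#ifdef CONFIG_HIGHMEM
static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}
#endif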
mm/page_alloc.c
@@ -7077,14 +7074,12 @@ early_param("movablecore", cmdline_parse_movablecore);
 
 void adjust_managed_page_count(struct page *page, long count)
 {
-	spin_lock(&managed_page_count_lock);
 	atomic_long_add(count, &page_zone(page)->managed_pages);
 	totalram_pages_add(count);
 #ifdef CONFIG_HIGHMEM
 	if (PageHighMem(page))
 		totalhigh_pages_add(count);
 #endif
-	spin_unlock(&managed_page_count_lock);
 }
 EXPORT_SYMBOL(adjust_managed_page_count);
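From a caller's point of view, adjust_managed_page_count() can now be
invoked with no ordering against other updaters beyond the atomics
themselves. A representative caller, roughly as free_reserved_page()
appears in include/linux/mm.h of this era (sketched from memory, not
part of this diff), hands a reserved page back to the page allocator
and bumps the counters by one:

/* Roughly free_reserved_page(): return a reserved page to the buddy
 * allocator and account for it. The final call is lock-free after
 * this commit. */
static inline void free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	adjust_managed_page_count(page, 1);
}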