mm: move anon_vma ref out from under CONFIG_foo
We need the anon_vma refcount unconditionally to simplify the anon_vma lifetime rules.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 9e60109f12
Commit: 83813267c6
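The effect of making the refcount unconditional is that any caller, not only KSM or page migration, can pin an anon_vma across a window in which the vma or its page tables may go away. A minimal sketch of that usage pattern follows; the walker function is hypothetical, only get_anon_vma() and put_anon_vma() come from this patch.

#include <linux/rmap.h>

/*
 * Hypothetical caller, for illustration only: pin an anon_vma so it
 * cannot be freed while we work on it without holding the page table
 * lock.  Before this patch the pin was compiled out unless CONFIG_KSM
 * or CONFIG_MIGRATION was enabled.
 */
static void example_use_anon_vma(struct anon_vma *anon_vma)
{
	get_anon_vma(anon_vma);		/* atomic_inc(&anon_vma->refcount) */

	/*
	 * ... the vma that handed us this anon_vma may be unmapped
	 * here, but the anon_vma itself stays valid ...
	 */

	put_anon_vma(anon_vma);		/* frees it if we were the last user */
}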
include/linux/rmap.h

@@ -27,18 +27,15 @@
 struct anon_vma {
 	struct anon_vma *root;	/* Root of this anon_vma tree */
 	spinlock_t lock;	/* Serialize access to vma list */
-#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
-
 	/*
-	 * The external_refcount is taken by either KSM or page migration
-	 * to take a reference to an anon_vma when there is no
+	 * The refcount is taken on an anon_vma when there is no
 	 * guarantee that the vma of page tables will exist for
 	 * the duration of the operation. A caller that takes
 	 * the reference is responsible for clearing up the
 	 * anon_vma if they are the last user on release
 	 */
-	atomic_t external_refcount;
-#endif
+	atomic_t refcount;
+
 	/*
 	 * NOTE: the LSB of the head.next is set by
 	 * mm_take_all_locks() _after_ taking the above lock. So the
@@ -71,41 +68,12 @@ struct anon_vma_chain {
 };
 
 #ifdef CONFIG_MMU
-#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
-static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
-{
-	atomic_set(&anon_vma->external_refcount, 0);
-}
-
-static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
-{
-	return atomic_read(&anon_vma->external_refcount);
-}
-
 static inline void get_anon_vma(struct anon_vma *anon_vma)
 {
-	atomic_inc(&anon_vma->external_refcount);
+	atomic_inc(&anon_vma->refcount);
 }
 
 void put_anon_vma(struct anon_vma *);
-#else
-static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
-{
-}
-
-static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
-{
-	return 0;
-}
-
-static inline void get_anon_vma(struct anon_vma *anon_vma)
-{
-}
-
-static inline void put_anon_vma(struct anon_vma *anon_vma)
-{
-}
-#endif /* CONFIG_KSM */
 
 static inline struct anon_vma *page_anon_vma(struct page *page)
 {
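For readability, the net effect of the rmap.h change above, consolidated from the hunks (not additional code from the patch):

/* include/linux/rmap.h after this patch (abridged) */
struct anon_vma {
	struct anon_vma *root;	/* Root of this anon_vma tree */
	spinlock_t lock;	/* Serialize access to vma list */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma of page tables will exist for
	 * the duration of the operation.
	 */
	atomic_t refcount;
	/* ... */
};

/* under #ifdef CONFIG_MMU, no longer gated on CONFIG_KSM/CONFIG_MIGRATION */
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void put_anon_vma(struct anon_vma *);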
mm/rmap.c (14 changed lines)
@@ -272,7 +272,7 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
 	list_del(&anon_vma_chain->same_anon_vma);
 
 	/* We must garbage collect the anon_vma if it's empty */
-	empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma);
+	empty = list_empty(&anon_vma->head) && !atomic_read(&anon_vma->refcount);
 	anon_vma_unlock(anon_vma);
 
 	if (empty) {
@@ -303,7 +303,7 @@ static void anon_vma_ctor(void *data)
 	struct anon_vma *anon_vma = data;
 
 	spin_lock_init(&anon_vma->lock);
-	anonvma_external_refcount_init(anon_vma);
+	atomic_set(&anon_vma->refcount, 0);
 	INIT_LIST_HEAD(&anon_vma->head);
 }
 
@@ -1486,7 +1486,6 @@ int try_to_munlock(struct page *page)
 	return try_to_unmap_file(page, TTU_MUNLOCK);
 }
 
-#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
 /*
  * Drop an anon_vma refcount, freeing the anon_vma and anon_vma->root
  * if necessary. Be careful to do all the tests under the lock. Once
@@ -1495,8 +1494,8 @@ int try_to_munlock(struct page *page)
  */
 void put_anon_vma(struct anon_vma *anon_vma)
 {
-	BUG_ON(atomic_read(&anon_vma->external_refcount) <= 0);
-	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->root->lock)) {
+	BUG_ON(atomic_read(&anon_vma->refcount) <= 0);
+	if (atomic_dec_and_lock(&anon_vma->refcount, &anon_vma->root->lock)) {
 		struct anon_vma *root = anon_vma->root;
 		int empty = list_empty(&anon_vma->head);
 		int last_root_user = 0;
@@ -1507,8 +1506,8 @@ void put_anon_vma(struct anon_vma *anon_vma)
 		 * the refcount on the root and check if we need to free it.
 		 */
 		if (empty && anon_vma != root) {
-			BUG_ON(atomic_read(&root->external_refcount) <= 0);
-			last_root_user = atomic_dec_and_test(&root->external_refcount);
+			BUG_ON(atomic_read(&root->refcount) <= 0);
+			last_root_user = atomic_dec_and_test(&root->refcount);
 			root_empty = list_empty(&root->head);
 		}
 		anon_vma_unlock(anon_vma);
@@ -1520,7 +1519,6 @@ void put_anon_vma(struct anon_vma *anon_vma)
 		}
 	}
 }
-#endif
 
 #ifdef CONFIG_MIGRATION
 /*
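put_anon_vma() above relies on atomic_dec_and_lock(): the count is decremented and, only when it reaches zero, the lock (here the root anon_vma's lock) is taken before returning, so the emptiness checks and teardown decisions run under the lock. A generic sketch of that pattern follows; the object type and free routine are hypothetical, not kernel code.

#include <linux/spinlock.h>
#include <linux/slab.h>

/* Hypothetical refcounted object, for illustrating atomic_dec_and_lock(). */
struct foo {
	atomic_t refcount;
	spinlock_t lock;
};

static void foo_put(struct foo *foo)
{
	/*
	 * atomic_dec_and_lock() returns true only when the decrement
	 * brings the count to zero, and in that case it returns with
	 * the lock held, so last-user teardown is serialized against
	 * anyone still looking the object up under the lock.
	 */
	if (atomic_dec_and_lock(&foo->refcount, &foo->lock)) {
		/* do any last-user checks under the lock here */
		spin_unlock(&foo->lock);
		kfree(foo);		/* hypothetical release */
	}
}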