An initial vmalloc failure should start off a synchronous flush of lazy
areas, in case someone is in progress flushing them already, which could
cause us to return an allocation failure even if there is plenty of KVA
free.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Nick Piggin 2008-11-19 15:36:33 -08:00 committed by Linus Torvalds
Parent f011c2dae6
Commit 496850e5f5
1 changed file with 13 additions and 2 deletions

View file

@ -521,6 +521,17 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
spin_unlock(&purge_lock); spin_unlock(&purge_lock);
} }
/*
 * Opportunistically purge the outstanding lazy vmap areas: a non-blocking
 * (sync == 0) call into __purge_vmap_area_lazy, so if another CPU already
 * holds purge_lock we simply return instead of waiting for it.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long purge_start = ULONG_MAX;
	unsigned long purge_end = 0;

	__purge_vmap_area_lazy(&purge_start, &purge_end, 0, 0);
}
/* /*
* Kick off a purge of the outstanding lazy areas. * Kick off a purge of the outstanding lazy areas.
*/ */
@ -528,7 +539,7 @@ static void purge_vmap_area_lazy(void)
{ {
unsigned long start = ULONG_MAX, end = 0; unsigned long start = ULONG_MAX, end = 0;
__purge_vmap_area_lazy(&start, &end, 0, 0); __purge_vmap_area_lazy(&start, &end, 1, 0);
} }
/* /*
@ -539,7 +550,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
va->flags |= VM_LAZY_FREE; va->flags |= VM_LAZY_FREE;
atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr); atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages())) if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
purge_vmap_area_lazy(); try_purge_vmap_area_lazy();
} }
static struct vmap_area *find_vmap_area(unsigned long addr) static struct vmap_area *find_vmap_area(unsigned long addr)