[PATCH] mm: use __GFP_NOMEMALLOC
Use the new __GFP_NOMEMALLOC to simplify the previous handling of PF_MEMALLOC.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Parent
20a77776c2
Commit
bd53b714d3
|
@@ -331,25 +331,19 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
|
|||
struct bio *bio;
|
||||
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
int gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
|
||||
unsigned long flags = current->flags;
|
||||
unsigned int i;
|
||||
|
||||
/*
|
||||
* Tell VM to act less aggressively and fail earlier.
|
||||
* This is not necessary but increases throughput.
|
||||
* Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and
|
||||
* to fail earlier. This is not necessary but increases throughput.
|
||||
* FIXME: Is this really intelligent?
|
||||
*/
|
||||
current->flags &= ~PF_MEMALLOC;
|
||||
|
||||
if (base_bio)
|
||||
bio = bio_clone(base_bio, GFP_NOIO);
|
||||
bio = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
|
||||
else
|
||||
bio = bio_alloc(GFP_NOIO, nr_iovecs);
|
||||
if (!bio) {
|
||||
if (flags & PF_MEMALLOC)
|
||||
current->flags |= PF_MEMALLOC;
|
||||
bio = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
|
||||
if (!bio)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* if the last bio was not complete, continue where that one ended */
|
||||
bio->bi_idx = *bio_vec_idx;
|
||||
|
@@ -386,9 +380,6 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
|
|||
size -= bv->bv_len;
|
||||
}
|
||||
|
||||
if (flags & PF_MEMALLOC)
|
||||
current->flags |= PF_MEMALLOC;
|
||||
|
||||
if (!bio->bi_size) {
|
||||
bio_put(bio);
|
||||
return NULL;
|
||||
|
|
|
@@ -143,7 +143,6 @@ void __delete_from_swap_cache(struct page *page)
|
|||
int add_to_swap(struct page * page)
|
||||
{
|
||||
swp_entry_t entry;
|
||||
int pf_flags;
|
||||
int err;
|
||||
|
||||
if (!PageLocked(page))
|
||||
|
@@ -154,29 +153,19 @@ int add_to_swap(struct page * page)
|
|||
if (!entry.val)
|
||||
return 0;
|
||||
|
||||
/* Radix-tree node allocations are performing
|
||||
* GFP_ATOMIC allocations under PF_MEMALLOC.
|
||||
* They can completely exhaust the page allocator.
|
||||
/*
|
||||
* Radix-tree node allocations from PF_MEMALLOC contexts could
|
||||
* completely exhaust the page allocator. __GFP_NOMEMALLOC
|
||||
* stops emergency reserves from being allocated.
|
||||
*
|
||||
* So PF_MEMALLOC is dropped here. This causes the slab
|
||||
* allocations to fail earlier, so radix-tree nodes will
|
||||
* then be allocated from the mempool reserves.
|
||||
*
|
||||
* We're still using __GFP_HIGH for radix-tree node
|
||||
* allocations, so some of the emergency pools are available,
|
||||
* just not all of them.
|
||||
* TODO: this could cause a theoretical memory reclaim
|
||||
* deadlock in the swap out path.
|
||||
*/
|
||||
|
||||
pf_flags = current->flags;
|
||||
current->flags &= ~PF_MEMALLOC;
|
||||
|
||||
/*
|
||||
* Add it to the swap cache and mark it dirty
|
||||
*/
|
||||
err = __add_to_swap_cache(page, entry, GFP_ATOMIC|__GFP_NOWARN);
|
||||
|
||||
if (pf_flags & PF_MEMALLOC)
|
||||
current->flags |= PF_MEMALLOC;
|
||||
err = __add_to_swap_cache(page, entry,
|
||||
GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN);
|
||||
|
||||
switch (err) {
|
||||
case 0: /* Success */
|
||||
|
|
Loading…
Reference in a new issue