mm/mmap: add inline munmap_vma_range() for code readability
There are two locations that have a block of code for munmapping a vma range.  Change those two locations to use a function and add meaningful comments about what happens to the arguments, which was unclear in the previous code.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20200818154707.2515169-2-Liam.Howlett@Oracle.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 3903b55a61
Commit: fb8090b699

Changed file: mm/mmap.c (48 lines changed)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -575,6 +575,33 @@ static inline struct vm_area_struct *vma_next(struct mm_struct *mm,
 
 	return vma->vm_next;
 }
+
+/*
+ * munmap_vma_range() - munmap VMAs that overlap a range.
+ * @mm: The mm struct
+ * @start: The start of the range.
+ * @len: The length of the range.
+ * @pprev: pointer to the pointer that will be set to previous vm_area_struct
+ * @rb_link: the rb_node
+ * @rb_parent: the parent rb_node
+ *
+ * Find all the vm_area_struct that overlap from @start to
+ * @end and munmap them.  Set @pprev to the previous vm_area_struct.
+ *
+ * Returns: -ENOMEM on munmap failure or 0 on success.
+ */
+static inline int
+munmap_vma_range(struct mm_struct *mm, unsigned long start, unsigned long len,
+		 struct vm_area_struct **pprev, struct rb_node ***link,
+		 struct rb_node **parent, struct list_head *uf)
+{
+
+	while (find_vma_links(mm, start, start + len, pprev, link, parent))
+		if (do_munmap(mm, start, len, uf))
+			return -ENOMEM;
+
+	return 0;
+}
 static unsigned long count_vma_pages_range(struct mm_struct *mm,
 		unsigned long addr, unsigned long end)
 {
@@ -1721,13 +1748,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 			return -ENOMEM;
 	}
 
-	/* Clear old maps */
-	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
-			      &rb_parent)) {
-		if (do_munmap(mm, addr, len, uf))
-			return -ENOMEM;
-	}
-
+	/* Clear old maps, set up prev, rb_link, rb_parent, and uf */
+	if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
+		return -ENOMEM;
 	/*
 	 * Private writable mapping: check memory availability
 	 */
@@ -3063,14 +3086,9 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
 	if (error)
 		return error;
 
-	/*
-	 * Clear old maps.  this also does some error checking for us
-	 */
-	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
-			      &rb_parent)) {
-		if (do_munmap(mm, addr, len, uf))
-			return -ENOMEM;
-	}
+	/* Clear old maps, set up prev, rb_link, rb_parent, and uf */
+	if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
+		return -ENOMEM;
 
 	/* Check against address space limits *after* clearing old maps... */
 	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
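The loop that munmap_vma_range() factors out is easiest to see in isolation.  Below is a minimal, self-contained user-space sketch of the same "keep unmapping until nothing in the range overlaps" pattern.  It is an illustration only: struct vma, find_overlap(), unmap_one() and clear_range() are hypothetical names invented here, not the kernel's vm_area_struct, find_vma_links() or do_munmap(), and the rbtree bookkeeping is not modelled.

/*
 * Toy user-space model of the munmap_vma_range() loop above.
 * All names below are hypothetical stand-ins for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

struct vma {
	unsigned long start, end;	/* mapping covers [start, end) */
	struct vma *next;
};

/* Return the first mapping that overlaps [start, start + len), or NULL. */
static struct vma *find_overlap(struct vma *head, unsigned long start,
				unsigned long len)
{
	struct vma *v;

	for (v = head; v; v = v->next)
		if (v->start < start + len && v->end > start)
			return v;
	return NULL;
}

/* Unlink and free one mapping; returns 0 on success, -1 if not found. */
static int unmap_one(struct vma **head, struct vma *victim)
{
	struct vma **p;

	for (p = head; *p; p = &(*p)->next) {
		if (*p == victim) {
			*p = victim->next;
			free(victim);
			return 0;
		}
	}
	return -1;
}

/*
 * Same shape as munmap_vma_range(): keep searching and unmapping until
 * nothing in [start, start + len) overlaps any more, or a failure occurs.
 */
static int clear_range(struct vma **head, unsigned long start,
		       unsigned long len)
{
	struct vma *v;

	while ((v = find_overlap(*head, start, len)))
		if (unmap_one(head, v))
			return -1;	/* stands in for -ENOMEM */
	return 0;
}

int main(void)
{
	unsigned long ranges[3][2] = { {30, 40}, {10, 20}, {0, 10} };
	struct vma *head = NULL, *v;
	int i;

	/* Build three toy mappings: [0,10), [10,20), [30,40). */
	for (i = 0; i < 3; i++) {
		v = malloc(sizeof(*v));
		v->start = ranges[i][0];
		v->end = ranges[i][1];
		v->next = head;
		head = v;
	}

	/* Clearing [5, 25) removes the first two mappings, keeps [30,40). */
	clear_range(&head, 5, 20);
	for (v = head; v; v = v->next)
		printf("remaining: [%lu, %lu)\n", v->start, v->end);
	return 0;
}

In the kernel helper the side effects matter as much as the unmapping: once no overlap remains, find_vma_links() fills in @pprev, @rb_link and @rb_parent for the caller's later vma_link(), and do_munmap() may record userfaultfd work in @uf.  That is what the callers' new comment "Clear old maps, set up prev, rb_link, rb_parent, and uf" spells out and what the old open-coded loops left implicit.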