mm: pass through vma iterator to __vma_adjust()
Pass the vma iterator through to __vma_adjust() so the state can be updated.

Link: https://lkml.kernel.org/r/20230120162650.984577-33-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Parent: fbcc3104b8
Commit: 9e56044625
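For readers skimming the diff, the shape of the change can be sketched outside the kernel tree. The toy C program below is not kernel code (struct toy_iter, adjust_private_iter() and adjust_shared_iter() are invented for illustration); it only models the difference between a callee that builds a private iterator and one that operates on the iterator the caller passes in, which is what the hunks below do for __vma_adjust():

#include <stdio.h>

/* Toy stand-in for an iterator over a range-indexed structure. */
struct toy_iter {
        unsigned long pos;      /* index the iterator currently points at */
};

/* Old shape: the callee builds a private iterator, so whatever
 * repositioning it does is invisible to the caller. */
static int adjust_private_iter(unsigned long start, unsigned long end)
{
        struct toy_iter it = { .pos = start };

        it.pos = end;           /* work happens on a throwaway iterator */
        return 0;
}

/* New shape: the caller's iterator is passed through, so its final
 * position reflects the work and the caller can continue from there. */
static int adjust_shared_iter(struct toy_iter *it, unsigned long start,
                              unsigned long end)
{
        it->pos = start;
        it->pos = end;          /* caller's iterator is updated in place */
        return 0;
}

int main(void)
{
        struct toy_iter it = { .pos = 0 };

        adjust_private_iter(0x1000, 0x2000);
        printf("private iterator: caller still at %#lx\n", it.pos);

        adjust_shared_iter(&it, 0x1000, 0x2000);
        printf("shared iterator:  caller now at %#lx\n", it.pos);
        return 0;
}

In the patch itself, __vma_adjust() takes the second shape: callers that already hold a vma_iterator, such as vma_merge(), hand theirs through, while the vma_adjust() wrapper declares a local VMA_ITERATOR() only to satisfy the new signature.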
@@ -2831,13 +2831,15 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
 
 /* mmap.c */
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
+extern int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
 	struct vm_area_struct *expand);
 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
 {
-	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
+	VMA_ITERATOR(vmi, vma->vm_mm, start);
+
+	return __vma_adjust(&vmi, vma, start, end, pgoff, insert, NULL);
 }
 extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
 	struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,

mm/mmap.c (31 lines changed):

@@ -576,9 +576,9 @@ nomem:
  * are necessary. The "insert" vma (if any) is to be inserted
  * before we drop the necessary locks.
  */
-int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
-	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
-	struct vm_area_struct *expand)
+int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
+	unsigned long start, unsigned long end, pgoff_t pgoff,
+	struct vm_area_struct *insert, struct vm_area_struct *expand)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *next_next = NULL;	/* uninit var warning */
@@ -591,7 +591,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	bool vma_changed = false;
 	long adjust_next = 0;
 	int remove_next = 0;
-	VMA_ITERATOR(vmi, mm, 0);
 	struct vm_area_struct *exporter = NULL, *importer = NULL;
 
 	if (next && !insert) {
@@ -676,7 +675,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 		}
 	}
 
-	if (vma_iter_prealloc(&vmi))
+	if (vma_iter_prealloc(vmi))
 		return -ENOMEM;
 
 	vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
@@ -722,7 +721,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	if (start != vma->vm_start) {
 		if ((vma->vm_start < start) &&
 		    (!insert || (insert->vm_end != start))) {
-			vma_iter_clear(&vmi, vma->vm_start, start);
+			vma_iter_clear(vmi, vma->vm_start, start);
 			VM_WARN_ON(insert && insert->vm_start > vma->vm_start);
 		} else {
 			vma_changed = true;
@@ -732,8 +731,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	if (end != vma->vm_end) {
 		if (vma->vm_end > end) {
 			if (!insert || (insert->vm_start != end)) {
-				vma_iter_clear(&vmi, end, vma->vm_end);
-				vma_iter_set(&vmi, vma->vm_end);
+				vma_iter_clear(vmi, end, vma->vm_end);
+				vma_iter_set(vmi, vma->vm_end);
 				VM_WARN_ON(insert &&
 					   insert->vm_end < vma->vm_end);
 			}
@@ -744,13 +743,13 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	}
 
 	if (vma_changed)
-		vma_iter_store(&vmi, vma);
+		vma_iter_store(vmi, vma);
 
 	vma->vm_pgoff = pgoff;
 	if (adjust_next) {
 		next->vm_start += adjust_next;
 		next->vm_pgoff += adjust_next >> PAGE_SHIFT;
-		vma_iter_store(&vmi, next);
+		vma_iter_store(vmi, next);
 	}
 
 	if (file) {
@@ -770,7 +769,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 		 * us to insert it before dropping the locks
 		 * (it may either follow vma or precede it).
 		 */
-		vma_iter_store(&vmi, insert);
+		vma_iter_store(vmi, insert);
 		mm->map_count++;
 	}
 
@@ -816,7 +815,7 @@ again:
 	if (insert && file)
 		uprobe_mmap(insert);
 
-	vma_iter_free(&vmi);
+	vma_iter_free(vmi);
 	validate_mm(mm);
 
 	return 0;
@@ -1010,20 +1009,20 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 	if (merge_prev && merge_next &&
 			is_mergeable_anon_vma(prev->anon_vma,
 				next->anon_vma, NULL)) {	/* cases 1, 6 */
-		err = __vma_adjust(prev, prev->vm_start,
+		err = __vma_adjust(vmi, prev, prev->vm_start,
 					next->vm_end, prev->vm_pgoff, NULL,
 					prev);
 		res = prev;
 	} else if (merge_prev) {			/* cases 2, 5, 7 */
-		err = __vma_adjust(prev, prev->vm_start,
+		err = __vma_adjust(vmi, prev, prev->vm_start,
 					end, prev->vm_pgoff, NULL, prev);
 		res = prev;
 	} else if (merge_next) {
 		if (prev && addr < prev->vm_end)	/* case 4 */
-			err = __vma_adjust(prev, prev->vm_start,
+			err = __vma_adjust(vmi, prev, prev->vm_start,
 					addr, prev->vm_pgoff, NULL, next);
 		else					/* cases 3, 8 */
-			err = __vma_adjust(mid, addr, next->vm_end,
+			err = __vma_adjust(vmi, mid, addr, next->vm_end,
 					next->vm_pgoff - pglen, NULL, next);
 		res = next;
 	}