mm: close race between do_fault_around() and fault_around_bytes_set()
Things can go wrong if fault_around_bytes is changed under do_fault_around(): between fault_around_mask() and fault_around_pages().

Let's read fault_around_bytes only once during do_fault_around() and calculate the mask based on that single read.

Note: fault_around_bytes can only be updated via the debug interface. I have tried but was not able to trigger bad behaviour without the patch, so I would not consider this patch urgent.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Andrey Ryabinin <a.ryabinin@samsung.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
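For illustration of the race described above, here is a minimal stand-alone C sketch of the problem and of the single-snapshot pattern the patch uses. It is not kernel code: main(), the hard-coded PAGE_SHIFT of 12 and the inline "simulated debugfs write" are assumptions made purely for the example; only the expressions on fault_around_bytes mirror the code touched by the patch.

/*
 * Illustrative sketch only -- not kernel code.  It models the race the
 * patch closes: the old code derived the mask and the page count from
 * two separate reads of fault_around_bytes, so a concurrent debugfs
 * write could make them disagree.  Reading the value once and deriving
 * both from that single snapshot keeps them consistent.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))

/* Stand-in for the tunable; in the kernel it is written via debugfs. */
static volatile unsigned long fault_around_bytes = 65536;

int main(void)
{
        /*
         * Racy pattern (old code): two reads.  If fault_around_bytes
         * changes between them, mask and nr_pages describe windows of
         * different sizes.
         */
        unsigned long mask = ~(fault_around_bytes - 1) & PAGE_MASK;
        fault_around_bytes = 4096;      /* simulated concurrent debugfs write */
        unsigned long nr_pages = fault_around_bytes >> PAGE_SHIFT;
        printf("racy:  mask=%#lx nr_pages=%lu (inconsistent)\n", mask, nr_pages);

        /*
         * Fixed pattern (new code): one read, both values derived from
         * the same snapshot, so they always agree.
         */
        fault_around_bytes = 65536;
        unsigned long snap_pages = fault_around_bytes >> PAGE_SHIFT;   /* ACCESS_ONCE() in the kernel */
        unsigned long snap_mask = ~(snap_pages * PAGE_SIZE - 1) & PAGE_MASK;
        printf("fixed: mask=%#lx nr_pages=%lu (consistent)\n", snap_mask, snap_pages);

        return 0;
}

On a running kernel the value is changed through the fault_around_bytes debugfs file (typically under /sys/kernel/debug), which is why a concurrent update is possible in the first place.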
Parent: 2ab051e11b
Commit: aecd6f4426

mm/memory.c: 21 changed lines (7 additions, 14 deletions)
@@ -2768,16 +2768,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 static unsigned long fault_around_bytes = rounddown_pow_of_two(65536);
 
-static inline unsigned long fault_around_pages(void)
-{
-	return fault_around_bytes >> PAGE_SHIFT;
-}
-
-static inline unsigned long fault_around_mask(void)
-{
-	return ~(fault_around_bytes - 1) & PAGE_MASK;
-}
-
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
 {
 	*val = fault_around_bytes;
@@ -2842,12 +2832,15 @@ late_initcall(fault_around_debugfs);
 static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
 		pte_t *pte, pgoff_t pgoff, unsigned int flags)
 {
-	unsigned long start_addr;
+	unsigned long start_addr, nr_pages, mask;
 	pgoff_t max_pgoff;
 	struct vm_fault vmf;
 	int off;
 
-	start_addr = max(address & fault_around_mask(), vma->vm_start);
+	nr_pages = ACCESS_ONCE(fault_around_bytes) >> PAGE_SHIFT;
+	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
+
+	start_addr = max(address & mask, vma->vm_start);
 	off = ((address - start_addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 	pte -= off;
 	pgoff -= off;
@@ -2859,7 +2852,7 @@ static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
 	max_pgoff = pgoff - ((start_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
 		PTRS_PER_PTE - 1;
 	max_pgoff = min3(max_pgoff, vma_pages(vma) + vma->vm_pgoff - 1,
-			pgoff + fault_around_pages() - 1);
+			pgoff + nr_pages - 1);
 
 	/* Check if it makes any sense to call ->map_pages */
 	while (!pte_none(*pte)) {
@@ -2894,7 +2887,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * something).
 	 */
 	if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
-	    fault_around_pages() > 1) {
+	    fault_around_bytes >> PAGE_SHIFT > 1) {
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 		do_fault_around(vma, address, pte, pgoff, flags);
 		if (!pte_same(*pte, orig_pte))