Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "13 patches.

  Subsystems affected by this patch series: mips, mm (kfence, debug,
  pagealloc, memory-hotplug, hugetlb, kasan, and hugetlb), init, proc,
  lib, ocfs2, and mailmap"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mailmap: use private address for Michel Lespinasse
  ocfs2: fix data corruption by fallocate
  lib: crc64: fix kernel-doc warning
  mm, hugetlb: fix simple resv_huge_pages underflow on UFFDIO_COPY
  mm/kasan/init.c: fix doc warning
  proc: add .gitignore for proc-subset-pid selftest
  hugetlb: pass head page to remove_hugetlb_page()
  drivers/base/memory: fix trying offlining memory blocks with memory holes on aarch64
  mm/page_alloc: fix counting of free pages after take off from buddy
  mm/debug_vm_pgtable: fix alignment for pmd/pud_advanced_tests()
  pid: take a reference when initializing `cad_pid`
  kfence: use TASK_IDLE when awaiting allocation
  Revert "MIPS: make userspace mapping young by default"
--- a/.mailmap
+++ b/.mailmap
@@ -243,6 +243,9 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
+Michel Lespinasse <michel@lespinasse.org>
+Michel Lespinasse <michel@lespinasse.org> <walken@google.com>
+Michel Lespinasse <michel@lespinasse.org> <walken@zoy.org>
 Miguel Ojeda <ojeda@kernel.org> <miguel.ojeda.sandonis@gmail.com>
 Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
 Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
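For reference, the two entry forms used above follow gitmailmap(5): a bare canonical identity, and a canonical identity followed by the old commit address it replaces. Sketch (addresses illustrative, not from the patch):

    # Canonical name for commits authored under this exact address:
    Proper Name <commit@example.org>
    # Canonical name and address for commits authored under an old address:
    Proper Name <proper@example.org> <commit@example.org>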
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -158,31 +158,29 @@ unsigned long _page_cachable_default;
 EXPORT_SYMBOL(_page_cachable_default);
 
 #define PM(p)	__pgprot(_page_cachable_default | (p))
-#define PVA(p)	PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
 
 static inline void setup_protection_map(void)
 {
 	protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-	protection_map[1]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-	protection_map[2]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-	protection_map[3]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-	protection_map[4]  = PVA(_PAGE_PRESENT);
-	protection_map[5]  = PVA(_PAGE_PRESENT);
-	protection_map[6]  = PVA(_PAGE_PRESENT);
-	protection_map[7]  = PVA(_PAGE_PRESENT);
+	protection_map[1]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+	protection_map[2]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+	protection_map[3]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+	protection_map[4]  = PM(_PAGE_PRESENT);
+	protection_map[5]  = PM(_PAGE_PRESENT);
+	protection_map[6]  = PM(_PAGE_PRESENT);
+	protection_map[7]  = PM(_PAGE_PRESENT);
 
 	protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-	protection_map[9]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-	protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+	protection_map[9]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+	protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
 			   _PAGE_NO_READ);
-	protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
-	protection_map[12] = PVA(_PAGE_PRESENT);
-	protection_map[13] = PVA(_PAGE_PRESENT);
-	protection_map[14] = PVA(_PAGE_PRESENT | _PAGE_WRITE);
-	protection_map[15] = PVA(_PAGE_PRESENT | _PAGE_WRITE);
+	protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+	protection_map[12] = PM(_PAGE_PRESENT);
+	protection_map[13] = PM(_PAGE_PRESENT);
+	protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+	protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
 }
 
-#undef _PVA
 #undef PM
 
 void cpu_cache_init(void)
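For context on the table above: protection_map[] has sixteen slots indexed by the low four vm_flags bits, so slots 0-7 cover private mappings and 8-15 their VM_SHARED counterparts. A simplified sketch of how the core mm consumes it (the real vm_get_page_prot() in mm/mmap.c additionally folds in arch hooks):

    /* simplified sketch; VM_READ=1, VM_WRITE=2, VM_EXEC=4, VM_SHARED=8 */
    pgprot_t vm_get_page_prot(unsigned long vm_flags)
    {
            return protection_map[vm_flags &
                            (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }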
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -218,14 +218,14 @@ static int memory_block_offline(struct memory_block *mem)
 	struct zone *zone;
 	int ret;
 
-	zone = page_zone(pfn_to_page(start_pfn));
-
 	/*
 	 * Unaccount before offlining, such that unpopulated zone and kthreads
 	 * can properly be torn down in offline_pages().
 	 */
-	if (nr_vmemmap_pages)
+	if (nr_vmemmap_pages) {
+		zone = page_zone(pfn_to_page(start_pfn));
 		adjust_present_page_count(zone, -nr_vmemmap_pages);
+	}
 
 	ret = offline_pages(start_pfn + nr_vmemmap_pages,
 			    nr_pages - nr_vmemmap_pages);
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1855,6 +1855,45 @@ out:
 	return ret;
 }
 
+/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zero starts, will be made upper block aligned.
+ * len: it will be trimmed to the end of current cluster if "start + len"
+ *      is bigger than it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+					u64 start, u64 len)
+{
+	int ret;
+	u64 start_block, end_block, nr_blocks;
+	u64 p_block, offset;
+	u32 cluster, p_cluster, nr_clusters;
+	struct super_block *sb = inode->i_sb;
+	u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+	if (start + len < end)
+		end = start + len;
+
+	start_block = ocfs2_blocks_for_bytes(sb, start);
+	end_block = ocfs2_blocks_for_bytes(sb, end);
+	nr_blocks = end_block - start_block;
+	if (!nr_blocks)
+		return 0;
+
+	cluster = ocfs2_bytes_to_clusters(sb, start);
+	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+				&nr_clusters, NULL);
+	if (ret)
+		return ret;
+	if (!p_cluster)
+		return 0;
+
+	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
 /*
  * Parts of this function taken from xfs_change_file_space()
  */
@@ -1865,7 +1904,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 {
 	int ret;
 	s64 llen;
-	loff_t size;
+	loff_t size, orig_isize;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct buffer_head *di_bh = NULL;
 	handle_t *handle;
@@ -1896,6 +1935,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		goto out_inode_unlock;
 	}
 
+	orig_isize = i_size_read(inode);
 	switch (sr->l_whence) {
 	case 0: /*SEEK_SET*/
 		break;
@@ -1903,7 +1943,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		sr->l_start += f_pos;
 		break;
 	case 2: /*SEEK_END*/
-		sr->l_start += i_size_read(inode);
+		sr->l_start += orig_isize;
 		break;
 	default:
 		ret = -EINVAL;
@@ -1957,7 +1997,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 	default:
 		ret = -EINVAL;
 	}
 
+	/* zeroout eof blocks in the cluster. */
+	if (!ret && change_size && orig_isize < size) {
+		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
+					size - orig_isize);
+		if (!ret)
+			i_size_write(inode, size);
+	}
 	up_write(&OCFS2_I(inode)->ip_alloc_sem);
 	if (ret) {
 		mlog_errno(ret);
@@ -1973,9 +2021,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		goto out_inode_unlock;
 	}
 
-	if (change_size && i_size_read(inode) < size)
-		i_size_write(inode, size);
-
 	inode->i_ctime = inode->i_mtime = current_time(inode);
 	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
 	if (ret < 0)
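The helper exists because raising i_size can expose never-zeroed blocks in the tail of the old EOF cluster. A hypothetical userspace sketch of the kind of sequence the fix guards (mount point, file name, and sizes are illustrative assumptions, not from the patch):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096] = { 0 };
            int fd = open("/mnt/ocfs2/f", O_RDWR | O_CREAT | O_TRUNC, 0644);

            if (fd < 0)
                    return 1;
            write(fd, buf, 10);             /* old EOF lands mid-cluster */
            fallocate(fd, 0, 0, 8192);      /* mode 0 raises i_size */
            pread(fd, buf, 4096, 10);       /* must read back zeroes, not stale blocks */
            return close(fd);
    }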
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -432,6 +432,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
  * To be differentiate with macro pte_mkyoung, this macro is used on platforms
  * where software maintains page access bit.
  */
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+	return pte;
+}
+#define pte_sw_mkyoung	pte_sw_mkyoung
+#endif
+
 #ifndef pte_savedwrite
 #define pte_savedwrite pte_write
 #endif
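The restored fallback is a no-op because most MMUs set the accessed bit in hardware. An architecture that maintains the bit in software overrides the hook so a freshly faulted-in PTE is born young and does not take an immediate second fault; the override amounts to (sketch of the arch-side definition, as on MIPS):

    #define pte_sw_mkyoung	pte_mkyoung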
--- a/init/main.c
+++ b/init/main.c
@@ -1537,7 +1537,7 @@ static noinline void __init kernel_init_freeable(void)
 	 */
 	set_mems_allowed(node_states[N_MEMORY]);
 
-	cad_pid = task_pid(current);
+	cad_pid = get_pid(task_pid(current));
 
 	smp_prepare_cpus(setup_max_cpus);
 
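The reference is needed because cad_pid can later be replaced through /proc/sys/kernel/cad_pid, which put_pid()s the old value; without get_pid() the boot-time struct pid could be freed while still in use. For reference, the helper it now pairs with (include/linux/pid.h):

    static inline struct pid *get_pid(struct pid *pid)
    {
            if (pid)
                    refcount_inc(&pid->count);
            return pid;
    }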
--- a/lib/crc64.c
+++ b/lib/crc64.c
@@ -37,7 +37,7 @@ MODULE_LICENSE("GPL v2");
 /**
  * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
  * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
-	or the previous crc64 value if computing incrementally.
+ *	or the previous crc64 value if computing incrementally.
  * @p: pointer to buffer over which CRC64 is run
  * @len: length of buffer @p
  */
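A minimal sketch of the incremental use the fixed kernel-doc describes: the previous return value is fed back as @crc for the next chunk (helper name is illustrative):

    #include <linux/crc64.h>

    static u64 crc64_of_two_chunks(const void *a, size_t alen,
                                   const void *b, size_t blen)
    {
            u64 crc = 0;                    /* fresh run seeds with 0 (or (u64)~0) */

            crc = crc64_be(crc, a, alen);
            crc = crc64_be(crc, b, blen);   /* same result as one pass over a||b */
            return crc;
    }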
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -192,7 +192,7 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
 
 	pr_debug("Validating PMD advanced\n");
 	/* Align the address wrt HPAGE_PMD_SIZE */
-	vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;
+	vaddr &= HPAGE_PMD_MASK;
 
 	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 
@@ -330,7 +330,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
 
 	pr_debug("Validating PUD advanced\n");
 	/* Align the address wrt HPAGE_PUD_SIZE */
-	vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;
+	vaddr &= HPAGE_PUD_MASK;
 
 	set_pud_at(mm, vaddr, pudp, pud);
 	pudp_set_wrprotect(mm, vaddr, pudp);
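A worked example of the change, assuming a 2 MiB PMD huge-page size:

    /* HPAGE_PMD_MASK == ~0x1fffffUL; take vaddr = 0x201000:
     *
     *   before: (0x201000 & ~0x1fffffUL) + 0x200000 == 0x400000
     *           (bumped to the NEXT 2 MiB boundary, past the range the
     *            test actually set up for vaddr)
     *   after:   0x201000 & ~0x1fffffUL            == 0x200000
     *           (rounded DOWN, still inside the prepared mapping)
     */

The PUD case is identical with HPAGE_PUD_MASK/HPAGE_PUD_SIZE.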
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1793,7 +1793,7 @@ retry:
 			SetPageHWPoison(page);
 			ClearPageHWPoison(head);
 		}
-		remove_hugetlb_page(h, page, false);
+		remove_hugetlb_page(h, head, false);
 		h->max_huge_pages--;
 		spin_unlock_irq(&hugetlb_lock);
 		update_and_free_page(h, head);
@@ -4889,10 +4889,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 		if (!page)
 			goto out;
 	} else if (!*pagep) {
-		ret = -ENOMEM;
+		/* If a page already exists, then it's UFFDIO_COPY for
+		 * a non-missing case. Return -EEXIST.
+		 */
+		if (vm_shared &&
+		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
+			ret = -EEXIST;
+			goto out;
+		}
+
 		page = alloc_huge_page(dst_vma, dst_addr, 0);
-		if (IS_ERR(page))
+		if (IS_ERR(page)) {
+			ret = -ENOMEM;
 			goto out;
+		}
 
 		ret = copy_huge_page_from_user(page,
 				(const void __user *) src_addr,
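The restored -EEXIST return matches what UFFDIO_COPY callers already expect: an already-present page means the fault was not MISSING, not that memory ran out. A minimal userspace sketch (uffd setup elided; function name is illustrative):

    #include <sys/ioctl.h>
    #include <linux/userfaultfd.h>
    #include <errno.h>

    static int uffd_copy_page(int uffd, unsigned long dst,
                              unsigned long src, unsigned long len)
    {
            struct uffdio_copy copy = {
                    .dst = dst,
                    .src = src,
                    .len = len,
                    .mode = 0,
            };

            if (ioctl(uffd, UFFDIO_COPY, &copy) == -1) {
                    if (errno == EEXIST)
                            return 0;       /* page already mapped: fine, skip */
                    return -1;
            }
            return 0;
    }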
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -220,8 +220,8 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
 /**
  * kasan_populate_early_shadow - populate shadow memory region with
  *                               kasan_early_shadow_page
- * @shadow_start - start of the memory range to populate
- * @shadow_end - end of the memory range to populate
+ * @shadow_start: start of the memory range to populate
+ * @shadow_end: end of the memory range to populate
  */
 int __ref kasan_populate_early_shadow(const void *shadow_start,
 					const void *shadow_end)
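Both changed lines are kernel-doc syntax: parameter descriptions must be introduced with a colon; the dash form makes scripts/kernel-doc warn. The canonical shape:

    /**
     * function_name - short one-line description
     * @arg: parameter description, introduced by a colon rather than a dash
     *
     * Optional longer description.
     */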
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -627,10 +627,10 @@ static void toggle_allocation_gate(struct work_struct *work)
 		 * During low activity with no allocations we might wait a
 		 * while; let's avoid the hung task warning.
 		 */
-		wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
-				   sysctl_hung_task_timeout_secs * HZ / 2);
+		wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
+					sysctl_hung_task_timeout_secs * HZ / 2);
 	} else {
-		wait_event(allocation_wait, atomic_read(&kfence_allocation_gate));
+		wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
 	}
 
 	/* Disable static key and reset timer. */
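The wait_event_idle*() variants park the task in TASK_IDLE rather than TASK_UNINTERRUPTIBLE. Since include/linux/sched.h defines

    #define TASK_IDLE	(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

the sleep stays uninterruptible but no longer counts toward the load average, and the hung-task detector, which only flags plain TASK_UNINTERRUPTIBLE sleepers, ignores it.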
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2939,6 +2939,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		}
 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
 		entry = mk_pte(new_page, vma->vm_page_prot);
+		entry = pte_sw_mkyoung(entry);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
 		/*
@@ -3602,6 +3603,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	__SetPageUptodate(page);
 
 	entry = mk_pte(page, vma->vm_page_prot);
+	entry = pte_sw_mkyoung(entry);
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
 
@@ -3786,6 +3788,8 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
 
 	if (prefault && arch_wants_old_prefaulted_pte())
 		entry = pte_mkold(entry);
+	else
+		entry = pte_sw_mkyoung(entry);
 
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -9158,6 +9158,8 @@ bool take_page_off_buddy(struct page *page)
 			del_page_from_free_list(page_head, zone, page_order);
 			break_down_buddy_pages(zone, page_head, page, 0,
 					      page_order, migratetype);
+			if (!is_migrate_isolate(migratetype))
+				__mod_zone_freepage_state(zone, -1, migratetype);
 			ret = true;
 			break;
 		}
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -10,6 +10,7 @@
 /proc-self-map-files-002
 /proc-self-syscall
 /proc-self-wchan
+/proc-subset-pid
 /proc-uptime-001
 /proc-uptime-002
 /read