Merge tag 'mm-hotfixes-stable-2023-09-23-10-31' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "13 hotfixes, 10 of which pertain to post-6.5 issues. The other three
  are cc:stable"

* tag 'mm-hotfixes-stable-2023-09-23-10-31' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  proc: nommu: fix empty /proc/<pid>/maps
  filemap: add filemap_map_order0_folio() to handle order0 folio
  proc: nommu: /proc/<pid>/maps: release mmap read lock
  mm: memcontrol: fix GFP_NOFS recursion in memory.high enforcement
  pidfd: prevent a kernel-doc warning
  argv_split: fix kernel-doc warnings
  scatterlist: add missing function params to kernel-doc
  selftests/proc: fixup proc-empty-vm test after KSM changes
  revert "scripts/gdb/symbols: add specific ko module load command"
  selftests: link libasan statically for tests with -fsanitize=address
  task_work: add kerneldoc annotation for 'data' argument
  mm: page_alloc: fix CMA and HIGHATOMIC landing on the wrong buddy list
  sh: mm: re-add lost __ref to ioremap_prot() to fix modpost warning
This commit is contained in: 85eba5f175
@@ -72,8 +72,8 @@ __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
 #define __ioremap_29bit(offset, size, prot)		NULL
 #endif /* CONFIG_29BIT */
 
-void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
-			   unsigned long prot)
+void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size,
+				 unsigned long prot)
 {
 	void __iomem *mapped;
 	pgprot_t pgprot = __pgprot(prot);
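Context for the sh fix: __ref (from include/linux/init.h) marks a function as
intentionally referencing __init/__exit code, which silences modpost's
section-mismatch warning. A minimal sketch of the pattern, using hypothetical
function names rather than the actual sh code:

	#include <linux/init.h>
	#include <linux/types.h>

	static int __init one_time_setup(void)	/* placed in .init.text */
	{
		return 0;
	}

	/*
	 * Without __ref, modpost warns that .text references .init.text.
	 * __ref asserts the reference is intentional; the caller must still
	 * guarantee the __init code only runs before init memory is freed.
	 */
	int __ref maybe_call_setup(bool during_boot)
	{
		if (during_boot)
			return one_time_setup();
		return 0;
	}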
@@ -289,9 +289,7 @@ struct proc_maps_private {
 	struct inode *inode;
 	struct task_struct *task;
 	struct mm_struct *mm;
-#ifdef CONFIG_MMU
 	struct vma_iterator iter;
-#endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *task_mempolicy;
 #endif
@@ -175,15 +175,28 @@ static int show_map(struct seq_file *m, void *_p)
 	return nommu_vma_show(m, _p);
 }
 
-static void *m_start(struct seq_file *m, loff_t *pos)
+static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
+					   loff_t *ppos)
+{
+	struct vm_area_struct *vma = vma_next(&priv->iter);
+
+	if (vma) {
+		*ppos = vma->vm_start;
+	} else {
+		*ppos = -1UL;
+	}
+
+	return vma;
+}
+
+static void *m_start(struct seq_file *m, loff_t *ppos)
 {
 	struct proc_maps_private *priv = m->private;
+	unsigned long last_addr = *ppos;
 	struct mm_struct *mm;
-	struct vm_area_struct *vma;
-	unsigned long addr = *pos;
 
-	/* See m_next(). Zero at the start or after lseek. */
-	if (addr == -1UL)
+	/* See proc_get_vma(). Zero at the start or after lseek. */
+	if (last_addr == -1UL)
 		return NULL;
 
 	/* pin the task and mm whilst we play with them */
@@ -192,44 +205,41 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 		return ERR_PTR(-ESRCH);
 
 	mm = priv->mm;
-	if (!mm || !mmget_not_zero(mm))
+	if (!mm || !mmget_not_zero(mm)) {
+		put_task_struct(priv->task);
+		priv->task = NULL;
 		return NULL;
+	}
 
 	if (mmap_read_lock_killable(mm)) {
 		mmput(mm);
+		put_task_struct(priv->task);
+		priv->task = NULL;
 		return ERR_PTR(-EINTR);
 	}
 
-	/* start the next element from addr */
-	vma = find_vma(mm, addr);
-	if (vma)
-		return vma;
+	vma_iter_init(&priv->iter, mm, last_addr);
 
-	mmap_read_unlock(mm);
-	mmput(mm);
-	return NULL;
+	return proc_get_vma(priv, ppos);
 }
 
-static void m_stop(struct seq_file *m, void *_vml)
+static void m_stop(struct seq_file *m, void *v)
 {
 	struct proc_maps_private *priv = m->private;
+	struct mm_struct *mm = priv->mm;
 
-	if (!IS_ERR_OR_NULL(_vml)) {
-		mmap_read_unlock(priv->mm);
-		mmput(priv->mm);
-	}
-	if (priv->task) {
-		put_task_struct(priv->task);
-		priv->task = NULL;
-	}
+	if (!priv->task)
+		return;
+
+	mmap_read_unlock(mm);
+	mmput(mm);
+	put_task_struct(priv->task);
+	priv->task = NULL;
 }
 
-static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
+static void *m_next(struct seq_file *m, void *_p, loff_t *ppos)
 {
-	struct vm_area_struct *vma = _p;
-
-	*pos = vma->vm_end;
-	return find_vma(vma->vm_mm, vma->vm_end);
+	return proc_get_vma(m->private, ppos);
 }
 
 static const struct seq_operations proc_pid_maps_ops = {
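Net effect of the two task_nommu.c changes: the iterator state lives in
priv->iter, the mmap read lock is dropped in m_stop() even when iteration
stops early, and *ppos carries the resume address (-1UL meaning "done").
A small userspace check that reads the file these seq_ops back -- a sketch,
not kernel code:

	#include <stdio.h>

	int main(void)
	{
		char line[512];
		FILE *f = fopen("/proc/self/maps", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* On !CONFIG_MMU kernels this printed nothing before the fix. */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}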
@@ -920,7 +920,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
 	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
 }
 
-void mem_cgroup_handle_over_high(void);
+void mem_cgroup_handle_over_high(gfp_t gfp_mask);
 
 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
 
@@ -1458,7 +1458,7 @@ static inline void mem_cgroup_unlock_pages(void)
 	rcu_read_unlock();
 }
 
-static inline void mem_cgroup_handle_over_high(void)
+static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
 {
 }
 
@@ -55,7 +55,7 @@ static inline void resume_user_mode_work(struct pt_regs *regs)
 	}
 #endif
 
-	mem_cgroup_handle_over_high();
+	mem_cgroup_handle_over_high(GFP_KERNEL);
 	blkcg_maybe_throttle_current();
 
 	rseq_handle_notify_resume(NULL, regs);
@@ -609,7 +609,7 @@ int pidfd_create(struct pid *pid, unsigned int flags)
 }
 
 /**
- * pidfd_open() - Open new pid file descriptor.
+ * sys_pidfd_open() - Open new pid file descriptor.
  *
  * @pid: pid for which to retrieve a pidfd
  * @flags: flags to pass
@@ -78,6 +78,7 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
  * task_work_cancel_match - cancel a pending work added by task_work_add()
  * @task: the task which should execute the work
  * @match: match function to call
+ * @data: data to be passed in to match function
  *
  * RETURNS:
  * The found work or NULL if not found.
@@ -28,7 +28,7 @@ static int count_argc(const char *str)
 
 /**
  * argv_free - free an argv
- * @argv - the argument vector to be freed
+ * @argv: the argument vector to be freed
  *
  * Frees an argv and the strings it points to.
 */
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(argv_free);
  * @str: the string to be split
  * @argcp: returned argument count
  *
- * Returns an array of pointers to strings which are split out from
+ * Returns: an array of pointers to strings which are split out from
  * @str. This is performed by strictly splitting on white-space; no
  * quote processing is performed. Multiple whitespace characters are
  * considered to be a single argument separator. The returned array
@@ -265,7 +265,8 @@ EXPORT_SYMBOL(sg_free_table);
  * @table: The sg table header to use
  * @nents: Number of entries in sg list
  * @max_ents: The maximum number of entries the allocator returns per call
- * @nents_first_chunk: Number of entries int the (preallocated) first
+ * @first_chunk: first SGL if preallocated (may be %NULL)
+ * @nents_first_chunk: Number of entries in the (preallocated) first
  * scatterlist chunk, 0 means no such preallocated chunk provided by user
  * @gfp_mask: GFP allocation mask
  * @alloc_fn: Allocator to use
@@ -788,6 +789,7 @@ EXPORT_SYMBOL(__sg_page_iter_dma_next);
  * @miter: sg mapping iter to be started
  * @sgl: sg list to iterate over
  * @nents: number of sg entries
+ * @flags: sg iterator flags
  *
  * Description:
  *   Starts mapping iterator @miter.
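The four documentation fixes above (pidfd, task_work, argv_split,
scatterlist) all address the same kernel-doc rules: the comment must name the
symbol as it is actually defined, every parameter needs an "@name:" line with
a colon, and return values belong under "Returns:". A compliant comment on a
made-up function, for reference:

	/**
	 * frob_buffer - frobnicate a byte buffer (hypothetical example)
	 * @buf:   buffer to frobnicate
	 * @len:   number of bytes in @buf
	 * @flags: behaviour flags
	 *
	 * Longer description goes here, after the parameter block.
	 *
	 * Returns: 0 on success or a negative errno on failure.
	 */
	static int frob_buffer(void *buf, size_t len, unsigned int flags);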
diff --git a/mm/filemap.c b/mm/filemap.c (69 lines changed)
@@ -3475,13 +3475,11 @@ skip:
  */
 static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			struct folio *folio, unsigned long start,
-			unsigned long addr, unsigned int nr_pages)
+			unsigned long addr, unsigned int nr_pages,
+			unsigned int *mmap_miss)
 {
 	vm_fault_t ret = 0;
-	struct vm_area_struct *vma = vmf->vma;
-	struct file *file = vma->vm_file;
 	struct page *page = folio_page(folio, start);
-	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
 	unsigned int count = 0;
 	pte_t *old_ptep = vmf->pte;
 
@@ -3489,8 +3487,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 		if (PageHWPoison(page + count))
 			goto skip;
 
-		if (mmap_miss > 0)
-			mmap_miss--;
+		(*mmap_miss)++;
 
 		/*
 		 * NOTE: If there're PTE markers, we'll leave them to be
@@ -3525,7 +3522,35 @@ skip:
 	}
 
 	vmf->pte = old_ptep;
-	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
+
+	return ret;
+}
+
+static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
+		struct folio *folio, unsigned long addr,
+		unsigned int *mmap_miss)
+{
+	vm_fault_t ret = 0;
+	struct page *page = &folio->page;
+
+	if (PageHWPoison(page))
+		return ret;
+
+	(*mmap_miss)++;
+
+	/*
+	 * NOTE: If there're PTE markers, we'll leave them to be
+	 * handled in the specific fault path, and it'll prohibit
+	 * the fault-around logic.
+	 */
+	if (!pte_none(ptep_get(vmf->pte)))
+		return ret;
+
+	if (vmf->address == addr)
+		ret = VM_FAULT_NOPAGE;
+
+	set_pte_range(vmf, folio, page, 1, addr);
+	folio_ref_inc(folio);
 
 	return ret;
 }
@@ -3541,7 +3566,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
 	struct folio *folio;
 	vm_fault_t ret = 0;
-	int nr_pages = 0;
+	unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved;
 
 	rcu_read_lock();
 	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
@@ -3569,25 +3594,27 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		end = folio->index + folio_nr_pages(folio) - 1;
 		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
 
-		/*
-		 * NOTE: If there're PTE markers, we'll leave them to be
-		 * handled in the specific fault path, and it'll prohibit the
-		 * fault-around logic.
-		 */
-		if (!pte_none(ptep_get(vmf->pte)))
-			goto unlock;
-
-		ret |= filemap_map_folio_range(vmf, folio,
-				xas.xa_index - folio->index, addr, nr_pages);
+		if (!folio_test_large(folio))
+			ret |= filemap_map_order0_folio(vmf,
+					folio, addr, &mmap_miss);
+		else
+			ret |= filemap_map_folio_range(vmf, folio,
+					xas.xa_index - folio->index, addr,
+					nr_pages, &mmap_miss);
 
-unlock:
 		folio_unlock(folio);
 		folio_put(folio);
-		folio = next_uptodate_folio(&xas, mapping, end_pgoff);
-	} while (folio);
+	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
 	rcu_read_unlock();
+
+	mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
+	if (mmap_miss >= mmap_miss_saved)
+		WRITE_ONCE(file->f_ra.mmap_miss, 0);
+	else
+		WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);
+
 	return ret;
 }
 EXPORT_SYMBOL(filemap_map_pages);
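The mmap_miss rework above replaces per-page decrements of
file->f_ra.mmap_miss with a local counter that the callees increment and the
caller folds back once per fault-around, saturating at zero. Roughly this
pattern (fold_mmap_miss is a hypothetical name, not a kernel helper):

	static void fold_mmap_miss(struct file_ra_state *ra,
				   unsigned int mapped_misses)
	{
		unsigned int saved = READ_ONCE(ra->mmap_miss);

		/*
		 * Subtract the pages just mapped, clamping at zero so the
		 * unsigned counter cannot wrap below zero.
		 */
		if (mapped_misses >= saved)
			WRITE_ONCE(ra->mmap_miss, 0);
		else
			WRITE_ONCE(ra->mmap_miss, saved - mapped_misses);
	}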
|
@ -2555,7 +2555,7 @@ static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
|
|||
* Scheduled by try_charge() to be executed from the userland return path
|
||||
* and reclaims memory over the high limit.
|
||||
*/
|
||||
void mem_cgroup_handle_over_high(void)
|
||||
void mem_cgroup_handle_over_high(gfp_t gfp_mask)
|
||||
{
|
||||
unsigned long penalty_jiffies;
|
||||
unsigned long pflags;
|
||||
|
@@ -2583,7 +2583,7 @@ retry_reclaim:
 	 */
 	nr_reclaimed = reclaim_high(memcg,
 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
-				    GFP_KERNEL);
+				    gfp_mask);
 
 	/*
 	 * memory.high is breached and reclaim is unable to keep up. Throttle
@@ -2819,7 +2819,7 @@ done_restock:
 	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
 	    !(current->flags & PF_MEMALLOC) &&
 	    gfpflags_allow_blocking(gfp_mask)) {
-		mem_cgroup_handle_over_high();
+		mem_cgroup_handle_over_high(gfp_mask);
 	}
 	return 0;
 }
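The memcontrol pieces above fit together as follows: try_charge() now
forwards its gfp_mask when it punts high-limit reclaim to
mem_cgroup_handle_over_high(), so a GFP_NOFS allocation can no longer recurse
into filesystem reclaim, while the return-to-userspace hook in
resume_user_mode.h passes GFP_KERNEL because no fs locks are held there.
Inside reclaim_high() the mask then reaches the actual reclaim call; roughly
(a sketch of the call site, not the full function):

	nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
						     gfp_mask,
						     MEMCG_RECLAIM_MAY_SWAP);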
@@ -2400,7 +2400,7 @@ void free_unref_page(struct page *page, unsigned int order)
 	struct per_cpu_pages *pcp;
 	struct zone *zone;
 	unsigned long pfn = page_to_pfn(page);
-	int migratetype;
+	int migratetype, pcpmigratetype;
 
 	if (!free_unref_page_prepare(page, pfn, order))
 		return;
@@ -2408,24 +2408,24 @@ void free_unref_page(struct page *page, unsigned int order)
 	/*
 	 * We only track unmovable, reclaimable and movable on pcp lists.
 	 * Place ISOLATE pages on the isolated list because they are being
-	 * offlined but treat HIGHATOMIC as movable pages so we can get those
-	 * areas back if necessary. Otherwise, we may have to free
+	 * offlined but treat HIGHATOMIC and CMA as movable pages so we can
+	 * get those areas back if necessary. Otherwise, we may have to free
 	 * excessively into the page allocator
 	 */
-	migratetype = get_pcppage_migratetype(page);
+	migratetype = pcpmigratetype = get_pcppage_migratetype(page);
 	if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
 		if (unlikely(is_migrate_isolate(migratetype))) {
 			free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
 			return;
 		}
-		migratetype = MIGRATE_MOVABLE;
+		pcpmigratetype = MIGRATE_MOVABLE;
 	}
 
 	zone = page_zone(page);
 	pcp_trylock_prepare(UP_flags);
 	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 	if (pcp) {
-		free_unref_page_commit(zone, pcp, page, migratetype, order);
+		free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
 		pcp_spin_unlock(pcp);
 	} else {
 		free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
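Why free_unref_page() needs two migratetypes: pcp lists only track
UNMOVABLE/MOVABLE/RECLAIMABLE, so HIGHATOMIC and CMA pages are filed under
MOVABLE there -- but when the pcp trylock fails and the page goes straight to
the buddy allocator, free_one_page() must see the original type, or those
reserves land on the movable freelist and leak away. Condensed from the hunk
above, the demotion stays local to the pcp path:

	migratetype = pcpmigratetype = get_pcppage_migratetype(page);
	if (unlikely(migratetype >= MIGRATE_PCPTYPES))
		pcpmigratetype = MIGRATE_MOVABLE;	/* pcp bookkeeping only */

	/* ... later, pcp path vs. buddy fallback: */
	free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
	free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);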
@@ -111,12 +111,11 @@ lx-symbols command."""
         return "{textaddr} {sections}".format(
             textaddr=textaddr, sections="".join(args))
 
-    def load_module_symbols(self, module, module_file=None):
+    def load_module_symbols(self, module):
         module_name = module['name'].string()
         module_addr = str(module['mem'][constants.LX_MOD_TEXT]['base']).split()[0]
 
-        if not module_file:
-            module_file = self._get_module_file(module_name)
+        module_file = self._get_module_file(module_name)
         if not module_file and not self.module_files_updated:
             self._update_module_files()
             module_file = self._get_module_file(module_name)
@@ -139,19 +138,6 @@ lx-symbols command."""
         else:
             gdb.write("no module object found for '{0}'\n".format(module_name))
 
-    def load_ko_symbols(self, mod_path):
-        self.loaded_modules = []
-        module_list = modules.module_list()
-
-        for module in module_list:
-            module_name = module['name'].string()
-            module_pattern = ".*/{0}\.ko(?:.debug)?$".format(
-                module_name.replace("_", r"[_\-]"))
-            if re.match(module_pattern, mod_path) and os.path.exists(mod_path):
-                self.load_module_symbols(module, mod_path)
-                return
-        raise gdb.GdbError("%s is not a valid .ko\n" % mod_path)
-
     def load_all_symbols(self):
         gdb.write("loading vmlinux\n")
 
@@ -190,11 +176,6 @@ lx-symbols command."""
         self.module_files = []
         self.module_files_updated = False
 
-        argv = gdb.string_to_argv(arg)
-        if len(argv) == 1:
-            self.load_ko_symbols(argv[0])
-            return
-
         self.load_all_symbols()
 
         if hasattr(gdb, 'Breakpoint'):
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-or-later
 
-CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined $(KHDR_INCLUDES)
+CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined -static-libasan $(KHDR_INCLUDES)
 TEST_GEN_PROGS := fchmodat2_test
 
 include ../lib.mk
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-or-later
 
-CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined
+CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined -static-libasan
 TEST_GEN_PROGS := openat2_test resolve_test rename_attack_test
 
 include ../lib.mk
@@ -267,6 +267,7 @@ static const char g_smaps_rollup[] =
 "Private_Dirty:         0 kB\n"
 "Referenced:            0 kB\n"
 "Anonymous:             0 kB\n"
+"KSM:                   0 kB\n"
 "LazyFree:              0 kB\n"
 "AnonHugePages:         0 kB\n"
 "ShmemPmdMapped:        0 kB\n"