Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "16 patches.

  Subsystems affected by this patch series: xtensa, sh, ocfs2, scripts,
  lib, and mm (memory-failure, kasan, damon, shmem, tools, pagecache,
  debug, and pagemap)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: fix uninitialized use in overcommit_policy_handler
  mm/memory_failure: fix the missing pte_unmap() call
  kasan: always respect CONFIG_KASAN_STACK
  sh: pgtable-3level: fix cast to pointer from integer of different size
  mm/debug: sync up latest migrate_reason to migrate_reason_names
  mm/debug: sync up MR_CONTIG_RANGE and MR_LONGTERM_PIN
  mm: fs: invalidate bh_lrus for only cold path
  lib/zlib_inflate/inffast: check config in C to avoid unused function warning
  tools/vm/page-types: remove dependency on opt_file for idle page tracking
  scripts/sorttable: riscv: fix undeclared identifier 'EM_RISCV' error
  ocfs2: drop acl cache for directories too
  mm/shmem.c: fix judgment error in shmem_is_huge()
  xtensa: increase size of gcc stack frame check
  mm/damon: don't use strnlen() with known-bogus source length
  kasan: fix Kconfig check of CC_HAS_WORKING_NOSANITIZE_ADDRESS
  mm, hwpoison: add is_free_buddy_page() in HWPoisonHandlable()
commit a3b397b4ff
@@ -34,7 +34,7 @@ typedef struct { unsigned long long pmd; } pmd_t;
 
 static inline pmd_t *pud_pgtable(pud_t pud)
 {
-	return (pmd_t *)pud_val(pud);
+	return (pmd_t *)(unsigned long)pud_val(pud);
 }
 
 /* only used by the stubbed out hugetlb gup code, should never be called */
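The extra cast matters on 32-bit sh builds where page-table entries are 64-bit (note the "unsigned long long pmd" typedef in the hunk context) but pointers are 32-bit: casting the 64-bit value straight to a pointer triggers gcc's -Wint-to-pointer-cast. A minimal userspace sketch of the same idiom (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t entry = 0x1000;	/* stand-in for a 64-bit page-table entry */

	/* Casting a 64-bit integer straight to a pointer warns when pointers
	 * are 32-bit; going through unsigned long first makes the truncation
	 * explicit and silences -Wint-to-pointer-cast. */
	void *p = (void *)(unsigned long)entry;

	printf("%p\n", p);
	return 0;
}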
@@ -1425,12 +1425,16 @@ void invalidate_bh_lrus(void)
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
-void invalidate_bh_lrus_cpu(int cpu)
+/*
+ * It's called from workqueue context so we need a bh_lru_lock to close
+ * the race with preemption/irq.
+ */
+void invalidate_bh_lrus_cpu(void)
 {
 	struct bh_lru *b;
 
 	bh_lru_lock();
-	b = per_cpu_ptr(&bh_lrus, cpu);
+	b = this_cpu_ptr(&bh_lrus);
 	__invalidate_bh_lrus(b);
 	bh_lru_unlock();
 }
@@ -3951,7 +3951,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
 		oi = OCFS2_I(inode);
 		oi->ip_dir_lock_gen++;
 		mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
-		goto out;
+		goto out_forget;
 	}
 
 	if (!S_ISREG(inode->i_mode))
@@ -3982,6 +3982,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
 		filemap_fdatawait(mapping);
 	}
 
+out_forget:
 	forget_all_cached_acls(inode);
 
 out:
@@ -194,7 +194,7 @@ void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
 struct buffer_head *__bread_gfp(struct block_device *,
 				sector_t block, unsigned size, gfp_t gfp);
 void invalidate_bh_lrus(void);
-void invalidate_bh_lrus_cpu(int cpu);
+void invalidate_bh_lrus_cpu(void);
 bool has_bh_in_lru(int cpu, void *dummy);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
@@ -408,7 +408,7 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; }
 static inline void invalidate_inode_buffers(struct inode *inode) {}
 static inline int remove_inode_buffers(struct inode *inode) { return 1; }
 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
-static inline void invalidate_bh_lrus_cpu(int cpu) {}
+static inline void invalidate_bh_lrus_cpu(void) {}
 static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
 #define buffer_heads_over_limit 0
 
@@ -19,6 +19,11 @@ struct migration_target_control;
  */
 #define MIGRATEPAGE_SUCCESS	0
 
+/*
+ * Keep sync with:
+ * - macro MIGRATE_REASON in include/trace/events/migrate.h
+ * - migrate_reason_names[MR_TYPES] in mm/debug.c
+ */
 enum migrate_reason {
 	MR_COMPACTION,
 	MR_MEMORY_FAILURE,
@@ -32,7 +37,6 @@ enum migrate_reason {
 	MR_TYPES
 };
 
-/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
 extern const char *migrate_reason_names[MR_TYPES];
 
 #ifdef CONFIG_MIGRATION
@@ -346,7 +346,7 @@ config FRAME_WARN
 	int "Warn for stack frames larger than"
 	range 0 8192
 	default 2048 if GCC_PLUGIN_LATENT_ENTROPY
-	default 1536 if (!64BIT && PARISC)
+	default 1536 if (!64BIT && (PARISC || XTENSA))
 	default 1024 if (!64BIT && !PARISC)
 	default 2048 if 64BIT
 	help
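FRAME_WARN is the kernel's knob for the compiler's per-function stack-frame warning; raising the 32-bit default to 1536 for XTENSA mirrors what was already done for PARISC. As a rough illustration (a hypothetical standalone file, not kernel code), the same warning can be reproduced in userspace:

/* Compile with:  gcc -c -Wframe-larger-than=1024 frame_demo.c
 * (-Wframe-larger-than= is the flag FRAME_WARN feeds); the 2048-byte
 * local below exceeds the limit and triggers a frame-size warning. */
#include <string.h>

int big_frame(void)
{
	char buf[2048];			/* large on-stack buffer */

	memset(buf, 0, sizeof(buf));
	return buf[0];
}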
@@ -66,6 +66,7 @@ choice
 config KASAN_GENERIC
 	bool "Generic mode"
 	depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
+	depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
 	select SLUB_DEBUG if SLUB
 	select CONSTRUCTORS
 	help
@@ -86,6 +87,7 @@ config KASAN_GENERIC
 config KASAN_SW_TAGS
 	bool "Software tag-based mode"
 	depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
+	depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
 	select SLUB_DEBUG if SLUB
 	select CONSTRUCTORS
 	help
@@ -253,13 +253,12 @@ void inflate_fast(z_streamp strm, unsigned start)
 
 			sfrom = (unsigned short *)(from);
 			loops = len >> 1;
-			do
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-			    *sout++ = *sfrom++;
-#else
-			    *sout++ = get_unaligned16(sfrom++);
-#endif
-			while (--loops);
+			do {
+				if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+					*sout++ = *sfrom++;
+				else
+					*sout++ = get_unaligned16(sfrom++);
+			} while (--loops);
 			out = (unsigned char *)sout;
 			from = (unsigned char *)sfrom;
 		} else { /* dist == 1 or dist == 2 */
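Replacing the preprocessor branch with IS_ENABLED() keeps both arms visible to the compiler, so get_unaligned16() is always referenced (no more unused-function warning) while the dead arm is still dropped, since IS_ENABLED() folds to a compile-time constant. For context, a userspace stand-in for what a get_unaligned16()-style helper does (hypothetical names, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for get_unaligned16(): copy two bytes so the
 * compiler never emits a potentially-trapping unaligned 16-bit load. */
static uint16_t read_unaligned16(const void *p)
{
	uint16_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	unsigned char buf[5] = { 0x00, 0x34, 0x12, 0x78, 0x56 };

	/* buf + 1 is deliberately misaligned for a 16-bit access. */
	printf("0x%04x 0x%04x\n", read_unaligned16(buf + 1),
	       read_unaligned16(buf + 3));
	return 0;
}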
@@ -20,27 +20,27 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
 	ssize_t nr_integers = 0, i;
 
 	question = "123";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
 	KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
 	kfree(answers);
 
 	question = "123abc";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
 	KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
 	kfree(answers);
 
 	question = "a123";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
 	kfree(answers);
 
 	question = "12 35";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
 	for (i = 0; i < nr_integers; i++)
@@ -48,7 +48,7 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
 	kfree(answers);
 
 	question = "12 35 46";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers);
 	for (i = 0; i < nr_integers; i++)
@@ -56,7 +56,7 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
 	kfree(answers);
 
 	question = "12 35 abc 46";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
 	for (i = 0; i < 2; i++)
@@ -64,13 +64,13 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
 	kfree(answers);
 
 	question = "";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
 	kfree(answers);
 
 	question = "\n";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
 	kfree(answers);
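Every "question" here is a NUL-terminated string literal, so bounding the scan with strnlen(question, 128) adds nothing (the 128 bound bears no relation to any buffer, hence "known-bogus"); plain strlen() states the intent. A trivial userspace check of the equivalence:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *question = "12 35 abc 46";	/* NUL-terminated literal */

	/* For a proper C string shorter than the bound the two calls agree;
	 * the bound only matters for buffers that might lack a terminator. */
	printf("strlen=%zu strnlen=%zu\n",
	       strlen(question), strnlen(question, 128));
	return 0;
}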
@@ -24,7 +24,9 @@ const char *migrate_reason_names[MR_TYPES] = {
 	"syscall_or_cpuset",
 	"mempolicy_mbind",
 	"numa_misplaced",
-	"cma",
+	"contig_range",
+	"longterm_pin",
+	"demotion",
 };
 
 const struct trace_print_flags pageflag_names[] = {
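This string table has to stay in step with enum migrate_reason (the migrate.h hunk above adds a comment saying exactly that). A compile-time guard is one way to catch such drift; the sketch below is an abbreviated, hypothetical mirror of the pair, not the kernel's actual declarations:

#include <stdio.h>

/* Abbreviated mirror of the enum/array pair; the real enum in
 * include/linux/migrate.h has more entries. */
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_CONTIG_RANGE,
	MR_LONGTERM_PIN,
	MR_DEMOTION,
	MR_TYPES
};

static const char * const migrate_reason_names[] = {
	"compaction",
	"memory_failure",
	"contig_range",
	"longterm_pin",
	"demotion",
};

/* Compile-time check that the table and the enum have the same length
 * (the kernel relies on the "keep sync" comments instead). */
_Static_assert(sizeof(migrate_reason_names) / sizeof(migrate_reason_names[0]) == MR_TYPES,
	       "migrate_reason_names out of sync with enum migrate_reason");

int main(void)
{
	for (int i = 0; i < MR_TYPES; i++)
		printf("%d: %s\n", i, migrate_reason_names[i]);
	return 0;
}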
@@ -306,6 +306,7 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
 					       struct vm_area_struct *vma)
 {
 	unsigned long address = vma_address(page, vma);
+	unsigned long ret = 0;
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
@@ -329,11 +330,10 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
 	if (pmd_devmap(*pmd))
 		return PMD_SHIFT;
 	pte = pte_offset_map(pmd, address);
-	if (!pte_present(*pte))
-		return 0;
-	if (pte_devmap(*pte))
-		return PAGE_SHIFT;
-	return 0;
+	if (pte_present(*pte) && pte_devmap(*pte))
+		ret = PAGE_SHIFT;
+	pte_unmap(pte);
+	return ret;
 }
 
 /*
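The rewrite removes the early returns between pte_offset_map() and pte_unmap(), so the mapping is always dropped exactly once; with highmem page tables a missed pte_unmap() can leak the temporary mapping. The same acquire/compute/release shape, sketched with an ordinary userspace mapping (an illustrative analogy only, not kernel code):

#include <stdio.h>
#include <sys/mman.h>

/* Mirror of the fix's shape: no early return between taking the mapping
 * and releasing it; the result travels in a local variable instead. */
static long probe_first_byte(void)
{
	long ret = 0;
	unsigned char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return -1;

	if (p[0] == 0)			/* anonymous pages start zeroed */
		ret = 1;

	munmap(p, 4096);		/* counterpart of pte_unmap() here */
	return ret;
}

int main(void)
{
	printf("%ld\n", probe_first_byte());
	return 0;
}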
|
|||
*/
|
||||
static inline bool HWPoisonHandlable(struct page *page)
|
||||
{
|
||||
return PageLRU(page) || __PageMovable(page);
|
||||
return PageLRU(page) || __PageMovable(page) || is_free_buddy_page(page);
|
||||
}
|
||||
|
||||
static int __get_hwpoison_page(struct page *page)
|
||||
|
|
|
@@ -490,9 +490,9 @@ bool shmem_is_huge(struct vm_area_struct *vma,
 	case SHMEM_HUGE_ALWAYS:
 		return true;
 	case SHMEM_HUGE_WITHIN_SIZE:
-		index = round_up(index, HPAGE_PMD_NR);
+		index = round_up(index + 1, HPAGE_PMD_NR);
 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
-		if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index)
+		if (i_size >> PAGE_SHIFT >= index)
 			return true;
 		fallthrough;
 	case SHMEM_HUGE_ADVISE:
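The off-by-one matters when index is already HPAGE_PMD_NR-aligned: round_up(index, HPAGE_PMD_NR) then returns index itself instead of the end of the huge-page extent containing it, so the size check compares against the wrong boundary. A quick worked example of the arithmetic (a userspace sketch whose rounding matches the kernel's round_up() for these inputs):

#include <stdio.h>

#define HPAGE_PMD_NR	512UL	/* pages per PMD-sized huge page on x86-64 */

/* Round x up to a multiple of the power-of-two y (same results as the
 * kernel's round_up() for the values used below). */
static unsigned long roundup_pow2(unsigned long x, unsigned long y)
{
	return (x + y - 1) & ~(y - 1);
}

int main(void)
{
	unsigned long idx[] = { 0, 1, 511, 512, 513 };

	for (int i = 0; i < 5; i++) {
		unsigned long old = roundup_pow2(idx[i], HPAGE_PMD_NR);
		unsigned long fix = roundup_pow2(idx[i] + 1, HPAGE_PMD_NR);

		/* For aligned indexes (0, 512) the old bound is the index
		 * itself; the fixed form yields the end of the extent. */
		printf("index %3lu: old bound %4lu, fixed bound %4lu\n",
		       idx[i], old, fix);
	}
	return 0;
}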
mm/swap.c
@@ -620,7 +620,6 @@ void lru_add_drain_cpu(int cpu)
 		pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
 
 	activate_page_drain(cpu);
-	invalidate_bh_lrus_cpu(cpu);
 }
 
 /**
@@ -703,6 +702,20 @@ void lru_add_drain(void)
 	local_unlock(&lru_pvecs.lock);
 }
 
+/*
+ * It's called from per-cpu workqueue context in SMP case so
+ * lru_add_drain_cpu and invalidate_bh_lrus_cpu should run on
+ * the same cpu. It shouldn't be a problem in !SMP case since
+ * the core is only one and the locks will disable preemption.
+ */
+static void lru_add_and_bh_lrus_drain(void)
+{
+	local_lock(&lru_pvecs.lock);
+	lru_add_drain_cpu(smp_processor_id());
+	local_unlock(&lru_pvecs.lock);
+	invalidate_bh_lrus_cpu();
+}
+
 void lru_add_drain_cpu_zone(struct zone *zone)
 {
 	local_lock(&lru_pvecs.lock);
@@ -717,7 +730,7 @@ static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
-	lru_add_drain();
+	lru_add_and_bh_lrus_drain();
 }
 
 /*
@@ -858,7 +871,7 @@ void lru_cache_disable(void)
 	 */
 	__lru_add_drain_all(true);
 #else
-	lru_add_drain();
+	lru_add_and_bh_lrus_drain();
 #endif
 }
 
@@ -787,7 +787,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
 		size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table t;
-	int new_policy;
+	int new_policy = -1;
 	int ret;
 
 	/*
@@ -805,7 +805,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
 		t = *table;
 		t.data = &new_policy;
 		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
-		if (ret)
+		if (ret || new_policy == -1)
 			return ret;
 
 		mm_compute_batch(new_policy);
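proc_dointvec_minmax() can return 0 without writing anything (for instance on a zero-length write), which previously left new_policy uninitialized before it reached mm_compute_batch(). Pre-initializing to -1 and treating that value as "nothing parsed" closes the hole. A userspace sketch of the same pattern, with a hypothetical parser standing in for proc_dointvec_minmax():

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in: returns 0 ("success") even when there is nothing
 * to parse, in which case *out is left untouched - just like an empty
 * write through proc_dointvec_minmax(). */
static int parse_int(const char *buf, int *out)
{
	char *end;
	long v = strtol(buf, &end, 10);

	if (end == buf)
		return 0;		/* nothing consumed, *out not written */
	*out = (int)v;
	return 0;
}

int main(void)
{
	int new_policy = -1;		/* sentinel, mirroring the fix */
	int ret = parse_int("", &new_policy);

	if (ret || new_policy == -1)
		printf("nothing written, keep the old policy\n");
	else
		printf("new policy: %d\n", new_policy);
	return 0;
}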
@@ -33,10 +33,11 @@ else
 CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
 		$(call cc-param,asan-globals=1) \
 		$(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
-		$(call cc-param,asan-stack=$(stack_enable)) \
 		$(call cc-param,asan-instrument-allocas=1)
 endif
 
+CFLAGS_KASAN += $(call cc-param,asan-stack=$(stack_enable))
+
 endif # CONFIG_KASAN_GENERIC
 
 ifdef CONFIG_KASAN_SW_TAGS
@@ -54,6 +54,10 @@
 #define EM_ARCV2	195
 #endif
 
+#ifndef EM_RISCV
+#define EM_RISCV	243
+#endif
+
 static uint32_t (*r)(const uint32_t *);
 static uint16_t (*r2)(const uint16_t *);
 static uint64_t (*r8)(const uint64_t *);
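sorttable is a host tool, so it sees whatever elf.h the build machine provides; older headers predate the RISC-V machine number, hence the guarded fallback. The same pattern, compilable on its own (the demo itself is just illustrative):

#include <stdio.h>
#include <elf.h>	/* the host's header may or may not define EM_RISCV */

/* Fallback for build hosts whose elf.h predates the RISC-V EM_* value. */
#ifndef EM_RISCV
#define EM_RISCV	243
#endif

int main(void)
{
	printf("EM_RISCV = %d\n", EM_RISCV);
	return 0;
}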
@@ -1331,7 +1331,7 @@ int main(int argc, char *argv[])
 	if (opt_list && opt_list_mapcnt)
 		kpagecount_fd = checked_open(PROC_KPAGECOUNT, O_RDONLY);
 
-	if (opt_mark_idle && opt_file)
+	if (opt_mark_idle)
 		page_idle_fd = checked_open(SYS_KERNEL_MM_PAGE_IDLE, O_RDWR);
 
 	if (opt_list && opt_pid)