sync mm-stable with mm-hotfixes-stable to pick up depended-upon upstream changes
Commit: f8f238ffe5
@@ -430,6 +430,23 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
 	return 0;
 }
 
+/**
+ * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
+ * @sci: segment constructor object
+ *
+ * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
+ * the current segment summary block.
+ */
+static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
+{
+	struct nilfs_segsum_pointer *ssp;
+
+	ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
+	if (ssp->offset < ssp->bh->b_size)
+		memset(ssp->bh->b_data + ssp->offset, 0,
+		       ssp->bh->b_size - ssp->offset);
+}
+
 static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
 {
 	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
@@ -438,6 +455,7 @@ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
 	 * The current segment is filled up
 	 * (internal code)
 	 */
+	nilfs_segctor_zeropad_segsum(sci);
 	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
 	return nilfs_segctor_reset_segment_buffer(sci);
 }
@@ -542,6 +560,7 @@ static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
 			goto retry;
 	}
 	if (unlikely(required)) {
+		nilfs_segctor_zeropad_segsum(sci);
 		err = nilfs_segbuf_extend_segsum(segbuf);
 		if (unlikely(err))
 			goto failed;
@@ -1533,6 +1552,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
 		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
 		sci->sc_stage = prev_stage;
 	}
+	nilfs_segctor_zeropad_segsum(sci);
 	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
 	return 0;
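Note: these hunks appear to be from fs/nilfs2/segment.c. The new nilfs_segctor_zeropad_segsum() helper zero-fills whatever is left of the current segment summary block before the constructor moves on, so stale memory contents are never written out with the summary. A minimal, self-contained sketch of the same padding pattern (plain userspace C with hypothetical names, not the kernel code):

#include <stdio.h>
#include <string.h>

/* Model of a summary block: a fixed-size buffer plus a fill offset. */
struct sum_block {
	unsigned char data[64];
	size_t offset;          /* bytes actually written so far */
};

/* Zero everything past the last written byte, like the zeropad helper. */
static void zeropad_tail(struct sum_block *b)
{
	if (b->offset < sizeof(b->data))
		memset(b->data + b->offset, 0, sizeof(b->data) - b->offset);
}

int main(void)
{
	struct sum_block b = { .offset = 10 };

	memset(b.data, 0xaa, sizeof(b.data));   /* pretend stale data */
	memset(b.data, 0x11, b.offset);         /* 10 valid bytes */
	zeropad_tail(&b);
	printf("byte 9 = %#x, byte 10 = %#x\n", b.data[9], b.data[10]);
	return 0;
}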
@@ -134,11 +134,12 @@ void kmsan_kfree_large(const void *ptr);
  * @page_shift: page_shift passed to vmap_range_noflush().
  *
  * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
- * vmalloc metadata address range.
+ * vmalloc metadata address range. Returns 0 on success, callers must check
+ * for non-zero return value.
  */
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
				    pgprot_t prot, struct page **pages,
				    unsigned int page_shift);
 
 /**
  * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
@@ -159,11 +160,12 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
  * @page_shift: page_shift argument passed to vmap_range_noflush().
  *
  * KMSAN creates new metadata pages for the physical pages mapped into the
- * virtual memory.
+ * virtual memory. Returns 0 on success, callers must check for non-zero return
+ * value.
  */
-void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
			      phys_addr_t phys_addr, pgprot_t prot,
			      unsigned int page_shift);
 
 /**
  * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
@@ -281,12 +283,13 @@ static inline void kmsan_kfree_large(const void *ptr)
 {
 }
 
-static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
+static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
						  unsigned long end,
						  pgprot_t prot,
						  struct page **pages,
						  unsigned int page_shift)
 {
+	return 0;
 }
 
 static inline void kmsan_vunmap_range_noflush(unsigned long start,
@@ -294,12 +297,12 @@ static inline void kmsan_vunmap_range_noflush(unsigned long start,
 {
 }
 
-static inline void kmsan_ioremap_page_range(unsigned long start,
+static inline int kmsan_ioremap_page_range(unsigned long start,
					    unsigned long end,
-					    phys_addr_t phys_addr,
-					    pgprot_t prot,
-					    unsigned int page_shift)
+					    phys_addr_t phys_addr, pgprot_t prot,
+					    unsigned int page_shift)
 {
+	return 0;
 }
 
 static inline void kmsan_iounmap_page_range(unsigned long start,
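Note: these declaration changes appear to be from include/linux/kmsan.h. kmsan_vmap_pages_range_noflush() and kmsan_ioremap_page_range() now return int instead of void, so metadata allocation or mapping failures can be reported to callers; the !CONFIG_KMSAN stubs simply return 0. The caller-side pattern shows up in the mm/vmalloc.c hunks near the end of this diff. A simplified sketch of that error-propagation shape (illustrative userspace C, not the kernel API):

#include <errno.h>
#include <stdio.h>

/* Stand-ins for the instrumented metadata step and the real mapping step. */
static int metadata_map(int fail) { return fail ? -ENOMEM : 0; }
static int real_map(void)         { return 0; }

/* Callers now check the metadata step and bail out before the real work. */
static int map_range(int fail_metadata)
{
	int ret = metadata_map(fail_metadata);

	if (ret)
		return ret;     /* propagate -ENOMEM instead of ignoring it */
	return real_map();
}

int main(void)
{
	printf("ok path: %d, failure path: %d\n", map_range(0), map_range(1));
	return 0;
}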
@@ -1308,6 +1308,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 fail_pcpu:
	while (i > 0)
		percpu_counter_destroy(&mm->rss_stat[--i]);
+	destroy_context(mm);
 fail_nocontext:
	mm_free_pgd(mm);
 fail_nopgd:
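Note: this one-line hunk looks like the mm_init() error path in kernel/fork.c. When the per-CPU rss counters cannot be allocated, the unwind path now also calls destroy_context(mm) before falling through to mm_free_pgd(), so teardown mirrors setup order and the MMU context is no longer leaked. A small sketch of that goto-unwind idiom (hypothetical resource names):

#include <stdio.h>

static int init_a(void) { return 0; }   /* e.g. page tables */
static int init_b(void) { return 0; }   /* e.g. mmu context */
static int init_c(void) { return -1; }  /* e.g. per-CPU counters; fails here */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int setup(void)
{
	if (init_a())
		goto fail_a;
	if (init_b())
		goto fail_b;
	if (init_c())
		goto fail_c;    /* the fix adds the missing undo_b() on this path */
	return 0;

fail_c:
	undo_b();               /* analogous to the added destroy_context(mm) */
fail_b:
	undo_a();
fail_a:
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}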
 kernel/sys.c | 69

@@ -664,6 +664,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;
+	bool ruid_new, euid_new, suid_new;
 
	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
@@ -678,25 +679,29 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;
 
+	old = current_cred();
+
+	/* check for no-op */
+	if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) &&
+	    (euid == (uid_t) -1 || (uid_eq(keuid, old->euid) &&
+				    uid_eq(keuid, old->fsuid))) &&
+	    (suid == (uid_t) -1 || uid_eq(ksuid, old->suid)))
+		return 0;
+
+	ruid_new = ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
+		   !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid);
+	euid_new = euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
+		   !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid);
+	suid_new = suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
+		   !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid);
+	if ((ruid_new || euid_new || suid_new) &&
+	    !ns_capable_setid(old->user_ns, CAP_SETUID))
+		return -EPERM;
+
	new = prepare_creds();
	if (!new)
		return -ENOMEM;
-
-	old = current_cred();
-
-	retval = -EPERM;
-	if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
-		if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
-		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
-			goto error;
-		if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
-		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
-			goto error;
-		if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
-		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
-			goto error;
-	}
 
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
@@ -761,6 +766,7 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;
+	bool rgid_new, egid_new, sgid_new;
 
	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
@@ -773,23 +779,28 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;
 
+	old = current_cred();
+
+	/* check for no-op */
+	if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) &&
+	    (egid == (gid_t) -1 || (gid_eq(kegid, old->egid) &&
+				    gid_eq(kegid, old->fsgid))) &&
+	    (sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid)))
+		return 0;
+
+	rgid_new = rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
+		   !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid);
+	egid_new = egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
+		   !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid);
+	sgid_new = sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
+		   !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid);
+	if ((rgid_new || egid_new || sgid_new) &&
+	    !ns_capable_setid(old->user_ns, CAP_SETGID))
+		return -EPERM;
+
	new = prepare_creds();
	if (!new)
		return -ENOMEM;
-	old = current_cred();
-
-	retval = -EPERM;
-	if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
-		if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
-		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
-			goto error;
-		if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
-		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
-			goto error;
-		if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
-		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
-			goto error;
-	}
 
	if (rgid != (gid_t) -1)
		new->gid = krgid;
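Note: in these __sys_setresuid()/__sys_setresgid() hunks the permission and no-op checks are computed from current_cred() up front: a call that changes nothing returns 0 immediately, and an unprivileged caller that would need CAP_SETUID/CAP_SETGID is rejected with -EPERM before prepare_creds() allocates anything. A compact model of the decision logic for the uid case (plain C standing in for the kuid helpers; names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct cred { unsigned uid, euid, suid, fsuid; };

/* -1 means "leave this id unchanged", as in setresuid(2). */
static int setresuid_check(const struct cred *old, long r, long e, long s,
			   bool has_cap_setuid)
{
	bool r_new, e_new, s_new;

	/* No-op: every requested id already matches the current creds. */
	if ((r == -1 || r == old->uid) &&
	    (e == -1 || (e == old->euid && e == old->fsuid)) &&
	    (s == -1 || s == old->suid))
		return 0;

	/* An id is "new" if it matches none of ruid/euid/suid. */
	r_new = r != -1 && r != old->uid && r != old->euid && r != old->suid;
	e_new = e != -1 && e != old->uid && e != old->euid && e != old->suid;
	s_new = s != -1 && s != old->uid && s != old->euid && s != old->suid;

	if ((r_new || e_new || s_new) && !has_cap_setuid)
		return -1;      /* would be -EPERM in the kernel */
	return 1;               /* allowed: go on to prepare/commit creds */
}

int main(void)
{
	struct cred old = { .uid = 1000, .euid = 2000, .suid = 3000, .fsuid = 2000 };

	printf("no-op: %d\n", setresuid_check(&old, 1000, -1, -1, false));
	printf("reuse an existing id: %d\n", setresuid_check(&old, 2000, -1, -1, false));
	printf("new id without cap: %d\n", setresuid_check(&old, 0, -1, -1, false));
	return 0;
}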
@@ -4965,7 +4965,8 @@ not_found:
 * Return: True if found in a leaf, false otherwise.
 *
 */
-static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
+static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
+		unsigned long *gap_min, unsigned long *gap_max)
 {
	enum maple_type type = mte_node_type(mas->node);
	struct maple_node *node = mas_mn(mas);
@@ -5030,8 +5031,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
 
	if (unlikely(ma_is_leaf(type))) {
		mas->offset = offset;
-		mas->min = min;
-		mas->max = min + gap - 1;
+		*gap_min = min;
+		*gap_max = min + gap - 1;
		return true;
	}
 
@@ -5055,10 +5056,10 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
 {
	enum maple_type type = mte_node_type(mas->node);
	unsigned long pivot, min, gap = 0;
-	unsigned char offset;
-	unsigned long *gaps;
-	unsigned long *pivots = ma_pivots(mas_mn(mas), type);
-	void __rcu **slots = ma_slots(mas_mn(mas), type);
+	unsigned char offset, data_end;
+	unsigned long *gaps, *pivots;
+	void __rcu **slots;
+	struct maple_node *node;
	bool found = false;
 
	if (ma_is_dense(type)) {
@@ -5066,13 +5067,15 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
		return true;
	}
 
-	gaps = ma_gaps(mte_to_node(mas->node), type);
+	node = mas_mn(mas);
+	pivots = ma_pivots(node, type);
+	slots = ma_slots(node, type);
+	gaps = ma_gaps(node, type);
	offset = mas->offset;
	min = mas_safe_min(mas, pivots, offset);
-	for (; offset < mt_slots[type]; offset++) {
-		pivot = mas_safe_pivot(mas, pivots, offset, type);
-		if (offset && !pivot)
-			break;
+	data_end = ma_data_end(node, type, pivots, mas->max);
+	for (; offset <= data_end; offset++) {
+		pivot = mas_logical_pivot(mas, pivots, offset, type);
 
		/* Not within lower bounds */
		if (mas->index > pivot)
@@ -5307,6 +5310,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
	unsigned long *pivots;
	enum maple_type mt;
 
+	if (min >= max)
+		return -EINVAL;
+
	if (mas_is_start(mas))
		mas_start(mas);
	else if (mas->offset >= 2)
@@ -5361,6 +5367,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
 {
	struct maple_enode *last = mas->node;
 
+	if (min >= max)
+		return -EINVAL;
+
	if (mas_is_start(mas)) {
		mas_start(mas);
		mas->offset = mas_data_end(mas);
@@ -5380,7 +5389,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
	mas->index = min;
	mas->last = max;
 
-	while (!mas_rev_awalk(mas, size)) {
+	while (!mas_rev_awalk(mas, size, &min, &max)) {
		if (last == mas->node) {
			if (!mas_rewind_node(mas))
				return -EBUSY;
@@ -5395,17 +5404,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
	if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
		return -EBUSY;
 
-	/*
-	 * mas_rev_awalk() has set mas->min and mas->max to the gap values. If
-	 * the maximum is outside the window we are searching, then use the last
-	 * location in the search.
-	 * mas->max and mas->min is the range of the gap.
-	 * mas->index and mas->last are currently set to the search range.
-	 */
-
	/* Trim the upper limit to the max. */
-	if (mas->max <= mas->last)
-		mas->last = mas->max;
+	if (max <= mas->last)
+		mas->last = max;
 
	mas->index = mas->last - size + 1;
	return 0;
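Note: these hunks appear to be from lib/maple_tree.c. mas_rev_awalk() now reports the discovered gap through the gap_min/gap_max out-parameters instead of overwriting mas->min/mas->max (which describe the node's range), mas_anode_descend() re-reads the node metadata and iterates only up to the node's real data end, and mas_empty_area()/mas_empty_area_rev() reject a search window where min >= max with -EINVAL. A toy illustration of the out-parameter gap-search shape (not the maple-tree API):

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; };

/*
 * Walk an array of (start, end) free ranges backwards and return the last
 * one that can hold `size` bytes via out-parameters, leaving the caller's
 * search bounds untouched by the gap bookkeeping.
 */
static bool rev_find_gap(const struct range *free, int n, unsigned long size,
			 unsigned long *gap_min, unsigned long *gap_max)
{
	for (int i = n - 1; i >= 0; i--) {
		if (free[i].end - free[i].start + 1 >= size) {
			*gap_min = free[i].start;
			*gap_max = free[i].end;
			return true;
		}
	}
	return false;
}

int main(void)
{
	const struct range free[] = { { 0x1000, 0x1fff }, { 0x8000, 0x80ff } };
	unsigned long lo, hi;

	if (rev_find_gap(free, 2, 0x800, &lo, &hi))
		printf("gap: [%#lx, %#lx]\n", lo, hi);
	return 0;
}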
@@ -148,35 +148,74 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
 * into the virtual memory. If those physical pages already had shadow/origin,
 * those are ignored.
 */
-void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
+int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
			      phys_addr_t phys_addr, pgprot_t prot,
			      unsigned int page_shift)
 {
	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
	struct page *shadow, *origin;
	unsigned long off = 0;
-	int nr;
+	int nr, err = 0, clean = 0, mapped;
 
	if (!kmsan_enabled || kmsan_in_runtime())
-		return;
+		return 0;
 
	nr = (end - start) / PAGE_SIZE;
	kmsan_enter_runtime();
-	for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
+	for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
		shadow = alloc_pages(gfp_mask, 1);
		origin = alloc_pages(gfp_mask, 1);
-		__vmap_pages_range_noflush(
+		if (!shadow || !origin) {
+			err = -ENOMEM;
+			goto ret;
+		}
+		mapped = __vmap_pages_range_noflush(
			vmalloc_shadow(start + off),
			vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
			PAGE_SHIFT);
-		__vmap_pages_range_noflush(
+		if (mapped) {
+			err = mapped;
+			goto ret;
+		}
+		shadow = NULL;
+		mapped = __vmap_pages_range_noflush(
			vmalloc_origin(start + off),
			vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
			PAGE_SHIFT);
+		if (mapped) {
+			__vunmap_range_noflush(
				vmalloc_shadow(start + off),
				vmalloc_shadow(start + off + PAGE_SIZE));
+			err = mapped;
+			goto ret;
+		}
+		origin = NULL;
+	}
+	/* Page mapping loop finished normally, nothing to clean up. */
+	clean = 0;
+
+ret:
+	if (clean > 0) {
+		/*
+		 * Something went wrong. Clean up shadow/origin pages allocated
+		 * on the last loop iteration, then delete mappings created
+		 * during the previous iterations.
+		 */
+		if (shadow)
+			__free_pages(shadow, 1);
+		if (origin)
+			__free_pages(origin, 1);
+		__vunmap_range_noflush(
			vmalloc_shadow(start),
			vmalloc_shadow(start + clean * PAGE_SIZE));
+		__vunmap_range_noflush(
			vmalloc_origin(start),
			vmalloc_origin(start + clean * PAGE_SIZE));
	}
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
	kmsan_leave_runtime();
+	return err;
 }
 
 void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
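Note: this hunk looks like kmsan_ioremap_page_range() in mm/kmsan/hooks.c. The loop now tracks how many per-page shadow/origin mappings have been fully created (clean); on an allocation or mapping failure it frees the pages from the failing iteration and unmaps everything built so far before returning the error. A condensed sketch of that partial-cleanup pattern (hypothetical helpers):

#include <errno.h>
#include <stdio.h>

static int map_one(int i)    { return i == 3 ? -ENOMEM : 0; }  /* fail on i == 3 */
static void unmap_one(int i) { printf("unmap %d\n", i); }

static int map_range(int nr)
{
	int err = 0, clean = 0;

	for (int i = 0; i < nr; i++, clean = i) {
		err = map_one(i);
		if (err)
			goto ret;       /* exactly `clean` pages are fully mapped */
	}
	clean = 0;      /* loop finished normally, nothing to undo */

ret:
	/* On error, tear down only the iterations that completed. */
	for (int i = 0; i < clean; i++)
		unmap_one(i);
	return err;
}

int main(void)
{
	printf("result: %d\n", map_range(5));
	return 0;
}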
@@ -216,27 +216,29 @@ void kmsan_free_page(struct page *page, unsigned int order)
	kmsan_leave_runtime();
 }
 
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
				    pgprot_t prot, struct page **pages,
				    unsigned int page_shift)
 {
	unsigned long shadow_start, origin_start, shadow_end, origin_end;
	struct page **s_pages, **o_pages;
-	int nr, mapped;
+	int nr, mapped, err = 0;
 
	if (!kmsan_enabled)
-		return;
+		return 0;
 
	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
	if (!shadow_start)
-		return;
+		return 0;
 
	nr = (end - start) / PAGE_SIZE;
	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
-	if (!s_pages || !o_pages)
+	if (!s_pages || !o_pages) {
+		err = -ENOMEM;
		goto ret;
+	}
	for (int i = 0; i < nr; i++) {
		s_pages[i] = shadow_page_for(pages[i]);
		o_pages[i] = origin_page_for(pages[i]);
@@ -249,10 +251,16 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
	kmsan_enter_runtime();
	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
					    s_pages, page_shift);
-	KMSAN_WARN_ON(mapped);
+	if (mapped) {
+		err = mapped;
+		goto ret;
+	}
	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
					    o_pages, page_shift);
-	KMSAN_WARN_ON(mapped);
+	if (mapped) {
+		err = mapped;
+		goto ret;
+	}
	kmsan_leave_runtime();
	flush_tlb_kernel_range(shadow_start, shadow_end);
	flush_tlb_kernel_range(origin_start, origin_end);
@@ -262,6 +270,7 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
 ret:
	kfree(s_pages);
	kfree(o_pages);
+	return err;
 }
 
 /* Allocate metadata for pages allocated at boot time. */
 mm/mmap.c | 48

@@ -1547,7 +1547,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
 */
 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 {
-	unsigned long length, gap;
+	unsigned long length, gap, low_limit;
+	struct vm_area_struct *tmp;
 
	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
 
@@ -1556,12 +1557,29 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
	if (length < info->length)
		return -ENOMEM;
 
-	if (mas_empty_area(&mas, info->low_limit, info->high_limit - 1,
-			   length))
+	low_limit = info->low_limit;
+retry:
+	if (mas_empty_area(&mas, low_limit, info->high_limit - 1, length))
		return -ENOMEM;
 
	gap = mas.index;
	gap += (info->align_offset - gap) & info->align_mask;
+	tmp = mas_next(&mas, ULONG_MAX);
+	if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
+		if (vm_start_gap(tmp) < gap + length - 1) {
+			low_limit = tmp->vm_end;
+			mas_reset(&mas);
+			goto retry;
+		}
+	} else {
+		tmp = mas_prev(&mas, 0);
+		if (tmp && vm_end_gap(tmp) > gap) {
+			low_limit = vm_end_gap(tmp);
+			mas_reset(&mas);
+			goto retry;
+		}
+	}
+
	return gap;
 }
 
@@ -1577,7 +1595,8 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 */
 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 {
-	unsigned long length, gap;
+	unsigned long length, gap, high_limit, gap_end;
+	struct vm_area_struct *tmp;
 
	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
	/* Adjust search length to account for worst case alignment overhead */
@@ -1585,12 +1604,31 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
	if (length < info->length)
		return -ENOMEM;
 
-	if (mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1,
+	high_limit = info->high_limit;
+retry:
+	if (mas_empty_area_rev(&mas, info->low_limit, high_limit - 1,
			       length))
		return -ENOMEM;
 
	gap = mas.last + 1 - info->length;
	gap -= (gap - info->align_offset) & info->align_mask;
+	gap_end = mas.last;
+	tmp = mas_next(&mas, ULONG_MAX);
+	if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
+		if (vm_start_gap(tmp) <= gap_end) {
+			high_limit = vm_start_gap(tmp);
+			mas_reset(&mas);
+			goto retry;
+		}
+	} else {
+		tmp = mas_prev(&mas, 0);
+		if (tmp && vm_end_gap(tmp) > gap) {
+			high_limit = tmp->vm_start;
+			mas_reset(&mas);
+			goto retry;
+		}
+	}
+
	return gap;
 }
 
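Note: in these mm/mmap.c hunks, unmapped_area() and unmapped_area_topdown() no longer trust the raw maple-tree gap. After finding a candidate they look at the neighbouring VMA and, if its guard gap (vm_start_gap()/vm_end_gap() for a stack that can grow) would overlap the candidate, they tighten the search limit and retry. A stripped-down model of that retry loop (hypothetical helpers, not the VMA API):

#include <stdio.h>

/* A single reserved guard region that candidate slots must avoid. */
static unsigned long guard_start = 0x3000, guard_end = 0x4fff;

/*
 * Find the lowest `length`-sized slot at or above `low`; if the slot would
 * intrude into the guard region, raise the lower limit and search again --
 * the shape of the new retry: loop in unmapped_area().
 */
static unsigned long find_slot(unsigned long low, unsigned long length)
{
	unsigned long gap;

retry:
	/* Pretend the tree search simply returns the lower limit itself. */
	gap = low;

	if (gap + length - 1 >= guard_start && gap <= guard_end) {
		low = guard_end + 1;    /* conflict: move above the guard and retry */
		goto retry;
	}
	return gap;
}

int main(void)
{
	printf("slot at %#lx\n", find_slot(0x2000, 0x2000));
	return 0;
}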
@@ -5796,7 +5796,21 @@ static void __build_all_zonelists(void *data)
	int nid;
	int __maybe_unused cpu;
	pg_data_t *self = data;
+	unsigned long flags;
+
+	/*
+	 * Explicitly disable this CPU's interrupts before taking seqlock
+	 * to prevent any IRQ handler from calling into the page allocator
+	 * (e.g. GFP_ATOMIC) that could hit zonelist_iter_begin and livelock.
+	 */
+	local_irq_save(flags);
+	/*
+	 * Explicitly disable this CPU's synchronous printk() before taking
+	 * seqlock to prevent any printk() from trying to hold port->lock, for
+	 * tty_insert_flip_string_and_push_buffer() on other CPU might be
+	 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
+	 */
+	printk_deferred_enter();
	write_seqlock(&zonelist_update_seq);
 
 #ifdef CONFIG_NUMA
@@ -5835,6 +5849,8 @@ static void __build_all_zonelists(void *data)
	}
 
	write_sequnlock(&zonelist_update_seq);
+	printk_deferred_exit();
+	local_irq_restore(flags);
 }
 
 static noinline void __init
@@ -6884,6 +6900,9 @@ static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
 
		if (PageReserved(page))
			return false;
+
+		if (PageHuge(page))
+			return false;
	}
	return true;
 }
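Note: the __build_all_zonelists() hunks appear to be from mm/page_alloc.c. They wrap the zonelist_update_seq write side with local_irq_save() and printk_deferred_enter(): per the added comments, an IRQ-context GFP_ATOMIC allocation or a synchronous printk() on this CPU could otherwise spin on the seqlock the CPU itself holds. A sketch of the acquire/release nesting the patch establishes (mock stand-in functions that only log the ordering; the real kernel calls take arguments):

#include <stdio.h>

static void mock_local_irq_save(void)        { puts("irqs off"); }
static void mock_printk_deferred_enter(void) { puts("printk deferred"); }
static void mock_write_seqlock(void)         { puts("seqlock acquired"); }
static void mock_write_sequnlock(void)       { puts("seqlock released"); }
static void mock_printk_deferred_exit(void)  { puts("printk direct again"); }
static void mock_local_irq_restore(void)     { puts("irqs on"); }

int main(void)
{
	/* Take the blockers first, release them last (strict nesting). */
	mock_local_irq_save();
	mock_printk_deferred_enter();
	mock_write_seqlock();

	/* ... rebuild the zonelists ... */

	mock_write_sequnlock();
	mock_printk_deferred_exit();
	mock_local_irq_restore();
	return 0;
}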
 mm/vmalloc.c | 10

@@ -313,8 +313,8 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	if (!err)
-		kmsan_ioremap_page_range(addr, end, phys_addr, prot,
+		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					 ioremap_max_page_shift);
	return err;
 }
 
@@ -605,7 +605,11 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
-	kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
+	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						 page_shift);
+
+	if (ret)
+		return ret;
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
 }
 
@@ -39,7 +39,7 @@ help:
	@echo ' turbostat - Intel CPU idle stats and freq reporting tool'
	@echo ' usb - USB testing tools'
	@echo ' virtio - vhost test module'
-	@echo ' vm - misc vm tools'
+	@echo ' mm - misc mm tools'
	@echo ' wmi - WMI interface examples'
	@echo ' x86_energy_perf_policy - Intel energy policy tool'
	@echo ''
@@ -69,7 +69,7 @@ acpi: FORCE
 cpupower: FORCE
	$(call descend,power/$@)
 
-cgroup counter firewire hv guest bootconfig spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging tracing: FORCE
+cgroup counter firewire hv guest bootconfig spi usb virtio mm bpf iio gpio objtool leds wmi pci firmware debugging tracing: FORCE
	$(call descend,$@)
 
 bpf/%: FORCE
@@ -118,7 +118,7 @@ kvm_stat: FORCE
 
 all: acpi cgroup counter cpupower gpio hv firewire \
		perf selftests bootconfig spi turbostat usb \
-		virtio vm bpf x86_energy_perf_policy \
+		virtio mm bpf x86_energy_perf_policy \
		tmon freefall iio objtool kvm_stat wmi \
		pci debugging tracing thermal thermometer thermal-engine
 
@@ -128,7 +128,7 @@ acpi_install:
 cpupower_install:
	$(call descend,power/$(@:_install=),install)
 
-cgroup_install counter_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install tracing_install:
+cgroup_install counter_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install mm_install bpf_install objtool_install wmi_install pci_install debugging_install tracing_install:
	$(call descend,$(@:_install=),install)
 
 selftests_install:
@@ -158,7 +158,7 @@ kvm_stat_install:
 install: acpi_install cgroup_install counter_install cpupower_install gpio_install \
		hv_install firewire_install iio_install \
		perf_install selftests_install turbostat_install usb_install \
-		virtio_install vm_install bpf_install x86_energy_perf_policy_install \
+		virtio_install mm_install bpf_install x86_energy_perf_policy_install \
		tmon_install freefall_install objtool_install kvm_stat_install \
		wmi_install pci_install debugging_install intel-speed-select_install \
		tracing_install thermometer_install thermal-engine_install
@@ -169,7 +169,7 @@ acpi_clean:
 cpupower_clean:
	$(call descend,power/cpupower,clean)
 
-cgroup_clean counter_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean tracing_clean:
+cgroup_clean counter_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean mm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean tracing_clean:
	$(call descend,$(@:_clean=),clean)
 
 libapi_clean:
@@ -211,7 +211,7 @@ build_clean:
 
 clean: acpi_clean cgroup_clean counter_clean cpupower_clean hv_clean firewire_clean \
		perf_clean selftests_clean turbostat_clean bootconfig_clean spi_clean usb_clean virtio_clean \
-		vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
+		mm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
		freefall_clean build_clean libbpf_clean libsubcmd_clean \
		gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean debugging_clean \
		intel-speed-select_clean tracing_clean thermal_clean thermometer_clean thermal-engine_clean