Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "16 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  kasan: fix memory hotplug during boot
  kasan: free allocated shadow memory on MEM_CANCEL_ONLINE
  checkpatch: fix macro argument precedence test
  init/main.c: include <linux/mem_encrypt.h>
  kernel/sys.c: fix potential Spectre v1 issue
  mm/memory_hotplug: fix leftover use of struct page during hotplug
  proc: fix smaps and meminfo alignment
  mm: do not warn on offline nodes unless the specific node is explicitly requested
  mm, memory_hotplug: make has_unmovable_pages more robust
  mm/kasan: don't vfree() nonexistent vm_area
  MAINTAINERS: change hugetlbfs maintainer and update files
  ipc/shm: fix shmat() nil address after round-down when remapping
  Revert "ipc/shm: Fix shmat mmap nil-page protection"
  idr: fix invalid ptr dereference on item delete
  ocfs2: revert "ocfs2/o2hb: check len for bio_add_page() to avoid getting incorrect bio"
  mm: fix nr_rotate_swap leak in swapon() error case
commit bc2dbc5420
@@ -6503,9 +6503,15 @@ F:	Documentation/networking/hinic.txt
 F:	drivers/net/ethernet/huawei/hinic/
 
 HUGETLB FILESYSTEM
-M:	Nadia Yvette Chambers <nyc@holomorphy.com>
+M:	Mike Kravetz <mike.kravetz@oracle.com>
+L:	linux-mm@kvack.org
 S:	Maintained
 F:	fs/hugetlbfs/
+F:	mm/hugetlb.c
+F:	include/linux/hugetlb.h
+F:	Documentation/admin-guide/mm/hugetlbpage.rst
+F:	Documentation/vm/hugetlbfs_reserv.rst
+F:	Documentation/ABI/testing/sysfs-kernel-mm-hugepages
 
 HVA ST MEDIA DRIVER
 M:	Jean-Christophe Trotin <jean-christophe.trotin@st.com>
@@ -490,7 +490,8 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
 	return 0;
 }
 
-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages,
+		      bool check_nid)
 {
 	unsigned long end_pfn = start_pfn + nr_pages;
 	unsigned long pfn;
@@ -514,7 +515,7 @@ int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
 
 		mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
 
-		ret = register_mem_sect_under_node(mem_blk, nid, true);
+		ret = register_mem_sect_under_node(mem_blk, nid, check_nid);
 		if (!err)
 			err = ret;
 
@@ -570,16 +570,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
 		     current_page, vec_len, vec_start);
 
 		len = bio_add_page(bio, page, vec_len, vec_start);
-		if (len != vec_len) {
-			mlog(ML_ERROR, "Adding page[%d] to bio failed, "
-			     "page %p, len %d, vec_len %u, vec_start %u, "
-			     "bi_sector %llu\n", current_page, page, len,
-			     vec_len, vec_start,
-			     (unsigned long long)bio->bi_iter.bi_sector);
-			bio_put(bio);
-			bio = ERR_PTR(-EIO);
-			return bio;
-		}
+		if (len != vec_len) break;
 
 		cs += vec_len / (PAGE_SIZE/spp);
 		vec_start = 0;
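The revert above restores the original loop behaviour: a short return from bio_add_page() means the current bio is simply full, so the loop breaks and the caller submits that bio and continues with another one, rather than treating the partial add as an I/O error. A minimal userspace sketch of that pattern; struct bio_like and add_chunk() are illustrative stand-ins, not ocfs2 code.

#include <stdio.h>
#include <string.h>

/* When the container cannot take the whole chunk, stop filling it and let
 * the caller continue with a fresh one, instead of failing hard. */
struct bio_like {
	char buf[16];
	size_t used;
};

static size_t add_chunk(struct bio_like *b, const char *data, size_t len)
{
	size_t room = sizeof(b->buf) - b->used;
	size_t take = len < room ? len : room;

	memcpy(b->buf + b->used, data, take);
	b->used += take;
	return take;		/* may be shorter than len, like bio_add_page() */
}

int main(void)
{
	struct bio_like b = { .used = 0 };
	const char chunk[] = "0123456789";	/* 10 payload bytes per add */
	int i;

	for (i = 0; i < 3; i++) {
		if (add_chunk(&b, chunk, 10) != 10)
			break;	/* bio full: caller would submit it and allocate a new one */
	}
	printf("stored %zu bytes before the bio filled up\n", b.used);
	return 0;
}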
@@ -709,11 +709,6 @@ void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter,
 	if (m->count + width >= m->size)
 		goto overflow;
 
-	if (num < 10) {
-		m->buf[m->count++] = num + '0';
-		return;
-	}
-
 	len = num_to_str(m->buf + m->count, m->size - m->count, num, width);
 	if (!len)
 		goto overflow;
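The deleted fast path wrote a bare single digit and ignored the caller's width, so fixed-width columns in /proc/meminfo and smaps lost their alignment whenever a value was below 10. A small userspace sketch of the padding contract; put_decimal_width() is a hypothetical stand-in for seq_put_decimal_ull_width().

#include <stdio.h>

/* Always right-align the number in 'width' columns, for every value,
 * including single digits. */
static void put_decimal_width(char *buf, size_t size,
			      unsigned long long num, int width)
{
	snprintf(buf, size, "%*llu", width, num);
}

int main(void)
{
	char buf[32];

	put_decimal_width(buf, sizeof(buf), 7, 8);
	printf("[%s]\n", buf);	/* "[       7]" -- the removed fast path effectively printed "[7]" */
	return 0;
}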
@@ -464,7 +464,7 @@ static inline struct page *
 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 {
 	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
-	VM_WARN_ON(!node_online(nid));
+	VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
 
 	return __alloc_pages(gfp_mask, order, nid);
 }
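The relaxed warning only fires when the caller pinned the allocation to the node with __GFP_THISNODE; without that flag the page allocator just falls back through the zonelist, so an offline or memoryless nid is not worth warning about. A userspace model of the new policy; node_online() and the flag bit below are made up for illustration, not kernel values.

#include <stdbool.h>
#include <stdio.h>

#define FAKE_GFP_THISNODE 0x1u		/* illustrative flag bit */

static bool node_online(int nid)
{
	return nid == 0;		/* pretend only node 0 is online */
}

static void alloc_on_node(int nid, unsigned int gfp_mask)
{
	/* Old rule: warn for any offline nid, even though the allocator
	 * would fall back to another node anyway.
	 * New rule: warn only when fallback is forbidden. */
	if ((gfp_mask & FAKE_GFP_THISNODE) && !node_online(nid))
		fprintf(stderr, "warning: allocation pinned to offline node %d\n", nid);
}

int main(void)
{
	alloc_on_node(1, 0);			/* fallback allowed: stays silent */
	alloc_on_node(1, FAKE_GFP_THISNODE);	/* no fallback possible: warns */
	return 0;
}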
@@ -32,9 +32,11 @@ extern struct node *node_devices[];
 typedef void (*node_registration_func_t)(struct node *);
 
 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
-extern int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages);
+extern int link_mem_sections(int nid, unsigned long start_pfn,
+			     unsigned long nr_pages, bool check_nid);
 #else
-static inline int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
+static inline int link_mem_sections(int nid, unsigned long start_pfn,
+				    unsigned long nr_pages, bool check_nid)
 {
 	return 0;
 }
@@ -57,7 +59,7 @@ static inline int register_one_node(int nid)
 		if (error)
 			return error;
 		/* link memory sections under this node */
-		error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages);
+		error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages, true);
 	}
 
 	return error;
@@ -91,6 +91,7 @@
 #include <linux/cache.h>
 #include <linux/rodata_test.h>
 #include <linux/jump_label.h>
+#include <linux/mem_encrypt.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
ipc/shm.c | 19
@@ -1363,14 +1363,17 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
 
 	if (addr) {
 		if (addr & (shmlba - 1)) {
-			/*
-			 * Round down to the nearest multiple of shmlba.
-			 * For sane do_mmap_pgoff() parameters, avoid
-			 * round downs that trigger nil-page and MAP_FIXED.
-			 */
-			if ((shmflg & SHM_RND) && addr >= shmlba)
-				addr &= ~(shmlba - 1);
-			else
+			if (shmflg & SHM_RND) {
+				addr &= ~(shmlba - 1);  /* round down */
+
+				/*
+				 * Ensure that the round-down is non-nil
+				 * when remapping. This can happen for
+				 * cases when addr < shmlba.
+				 */
+				if (!addr && (shmflg & SHM_REMAP))
+					goto out;
+			} else
 #ifndef __ARCH_FORCE_SHMLBA
 				if (addr & ~PAGE_MASK)
 #endif
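The combined revert and fix handle the case where SHM_RND rounds a small, misaligned attach address down to zero: a nil address is harmless when the kernel is free to choose the mapping, but with SHM_REMAP it would request a remap at the nil page, so the new check bails out. A userspace illustration of the call shape involved; the scenario only arises on architectures where SHMLBA is larger than the page size (e.g. ARM), and the observed result depends on the kernel version.

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	void *p;

	if (id < 0) {
		perror("shmget");
		return 1;
	}

	/* 0x1000 is below SHMLBA when SHMLBA > PAGE_SIZE, so SHM_RND rounds
	 * it down to 0; with SHM_REMAP the fixed code refuses the attach
	 * instead of remapping the nil page. */
	p = shmat(id, (void *)0x1000, SHM_RND | SHM_REMAP);
	if (p == (void *)-1)
		perror("shmat");
	else
		shmdt(p);

	shmctl(id, IPC_RMID, NULL);
	return 0;
}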
@@ -71,6 +71,9 @@
 #include <asm/io.h>
 #include <asm/unistd.h>
 
+/* Hardening for Spectre-v1 */
+#include <linux/nospec.h>
+
 #include "uid16.h"
 
 #ifndef SET_UNALIGN_CTL
@@ -1453,6 +1456,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
 	if (resource >= RLIM_NLIMITS)
 		return -EINVAL;
 
+	resource = array_index_nospec(resource, RLIM_NLIMITS);
 	task_lock(current->group_leader);
 	x = current->signal->rlim[resource];
 	task_unlock(current->group_leader);
@@ -1472,6 +1476,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
 	if (resource >= RLIM_NLIMITS)
 		return -EINVAL;
 
+	resource = array_index_nospec(resource, RLIM_NLIMITS);
 	task_lock(current->group_leader);
 	r = current->signal->rlim[resource];
 	task_unlock(current->group_leader);
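array_index_nospec() clamps the index after the bounds check so that a mispredicted branch cannot speculatively index rlim[] with a user-controlled value. A simplified userspace analogue of the pattern; index_nospec() here is a plain stand-in, the real helper computes the clamp mask without a branch, per architecture.

#include <stddef.h>
#include <stdio.h>

#define NLIMITS 16
static unsigned long rlim[NLIMITS];

/* Keep the index inside [0, size) even along a mispredicted path.
 * The ternary below only shows the intent; the kernel does this
 * branchlessly. */
static size_t index_nospec(size_t index, size_t size)
{
	return index < size ? index : 0;
}

static long old_getrlimit_model(unsigned int resource)
{
	if (resource >= NLIMITS)
		return -1;				/* architectural bounds check */

	resource = index_nospec(resource, NLIMITS);	/* Spectre-v1 clamp */
	return (long)rlim[resource];
}

int main(void)
{
	printf("%ld\n", old_getrlimit_model(3));
	return 0;
}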
@@ -2034,10 +2034,12 @@ void *radix_tree_delete_item(struct radix_tree_root *root,
 			     unsigned long index, void *item)
 {
 	struct radix_tree_node *node = NULL;
-	void __rcu **slot;
+	void __rcu **slot = NULL;
 	void *entry;
 
 	entry = __radix_tree_lookup(root, index, &node, &slot);
+	if (!slot)
+		return NULL;
 	if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
 						get_slot_offset(node, slot))))
 		return NULL;
@@ -792,6 +792,40 @@ DEFINE_ASAN_SET_SHADOW(f5);
 DEFINE_ASAN_SET_SHADOW(f8);
 
 #ifdef CONFIG_MEMORY_HOTPLUG
+static bool shadow_mapped(unsigned long addr)
+{
+	pgd_t *pgd = pgd_offset_k(addr);
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	if (pgd_none(*pgd))
+		return false;
+	p4d = p4d_offset(pgd, addr);
+	if (p4d_none(*p4d))
+		return false;
+	pud = pud_offset(p4d, addr);
+	if (pud_none(*pud))
+		return false;
+
+	/*
+	 * We can't use pud_large() or pud_huge(), the first one is
+	 * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
+	 * pud_bad(), if pud is bad then it's bad because it's huge.
+	 */
+	if (pud_bad(*pud))
+		return true;
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none(*pmd))
+		return false;
+
+	if (pmd_bad(*pmd))
+		return true;
+	pte = pte_offset_kernel(pmd, addr);
+	return !pte_none(*pte);
+}
+
 static int __meminit kasan_mem_notifier(struct notifier_block *nb,
 			unsigned long action, void *data)
 {
@@ -813,6 +847,14 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
 	case MEM_GOING_ONLINE: {
 		void *ret;
 
+		/*
+		 * If shadow is mapped already than it must have been mapped
+		 * during the boot. This could happen if we onlining previously
+		 * offlined memory.
+		 */
+		if (shadow_mapped(shadow_start))
+			return NOTIFY_OK;
+
 		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
 					shadow_end, GFP_KERNEL,
 					PAGE_KERNEL, VM_NO_GUARD,
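shadow_start and shadow_size in this notifier come from the generic KASAN shadow layout: each shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT (8) bytes of memory at a fixed offset, so onlining a block means mapping one eighth of its size in shadow. A standalone sketch of that arithmetic; the offset constant below matches the common x86-64 layout but should be read as an example, the real value is chosen per architecture.

#include <stdint.h>
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3			/* 1 shadow byte per 8 bytes */
#define KASAN_SHADOW_OFFSET 0xdffffc0000000000UL	/* example value */

static uint64_t mem_to_shadow(uint64_t addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	uint64_t start = 0xffff888100000000UL;	/* hypothetical hot-added block */
	uint64_t size  = 128ULL << 20;		/* a 128 MiB section */

	printf("shadow_start = %#llx\n", (unsigned long long)mem_to_shadow(start));
	printf("shadow_size  = %#llx\n",
	       (unsigned long long)(size >> KASAN_SHADOW_SCALE_SHIFT));
	return 0;
}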
@@ -824,8 +866,26 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
 		kmemleak_ignore(ret);
 		return NOTIFY_OK;
 	}
-	case MEM_OFFLINE:
-		vfree((void *)shadow_start);
+	case MEM_CANCEL_ONLINE:
+	case MEM_OFFLINE: {
+		struct vm_struct *vm;
+
+		/*
+		 * shadow_start was either mapped during boot by kasan_init()
+		 * or during memory online by __vmalloc_node_range().
+		 * In the latter case we can use vfree() to free shadow.
+		 * Non-NULL result of the find_vm_area() will tell us if
+		 * that was the second case.
+		 *
+		 * Currently it's not possible to free shadow mapped
+		 * during boot by kasan_init(). It's because the code
+		 * to do that hasn't been written yet. So we'll just
+		 * leak the memory.
+		 */
+		vm = find_vm_area((void *)shadow_start);
+		if (vm)
+			vfree((void *)shadow_start);
+	}
 	}
 
 	return NOTIFY_OK;
@@ -838,5 +898,5 @@ static int __init kasan_memhotplug_init(void)
 	return 0;
 }
 
-module_init(kasan_memhotplug_init);
+core_initcall(kasan_memhotplug_init);
 #endif
@@ -1158,7 +1158,7 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
 	 * nodes have to go through register_node.
 	 * TODO clean up this mess.
 	 */
-	ret = link_mem_sections(nid, start_pfn, nr_pages);
+	ret = link_mem_sections(nid, start_pfn, nr_pages, false);
 register_fail:
 	/*
 	 * If sysfs file of new node can't create, cpu on the node
@@ -7598,11 +7598,12 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 	unsigned long pfn, iter, found;
 
 	/*
-	 * For avoiding noise data, lru_add_drain_all() should be called
-	 * If ZONE_MOVABLE, the zone never contains unmovable pages
+	 * TODO we could make this much more efficient by not checking every
+	 * page in the range if we know all of them are in MOVABLE_ZONE and
+	 * that the movable zone guarantees that pages are migratable but
+	 * the later is not the case right now unfortunatelly. E.g. movablecore
+	 * can still lead to having bootmem allocations in zone_movable.
 	 */
-	if (zone_idx(zone) == ZONE_MOVABLE)
-		return false;
 
 	/*
 	 * CMA allocations (alloc_contig_range) really need to mark isolate
@@ -7623,7 +7624,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 		page = pfn_to_page(check);
 
 		if (PageReserved(page))
-			return true;
+			goto unmovable;
 
 		/*
 		 * Hugepages are not in LRU lists, but they're movable.
@@ -7673,9 +7674,12 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 		 * page at boot.
 		 */
 		if (found > count)
-			return true;
+			goto unmovable;
 	}
 	return false;
+unmovable:
+	WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
+	return true;
 }
 
 bool is_pageblock_removable_nolock(struct page *page)
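The refactor funnels every "found an unmovable page" exit through one label, so a single WARN_ON_ONCE() at that label catches the case that should never happen: declaring a page of ZONE_MOVABLE unmovable. A compact sketch of that single-exit shape; names are illustrative, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Every failure path jumps to one label, so the diagnostic for the
 * "this should not happen in a movable zone" case lives in one place. */
static bool range_has_unmovable(const int *pages, int n, bool zone_is_movable)
{
	int i;

	for (i = 0; i < n; i++) {
		if (pages[i] < 0)	/* stand-in for PageReserved() and friends */
			goto unmovable;
	}
	return false;

unmovable:
	if (zone_is_movable)
		fprintf(stderr, "unexpected unmovable page in a movable zone\n");
	return true;
}

int main(void)
{
	int pages[] = { 1, 2, -1 };

	printf("%d\n", range_has_unmovable(pages, 3, true));
	return 0;
}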
@@ -3112,6 +3112,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 	unsigned long *frontswap_map = NULL;
 	struct page *page = NULL;
 	struct inode *inode = NULL;
+	bool inced_nr_rotate_swap = false;
 
 	if (swap_flags & ~SWAP_FLAGS_VALID)
 		return -EINVAL;
@@ -3215,8 +3216,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 			cluster = per_cpu_ptr(p->percpu_cluster, cpu);
 			cluster_set_null(&cluster->index);
 		}
-	} else
+	} else {
 		atomic_inc(&nr_rotate_swap);
+		inced_nr_rotate_swap = true;
+	}
 
 	error = swap_cgroup_swapon(p->type, maxpages);
 	if (error)
@@ -3307,6 +3310,8 @@ bad_swap:
 	vfree(swap_map);
 	kvfree(cluster_info);
 	kvfree(frontswap_map);
+	if (inced_nr_rotate_swap)
+		atomic_dec(&nr_rotate_swap);
 	if (swap_file) {
 		if (inode && S_ISREG(inode->i_mode)) {
 			inode_unlock(inode);
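The fix is the usual "remember what you incremented" pattern: a local flag records whether this swapon() call bumped nr_rotate_swap, and the bad_swap error path only decrements when the flag is set, so failed activations no longer leave the counter inflated. A self-contained sketch of the pattern with illustrative names.

#include <stdbool.h>
#include <stdio.h>

static int nr_rotate;		/* global counter shared by all "swap areas" */

static int setup_swap(bool is_rotational, bool later_step_fails)
{
	bool inced_nr_rotate = false;

	if (is_rotational) {
		nr_rotate++;
		inced_nr_rotate = true;
	}

	if (later_step_fails)
		goto bad;	/* e.g. a later setup step returned an error */
	return 0;

bad:
	if (inced_nr_rotate)
		nr_rotate--;	/* undo only what this call added */
	return -1;
}

int main(void)
{
	setup_swap(true, true);
	printf("nr_rotate after failed setup: %d\n", nr_rotate);	/* 0, no leak */
	return 0;
}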
@@ -5041,7 +5041,7 @@ sub process {
 			$tmp_stmt =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g;
 			$tmp_stmt =~ s/\#+\s*$arg\b//g;
 			$tmp_stmt =~ s/\b$arg\s*\#\#//g;
-			my $use_cnt = $tmp_stmt =~ s/\b$arg\b//g;
+			my $use_cnt = () = $tmp_stmt =~ /\b$arg\b/g;
 			if ($use_cnt > 1) {
 				CHK("MACRO_ARG_REUSE",
 				    "Macro argument reuse '$arg' - possible side-effects?\n" . "$herectx");
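The change counts matches instead of substituting them away, so $tmp_stmt still contains the argument for the precedence test that runs afterwards; the count itself feeds MACRO_ARG_REUSE, which flags macros whose argument is expanded more than once. For context, a minimal C example of the hazard that check is counting:

#include <stdio.h>

/* The argument x is expanded twice, so any side effect in the argument runs
 * twice; SQUARE(i++) would expand to ((i++) * (i++)), whose unsequenced
 * modifications are undefined behaviour -- exactly what the warning targets. */
#define SQUARE(x) ((x) * (x))

int main(void)
{
	int i = 3;
	int safe = SQUARE(i);	/* fine: no side effects in the argument */

	printf("safe=%d i=%d\n", safe, i);
	return 0;
}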
@@ -252,6 +252,13 @@ void idr_checks(void)
 	idr_remove(&idr, 3);
 	idr_remove(&idr, 0);
 
+	assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
+	idr_remove(&idr, 1);
+	for (i = 1; i < RADIX_TREE_MAP_SIZE; i++)
+		assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
+	idr_remove(&idr, 1 << 30);
+	idr_destroy(&idr);
+
 	for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
 		struct item *item = item_create(i, 0);
 		assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);