Merge branch 'akpm' (patches from Andrew Morton)
Merge misc fixes from Andrew Morton:
 "10 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  slab: fix nodeid bounds check for non-contiguous node IDs
  lib/genalloc.c: export devm_gen_pool_create() for modules
  mm: fix anon_vma_clone() error treatment
  mm: fix swapoff hang after page migration and fork
  fat: fix oops on corrupted vfat fs
  ipc/sem.c: fully initialize sem_array before making it visible
  drivers/input/evdev.c: don't kfree() a vmalloc address
  mm/vmpressure.c: fix race in vmpressure_work_fn()
  mm: frontswap: invalidate expired data on a dup-store failure
  mm: do not overwrite reserved pages counter at show_mem()
commit 1dd909affb
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -421,7 +421,7 @@ static int evdev_open(struct inode *inode, struct file *file)
 
 err_free_client:
     evdev_detach_client(evdev, client);
-    kfree(client);
+    kvfree(client);
     return error;
 }
 
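A note on the evdev change above: the per-client buffer may come from kzalloc() or, for large sizes, fall back to a vmalloc-based allocation, so the error path has to free with kvfree(), which copes with either origin; kfree() on a vmalloc address corrupts memory. The following standalone C sketch (illustrative names and a malloc/mmap split, not kernel code) shows the same "dual allocator needs a matching dual free" rule:

/*
 * Standalone sketch, not kernel code: an allocator that falls back to a
 * second backend for large requests must be paired with a free helper that
 * knows which backend was used -- the same reason the error path above needs
 * kvfree() instead of kfree().
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define SMALL_LIMIT 4096

struct buf {
    size_t size;
    int mapped;                 /* 1 = mmap-backed, 0 = malloc-backed */
    void *data;
};

static struct buf *buf_alloc(size_t size)
{
    struct buf *b = malloc(sizeof(*b));

    if (!b)
        return NULL;
    b->size = size;
    b->mapped = size > SMALL_LIMIT;
    b->data = b->mapped ? mmap(NULL, size, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
                        : malloc(size);
    if (!b->data || b->data == MAP_FAILED) {
        free(b);
        return NULL;
    }
    return b;
}

static void buf_free(struct buf *b)
{
    if (!b)
        return;
    /* free with the primitive that matches the allocation */
    if (b->mapped)
        munmap(b->data, b->size);
    else
        free(b->data);
    free(b);
}

int main(void)
{
    struct buf *small = buf_alloc(64);
    struct buf *large = buf_alloc(1 << 20);

    buf_free(small);
    buf_free(large);
    puts("freed both with the matching primitive");
    return 0;
}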
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -736,7 +736,12 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
     }
 
     alias = d_find_alias(inode);
-    if (alias && !vfat_d_anon_disconn(alias)) {
+    /*
+     * Checking "alias->d_parent == dentry->d_parent" to make sure
+     * FS is not corrupted (especially double linked dir).
+     */
+    if (alias && alias->d_parent == dentry->d_parent &&
+        !vfat_d_anon_disconn(alias)) {
         /*
          * This inode has non anonymous-DCACHE_DISCONNECTED
          * dentry. This means, the user did ->lookup() by an
@@ -755,12 +760,9 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
 
 out:
     mutex_unlock(&MSDOS_SB(sb)->s_lock);
-    dentry->d_time = dentry->d_parent->d_inode->i_version;
-    dentry = d_splice_alias(inode, dentry);
-    if (dentry)
-        dentry->d_time = dentry->d_parent->d_inode->i_version;
-    return dentry;
-
+    if (!inode)
+        dentry->d_time = dir->i_version;
+    return d_splice_alias(inode, dentry);
 error:
     mutex_unlock(&MSDOS_SB(sb)->s_lock);
     return ERR_PTR(err);
@@ -793,7 +795,6 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
     inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
     /* timestamp is already written, so mark_inode_dirty() is unneeded. */
 
-    dentry->d_time = dentry->d_parent->d_inode->i_version;
     d_instantiate(dentry, inode);
 out:
     mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -824,6 +825,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
     clear_nlink(inode);
     inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
     fat_detach(inode);
+    dentry->d_time = dir->i_version;
 out:
     mutex_unlock(&MSDOS_SB(sb)->s_lock);
 
@@ -849,6 +851,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
     clear_nlink(inode);
     inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
     fat_detach(inode);
+    dentry->d_time = dir->i_version;
 out:
     mutex_unlock(&MSDOS_SB(sb)->s_lock);
 
@@ -889,7 +892,6 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
     inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
     /* timestamp is already written, so mark_inode_dirty() is unneeded. */
 
-    dentry->d_time = dentry->d_parent->d_inode->i_version;
     d_instantiate(dentry, inode);
 
     mutex_unlock(&MSDOS_SB(sb)->s_lock);
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -507,13 +507,6 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
         return retval;
     }
 
-    id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
-    if (id < 0) {
-        ipc_rcu_putref(sma, sem_rcu_free);
-        return id;
-    }
-    ns->used_sems += nsems;
-
     sma->sem_base = (struct sem *) &sma[1];
 
     for (i = 0; i < nsems; i++) {
@@ -528,6 +521,14 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
     INIT_LIST_HEAD(&sma->list_id);
     sma->sem_nsems = nsems;
     sma->sem_ctime = get_seconds();
+
+    id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
+    if (id < 0) {
+        ipc_rcu_putref(sma, sem_rcu_free);
+        return id;
+    }
+    ns->used_sems += nsems;
+
     sem_unlock(sma, -1);
     rcu_read_unlock();
 
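A note on the ipc/sem.c change above: ipc_addid() is the step that makes the new sem_array reachable by other tasks through the IPC id table, so it must run only after every field is initialized; moving it after the initialization closes the window in which a concurrent lookup could see a half-built array. The following standalone C11 sketch (the registry pointer and field names are illustrative only, not the kernel code) shows the publish-after-init pattern:

/*
 * Standalone sketch, not kernel code: fully initialize the object first,
 * then publish the pointer last with release ordering, so readers that find
 * it (acquire load) always see initialized fields.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sem_array {
    int nsems;
    long ctime;
};

static _Atomic(struct sem_array *) registry;   /* stands in for the id table */

static void *reader(void *arg)
{
    struct sem_array *sma;

    (void)arg;
    /* acquire load pairs with the release store in publish() */
    while (!(sma = atomic_load_explicit(&registry, memory_order_acquire)))
        ;
    printf("reader sees nsems=%d ctime=%ld\n", sma->nsems, sma->ctime);
    return NULL;
}

static void publish(struct sem_array *sma)
{
    /* only now does the object become visible to other threads */
    atomic_store_explicit(&registry, sma, memory_order_release);
}

int main(void)
{
    pthread_t t;
    struct sem_array *sma = malloc(sizeof(*sma));

    if (!sma)
        return 1;
    pthread_create(&t, NULL, reader, NULL);

    sma->nsems = 4;            /* initialize every field first ... */
    sma->ctime = 123456789;
    publish(sma);              /* ... and publish last */

    pthread_join(t, NULL);
    free(sma);
    return 0;
}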
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -598,6 +598,7 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
 
     return pool;
 }
+EXPORT_SYMBOL(devm_gen_pool_create);
 
 /**
  * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -28,7 +28,7 @@ void show_mem(unsigned int filter)
             continue;
 
         total += zone->present_pages;
-        reserved = zone->present_pages - zone->managed_pages;
+        reserved += zone->present_pages - zone->managed_pages;
 
         if (is_highmem_idx(zoneid))
             highmem += zone->present_pages;
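A note on the show_mem() change above: reserved is meant to be a system-wide total, but the plain assignment inside the per-zone loop kept only the last zone's value; the fix accumulates with += like the neighbouring total and highmem counters. A trivial standalone illustration (made-up numbers):

/* Standalone illustration of the one-character bug: inside a loop over
 * zones, "=" keeps only the last zone's value, "+=" sums over all zones. */
#include <stdio.h>

int main(void)
{
    long present[] = { 4096, 262144, 1048576 };
    long managed[] = { 4000, 260000, 1040000 };
    long wrong = 0, right = 0;

    for (int i = 0; i < 3; i++) {
        wrong = present[i] - managed[i];   /* overwrites previous zones */
        right += present[i] - managed[i];  /* accumulates across zones */
    }
    printf("last zone only: %ld, all zones: %ld\n", wrong, right);
    return 0;
}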
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -244,8 +244,10 @@ int __frontswap_store(struct page *page)
           the (older) page from frontswap
          */
         inc_frontswap_failed_stores();
-        if (dup)
+        if (dup) {
             __frontswap_clear(sis, offset);
+            frontswap_ops->invalidate_page(type, offset);
+        }
     }
     if (frontswap_writethrough_enabled)
         /* report failure so swap also writes to swap device */
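A note on the frontswap change above: on a duplicate store the backend already holds an older copy of this offset; if writing the new version fails, that old copy is now stale, so the fix also tells the backend to drop it (invalidate_page) rather than only clearing the map bit, otherwise a later load could return expired data. A standalone sketch of the invalidate-on-failed-overwrite rule (the toy cache below is illustrative only):

/*
 * Standalone sketch, not kernel code: if a new version of a slot cannot be
 * written to the front cache, the old cached version must be dropped, or
 * later reads would see stale data.
 */
#include <stdio.h>
#include <string.h>

#define SLOTS 4

static char cache[SLOTS][16];
static int cached[SLOTS];

static int cache_store(int slot, const char *val)
{
    if (strlen(val) >= sizeof(cache[slot]))
        return -1;                /* store failed, e.g. no space */
    strcpy(cache[slot], val);
    cached[slot] = 1;
    return 0;
}

static void cache_invalidate(int slot)
{
    cached[slot] = 0;
}

int main(void)
{
    cache_store(0, "v1");                       /* first store succeeds */
    if (cache_store(0, "v2-too-long-to-fit") < 0)
        cache_invalidate(0);                    /* dup-store failed: drop stale "v1" */
    printf("slot 0 is %s\n", cached[0] ? "cached" : "invalidated");
    return 0;
}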
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -815,20 +815,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         if (!pte_file(pte)) {
             swp_entry_t entry = pte_to_swp_entry(pte);
 
-            if (swap_duplicate(entry) < 0)
-                return entry.val;
-
-            /* make sure dst_mm is on swapoff's mmlist. */
-            if (unlikely(list_empty(&dst_mm->mmlist))) {
-                spin_lock(&mmlist_lock);
-                if (list_empty(&dst_mm->mmlist))
-                    list_add(&dst_mm->mmlist,
-                         &src_mm->mmlist);
-                spin_unlock(&mmlist_lock);
-            }
-            if (likely(!non_swap_entry(entry)))
+            if (likely(!non_swap_entry(entry))) {
+                if (swap_duplicate(entry) < 0)
+                    return entry.val;
+
+                /* make sure dst_mm is on swapoff's mmlist. */
+                if (unlikely(list_empty(&dst_mm->mmlist))) {
+                    spin_lock(&mmlist_lock);
+                    if (list_empty(&dst_mm->mmlist))
+                        list_add(&dst_mm->mmlist,
+                             &src_mm->mmlist);
+                    spin_unlock(&mmlist_lock);
+                }
                 rss[MM_SWAPENTS]++;
-            else if (is_migration_entry(entry)) {
+            } else if (is_migration_entry(entry)) {
                 page = migration_entry_to_page(entry);
 
                 if (PageAnon(page))
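A note on the copy_one_pte() change above: the swap-specific bookkeeping (swap_duplicate() and putting dst_mm on swapoff's mmlist) now runs only inside the likely(!non_swap_entry(entry)) branch, i.e. for genuine swap entries, while migration entries keep their own else-if branch; per the commit subject this is what fixes the swapoff hang seen after page migration and fork. A minimal standalone sketch of the reshaped dispatch (entry kinds and helpers are invented for illustration):

/*
 * Minimal sketch, not kernel code: classify the entry first, and keep all
 * swap-only bookkeeping inside the swap branch.
 */
#include <stdio.h>

enum entry_kind { ENTRY_SWAP, ENTRY_MIGRATION };

struct entry {
    enum entry_kind kind;
    int val;
};

static int duplicate_swap(const struct entry *e)
{
    printf("swap entry %d: duplicate + mmlist bookkeeping\n", e->val);
    return 0;
}

static void note_migration(const struct entry *e)
{
    printf("migration entry %d: no swap bookkeeping\n", e->val);
}

static int copy_one_entry(const struct entry *e)
{
    if (e->kind == ENTRY_SWAP) {
        /* everything swap-specific stays inside this branch */
        if (duplicate_swap(e) < 0)
            return -1;
    } else if (e->kind == ENTRY_MIGRATION) {
        note_migration(e);
    }
    return 0;
}

int main(void)
{
    const struct entry swap = { ENTRY_SWAP, 1 };
    const struct entry migr = { ENTRY_MIGRATION, 2 };

    copy_one_entry(&swap);
    copy_one_entry(&migr);
    return 0;
}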
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -776,8 +776,11 @@ again: remove_next = 1 + (end > next->vm_end);
          * shrinking vma had, to cover any anon pages imported.
          */
         if (exporter && exporter->anon_vma && !importer->anon_vma) {
-            if (anon_vma_clone(importer, exporter))
-                return -ENOMEM;
+            int error;
+
+            error = anon_vma_clone(importer, exporter);
+            if (error)
+                return error;
             importer->anon_vma = exporter->anon_vma;
         }
     }
@@ -2469,7 +2472,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
     if (err)
         goto out_free_vma;
 
-    if (anon_vma_clone(new, vma))
+    err = anon_vma_clone(new, vma);
+    if (err)
         goto out_free_mpol;
 
     if (new->vm_file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -274,6 +274,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 {
     struct anon_vma_chain *avc;
     struct anon_vma *anon_vma;
+    int error;
 
     /* Don't bother if the parent process has no anon_vma here. */
     if (!pvma->anon_vma)
@@ -283,8 +284,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
      * First, attach the new VMA to the parent VMA's anon_vmas,
      * so rmap can find non-COWed pages in child processes.
      */
-    if (anon_vma_clone(vma, pvma))
-        return -ENOMEM;
+    error = anon_vma_clone(vma, pvma);
+    if (error)
+        return error;
 
     /* Then add our own anon_vma. */
     anon_vma = anon_vma_alloc();
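A note on the anon_vma_clone() callers in mm/mmap.c and mm/rmap.c above: the helper can fail for more than one reason, so the callers now return whatever error it reports instead of collapsing every failure into a hard-coded -ENOMEM. A standalone sketch of the difference (the helper and errno values are illustrative only):

/*
 * Standalone sketch, not kernel code: propagate the callee's error code
 * instead of assuming a particular failure reason.
 */
#include <errno.h>
#include <stdio.h>

static int clone_chain(int fail_with)
{
    return fail_with ? -fail_with : 0;   /* pretend helper, may fail with any errno */
}

static int fork_old(int fail_with)
{
    if (clone_chain(fail_with))
        return -ENOMEM;                  /* loses the real reason */
    return 0;
}

static int fork_new(int fail_with)
{
    int error = clone_chain(fail_with);

    if (error)
        return error;                    /* caller sees the real errno */
    return 0;
}

int main(void)
{
    printf("old: %d, new: %d (expected -%d)\n",
           fork_old(EINTR), fork_new(EINTR), EINTR);
    return 0;
}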
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3076,7 +3076,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
     void *obj;
     int x;
 
-    VM_BUG_ON(nodeid > num_online_nodes());
+    VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
     n = get_node(cachep, nodeid);
     BUG_ON(!n);
 
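A note on the slab change above: node IDs need not be contiguous, so the count of online nodes is not an upper bound for a valid nodeid; the new check validates the ID against the real array bound, 0..MAX_NUMNODES-1. A standalone illustration with a sparse online map (sizes and IDs are made up):

/*
 * Standalone illustration, not kernel code: with nodes {0, 4} online the
 * online count is 2, so a perfectly valid nodeid of 4 trips a count-based
 * comparison, while a range check against the maximum possible ID behaves.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 8

static const bool online[MAX_NODES] = { [0] = true, [4] = true };

static int num_online(void)
{
    int n = 0;

    for (int i = 0; i < MAX_NODES; i++)
        n += online[i];
    return n;
}

int main(void)
{
    int nodeid = 4;     /* valid, but non-contiguous */

    printf("count-based check rejects id %d: %s\n", nodeid,
           nodeid > num_online() ? "yes (false positive)" : "no");
    printf("range-based check rejects id %d: %s\n", nodeid,
           (nodeid < 0 || nodeid >= MAX_NODES) ? "yes" : "no (correct)");
    return 0;
}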
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
     unsigned long scanned;
     unsigned long reclaimed;
 
+    spin_lock(&vmpr->sr_lock);
     /*
      * Several contexts might be calling vmpressure(), so it is
      * possible that the work was rescheduled again before the old
@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
      * here. No need for any locks here since we don't care if
      * vmpr->reclaimed is in sync.
      */
-    if (!vmpr->scanned)
-        return;
-
-    spin_lock(&vmpr->sr_lock);
     scanned = vmpr->scanned;
+    if (!scanned) {
+        spin_unlock(&vmpr->sr_lock);
+        return;
+    }
+
     reclaimed = vmpr->reclaimed;
     vmpr->scanned = 0;
     vmpr->reclaimed = 0;
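A note on the vmpressure change above: the work function used to test vmpr->scanned without the lock and only then take sr_lock to read and clear the counters, leaving a window against concurrent vmpressure() updates; now the lock is taken first and the test, snapshot and reset all happen in one critical section, with an unlock on the early return. A standalone sketch of the read-test-clear-under-one-lock shape (counters and names are illustrative, not the kernel code):

/*
 * Standalone sketch, not kernel code: snapshot and test the counter inside
 * the same critical section that clears it, instead of testing it unlocked.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sr_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long scanned_total;
static unsigned long reclaimed_total;

/* producer side: accumulate under the lock */
static void report(unsigned long scanned, unsigned long reclaimed)
{
    pthread_mutex_lock(&sr_lock);
    scanned_total += scanned;
    reclaimed_total += reclaimed;
    pthread_mutex_unlock(&sr_lock);
}

/* consumer side: read-test-clear as one step with respect to the lock */
static void work_fn(void)
{
    unsigned long scanned, reclaimed;

    pthread_mutex_lock(&sr_lock);
    scanned = scanned_total;
    if (!scanned) {
        pthread_mutex_unlock(&sr_lock);
        return;                       /* nothing to do, nothing consumed */
    }
    reclaimed = reclaimed_total;
    scanned_total = 0;
    reclaimed_total = 0;
    pthread_mutex_unlock(&sr_lock);

    printf("consumed scanned=%lu reclaimed=%lu\n", scanned, reclaimed);
}

int main(void)
{
    report(100, 10);
    work_fn();   /* consumes the pair consistently */
    work_fn();   /* sees zero and returns early */
    return 0;
}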