Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
 "11 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  Update maintainers for DRM STI driver
  mm: cma: mark cma_bitmap_maxno() inline in header
  zram: fix pool name truncation
  memory-hotplug: fix wrong edge when hot add a new node
  .mailmap: Andrey Ryabinin has moved
  ipc/sem.c: update/correct memory barriers
  mm/hwpoison: fix panic due to split huge zero page
  ipc,sem: remove uneeded sem_undo_list lock usage in exit_sem()
  ipc,sem: fix use after free on IPC_RMID after a task using same semaphore set exits
  mm/hwpoison: fix fail isolate hugetlbfs page w/ refcount held
  mm/hwpoison: fix page refcount of unknown non LRU page
commit 8394a1b715
.mailmap
1
.mailmap
|
@@ -17,6 +17,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Al Viro <viro@ftp.linux.org.uk>
 Al Viro <viro@zenIV.linux.org.uk>
 Andreas Herrmann <aherrman@de.ibm.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
diff --git a/MAINTAINERS b/MAINTAINERS
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3587,6 +3587,15 @@ S:	Maintained
 F:	drivers/gpu/drm/rockchip/
 F:	Documentation/devicetree/bindings/video/rockchip*
 
+DRM DRIVERS FOR STI
+M:	Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M:	Vincent Abriou <vincent.abriou@st.com>
+L:	dri-devel@lists.freedesktop.org
+T:	git http://git.linaro.org/people/benjamin.gaignard/kernel.git
+S:	Maintained
+F:	drivers/gpu/drm/sti
+F:	Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+
 DSBR100 USB FM RADIO DRIVER
 M:	Alexey Klimov <klimov.linux@gmail.com>
 L:	linux-media@vger.kernel.org
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -496,10 +496,9 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
 	kfree(meta);
 }
 
-static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
+static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
 {
 	size_t num_pages;
-	char pool_name[8];
 	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
 
 	if (!meta)
@@ -512,7 +511,6 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
 		goto out_error;
 	}
 
-	snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
 	meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
 	if (!meta->mem_pool) {
 		pr_err("Error creating memory pool\n");
@@ -1031,7 +1029,7 @@ static ssize_t disksize_store(struct device *dev,
 		return -EINVAL;
 
 	disksize = PAGE_ALIGN(disksize);
-	meta = zram_meta_alloc(zram->disk->first_minor, disksize);
+	meta = zram_meta_alloc(zram->disk->disk_name, disksize);
 	if (!meta)
 		return -ENOMEM;
 
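Why the truncation mattered: "zram%d" no longer fits in the old 8-byte buffer once the device id reaches four digits, so two devices could silently ask zs_create_pool() for the same pool name. A minimal user-space sketch of the failure mode (plain C, not the kernel code):

#include <stdio.h>

int main(void)
{
	char pool_name[8];	/* the old fixed-size buffer */
	int ids[] = { 0, 999, 1000, 1001 };

	for (int i = 0; i < 4; i++) {
		snprintf(pool_name, sizeof(pool_name), "zram%d", ids[i]);
		printf("id %4d -> \"%s\"\n", ids[i], pool_name);
	}
	/* ids 1000 and 1001 both print "zram100": the pool names collide */
	return 0;
}

Passing zram->disk->disk_name through instead removes the local buffer entirely, so the name can never be truncated.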
diff --git a/ipc/sem.c b/ipc/sem.c
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -252,6 +252,16 @@ static void sem_rcu_free(struct rcu_head *head)
 	ipc_rcu_free(head);
 }
 
+/*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked()	smp_rmb()
+
 /*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
 		sem = sma->sem_base + i;
 		spin_unlock_wait(&sem->lock);
 	}
+	ipc_smp_acquire__after_spin_is_unlocked();
 }
 
 /*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 		/* Then check that the global lock is free */
 		if (!spin_is_locked(&sma->sem_perm.lock)) {
 			/*
-			 * The ipc object lock check must be visible on all
-			 * cores before rechecking the complex count.  Otherwise
-			 * we can race with another thread that does:
+			 * We need a memory barrier with acquire semantics,
+			 * otherwise we can race with another thread that does:
 			 *	complex_count++;
 			 *	spin_unlock(sem_perm.lock);
 			 */
-			smp_rmb();
+			ipc_smp_acquire__after_spin_is_unlocked();
 
 			/*
 			 * Now repeat the test of complex_count:
@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
 		rcu_read_lock();
 		un = list_entry_rcu(ulp->list_proc.next,
 				    struct sem_undo, list_proc);
-		if (&un->list_proc == &ulp->list_proc)
-			semid = -1;
-		 else
-			semid = un->semid;
-
-		if (semid == -1) {
+		if (&un->list_proc == &ulp->list_proc) {
+			/*
+			 * We must wait for freeary() before freeing this ulp,
+			 * in case we raced with last sem_undo. There is a small
+			 * possibility where we exit while freeary() didn't
+			 * finish unlocking sem_undo_list.
+			 */
+			spin_unlock_wait(&ulp->lock);
 			rcu_read_unlock();
 			break;
 		}
+		spin_lock(&ulp->lock);
+		semid = un->semid;
+		spin_unlock(&ulp->lock);
 
-		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+		/* exit_sem raced with IPC_RMID, nothing to do */
+		if (semid == -1) {
+			rcu_read_unlock();
+			continue;
+		}
+
+		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
 		/* exit_sem raced with IPC_RMID, nothing to do */
 		if (IS_ERR(sma)) {
 			rcu_read_unlock();
@@ -2112,9 +2133,11 @@ void exit_sem(struct task_struct *tsk)
 		ipc_assert_locked_object(&sma->sem_perm);
 		list_del(&un->list_id);
 
-		spin_lock(&ulp->lock);
+		/* we are the last process using this ulp, acquiring ulp->lock
+		 * isn't required. Besides that, we are also protected against
+		 * IPC_RMID as we hold sma->sem_perm lock now
+		 */
 		list_del_rcu(&un->list_proc);
-		spin_unlock(&ulp->lock);
 
 		/* perform adjustments registered in un */
 		for (i = 0; i < sma->sem_nsems; i++) {
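The barrier hunks above encode one rule: observing a spinlock as free via spin_unlock_wait() or !spin_is_locked() is only a control dependency, so a subsequent read of data the lock holder wrote still needs acquire semantics. A user-space sketch of the pairing, expressed with C11 atomics (illustration only; the kernel uses smp_rmb() behind the new macro):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool perm_locked;		/* models sma->sem_perm.lock */
static atomic_int complex_count;	/* written only while locked */

/* Complex-op path: bump the count, then drop the global lock. */
static void complex_op_done(void)
{
	atomic_fetch_add_explicit(&complex_count, 1, memory_order_relaxed);
	atomic_store_explicit(&perm_locked, false,
			      memory_order_release);	/* spin_unlock() */
}

/* Simple-op fast path, as in sem_lock(). */
static bool fast_path_ok(void)
{
	if (atomic_load_explicit(&perm_locked, memory_order_relaxed))
		return false;		/* spin_is_locked() said "held" */
	/*
	 * Only a control dependency so far. Pair with the release store
	 * above before re-reading complex_count, or a stale zero may be
	 * observed -- this is ipc_smp_acquire__after_spin_is_unlocked().
	 */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&complex_count,
				    memory_order_relaxed) == 0;
}

The exit_sem() hunks apply the same discipline to data: un->semid is now snapshotted under ulp->lock, so a racing IPC_RMID (freeary()) can no longer free the undo structure between an unlocked read and its use.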
diff --git a/mm/cma.h b/mm/cma.h
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -16,7 +16,7 @@ struct cma {
 extern struct cma cma_areas[MAX_CMA_AREAS];
 extern unsigned cma_area_count;
 
-static unsigned long cma_bitmap_maxno(struct cma *cma)
+static inline unsigned long cma_bitmap_maxno(struct cma *cma)
 {
 	return cma->count >> cma->order_per_bit;
 }
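Context for the one-word fix: a plain 'static' function defined in a header becomes a separate file-scope definition in every translation unit that includes it, and any includer that never calls it triggers -Wunused-function (fatal under -Werror). A minimal sketch with a hypothetical header, not mm/cma.h itself:

/* demo.h -- included from many .c files */

/* Old form: every TU that includes this but never calls it gets
 * "warning: 'bitmap_maxno' defined but not used". */
static unsigned long bitmap_maxno(unsigned long count, unsigned int order)
{
	return count >> order;
}

/* Fixed form: 'static inline' is exempt from the unused-function
 * warning, and the compiler may inline or drop the body per TU. */
static inline unsigned long bitmap_maxno_fixed(unsigned long count,
					       unsigned int order)
{
	return count >> order;
}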
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -2,7 +2,7 @@
  * This file contains shadow memory manipulation code.
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  *
  * Some of code borrowed from https://github.com/xairy/linux by
  *        Andrey Konovalov <adech.fo@gmail.com>
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -2,7 +2,7 @@
  * This file contains error reporting code.
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  *
  * Some of code borrowed from https://github.com/xairy/linux by
  *        Andrey Konovalov <adech.fo@gmail.com>
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1146,8 +1146,11 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	}
 
 	if (!PageHuge(p) && PageTransHuge(hpage)) {
-		if (unlikely(split_huge_page(hpage))) {
-			pr_err("MCE: %#lx: thp split failed\n", pfn);
+		if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
+			if (!PageAnon(hpage))
+				pr_err("MCE: %#lx: non anonymous thp\n", pfn);
+			else
+				pr_err("MCE: %#lx: thp split failed\n", pfn);
 			if (TestClearPageHWPoison(p))
 				atomic_long_sub(nr_pages, &num_poisoned_pages);
 			put_page(p);
@@ -1538,6 +1541,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
 		 */
 		ret = __get_any_page(page, pfn, 0);
 		if (!PageLRU(page)) {
+			/* Drop page reference which is from __get_any_page() */
+			put_page(page);
 			pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
 				pfn, page->flags);
 			return -EIO;
@@ -1567,13 +1572,12 @@ static int soft_offline_huge_page(struct page *page, int flags)
 	unlock_page(hpage);
 
 	ret = isolate_huge_page(hpage, &pagelist);
-	if (ret) {
-		/*
-		 * get_any_page() and isolate_huge_page() takes a refcount each,
-		 * so need to drop one here.
-		 */
-		put_page(hpage);
-	} else {
+	/*
+	 * get_any_page() and isolate_huge_page() takes a refcount each,
+	 * so need to drop one here.
+	 */
+	put_page(hpage);
+	if (!ret) {
 		pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
 		return -EBUSY;
 	}
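The common thread in the second and third hunks is reference accounting: every helper that takes a page reference must be balanced on every exit path, including the error paths. A toy model of the soft_offline_huge_page() invariant (plain C with stand-in names, not the kernel API):

#include <assert.h>
#include <stdbool.h>

struct page { int refcount; };

static void get_page(struct page *p) { p->refcount++; }
static void put_page(struct page *p) { p->refcount--; }

/* stand-ins: each successful helper takes one reference */
static void get_any_page(struct page *p) { get_page(p); }
static bool isolate_huge_page(struct page *p, bool ok)
{
	if (ok)
		get_page(p);
	return ok;
}

static int soft_offline(struct page *p, bool isolate_ok)
{
	get_any_page(p);
	bool ret = isolate_huge_page(p, isolate_ok);
	/* drop the get_any_page() reference on BOTH paths, as the fix does */
	put_page(p);
	if (!ret)
		return -1;	/* failed to isolate; nothing leaked */
	put_page(p);		/* stand-in for migration consuming its ref */
	return 0;
}

int main(void)
{
	struct page p = { .refcount = 1 };	/* caller's reference */

	soft_offline(&p, true);
	assert(p.refcount == 1);
	soft_offline(&p, false);
	assert(p.refcount == 1);	/* old code left 2 here: a leaked ref */
	return 0;
}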
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1277,6 +1277,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
 
 	/* create new memmap entry */
 	firmware_map_add_hotplug(start, start + size, "System RAM");
+	memblock_add_node(start, size, nid);
 
 	goto out;
 
@@ -2013,6 +2014,8 @@ void __ref remove_memory(int nid, u64 start, u64 size)
 
 	/* remove memmap entry */
 	firmware_map_remove(start, start + size, "System RAM");
+	memblock_free(start, size);
+	memblock_remove(start, size);
 
 	arch_remove_memory(start, size);
 
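These two hunks keep memblock in sync with hot-added and hot-removed ranges: without the memblock_add_node() registration, a node added at runtime has no recorded regions, so later pfn-range lookups treat it as empty even though it now owns memory. A toy model of that bookkeeping (illustrative only; memblock's real API and internals differ):

#include <stdio.h>

#define MAX_REGIONS 8

struct region { unsigned long start, size; int nid; };
static struct region regions[MAX_REGIONS];
static int nr_regions;

static void memblock_add_node(unsigned long start, unsigned long size, int nid)
{
	regions[nr_regions++] = (struct region){ start, size, nid };
}

static void memblock_remove(unsigned long start, unsigned long size)
{
	for (int i = 0; i < nr_regions; i++)
		if (regions[i].start == start && regions[i].size == size) {
			regions[i] = regions[--nr_regions];
			return;
		}
}

/* analogue of get_pfn_range_for_nid(): (0, 0) means "empty node" */
static void pfn_range_for_nid(int nid, unsigned long *lo, unsigned long *hi)
{
	*lo = *hi = 0;
	for (int i = 0; i < nr_regions; i++)
		if (regions[i].nid == nid) {
			if (!*hi || regions[i].start < *lo)
				*lo = regions[i].start;
			if (regions[i].start + regions[i].size > *hi)
				*hi = regions[i].start + regions[i].size;
		}
}

int main(void)
{
	unsigned long lo, hi;

	memblock_add_node(0x100000, 0x40000, 1);   /* add_memory(nid=1, ...) */
	pfn_range_for_nid(1, &lo, &hi);
	printf("node 1: %#lx-%#lx\n", lo, hi);     /* range is now known */

	memblock_remove(0x100000, 0x40000);        /* remove_memory(...) */
	pfn_range_for_nid(1, &lo, &hi);
	printf("node 1: %#lx-%#lx\n", lo, hi);     /* back to (0, 0) */
	return 0;
}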
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5060,6 +5060,10 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
 {
 	unsigned long zone_start_pfn, zone_end_pfn;
 
+	/* When hotadd a new node, the node should be empty */
+	if (!node_start_pfn && !node_end_pfn)
+		return 0;
+
 	/* Get the start and end of the zone */
 	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
 	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
@@ -5123,6 +5127,10 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
 	unsigned long zone_start_pfn, zone_end_pfn;
 
+	/* When hotadd a new node, the node should be empty */
+	if (!node_start_pfn && !node_end_pfn)
+		return 0;
+
 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
 
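The guard is the consumer side of the same invariant: a node that is still being hot-added has node_start_pfn == node_end_pfn == 0, and feeding that through clamp() anchors both zone edges at zone_low instead of reporting an empty range. A simplified demonstration of the edge collapse (not the full kernel computation):

#include <stdio.h>

static unsigned long clamp(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned long zone_low = 0x1000, zone_high = 0x40000;
	unsigned long node_start_pfn = 0, node_end_pfn = 0;   /* empty node */

	unsigned long start = clamp(node_start_pfn, zone_low, zone_high);
	unsigned long end   = clamp(node_end_pfn,   zone_low, zone_high);

	/* both edges collapse to zone_low: the empty node looks anchored at
	 * the zone's lower edge rather than absent from it, which is why the
	 * functions now bail out with 0 before doing any edge math */
	printf("start=%#lx end=%#lx\n", start, end);
	return 0;
}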