Merge branch 'akpm' (Andrew's fixes)

Merge misc fixes from Andrew Morton:
 "18 total.  15 fixes and some updates to a device_cgroup patchset which
  bring it up to date with the version which I should have merged in the
  first place."

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (18 patches)
  fs/compat_ioctl.c: VIDEO_SET_SPU_PALETTE missing error check
  gen_init_cpio: avoid stack overflow when expanding
  drivers/rtc/rtc-imxdi.c: add missing spin lock initialization
  mm, numa: avoid setting zone_reclaim_mode unless a node is sufficiently distant
  pidns: limit the nesting depth of pid namespaces
  drivers/dma/dw_dmac: make driver's endianness configurable
  mm/mmu_notifier: allocate mmu_notifier in advance
  tools/testing/selftests/epoll/test_epoll.c: fix build
  UAPI: fix tools/vm/page-types.c
  mm/page_alloc.c:alloc_contig_range(): return early for err path
  rbtree: include linux/compiler.h for definition of __always_inline
  genalloc: stop crashing the system when destroying a pool
  backlight: ili9320: add missing SPI dependency
  device_cgroup: add proper checking when changing default behavior
  device_cgroup: stop using simple_strtoul()
  device_cgroup: rename deny_all to behavior
  cgroup: fix invalid rcu dereference
  mm: fix XFS oops due to dirty pages without buffers on s390
Linus Torvalds 2012-10-25 16:05:57 -07:00
Parents: b1e4279e4e 1217650336
Commit: 2ab3f29ddd
15 changed files with 158 additions and 75 deletions


@@ -90,6 +90,17 @@ config DW_DMAC
Support the Synopsys DesignWare AHB DMA controller. This
can be integrated in chips such as the Atmel AT32ap7000.
config DW_DMAC_BIG_ENDIAN_IO
bool "Use big endian I/O register access"
default y if AVR32
depends on DW_DMAC
help
Say yes here to use big endian I/O access when reading and writing
to the DMA controller registers. This is needed on some platforms,
like the Atmel AVR32 architecture.
If unsure, use the default setting.
config AT_HDMAC
tristate "Atmel AHB DMA support"
depends on ARCH_AT91


@@ -98,9 +98,17 @@ struct dw_dma_regs {
u32 DW_PARAMS;
};
#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
#define dma_readl_native ioread32be
#define dma_writel_native iowrite32be
#else
#define dma_readl_native readl
#define dma_writel_native writel
#endif
/* To access the registers in early stage of probe */
#define dma_read_byaddr(addr, name) \
readl((addr) + offsetof(struct dw_dma_regs, name))
dma_readl_native((addr) + offsetof(struct dw_dma_regs, name))
/* Bitfields in DW_PARAMS */
#define DW_PARAMS_NR_CHAN 8 /* number of channels */
@@ -216,9 +224,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
}
#define channel_readl(dwc, name) \
readl(&(__dwc_regs(dwc)->name))
dma_readl_native(&(__dwc_regs(dwc)->name))
#define channel_writel(dwc, name, val) \
writel((val), &(__dwc_regs(dwc)->name))
dma_writel_native((val), &(__dwc_regs(dwc)->name))
static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
{
@@ -246,9 +254,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
}
#define dma_readl(dw, name) \
readl(&(__dw_regs(dw)->name))
dma_readl_native(&(__dw_regs(dw)->name))
#define dma_writel(dw, name, val) \
writel((val), &(__dw_regs(dw)->name))
dma_writel_native((val), &(__dw_regs(dw)->name))
#define channel_set_bit(dw, reg, mask) \
dma_writel(dw, reg, ((mask) << 8) | (mask))
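
For context on why the new CONFIG_DW_DMAC_BIG_ENDIAN_IO switch exists: readl()/writel() always perform little-endian MMIO accesses, so a controller whose registers are wired big-endian (the AVR32 case named in the Kconfig help) would read every value byte-swapped. The standalone sketch below emulates the two access styles against a fake register word; fake_readl() and fake_ioread32be() are illustrative stand-ins, not the kernel helpers.

/*
 * Userspace sketch: a little-endian register read vs a big-endian one,
 * applied to a fake MMIO word.  Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_readl(const uint8_t *p)        /* little-endian read */
{
	return p[0] | p[1] << 8 | p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint32_t fake_ioread32be(const uint8_t *p)   /* big-endian read */
{
	return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

int main(void)
{
	/* a device register holding 0x12345678, stored with big-endian byte order */
	uint8_t reg[4] = { 0x12, 0x34, 0x56, 0x78 };

	printf("readl-style:      0x%08x\n", (unsigned)fake_readl(reg));      /* 0x78563412 */
	printf("ioread32be-style: 0x%08x\n", (unsigned)fake_ioread32be(reg)); /* 0x12345678 */
	return 0;
}

With the option enabled, the dma_readl_native()/dma_writel_native() aliases above resolve to ioread32be()/iowrite32be(); everywhere else they remain plain readl()/writel(), so existing platforms keep their old behaviour.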


@@ -392,6 +392,8 @@ static int dryice_rtc_probe(struct platform_device *pdev)
if (imxdi->ioaddr == NULL)
return -ENOMEM;
spin_lock_init(&imxdi->irq_lock);
imxdi->irq = platform_get_irq(pdev, 0);
if (imxdi->irq < 0)
return imxdi->irq;


@@ -60,7 +60,8 @@ config LCD_LTV350QV
The LTV350QV panel is present on all ATSTK1000 boards.
config LCD_ILI9320
tristate
tristate "ILI Technology ILI9320 controller support"
depends on SPI
help
If you have a panel based on the ILI9320 controller chip
then say y to include a power driver for it.


@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
err = get_user(palp, &up->palette);
err |= get_user(length, &up->length);
if (err)
return -EFAULT;
up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
err = put_user(compat_ptr(palp), &up_native->palette);


@@ -24,6 +24,7 @@
#ifndef _LINUX_RBTREE_AUGMENTED_H
#define _LINUX_RBTREE_AUGMENTED_H
#include <linux/compiler.h>
#include <linux/rbtree.h>
/*


@@ -71,12 +71,22 @@ err_alloc:
return NULL;
}
/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
#define MAX_PID_NS_LEVEL 32
static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns)
{
struct pid_namespace *ns;
unsigned int level = parent_pid_ns->level + 1;
int i, err = -ENOMEM;
int i;
int err;
if (level > MAX_PID_NS_LEVEL) {
err = -EINVAL;
goto out;
}
err = -ENOMEM;
ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
if (ns == NULL)
goto out;
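
The new MAX_PID_NS_LEVEL check bounds how large 'struct pid' can get, as the comment says: the structure ends in one struct upid per namespace level, so every extra level of nesting makes each subsequently allocated pid bigger. A rough userspace sketch of that growth, using simplified stand-in layouts rather than the real kernel structures:

/*
 * Simplified stand-ins for the kernel's struct pid / struct upid, only to
 * show how the per-level array makes the allocation grow with nesting depth.
 */
#include <stdio.h>

struct upid_sketch { int nr; void *ns; void *pid_chain[2]; };

struct pid_sketch {
	int count;
	unsigned int level;
	void *tasks[3];
	void *rcu[2];
	struct upid_sketch numbers[1];	/* one entry per namespace level */
};

int main(void)
{
	for (unsigned int level = 1; level <= 32; level *= 2)
		printf("level %2u -> roughly %zu bytes per pid\n", level,
		       sizeof(struct pid_sketch) +
		       (level - 1) * sizeof(struct upid_sketch));
	return 0;
}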


@@ -178,7 +178,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
struct gen_pool_chunk *chunk;
int nbits = size >> pool->min_alloc_order;
int nbytes = sizeof(struct gen_pool_chunk) +
(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
BITS_TO_LONGS(nbits) * sizeof(long);
chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
if (unlikely(chunk == NULL))
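
The new size calculation matters because the chunk bitmap is manipulated with the kernel's bitmap helpers, which read and write whole unsigned longs. A quick userspace comparison of the two formulas (assuming 64-bit longs; nbits is a made-up value) shows how the old byte-granular rounding can come up short of what those helpers touch:

#include <stdio.h>

#define BITS_PER_BYTE	8
#define BITS_PER_LONG	(8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long nbits = 40;	/* made-up chunk size in allocation units */
	size_t old_bytes = (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
	size_t new_bytes = BITS_TO_LONGS(nbits) * sizeof(long);

	/* On a 64-bit build: old=5 bytes, new=8 bytes -- the bitmap helpers
	 * access the full 8-byte word, so the old size was too small. */
	printf("nbits=%lu: old formula %zu bytes, new formula %zu bytes\n",
	       nbits, old_bytes, new_bytes);
	return 0;
}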


@@ -196,28 +196,28 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
BUG_ON(atomic_read(&mm->mm_users) <= 0);
/*
* Verify that mmu_notifier_init() already run and the global srcu is
* initialized.
*/
* Verify that mmu_notifier_init() already run and the global srcu is
* initialized.
*/
BUG_ON(!srcu.per_cpu_ref);
ret = -ENOMEM;
mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
if (unlikely(!mmu_notifier_mm))
goto out;
if (take_mmap_sem)
down_write(&mm->mmap_sem);
ret = mm_take_all_locks(mm);
if (unlikely(ret))
goto out;
goto out_clean;
if (!mm_has_notifiers(mm)) {
mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
GFP_KERNEL);
if (unlikely(!mmu_notifier_mm)) {
ret = -ENOMEM;
goto out_of_mem;
}
INIT_HLIST_HEAD(&mmu_notifier_mm->list);
spin_lock_init(&mmu_notifier_mm->lock);
mm->mmu_notifier_mm = mmu_notifier_mm;
mmu_notifier_mm = NULL;
}
atomic_inc(&mm->mm_count);
@@ -233,12 +233,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
spin_unlock(&mm->mmu_notifier_mm->lock);
out_of_mem:
mm_drop_all_locks(mm);
out:
out_clean:
if (take_mmap_sem)
up_write(&mm->mmap_sem);
kfree(mmu_notifier_mm);
out:
BUG_ON(atomic_read(&mm->mm_users) <= 0);
return ret;
}
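
Structurally, the hunk moves the GFP_KERNEL allocation in front of mm_take_all_locks() and frees the unused buffer on the error and already-registered paths, so the possibly-sleeping allocation is no longer attempted while every lock in the mm is held. A minimal userspace sketch of that allocate-first shape, with malloc() and a pthread mutex standing in for the kernel primitives (names here are illustrative only):

/*
 * Sketch of the control flow after the fix: allocate first, take the lock,
 * install the buffer only if it is still needed, free it otherwise.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static void *installed;

static int register_thing(void)
{
	void *buf = malloc(128);	/* "allocate in advance" */

	if (!buf)
		return -1;

	pthread_mutex_lock(&big_lock);
	if (!installed) {		/* first registration installs it */
		installed = buf;
		buf = NULL;
	}
	pthread_mutex_unlock(&big_lock);

	free(buf);			/* no-op when the buffer was installed */
	return 0;
}

int main(void)
{
	return register_thing();
}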


@@ -1809,10 +1809,10 @@ static void __paginginit init_zone_allows_reclaim(int nid)
int i;
for_each_online_node(i)
if (node_distance(nid, i) <= RECLAIM_DISTANCE) {
if (node_distance(nid, i) <= RECLAIM_DISTANCE)
node_set(i, NODE_DATA(nid)->reclaim_nodes);
else
zone_reclaim_mode = 1;
}
}
#else /* CONFIG_NUMA */
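
The effect of the change is easiest to see with a concrete distance table: the old code (the braced 'if' above) turned zone_reclaim_mode on whenever any node was within RECLAIM_DISTANCE, which the local node always is, while the new code turns it on only when some node lies beyond that distance. A self-contained sketch over an invented two-node topology (RECLAIM_DISTANCE is 30 in mainline; the matrix values are made up):

#include <stdio.h>

#define RECLAIM_DISTANCE 30
#define NR_NODES 2

static const int dist[NR_NODES][NR_NODES] = {
	{ 10, 21 },
	{ 21, 10 },
};

int main(void)
{
	int old_mode = 0, new_mode = 0;

	for (int nid = 0; nid < NR_NODES; nid++)
		for (int i = 0; i < NR_NODES; i++) {
			if (dist[nid][i] <= RECLAIM_DISTANCE)
				old_mode = 1;	/* old: any nearby node flips it on */
			else
				new_mode = 1;	/* new: only a distant node does */
		}

	/* prints "old=1 new=0": the close-knit machine no longer gets
	 * zone_reclaim_mode forced on */
	printf("old=%d new=%d\n", old_mode, new_mode);
	return 0;
}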


@@ -56,6 +56,7 @@
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>
#include <asm/tlbflush.h>
@@ -926,11 +927,8 @@ int page_mkclean(struct page *page)
if (page_mapped(page)) {
struct address_space *mapping = page_mapping(page);
if (mapping) {
if (mapping)
ret = page_mkclean_file(mapping, page);
if (page_test_and_clear_dirty(page_to_pfn(page), 1))
ret = 1;
}
}
return ret;
@@ -1116,6 +1114,7 @@ void page_add_file_rmap(struct page *page)
*/
void page_remove_rmap(struct page *page)
{
struct address_space *mapping = page_mapping(page);
bool anon = PageAnon(page);
bool locked;
unsigned long flags;
@@ -1138,8 +1137,19 @@ void page_remove_rmap(struct page *page)
* this if the page is anon, so about to be freed; but perhaps
* not if it's in swapcache - there might be another pte slot
* containing the swap entry, but page not yet written to swap.
*
* And we can skip it on file pages, so long as the filesystem
* participates in dirty tracking; but need to catch shm and tmpfs
* and ramfs pages which have been modified since creation by read
* fault.
*
* Note that mapping must be decided above, before decrementing
* mapcount (which luckily provides a barrier): once page is unmapped,
* it could be truncated and page->mapping reset to NULL at any moment.
* Note also that we are relying on page_mapping(page) to set mapping
* to &swapper_space when PageSwapCache(page).
*/
if ((!anon || PageSwapCache(page)) &&
if (mapping && !mapping_cap_account_dirty(mapping) &&
page_test_and_clear_dirty(page_to_pfn(page), 1))
set_page_dirty(page);
/*


@@ -42,7 +42,10 @@ struct dev_exception_item {
struct dev_cgroup {
struct cgroup_subsys_state css;
struct list_head exceptions;
bool deny_all;
enum {
DEVCG_DEFAULT_ALLOW,
DEVCG_DEFAULT_DENY,
} behavior;
};
static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
@@ -182,13 +185,13 @@ static struct cgroup_subsys_state *devcgroup_create(struct cgroup *cgroup)
parent_cgroup = cgroup->parent;
if (parent_cgroup == NULL)
dev_cgroup->deny_all = false;
dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
else {
parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
mutex_lock(&devcgroup_mutex);
ret = dev_exceptions_copy(&dev_cgroup->exceptions,
&parent_dev_cgroup->exceptions);
dev_cgroup->deny_all = parent_dev_cgroup->deny_all;
dev_cgroup->behavior = parent_dev_cgroup->behavior;
mutex_unlock(&devcgroup_mutex);
if (ret) {
kfree(dev_cgroup);
@@ -260,7 +263,7 @@ static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
* - List the exceptions in case the default policy is to deny
* This way, the file remains as a "whitelist of devices"
*/
if (devcgroup->deny_all == false) {
if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
set_access(acc, ACC_MASK);
set_majmin(maj, ~0);
set_majmin(min, ~0);
@@ -314,12 +317,12 @@ static int may_access(struct dev_cgroup *dev_cgroup,
* In two cases we'll consider this new exception valid:
* - the dev cgroup has its default policy to allow + exception list:
* the new exception should *not* match any of the exceptions
* (!deny_all, !match)
* (behavior == DEVCG_DEFAULT_ALLOW, !match)
* - the dev cgroup has its default policy to deny + exception list:
* the new exception *should* match the exceptions
* (deny_all, match)
* (behavior == DEVCG_DEFAULT_DENY, match)
*/
if (dev_cgroup->deny_all == match)
if ((dev_cgroup->behavior == DEVCG_DEFAULT_DENY) == match)
return 1;
return 0;
}
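
The rewritten test compares the result of one == against match, which reads a little tersely; spelled out, it is the same truth table the old deny_all check encoded. A tiny self-check (the valid/invalid wording follows the comment above):

/*
 * Spelling out the condition: it reports "valid" exactly for the two cases
 * listed in the comment, (DEVCG_DEFAULT_ALLOW, !match) and
 * (DEVCG_DEFAULT_DENY, match).
 */
#include <stdio.h>

enum { DEVCG_DEFAULT_ALLOW, DEVCG_DEFAULT_DENY };

int main(void)
{
	for (int behavior = DEVCG_DEFAULT_ALLOW; behavior <= DEVCG_DEFAULT_DENY; behavior++)
		for (int match = 0; match <= 1; match++)
			printf("%s, match=%d -> %s\n",
			       behavior == DEVCG_DEFAULT_DENY ?
					"DEVCG_DEFAULT_DENY " : "DEVCG_DEFAULT_ALLOW",
			       match,
			       ((behavior == DEVCG_DEFAULT_DENY) == match) ?
					"valid" : "invalid");
	return 0;
}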
@@ -341,6 +344,17 @@ static int parent_has_perm(struct dev_cgroup *childcg,
return may_access(parent, ex);
}
/**
* may_allow_all - checks if it's possible to change the behavior to
* allow based on parent's rules.
* @parent: device cgroup's parent
* returns: != 0 in case it's allowed, 0 otherwise
*/
static inline int may_allow_all(struct dev_cgroup *parent)
{
return parent->behavior == DEVCG_DEFAULT_ALLOW;
}
/*
* Modify the exception list using allow/deny rules.
* CAP_SYS_ADMIN is needed for this. It's at least separate from CAP_MKNOD
@@ -358,9 +372,11 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
int filetype, const char *buffer)
{
const char *b;
char *endp;
int count;
char temp[12]; /* 11 + 1 characters needed for a u32 */
int count, rc;
struct dev_exception_item ex;
struct cgroup *p = devcgroup->css.cgroup;
struct dev_cgroup *parent = cgroup_to_devcgroup(p->parent);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -372,14 +388,18 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
case 'a':
switch (filetype) {
case DEVCG_ALLOW:
if (!parent_has_perm(devcgroup, &ex))
if (!may_allow_all(parent))
return -EPERM;
dev_exception_clean(devcgroup);
devcgroup->deny_all = false;
rc = dev_exceptions_copy(&devcgroup->exceptions,
&parent->exceptions);
if (rc)
return rc;
devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
break;
case DEVCG_DENY:
dev_exception_clean(devcgroup);
devcgroup->deny_all = true;
devcgroup->behavior = DEVCG_DEFAULT_DENY;
break;
default:
return -EINVAL;
@@ -402,8 +422,16 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
ex.major = ~0;
b++;
} else if (isdigit(*b)) {
ex.major = simple_strtoul(b, &endp, 10);
b = endp;
memset(temp, 0, sizeof(temp));
for (count = 0; count < sizeof(temp) - 1; count++) {
temp[count] = *b;
b++;
if (!isdigit(*b))
break;
}
rc = kstrtou32(temp, 10, &ex.major);
if (rc)
return -EINVAL;
} else {
return -EINVAL;
}
@@ -416,8 +444,16 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
ex.minor = ~0;
b++;
} else if (isdigit(*b)) {
ex.minor = simple_strtoul(b, &endp, 10);
b = endp;
memset(temp, 0, sizeof(temp));
for (count = 0; count < sizeof(temp) - 1; count++) {
temp[count] = *b;
b++;
if (!isdigit(*b))
break;
}
rc = kstrtou32(temp, 10, &ex.minor);
if (rc)
return -EINVAL;
} else {
return -EINVAL;
}
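
Both digit-run parsers above share the same shape: copy at most 11 digits into a zeroed scratch buffer and convert the whole buffer, so a value that does not fit in a u32 is rejected with -EINVAL instead of being accepted via simple_strtoul(), which does no such checking. A userspace sketch of that shape, with strtoul() plus a range check standing in for the kernel-only kstrtou32():

/*
 * Userspace sketch of the bounded number parsing introduced above.  The
 * digit-copy loop mirrors the hunk; strtoul() is only a stand-in.
 */
#include <ctype.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_u32(const char **bp, uint32_t *out)
{
	char temp[12];		/* same "11 + 1 characters" scratch buffer */
	const char *b = *bp;
	size_t count;
	unsigned long val;

	memset(temp, 0, sizeof(temp));
	for (count = 0; count < sizeof(temp) - 1; count++) {
		temp[count] = *b;
		b++;
		if (!isdigit((unsigned char)*b))
			break;
	}

	errno = 0;
	val = strtoul(temp, NULL, 10);
	if (errno || val > UINT32_MAX)
		return -EINVAL;

	*out = (uint32_t)val;
	*bp = b;
	return 0;
}

int main(void)
{
	const char *rule = "1:3";	/* the major:minor part of a rule string */
	uint32_t major, minor;

	if (parse_u32(&rule, &major) || *rule++ != ':' || parse_u32(&rule, &minor))
		return 1;

	printf("major=%u minor=%u\n", major, minor);
	return 0;
}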
@@ -452,7 +488,7 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
* an matching exception instead. And be silent about it: we
* don't want to break compatibility
*/
if (devcgroup->deny_all == false) {
if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
dev_exception_rm(devcgroup, &ex);
return 0;
}
@@ -463,7 +499,7 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
* an matching exception instead. And be silent about it: we
* don't want to break compatibility
*/
if (devcgroup->deny_all == true) {
if (devcgroup->behavior == DEVCG_DEFAULT_DENY) {
dev_exception_rm(devcgroup, &ex);
return 0;
}
@@ -533,10 +569,10 @@ struct cgroup_subsys devices_subsys = {
*
* returns 0 on success, -EPERM case the operation is not permitted
*/
static int __devcgroup_check_permission(struct dev_cgroup *dev_cgroup,
short type, u32 major, u32 minor,
static int __devcgroup_check_permission(short type, u32 major, u32 minor,
short access)
{
struct dev_cgroup *dev_cgroup;
struct dev_exception_item ex;
int rc;
@@ -547,6 +583,7 @@ static int __devcgroup_check_permission(struct dev_cgroup *dev_cgroup,
ex.access = access;
rcu_read_lock();
dev_cgroup = task_devcgroup(current);
rc = may_access(dev_cgroup, &ex);
rcu_read_unlock();
@@ -558,7 +595,6 @@ static int __devcgroup_check_permission(struct dev_cgroup *dev_cgroup,
int __devcgroup_inode_permission(struct inode *inode, int mask)
{
struct dev_cgroup *dev_cgroup = task_devcgroup(current);
short type, access = 0;
if (S_ISBLK(inode->i_mode))
@@ -570,13 +606,12 @@ int __devcgroup_inode_permission(struct inode *inode, int mask)
if (mask & MAY_READ)
access |= ACC_READ;
return __devcgroup_check_permission(dev_cgroup, type, imajor(inode),
iminor(inode), access);
return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
access);
}
int devcgroup_inode_mknod(int mode, dev_t dev)
{
struct dev_cgroup *dev_cgroup = task_devcgroup(current);
short type;
if (!S_ISBLK(mode) && !S_ISCHR(mode))
@@ -587,7 +622,7 @@ int devcgroup_inode_mknod(int mode, dev_t dev)
else
type = DEV_CHAR;
return __devcgroup_check_permission(dev_cgroup, type, MAJOR(dev),
MINOR(dev), ACC_MKNOD);
return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
ACC_MKNOD);
}


@@ -162,14 +162,14 @@ void *write_thread_function(void *function_data)
int index;
struct write_thread_data *thread_data =
(struct write_thread_data *)function_data;
while (!write_thread_data->stop)
while (!thread_data->stop)
for (index = 0;
!thread_data->stop && (index < thread_data->n_fds);
++index)
if ((write(thread_data->fds[index], &data, 1) < 1) &&
(errno != EAGAIN) &&
(errno != EWOULDBLOCK)) {
write_thread_data->status = errno;
thread_data->status = errno;
return;
}
}


@@ -35,7 +35,7 @@
#include <sys/mount.h>
#include <sys/statfs.h>
#include "../../include/uapi/linux/magic.h"
#include "../../include/linux/kernel-page-flags.h"
#include "../../include/uapi/linux/kernel-page-flags.h"
#ifndef MAX_PATH


@@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
int retval;
int rc = -1;
int namesize;
int i;
unsigned int i;
mode |= S_IFREG;
@@ -381,25 +381,28 @@ error:
static char *cpio_replace_env(char *new_location)
{
char expanded[PATH_MAX + 1];
char env_var[PATH_MAX + 1];
char *start;
char *end;
char expanded[PATH_MAX + 1];
char env_var[PATH_MAX + 1];
char *start;
char *end;
for (start = NULL; (start = strstr(new_location, "${")); ) {
end = strchr(start, '}');
if (start < end) {
*env_var = *expanded = '\0';
strncat(env_var, start + 2, end - start - 2);
strncat(expanded, new_location, start - new_location);
strncat(expanded, getenv(env_var), PATH_MAX);
strncat(expanded, end + 1, PATH_MAX);
strncpy(new_location, expanded, PATH_MAX);
} else
break;
}
for (start = NULL; (start = strstr(new_location, "${")); ) {
end = strchr(start, '}');
if (start < end) {
*env_var = *expanded = '\0';
strncat(env_var, start + 2, end - start - 2);
strncat(expanded, new_location, start - new_location);
strncat(expanded, getenv(env_var),
PATH_MAX - strlen(expanded));
strncat(expanded, end + 1,
PATH_MAX - strlen(expanded));
strncpy(new_location, expanded, PATH_MAX);
new_location[PATH_MAX] = 0;
} else
break;
}
return new_location;
return new_location;
}
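
The key detail in this final hunk is strncat()'s third argument: it caps how many bytes are appended, not the total size of the destination, so passing PATH_MAX while expanded[] already holds a prefix could write past the end of the on-stack buffer. A small-buffer illustration of the corrected bound (BUF_MAX stands in for PATH_MAX):

#include <stdio.h>
#include <string.h>

#define BUF_MAX 16	/* stand-in for PATH_MAX */

int main(void)
{
	char expanded[BUF_MAX + 1] = "prefix-";
	const char *value = "ABCDEFGHIJKLMNOP";	/* longer than the space left */

	/* corrected form: append no more than the room that is actually left */
	strncat(expanded, value, BUF_MAX - strlen(expanded));
	printf("%s (length %zu, limit %d)\n", expanded, strlen(expanded), BUF_MAX);

	/* strncat(expanded, value, BUF_MAX) here could run past the buffer,
	 * which is the overflow the patch avoids in cpio_replace_env() */
	return 0;
}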