Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "A bunch of fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  slub: mark the dangling ifdef #else of CONFIG_SLUB_DEBUG
  slub: avoid irqoff/on in bulk allocation
  slub: create new ___slab_alloc function that can be called with irqs disabled
  mm: fix up sparse warning in gfpflags_allow_blocking
  ocfs2: fix umask ignored issue
  PM/OPP: add entry in MAINTAINERS
  kernel/panic.c: turn off locks debug before releasing console lock
  kernel/signal.c: unexport sigsuspend()
  kasan: fix kmemleak false-positive in kasan_module_alloc()
  fat: fix fake_offset handling on error path
  mm/hugetlbfs: fix bugs in fallocate hole punch of areas with holes
  mm/page-writeback.c: initialize m_dirty to avoid compile warning
  various: fix pci_set_dma_mask return value checking
  mm: loosen MADV_NOHUGEPAGE to enable Qemu postcopy on s390
  mm: vmalloc: don't remove inexistent guard hole in remove_vm_area()
  tools/vm/page-types.c: support KPF_IDLE
  ncpfs: don't allow negative timeouts
  configfs: allow dynamic group creation
  MAINTAINERS: add Moritz as reviewer for FPGA Manager Framework
  slab.h: sprinkle __assume_aligned attributes
commit 3ad5d7e06a
MAINTAINERS (13 lines changed)
@@ -4411,6 +4411,7 @@ K:	fmc_d.*register
 FPGA MANAGER FRAMEWORK
 M:	Alan Tull <atull@opensource.altera.com>
+R:	Moritz Fischer <moritz.fischer@ettus.com>
 S:	Maintained
 F:	drivers/fpga/
 F:	include/linux/fpga/fpga-mgr.h

@@ -7904,6 +7905,18 @@ S:	Maintained
 F:	net/openvswitch/
 F:	include/uapi/linux/openvswitch.h

+OPERATING PERFORMANCE POINTS (OPP)
+M:	Viresh Kumar <vireshk@kernel.org>
+M:	Nishanth Menon <nm@ti.com>
+M:	Stephen Boyd <sboyd@codeaurora.org>
+L:	linux-pm@vger.kernel.org
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
+F:	drivers/base/power/opp/
+F:	include/linux/pm_opp.h
+F:	Documentation/power/opp.txt
+F:	Documentation/devicetree/bindings/opp/
+
 OPL4 DRIVER
 M:	Clemens Ladisch <clemens@ladisch.de>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -1992,9 +1992,9 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
	       (unsigned long long)pci_resource_start(pci_dev, 0));

 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+	err = pci_set_dma_mask(pci_dev, 0xffffffff);
+	if (err) {
 		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
 		err = -EIO;
 		goto fail_context;
 	}
@@ -1319,7 +1319,8 @@ static int cx25821_initdev(struct pci_dev *pci_dev,
	       dev->pci_lat, (unsigned long long)dev->base_io_addr);

 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+	err = pci_set_dma_mask(pci_dev, 0xffffffff);
+	if (err) {
 		pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
 		err = -EIO;
 		goto fail_irq;
@@ -890,9 +890,9 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
 		return err;
 	}

-	if (!pci_set_dma_mask(pci,DMA_BIT_MASK(32))) {
+	err = pci_set_dma_mask(pci,DMA_BIT_MASK(32));
+	if (err) {
 		dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name);
 		err = -EIO;
 		cx88_core_put(core, pci);
 		return err;
 	}
@@ -393,7 +393,8 @@ static int cx8802_init_common(struct cx8802_dev *dev)
 	if (pci_enable_device(dev->pci))
 		return -EIO;
 	pci_set_master(dev->pci);
-	if (!pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32))) {
+	err = pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32));
+	if (err) {
 		printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name);
 		return -EIO;
 	}
@@ -1314,9 +1314,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
	       dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));

 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32))) {
+	err = pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32));
+	if (err) {
 		printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name);
 		err = -EIO;
 		goto fail_core;
 	}
 	dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
@@ -810,7 +810,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
 		"%s(): board vendor 0x%x, revision 0x%x\n",
 		__func__, board_vendor, board_revision);
 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+	if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) {
 		dev_err(&pci_dev->dev,
 			"%s(): 32bit PCI DMA is not supported\n", __func__);
 		goto pci_detect_err;
@@ -951,9 +951,9 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
	       dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+	err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+	if (err) {
 		pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
 		err = -EIO;
 		goto fail1;
 	}
@@ -1264,9 +1264,9 @@ static int saa7164_initdev(struct pci_dev *pci_dev,

 	pci_set_master(pci_dev);
 	/* TODO */
-	if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
+	err = pci_set_dma_mask(pci_dev, 0xffffffff);
+	if (err) {
 		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
 		err = -EIO;
 		goto fail_irq;
 	}
@@ -257,9 +257,9 @@ static int tw68_initdev(struct pci_dev *pci_dev,
 		dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
 		dev->pci_lat, (u64)pci_resource_start(pci_dev, 0));
 	pci_set_master(pci_dev);
-	if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+	err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+	if (err) {
 		pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
 		err = -EIO;
 		goto fail1;
 	}
@@ -1500,10 +1500,11 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return -ENODEV;
 	}

-	if (!pci_set_dma_mask(pdev, PCNET32_DMA_MASK)) {
+	err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
+	if (err) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
 			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
-		return -ENODEV;
+		return err;
 	}
 	if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
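All of the DMA-mask hunks above fix the same inverted test: pci_set_dma_mask() returns 0 on success and a negative errno on failure, so the old "if (!pci_set_dma_mask(...))" treated success as an error and let real failures slip through. A minimal sketch of the corrected pattern, using a hypothetical probe function rather than code from any one driver above:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Illustrative probe stub; not taken from any of the drivers patched above. */
static int example_probe(struct pci_dev *pdev)
{
	int err;

	pci_set_master(pdev);

	/* 0 means the 32-bit mask was accepted; anything else is an errno. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "no usable 32-bit DMA configuration\n");
		return err;
	}

	return 0;
}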
@@ -1636,6 +1636,116 @@ const struct file_operations configfs_dir_operations = {
 	.iterate	= configfs_readdir,
 };

+/**
+ * configfs_register_group - creates a parent-child relation between two groups
+ * @parent_group:	parent group
+ * @group:		child group
+ *
+ * link groups, creates dentry for the child and attaches it to the
+ * parent dentry.
+ *
+ * Return: 0 on success, negative errno code on error
+ */
+int configfs_register_group(struct config_group *parent_group,
+			    struct config_group *group)
+{
+	struct configfs_subsystem *subsys = parent_group->cg_subsys;
+	struct dentry *parent;
+	int ret;
+
+	mutex_lock(&subsys->su_mutex);
+	link_group(parent_group, group);
+	mutex_unlock(&subsys->su_mutex);
+
+	parent = parent_group->cg_item.ci_dentry;
+
+	mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT);
+	ret = create_default_group(parent_group, group);
+	if (!ret) {
+		spin_lock(&configfs_dirent_lock);
+		configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
+		spin_unlock(&configfs_dirent_lock);
+	}
+	mutex_unlock(&d_inode(parent)->i_mutex);
+	return ret;
+}
+EXPORT_SYMBOL(configfs_register_group);
+
+/**
+ * configfs_unregister_group() - unregisters a child group from its parent
+ * @group: parent group to be unregistered
+ *
+ * Undoes configfs_register_group()
+ */
+void configfs_unregister_group(struct config_group *group)
+{
+	struct configfs_subsystem *subsys = group->cg_subsys;
+	struct dentry *dentry = group->cg_item.ci_dentry;
+	struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
+
+	mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT);
+	spin_lock(&configfs_dirent_lock);
+	configfs_detach_prep(dentry, NULL);
+	spin_unlock(&configfs_dirent_lock);
+
+	configfs_detach_group(&group->cg_item);
+	d_inode(dentry)->i_flags |= S_DEAD;
+	dont_mount(dentry);
+	d_delete(dentry);
+	mutex_unlock(&d_inode(parent)->i_mutex);
+
+	dput(dentry);
+
+	mutex_lock(&subsys->su_mutex);
+	unlink_group(group);
+	mutex_unlock(&subsys->su_mutex);
+}
+EXPORT_SYMBOL(configfs_unregister_group);
+
+/**
+ * configfs_register_default_group() - allocates and registers a child group
+ * @parent_group:	parent group
+ * @name:		child group name
+ * @item_type:		child item type description
+ *
+ * boilerplate to allocate and register a child group with its parent. We need
+ * kzalloc'ed memory because child's default_group is initially empty.
+ *
+ * Return: allocated config group or ERR_PTR() on error
+ */
+struct config_group *
+configfs_register_default_group(struct config_group *parent_group,
+				const char *name,
+				struct config_item_type *item_type)
+{
+	int ret;
+	struct config_group *group;
+
+	group = kzalloc(sizeof(*group), GFP_KERNEL);
+	if (!group)
+		return ERR_PTR(-ENOMEM);
+	config_group_init_type_name(group, name, item_type);
+
+	ret = configfs_register_group(parent_group, group);
+	if (ret) {
+		kfree(group);
+		return ERR_PTR(ret);
+	}
+	return group;
+}
+EXPORT_SYMBOL(configfs_register_default_group);
+
+/**
+ * configfs_unregister_default_group() - unregisters and frees a child group
+ * @group: the group to act on
+ */
+void configfs_unregister_default_group(struct config_group *group)
+{
+	configfs_unregister_group(group);
+	kfree(group);
+}
+EXPORT_SYMBOL(configfs_unregister_default_group);
+
 int configfs_register_subsystem(struct configfs_subsystem *subsys)
 {
 	int err;
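For context, a rough sketch of how a driver might use the dynamic-group API exported above; the item type, group names and helper functions here are made up for illustration and are not part of this patch:

#include <linux/module.h>
#include <linux/configfs.h>
#include <linux/err.h>

/* Hypothetical item type describing the child group's attributes. */
static struct config_item_type demo_child_type = {
	.ct_owner = THIS_MODULE,
};

static struct config_group *demo_child;

static int demo_add_child(struct config_group *parent)
{
	/* Allocates, initialises and links a "stats" child under parent. */
	demo_child = configfs_register_default_group(parent, "stats",
						      &demo_child_type);
	return PTR_ERR_OR_ZERO(demo_child);
}

static void demo_remove_child(void)
{
	/* Tears down the child and frees the kzalloc'ed group. */
	configfs_unregister_default_group(demo_child);
}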
fs/fat/dir.c (16 lines changed)
@@ -610,9 +610,9 @@ parse_record:
 		int status = fat_parse_long(inode, &cpos, &bh, &de,
					    &unicode, &nr_slots);
 		if (status < 0) {
-			ctx->pos = cpos;
+			bh = NULL;
 			ret = status;
-			goto out;
+			goto end_of_dir;
 		} else if (status == PARSE_INVALID)
 			goto record_end;
 		else if (status == PARSE_NOT_LONGNAME)
@@ -654,8 +654,9 @@ parse_record:
 		fill_len = short_len;

 start_filldir:
-	if (!fake_offset)
-		ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
+	ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
+	if (fake_offset && ctx->pos < 2)
+		ctx->pos = 2;

 	if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) {
 		if (!dir_emit_dot(file, ctx))
@@ -681,14 +682,19 @@ record_end:
 	fake_offset = 0;
 	ctx->pos = cpos;
 	goto get_new;

 end_of_dir:
-	ctx->pos = cpos;
+	if (fake_offset && cpos < 2)
+		ctx->pos = 2;
+	else
+		ctx->pos = cpos;
+fill_failed:
 	brelse(bh);
 	if (unicode)
 		__putname(unicode);
 out:
 	mutex_unlock(&sbi->s_lock);

 	return ret;
 }
@@ -332,12 +332,17 @@ static void remove_huge_page(struct page *page)
  * truncation is indicated by end of range being LLONG_MAX
  *	In this case, we first scan the range and release found pages.
  *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
- *	maps and global counts.
+ *	maps and global counts.  Page faults can not race with truncation
+ *	in this routine.  hugetlb_no_page() prevents page faults in the
+ *	truncated range.  It checks i_size before allocation, and again after
+ *	with the page table lock for the page held.  The same lock must be
+ *	acquired to unmap a page.
  * hole punch is indicated if end is not LLONG_MAX
  *	In the hole punch case we scan the range and release found pages.
  *	Only when releasing a page is the associated region/reserv map
  *	deleted.  The region/reserv map for ranges without associated
- *	pages are not modified.
+ *	pages are not modified.  Page faults can race with hole punch.
+ *	This is indicated if we find a mapped page.
  * Note: If the passed end of range value is beyond the end of file, but
  * not LLONG_MAX this routine still performs a hole punch operation.
  */
@@ -361,46 +366,37 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 	next = start;
 	while (next < end) {
 		/*
-		 * Make sure to never grab more pages that we
-		 * might possibly need.
+		 * Don't grab more pages than the number left in the range.
 		 */
 		if (end - next < lookup_nr)
 			lookup_nr = end - next;

 		/*
-		 * This pagevec_lookup() may return pages past 'end',
-		 * so we must check for page->index > end.
+		 * When no more pages are found, we are done.
 		 */
-		if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) {
-			if (next == start)
-				break;
-			next = start;
-			continue;
-		}
+		if (!pagevec_lookup(&pvec, mapping, next, lookup_nr))
+			break;

 		for (i = 0; i < pagevec_count(&pvec); ++i) {
 			struct page *page = pvec.pages[i];
 			u32 hash;

+			/*
+			 * The page (index) could be beyond end.  This is
+			 * only possible in the punch hole case as end is
+			 * max page offset in the truncate case.
+			 */
+			next = page->index;
+			if (next >= end)
+				break;
+
 			hash = hugetlb_fault_mutex_hash(h, current->mm,
							&pseudo_vma,
							mapping, next, 0);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);

 			lock_page(page);
-			if (page->index >= end) {
-				unlock_page(page);
-				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-				next = end;	/* we are done */
-				break;
-			}
-
-			/*
-			 * If page is mapped, it was faulted in after being
-			 * unmapped.  Do nothing in this race case.  In the
-			 * normal case page is not mapped.
-			 */
-			if (!page_mapped(page)) {
+			if (likely(!page_mapped(page))) {
 				bool rsv_on_error = !PagePrivate(page);
 				/*
 				 * We must free the huge page and remove
@@ -421,17 +417,23 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
					hugetlb_fix_reserve_counts(
						inode, rsv_on_error);
 				}
+			} else {
+				/*
+				 * If page is mapped, it was faulted in after
+				 * being unmapped.  It indicates a race between
+				 * hole punch and page fault.  Do nothing in
+				 * this case.  Getting here in a truncate
+				 * operation is a bug.
+				 */
+				BUG_ON(truncate_op);
 			}

-			if (page->index > next)
-				next = page->index;
-
-			++next;
 			unlock_page(page);
-
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 		}
+		++next;
 		huge_pagevec_release(&pvec);
 		cond_resched();
 	}

 	if (truncate_op)
@@ -647,9 +649,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
 		i_size_write(inode, offset + len);
 	inode->i_ctime = CURRENT_TIME;
-	spin_lock(&inode->i_lock);
-	inode->i_private = NULL;
-	spin_unlock(&inode->i_lock);
 out:
 	mutex_unlock(&inode->i_mutex);
 	return error;
@@ -525,6 +525,8 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
 		switch (rqdata.cmd) {
 		case NCP_LOCK_EX:
 		case NCP_LOCK_SH:
+			if (rqdata.timeout < 0)
+				return -EINVAL;
 			if (rqdata.timeout == 0)
 				rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT;
 			else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT)
@@ -372,6 +372,8 @@ static int ocfs2_mknod(struct inode *dir,
 		mlog_errno(status);
 		goto leave;
 	}
+	/* update inode->i_mode after mask with "umask". */
+	inode->i_mode = mode;

 	handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
							    S_ISDIR(mode),
@@ -197,6 +197,16 @@ static inline struct configfs_subsystem *to_configfs_subsystem(struct config_gro
 int configfs_register_subsystem(struct configfs_subsystem *subsys);
 void configfs_unregister_subsystem(struct configfs_subsystem *subsys);

+int configfs_register_group(struct config_group *parent_group,
+			    struct config_group *group);
+void configfs_unregister_group(struct config_group *group);
+
+struct config_group *
+configfs_register_default_group(struct config_group *parent_group,
+				const char *name,
+				struct config_item_type *item_type);
+void configfs_unregister_default_group(struct config_group *group);
+
 /* These functions can sleep and can alloc with GFP_KERNEL */
 /* WARNING: These cannot be called underneath configfs callbacks!! */
 int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
@@ -271,7 +271,7 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)

 static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
 {
-	return gfp_flags & __GFP_DIRECT_RECLAIM;
+	return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
 }

 #ifdef CONFIG_HIGHMEM
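gfp_t is a sparse __bitwise type, so returning a masked value as a plain bool makes sparse warn about mixing base types; the __force cast marks the conversion as intentional. A rough user-space analogue of that mechanism, with made-up names (the kernel's real annotations live in its compiler headers):

/* Illustrative user-space analogue of the kernel's __bitwise/__force annotations. */
#ifdef __CHECKER__			/* defined when sparse is checking */
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise flags_t;

#define FLAG_MAY_BLOCK	((__force flags_t)0x01)

static inline _Bool allows_blocking(const flags_t flags)
{
	/* Without __force, sparse flags this conversion of a __bitwise
	 * value to a plain boolean. */
	return (_Bool __force)(flags & FLAG_MAY_BLOCK);
}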
@@ -239,7 +239,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
 extern void set_current_blocked(sigset_t *);
 extern void __set_current_blocked(const sigset_t *);
 extern int show_unhandled_signals;
-extern int sigsuspend(sigset_t *);

 struct sigaction {
 #ifndef __ARCH_HAS_IRIX_SIGACTION
@@ -157,6 +157,24 @@ size_t ksize(const void *);
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
 #endif

+/*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64 bit integer
+ * aligned buffers.
+ */
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
+ * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
+ * aligned pointers.
+ */
+#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
+#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
+#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
+
 /*
  * Kmalloc array related definitions
  */
@@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */

-void *__kmalloc(size_t size, gfp_t flags);
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
 void kmem_cache_free(struct kmem_cache *, void *);

 /*
@@ -301,8 +319,8 @@ void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
 bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

 #ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
 #else
 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
@@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
 #endif

 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;

 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
-					 int node, size_t size);
+					 int node, size_t size) __assume_slab_alignment;
 #else
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 }
 #endif /* CONFIG_TRACING */

-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;

 #ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
 #else
 static __always_inline void *
 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
@@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	return __kmalloc_node(size, flags, node);
 }

-/*
- * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
- * Intended for arches that get misalignment faults even for 64 bit integer
- * aligned buffers.
- */
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 struct memcg_cache_array {
 	struct rcu_head rcu;
 	struct kmem_cache *entries[0];
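These declarations rely on an __assume_aligned(x) helper that maps to the compiler's assume_aligned function attribute, so callers can be optimised around the known alignment of returned pointers. A standalone sketch of the idea outside the kernel, with invented names (this is not the kernel's actual definition, which lives in its compiler headers):

#include <stdlib.h>
#include <string.h>

/* Standalone illustration: tell the compiler this function returns
 * 16-byte-aligned pointers. */
#define my_assume_aligned(a) __attribute__((assume_aligned(a)))

void *my_alloc(size_t size) my_assume_aligned(16);

void *my_alloc(size_t size)
{
	/* aligned_alloc provides the alignment the attribute promises. */
	return aligned_alloc(16, size);
}

int main(void)
{
	char *buf = my_alloc(64);	/* compiler may now vectorise memset */

	memset(buf, 0, 64);
	free(buf);
	return 0;
}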
@@ -152,8 +152,11 @@ void panic(const char *fmt, ...)
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer.  Try to acquire the lock then release it regardless of the
-	 * result.  The release will also print the buffers out.
+	 * result.  The release will also print the buffers out.  Locks debug
+	 * should be disabled to avoid reporting bad unlock balance when
+	 * panic() is not being callled from OOPS.
	 */
+	debug_locks_off();
 	console_trylock();
 	console_unlock();

@@ -3503,7 +3503,7 @@ SYSCALL_DEFINE0(pause)

 #endif

-int sigsuspend(sigset_t *set)
+static int sigsuspend(sigset_t *set)
 {
 	current->saved_sigmask = current->blocked;
 	set_current_blocked(set);
@@ -2009,7 +2009,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
 		/*
		 * Be somewhat over-protective like KSM for now!
		 */
-		if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
+		if (*vm_flags & VM_NO_THP)
 			return -EINVAL;
 		*vm_flags &= ~VM_NOHUGEPAGE;
 		*vm_flags |= VM_HUGEPAGE;

@@ -2025,7 +2025,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
 		/*
		 * Be somewhat over-protective like KSM for now!
		 */
-		if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
+		if (*vm_flags & VM_NO_THP)
 			return -EINVAL;
 		*vm_flags &= ~VM_HUGEPAGE;
 		*vm_flags |= VM_NOHUGEPAGE;
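Previously hugepage_madvise() returned EINVAL when the requested hint was already present on the VMA (for example MADV_NOHUGEPAGE on a mapping that already carries VM_NOHUGEPAGE, as happens for QEMU guest memory on s390); after this change only VM_NO_THP is a hard error, so re-applying a hint is harmless. A small user-space sketch of the now-permitted pattern (sizes and the double call are illustrative only):

#include <stdio.h>
#include <sys/mman.h>

/* Illustrative test: re-applying MADV_NOHUGEPAGE should now succeed. */
int main(void)
{
	size_t len = 2 * 1024 * 1024;
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	/* First call sets VM_NOHUGEPAGE on the mapping. */
	if (madvise(buf, len, MADV_NOHUGEPAGE))
		perror("first MADV_NOHUGEPAGE");

	/* Re-applying the same hint used to fail with EINVAL; with this
	 * patch it simply succeeds. */
	if (madvise(buf, len, MADV_NOHUGEPAGE))
		perror("second MADV_NOHUGEPAGE");

	munmap(buf, len);
	return 0;
}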
@@ -19,6 +19,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/kmemleak.h>
 #include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/mm.h>

@@ -444,6 +445,7 @@ int kasan_module_alloc(void *addr, size_t size)

 	if (ret) {
 		find_vm_area(addr)->flags |= VM_KASAN;
+		kmemleak_ignore(ret);
 		return 0;
 	}

@@ -1542,7 +1542,9 @@ static void balance_dirty_pages(struct address_space *mapping,
 	for (;;) {
 		unsigned long now = jiffies;
 		unsigned long dirty, thresh, bg_thresh;
-		unsigned long m_dirty, m_thresh, m_bg_thresh;
+		unsigned long m_dirty = 0;	/* stop bogus uninit warnings */
+		unsigned long m_thresh = 0;
+		unsigned long m_bg_thresh = 0;

 		/*
		 * Unstable writes are a feature of certain networked
mm/slub.c (70 lines changed)
@@ -1204,7 +1204,7 @@ unsigned long kmem_cache_flags(unsigned long object_size,

 	return flags;
 }
-#else
+#else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}

@@ -2295,23 +2295,15 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
 * And if we were unable to get a new slab from the partial slab lists then
 * we need to allocate a new slab. This is the slowest path since it involves
 * a call to the page allocator and the setup of a new slab.
+ *
+ * Version of __slab_alloc to use when we know that interrupts are
+ * already disabled (which is the case for bulk allocation).
 */
-static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void *freelist;
 	struct page *page;
-	unsigned long flags;
-
-	local_irq_save(flags);
-#ifdef CONFIG_PREEMPT
-	/*
-	 * We may have been preempted and rescheduled on a different
-	 * cpu before disabling interrupts. Need to reload cpu area
-	 * pointer.
-	 */
-	c = this_cpu_ptr(s->cpu_slab);
-#endif

 	page = c->page;
 	if (!page)
@@ -2369,7 +2361,6 @@ load_freelist:
 	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
-	local_irq_restore(flags);
 	return freelist;

 new_slab:
@@ -2386,7 +2377,6 @@ new_slab:

 	if (unlikely(!freelist)) {
 		slab_out_of_memory(s, gfpflags, node);
-		local_irq_restore(flags);
 		return NULL;
 	}

@@ -2402,10 +2392,34 @@ new_slab:
 	deactivate_slab(s, page, get_freepointer(s, freelist));
 	c->page = NULL;
 	c->freelist = NULL;
-	local_irq_restore(flags);
 	return freelist;
 }

+/*
+ * Another one that disabled interrupt and compensates for possible
+ * cpu changes by refetching the per cpu area pointer.
+ */
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+			  unsigned long addr, struct kmem_cache_cpu *c)
+{
+	void *p;
+	unsigned long flags;
+
+	local_irq_save(flags);
+#ifdef CONFIG_PREEMPT
+	/*
+	 * We may have been preempted and rescheduled on a different
+	 * cpu before disabling interrupts. Need to reload cpu area
+	 * pointer.
+	 */
+	c = this_cpu_ptr(s->cpu_slab);
+#endif
+
+	p = ___slab_alloc(s, gfpflags, node, addr, c);
+	local_irq_restore(flags);
+	return p;
+}
+
 /*
 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
 * have the fastpath folded into their functions. So no function call
@@ -2804,30 +2818,23 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 		void *object = c->freelist;

 		if (unlikely(!object)) {
-			local_irq_enable();
 			/*
			 * Invoking slow path likely have side-effect
			 * of re-populating per CPU c->freelist
			 */
-			p[i] = __slab_alloc(s, flags, NUMA_NO_NODE,
+			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
					    _RET_IP_, c);
-			if (unlikely(!p[i])) {
-				__kmem_cache_free_bulk(s, i, p);
-				return false;
-			}
-			local_irq_disable();
+			if (unlikely(!p[i]))
+				goto error;

 			c = this_cpu_ptr(s->cpu_slab);
 			continue; /* goto for-loop */
 		}

 		/* kmem_cache debug support */
 		s = slab_pre_alloc_hook(s, flags);
-		if (unlikely(!s)) {
-			__kmem_cache_free_bulk(s, i, p);
-			c->tid = next_tid(c->tid);
-			local_irq_enable();
-			return false;
-		}
+		if (unlikely(!s))
+			goto error;

 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
@@ -2847,6 +2854,11 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	}

 	return true;
+
+error:
+	__kmem_cache_free_bulk(s, i, p);
+	local_irq_enable();
+	return false;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);

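For context, a rough usage sketch of the bulk API whose slow path these slub changes keep inside a single irq-disabled section; the cache handle and batch size below are made up for illustration:

#include <linux/slab.h>
#include <linux/errno.h>

#define DEMO_BATCH 16

/* Illustrative only; 'cache' must be a real kmem_cache created elsewhere. */
static int demo_bulk_alloc(struct kmem_cache *cache)
{
	void *objs[DEMO_BATCH];

	/* Returns true only if all DEMO_BATCH objects were allocated. */
	if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, DEMO_BATCH, objs))
		return -ENOMEM;

	/* ... use objs[0] .. objs[DEMO_BATCH - 1] ... */

	kmem_cache_free_bulk(cache, DEMO_BATCH, objs);
	return 0;
}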
@@ -1443,7 +1443,6 @@ struct vm_struct *remove_vm_area(const void *addr)
 		vmap_debug_free_range(va->va_start, va->va_end);
 		kasan_free_shadow(vm);
 		free_unmap_vmap_area(va);
-		vm->size -= PAGE_SIZE;

 		return vm;
 	}
@@ -1468,8 +1467,8 @@ static void __vunmap(const void *addr, int deallocate_pages)
 		return;
 	}

-	debug_check_no_locks_freed(addr, area->size);
-	debug_check_no_obj_freed(addr, area->size);
+	debug_check_no_locks_freed(addr, get_vm_area_size(area));
+	debug_check_no_obj_freed(addr, get_vm_area_size(area));

 	if (deallocate_pages) {
 		int i;
@@ -128,6 +128,7 @@ static const char * const page_flag_names[] = {
	[KPF_THP]		= "t:thp",
	[KPF_BALLOON]		= "o:balloon",
	[KPF_ZERO_PAGE]		= "z:zero_page",
+	[KPF_IDLE]		= "i:idle_page",

	[KPF_RESERVED]		= "r:reserved",
	[KPF_MLOCKED]		= "m:mlocked",