Merge branch 'akpm' (patches from Andrew)

Merge updates from Andrew Morton:

 - a few misc things

 - ocfs2 updates

 - the v9fs maintainers have been missing for a long time. I've taken
   over v9fs patch slinging.

 - most of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (116 commits)
  mm,oom_reaper: check for MMF_OOM_SKIP before complaining
  mm/ksm: fix interaction with THP
  mm/memblock.c: cast constant ULLONG_MAX to phys_addr_t
  headers: untangle kmemleak.h from mm.h
  include/linux/mmdebug.h: make VM_WARN* non-rvals
  mm/page_isolation.c: make start_isolate_page_range() fail if already isolated
  mm: change return type to vm_fault_t
  mm, oom: remove 3% bonus for CAP_SYS_ADMIN processes
  mm, page_alloc: wakeup kcompactd even if kswapd cannot free more memory
  kernel/fork.c: detect early free of a live mm
  mm: make counting of list_lru_one::nr_items lockless
  mm/swap_state.c: make bool enable_vma_readahead and swap_vma_readahead() static
  block_invalidatepage(): only release page if the full page was invalidated
  mm: kernel-doc: add missing parameter descriptions
  mm/swap.c: remove @cold parameter description for release_pages()
  mm/nommu: remove description of alloc_vm_area
  zram: drop max_zpage_size and use zs_huge_class_size()
  zsmalloc: introduce zs_huge_class_size()
  mm: fix races between swapoff and flush dcache
  fs/direct-io.c: minor cleanups in do_blockdev_direct_IO
  ...
Linus Torvalds 2018-04-06 14:19:26 -07:00
Parents: 3fd14cdcc0 97b1255cb2
Commit: 3b54765cca
151 changed files with 1606 additions and 1274 deletions


@ -1840,30 +1840,29 @@
keepinitrd [HW,ARM]
kernelcore= [KNL,X86,IA-64,PPC]
Format: nn[KMGTPE] | "mirror"
This parameter
specifies the amount of memory usable by the kernel
for non-movable allocations. The requested amount is
spread evenly throughout all nodes in the system. The
remaining memory in each node is used for Movable
pages. In the event, a node is too small to have both
kernelcore and Movable pages, kernelcore pages will
take priority and other nodes will have a larger number
of Movable pages. The Movable zone is used for the
allocation of pages that may be reclaimed or moved
by the page migration subsystem. This means that
HugeTLB pages may not be allocated from this zone.
Note that allocations like PTEs-from-HighMem still
use the HighMem zone if it exists, and the Normal
Format: nn[KMGTPE] | nn% | "mirror"
This parameter specifies the amount of memory usable by
the kernel for non-movable allocations. The requested
amount is spread evenly throughout all nodes in the
system as ZONE_NORMAL. The remaining memory is used for
movable memory in its own zone, ZONE_MOVABLE. In the
event, a node is too small to have both ZONE_NORMAL and
ZONE_MOVABLE, kernelcore memory will take priority and
other nodes will have a larger ZONE_MOVABLE.
ZONE_MOVABLE is used for the allocation of pages that
may be reclaimed or moved by the page migration
subsystem. Note that allocations like PTEs-from-HighMem
still use the HighMem zone if it exists, and the Normal
zone if it does not.
Instead of specifying the amount of memory (nn[KMGTPE]),
you can specify "mirror" option. In case "mirror"
It is possible to specify the exact amount of memory in
the form of "nn[KMGTPE]", a percentage of total system
memory in the form of "nn%", or "mirror". If "mirror"
option is specified, mirrored (reliable) memory is used
for non-movable allocations and remaining memory is used
for Movable pages. nn[KMGTPE] and "mirror" are exclusive,
so you can NOT specify nn[KMGTPE] and "mirror" at the same
time.
for Movable pages. "nn[KMGTPE]", "nn%", and "mirror"
are exclusive, so you cannot specify multiple forms.
kgdbdbgp= [KGDB,HW] kgdb over EHCI usb debug port.
Format: <Controller#>[,poll interval]
@ -2377,13 +2376,14 @@
mousedev.yres= [MOUSE] Vertical screen resolution, used for devices
reporting absolute coordinates, such as tablets
movablecore=nn[KMG] [KNL,X86,IA-64,PPC] This parameter
is similar to kernelcore except it specifies the
amount of memory used for migratable allocations.
If both kernelcore and movablecore is specified,
then kernelcore will be at *least* the specified
value but may be more. If movablecore on its own
is specified, the administrator must be careful
movablecore= [KNL,X86,IA-64,PPC]
Format: nn[KMGTPE] | nn%
This parameter is the complement to kernelcore=, it
specifies the amount of memory used for migratable
allocations. If both kernelcore and movablecore is
specified, then kernelcore will be at *least* the
specified value but may be more. If movablecore on its
own is specified, the administrator must be careful
that the amount of memory usable for all allocations
is not too small.
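
As a quick illustration (added for clarity, not part of the patch), the forms described above look like this on the kernel command line; the sizes are arbitrary examples:

    kernelcore=2G       (fixed amount: 2GB of ZONE_NORMAL spread across nodes)
    kernelcore=30%      (the new percentage form: 30% of total system memory)
    kernelcore=mirror   (use mirrored memory for non-movable allocations)
    movablecore=4G      (size ZONE_MOVABLE directly instead)

For a given parameter only one of these forms can be specified at a time.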


@ -111,7 +111,7 @@ my $regex_direct_begin_default = 'order=([0-9]*) may_writepage=([0-9]*) gfp_flag
my $regex_direct_end_default = 'nr_reclaimed=([0-9]*)';
my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
my $regex_kswapd_sleep_default = 'nid=([0-9]*)';
my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*)';
my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*) gfp_flags=([A-Z_|]*)';
my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) classzone_idx=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_skipped=([0-9]*) nr_taken=([0-9]*) lru=([a-z_]*)';
my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) nr_dirty=([0-9]*) nr_writeback=([0-9]*) nr_congested=([0-9]*) nr_immediate=([0-9]*) nr_activate=([0-9]*) nr_ref_keep=([0-9]*) nr_unmap_fail=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)';
@ -201,7 +201,7 @@ $regex_kswapd_sleep = generate_traceevent_regex(
$regex_wakeup_kswapd = generate_traceevent_regex(
"vmscan/mm_vmscan_wakeup_kswapd",
$regex_wakeup_kswapd_default,
"nid", "zid", "order");
"nid", "zid", "order", "gfp_flags");
$regex_lru_isolate = generate_traceevent_regex(
"vmscan/mm_vmscan_lru_isolate",
$regex_lru_isolate_default,


@ -833,7 +833,7 @@ void flush_dcache_page(struct page *page)
}
/* don't handle anon pages here */
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (!mapping)
return;


@ -128,12 +128,7 @@ asmlinkage void __div0(void)
error("Attempting division by 0!");
}
unsigned long __stack_chk_guard;
void __stack_chk_guard_setup(void)
{
__stack_chk_guard = 0x000a0dff;
}
const unsigned long __stack_chk_guard = 0x000a0dff;
void __stack_chk_fail(void)
{
@ -150,8 +145,6 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
{
int ret;
__stack_chk_guard_setup();
output_data = (unsigned char *)output_start;
free_mem_ptr = free_mem_ptr_p;
free_mem_end_ptr = free_mem_ptr_end_p;


@ -70,7 +70,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
void *kto = kmap_atomic(to);
if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
__flush_dcache_page(page_mapping_file(from), from);
raw_spin_lock(&minicache_lock);


@ -76,7 +76,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
unsigned long kfrom, kto;
if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
__flush_dcache_page(page_mapping_file(from), from);
/* FIXME: not highmem safe */
discard_old_kernel_data(page_address(to));


@ -90,7 +90,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
void *kto = kmap_atomic(to);
if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
__flush_dcache_page(page_mapping_file(from), from);
raw_spin_lock(&minicache_lock);


@ -195,7 +195,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
if (page == ZERO_PAGE(0))
return;
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
__flush_dcache_page(mapping, page);
if (mapping) {


@ -285,7 +285,7 @@ void __sync_icache_dcache(pte_t pteval)
page = pfn_to_page(pfn);
if (cache_is_vipt_aliasing())
mapping = page_mapping(page);
mapping = page_mapping_file(page);
else
mapping = NULL;
@ -333,7 +333,7 @@ void flush_dcache_page(struct page *page)
return;
}
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (!cache_ops_need_broadcast() &&
mapping && !page_mapcount(page))
@ -363,7 +363,7 @@ void flush_kernel_dcache_page(struct page *page)
if (cache_is_vivt() || cache_is_vipt_aliasing()) {
struct address_space *mapping;
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (!mapping || mapping_mapped(mapping)) {
void *addr;


@ -76,12 +76,7 @@ void error(char *x)
#include "../../../../lib/decompress_unxz.c"
#endif
unsigned long __stack_chk_guard;
void __stack_chk_guard_setup(void)
{
__stack_chk_guard = 0x000a0dff;
}
const unsigned long __stack_chk_guard = 0x000a0dff;
void __stack_chk_fail(void)
{
@ -92,8 +87,6 @@ void decompress_kernel(unsigned long boot_heap_start)
{
unsigned long zimage_start, zimage_size;
__stack_chk_guard_setup();
zimage_start = (unsigned long)(&__image_begin);
zimage_size = (unsigned long)(&__image_end) -
(unsigned long)(&__image_begin);


@ -86,7 +86,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
void __flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
struct address_space *mapping = page_mapping_file(page);
unsigned long addr;
if (mapping && !mapping_mapped(mapping)) {


@ -180,7 +180,7 @@ void flush_dcache_page(struct page *page)
if (page == ZERO_PAGE(0))
return;
mapping = page_mapping(page);
mapping = page_mapping_file(page);
/* Flush this page if there are aliases. */
if (mapping && !mapping_mapped(mapping)) {
@ -215,7 +215,7 @@ void update_mmu_cache(struct vm_area_struct *vma,
if (page == ZERO_PAGE(0))
return;
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
__flush_dcache_page(mapping, page);


@ -88,7 +88,8 @@ update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
return;
page = pfn_to_page(pfn);
if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
if (page_mapping_file(page) &&
test_bit(PG_dcache_dirty, &page->flags)) {
flush_kernel_dcache_page_addr(pfn_va(pfn));
clear_bit(PG_dcache_dirty, &page->flags);
} else if (parisc_requires_coherency())
@ -304,7 +305,7 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
struct address_space *mapping = page_mapping_file(page);
struct vm_area_struct *mpnt;
unsigned long offset;
unsigned long addr, old_addr = 0;


@ -117,12 +117,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor,
unsigned long ceiling);
/*
* The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
* to override the version in mm/hugetlb.c
*/
#define vma_mmu_pagesize vma_mmu_pagesize
/*
* If the arch doesn't supply something else, assume that hugepage
* size aligned regions are ok without further preparation.


@ -568,10 +568,7 @@ unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
if (!radix_enabled())
return 1UL << mmu_psize_to_shift(psize);
#endif
if (!is_vm_hugetlb_page(vma))
return PAGE_SIZE;
return huge_page_size(hstate_vma(vma));
return vma_kernel_pagesize(vma);
}
static inline bool is_power_of_4(unsigned long x)


@ -112,7 +112,7 @@ static int mm_iommu_move_page_from_cma(struct page *page)
put_page(page); /* Drop the gup reference */
ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
NULL, 0, MIGRATE_SYNC, MR_CMA);
NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE);
if (ret) {
if (!list_empty(&cma_migrate_pages))
putback_movable_pages(&cma_migrate_pages);


@ -38,6 +38,7 @@
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>


@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <asm/msi_bitmap.h>


@ -15,7 +15,7 @@
#include <linux/hardirq.h>
#include <linux/log2.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/time.h>
#include <linux/module.h>
#include <linux/sched/signal.h>


@ -27,7 +27,6 @@
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/kmemleak.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>


@ -104,12 +104,7 @@ static void error(char *x)
while(1); /* Halt */
}
unsigned long __stack_chk_guard;
void __stack_chk_guard_setup(void)
{
__stack_chk_guard = 0x000a0dff;
}
const unsigned long __stack_chk_guard = 0x000a0dff;
void __stack_chk_fail(void)
{
@ -130,8 +125,6 @@ void decompress_kernel(void)
{
unsigned long output_addr;
__stack_chk_guard_setup();
#ifdef CONFIG_SUPERH64
output_addr = (CONFIG_MEMORY_START + 0x2000);
#else


@ -112,7 +112,7 @@ static void sh4_flush_dcache_page(void *arg)
struct page *page = arg;
unsigned long addr = (unsigned long)page_address(page);
#ifndef CONFIG_SMP
struct address_space *mapping = page_mapping(page);
struct address_space *mapping = page_mapping_file(page);
if (mapping && !mapping_mapped(mapping))
clear_bit(PG_dcache_clean, &page->flags);


@ -136,7 +136,7 @@ static void __flush_dcache_page(unsigned long phys)
static void sh7705_flush_dcache_page(void *arg)
{
struct page *page = arg;
struct address_space *mapping = page_mapping(page);
struct address_space *mapping = page_mapping_file(page);
if (mapping && !mapping_mapped(mapping))
clear_bit(PG_dcache_clean, &page->flags);


@ -22,7 +22,6 @@
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>
#include <asm/ptrace.h>
#include <asm/processor.h>


@ -929,9 +929,9 @@ static inline void __local_flush_dcache_page(struct page *page)
#ifdef DCACHE_ALIASING_POSSIBLE
__flush_dcache_page(page_address(page),
((tlb_type == spitfire) &&
page_mapping(page) != NULL));
page_mapping_file(page) != NULL));
#else
if (page_mapping(page) != NULL &&
if (page_mapping_file(page) != NULL &&
tlb_type == spitfire)
__flush_icache_page(__pa(page_address(page)));
#endif
@ -958,7 +958,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
if (tlb_type == spitfire) {
data0 = ((u64)&xcall_flush_dcache_page_spitfire);
if (page_mapping(page) != NULL)
if (page_mapping_file(page) != NULL)
data0 |= ((u64)1 << 32);
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
@ -994,7 +994,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
pg_addr = page_address(page);
if (tlb_type == spitfire) {
data0 = ((u64)&xcall_flush_dcache_page_spitfire);
if (page_mapping(page) != NULL)
if (page_mapping_file(page) != NULL)
data0 |= ((u64)1 << 32);
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE


@ -206,9 +206,9 @@ inline void flush_dcache_page_impl(struct page *page)
#ifdef DCACHE_ALIASING_POSSIBLE
__flush_dcache_page(page_address(page),
((tlb_type == spitfire) &&
page_mapping(page) != NULL));
page_mapping_file(page) != NULL));
#else
if (page_mapping(page) != NULL &&
if (page_mapping_file(page) != NULL &&
tlb_type == spitfire)
__flush_icache_page(__pa(page_address(page)));
#endif
@ -490,7 +490,7 @@ void flush_dcache_page(struct page *page)
this_cpu = get_cpu();
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (mapping && !mapping_mapped(mapping)) {
int dirty = test_bit(PG_dcache_dirty, &page->flags);
if (dirty) {


@ -128,7 +128,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
goto no_cache_flush;
/* A real file page? */
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (!mapping)
goto no_cache_flush;


@ -83,7 +83,7 @@ void flush_dcache_page(struct page *page)
if (page == ZERO_PAGE(0))
return;
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (mapping && !mapping_mapped(mapping))
clear_bit(PG_dcache_clean, &page->flags);


@ -503,7 +503,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
if (page == ZERO_PAGE(0))
return;
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
__flush_dcache_page(mapping, page);
if (mapping)


@ -6,7 +6,6 @@
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>
#include <asm/proto.h>
#include <asm/dma.h>


@ -1328,14 +1328,39 @@ int kern_addr_valid(unsigned long addr)
return pfn_valid(pte_pfn(*pte));
}
/*
* Block size is the minimum amount of memory which can be hotplugged or
* hotremoved. It must be power of two and must be equal or larger than
* MIN_MEMORY_BLOCK_SIZE.
*/
#define MAX_BLOCK_SIZE (2UL << 30)
/* Amount of ram needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)
static unsigned long probe_memory_block_size(void)
{
unsigned long bz = MIN_MEMORY_BLOCK_SIZE;
unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
unsigned long bz;
/* if system is UV or has 64GB of RAM or more, use large blocks */
if (is_uv_system() || ((max_pfn << PAGE_SHIFT) >= (64UL << 30)))
bz = 2UL << 30; /* 2GB */
/* If this is UV system, always set 2G block size */
if (is_uv_system()) {
bz = MAX_BLOCK_SIZE;
goto done;
}
/* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
bz = MIN_MEMORY_BLOCK_SIZE;
goto done;
}
/* Find the largest allowed block size that aligns to memory end */
for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
if (IS_ALIGNED(boot_mem_end, bz))
break;
}
done:
pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);
return bz;
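
To make the new selection rule concrete, here is a small standalone userspace sketch of the same logic (illustrative only, not the kernel code itself); it assumes a 64-bit unsigned long and the usual x86_64 minimum section size of 128MB:

#include <stdio.h>

#define MIN_BLOCK        (128UL << 20)  /* assumed MIN_MEMORY_BLOCK_SIZE */
#define MAX_BLOCK        (2UL << 30)    /* MAX_BLOCK_SIZE: 2GB */
#define LARGE_THRESHOLD  (64UL << 30)   /* MEM_SIZE_FOR_LARGE_BLOCK */

static unsigned long pick_block_size(unsigned long boot_mem_end, int is_uv)
{
	unsigned long bz;

	if (is_uv)
		return MAX_BLOCK;       /* UV systems always get 2GB blocks */
	if (boot_mem_end < LARGE_THRESHOLD)
		return MIN_BLOCK;       /* small machines keep the minimum */
	/* largest power-of-two block that divides the end of memory evenly */
	for (bz = MAX_BLOCK; bz > MIN_BLOCK; bz >>= 1)
		if (boot_mem_end % bz == 0)
			break;
	return bz;
}

int main(void)
{
	/* 65GB of RAM ends on a 1GB (but not 2GB) boundary, so 1024MB blocks */
	printf("%lu MB\n", pick_block_size(65UL << 30, 0) >> 20);
	return 0;
}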


@ -127,7 +127,7 @@ EXPORT_SYMBOL(copy_user_highpage);
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
struct address_space *mapping = page_mapping_file(page);
/*
* If we have a mapping but the page is not mapped to user-space


@ -187,13 +187,14 @@ int memory_isolate_notify(unsigned long val, void *v)
}
/*
* The probe routines leave the pages reserved, just as the bootmem code does.
* Make sure they're still that way.
* The probe routines leave the pages uninitialized, just as the bootmem code
* does. Make sure we do not access them, but instead use only information from
* within sections.
*/
static bool pages_correctly_reserved(unsigned long start_pfn)
static bool pages_correctly_probed(unsigned long start_pfn)
{
int i, j;
struct page *page;
unsigned long section_nr = pfn_to_section_nr(start_pfn);
unsigned long section_nr_end = section_nr + sections_per_block;
unsigned long pfn = start_pfn;
/*
@ -201,21 +202,24 @@ static bool pages_correctly_reserved(unsigned long start_pfn)
* SPARSEMEM_VMEMMAP. We lookup the page once per section
* and assume memmap is contiguous within each section
*/
for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
for (; section_nr < section_nr_end; section_nr++) {
if (WARN_ON_ONCE(!pfn_valid(pfn)))
return false;
page = pfn_to_page(pfn);
for (j = 0; j < PAGES_PER_SECTION; j++) {
if (PageReserved(page + j))
continue;
printk(KERN_WARNING "section number %ld page number %d "
"not reserved, was it already online?\n",
pfn_to_section_nr(pfn), j);
if (!present_section_nr(section_nr)) {
pr_warn("section %ld pfn[%lx, %lx) not present",
section_nr, pfn, pfn + PAGES_PER_SECTION);
return false;
} else if (!valid_section_nr(section_nr)) {
pr_warn("section %ld pfn[%lx, %lx) no valid memmap",
section_nr, pfn, pfn + PAGES_PER_SECTION);
return false;
} else if (online_section_nr(section_nr)) {
pr_warn("section %ld pfn[%lx, %lx) is already online",
section_nr, pfn, pfn + PAGES_PER_SECTION);
return false;
}
pfn += PAGES_PER_SECTION;
}
return true;
@ -237,7 +241,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
switch (action) {
case MEM_ONLINE:
if (!pages_correctly_reserved(start_pfn))
if (!pages_correctly_probed(start_pfn))
return -EBUSY;
ret = online_pages(start_pfn, nr_pages, online_type);
@ -708,7 +712,7 @@ static int add_memory_block(int base_section_nr)
* need an interface for the VM to add new memory regions,
* but without onlining it.
*/
int register_new_memory(int nid, struct mem_section *section)
int hotplug_memory_register(int nid, struct mem_section *section)
{
int ret = 0;
struct memory_block *mem;
@ -727,7 +731,7 @@ int register_new_memory(int nid, struct mem_section *section)
}
if (mem->section_count == sections_per_block)
ret = register_mem_sect_under_node(mem, nid);
ret = register_mem_sect_under_node(mem, nid, false);
out:
mutex_unlock(&mem_sysfs_mutex);
return ret;


@ -399,13 +399,16 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
}
/* register memory section under specified node if it spans that node */
int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
int register_mem_sect_under_node(struct memory_block *mem_blk, int nid,
bool check_nid)
{
int ret;
unsigned long pfn, sect_start_pfn, sect_end_pfn;
if (!mem_blk)
return -EFAULT;
mem_blk->nid = nid;
if (!node_online(nid))
return 0;
@ -425,11 +428,18 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
continue;
}
page_nid = get_nid_for_pfn(pfn);
if (page_nid < 0)
continue;
if (page_nid != nid)
continue;
/*
* We need to check if page belongs to nid only for the boot
* case, during hotplug we know that all pages in the memory
* block belong to the same node.
*/
if (check_nid) {
page_nid = get_nid_for_pfn(pfn);
if (page_nid < 0)
continue;
if (page_nid != nid)
continue;
}
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
@ -504,7 +514,7 @@ int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
ret = register_mem_sect_under_node(mem_blk, nid);
ret = register_mem_sect_under_node(mem_blk, nid, true);
if (!err)
err = ret;


@ -44,6 +44,11 @@ static const char *default_compressor = "lzo";
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
* Pages that compress to sizes equals or greater than this are stored
* uncompressed in memory.
*/
static size_t huge_class_size;
static void zram_free_page(struct zram *zram, size_t index);
@ -786,6 +791,8 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
return false;
}
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
return true;
}
@ -965,7 +972,7 @@ compress_again:
return ret;
}
if (unlikely(comp_len > max_zpage_size)) {
if (unlikely(comp_len >= huge_class_size)) {
if (zram_wb_enabled(zram) && allow_wb) {
zcomp_stream_put(zram->comp);
ret = write_to_bdev(zram, bvec, index, bio, &element);


@ -21,22 +21,6 @@
#include "zcomp.h"
/*-- Configurable parameters */
/*
* Pages that compress to size greater than this are stored
* uncompressed in memory.
*/
static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/*
* NOTE: max_zpage_size must be less than or equal to:
* ZS_MAX_ALLOC_SIZE. Otherwise, zs_malloc() would
* always return failure.
*/
/*-- End of configurable params */
#define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)
#define ZRAM_LOGICAL_BLOCK_SHIFT 12
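
For context, a rough worked example (illustrative, not taken from the patch): with a 4096-byte page the removed constant evaluated to 4096 / 4 * 3 = 3072 bytes, so any page that compressed to more than 3072 bytes was stored uncompressed. zs_huge_class_size() instead reports the size at which zsmalloc itself starts treating objects as huge, so the cutoff now tracks the allocator's real size classes rather than a fixed fraction of the page size.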


@ -439,10 +439,20 @@ static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
return 0;
}
static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
struct file *filp = vma->vm_file;
struct dev_dax *dev_dax = filp->private_data;
struct dax_region *dax_region = dev_dax->region;
return dax_region->align;
}
static const struct vm_operations_struct dax_vm_ops = {
.fault = dev_dax_fault,
.huge_fault = dev_dax_huge_fault,
.split = dev_dax_split,
.pagesize = dev_dax_pagesize,
};
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)


@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>


@ -25,7 +25,6 @@
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>


@ -35,6 +35,7 @@
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/pinctrl/consumer.h>


@ -31,7 +31,6 @@
#include "efuse.h"
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");


@ -32,7 +32,6 @@
#include "../rtl8192ce/def.h"
#include "fw_common.h"
#include <linux/export.h>
#include <linux/kmemleak.h>
static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
{


@ -30,7 +30,7 @@
#include "rtl8188e_hal.h"
#include <linux/firmware.h>
#include <linux/kmemleak.h>
#include <linux/slab.h>
static void _rtl88e_enable_fw_download(struct adapter *adapt, bool enable)
{


@ -31,7 +31,6 @@
#include "efuse.h"
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");


@ -23,7 +23,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>


@ -292,6 +292,10 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
#ifdef CONFIG_9P_FSCACHE
kfree(v9ses->cachetag);
v9ses->cachetag = match_strdup(&args[0]);
if (!v9ses->cachetag) {
ret = -ENOMEM;
goto free_and_return;
}
#endif
break;
case Opt_cache:
@ -471,6 +475,9 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
return fid;
err_clnt:
#ifdef CONFIG_9P_FSCACHE
kfree(v9ses->cachetag);
#endif
p9_client_destroy(v9ses->clnt);
err_names:
kfree(v9ses->uname);


@ -578,6 +578,24 @@ static int v9fs_at_to_dotl_flags(int flags)
return rflags;
}
/**
* v9fs_dec_count - helper functon to drop i_nlink.
*
* If a directory had nlink <= 2 (including . and ..), then we should not drop
* the link count, which indicates the underlying exported fs doesn't maintain
* nlink accurately. e.g.
* - overlayfs sets nlink to 1 for merged dir
* - ext4 (with dir_nlink feature enabled) sets nlink to 1 if a dir has more
* than EXT4_LINK_MAX (65000) links.
*
* @inode: inode whose nlink is being dropped
*/
static void v9fs_dec_count(struct inode *inode)
{
if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
drop_nlink(inode);
}
/**
* v9fs_remove - helper function to remove files and directories
* @dir: directory inode that is being deleted
@ -621,9 +639,9 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
*/
if (flags & AT_REMOVEDIR) {
clear_nlink(inode);
drop_nlink(dir);
v9fs_dec_count(dir);
} else
drop_nlink(inode);
v9fs_dec_count(inode);
v9fs_invalidate_inode_attr(inode);
v9fs_invalidate_inode_attr(dir);
@ -1024,12 +1042,12 @@ clunk_newdir:
if (S_ISDIR(new_inode->i_mode))
clear_nlink(new_inode);
else
drop_nlink(new_inode);
v9fs_dec_count(new_inode);
}
if (S_ISDIR(old_inode->i_mode)) {
if (!new_inode)
inc_nlink(new_dir);
drop_nlink(old_dir);
v9fs_dec_count(old_dir);
}
v9fs_invalidate_inode_attr(old_inode);
v9fs_invalidate_inode_attr(old_dir);


@ -94,7 +94,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
if (v9ses->cache)
sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE;
sb->s_flags |= SB_ACTIVE | SB_DIRSYNC | SB_NOATIME;
sb->s_flags |= SB_ACTIVE | SB_DIRSYNC;
if (!v9ses->cache)
sb->s_flags |= SB_SYNCHRONOUS;


@ -1324,7 +1324,8 @@ static void flush_disk(struct block_device *bdev, bool kill_dirty)
* @bdev: struct bdev to adjust.
*
* This routine checks to see if the bdev size does not match the disk size
* and adjusts it if it differs.
* and adjusts it if it differs. When shrinking the bdev size, its all caches
* are freed.
*/
void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
{
@ -1337,7 +1338,8 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
"%s: detected capacity change from %lld to %lld\n",
disk->disk_name, bdev_size, disk_size);
i_size_write(bdev->bd_inode, disk_size);
flush_disk(bdev, false);
if (bdev_size > disk_size)
flush_disk(bdev, false);
}
}
EXPORT_SYMBOL(check_disk_size_change);


@ -1511,7 +1511,7 @@ void block_invalidatepage(struct page *page, unsigned int offset,
* The get_block cached value has been unconditionally invalidated,
* so real IO is not possible anymore.
*/
if (offset == 0)
if (length == PAGE_SIZE)
try_to_release_page(page, 0);
out:
return;


@ -1177,9 +1177,9 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
unsigned blkbits = i_blkbits;
unsigned blocksize_mask = (1 << blkbits) - 1;
ssize_t retval = -EINVAL;
size_t count = iov_iter_count(iter);
const size_t count = iov_iter_count(iter);
loff_t offset = iocb->ki_pos;
loff_t end = offset + count;
const loff_t end = offset + count;
struct dio *dio;
struct dio_submit sdio = { 0, };
struct buffer_head map_bh = { 0, };
@ -1200,7 +1200,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
}
/* watch out for a 0 len io from a tricksy fs */
if (iov_iter_rw(iter) == READ && !iov_iter_count(iter))
if (iov_iter_rw(iter) == READ && !count)
return 0;
dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
@ -1315,8 +1315,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
dio->should_dirty = (iter->type == ITER_IOVEC);
sdio.iter = iter;
sdio.final_block_in_request =
(offset + iov_iter_count(iter)) >> blkbits;
sdio.final_block_in_request = end >> blkbits;
/*
* In case of non-aligned buffers, we may need 2 more


@ -138,10 +138,14 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
/*
* page based offset in vm_pgoff could be sufficiently large to
* overflow a (l)off_t when converted to byte offset.
* overflow a loff_t when converted to byte offset. This can
* only happen on architectures where sizeof(loff_t) ==
* sizeof(unsigned long). So, only check in those instances.
*/
if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
return -EINVAL;
if (sizeof(unsigned long) == sizeof(loff_t)) {
if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
return -EINVAL;
}
/* must be huge page aligned */
if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))


@ -7119,7 +7119,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
goto out_commit;
did_quota = 1;
data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
data_ac->ac_resv = &oi->ip_la_data_resv;
ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off,
&num);


@ -346,7 +346,7 @@ static int ocfs2_readpage(struct file *file, struct page *page)
unlock = 0;
out_alloc:
up_read(&OCFS2_I(inode)->ip_alloc_sem);
up_read(&oi->ip_alloc_sem);
out_inode_unlock:
ocfs2_inode_unlock(inode, 0);
out:
@ -2213,7 +2213,7 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
down_write(&oi->ip_alloc_sem);
if (first_get_block) {
if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
if (ocfs2_sparse_alloc(osb))
ret = ocfs2_zero_tail(inode, di_bh, pos);
else
ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,


@ -78,7 +78,7 @@ static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level)
/*
* Using a named enum representing lock types in terms of #N bit stored in
* iocb->private, which is going to be used for communication between
* ocfs2_dio_end_io() and ocfs2_file_aio_write/read().
* ocfs2_dio_end_io() and ocfs2_file_write/read_iter().
*/
enum ocfs2_iocb_lock_bits {
OCFS2_IOCB_RW_LOCK = 0,


@ -570,7 +570,16 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
current_page, vec_len, vec_start);
len = bio_add_page(bio, page, vec_len, vec_start);
if (len != vec_len) break;
if (len != vec_len) {
mlog(ML_ERROR, "Adding page[%d] to bio failed, "
"page %p, len %d, vec_len %u, vec_start %u, "
"bi_sector %llu\n", current_page, page, len,
vec_len, vec_start,
(unsigned long long)bio->bi_iter.bi_sector);
bio_put(bio);
bio = ERR_PTR(-EIO);
return bio;
}
cs += vec_len / (PAGE_SIZE/spp);
vec_start = 0;


@ -3072,7 +3072,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
* We need to return the correct block within the
* cluster which should hold our entry.
*/
off = ocfs2_dx_dir_hash_idx(OCFS2_SB(dir->i_sb),
off = ocfs2_dx_dir_hash_idx(osb,
&lookup->dl_hinfo);
get_bh(dx_leaves[off]);
lookup->dl_dx_leaf_bh = dx_leaves[off];


@ -224,14 +224,12 @@ void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
struct dlm_lock *lock)
{
dlm_astlockfunc_t *fn;
struct dlm_lockstatus *lksb;
mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name,
res->lockname.len, res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
lksb = lock->lksb;
fn = lock->ast;
BUG_ON(lock->ml.node != dlm->node_num);


@ -140,6 +140,7 @@ struct dlm_ctxt
u8 node_num;
u32 key;
u8 joining_node;
u8 migrate_done; /* set to 1 means node has migrated all lock resources */
wait_queue_head_t dlm_join_events;
unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
@ -960,13 +961,10 @@ static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);
u8 dlm_nm_this_node(struct dlm_ctxt *dlm);
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
int dlm_nm_init(struct dlm_ctxt *dlm);
int dlm_heartbeat_init(struct dlm_ctxt *dlm);
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);


@ -461,6 +461,19 @@ redo_bucket:
cond_resched_lock(&dlm->spinlock);
num += n;
}
if (!num) {
if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
mlog(0, "%s: perhaps there are more lock resources "
"need to be migrated after dlm recovery\n", dlm->name);
ret = -EAGAIN;
} else {
mlog(0, "%s: we won't do dlm recovery after migrating "
"all lock resources\n", dlm->name);
dlm->migrate_done = 1;
}
}
spin_unlock(&dlm->spinlock);
wake_up(&dlm->dlm_thread_wq);
@ -675,20 +688,6 @@ static void dlm_leave_domain(struct dlm_ctxt *dlm)
spin_unlock(&dlm->spinlock);
}
int dlm_shutting_down(struct dlm_ctxt *dlm)
{
int ret = 0;
spin_lock(&dlm_domain_lock);
if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
ret = 1;
spin_unlock(&dlm_domain_lock);
return ret;
}
void dlm_unregister_domain(struct dlm_ctxt *dlm)
{
int leave = 0;
@ -2052,6 +2051,8 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN;
init_waitqueue_head(&dlm->dlm_join_events);
dlm->migrate_done = 0;
dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;


@ -28,7 +28,30 @@
extern spinlock_t dlm_domain_lock;
extern struct list_head dlm_domains;
int dlm_shutting_down(struct dlm_ctxt *dlm);
static inline int dlm_joined(struct dlm_ctxt *dlm)
{
int ret = 0;
spin_lock(&dlm_domain_lock);
if (dlm->dlm_state == DLM_CTXT_JOINED)
ret = 1;
spin_unlock(&dlm_domain_lock);
return ret;
}
static inline int dlm_shutting_down(struct dlm_ctxt *dlm)
{
int ret = 0;
spin_lock(&dlm_domain_lock);
if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
ret = 1;
spin_unlock(&dlm_domain_lock);
return ret;
}
void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
int node_num);


@ -77,8 +77,7 @@ int dlm_init_lock_cache(void)
void dlm_destroy_lock_cache(void)
{
if (dlm_lock_cache)
kmem_cache_destroy(dlm_lock_cache);
kmem_cache_destroy(dlm_lock_cache);
}
/* Tell us whether we can grant a new lock request.


@ -414,8 +414,7 @@ int dlm_init_mle_cache(void)
void dlm_destroy_mle_cache(void)
{
if (dlm_mle_cache)
kmem_cache_destroy(dlm_mle_cache);
kmem_cache_destroy(dlm_mle_cache);
}
static void dlm_mle_release(struct kref *kref)
@ -472,15 +471,11 @@ bail:
void dlm_destroy_master_caches(void)
{
if (dlm_lockname_cache) {
kmem_cache_destroy(dlm_lockname_cache);
dlm_lockname_cache = NULL;
}
kmem_cache_destroy(dlm_lockname_cache);
dlm_lockname_cache = NULL;
if (dlm_lockres_cache) {
kmem_cache_destroy(dlm_lockres_cache);
dlm_lockres_cache = NULL;
}
kmem_cache_destroy(dlm_lockres_cache);
dlm_lockres_cache = NULL;
}
static void dlm_lockres_release(struct kref *kref)
@ -2495,13 +2490,13 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
}
/*
* A migrateable resource is one that is :
* A migratable resource is one that is :
* 1. locally mastered, and,
* 2. zero local locks, and,
* 3. one or more non-local locks, or, one or more references
* Returns 1 if yes, 0 if not.
*/
static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
static int dlm_is_lockres_migratable(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
enum dlm_lockres_list idx;
@ -2532,7 +2527,7 @@ static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
continue;
}
cookie = be64_to_cpu(lock->ml.cookie);
mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on "
mlog(0, "%s: Not migratable res %.*s, lock %u:%llu on "
"%s list\n", dlm->name, res->lockname.len,
res->lockname.name,
dlm_get_lock_cookie_node(cookie),
@ -2548,7 +2543,7 @@ static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
return 0;
}
mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len,
mlog(0, "%s: res %.*s, Migratable\n", dlm->name, res->lockname.len,
res->lockname.name);
return 1;
@ -2792,7 +2787,7 @@ int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
assert_spin_locked(&dlm->spinlock);
spin_lock(&res->spinlock);
if (dlm_is_lockres_migrateable(dlm, res))
if (dlm_is_lockres_migratable(dlm, res))
target = dlm_pick_migration_target(dlm, res);
spin_unlock(&res->spinlock);


@ -62,7 +62,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm);
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
@ -423,12 +423,11 @@ void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
spin_lock(&dlm->spinlock);
assert_spin_locked(&dlm->spinlock);
BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
dlm->name, dlm->reco.dead_node);
dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
spin_unlock(&dlm->spinlock);
}
static void dlm_end_recovery(struct dlm_ctxt *dlm)
@ -456,6 +455,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
spin_lock(&dlm->spinlock);
if (dlm->migrate_done) {
mlog(0, "%s: no need do recovery after migrating all "
"lock resources\n", dlm->name);
spin_unlock(&dlm->spinlock);
return 0;
}
/* check to see if the new master has died */
if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
test_bit(dlm->reco.new_master, dlm->recovery_map)) {
@ -490,12 +496,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
dlm->reco.dead_node);
spin_unlock(&dlm->spinlock);
/* take write barrier */
/* (stops the list reshuffling thread, proxy ast handling) */
dlm_begin_recovery(dlm);
spin_unlock(&dlm->spinlock);
if (dlm->reco.new_master == dlm->node_num)
goto master_here;
@ -739,7 +746,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
}
if (destroy)
dlm_destroy_recovery_area(dlm, dead_node);
dlm_destroy_recovery_area(dlm);
return status;
}
@ -764,7 +771,7 @@ static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
if (!ndata) {
dlm_destroy_recovery_area(dlm, dead_node);
dlm_destroy_recovery_area(dlm);
return -ENOMEM;
}
ndata->node_num = num;
@ -778,7 +785,7 @@ static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
return 0;
}
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm)
{
struct dlm_reco_node_data *ndata, *next;
LIST_HEAD(tmplist);
@ -1378,6 +1385,15 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
if (!dlm_grab(dlm))
return -EINVAL;
if (!dlm_joined(dlm)) {
mlog(ML_ERROR, "Domain %s not joined! "
"lockres %.*s, master %u\n",
dlm->name, mres->lockname_len,
mres->lockname, mres->master);
dlm_put(dlm);
return -EINVAL;
}
BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
real_master = mres->master;
@ -1807,7 +1823,6 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
int i, j, bad;
struct dlm_lock *lock;
u8 from = O2NM_MAX_NODES;
unsigned int added = 0;
__be64 c;
mlog(0, "running %d locks for this lockres\n", mres->num_locks);
@ -1823,7 +1838,6 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
spin_lock(&res->spinlock);
dlm_lockres_set_refmap_bit(dlm, res, from);
spin_unlock(&res->spinlock);
added++;
break;
}
BUG_ON(ml->highest_blocked != LKM_IVMODE);
@ -1911,7 +1925,6 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
/* do not alter lock refcount. switching lists. */
list_move_tail(&lock->list, queue);
spin_unlock(&res->spinlock);
added++;
mlog(0, "just reordered a local lock!\n");
continue;
@ -2037,7 +2050,6 @@ skip_lvb:
"setting refmap bit\n", dlm->name,
res->lockname.len, res->lockname.name, ml->node);
dlm_lockres_set_refmap_bit(dlm, res, ml->node);
added++;
}
spin_unlock(&res->spinlock);
}
@ -2331,13 +2343,6 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
__dlm_dirty_lockres(dlm, res);
}
/* if this node is the recovery master, and there are no
* locks for a given lockres owned by this node that are in
* either PR or EX mode, zero out the lvb before requesting.
*
*/
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
struct dlm_lock_resource *res;


@ -1756,8 +1756,7 @@ int ocfs2_rw_lock(struct inode *inode, int write)
level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
0);
status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
if (status < 0)
mlog_errno(status);
@ -1796,7 +1795,7 @@ void ocfs2_rw_unlock(struct inode *inode, int write)
write ? "EXMODE" : "PRMODE");
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
ocfs2_cluster_unlock(osb, lockres, level);
}
/*
@ -1816,8 +1815,7 @@ int ocfs2_open_lock(struct inode *inode)
lockres = &OCFS2_I(inode)->ip_open_lockres;
status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
DLM_LOCK_PR, 0, 0);
status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_PR, 0, 0);
if (status < 0)
mlog_errno(status);
@ -1854,8 +1852,7 @@ int ocfs2_try_open_lock(struct inode *inode, int write)
* other nodes and the -EAGAIN will indicate to the caller that
* this inode is still in use.
*/
status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
level, DLM_LKF_NOQUEUE, 0);
status = ocfs2_cluster_lock(osb, lockres, level, DLM_LKF_NOQUEUE, 0);
out:
return status;
@ -1876,11 +1873,9 @@ void ocfs2_open_unlock(struct inode *inode)
goto out;
if(lockres->l_ro_holders)
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
DLM_LOCK_PR);
ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_PR);
if(lockres->l_ex_holders)
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
DLM_LOCK_EX);
ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
out:
return;
@ -2601,9 +2596,9 @@ void ocfs2_inode_unlock(struct inode *inode,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
ex ? "EXMODE" : "PRMODE");
if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
if (!ocfs2_is_hard_readonly(osb) &&
!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
ocfs2_cluster_unlock(osb, lockres, level);
}
/*


@ -101,7 +101,7 @@ static int ocfs2_file_open(struct inode *inode, struct file *file)
struct ocfs2_inode_info *oi = OCFS2_I(inode);
trace_ocfs2_file_open(inode, file, file->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(unsigned long long)oi->ip_blkno,
file->f_path.dentry->d_name.len,
file->f_path.dentry->d_name.name, mode);
@ -116,7 +116,7 @@ static int ocfs2_file_open(struct inode *inode, struct file *file)
/* Check that the inode hasn't been wiped from disk by another
* node. If it hasn't then we're safe as long as we hold the
* spin lock until our increment of open count. */
if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
if (oi->ip_flags & OCFS2_INODE_DELETED) {
spin_unlock(&oi->ip_lock);
status = -ENOENT;
@ -190,7 +190,7 @@ static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
bool needs_barrier = false;
trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
OCFS2_I(inode)->ip_blkno,
oi->ip_blkno,
file->f_path.dentry->d_name.len,
file->f_path.dentry->d_name.name,
(unsigned long long)datasync);
@ -296,7 +296,7 @@ int ocfs2_update_inode_atime(struct inode *inode,
ocfs2_journal_dirty(handle, bh);
out_commit:
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
ocfs2_commit_trans(osb, handle);
out:
return ret;
}
@ -2257,7 +2257,7 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
file->f_path.dentry->d_name.len,
file->f_path.dentry->d_name.name,
@ -2405,7 +2405,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
filp->f_path.dentry->d_name.len,
filp->f_path.dentry->d_name.name,
@ -2448,7 +2448,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
*
* Take and drop the meta data lock to update inode fields
* like i_size. This allows the checks down below
* generic_file_aio_read() a chance of actually working.
* generic_file_read_iter() a chance of actually working.
*/
ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level,
!nowait);
@ -2460,7 +2460,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
ocfs2_inode_unlock(inode, lock_level);
ret = generic_file_read_iter(iocb, to);
trace_generic_file_aio_read_ret(ret);
trace_generic_file_read_iter_ret(ret);
/* buffered aio wouldn't have proper lock coverage today */
BUG_ON(ret == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));


@ -53,36 +53,6 @@ static const char * const ocfs2_filecheck_errs[] = {
"UNSUPPORTED"
};
static DEFINE_SPINLOCK(ocfs2_filecheck_sysfs_lock);
static LIST_HEAD(ocfs2_filecheck_sysfs_list);
struct ocfs2_filecheck {
struct list_head fc_head; /* File check entry list head */
spinlock_t fc_lock;
unsigned int fc_max; /* Maximum number of entry in list */
unsigned int fc_size; /* Current entry count in list */
unsigned int fc_done; /* Finished entry count in list */
};
struct ocfs2_filecheck_sysfs_entry { /* sysfs entry per mounting */
struct list_head fs_list;
atomic_t fs_count;
struct super_block *fs_sb;
struct kset *fs_devicekset;
struct kset *fs_fcheckkset;
struct ocfs2_filecheck *fs_fcheck;
};
#define OCFS2_FILECHECK_MAXSIZE 100
#define OCFS2_FILECHECK_MINSIZE 10
/* File check operation type */
enum {
OCFS2_FILECHECK_TYPE_CHK = 0, /* Check a file(inode) */
OCFS2_FILECHECK_TYPE_FIX, /* Fix a file(inode) */
OCFS2_FILECHECK_TYPE_SET = 100 /* Set entry list maximum size */
};
struct ocfs2_filecheck_entry {
struct list_head fe_list;
unsigned long fe_ino;
@ -110,35 +80,84 @@ ocfs2_filecheck_error(int errno)
return ocfs2_filecheck_errs[errno - OCFS2_FILECHECK_ERR_START + 1];
}
static ssize_t ocfs2_filecheck_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf);
static ssize_t ocfs2_filecheck_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count);
static struct kobj_attribute ocfs2_attr_filecheck_chk =
static ssize_t ocfs2_filecheck_attr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf);
static ssize_t ocfs2_filecheck_attr_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count);
static struct kobj_attribute ocfs2_filecheck_attr_chk =
__ATTR(check, S_IRUSR | S_IWUSR,
ocfs2_filecheck_show,
ocfs2_filecheck_store);
static struct kobj_attribute ocfs2_attr_filecheck_fix =
ocfs2_filecheck_attr_show,
ocfs2_filecheck_attr_store);
static struct kobj_attribute ocfs2_filecheck_attr_fix =
__ATTR(fix, S_IRUSR | S_IWUSR,
ocfs2_filecheck_show,
ocfs2_filecheck_store);
static struct kobj_attribute ocfs2_attr_filecheck_set =
ocfs2_filecheck_attr_show,
ocfs2_filecheck_attr_store);
static struct kobj_attribute ocfs2_filecheck_attr_set =
__ATTR(set, S_IRUSR | S_IWUSR,
ocfs2_filecheck_show,
ocfs2_filecheck_store);
ocfs2_filecheck_attr_show,
ocfs2_filecheck_attr_store);
static struct attribute *ocfs2_filecheck_attrs[] = {
&ocfs2_filecheck_attr_chk.attr,
&ocfs2_filecheck_attr_fix.attr,
&ocfs2_filecheck_attr_set.attr,
NULL
};
static void ocfs2_filecheck_release(struct kobject *kobj)
{
struct ocfs2_filecheck_sysfs_entry *entry = container_of(kobj,
struct ocfs2_filecheck_sysfs_entry, fs_kobj);
complete(&entry->fs_kobj_unregister);
}
static ssize_t
ocfs2_filecheck_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
ssize_t ret = -EIO;
struct kobj_attribute *kattr = container_of(attr,
struct kobj_attribute, attr);
kobject_get(kobj);
if (kattr->show)
ret = kattr->show(kobj, kattr, buf);
kobject_put(kobj);
return ret;
}
static ssize_t
ocfs2_filecheck_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
ssize_t ret = -EIO;
struct kobj_attribute *kattr = container_of(attr,
struct kobj_attribute, attr);
kobject_get(kobj);
if (kattr->store)
ret = kattr->store(kobj, kattr, buf, count);
kobject_put(kobj);
return ret;
}
static const struct sysfs_ops ocfs2_filecheck_ops = {
.show = ocfs2_filecheck_show,
.store = ocfs2_filecheck_store,
};
static struct kobj_type ocfs2_ktype_filecheck = {
.default_attrs = ocfs2_filecheck_attrs,
.sysfs_ops = &ocfs2_filecheck_ops,
.release = ocfs2_filecheck_release,
};
static void
ocfs2_filecheck_sysfs_free(struct ocfs2_filecheck_sysfs_entry *entry)
{
struct ocfs2_filecheck_entry *p;
if (!atomic_dec_and_test(&entry->fs_count)) {
wait_var_event(&entry->fs_count,
!atomic_read(&entry->fs_count));
}
spin_lock(&entry->fs_fcheck->fc_lock);
while (!list_empty(&entry->fs_fcheck->fc_head)) {
p = list_first_entry(&entry->fs_fcheck->fc_head,
@ -149,151 +168,48 @@ ocfs2_filecheck_sysfs_free(struct ocfs2_filecheck_sysfs_entry *entry)
}
spin_unlock(&entry->fs_fcheck->fc_lock);
kset_unregister(entry->fs_fcheckkset);
kset_unregister(entry->fs_devicekset);
kfree(entry->fs_fcheck);
kfree(entry);
entry->fs_fcheck = NULL;
}
static void
ocfs2_filecheck_sysfs_add(struct ocfs2_filecheck_sysfs_entry *entry)
int ocfs2_filecheck_create_sysfs(struct ocfs2_super *osb)
{
spin_lock(&ocfs2_filecheck_sysfs_lock);
list_add_tail(&entry->fs_list, &ocfs2_filecheck_sysfs_list);
spin_unlock(&ocfs2_filecheck_sysfs_lock);
}
static int ocfs2_filecheck_sysfs_del(const char *devname)
{
struct ocfs2_filecheck_sysfs_entry *p;
spin_lock(&ocfs2_filecheck_sysfs_lock);
list_for_each_entry(p, &ocfs2_filecheck_sysfs_list, fs_list) {
if (!strcmp(p->fs_sb->s_id, devname)) {
list_del(&p->fs_list);
spin_unlock(&ocfs2_filecheck_sysfs_lock);
ocfs2_filecheck_sysfs_free(p);
return 0;
}
}
spin_unlock(&ocfs2_filecheck_sysfs_lock);
return 1;
}
static void
ocfs2_filecheck_sysfs_put(struct ocfs2_filecheck_sysfs_entry *entry)
{
if (atomic_dec_and_test(&entry->fs_count))
wake_up_var(&entry->fs_count);
}
static struct ocfs2_filecheck_sysfs_entry *
ocfs2_filecheck_sysfs_get(const char *devname)
{
struct ocfs2_filecheck_sysfs_entry *p = NULL;
spin_lock(&ocfs2_filecheck_sysfs_lock);
list_for_each_entry(p, &ocfs2_filecheck_sysfs_list, fs_list) {
if (!strcmp(p->fs_sb->s_id, devname)) {
atomic_inc(&p->fs_count);
spin_unlock(&ocfs2_filecheck_sysfs_lock);
return p;
}
}
spin_unlock(&ocfs2_filecheck_sysfs_lock);
return NULL;
}
int ocfs2_filecheck_create_sysfs(struct super_block *sb)
{
int ret = 0;
struct kset *device_kset = NULL;
struct kset *fcheck_kset = NULL;
struct ocfs2_filecheck *fcheck = NULL;
struct ocfs2_filecheck_sysfs_entry *entry = NULL;
struct attribute **attrs = NULL;
struct attribute_group attrgp;
if (!ocfs2_kset)
return -ENOMEM;
attrs = kmalloc(sizeof(struct attribute *) * 4, GFP_NOFS);
if (!attrs) {
ret = -ENOMEM;
goto error;
} else {
attrs[0] = &ocfs2_attr_filecheck_chk.attr;
attrs[1] = &ocfs2_attr_filecheck_fix.attr;
attrs[2] = &ocfs2_attr_filecheck_set.attr;
attrs[3] = NULL;
memset(&attrgp, 0, sizeof(attrgp));
attrgp.attrs = attrs;
}
int ret;
struct ocfs2_filecheck *fcheck;
struct ocfs2_filecheck_sysfs_entry *entry = &osb->osb_fc_ent;
fcheck = kmalloc(sizeof(struct ocfs2_filecheck), GFP_NOFS);
if (!fcheck) {
ret = -ENOMEM;
goto error;
} else {
INIT_LIST_HEAD(&fcheck->fc_head);
spin_lock_init(&fcheck->fc_lock);
fcheck->fc_max = OCFS2_FILECHECK_MINSIZE;
fcheck->fc_size = 0;
fcheck->fc_done = 0;
if (!fcheck)
return -ENOMEM;
INIT_LIST_HEAD(&fcheck->fc_head);
spin_lock_init(&fcheck->fc_lock);
fcheck->fc_max = OCFS2_FILECHECK_MINSIZE;
fcheck->fc_size = 0;
fcheck->fc_done = 0;
entry->fs_kobj.kset = osb->osb_dev_kset;
init_completion(&entry->fs_kobj_unregister);
ret = kobject_init_and_add(&entry->fs_kobj, &ocfs2_ktype_filecheck,
NULL, "filecheck");
if (ret) {
kfree(fcheck);
return ret;
}
if (strlen(sb->s_id) <= 0) {
mlog(ML_ERROR,
"Cannot get device basename when create filecheck sysfs\n");
ret = -ENODEV;
goto error;
}
device_kset = kset_create_and_add(sb->s_id, NULL, &ocfs2_kset->kobj);
if (!device_kset) {
ret = -ENOMEM;
goto error;
}
fcheck_kset = kset_create_and_add("filecheck", NULL,
&device_kset->kobj);
if (!fcheck_kset) {
ret = -ENOMEM;
goto error;
}
ret = sysfs_create_group(&fcheck_kset->kobj, &attrgp);
if (ret)
goto error;
entry = kmalloc(sizeof(struct ocfs2_filecheck_sysfs_entry), GFP_NOFS);
if (!entry) {
ret = -ENOMEM;
goto error;
} else {
atomic_set(&entry->fs_count, 1);
entry->fs_sb = sb;
entry->fs_devicekset = device_kset;
entry->fs_fcheckkset = fcheck_kset;
entry->fs_fcheck = fcheck;
ocfs2_filecheck_sysfs_add(entry);
}
kfree(attrs);
entry->fs_fcheck = fcheck;
return 0;
error:
kfree(attrs);
kfree(entry);
kfree(fcheck);
kset_unregister(fcheck_kset);
kset_unregister(device_kset);
return ret;
}
int ocfs2_filecheck_remove_sysfs(struct super_block *sb)
void ocfs2_filecheck_remove_sysfs(struct ocfs2_super *osb)
{
return ocfs2_filecheck_sysfs_del(sb->s_id);
if (!osb->osb_fc_ent.fs_fcheck)
return;
kobject_del(&osb->osb_fc_ent.fs_kobj);
kobject_put(&osb->osb_fc_ent.fs_kobj);
wait_for_completion(&osb->osb_fc_ent.fs_kobj_unregister);
ocfs2_filecheck_sysfs_free(&osb->osb_fc_ent);
}
static int
@ -310,7 +226,7 @@ ocfs2_filecheck_adjust_max(struct ocfs2_filecheck_sysfs_entry *ent,
spin_lock(&ent->fs_fcheck->fc_lock);
if (len < (ent->fs_fcheck->fc_size - ent->fs_fcheck->fc_done)) {
mlog(ML_ERROR,
mlog(ML_NOTICE,
"Cannot set online file check maximum entry number "
"to %u due to too many pending entries(%u)\n",
len, ent->fs_fcheck->fc_size - ent->fs_fcheck->fc_done);
@ -387,7 +303,7 @@ ocfs2_filecheck_args_parse(const char *name, const char *buf, size_t count,
return 0;
}
static ssize_t ocfs2_filecheck_show(struct kobject *kobj,
static ssize_t ocfs2_filecheck_attr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
@ -395,19 +311,12 @@ static ssize_t ocfs2_filecheck_show(struct kobject *kobj,
ssize_t ret = 0, total = 0, remain = PAGE_SIZE;
unsigned int type;
struct ocfs2_filecheck_entry *p;
struct ocfs2_filecheck_sysfs_entry *ent;
struct ocfs2_filecheck_sysfs_entry *ent = container_of(kobj,
struct ocfs2_filecheck_sysfs_entry, fs_kobj);
if (ocfs2_filecheck_type_parse(attr->attr.name, &type))
return -EINVAL;
ent = ocfs2_filecheck_sysfs_get(kobj->parent->name);
if (!ent) {
mlog(ML_ERROR,
"Cannot get the corresponding entry via device basename %s\n",
kobj->name);
return -ENODEV;
}
if (type == OCFS2_FILECHECK_TYPE_SET) {
spin_lock(&ent->fs_fcheck->fc_lock);
total = snprintf(buf, remain, "%u\n", ent->fs_fcheck->fc_max);
@ -441,11 +350,26 @@ static ssize_t ocfs2_filecheck_show(struct kobject *kobj,
spin_unlock(&ent->fs_fcheck->fc_lock);
exit:
ocfs2_filecheck_sysfs_put(ent);
return total;
}
static int
static inline int
ocfs2_filecheck_is_dup_entry(struct ocfs2_filecheck_sysfs_entry *ent,
unsigned long ino)
{
struct ocfs2_filecheck_entry *p;
list_for_each_entry(p, &ent->fs_fcheck->fc_head, fe_list) {
if (!p->fe_done) {
if (p->fe_ino == ino)
return 1;
}
}
return 0;
}
static inline int
ocfs2_filecheck_erase_entry(struct ocfs2_filecheck_sysfs_entry *ent)
{
struct ocfs2_filecheck_entry *p;
@ -484,21 +408,21 @@ static void
ocfs2_filecheck_done_entry(struct ocfs2_filecheck_sysfs_entry *ent,
struct ocfs2_filecheck_entry *entry)
{
entry->fe_done = 1;
spin_lock(&ent->fs_fcheck->fc_lock);
entry->fe_done = 1;
ent->fs_fcheck->fc_done++;
spin_unlock(&ent->fs_fcheck->fc_lock);
}
static unsigned int
ocfs2_filecheck_handle(struct super_block *sb,
ocfs2_filecheck_handle(struct ocfs2_super *osb,
unsigned long ino, unsigned int flags)
{
unsigned int ret = OCFS2_FILECHECK_ERR_SUCCESS;
struct inode *inode = NULL;
int rc;
inode = ocfs2_iget(OCFS2_SB(sb), ino, flags, 0);
inode = ocfs2_iget(osb, ino, flags, 0);
if (IS_ERR(inode)) {
rc = (int)(-(long)inode);
if (rc >= OCFS2_FILECHECK_ERR_START &&
@ -516,11 +440,14 @@ static void
ocfs2_filecheck_handle_entry(struct ocfs2_filecheck_sysfs_entry *ent,
struct ocfs2_filecheck_entry *entry)
{
struct ocfs2_super *osb = container_of(ent, struct ocfs2_super,
osb_fc_ent);
if (entry->fe_type == OCFS2_FILECHECK_TYPE_CHK)
entry->fe_status = ocfs2_filecheck_handle(ent->fs_sb,
entry->fe_status = ocfs2_filecheck_handle(osb,
entry->fe_ino, OCFS2_FI_FLAG_FILECHECK_CHK);
else if (entry->fe_type == OCFS2_FILECHECK_TYPE_FIX)
entry->fe_status = ocfs2_filecheck_handle(ent->fs_sb,
entry->fe_status = ocfs2_filecheck_handle(osb,
entry->fe_ino, OCFS2_FI_FLAG_FILECHECK_FIX);
else
entry->fe_status = OCFS2_FILECHECK_ERR_UNSUPPORTED;
@ -528,30 +455,21 @@ ocfs2_filecheck_handle_entry(struct ocfs2_filecheck_sysfs_entry *ent,
ocfs2_filecheck_done_entry(ent, entry);
}
static ssize_t ocfs2_filecheck_store(struct kobject *kobj,
static ssize_t ocfs2_filecheck_attr_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
ssize_t ret = 0;
struct ocfs2_filecheck_args args;
struct ocfs2_filecheck_entry *entry;
struct ocfs2_filecheck_sysfs_entry *ent;
ssize_t ret = 0;
struct ocfs2_filecheck_sysfs_entry *ent = container_of(kobj,
struct ocfs2_filecheck_sysfs_entry, fs_kobj);
if (count == 0)
return count;
if (ocfs2_filecheck_args_parse(attr->attr.name, buf, count, &args)) {
mlog(ML_ERROR, "Invalid arguments for online file check\n");
if (ocfs2_filecheck_args_parse(attr->attr.name, buf, count, &args))
return -EINVAL;
}
ent = ocfs2_filecheck_sysfs_get(kobj->parent->name);
if (!ent) {
mlog(ML_ERROR,
"Cannot get the corresponding entry via device basename %s\n",
kobj->parent->name);
return -ENODEV;
}
if (args.fa_type == OCFS2_FILECHECK_TYPE_SET) {
ret = ocfs2_filecheck_adjust_max(ent, args.fa_len);
@ -565,13 +483,16 @@ static ssize_t ocfs2_filecheck_store(struct kobject *kobj,
}
spin_lock(&ent->fs_fcheck->fc_lock);
if ((ent->fs_fcheck->fc_size >= ent->fs_fcheck->fc_max) &&
(ent->fs_fcheck->fc_done == 0)) {
mlog(ML_ERROR,
if (ocfs2_filecheck_is_dup_entry(ent, args.fa_ino)) {
ret = -EEXIST;
kfree(entry);
} else if ((ent->fs_fcheck->fc_size >= ent->fs_fcheck->fc_max) &&
(ent->fs_fcheck->fc_done == 0)) {
mlog(ML_NOTICE,
"Cannot do more file check "
"since file check queue(%u) is full now\n",
ent->fs_fcheck->fc_max);
ret = -EBUSY;
ret = -EAGAIN;
kfree(entry);
} else {
if ((ent->fs_fcheck->fc_size >= ent->fs_fcheck->fc_max) &&
@ -596,6 +517,5 @@ static ssize_t ocfs2_filecheck_store(struct kobject *kobj,
ocfs2_filecheck_handle_entry(ent, entry);
exit:
ocfs2_filecheck_sysfs_put(ent);
return (!ret ? count : ret);
}
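
The filecheck rework above replaces the global ocfs2_filecheck_sysfs_list lookup by device name with a kobject embedded in ocfs2_super, so the show/store handlers recover their per-mount state with container_of(). A minimal stand-alone sketch of that pattern (struct names and fields here are illustrative stand-ins, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* container_of(): recover the enclosing struct from a pointer to one member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kobj { const char *name; };	/* stand-in for struct kobject */

struct fc_entry {			/* stand-in for ocfs2_filecheck_sysfs_entry */
	unsigned int fc_max;
	struct kobj fs_kobj;		/* embedded, not pointed to */
};

/* A sysfs-style handler only receives the kobject it was registered on... */
static void show(struct kobj *kobj)
{
	struct fc_entry *ent = container_of(kobj, struct fc_entry, fs_kobj);

	printf("%s: fc_max=%u\n", kobj->name, ent->fc_max);
}

int main(void)
{
	struct fc_entry ent = { .fc_max = 10, .fs_kobj = { .name = "filecheck" } };

	show(&ent.fs_kobj);		/* ...so no global list or refcount dance is needed */
	return 0;
}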


@ -43,7 +43,32 @@ enum {
#define OCFS2_FILECHECK_ERR_START OCFS2_FILECHECK_ERR_FAILED
#define OCFS2_FILECHECK_ERR_END OCFS2_FILECHECK_ERR_UNSUPPORTED
int ocfs2_filecheck_create_sysfs(struct super_block *sb);
int ocfs2_filecheck_remove_sysfs(struct super_block *sb);
struct ocfs2_filecheck {
struct list_head fc_head; /* File check entry list head */
spinlock_t fc_lock;
unsigned int fc_max; /* Maximum number of entry in list */
unsigned int fc_size; /* Current entry count in list */
unsigned int fc_done; /* Finished entry count in list */
};
#define OCFS2_FILECHECK_MAXSIZE 100
#define OCFS2_FILECHECK_MINSIZE 10
/* File check operation type */
enum {
OCFS2_FILECHECK_TYPE_CHK = 0, /* Check a file(inode) */
OCFS2_FILECHECK_TYPE_FIX, /* Fix a file(inode) */
OCFS2_FILECHECK_TYPE_SET = 100 /* Set entry list maximum size */
};
struct ocfs2_filecheck_sysfs_entry { /* sysfs entry per partition */
struct kobject fs_kobj;
struct completion fs_kobj_unregister;
struct ocfs2_filecheck *fs_fcheck;
};
int ocfs2_filecheck_create_sysfs(struct ocfs2_super *osb);
void ocfs2_filecheck_remove_sysfs(struct ocfs2_super *osb);
#endif /* FILECHECK_H */


@ -1135,7 +1135,7 @@ static void ocfs2_clear_inode(struct inode *inode)
trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno,
inode->i_nlink);
mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL,
mlog_bug_on_msg(osb == NULL,
"Inode=%lu\n", inode->i_ino);
dquot_drop(inode);
@ -1150,7 +1150,7 @@ static void ocfs2_clear_inode(struct inode *inode)
ocfs2_mark_lockres_freeing(osb, &oi->ip_inode_lockres);
ocfs2_mark_lockres_freeing(osb, &oi->ip_open_lockres);
ocfs2_resv_discard(&OCFS2_SB(inode->i_sb)->osb_la_resmap,
ocfs2_resv_discard(&osb->osb_la_resmap,
&oi->ip_la_data_resv);
ocfs2_resv_init_once(&oi->ip_la_data_resv);
@ -1160,7 +1160,7 @@ static void ocfs2_clear_inode(struct inode *inode)
* exception here are successfully wiped inodes - their
* metadata can now be considered to be part of the system
* inodes from which it came. */
if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED))
if (!(oi->ip_flags & OCFS2_INODE_DELETED))
ocfs2_checkpoint_inode(inode);
mlog_bug_on_msg(!list_empty(&oi->ip_io_markers),
@ -1223,7 +1223,7 @@ static void ocfs2_clear_inode(struct inode *inode)
* the journal is flushed before journal shutdown. Thus it is safe to
* have inodes get cleaned up after journal shutdown.
*/
jbd2_journal_release_jbd_inode(OCFS2_SB(inode->i_sb)->journal->j_journal,
jbd2_journal_release_jbd_inode(osb->journal->j_journal,
&oi->ip_jinode);
}


@ -525,7 +525,7 @@ static int __ocfs2_mknod_locked(struct inode *dir,
* these are used by the support functions here and in
* callers. */
inode->i_ino = ino_from_blkno(osb->sb, fe_blkno);
OCFS2_I(inode)->ip_blkno = fe_blkno;
oi->ip_blkno = fe_blkno;
spin_lock(&osb->osb_lock);
inode->i_generation = osb->s_next_generation++;
spin_unlock(&osb->osb_lock);
@ -1186,8 +1186,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
}
trace_ocfs2_double_lock_end(
(unsigned long long)OCFS2_I(inode1)->ip_blkno,
(unsigned long long)OCFS2_I(inode2)->ip_blkno);
(unsigned long long)oi1->ip_blkno,
(unsigned long long)oi2->ip_blkno);
bail:
if (status)


@ -50,6 +50,8 @@
#include "reservations.h"
#include "filecheck.h"
/* Caching of metadata buffers */
/* Most user visible OCFS2 inodes will have very few pieces of
@ -472,6 +474,12 @@ struct ocfs2_super
* workqueue and schedule on our own.
*/
struct workqueue_struct *ocfs2_wq;
/* sysfs directory per partition */
struct kset *osb_dev_kset;
/* file check related stuff */
struct ocfs2_filecheck_sysfs_entry osb_fc_ent;
};
#define OCFS2_SB(sb) ((struct ocfs2_super *)(sb)->s_fs_info)


@ -1311,11 +1311,11 @@ DEFINE_OCFS2_FILE_OPS(ocfs2_file_release);
DEFINE_OCFS2_FILE_OPS(ocfs2_sync_file);
DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_write);
DEFINE_OCFS2_FILE_OPS(ocfs2_file_write_iter);
DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_write);
DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_read);
DEFINE_OCFS2_FILE_OPS(ocfs2_file_read_iter);
DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_truncate_file);
@ -1467,7 +1467,7 @@ TRACE_EVENT(ocfs2_prepare_inode_for_write,
__entry->saved_pos, __entry->count, __entry->wait)
);
DEFINE_OCFS2_INT_EVENT(generic_file_aio_read_ret);
DEFINE_OCFS2_INT_EVENT(generic_file_read_iter_ret);
/* End of trace events for fs/ocfs2/file.c. */


@ -573,7 +573,7 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
BUG_ON(ocfs2_is_refcount_inode(inode));
trace_ocfs2_create_refcount_tree(
(unsigned long long)OCFS2_I(inode)->ip_blkno);
(unsigned long long)oi->ip_blkno);
ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
if (ret) {
@ -3359,7 +3359,7 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
unsigned int ext_flags;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
if (!ocfs2_refcount_tree(osb)) {
return ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n",
inode->i_ino);
}
@ -3707,7 +3707,7 @@ int ocfs2_add_refcount_flag(struct inode *inode,
trace_ocfs2_add_refcount_flag(ref_blocks, credits);
if (ref_blocks) {
ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
ret = ocfs2_reserve_new_metadata_blocks(osb,
ref_blocks, &meta_ac);
if (ret) {
mlog_errno(ret);
@ -4766,8 +4766,8 @@ static int ocfs2_reflink_inodes_lock(struct inode *s_inode,
*bh2 = *bh1;
trace_ocfs2_double_lock_end(
(unsigned long long)OCFS2_I(inode1)->ip_blkno,
(unsigned long long)OCFS2_I(inode2)->ip_blkno);
(unsigned long long)oi1->ip_blkno,
(unsigned long long)oi2->ip_blkno);
return 0;


@ -79,8 +79,6 @@ static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res)
return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset);
}
static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
static int ocfs2_block_group_fill(handle_t *handle,
struct inode *alloc_inode,
@ -387,7 +385,7 @@ static int ocfs2_block_group_fill(handle_t *handle,
memset(bg, 0, sb->s_blocksize);
strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
bg->bg_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
bg->bg_generation = cpu_to_le32(osb->fs_generation);
bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb, 1,
osb->s_feature_incompat));
bg->bg_chain = cpu_to_le16(my_chain);
@ -1521,7 +1519,7 @@ static int ocfs2_cluster_group_search(struct inode *inode,
OCFS2_I(inode)->ip_clusters, max_bits);
}
ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
ret = ocfs2_block_group_find_clear_bits(osb,
group_bh, bits_wanted,
max_bits, res);
if (ret)
@ -2626,53 +2624,6 @@ int ocfs2_release_clusters(handle_t *handle,
_ocfs2_clear_bit);
}
static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg)
{
printk("Block Group:\n");
printk("bg_signature: %s\n", bg->bg_signature);
printk("bg_size: %u\n", bg->bg_size);
printk("bg_bits: %u\n", bg->bg_bits);
printk("bg_free_bits_count: %u\n", bg->bg_free_bits_count);
printk("bg_chain: %u\n", bg->bg_chain);
printk("bg_generation: %u\n", le32_to_cpu(bg->bg_generation));
printk("bg_next_group: %llu\n",
(unsigned long long)bg->bg_next_group);
printk("bg_parent_dinode: %llu\n",
(unsigned long long)bg->bg_parent_dinode);
printk("bg_blkno: %llu\n",
(unsigned long long)bg->bg_blkno);
}
static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe)
{
int i;
printk("Suballoc Inode %llu:\n", (unsigned long long)fe->i_blkno);
printk("i_signature: %s\n", fe->i_signature);
printk("i_size: %llu\n",
(unsigned long long)fe->i_size);
printk("i_clusters: %u\n", fe->i_clusters);
printk("i_generation: %u\n",
le32_to_cpu(fe->i_generation));
printk("id1.bitmap1.i_used: %u\n",
le32_to_cpu(fe->id1.bitmap1.i_used));
printk("id1.bitmap1.i_total: %u\n",
le32_to_cpu(fe->id1.bitmap1.i_total));
printk("id2.i_chain.cl_cpg: %u\n", fe->id2.i_chain.cl_cpg);
printk("id2.i_chain.cl_bpc: %u\n", fe->id2.i_chain.cl_bpc);
printk("id2.i_chain.cl_count: %u\n", fe->id2.i_chain.cl_count);
printk("id2.i_chain.cl_next_free_rec: %u\n",
fe->id2.i_chain.cl_next_free_rec);
for(i = 0; i < fe->id2.i_chain.cl_next_free_rec; i++) {
printk("fe->id2.i_chain.cl_recs[%d].c_free: %u\n", i,
fe->id2.i_chain.cl_recs[i].c_free);
printk("fe->id2.i_chain.cl_recs[%d].c_total: %u\n", i,
fe->id2.i_chain.cl_recs[i].c_total);
printk("fe->id2.i_chain.cl_recs[%d].c_blkno: %llu\n", i,
(unsigned long long)fe->id2.i_chain.cl_recs[i].c_blkno);
}
}
/*
* For a given allocation, determine which allocators will need to be
* accessed, and lock them, reserving the appropriate number of bits.


@ -423,10 +423,10 @@ static int ocfs2_sync_fs(struct super_block *sb, int wait)
ocfs2_schedule_truncate_log_flush(osb, 0);
}
if (jbd2_journal_start_commit(OCFS2_SB(sb)->journal->j_journal,
if (jbd2_journal_start_commit(osb->journal->j_journal,
&target)) {
if (wait)
jbd2_log_wait_commit(OCFS2_SB(sb)->journal->j_journal,
jbd2_log_wait_commit(osb->journal->j_journal,
target);
}
return 0;
@ -1161,6 +1161,23 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
ocfs2_complete_mount_recovery(osb);
osb->osb_dev_kset = kset_create_and_add(sb->s_id, NULL,
&ocfs2_kset->kobj);
if (!osb->osb_dev_kset) {
status = -ENOMEM;
mlog(ML_ERROR, "Unable to create device kset %s.\n", sb->s_id);
goto read_super_error;
}
/* Create filecheck sysfs related directories/files at
* /sys/fs/ocfs2/<devname>/filecheck */
if (ocfs2_filecheck_create_sysfs(osb)) {
status = -ENOMEM;
mlog(ML_ERROR, "Unable to create filecheck sysfs directory at "
"/sys/fs/ocfs2/%s/filecheck.\n", sb->s_id);
goto read_super_error;
}
if (ocfs2_mount_local(osb))
snprintf(nodestr, sizeof(nodestr), "local");
else
@ -1199,9 +1216,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
/* Start this when the mount is almost sure of being successful */
ocfs2_orphan_scan_start(osb);
/* Create filecheck sysfile /sys/fs/ocfs2/<devname>/filecheck */
ocfs2_filecheck_create_sysfs(sb);
return status;
read_super_error:
@ -1653,7 +1667,6 @@ static void ocfs2_put_super(struct super_block *sb)
ocfs2_sync_blockdev(sb);
ocfs2_dismount_volume(sb, 0);
ocfs2_filecheck_remove_sysfs(sb);
}
static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
@ -1768,12 +1781,9 @@ static int ocfs2_initialize_mem_caches(void)
NULL);
if (!ocfs2_inode_cachep || !ocfs2_dquot_cachep ||
!ocfs2_qf_chunk_cachep) {
if (ocfs2_inode_cachep)
kmem_cache_destroy(ocfs2_inode_cachep);
if (ocfs2_dquot_cachep)
kmem_cache_destroy(ocfs2_dquot_cachep);
if (ocfs2_qf_chunk_cachep)
kmem_cache_destroy(ocfs2_qf_chunk_cachep);
kmem_cache_destroy(ocfs2_inode_cachep);
kmem_cache_destroy(ocfs2_dquot_cachep);
kmem_cache_destroy(ocfs2_qf_chunk_cachep);
return -ENOMEM;
}
@ -1787,16 +1797,13 @@ static void ocfs2_free_mem_caches(void)
* destroy cache.
*/
rcu_barrier();
if (ocfs2_inode_cachep)
kmem_cache_destroy(ocfs2_inode_cachep);
kmem_cache_destroy(ocfs2_inode_cachep);
ocfs2_inode_cachep = NULL;
if (ocfs2_dquot_cachep)
kmem_cache_destroy(ocfs2_dquot_cachep);
kmem_cache_destroy(ocfs2_dquot_cachep);
ocfs2_dquot_cachep = NULL;
if (ocfs2_qf_chunk_cachep)
kmem_cache_destroy(ocfs2_qf_chunk_cachep);
kmem_cache_destroy(ocfs2_qf_chunk_cachep);
ocfs2_qf_chunk_cachep = NULL;
}
@ -1899,6 +1906,12 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
osb = OCFS2_SB(sb);
BUG_ON(!osb);
/* Remove file check sysfs related directories/files,
* and wait for the pending file check operations */
ocfs2_filecheck_remove_sysfs(osb);
kset_unregister(osb->osb_dev_kset);
debugfs_remove(osb->osb_ctxt);
/* Orphan scan should be stopped as early as possible */


@ -633,6 +633,5 @@ int __init init_ocfs2_uptodate_cache(void)
void exit_ocfs2_uptodate_cache(void)
{
if (ocfs2_uptodate_cachep)
kmem_cache_destroy(ocfs2_uptodate_cachep);
kmem_cache_destroy(ocfs2_uptodate_cachep);
}


@ -3564,7 +3564,7 @@ int ocfs2_xattr_set(struct inode *inode,
.not_found = -ENODATA,
};
if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
if (!ocfs2_supports_xattr(osb))
return -EOPNOTSUPP;
/*


@ -64,10 +64,11 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name,
struct kmem_cache;
int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#ifdef CONFIG_FAILSLAB
extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags);
extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#else
static inline bool should_failslab(struct kmem_cache *s, gfp_t gfpflags)
static inline bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
return false;
}


@ -43,7 +43,7 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark);
void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
slab_flags_t *flags);
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
@ -92,7 +92,7 @@ static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
size_t *size,
unsigned int *size,
slab_flags_t *flags) {}
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}


@ -32,6 +32,7 @@ struct list_lru_one {
};
struct list_lru_memcg {
struct rcu_head rcu;
/* array of per cgroup lists, indexed by memcg_cache_id */
struct list_lru_one *lru[0];
};
@ -43,7 +44,7 @@ struct list_lru_node {
struct list_lru_one lru;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
struct list_lru_memcg *memcg_lrus;
struct list_lru_memcg __rcu *memcg_lrus;
#endif
long nr_items;
} ____cacheline_aligned_in_smp;


@ -416,21 +416,11 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif
extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
phys_addr_t end_addr);
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
return 0;
}
static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
phys_addr_t end_addr)
{
return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK */
#endif /* __KERNEL__ */


@ -33,6 +33,7 @@ struct memory_block {
void *hw; /* optional pointer to fw/hw data */
int (*phys_callback)(struct memory_block *);
struct device dev;
int nid; /* NID for this memory block */
};
int arch_get_memory_phys_device(unsigned long start_pfn);
@ -109,7 +110,7 @@ extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
extern int register_memory_isolate_notifier(struct notifier_block *nb);
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
extern int register_new_memory(int, struct mem_section *);
int hotplug_memory_register(int nid, struct mem_section *section);
#ifdef CONFIG_MEMORY_HOTREMOVE
extern int unregister_memory_section(struct mem_section *);
#endif


@ -51,24 +51,6 @@ enum {
MMOP_ONLINE_MOVABLE,
};
/*
* pgdat resizing functions
*/
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
spin_lock_init(&pgdat->node_size_lock);
}
/*
* Zone resizing functions
*
@ -246,13 +228,6 @@ extern void clear_zone_contiguous(struct zone *zone);
___page; \
})
/*
* Stub functions for when hotplug is off
*/
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
return 0;
@ -293,6 +268,34 @@ static inline bool movable_node_is_enabled(void)
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
* pgdat resizing functions
*/
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
spin_lock_init(&pgdat->node_size_lock);
}
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
* Stub functions for when hotplug is off
*/
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);


@ -25,7 +25,7 @@ enum migrate_reason {
MR_SYSCALL, /* also applies to cpusets */
MR_MEMPOLICY_MBIND,
MR_NUMA_MISPLACED,
MR_CMA,
MR_CONTIG_RANGE,
MR_TYPES
};


@ -386,17 +386,19 @@ struct vm_operations_struct {
void (*close)(struct vm_area_struct * area);
int (*split)(struct vm_area_struct * area, unsigned long addr);
int (*mremap)(struct vm_area_struct * area);
int (*fault)(struct vm_fault *vmf);
int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
vm_fault_t (*fault)(struct vm_fault *vmf);
vm_fault_t (*huge_fault)(struct vm_fault *vmf,
enum page_entry_size pe_size);
void (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
unsigned long (*pagesize)(struct vm_area_struct * area);
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
int (*page_mkwrite)(struct vm_fault *vmf);
vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
int (*pfn_mkwrite)(struct vm_fault *vmf);
vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
/* called by access_process_vm when get_user_pages() fails, typically
* for use by special VMAs that can switch between memory and hardware
@ -903,7 +905,9 @@ extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
struct page *p = (struct page *)page;
return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif
@ -1152,6 +1156,7 @@ static inline pgoff_t page_index(struct page *page)
bool page_mapped(struct page *page);
struct address_space *page_mapping(struct page *page);
struct address_space *page_mapping_file(struct page *page);
/*
* Return true only if the page has been allocated with
@ -2420,6 +2425,44 @@ int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
unsigned long addr, struct page *page)
{
int err = vm_insert_page(vma, addr, page);
if (err == -ENOMEM)
return VM_FAULT_OOM;
if (err < 0 && err != -EBUSY)
return VM_FAULT_SIGBUS;
return VM_FAULT_NOPAGE;
}
static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
unsigned long addr, pfn_t pfn)
{
int err = vm_insert_mixed(vma, addr, pfn);
if (err == -ENOMEM)
return VM_FAULT_OOM;
if (err < 0 && err != -EBUSY)
return VM_FAULT_SIGBUS;
return VM_FAULT_NOPAGE;
}
static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn)
{
int err = vm_insert_pfn(vma, addr, pfn);
if (err == -ENOMEM)
return VM_FAULT_OOM;
if (err < 0 && err != -EBUSY)
return VM_FAULT_SIGBUS;
return VM_FAULT_NOPAGE;
}
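
The new vmf_insert_page()/vmf_insert_mixed()/vmf_insert_pfn() wrappers all encode the same convention: translate the int error returned by vm_insert_*() into a vm_fault_t code for the fault handler. A hedged, stand-alone sketch of that mapping (the VM_FAULT_* values below are illustrative constants, not taken from the headers):

#include <errno.h>
#include <stdio.h>

typedef int vm_fault_t;		/* matches the typedef added to mm_types.h below */

#define VM_FAULT_OOM	0x0001	/* illustrative values only */
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_NOPAGE	0x0100

/* Same shape as the vmf_insert_*() helpers above. */
static vm_fault_t errno_to_vm_fault(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;	/* success, or -EBUSY: a page is already installed */
}

int main(void)
{
	printf("0       -> %#x\n", errno_to_vm_fault(0));
	printf("-ENOMEM -> %#x\n", errno_to_vm_fault(-ENOMEM));
	printf("-EFAULT -> %#x\n", errno_to_vm_fault(-EFAULT));
	return 0;
}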
struct page *follow_page_mask(struct vm_area_struct *vma,
unsigned long address, unsigned int foll_flags,
@ -2589,7 +2632,7 @@ extern int get_hwpoison_page(struct page *page);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages;
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(struct page *page, int flags);
@ -2611,6 +2654,7 @@ enum mf_action_page_type {
MF_MSG_POISONED_HUGE,
MF_MSG_HUGE,
MF_MSG_FREE_HUGE,
MF_MSG_NON_PMD_HUGE,
MF_MSG_UNMAP_FAILED,
MF_MSG_DIRTY_SWAPCACHE,
MF_MSG_CLEAN_SWAPCACHE,


@ -22,6 +22,8 @@
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
typedef int vm_fault_t;
struct address_space;
struct mem_cgroup;
struct hmm;


@ -37,10 +37,10 @@ void dump_mm(const struct mm_struct *mm);
BUG(); \
} \
} while (0)
#define VM_WARN_ON(cond) WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
#define VM_WARN(cond, format...) WARN(cond, format)
#define VM_WARN_ON(cond) (void)WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
#define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format)
#define VM_WARN(cond, format...) (void)WARN(cond, format)
#else
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
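
Casting the VM_WARN* helpers to (void) makes them statement-only in every configuration: with CONFIG_DEBUG_VM disabled they expand to BUILD_BUG_ON_INVALID(), which yields no value, so code that branched on their result would only build on debug kernels. A hedged userspace illustration of the non-rval form (the WARN_ON() body here is a simplified stand-in):

#include <stdio.h>

/* WARN_ON() evaluates to the condition, so callers may branch on it. */
#define WARN_ON(cond) \
	({ int __c = !!(cond); if (__c) fprintf(stderr, "warning\n"); __c; })

/* The VM_ variant is cast to void: statement use only, matching the no-op
 * stubs compiled when CONFIG_DEBUG_VM is off. */
#define VM_WARN_ON(cond)	(void)WARN_ON(cond)

int main(void)
{
	int bad = 1;

	VM_WARN_ON(bad == 1);		/* fine: used as a statement */
	/* if (VM_WARN_ON(bad)) ...	   would not compile: void has no value */
	return 0;
}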


@ -633,14 +633,15 @@ typedef struct pglist_data {
#ifndef CONFIG_NO_BOOTMEM
struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
* Must be held any time you expect node_start_pfn, node_present_pages
* or node_spanned_pages stay constant. Holding this will also
* guarantee that any pfn_valid() stays that way.
*
* pgdat_resize_lock() and pgdat_resize_unlock() are provided to
* manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
* manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
* or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
*
* Nests above zone->lock and zone->span_seqlock
*/
@ -775,7 +776,8 @@ static inline bool is_dev_zone(const struct zone *zone)
#include <linux/memory_hotplug.h>
void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
int classzone_idx, unsigned int alloc_flags,
long free_pages);


@ -67,7 +67,7 @@ extern void unregister_one_node(int nid);
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int register_mem_sect_under_node(struct memory_block *mem_blk,
int nid);
int nid, bool check_nid);
extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
unsigned long phys_index);
@ -97,7 +97,7 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
return 0;
}
static inline int register_mem_sect_under_node(struct memory_block *mem_blk,
int nid)
int nid, bool check_nid)
{
return 0;
}


@ -156,9 +156,18 @@ static __always_inline int PageCompound(struct page *page)
return test_bit(PG_head, &page->flags) || PageTail(page);
}
#define PAGE_POISON_PATTERN -1l
static inline int PagePoisoned(const struct page *page)
{
return page->flags == PAGE_POISON_PATTERN;
}
/*
* Page flags policies wrt compound pages
*
* PF_POISONED_CHECK
* check if this struct page poisoned/uninitialized
*
* PF_ANY:
* the page flag is relevant for small, head and tail pages.
*
@ -176,17 +185,20 @@ static __always_inline int PageCompound(struct page *page)
* PF_NO_COMPOUND:
* the page flag is not relevant for compound pages.
*/
#define PF_ANY(page, enforce) page
#define PF_HEAD(page, enforce) compound_head(page)
#define PF_POISONED_CHECK(page) ({ \
VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \
page; })
#define PF_ANY(page, enforce) PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(PageTail(page), page); \
page;})
PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \
compound_head(page);})
PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
page;})
PF_POISONED_CHECK(page); })
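
PF_POISONED_CHECK() is how the page-flag accessors now catch struct pages whose memmap was never initialized: such pages still carry the all-ones poison pattern in page->flags. A small hedged sketch of the check itself (simplified struct, illustrative only):

#include <stdio.h>
#include <string.h>

struct page { unsigned long flags; };

#define PAGE_POISON_PATTERN	-1l

static int PagePoisoned(const struct page *page)
{
	return page->flags == PAGE_POISON_PATTERN;	/* all bits still set */
}

int main(void)
{
	struct page poisoned, initialized = { .flags = 0 };

	memset(&poisoned, 0xff, sizeof(poisoned));	/* simulate uninitialized memmap */
	printf("poisoned=%d initialized=%d\n",
	       PagePoisoned(&poisoned), PagePoisoned(&initialized));
	return 0;
}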
/*
* Macros to create function definitions for page flags


@ -175,8 +175,7 @@ static inline void page_ref_unfreeze(struct page *page, int count)
VM_BUG_ON_PAGE(page_count(page) != 0, page);
VM_BUG_ON(count == 0);
smp_mb();
atomic_set(&page->_refcount, count);
atomic_set_release(&page->_refcount, count);
if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
__page_ref_unfreeze(page, count);
}
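
page_ref_unfreeze() switching to atomic_set_release() pairs the refcount store with release ordering, so every write made while the page was frozen is visible before another CPU can observe the non-zero count. The C11 equivalent of that store, as a hedged sketch:

#include <stdatomic.h>
#include <stdio.h>

struct fake_page {
	int payload;		/* written while the page is still frozen */
	atomic_int refcount;	/* 0 means frozen */
};

static void unfreeze(struct fake_page *page, int count)
{
	/* release store: writes made before this call (the payload) happen-before
	 * any reader that observes the new count with an acquire load. */
	atomic_store_explicit(&page->refcount, count, memory_order_release);
}

int main(void)
{
	struct fake_page page = { .refcount = 0 };

	page.payload = 42;	/* initialize while frozen */
	unfreeze(&page, 1);
	printf("refcount=%d payload=%d\n",
	       atomic_load_explicit(&page.refcount, memory_order_acquire),
	       page.payload);
	return 0;
}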


@ -125,7 +125,6 @@
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
(unsigned long)ZERO_SIZE_PTR)
#include <linux/kmemleak.h>
#include <linux/kasan.h>
struct mem_cgroup;
@ -137,12 +136,13 @@ bool slab_is_available(void);
extern bool usercopy_fallback;
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
size_t align, slab_flags_t flags,
struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
unsigned int align, slab_flags_t flags,
void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
size_t size, size_t align, slab_flags_t flags,
size_t useroffset, size_t usersize,
unsigned int size, unsigned int align,
slab_flags_t flags,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
@ -308,7 +308,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
* 2 = 129 .. 192 bytes
* n = 2^(n-1)+1 .. 2^n
*/
static __always_inline int kmalloc_index(size_t size)
static __always_inline unsigned int kmalloc_index(size_t size)
{
if (!size)
return 0;
@ -504,7 +504,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
if (!(flags & GFP_DMA)) {
int index = kmalloc_index(size);
unsigned int index = kmalloc_index(size);
if (!index)
return ZERO_SIZE_PTR;
@ -522,11 +522,11 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
* return size or 0 if a kmalloc cache for that
* size does not exist
*/
static __always_inline int kmalloc_size(int n)
static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
if (n > 2)
return 1 << n;
return 1U << n;
if (n == 1 && KMALLOC_MIN_SIZE <= 32)
return 96;
@ -542,7 +542,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
#ifndef CONFIG_SLOB
if (__builtin_constant_p(size) &&
size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
int i = kmalloc_index(size);
unsigned int i = kmalloc_index(size);
if (!i)
return ZERO_SIZE_PTR;
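
kmalloc_index() (with kmalloc_size() now returning 1U << n) maps a request to a power-of-two size class: past the special 96/192-byte caches, index n covers 2^(n-1)+1 .. 2^n bytes. A hedged stand-alone sketch of that mapping, ignoring the special caches and the minimum object size:

#include <stdio.h>

/* Index n serves sizes in (2^(n-1), 2^n]; e.g. 200 bytes -> index 8 (256 bytes). */
static unsigned int size_to_index(size_t size)
{
	unsigned int index = 0;
	size_t cap = 1;

	if (!size)
		return 0;
	while (cap < size) {
		cap <<= 1;
		index++;
	}
	return index;
}

int main(void)
{
	size_t sizes[] = { 8, 100, 129, 200, 4096 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%4zu bytes -> index %2u (%u-byte cache)\n",
		       sizes[i], size_to_index(sizes[i]),
		       1U << size_to_index(sizes[i]));
	return 0;
}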


@ -85,8 +85,8 @@ struct kmem_cache {
unsigned int *random_seq;
#endif
size_t useroffset; /* Usercopy region offset */
size_t usersize; /* Usercopy region size */
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
struct kmem_cache_node *node[MAX_NUMNODES];
};


@ -73,7 +73,7 @@ struct kmem_cache_cpu {
* given order would contain.
*/
struct kmem_cache_order_objects {
unsigned long x;
unsigned int x;
};
/*
@ -84,11 +84,12 @@ struct kmem_cache {
/* Used for retrieving partial slabs etc */
slab_flags_t flags;
unsigned long min_partial;
int size; /* The size of an object including meta data */
int object_size; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
unsigned int size; /* The size of an object including meta data */
unsigned int object_size;/* The size of an object without meta data */
unsigned int offset; /* Free pointer offset. */
#ifdef CONFIG_SLUB_CPU_PARTIAL
int cpu_partial; /* Number of per cpu partial objects to keep around */
/* Number of per cpu partial objects to keep around */
unsigned int cpu_partial;
#endif
struct kmem_cache_order_objects oo;
@ -98,10 +99,10 @@ struct kmem_cache {
gfp_t allocflags; /* gfp flags to use on each alloc */
int refcount; /* Refcount for slab cache destroy */
void (*ctor)(void *);
int inuse; /* Offset to metadata */
int align; /* Alignment */
int reserved; /* Reserved bytes at the end of slabs */
int red_left_pad; /* Left redzone padding size */
unsigned int inuse; /* Offset to metadata */
unsigned int align; /* Alignment */
unsigned int reserved; /* Reserved bytes at the end of slabs */
unsigned int red_left_pad; /* Left redzone padding size */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
#ifdef CONFIG_SYSFS
@ -110,7 +111,8 @@ struct kmem_cache {
#endif
#ifdef CONFIG_MEMCG
struct memcg_cache_params memcg_params;
int max_attr_size; /* for propagation, maximum size of a stored attr */
/* for propagation, maximum size of a stored attr */
unsigned int max_attr_size;
#ifdef CONFIG_SYSFS
struct kset *memcg_kset;
#endif
@ -124,7 +126,7 @@ struct kmem_cache {
/*
* Defragmentation by allocating from a remote node.
*/
int remote_node_defrag_ratio;
unsigned int remote_node_defrag_ratio;
#endif
#ifdef CONFIG_SLAB_FREELIST_RANDOM
@ -135,8 +137,8 @@ struct kmem_cache {
struct kasan_cache kasan_info;
#endif
size_t useroffset; /* Usercopy region offset */
size_t usersize; /* Usercopy region size */
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
struct kmem_cache_node *node[MAX_NUMNODES];
};


@ -400,7 +400,6 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *,
#define SWAP_ADDRESS_SPACE_SHIFT 14
#define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
extern bool swap_vma_readahead;
#define swap_address_space(entry) \
(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
>> SWAP_ADDRESS_SPACE_SHIFT])
@ -422,14 +421,10 @@ extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr,
bool *new_page_allocated);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr);
extern struct page *swap_readahead_detect(struct vm_fault *vmf,
struct vma_swap_readahead *swap_ra);
extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
struct vm_fault *vmf,
struct vma_swap_readahead *swap_ra);
extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
struct vm_fault *vmf);
extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
struct vm_fault *vmf);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
@ -437,11 +432,6 @@ extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);
static inline bool swap_use_vma_readahead(void)
{
return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap);
}
/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
@ -537,26 +527,14 @@ static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}
static inline struct page *swap_cluster_readahead(swp_entry_t entry,
gfp_t gfp_mask, struct vm_fault *vmf)
{
return NULL;
}
static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr)
{
return NULL;
}
static inline bool swap_use_vma_readahead(void)
{
return false;
}
static inline struct page *swap_readahead_detect(
struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
{
return NULL;
}
static inline struct page *do_swap_page_readahead(
swp_entry_t fentry, gfp_t gfp_mask,
struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
struct vm_fault *vmf)
{
return NULL;
}


@ -47,6 +47,8 @@ void zs_destroy_pool(struct zs_pool *pool);
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
void zs_free(struct zs_pool *pool, unsigned long obj);
size_t zs_huge_class_size(struct zs_pool *pool);
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
enum zs_mapmode mm);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);


@ -1114,8 +1114,8 @@ struct proto {
struct kmem_cache *slab;
unsigned int obj_size;
slab_flags_t slab_flags;
size_t useroffset; /* Usercopy region offset */
size_t usersize; /* Usercopy region size */
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
struct percpu_counter *orphan_count;


@ -20,7 +20,7 @@
EM( MR_SYSCALL, "syscall_or_cpuset") \
EM( MR_MEMPOLICY_MBIND, "mempolicy_mbind") \
EM( MR_NUMA_MISPLACED, "numa_misplaced") \
EMe(MR_CMA, "cma")
EMe(MR_CONTIG_RANGE, "contig_range")
/*
* First define the enums in the above macros to be exported to userspace


@ -78,26 +78,29 @@ TRACE_EVENT(mm_vmscan_kswapd_wake,
TRACE_EVENT(mm_vmscan_wakeup_kswapd,
TP_PROTO(int nid, int zid, int order),
TP_PROTO(int nid, int zid, int order, gfp_t gfp_flags),
TP_ARGS(nid, zid, order),
TP_ARGS(nid, zid, order, gfp_flags),
TP_STRUCT__entry(
__field( int, nid )
__field( int, zid )
__field( int, order )
__field( int, nid )
__field( int, zid )
__field( int, order )
__field( gfp_t, gfp_flags )
),
TP_fast_assign(
__entry->nid = nid;
__entry->zid = zid;
__entry->order = order;
__entry->gfp_flags = gfp_flags;
),
TP_printk("nid=%d zid=%d order=%d",
TP_printk("nid=%d zid=%d order=%d gfp_flags=%s",
__entry->nid,
__entry->zid,
__entry->order)
__entry->order,
show_gfp_flags(__entry->gfp_flags))
);
DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,


@ -595,6 +595,8 @@ static void check_mm(struct mm_struct *mm)
void __mmdrop(struct mm_struct *mm)
{
BUG_ON(mm == &init_mm);
WARN_ON_ONCE(mm == current->mm);
WARN_ON_ONCE(mm == current->active_mm);
mm_free_pgd(mm);
destroy_context(mm);
hmm_mm_destroy(mm);

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше