btrfs: use memzero_page() instead of open coded kmap pattern
There are many places where kmap/memset/kunmap patterns occur.  Use the
newly lifted memzero_page() to eliminate direct uses of kmap and leverage
the new core functions' use of kmap_local_page().

The development of this patch was aided by the following coccinelle
script:

// <smpl>
// SPDX-License-Identifier: GPL-2.0-only
// Find kmap/memset/kunmap pattern and replace with memset*page calls
//
// NOTE: Offsets and other expressions may be more complex than what the script
// will automatically generate.  Therefore a catchall rule is provided to find
// the pattern which then must be evaluated by hand.
//
// Confidence: Low
// Copyright: (C) 2021 Intel Corporation
// URL: http://coccinelle.lip6.fr/
// Comments:
// Options:

//
// Then the memset pattern
//
@ memset_rule1 @
expression page, V, L, Off;
identifier ptr;
type VP;
@@

(
-VP ptr = kmap(page);
|
-ptr = kmap(page);
|
-VP ptr = kmap_atomic(page);
|
-ptr = kmap_atomic(page);
)
<+...
(
-memset(ptr, 0, L);
+memzero_page(page, 0, L);
|
-memset(ptr + Off, 0, L);
+memzero_page(page, Off, L);
|
-memset(ptr, V, L);
+memset_page(page, V, 0, L);
|
-memset(ptr + Off, V, L);
+memset_page(page, V, Off, L);
)
...+>
(
-kunmap(page);
|
-kunmap_atomic(ptr);
)

// Remove any pointers left unused
@ depends on memset_rule1 @
identifier memset_rule1.ptr;
type VP, VP1;
@@

-VP ptr;
	... when != ptr;
? VP1 ptr;

//
// Catch all
//
@ memset_rule2 @
expression page;
identifier ptr;
expression GenTo, GenSize, GenValue;
type VP;
@@

(
-VP ptr = kmap(page);
|
-ptr = kmap(page);
|
-VP ptr = kmap_atomic(page);
|
-ptr = kmap_atomic(page);
)
<+...
(
//
// Some call sites have complex expressions within the memset/memcpy.
// The following are catch-alls which need to be evaluated by hand.
//
-memset(GenTo, 0, GenSize);
+memzero_pageExtra(page, GenTo, GenSize);
|
-memset(GenTo, GenValue, GenSize);
+memset_pageExtra(page, GenValue, GenTo, GenSize);
)
...+>
(
-kunmap(page);
|
-kunmap_atomic(ptr);
)

// Remove any pointers left unused
@ depends on memset_rule2 @
identifier memset_rule2.ptr;
type VP, VP1;
@@

-VP ptr;
	... when != ptr;
? VP1 ptr;

// </smpl>

Link: https://lkml.kernel.org/r/20210309212137.2610186-4-ira.weiny@intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
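For readers who want to reproduce the transformation: a script like the one
above is normally applied with Coccinelle's spatch driver, e.g.
`spatch --sp-file memzero_page.cocci --in-place --dir fs/btrfs` (the file name
memzero_page.cocci is illustrative; the patch does not name the script file).
Sites matched only by the catch-all rule, such as the btrfs_truncate_block()
conversion below, still have to be finished by hand.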
Parent: 28961998f8
Commit: d048b9c2a7
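For reference, memzero_page() bundles exactly the map/memset/unmap sequence
this patch deletes, built on kmap_local_page(). A minimal sketch, assuming the
kmap_local_page()-based form the commit message describes (see
include/linux/highmem.h for the authoritative definition):

    /*
     * Sketch only: maps the page CPU-locally, zeroes [offset, offset + len),
     * and drops the mapping.  Note it does not flush the dcache, which is
     * why the flush_dcache_page() calls survive in the hunks below.
     */
    static inline void memzero_page(struct page *page, size_t offset, size_t len)
    {
            char *addr = kmap_local_page(page);

            memset(addr + offset, 0, len);
            kunmap_local(addr);
    }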
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -591,16 +591,13 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		free_extent_map(em);
 
 		if (page->index == end_index) {
-			char *userpage;
 			size_t zero_offset = offset_in_page(isize);
 
 			if (zero_offset) {
 				int zeros;
 				zeros = PAGE_SIZE - zero_offset;
-				userpage = kmap_atomic(page);
-				memset(userpage + zero_offset, 0, zeros);
+				memzero_page(page, zero_offset, zeros);
 				flush_dcache_page(page);
-				kunmap_atomic(userpage);
 			}
 		}
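To make the tail-zeroing arithmetic concrete: with 4 KiB pages, if
isize == 10000 then offset_in_page(isize) == 10000 & 4095 == 1808, so the call
zeroes the final 4096 - 1808 == 2288 bytes of the last page (the numbers are
illustrative).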
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3421,15 +3421,12 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 	}
 
 	if (page->index == last_byte >> PAGE_SHIFT) {
-		char *userpage;
 		size_t zero_offset = offset_in_page(last_byte);
 
 		if (zero_offset) {
 			iosize = PAGE_SIZE - zero_offset;
-			userpage = kmap_atomic(page);
-			memset(userpage + zero_offset, 0, iosize);
+			memzero_page(page, zero_offset, iosize);
 			flush_dcache_page(page);
-			kunmap_atomic(userpage);
 		}
 	}
 	begin_page_read(fs_info, page);
@@ -3438,14 +3435,11 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 		u64 disk_bytenr;
 
 		if (cur >= last_byte) {
-			char *userpage;
 			struct extent_state *cached = NULL;
 
 			iosize = PAGE_SIZE - pg_offset;
-			userpage = kmap_atomic(page);
-			memset(userpage + pg_offset, 0, iosize);
+			memzero_page(page, pg_offset, iosize);
 			flush_dcache_page(page);
-			kunmap_atomic(userpage);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
 			unlock_extent_cached(tree, cur,
@@ -3528,13 +3522,10 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 
 		/* we've found a hole, just zero and go on */
 		if (block_start == EXTENT_MAP_HOLE) {
-			char *userpage;
 			struct extent_state *cached = NULL;
 
-			userpage = kmap_atomic(page);
-			memset(userpage + pg_offset, 0, iosize);
+			memzero_page(page, pg_offset, iosize);
 			flush_dcache_page(page);
-			kunmap_atomic(userpage);
 
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
@@ -3845,12 +3836,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	}
 
 	if (page->index == end_index) {
-		char *userpage;
-
-		userpage = kmap_atomic(page);
-		memset(userpage + pg_offset, 0,
-		       PAGE_SIZE - pg_offset);
-		kunmap_atomic(userpage);
+		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
 		flush_dcache_page(page);
 	}
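Beyond shrinking the code, these conversions change the mapping discipline:
kmap_atomic() enters a non-preemptible section, while the kmap_local_page()
mapping inside memzero_page() stays preemptible. A side-by-side sketch
(illustrative, not taken from the patch):

    /* old: preemption is disabled between map and unmap */
    char *userpage = kmap_atomic(page);
    memset(userpage + pg_offset, 0, iosize);
    kunmap_atomic(userpage);

    /* new: the mapping is CPU-local but the section may be preempted;
     * the dcache flush remains the caller's responsibility */
    memzero_page(page, pg_offset, iosize);
    flush_dcache_page(page);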
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -646,17 +646,12 @@ again:
 		if (!ret) {
 			unsigned long offset = offset_in_page(total_compressed);
 			struct page *page = pages[nr_pages - 1];
-			char *kaddr;
 
 			/* zero the tail end of the last page, we might be
 			 * sending it down to disk
 			 */
-			if (offset) {
-				kaddr = kmap_atomic(page);
-				memset(kaddr + offset, 0,
-				       PAGE_SIZE - offset);
-				kunmap_atomic(kaddr);
-			}
+			if (offset)
+				memzero_page(page, offset, PAGE_SIZE - offset);
 			will_compress = 1;
 		}
 	}
@@ -4833,7 +4828,6 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
 	struct btrfs_ordered_extent *ordered;
 	struct extent_state *cached_state = NULL;
 	struct extent_changeset *data_reserved = NULL;
-	char *kaddr;
 	bool only_release_metadata = false;
 	u32 blocksize = fs_info->sectorsize;
 	pgoff_t index = from >> PAGE_SHIFT;
@@ -4925,15 +4919,13 @@ again:
 	if (offset != blocksize) {
 		if (!len)
 			len = blocksize - offset;
-		kaddr = kmap(page);
 		if (front)
-			memset(kaddr + (block_start - page_offset(page)),
-			       0, offset);
+			memzero_page(page, (block_start - page_offset(page)),
+				     offset);
 		else
-			memset(kaddr + (block_start - page_offset(page)) + offset,
-			       0, len);
+			memzero_page(page, (block_start - page_offset(page)) + offset,
+				     len);
 		flush_dcache_page(page);
-		kunmap(page);
 	}
 	ClearPageChecked(page);
 	set_page_dirty(page);
@@ -6832,11 +6824,9 @@ static noinline int uncompress_inline(struct btrfs_path *path,
 	 * cover that region here.
 	 */
 
-	if (max_size + pg_offset < PAGE_SIZE) {
-		char *map = kmap(page);
-		memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
-		kunmap(page);
-	}
+	if (max_size + pg_offset < PAGE_SIZE)
+		memzero_page(page, pg_offset + max_size,
+			     PAGE_SIZE - max_size - pg_offset);
 	kfree(tmp);
 	return ret;
 }
@@ -8506,7 +8496,6 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
 	struct btrfs_ordered_extent *ordered;
 	struct extent_state *cached_state = NULL;
 	struct extent_changeset *data_reserved = NULL;
-	char *kaddr;
 	unsigned long zero_start;
 	loff_t size;
 	vm_fault_t ret;
@@ -8620,10 +8609,8 @@ again:
 		zero_start = PAGE_SIZE;
 
 	if (zero_start != PAGE_SIZE) {
-		kaddr = kmap(page);
-		memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
+		memzero_page(page, zero_start, PAGE_SIZE - zero_start);
 		flush_dcache_page(page);
-		kunmap(page);
 	}
 	ClearPageChecked(page);
 	set_page_dirty(page);
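Several of the inode.c sites above are exactly what the script's catch-all
rule exists for: the memset destination is a compound expression rather than
ptr + constant, so the replacement had to be completed by hand, carrying the
expression into the helper's offset argument, e.g.
memzero_page(page, (block_start - page_offset(page)) + offset, len) in
btrfs_truncate_block().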
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -129,12 +129,8 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
 	 * So what's in the range [500, 4095] corresponds to zeroes.
 	 */
 	if (datal < block_size) {
-		char *map;
-
-		map = kmap(page);
-		memset(map + datal, 0, block_size - datal);
+		memzero_page(page, datal, block_size - datal);
 		flush_dcache_page(page);
-		kunmap(page);
 	}
 
 	SetPageUptodate(page);
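Continuing the example from the comment in this hunk: with block_size == 4096
and datal == 500, the new call is memzero_page(page, 500, 4096 - 500), i.e.
memzero_page(page, 500, 3596), which clears exactly the range [500, 4095] the
comment describes.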
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -375,7 +375,6 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 	unsigned long bytes_left;
 	unsigned long total_out = 0;
 	unsigned long pg_offset = 0;
-	char *kaddr;
 
 	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
 	bytes_left = destlen;
@@ -455,9 +454,7 @@ next:
 	 * end of the inline extent (destlen) to the end of the page
 	 */
 	if (pg_offset < destlen) {
-		kaddr = kmap_atomic(dest_page);
-		memset(kaddr + pg_offset, 0, destlen - pg_offset);
-		kunmap_atomic(kaddr);
+		memzero_page(dest_page, pg_offset, destlen - pg_offset);
 	}
 	return ret;
 }
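These two hunks show the script's second rule ("Remove any pointers left
unused") at work: once the memset in the tail-zeroing path is rewritten, the
kaddr local in zlib_decompress() has no remaining user, so its declaration is
removed as well. The zstd hunks below follow the same shape.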
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -631,7 +631,6 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
 	size_t ret2;
 	unsigned long total_out = 0;
 	unsigned long pg_offset = 0;
-	char *kaddr;
 
 	stream = ZSTD_initDStream(
 		ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
@@ -696,9 +695,7 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
 	ret = 0;
 finish:
 	if (pg_offset < destlen) {
-		kaddr = kmap_atomic(dest_page);
-		memset(kaddr + pg_offset, 0, destlen - pg_offset);
-		kunmap_atomic(kaddr);
+		memzero_page(dest_page, pg_offset, destlen - pg_offset);
 	}
 	return ret;
 }
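A quick, admittedly coarse way to check for leftover open-coded sites after a
conversion like this is `git grep -n 'kmap_atomic' fs/btrfs` (illustrative; it
also matches legitimate remaining kmap_atomic() users that have nothing to do
with memset).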