btrfs: convert kmap to kmap_local_page, simple cases
Use a simple coccinelle script to help convert the most common
kmap()/kunmap() patterns to kmap_local_page()/kunmap_local().

Note that some kmaps which were caught by this script needed to be
handled by hand because of the strict unmapping order of kunmap_local(),
so they are not included in this patch. But this script got us started.

There's another temp variable added for the final length write to the
first page so it does not interfere with cpage_out, which is used for
mapping other pages.

The development of this patch was aided by the following coccinelle
script:

// <smpl>
// SPDX-License-Identifier: GPL-2.0-only
//
// Find kmap and replace with kmap_local_page then mark kunmap
//
// Confidence: Low
// Copyright: (C) 2021 Intel Corporation
// URL: http://coccinelle.lip6.fr/

@ catch_all @
expression e, e2;
@@

(
-kmap(e)
+kmap_local_page(e)
)
...
(
-kunmap(...)
+kunmap_local()
)

// </smpl>

Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Parent: cea628008f
Commit: 58c1a35cd5
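For readers less familiar with the local-mapping API, here is a minimal
sketch of the strict unmapping order the commit message refers to. It is
not code from this patch, and the helper name copy_between_pages() is
hypothetical: kmap_local_page() mappings nest like a stack, so
kunmap_local() must release them in reverse order of mapping.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper copying one whole page to another. */
static void copy_between_pages(struct page *dst_page, struct page *src_page)
{
	char *dst = kmap_local_page(dst_page);	/* mapping #1 */
	char *src = kmap_local_page(src_page);	/* mapping #2, nested */

	memcpy(dst, src, PAGE_SIZE);

	kunmap_local(src);	/* unmap #2 first ... */
	kunmap_local(dst);	/* ... then #1: strict reverse order */
}

Call sites where the unmaps could not be made to nest this way are the
ones the script skipped and that were converted by hand in later patches.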
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1611,7 +1611,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
 	curr_sample_pos = 0;
 	while (index < index_end) {
 		page = find_get_page(inode->i_mapping, index);
-		in_data = kmap(page);
+		in_data = kmap_local_page(page);
 		/* Handle case where the start is not aligned to PAGE_SIZE */
 		i = start % PAGE_SIZE;
 		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
@@ -1624,7 +1624,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
 			start += SAMPLING_INTERVAL;
 			curr_sample_pos += SAMPLING_READ_SIZE;
 		}
-		kunmap(page);
+		kunmap_local(in_data);
 		put_page(page);

 		index++;
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7019,7 +7019,7 @@ next:
 			if (ret)
 				goto out;
 		} else {
-			map = kmap(page);
+			map = kmap_local_page(page);
 			read_extent_buffer(leaf, map + pg_offset, ptr,
 					   copy_size);
 			if (pg_offset + copy_size < PAGE_SIZE) {
@@ -7027,7 +7027,7 @@ next:
 				       PAGE_SIZE - pg_offset -
 				       copy_size);
 			}
-			kunmap(page);
+			kunmap_local(map);
 		}
 		flush_dcache_page(page);
 	}
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -118,7 +118,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	int ret = 0;
 	char *data_in;
-	char *cpage_out;
+	char *cpage_out, *sizes_ptr;
 	int nr_pages = 0;
 	struct page *in_page = NULL;
 	struct page *out_page = NULL;
@@ -258,10 +258,9 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 	}

 	/* store the size of all chunks of compressed data */
-	cpage_out = kmap(pages[0]);
-	write_compress_length(cpage_out, tot_out);
-
-	kunmap(pages[0]);
+	sizes_ptr = kmap_local_page(pages[0]);
+	write_compress_length(sizes_ptr, tot_out);
+	kunmap_local(sizes_ptr);

 	ret = 0;
 	*total_out = tot_out;
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -2388,13 +2388,13 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,

 		/* Check scrubbing parity and repair it */
 		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
-		parity = kmap(p);
+		parity = kmap_local_page(p);
 		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
 			copy_page(parity, pointers[rbio->scrubp]);
 		else
 			/* Parity is right, needn't writeback */
 			bitmap_clear(rbio->dbitmap, pagenr, 1);
-		kunmap(p);
+		kunmap_local(parity);

 		for (stripe = 0; stripe < nr_data; stripe++)
 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
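The data-stripe unmap loop left untouched at the end of the hunk above is
one of the cases the commit message says had to be handled by hand: those
pages were mapped earlier in the function (their addresses live in
pointers[]), so a local-mapping conversion has to release them in strict
reverse order. A hedged sketch of what such a conversion could look like,
with a hypothetical helper name; this is not code from the patch:

#include <linux/highmem.h>

/*
 * Hypothetical sketch: pages mapped with kmap_local_page() in
 * ascending stripe order must be unmapped last-in first-out,
 * so the release loop walks backwards.
 */
static void unmap_data_stripes(void **pointers, int nr_data)
{
	int stripe;

	for (stripe = nr_data - 1; stripe >= 0; stripe--)
		kunmap_local(pointers[stripe]);
}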