btrfs: remove the second argument of k[un]map_atomic()
Signed-off-by: Cong Wang <amwang@redhat.com>
Parent: e8e3c3d66f
Commit: 7ac687d9e0
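All hunks in this commit apply the same mechanical conversion: the per-CPU slot argument (KM_USER0, KM_IRQ0) is dropped because the stack-based kmap_atomic() no longer takes a slot type. As a minimal illustration of the calling-convention change (zero_page_tail() below is a hypothetical helper written for this note, not part of the diff):

#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic(), flush_dcache_page() */
#include <linux/mm.h>		/* struct page, PAGE_SIZE */
#include <linux/string.h>	/* memset() */

/*
 * Hypothetical helper, for illustration only: zero the tail of a
 * (possibly highmem) page through a short-lived atomic mapping.
 * This is the pattern repeated throughout the hunks below.
 */
static void zero_page_tail(struct page *page, unsigned int offset)
{
	char *kaddr;

	/* old API: kaddr = kmap_atomic(page, KM_USER0); */
	kaddr = kmap_atomic(page);
	memset(kaddr + offset, 0, PAGE_SIZE - offset);
	flush_dcache_page(page);
	/* old API: kunmap_atomic(kaddr, KM_USER0); */
	kunmap_atomic(kaddr);
}

The KM_IRQ0 call in btrfs_endio_direct_read() is converted the same way; slot selection is handled internally by the stacked kmap_atomic() implementation, so callers no longer distinguish user and IRQ slots.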
@@ -120,10 +120,10 @@ static int check_compressed_csum(struct inode *inode,
 page = cb->compressed_pages[i];
 csum = ~(u32)0;
 
-kaddr = kmap_atomic(page, KM_USER0);
+kaddr = kmap_atomic(page);
 csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
 btrfs_csum_final(csum, (char *)&csum);
-kunmap_atomic(kaddr, KM_USER0);
+kunmap_atomic(kaddr);
 
 if (csum != *cb_sum) {
 printk(KERN_INFO "btrfs csum failed ino %llu "
@@ -521,10 +521,10 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 if (zero_offset) {
 int zeros;
 zeros = PAGE_CACHE_SIZE - zero_offset;
-userpage = kmap_atomic(page, KM_USER0);
+userpage = kmap_atomic(page);
 memset(userpage + zero_offset, 0, zeros);
 flush_dcache_page(page);
-kunmap_atomic(userpage, KM_USER0);
+kunmap_atomic(userpage);
 }
 }
 
@@ -993,9 +993,9 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 bytes = min(PAGE_CACHE_SIZE - *pg_offset,
 PAGE_CACHE_SIZE - buf_offset);
 bytes = min(bytes, working_bytes);
-kaddr = kmap_atomic(page_out, KM_USER0);
+kaddr = kmap_atomic(page_out);
 memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
-kunmap_atomic(kaddr, KM_USER0);
+kunmap_atomic(kaddr);
 flush_dcache_page(page_out);
 
 *pg_offset += bytes;
@@ -2546,10 +2546,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 
 if (zero_offset) {
 iosize = PAGE_CACHE_SIZE - zero_offset;
-userpage = kmap_atomic(page, KM_USER0);
+userpage = kmap_atomic(page);
 memset(userpage + zero_offset, 0, iosize);
 flush_dcache_page(page);
-kunmap_atomic(userpage, KM_USER0);
+kunmap_atomic(userpage);
 }
 }
 while (cur <= end) {
@@ -2558,10 +2558,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 struct extent_state *cached = NULL;
 
 iosize = PAGE_CACHE_SIZE - pg_offset;
-userpage = kmap_atomic(page, KM_USER0);
+userpage = kmap_atomic(page);
 memset(userpage + pg_offset, 0, iosize);
 flush_dcache_page(page);
-kunmap_atomic(userpage, KM_USER0);
+kunmap_atomic(userpage);
 set_extent_uptodate(tree, cur, cur + iosize - 1,
 &cached, GFP_NOFS);
 unlock_extent_cached(tree, cur, cur + iosize - 1,
@@ -2607,10 +2607,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 char *userpage;
 struct extent_state *cached = NULL;
 
-userpage = kmap_atomic(page, KM_USER0);
+userpage = kmap_atomic(page);
 memset(userpage + pg_offset, 0, iosize);
 flush_dcache_page(page);
-kunmap_atomic(userpage, KM_USER0);
+kunmap_atomic(userpage);
 
 set_extent_uptodate(tree, cur, cur + iosize - 1,
 &cached, GFP_NOFS);
@@ -2756,10 +2756,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 if (page->index == end_index) {
 char *userpage;
 
-userpage = kmap_atomic(page, KM_USER0);
+userpage = kmap_atomic(page);
 memset(userpage + pg_offset, 0,
 PAGE_CACHE_SIZE - pg_offset);
-kunmap_atomic(userpage, KM_USER0);
+kunmap_atomic(userpage);
 flush_dcache_page(page);
 }
 pg_offset = 0;
@@ -447,13 +447,13 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 sums->bytenr = ordered->start;
 }
 
-data = kmap_atomic(bvec->bv_page, KM_USER0);
+data = kmap_atomic(bvec->bv_page);
 sector_sum->sum = ~(u32)0;
 sector_sum->sum = btrfs_csum_data(root,
 data + bvec->bv_offset,
 sector_sum->sum,
 bvec->bv_len);
-kunmap_atomic(data, KM_USER0);
+kunmap_atomic(data);
 btrfs_csum_final(sector_sum->sum,
 (char *)&sector_sum->sum);
 sector_sum->bytenr = disk_bytenr;
@@ -173,9 +173,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
 cur_size = min_t(unsigned long, compressed_size,
 PAGE_CACHE_SIZE);
 
-kaddr = kmap_atomic(cpage, KM_USER0);
+kaddr = kmap_atomic(cpage);
 write_extent_buffer(leaf, kaddr, ptr, cur_size);
-kunmap_atomic(kaddr, KM_USER0);
+kunmap_atomic(kaddr);
 
 i++;
 ptr += cur_size;
@@ -187,10 +187,10 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
 page = find_get_page(inode->i_mapping,
 start >> PAGE_CACHE_SHIFT);
 btrfs_set_file_extent_compression(leaf, ei, 0);
-kaddr = kmap_atomic(page, KM_USER0);
+kaddr = kmap_atomic(page);
 offset = start & (PAGE_CACHE_SIZE - 1);
 write_extent_buffer(leaf, kaddr + offset, ptr, size);
-kunmap_atomic(kaddr, KM_USER0);
+kunmap_atomic(kaddr);
 page_cache_release(page);
 }
 btrfs_mark_buffer_dirty(leaf);
@@ -422,10 +422,10 @@ again:
 * sending it down to disk
 */
 if (offset) {
-kaddr = kmap_atomic(page, KM_USER0);
+kaddr = kmap_atomic(page);
 memset(kaddr + offset, 0,
 PAGE_CACHE_SIZE - offset);
-kunmap_atomic(kaddr, KM_USER0);
+kunmap_atomic(kaddr);
 }
 will_compress = 1;
 }
@@ -1873,7 +1873,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 } else {
 ret = get_state_private(io_tree, start, &private);
 }
-kaddr = kmap_atomic(page, KM_USER0);
+kaddr = kmap_atomic(page);
 if (ret)
 goto zeroit;
 
@@ -1882,7 +1882,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 if (csum != private)
 goto zeroit;
 
-kunmap_atomic(kaddr, KM_USER0);
+kunmap_atomic(kaddr);
 good:
 return 0;
 
@@ -1894,7 +1894,7 @@ zeroit:
 (unsigned long long)private);
 memset(kaddr + offset, 1, end - start + 1);
 flush_dcache_page(page);
-kunmap_atomic(kaddr, KM_USER0);
+kunmap_atomic(kaddr);
 if (private == 0)
 return 0;
 return -EIO;
@@ -4937,12 +4937,12 @@ static noinline int uncompress_inline(struct btrfs_path *path,
 ret = btrfs_decompress(compress_type, tmp, page,
 extent_offset, inline_size, max_size);
 if (ret) {
-char *kaddr = kmap_atomic(page, KM_USER0);
+char *kaddr = kmap_atomic(page);
 unsigned long copy_size = min_t(u64,
 PAGE_CACHE_SIZE - pg_offset,
 max_size - extent_offset);
 memset(kaddr + pg_offset, 0, copy_size);
-kunmap_atomic(kaddr, KM_USER0);
+kunmap_atomic(kaddr);
 }
 kfree(tmp);
 return 0;
@@ -5719,11 +5719,11 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
 unsigned long flags;
 
 local_irq_save(flags);
-kaddr = kmap_atomic(page, KM_IRQ0);
+kaddr = kmap_atomic(page);
 csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
 csum, bvec->bv_len);
 btrfs_csum_final(csum, (char *)&csum);
-kunmap_atomic(kaddr, KM_IRQ0);
+kunmap_atomic(kaddr);
 local_irq_restore(flags);
 
 flush_dcache_page(bvec->bv_page);
@@ -411,9 +411,9 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
 
 bytes = min_t(unsigned long, destlen, out_len - start_byte);
 
-kaddr = kmap_atomic(dest_page, KM_USER0);
+kaddr = kmap_atomic(dest_page);
 memcpy(kaddr, workspace->buf + start_byte, bytes);
-kunmap_atomic(kaddr, KM_USER0);
+kunmap_atomic(kaddr);
 out:
 return ret;
 }
@@ -591,7 +591,7 @@ static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
 u64 flags = sbio->spag[ix].flags;
 
 page = sbio->bio->bi_io_vec[ix].bv_page;
-buffer = kmap_atomic(page, KM_USER0);
+buffer = kmap_atomic(page);
 if (flags & BTRFS_EXTENT_FLAG_DATA) {
 ret = scrub_checksum_data(sbio->sdev,
 sbio->spag + ix, buffer);
@@ -603,7 +603,7 @@ static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
 } else {
 WARN_ON(1);
 }
-kunmap_atomic(buffer, KM_USER0);
+kunmap_atomic(buffer);
 
 return ret;
 }
@@ -792,7 +792,7 @@ static void scrub_checksum(struct btrfs_work *work)
 }
 for (i = 0; i < sbio->count; ++i) {
 page = sbio->bio->bi_io_vec[i].bv_page;
-buffer = kmap_atomic(page, KM_USER0);
+buffer = kmap_atomic(page);
 flags = sbio->spag[i].flags;
 logical = sbio->logical + i * PAGE_SIZE;
 ret = 0;
@@ -807,7 +807,7 @@ static void scrub_checksum(struct btrfs_work *work)
 } else {
 WARN_ON(1);
 }
-kunmap_atomic(buffer, KM_USER0);
+kunmap_atomic(buffer);
 if (ret) {
 ret = scrub_recheck_error(sbio, i);
 if (!ret) {
@@ -370,9 +370,9 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 PAGE_CACHE_SIZE - buf_offset);
 bytes = min(bytes, bytes_left);
 
-kaddr = kmap_atomic(dest_page, KM_USER0);
+kaddr = kmap_atomic(dest_page);
 memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
-kunmap_atomic(kaddr, KM_USER0);
+kunmap_atomic(kaddr);
 
 pg_offset += bytes;
 bytes_left -= bytes;