// SPDX-License-Identifier: GPL-2.0
/*
 * Data verification functions, i.e. hooks for ->readahead()
 *
 * Copyright 2019 Google LLC
 */

#include "fsverity_private.h"

#include <crypto/hash.h>
#include <linux/bio.h>

static struct workqueue_struct *fsverity_read_workqueue;

static inline int cmp_hashes(const struct fsverity_info *vi,
			     const u8 *want_hash, const u8 *real_hash,
			     u64 data_pos, int level)
{
	const unsigned int hsize = vi->tree_params.digest_size;

	if (memcmp(want_hash, real_hash, hsize) == 0)
		return 0;

	fsverity_err(vi->inode,
		     "FILE CORRUPTED! pos=%llu, level=%d, want_hash=%s:%*phN, real_hash=%s:%*phN",
		     data_pos, level,
		     vi->tree_params.hash_alg->name, hsize, want_hash,
		     vi->tree_params.hash_alg->name, hsize, real_hash);
	return -EBADMSG;
}
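
/*
 * In the message logged above, @level identifies where in the tree the
 * mismatch was found: -1 means the data block itself, 0 the first hash block
 * level above the data, and so on up toward the root.  With hypothetical
 * hash values, a leaf-level mismatch on a SHA-256 tree would log roughly:
 *
 *	FILE CORRUPTED! pos=81920, level=-1, want_hash=sha256:39ab...,
 *	real_hash=sha256:f1c2...
 */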

static bool data_is_zeroed(struct inode *inode, struct page *page,
			   unsigned int len, unsigned int offset)
{
	void *virt = kmap_local_page(page);

	if (memchr_inv(virt + offset, 0, len)) {
		kunmap_local(virt);
		fsverity_err(inode,
			     "FILE CORRUPTED! Data past EOF is not zeroed");
		return false;
	}
	kunmap_local(virt);
	return true;
}
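
/*
 * Note: memchr_inv() returns the address of the first byte in the range that
 * differs from the given byte (here 0), or NULL if every byte matches, so a
 * non-NULL result above means a nonzero byte was found past EOF.
 */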

/*
 * Returns true if the hash block with index @hblock_idx in the tree, located
 * in @hpage, has already been verified.
 */
static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,
				   unsigned long hblock_idx)
{
	bool verified;
	unsigned int blocks_per_page;
	unsigned int i;

	/*
	 * When the Merkle tree block size and page size are the same, then the
	 * ->hash_block_verified bitmap isn't allocated, and we use PG_checked
	 * to directly indicate whether the page's block has been verified.
	 *
	 * Using PG_checked also guarantees that we re-verify hash pages that
	 * get evicted and re-instantiated from the backing storage, as new
	 * pages always start out with PG_checked cleared.
	 */
	if (!vi->hash_block_verified)
		return PageChecked(hpage);

	/*
	 * When the Merkle tree block size and page size differ, we use a bitmap
	 * to indicate whether each hash block has been verified.
	 *
	 * However, we still need to ensure that hash pages that get evicted and
	 * re-instantiated from the backing storage are re-verified.  To do
	 * this, we use PG_checked again, but now it doesn't really mean
	 * "checked".  Instead, now it just serves as an indicator for whether
	 * the hash page is newly instantiated or not.
	 *
	 * The first thread that sees PG_checked=0 must clear the corresponding
	 * bitmap bits, then set PG_checked=1.  This requires a spinlock.  To
	 * avoid having to take this spinlock in the common case of
	 * PG_checked=1, we start with an opportunistic lockless read.
	 */
	if (PageChecked(hpage)) {
		/*
		 * A read memory barrier is needed here to give ACQUIRE
		 * semantics to the above PageChecked() test.
		 */
		smp_rmb();
		return test_bit(hblock_idx, vi->hash_block_verified);
	}
	spin_lock(&vi->hash_page_init_lock);
	if (PageChecked(hpage)) {
		verified = test_bit(hblock_idx, vi->hash_block_verified);
	} else {
		blocks_per_page = vi->tree_params.blocks_per_page;
		hblock_idx = round_down(hblock_idx, blocks_per_page);
		for (i = 0; i < blocks_per_page; i++)
			clear_bit(hblock_idx + i, vi->hash_block_verified);
		/*
		 * A write memory barrier is needed here to give RELEASE
		 * semantics to the below SetPageChecked() operation.
		 */
		smp_wmb();
		SetPageChecked(hpage);
		verified = false;
	}
	spin_unlock(&vi->hash_page_init_lock);
	return verified;
}
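
/*
 * Worked example (hypothetical geometry): with 4K pages and 1K Merkle tree
 * blocks, blocks_per_page == 4.  If @hblock_idx == 13, the block lives in
 * hash page 3 (13 >> 2).  The first thread to see that page with PG_checked
 * clear rounds 13 down to 12 and clears bits 12..15, i.e. every block in the
 * page, before setting PG_checked.
 */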

/*
 * Verify a single data block against the file's Merkle tree.
 *
 * In principle, we need to verify the entire path to the root node.  However,
 * for efficiency the filesystem may cache the hash blocks.  Therefore we need
 * only ascend the tree until an already-verified hash block is seen, and then
 * verify the path to that block.
 *
 * Return: %true if the data block is valid, else %false.
 */
static bool
verify_data_block(struct inode *inode, struct fsverity_info *vi,
		  struct ahash_request *req, struct page *data_page,
		  u64 data_pos, unsigned int dblock_offset_in_page,
		  unsigned long max_ra_pages)
{
	const struct merkle_tree_params *params = &vi->tree_params;
	const unsigned int hsize = params->digest_size;
	int level;
	u8 _want_hash[FS_VERITY_MAX_DIGEST_SIZE];
	const u8 *want_hash;
	u8 real_hash[FS_VERITY_MAX_DIGEST_SIZE];
	/* The hash blocks that are traversed, indexed by level */
	struct {
		/* Page containing the hash block */
		struct page *page;
		/* Index of the hash block in the tree overall */
		unsigned long index;
		/* Byte offset of the hash block within @page */
		unsigned int offset_in_page;
		/* Byte offset of the wanted hash within @page */
		unsigned int hoffset;
	} hblocks[FS_VERITY_MAX_LEVELS];
	/*
	 * The index of the previous level's block within that level; also the
	 * index of that block's hash within the current level.
	 */
	u64 hidx = data_pos >> params->log_blocksize;
	int err;

	if (unlikely(data_pos >= inode->i_size)) {
		/*
		 * This can happen in the data page spanning EOF when the
		 * Merkle tree block size is less than the page size.  The
		 * Merkle tree doesn't cover data blocks fully past EOF.  But
		 * the entire page spanning EOF can be visible to userspace
		 * via a mmap, and any part past EOF should be all zeroes.
		 * Therefore, we need to verify that any data blocks fully
		 * past EOF are all zeroes.
		 */
		return data_is_zeroed(inode, data_page, params->block_size,
				      dblock_offset_in_page);
	}
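
	/*
	 * Example of the above (assumed numbers): with i_size == 5000, 4K
	 * blocks, and a 16K page, blocks 0 and 1 contain data and are covered
	 * by the tree, but blocks 2 and 3 ([8192, 16384)) lie fully past EOF
	 * and are only checked to be all zeroes.
	 */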

	/*
	 * Starting at the leaf level, ascend the tree saving hash blocks along
	 * the way until we find a hash block that has already been verified,
	 * or until we reach the root.
	 */
	for (level = 0; level < params->num_levels; level++) {
		unsigned long next_hidx;
		unsigned long hblock_idx;
		pgoff_t hpage_idx;
		unsigned int hblock_offset_in_page;
		unsigned int hoffset;
		struct page *hpage;

		/*
		 * The index of the block in the current level; also the index
		 * of that block's hash within the next level.
		 */
		next_hidx = hidx >> params->log_arity;

		/* Index of the hash block in the tree overall */
		hblock_idx = params->level_start[level] + next_hidx;

		/* Index of the hash page in the tree overall */
		hpage_idx = hblock_idx >> params->log_blocks_per_page;

		/* Byte offset of the hash block within the page */
		hblock_offset_in_page =
			(hblock_idx << params->log_blocksize) & ~PAGE_MASK;

		/* Byte offset of the hash within the page */
		hoffset = hblock_offset_in_page +
			  ((hidx << params->log_digestsize) &
			   (params->block_size - 1));
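
		/*
		 * Worked example (hypothetical): with SHA-256 (32-byte
		 * digests) and 4K blocks, log_arity == 7 and
		 * log_digestsize == 5.  For hidx == 1000, next_hidx ==
		 * 1000 >> 7 == 7, and the wanted hash starts at byte
		 * (1000 << 5) & 4095 == 3328 within its hash block
		 * (1000 % 128 == 104, and 104 * 32 == 3328).
		 */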

		hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode,
				hpage_idx, level == 0 ? min(max_ra_pages,
					params->tree_pages - hpage_idx) : 0);
		if (IS_ERR(hpage)) {
			err = PTR_ERR(hpage);
			fsverity_err(inode,
				     "Error %d reading Merkle tree page %lu",
				     err, hpage_idx);
			goto out;
		}
		if (is_hash_block_verified(vi, hpage, hblock_idx)) {
			memcpy_from_page(_want_hash, hpage, hoffset, hsize);
			want_hash = _want_hash;
			put_page(hpage);
			goto descend;
		}
		hblocks[level].page = hpage;
		hblocks[level].index = hblock_idx;
		hblocks[level].offset_in_page = hblock_offset_in_page;
		hblocks[level].hoffset = hoffset;
		hidx = next_hidx;
	}

	want_hash = vi->root_hash;
descend:
	/* Descend the tree verifying hash blocks. */
	for (; level > 0; level--) {
		struct page *hpage = hblocks[level - 1].page;
		unsigned long hblock_idx = hblocks[level - 1].index;
		unsigned int hblock_offset_in_page =
			hblocks[level - 1].offset_in_page;
		unsigned int hoffset = hblocks[level - 1].hoffset;

		err = fsverity_hash_block(params, inode, req, hpage,
					  hblock_offset_in_page, real_hash);
		if (err)
			goto out;
		err = cmp_hashes(vi, want_hash, real_hash, data_pos, level - 1);
		if (err)
			goto out;
		/*
		 * Mark the hash block as verified.  This must be atomic and
		 * idempotent, as the same hash block might be verified by
		 * multiple threads concurrently.
		 */
		if (vi->hash_block_verified)
			set_bit(hblock_idx, vi->hash_block_verified);
		else
			SetPageChecked(hpage);
		memcpy_from_page(_want_hash, hpage, hoffset, hsize);
		want_hash = _want_hash;
		put_page(hpage);
	}

	/* Finally, verify the data block. */
	err = fsverity_hash_block(params, inode, req, data_page,
				  dblock_offset_in_page, real_hash);
	if (err)
		goto out;
	err = cmp_hashes(vi, want_hash, real_hash, data_pos, -1);
out:
	for (; level > 0; level--)
		put_page(hblocks[level - 1].page);

	return err == 0;
}
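
/*
 * For a sense of scale (assumed parameters: SHA-256 with 4K blocks, hence an
 * arity of 128): a 1 GiB file has 262144 data blocks, so level 0 holds 2048
 * hash blocks, level 1 holds 16, and level 2 is the single root block, giving
 * num_levels == 3.  With cached, already-verified level-0 blocks, the ascent
 * above usually stops after a single step.
 */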

static bool
verify_data_blocks(struct inode *inode, struct fsverity_info *vi,
		   struct ahash_request *req, struct folio *data_folio,
		   size_t len, size_t offset, unsigned long max_ra_pages)
{
	const unsigned int block_size = vi->tree_params.block_size;
	u64 pos = (u64)data_folio->index << PAGE_SHIFT;

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offset, block_size)))
		return false;
	if (WARN_ON_ONCE(!folio_test_locked(data_folio) ||
			 folio_test_uptodate(data_folio)))
		return false;
	do {
		struct page *data_page =
			folio_page(data_folio, offset >> PAGE_SHIFT);

		if (!verify_data_block(inode, vi, req, data_page, pos + offset,
				       offset & ~PAGE_MASK, max_ra_pages))
			return false;
		offset += block_size;
		len -= block_size;
	} while (len);
	return true;
}
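
/*
 * Illustration (assumed sizes): verifying all of a 16K folio with a 4K Merkle
 * tree block size takes four trips through the loop above, at offsets 0,
 * 4096, 8192, and 12288; each iteration resolves the page within the folio
 * and the block's offset within that page before calling verify_data_block().
 */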

/**
 * fsverity_verify_blocks() - verify data in a folio
 * @folio: the folio containing the data to verify
 * @len: the length of the data to verify in the folio
 * @offset: the offset of the data to verify in the folio
 *
 * Verify data that has just been read from a verity file.  The data must be
 * located in a pagecache folio that is still locked and not yet uptodate.  The
 * length and offset of the data must be Merkle tree block size aligned.
 *
 * Return: %true if the data is valid, else %false.
 */
bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset)
{
	struct inode *inode = folio->mapping->host;
	struct fsverity_info *vi = inode->i_verity_info;
	struct ahash_request *req;
	bool valid;

	/* This allocation never fails, since it's mempool-backed. */
	req = fsverity_alloc_hash_request(vi->tree_params.hash_alg, GFP_NOFS);

	valid = verify_data_blocks(inode, vi, req, folio, len, offset, 0);

	fsverity_free_hash_request(vi->tree_params.hash_alg, req);

	return valid;
}
EXPORT_SYMBOL_GPL(fsverity_verify_blocks);

#ifdef CONFIG_BLOCK
/**
 * fsverity_verify_bio() - verify a 'read' bio that has just completed
 * @bio: the bio to verify
 *
 * Verify the bio's data against the file's Merkle tree.  All bio data segments
 * must be aligned to the file's Merkle tree block size.  If any data fails
 * verification, then bio->bi_status is set to an error status.
 *
 * This is a helper function for use by the ->readahead() method of filesystems
 * that issue bios to read data directly into the page cache.  Filesystems that
 * populate the page cache without issuing bios (e.g. non block-based
 * filesystems) must instead call fsverity_verify_page() directly on each page.
 * All filesystems must also call fsverity_verify_page() on holes.
 */
void fsverity_verify_bio(struct bio *bio)
{
	struct inode *inode = bio_first_page_all(bio)->mapping->host;
	struct fsverity_info *vi = inode->i_verity_info;
	struct ahash_request *req;
	struct folio_iter fi;
	unsigned long max_ra_pages = 0;

	/* This allocation never fails, since it's mempool-backed. */
	req = fsverity_alloc_hash_request(vi->tree_params.hash_alg, GFP_NOFS);
2019-07-22 19:26:22 +03:00
|
|
|
|
fs-verity: implement readahead of Merkle tree pages
When fs-verity verifies data pages, currently it reads each Merkle tree
page synchronously using read_mapping_page().
Therefore, when the Merkle tree pages aren't already cached, fs-verity
causes an extra 4 KiB I/O request for every 512 KiB of data (assuming
that the Merkle tree uses SHA-256 and 4 KiB blocks). This results in
more I/O requests and performance loss than is strictly necessary.
Therefore, implement readahead of the Merkle tree pages.
For simplicity, we take advantage of the fact that the kernel already
does readahead of the file's *data*, just like it does for any other
file. Due to this, we don't really need a separate readahead state
(struct file_ra_state) just for the Merkle tree, but rather we just need
to piggy-back on the existing data readahead requests.
We also only really need to bother with the first level of the Merkle
tree, since the usual fan-out factor is 128, so normally over 99% of
Merkle tree I/O requests are for the first level.
Therefore, make fsverity_verify_bio() enable readahead of the first
Merkle tree level, for up to 1/4 the number of pages in the bio, when it
sees that the REQ_RAHEAD flag is set on the bio. The readahead size is
then passed down to ->read_merkle_tree_page() for the filesystem to
(optionally) implement if it sees that the requested page is uncached.
While we're at it, also make build_merkle_tree_level() set the Merkle
tree readahead size, since it's easy to do there.
However, for now don't set the readahead size in fsverity_verify_page(),
since currently it's only used to verify holes on ext4 and f2fs, and it
would need parameters added to know how much to read ahead.
This patch significantly improves fs-verity sequential read performance.
Some quick benchmarks with 'cat'-ing a 250MB file after dropping caches:
On an ARM64 phone (using sha256-ce):
Before: 217 MB/s
After: 263 MB/s
(compare to sha256sum of non-verity file: 357 MB/s)
In an x86_64 VM (using sha256-avx2):
Before: 173 MB/s
After: 215 MB/s
(compare to sha256sum of non-verity file: 223 MB/s)
Link: https://lore.kernel.org/r/20200106205533.137005-1-ebiggers@kernel.org
Reviewed-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2020-01-06 23:55:33 +03:00

	if (bio->bi_opf & REQ_RAHEAD) {
		/*
		 * If this bio is for data readahead, then we also do readahead
		 * of the first (largest) level of the Merkle tree.  Namely,
		 * when a Merkle tree page is read, we also try to piggy-back
		 * on some additional pages -- up to 1/4 the number of data
		 * pages.
		 *
		 * This improves sequential read performance, as it greatly
		 * reduces the number of I/O requests made to the Merkle tree.
		 */
		max_ra_pages = bio->bi_iter.bi_size >> (PAGE_SHIFT + 2);
	}
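
	/*
	 * E.g. (assuming 4K pages), a 512 KiB readahead bio covers 128 data
	 * pages, so bi_size >> (PAGE_SHIFT + 2) allows up to 32 Merkle tree
	 * pages to be read ahead alongside the level-0 page that verification
	 * actually needs.
	 */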

	bio_for_each_folio_all(fi, bio) {
		if (!verify_data_blocks(inode, vi, req, fi.folio, fi.length,
					fi.offset, max_ra_pages)) {
			bio->bi_status = BLK_STS_IOERR;
			break;
		}
	}

	fsverity_free_hash_request(vi->tree_params.hash_alg, req);
}
EXPORT_SYMBOL_GPL(fsverity_verify_bio);
#endif /* CONFIG_BLOCK */

/**
 * fsverity_enqueue_verify_work() - enqueue work on the fs-verity workqueue
 * @work: the work to enqueue
 *
 * Enqueue verification work for asynchronous processing.
 */
void fsverity_enqueue_verify_work(struct work_struct *work)
{
	queue_work(fsverity_read_workqueue, work);
}
EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
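
/*
 * A sketch of the intended call pattern (hypothetical names, not taken from
 * any particular filesystem): a block-based filesystem typically defers
 * verification of a completed read bio to this workqueue, e.g.
 *
 *	static void fs_read_end_io(struct bio *bio)
 *	{
 *		if (bio->bi_status == BLK_STS_OK && fsverity_active(inode))
 *			fsverity_enqueue_verify_work(&ctx->work);
 *		else
 *			complete_the_read(bio);
 *	}
 *
 * where the queued work item calls fsverity_verify_bio(bio) and then
 * completes the read, since hashing is too CPU-intensive to run in the bio
 * completion (often interrupt) context.
 */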

int __init fsverity_init_workqueue(void)
{
	/*
	 * Use an unbound workqueue to allow bios to be verified in parallel
	 * even when they happen to complete on the same CPU.  This sacrifices
	 * locality, but it's worthwhile since hashing is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize verification work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
						  WQ_UNBOUND | WQ_HIGHPRI,
						  num_online_cpus());
	if (!fsverity_read_workqueue)
		return -ENOMEM;
	return 0;
}

void __init fsverity_exit_workqueue(void)
{
	destroy_workqueue(fsverity_read_workqueue);
	fsverity_read_workqueue = NULL;
}