CacheFiles: Don't write a full page if there's only a partial page to cache
cachefiles_write_page() writes a full page to the backing file for the last page of the netfs file, even if the netfs file's last page is only a partial page.

This causes the EOF on the backing file to be extended beyond the EOF of the netfs file, and thus the backing file will be truncated by cachefiles_attr_changed() called from cachefiles_lookup_object().

So we need to limit the write we make to the backing file on that last page such that it doesn't push the EOF too far.

Also, if a backing file that has a partial page at the end is expanded, we discard the partial page and refetch it on the basis that we then have a hole in the file with invalid data, and should the power go out...  A better way to deal with this could be to record a note that the partial page contains invalid data until the correct data is written into it.

This isn't a problem for netfses that discard the whole backing file if the file size changes (such as NFS).

Signed-off-by: David Howells <dhowells@redhat.com>
Parent: 868411be3f
Commit: a17754fb8c
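By way of illustration (not part of the patch), here is a minimal standalone sketch of the clamping arithmetic the fix relies on. clamp_page_write() is a hypothetical helper name, and the 4 KiB PAGE_SIZE is an assumption matching the common x86 configuration:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)	/* 4096, assumed */
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Clamp a one-page write at 'pos' so it never extends past 'eof'.
 * This mirrors the logic the patch adds to cachefiles_write_page(). */
static size_t clamp_page_write(uint64_t pos, uint64_t eof)
{
	size_t len = PAGE_SIZE;

	if (eof & ~PAGE_MASK) {		/* EOF is mid-page: a partial tail exists */
		assert(pos < eof);
		if (eof - pos < PAGE_SIZE)	/* this is the last, partial page */
			len = eof - pos;
	}
	return len;
}

int main(void)
{
	/* A 10000-byte file: pages 0 and 1 are full, page 2 holds 1808 bytes. */
	assert(clamp_page_write(0 * PAGE_SIZE, 10000) == PAGE_SIZE);
	assert(clamp_page_write(2 * PAGE_SIZE, 10000) == 10000 - 2 * PAGE_SIZE);
	return 0;
}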
fs/cachefiles/interface.c
@@ -404,12 +404,26 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
 	if (oi_size == ni_size)
 		return 0;
 
-	newattrs.ia_size = ni_size;
-	newattrs.ia_valid = ATTR_SIZE;
-
 	cachefiles_begin_secure(cache, &saved_cred);
 	mutex_lock(&object->backer->d_inode->i_mutex);
+
+	/* if there's an extension to a partial page at the end of the backing
+	 * file, we need to discard the partial page so that we pick up new
+	 * data after it */
+	if (oi_size & ~PAGE_MASK && ni_size > oi_size) {
+		_debug("discard tail %llx", oi_size);
+		newattrs.ia_valid = ATTR_SIZE;
+		newattrs.ia_size = oi_size & PAGE_MASK;
+		ret = notify_change(object->backer, &newattrs);
+		if (ret < 0)
+			goto truncate_failed;
+	}
+
+	newattrs.ia_valid = ATTR_SIZE;
+	newattrs.ia_size = ni_size;
 	ret = notify_change(object->backer, &newattrs);
+
+truncate_failed:
 	mutex_unlock(&object->backer->d_inode->i_mutex);
 	cachefiles_end_secure(cache, saved_cred);
 
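The round-down in the hunk above is what discards the stale partial tail before the file is extended. A standalone sketch of that arithmetic, with an assumed 4 KiB page and example sizes that are not from the patch:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL		/* assumed */
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t oi_size = 10000;	/* old backing-file size, partial tail */
	uint64_t ni_size = 20000;	/* new (expanded) netfs file size */

	/* The hunk's test: a partial last page that is being extended over. */
	assert((oi_size & ~PAGE_MASK) && ni_size > oi_size);

	/* First truncation target: round the old size down to a page
	 * boundary, discarding the 1808 stale tail bytes; the second
	 * notify_change() then extends the file to ni_size, leaving a
	 * hole the netfs will refetch. */
	assert((oi_size & PAGE_MASK) == 8192);
	return 0;
}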
fs/cachefiles/rdwr.c
@@ -803,7 +803,8 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
 	struct cachefiles_cache *cache;
 	mm_segment_t old_fs;
 	struct file *file;
-	loff_t pos;
+	loff_t pos, eof;
+	size_t len;
 	void *data;
 	int ret;
 
@@ -837,15 +838,29 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
 	ret = -EIO;
 	if (file->f_op->write) {
 		pos = (loff_t) page->index << PAGE_SHIFT;
+
+		/* we mustn't write more data than we have, so we have
+		 * to beware of a partial page at EOF */
+		eof = object->fscache.store_limit_l;
+		len = PAGE_SIZE;
+		if (eof & ~PAGE_MASK) {
+			ASSERTCMP(pos, <, eof);
+			if (eof - pos < PAGE_SIZE) {
+				_debug("cut short %llx to %llx",
+				       pos, eof);
+				len = eof - pos;
+				ASSERTCMP(pos + len, ==, eof);
+			}
+		}
+
 		data = kmap(page);
 		old_fs = get_fs();
 		set_fs(KERNEL_DS);
 		ret = file->f_op->write(
-			file, (const void __user *) data, PAGE_SIZE,
-			&pos);
+			file, (const void __user *) data, len, &pos);
 		set_fs(old_fs);
 		kunmap(page);
-		if (ret != PAGE_SIZE)
+		if (ret != len)
 			ret = -EIO;
 	}
 	fput(file);
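A note on the error check in the hunk above: because len may now be smaller than PAGE_SIZE, continuing to compare the return value against PAGE_SIZE would mislabel every successful partial-tail write as an I/O error. A minimal standalone sketch of the corrected check, where do_write() is a hypothetical stand-in for file->f_op->write():

#include <assert.h>

/* Hypothetical stand-in for file->f_op->write(): reports that it
 * wrote exactly the number of bytes requested. */
static long do_write(const char *data, unsigned long len)
{
	(void)data;
	return (long)len;
}

int main(void)
{
	char page[4096] = { 0 };
	unsigned long len = 1808;	/* clamped length for a partial tail page */
	long ret = do_write(page, len);

	/* Compare against the clamped length, not PAGE_SIZE: a complete
	 * write of a partial page must not be misreported as -EIO. */
	assert(ret == (long)len);
	return 0;
}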
include/linux/fscache-cache.h
@@ -395,6 +395,7 @@ struct fscache_object {
 	struct rb_node		objlist_link;	/* link in global object list */
 #endif
 	pgoff_t			store_limit;	/* current storage limit */
+	loff_t			store_limit_l;	/* current storage limit */
 };
 
 extern const char *fscache_object_states[];
@@ -439,6 +440,7 @@ void fscache_object_init(struct fscache_object *object,
 	object->events = object->event_mask = 0;
 	object->flags = 0;
 	object->store_limit = 0;
+	object->store_limit_l = 0;
 	object->cache = cache;
 	object->cookie = cookie;
 	object->parent = NULL;
@@ -491,6 +493,7 @@ static inline void fscache_object_lookup_error(struct fscache_object *object)
 static inline
 void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
 {
+	object->store_limit_l = i_size;
 	object->store_limit = i_size >> PAGE_SHIFT;
 	if (i_size & ~PAGE_MASK)
 		object->store_limit++;
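The new store_limit_l field keeps the exact byte-granular limit alongside the existing page-granular store_limit, which rounds up to cover a partial tail page. A standalone sketch of that relationship, again assuming the usual 4 KiB page and an example size not taken from the patch:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t i_size = 10000;		/* netfs file size in bytes */

	uint64_t store_limit_l = i_size;	/* exact byte limit (new field) */
	uint64_t store_limit = i_size >> PAGE_SHIFT;
	if (i_size & ~PAGE_MASK)		/* partial tail page still counts */
		store_limit++;

	assert(store_limit_l == 10000);
	assert(store_limit == 3);	/* pages 0..2, the last one partial */
	return 0;
}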