Revert "CIFS: Fix write after setting a read lock for read oplock files"

That solution has data races and can end up issuing two identical writes
to the server, because the clientCanCacheAll value can change during the
execution of __generic_file_aio_write.

Signed-off-by: Pavel Shilovsky <piastry@etersoft.ru>
Reviewed-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <smfrench@gmail.com>
Authored by Pavel Shilovsky on 2012-12-21 15:05:47 +04:00, committed by Steve French
Parent 31efee60f4
Commit ca8aa29c60
3 changed files with 35 additions and 69 deletions
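For context, the failure mode the message describes is a stale-snapshot race: the oplock flag is read once, a long write runs while an oplock break flips the flag, and the follow-up decision (leave the pages clean or mark them dirty) is made from the old value, so the same data can be sent to the server directly and then written back again from the page cache. Below is a minimal user-space sketch of that pattern using plain pthreads; every name in it is illustrative and it is not the kernel code path.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* stands in for cinode->clientCanCacheAll */
static atomic_bool can_cache_all = true;

/* the oplock break arrives while the write is still running */
static void *oplock_break(void *arg)
{
        (void)arg;
        usleep(1000);
        atomic_store(&can_cache_all, false);
        return NULL;
}

/* reads the flag once up front, like the reverted cache_ex snapshot */
static void *strict_writer(void *arg)
{
        (void)arg;
        bool snapshot = atomic_load(&can_cache_all);
        usleep(2000); /* stands in for __generic_file_aio_write() */
        if (snapshot != atomic_load(&can_cache_all))
                puts("decision made on a stale oplock snapshot");
        return NULL;
}

int main(void)
{
        pthread_t writer, breaker;

        pthread_create(&writer, NULL, strict_writer, NULL);
        pthread_create(&breaker, NULL, oplock_break, NULL);
        pthread_join(writer, NULL);
        pthread_join(breaker, NULL);
        return 0;
}

Build with cc -pthread; when the break lands inside the sleep, the message prints, which is exactly the window the reverted patch left open by trusting a pre-write snapshot.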


@@ -228,7 +228,6 @@ cifs_alloc_inode(struct super_block *sb)
         cifs_set_oplock_level(cifs_inode, 0);
         cifs_inode->delete_pending = false;
         cifs_inode->invalid_mapping = false;
-        cifs_inode->leave_pages_clean = false;
         cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
         cifs_inode->server_eof = 0;
         cifs_inode->uniqueid = 0;


@@ -1030,7 +1030,6 @@ struct cifsInodeInfo {
         bool clientCanCacheAll;         /* read and writebehind oplock */
         bool delete_pending;            /* DELETE_ON_CLOSE is set */
         bool invalid_mapping;           /* pagecache is invalid */
-        bool leave_pages_clean;         /* protected by i_mutex, not set pages dirty */
         unsigned long time;             /* jiffies of last update of inode */
         u64 server_eof;                 /* current file size on server -- protected by i_lock */
         u64 uniqueid;                   /* server inode number */


@@ -2103,14 +2103,6 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
         } else {
                 rc = copied;
                 pos += copied;
-                /*
-                 * When we use strict cache mode and cifs_strict_writev was run
-                 * with level II oplock (indicated by leave_pages_clean field of
-                 * CIFS_I(inode)), we can leave pages clean - cifs_strict_writev
-                 * sent the data to the server itself.
-                 */
-                if (!CIFS_I(inode)->leave_pages_clean ||
-                    !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO))
-                        set_page_dirty(page);
+                set_page_dirty(page);
         }
@@ -2462,8 +2454,8 @@ ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
 }
 
 static ssize_t
-cifs_pagecache_writev(struct kiocb *iocb, const struct iovec *iov,
-                      unsigned long nr_segs, loff_t pos, bool cache_ex)
+cifs_writev(struct kiocb *iocb, const struct iovec *iov,
+            unsigned long nr_segs, loff_t pos)
 {
         struct file *file = iocb->ki_filp;
         struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
@@ -2485,12 +2477,8 @@ cifs_pagecache_writev(struct kiocb *iocb, const struct iovec *iov,
                                      server->vals->exclusive_lock_type, NULL,
                                      CIFS_WRITE_OP)) {
                 mutex_lock(&inode->i_mutex);
-                if (!cache_ex)
-                        cinode->leave_pages_clean = true;
                 rc = __generic_file_aio_write(iocb, iov, nr_segs,
                                               &iocb->ki_pos);
-                if (!cache_ex)
-                        cinode->leave_pages_clean = false;
                 mutex_unlock(&inode->i_mutex);
         }
@@ -2517,62 +2505,42 @@ cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
         struct cifsFileInfo *cfile = (struct cifsFileInfo *)
                                         iocb->ki_filp->private_data;
         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
-        ssize_t written, written2;
+#ifdef CONFIG_CIFS_SMB2
         /*
-         * We need to store clientCanCacheAll here to prevent race
-         * conditions - this value can be changed during an execution
-         * of generic_file_aio_write. For CIFS it can be changed from
-         * true to false only, but for SMB2 it can be changed both from
-         * true to false and vice versa. So, we can end up with a data
-         * stored in the cache, not marked dirty and not sent to the
-         * server if this value changes its state from false to true
-         * after cifs_write_end.
+         * If we have an oplock for read and want to write a data to the file
+         * we need to store it in the page cache and then push it to the server
+         * to be sure the next read will get a valid data.
          */
-        bool cache_ex = cinode->clientCanCacheAll;
-        bool cache_read = cinode->clientCanCacheRead;
-        int rc;
-        loff_t saved_pos;
+        if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead) {
+                ssize_t written;
+                int rc;
 
-        if (cache_ex) {
-                if (cap_unix(tcon->ses) &&
-                    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) &&
-                    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(
-                                tcon->fsUnixInfo.Capability)))
-                        return generic_file_aio_write(iocb, iov, nr_segs, pos);
-                return cifs_pagecache_writev(iocb, iov, nr_segs, pos, cache_ex);
+                written = generic_file_aio_write(iocb, iov, nr_segs, pos);
+                rc = filemap_fdatawrite(inode->i_mapping);
+                if (rc)
+                        return (ssize_t)rc;
+                return written;
         }
+#endif
 
         /*
-         * For files without exclusive oplock in strict cache mode we need to
-         * write the data to the server exactly from the pos to pos+len-1 rather
-         * than flush all affected pages because it may cause a error with
-         * mandatory locks on these pages but not on the region from pos to
-         * ppos+len-1.
+         * For non-oplocked files in strict cache mode we need to write the data
+         * to the server exactly from the pos to pos+len-1 rather than flush all
+         * affected pages because it may cause a error with mandatory locks on
+         * these pages but not on the region from pos to ppos+len-1.
          */
-        written = cifs_user_writev(iocb, iov, nr_segs, pos);
-        if (!cache_read || written <= 0)
-                return written;
-        saved_pos = iocb->ki_pos;
-        iocb->ki_pos = pos;
-        /* we have a read oplock - need to store a data in the page cache */
+        if (!cinode->clientCanCacheAll)
+                return cifs_user_writev(iocb, iov, nr_segs, pos);
         if (cap_unix(tcon->ses) &&
-            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) &&
-            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(
-                        tcon->fsUnixInfo.Capability)))
-                written2 = generic_file_aio_write(iocb, iov, nr_segs, pos);
-        else
-                written2 = cifs_pagecache_writev(iocb, iov, nr_segs, pos,
-                                                 cache_ex);
-        /* errors occured during writing - invalidate the page cache */
-        if (written2 < 0) {
-                rc = cifs_invalidate_mapping(inode);
-                if (rc)
-                        written = (ssize_t)rc;
-                else
-                        iocb->ki_pos = saved_pos;
-        }
-        return written;
+            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
+            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
+                return generic_file_aio_write(iocb, iov, nr_segs, pos);
+        return cifs_writev(iocb, iov, nr_segs, pos);
 }
 
 static struct cifs_readdata *
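Read together, the restored cifs_strict_writev picks one of three write paths from the oplock state: with a read (level II) oplock it writes through the page cache and pushes the data with filemap_fdatawrite(); with no oplock it sends exactly the requested byte range via cifs_user_writev(); with an exclusive oplock it caches the write, going through generic_file_aio_write() on POSIX-brlock-capable Unix mounts and otherwise through cifs_writev(), which checks for conflicting locks first (see the earlier hunk). The small standalone model below restates that dispatch, ignoring the CONFIG_CIFS_SMB2 guard; the enum, function, and strings are illustrative only, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* illustrative oplock states, mirroring clientCanCacheRead/clientCanCacheAll */
enum oplock_state { OPLOCK_NONE, OPLOCK_READ, OPLOCK_EXCLUSIVE };

/* returns a label for the path the restored code would take */
static const char *strict_write_path(enum oplock_state state, bool posix_brlock)
{
        if (state == OPLOCK_READ)
                return "page cache write + filemap_fdatawrite";
        if (state == OPLOCK_NONE)
                return "cifs_user_writev (exact pos..pos+len-1)";
        return posix_brlock ? "generic_file_aio_write"
                            : "cifs_writev (checks conflicting locks first)";
}

int main(void)
{
        printf("read oplock     : %s\n", strict_write_path(OPLOCK_READ, false));
        printf("no oplock       : %s\n", strict_write_path(OPLOCK_NONE, false));
        printf("exclusive oplock: %s\n", strict_write_path(OPLOCK_EXCLUSIVE, true));
        return 0;
}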