btrfs: lower the dirty balance poll interval
Tests show that the original large intervals can easily cause the dirty limit to be exceeded with 100 concurrent dd's. So adapt the interval to be at most as large as the next check point selected by the dirty throttling algorithm. Signed-off-by: Wu Fengguang <fengguang.wu@intel.com> Signed-off-by: Chris Mason <chris.mason@oracle.com>
This commit is contained in:
Parent
dc47ce90c3
Commit
142349f541
@@ -1167,6 +1167,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
 		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
 		     (sizeof(struct page *)));
+	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
+	nrptrs = max(nrptrs, 8);
 	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
 	if (!pages)
 		return -ENOMEM;
Loading…
Reference in new issue