diff options
author | Wu Fengguang <fengguang.wu@intel.com> | 2011-12-16 12:32:57 -0500 |
---|---|---|
committer | Chris Mason <chris.mason@oracle.com> | 2011-12-16 12:32:57 -0500 |
commit | 142349f541d0bb6bc3e0d4563268105aada42b0b (patch) | |
tree | c86e13b5cf8b87b3b4f3c5218442e221ce17f67b | |
parent | dc47ce90c3a822cd7c9e9339fe4d5f61dcb26b50 (diff) | |
download | op-kernel-dev-142349f541d0bb6bc3e0d4563268105aada42b0b.zip op-kernel-dev-142349f541d0bb6bc3e0d4563268105aada42b0b.tar.gz |
btrfs: lower the dirty balance poll interval
Tests show that the original large intervals can easily cause the dirty
limit to be exceeded with 100 concurrent dd's. So adapt the poll interval
to be at most the next check point selected by the dirty throttling
algorithm.
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
-rw-r--r-- | fs/btrfs/file.c | 2 |
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index dafdfa0..52305a8 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1167,6 +1167,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
 		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
 		     (sizeof(struct page *)));
+	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
+	nrptrs = max(nrptrs, 8);
 	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
 	if (!pages)
 		return -ENOMEM;