Skip to content

Commit e33f2d2

Browse files
committed
Merge branch 'urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/writeback
* 'urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/writeback: squeeze max-pause area and drop pass-good area
2 parents be5378f + bb08229 commit e33f2d2

File tree

2 files changed

+2
-24
lines changed

2 files changed

+2
-24
lines changed

include/linux/writeback.h

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -12,15 +12,6 @@
  *
  * (thresh - thresh/DIRTY_FULL_SCOPE, thresh)
  *
- * The 1/16 region above the global dirty limit will be put to maximum pauses:
- *
- * (limit, limit + limit/DIRTY_MAXPAUSE_AREA)
- *
- * The 1/16 region above the max-pause region, dirty exceeded bdi's will be put
- * to loops:
- *
- * (limit + limit/DIRTY_MAXPAUSE_AREA, limit + limit/DIRTY_PASSGOOD_AREA)
- *
  * Further beyond, all dirtier tasks will enter a loop waiting (possibly long
  * time) for the dirty pages to drop, unless written enough pages.
  *
@@ -31,8 +22,6 @@
  */
 #define DIRTY_SCOPE		8
 #define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
-#define DIRTY_MAXPAUSE_AREA		16
-#define DIRTY_PASSGOOD_AREA		8
 
 /*
  * 4MB minimal write chunk size

mm/page-writeback.c

Lines changed: 2 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -754,21 +754,10 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * 200ms is typically more than enough to curb heavy dirtiers;
 		 * (b) the pause time limit makes the dirtiers more responsive.
 		 */
-		if (nr_dirty < dirty_thresh +
-			       dirty_thresh / DIRTY_MAXPAUSE_AREA &&
+		if (nr_dirty < dirty_thresh &&
+		    bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 &&
 		    time_after(jiffies, start_time + MAX_PAUSE))
 			break;
-		/*
-		 * pass-good area. When some bdi gets blocked (eg. NFS server
-		 * not responding), or write bandwidth dropped dramatically due
-		 * to concurrent reads, or dirty threshold suddenly dropped and
-		 * the dirty pages cannot be brought down anytime soon (eg. on
-		 * slow USB stick), at least let go of the good bdi's.
-		 */
-		if (nr_dirty < dirty_thresh +
-			       dirty_thresh / DIRTY_PASSGOOD_AREA &&
-		    bdi_dirty < bdi_thresh)
-			break;
 
 		/*
 		 * Increase the delay for each loop, up to our previous

0 commit comments

Comments
 (0)