@@ -1623,7 +1623,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 	 * 1000+ tasks, all of them start dirtying pages at exactly the same
 	 * time, hence all honoured too large initial task->nr_dirtied_pause.
 	 */
-	p = &__get_cpu_var(bdp_ratelimits);
+	p = this_cpu_ptr(&bdp_ratelimits);
 	if (unlikely(current->nr_dirtied >= ratelimit))
 		*p = 0;
 	else if (unlikely(*p >= ratelimit_pages)) {
@@ -1635,7 +1635,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
 	 * the dirty throttling and livelock other long-run dirtiers.
 	 */
-	p = &__get_cpu_var(dirty_throttle_leaks);
+	p = this_cpu_ptr(&dirty_throttle_leaks);
 	if (*p > 0 && current->nr_dirtied < ratelimit) {
 		unsigned long nr_pages_dirtied;
 		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
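
Note on the conversion above: __get_cpu_var() evaluated to an lvalue for the current CPU's instance of a per-CPU variable, so callers took its address to obtain a pointer; this_cpu_ptr() returns that pointer directly from the variable's address. A minimal sketch of the pattern follows; demo_counter and touch_counter() are hypothetical stand-ins for illustration, not names from this patch:

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical per-CPU counter, standing in for bdp_ratelimits. */
static DEFINE_PER_CPU(int, demo_counter);

static void touch_counter(void)
{
	int *p;

	/*
	 * Callers are expected to keep the task on one CPU while the
	 * pointer is in use, e.g. under preempt_disable(), as the code
	 * around the hunks above does.
	 */
	preempt_disable();

	/*
	 * Old accessor: took the address of an lvalue.
	 *   p = &__get_cpu_var(demo_counter);
	 * New accessor: yields this CPU's pointer directly.
	 */
	p = this_cpu_ptr(&demo_counter);
	(*p)++;

	preempt_enable();
}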