@@ -1120,28 +1120,30 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 	 * Cache entries may not be populated. So we cannot rely on the
 	 * size of the clean queue.
 	 */
-	unsigned nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+	unsigned nr_clean;
 
-	if (idle)
+	if (idle) {
 		/*
 		 * We'd like to clean everything.
 		 */
 		return q_size(&mq->dirty) == 0u;
-	else
-		return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
-			percent_to_target(mq, CLEAN_TARGET);
+	}
+
+	nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+	return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
+		percent_to_target(mq, CLEAN_TARGET);
 }
 
 static bool free_target_met(struct smq_policy *mq, bool idle)
 {
-	unsigned nr_free = from_cblock(mq->cache_size) -
-		mq->cache_alloc.nr_allocated;
+	unsigned nr_free;
 
-	if (idle)
-		return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
-			percent_to_target(mq, FREE_TARGET);
-	else
+	if (!idle)
 		return true;
+
+	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
+	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
+		percent_to_target(mq, FREE_TARGET);
 }
 
 /*----------------------------------------------------------------*/
@@ -1214,7 +1216,11 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 		return;
 
 	if (allocator_empty(&mq->cache_alloc)) {
-		if (!free_target_met(mq, false))
+		/*
+		 * We always claim to be 'idle' to ensure some demotions happen
+		 * with continuous loads.
+		 */
+		if (!free_target_met(mq, true))
 			queue_demotion(mq);
 		return;
 	}
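
For readers who want to see the behavioural change in isolation, here is a minimal standalone sketch of the second hunk's effect. The types, numbers and names (struct toy_cache, percent_to_blocks, FREE_TARGET_PERCENT) are made up for illustration and are not dm-cache's real definitions; only the shape of free_target_met() mirrors the refactored helper above. With the old idle == false argument the free-space check was short-circuited under load; claiming idle forces the real comparison against the free target, so demotions can still be queued while I/O keeps arriving.

/*
 * Standalone sketch (toy types, not the kernel code) showing why
 * queue_promotion() now passes idle == true to free_target_met().
 */
#include <stdbool.h>
#include <stdio.h>

#define FREE_TARGET_PERCENT 25u	/* stand-in for the policy's FREE_TARGET */

struct toy_cache {
	unsigned cache_size;		/* total cache blocks */
	unsigned nr_allocated;		/* blocks currently in use */
	unsigned demotions_queued;	/* demotions already queued */
};

static unsigned percent_to_blocks(const struct toy_cache *c, unsigned pct)
{
	return c->cache_size * pct / 100u;
}

/* Mirrors the refactored helper: non-idle callers short-circuit here. */
static bool free_target_met(const struct toy_cache *c, bool idle)
{
	unsigned nr_free;

	if (!idle)
		return true;

	nr_free = c->cache_size - c->nr_allocated;
	return (nr_free + c->demotions_queued) >=
		percent_to_blocks(c, FREE_TARGET_PERCENT);
}

int main(void)
{
	struct toy_cache c = { .cache_size = 100, .nr_allocated = 95, .demotions_queued = 0 };

	/* Old call site passed idle == false: a nearly full cache still
	 * reported the target as met, so no demotion was ever queued
	 * while the load was continuous. */
	printf("idle=false -> target met: %d\n", free_target_met(&c, false));

	/* New call site claims idle: the real check runs (5 free < 25
	 * target), the target is not met, and queue_promotion() would
	 * queue a demotion. */
	printf("idle=true  -> target met: %d\n", free_target_met(&c, true));
	return 0;
}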