|
@@ -976,13 +976,20 @@ static inline u64 rq_clock_task(struct rq *rq)
|
|
return rq->clock_task;
|
|
return rq->clock_task;
|
|
}
|
|
}
|
|
|
|
|
|
-static inline void rq_clock_skip_update(struct rq *rq, bool skip)
|
|
|
|
|
|
+static inline void rq_clock_skip_update(struct rq *rq)
|
|
{
|
|
{
|
|
lockdep_assert_held(&rq->lock);
|
|
lockdep_assert_held(&rq->lock);
|
|
- if (skip)
|
|
|
|
- rq->clock_update_flags |= RQCF_REQ_SKIP;
|
|
|
|
- else
|
|
|
|
- rq->clock_update_flags &= ~RQCF_REQ_SKIP;
|
|
|
|
|
|
+ rq->clock_update_flags |= RQCF_REQ_SKIP;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/*
|
|
|
|
+ * See rt task throttling, which is the only time a skip
|
|
|
|
+ * request is cancelled.
|
|
|
|
+ */
|
|
|
|
+static inline void rq_clock_cancel_skipupdate(struct rq *rq)
|
|
|
|
+{
|
|
|
|
+ lockdep_assert_held(&rq->lock);
|
|
|
|
+ rq->clock_update_flags &= ~RQCF_REQ_SKIP;
|
|
}
|
|
}
|
|
|
|
|
|
struct rq_flags {
|
|
struct rq_flags {
|