@@ -687,13 +687,20 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define raw_rq()		raw_cpu_ptr(&runqueues)
 
+static inline u64 __rq_clock_broken(struct rq *rq)
+{
+	return ACCESS_ONCE(rq->clock);
+}
+
 static inline u64 rq_clock(struct rq *rq)
 {
+	lockdep_assert_held(&rq->lock);
 	return rq->clock;
 }
 
 static inline u64 rq_clock_task(struct rq *rq)
 {
+	lockdep_assert_held(&rq->lock);
 	return rq->clock_task;
 }
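
The hunk above enforces a locking contract in the accessors themselves: rq_clock() and rq_clock_task() now trip lockdep_assert_held() when called without rq->lock, while the new __rq_clock_broken() (where ACCESS_ONCE() forces a single, untorn load) explicitly marks call sites that read the clock without serialization. As a minimal userspace sketch of the same assert-the-lock-is-held pattern, assuming pthreads, the following stands in for lockdep with a hand-rolled owner check; every name in it is hypothetical, not a kernel API:

/* Userspace sketch of the "assert the lock is held in the accessor"
 * pattern from the patch above. All names are hypothetical; only the
 * idea mirrors rq_clock()/lockdep_assert_held(). */
#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct runqueue {
	pthread_mutex_t lock;
	pthread_t owner;	/* poor man's lockdep: who holds the lock */
	int locked;
	uint64_t clock;
};

static void rq_lock(struct runqueue *rq)
{
	pthread_mutex_lock(&rq->lock);
	rq->owner = pthread_self();
	rq->locked = 1;
}

static void rq_unlock(struct runqueue *rq)
{
	rq->locked = 0;
	pthread_mutex_unlock(&rq->lock);
}

/* Serialized accessor: aborts if the caller does not hold rq->lock,
 * the way lockdep_assert_held() would splat on a lockdep kernel. */
static uint64_t rq_clock(struct runqueue *rq)
{
	assert(rq->locked && pthread_equal(rq->owner, pthread_self()));
	return rq->clock;
}

/* Deliberately unserialized accessor, analogous to __rq_clock_broken():
 * the volatile cast forces a single load, much like ACCESS_ONCE(),
 * but provides no ordering or freshness guarantee. */
static uint64_t rq_clock_broken(struct runqueue *rq)
{
	return *(volatile uint64_t *)&rq->clock;
}

int main(void)
{
	struct runqueue rq = { .lock = PTHREAD_MUTEX_INITIALIZER, .clock = 42 };

	rq_lock(&rq);
	printf("serialized read: %llu\n", (unsigned long long)rq_clock(&rq));
	rq_unlock(&rq);

	printf("lockless read:   %llu\n",
	       (unsigned long long)rq_clock_broken(&rq));
	return 0;
}

One plausible reading of the naming is that keeping a separately named "broken" accessor leaves the unserialized readers greppable, rather than letting them silently bypass the assertion.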