@@ -42,27 +42,10 @@ static inline bool net_busy_loop_on(void)
 	return sysctl_net_busy_poll;
 }
 
-/* a wrapper to make debug_smp_processor_id() happy
- * we can use sched_clock() because we don't care much about precision
- * we only care that the average is bounded
- */
-#ifdef CONFIG_DEBUG_PREEMPT
-static inline u64 busy_loop_us_clock(void)
-{
-	u64 rc;
-
-	preempt_disable_notrace();
-	rc = sched_clock();
-	preempt_enable_no_resched_notrace();
-
-	return rc >> 10;
-}
-#else /* CONFIG_DEBUG_PREEMPT */
 static inline u64 busy_loop_us_clock(void)
 {
-	return sched_clock() >> 10;
+	return local_clock() >> 10;
 }
-#endif /* CONFIG_DEBUG_PREEMPT */
 
 static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
 {