@@ -136,14 +136,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
 	 * used as a pointer.  If the compiler generates a separate fetch
 	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
 	 * between contaminating the pointer value, meaning that
-	 * ACCESS_ONCE() is required when fetching it.
-	 *
-	 * Also, we need a data dependency barrier to be paired with
-	 * smp_store_release() in __percpu_ref_switch_to_percpu().
-	 *
-	 * Use lockless deref which contains both.
+	 * READ_ONCE() is required when fetching it.
 	 */
-	percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
+	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
+
+	/* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
+	smp_read_barrier_depends();
 
 	/*
 	 * Theoretically, the following could test just ATOMIC; however,
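
For context, the ordering the hunk above relies on is a publish/consume pairing: the
writer initializes the per-CPU data and then publishes the pointer/flag word with a
release store (smp_store_release() in __percpu_ref_switch_to_percpu()), while the reader
fetches the word exactly once (READ_ONCE()) and depends on the data dependency barrier
(smp_read_barrier_depends()) before dereferencing it. The kernel primitives are not
runnable outside the kernel, so the sketch below is a minimal userspace analogue using
C11 atomics; the names pub, slot and payload, and the use of memory_order_consume, are
illustrative assumptions and not part of this patch.

	/*
	 * Userspace sketch of the same pairing (assumption: C11 atomics
	 * standing in for the kernel primitives):
	 *
	 *   writer: init payload, publish pointer      ~ smp_store_release()
	 *   reader: fetch once, dependency-ordered use ~ READ_ONCE() +
	 *                                                smp_read_barrier_depends()
	 */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct payload {
		int count;
	};

	static struct payload slot;
	static _Atomic(struct payload *) pub;	/* plays the role of percpu_count_ptr */

	static void *writer(void *arg)
	{
		slot.count = 42;			/* initialize before publishing */
		atomic_store_explicit(&pub, &slot,
				      memory_order_release);	/* ~ smp_store_release() */
		return NULL;
	}

	static void *reader(void *arg)
	{
		/* single fetch, dependency ordering ~ READ_ONCE() + smp_read_barrier_depends() */
		struct payload *p = atomic_load_explicit(&pub, memory_order_consume);

		if (p)					/* pointer seen => initialization visible */
			printf("count = %d\n", p->count);
		return NULL;
	}

	int main(void)
	{
		pthread_t w, r;

		pthread_create(&w, NULL, writer, NULL);
		pthread_create(&r, NULL, reader, NULL);
		pthread_join(w, NULL);
		pthread_join(r, NULL);
		return 0;
	}

Build with "cc -pthread sketch.c". The reader either sees NULL (writer has not published
yet) or a pointer whose pointed-to initialization is guaranteed visible, which is exactly
the property the comment in __ref_is_percpu() documents.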