|
|
@@ -260,6 +260,22 @@ void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
|
|
|
|
|
|
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
|
|
|
+
|
|
|
+/**
|
|
|
+ * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
|
|
|
+ * @ref: percpu_ref to switch to atomic mode
|
|
|
+ *
|
|
|
+ * Schedule switching of the ref to atomic mode, and wait for the
|
|
|
+ * switch to complete. Caller must ensure that no other thread
|
|
|
+ * will switch back to percpu mode.
|
|
|
+ */
|
|
|
+void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
|
|
|
+{
|
|
|
+ percpu_ref_switch_to_atomic(ref, NULL);
|
|
|
+ wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
|
|
|
|
|
|
/**
|
|
|
* percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
|
|
|
@@ -290,6 +306,7 @@ void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
|
|
|
|
|
|
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
|
|
|
|
|
|
/**
|
|
|
* percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
|