@@ -33,6 +33,7 @@
 
 #define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))
 
+static DEFINE_SPINLOCK(percpu_ref_switch_lock);
 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
 
 static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
@@ -82,6 +83,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
 	atomic_long_set(&ref->count, start_count);
 
 	ref->release = release;
+	ref->confirm_switch = NULL;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(percpu_ref_init);
@@ -101,6 +103,8 @@ void percpu_ref_exit(struct percpu_ref *ref)
 	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
 
 	if (percpu_count) {
+		/* non-NULL confirm_switch indicates switching in progress */
+		WARN_ON_ONCE(ref->confirm_switch);
 		free_percpu(percpu_count);
 		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
 	}
@@ -161,66 +165,23 @@ static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
 static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 					   percpu_ref_func_t *confirm_switch)
 {
-	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) {
-		/* switching from percpu to atomic */
-		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
-
-		/*
-		 * Non-NULL ->confirm_switch is used to indicate that
-		 * switching is in progress. Use noop one if unspecified.
-		 */
-		WARN_ON_ONCE(ref->confirm_switch);
-		ref->confirm_switch =
-			confirm_switch ?: percpu_ref_noop_confirm_switch;
-
-		percpu_ref_get(ref);	/* put after confirmation */
-		call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
-	} else if (confirm_switch) {
-		/*
-		 * Somebody already set ATOMIC. Switching may still be in
-		 * progress. @confirm_switch must be invoked after the
-		 * switching is complete and a full sched RCU grace period
-		 * has passed. Wait synchronously for the previous
-		 * switching and schedule @confirm_switch invocation.
-		 */
-		wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-		ref->confirm_switch = confirm_switch;
-
-		percpu_ref_get(ref);	/* put after confirmation */
-		call_rcu_sched(&ref->rcu, percpu_ref_call_confirm_rcu);
 	}
-}
-
-/**
- * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
- * @ref: percpu_ref to switch to atomic mode
- * @confirm_switch: optional confirmation callback
- *
- * There's no reason to use this function for the usual reference counting.
- * Use percpu_ref_kill[_and_confirm]().
- *
- * Schedule switching of @ref to atomic mode. All its percpu counts will
- * be collected to the main atomic counter. On completion, when all CPUs
- * are guaraneed to be in atomic mode, @confirm_switch, which may not
- * block, is invoked. This function may be invoked concurrently with all
- * the get/put operations and can safely be mixed with kill and reinit
- * operations. Note that @ref will stay in atomic mode across kill/reinit
- * cycles until percpu_ref_switch_to_percpu() is called.
- *
- * This function normally doesn't block and can be called from any context
- * but it may block if @confirm_kill is specified and @ref is already in
- * the process of switching to atomic mode. In such cases, @confirm_switch
- * will be invoked after the switching is complete.
- *
- * Due to the way percpu_ref is implemented, @confirm_switch will be called
- * after at least one full sched RCU grace period has passed but this is an
- * implementation detail and must not be depended upon.
- */
-void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
-				 percpu_ref_func_t *confirm_switch)
-{
-	ref->force_atomic = true;
-	__percpu_ref_switch_to_atomic(ref, confirm_switch);
+	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
+		if (confirm_switch)
+			confirm_switch(ref);
+		return;
+	}
+
+	/* switching from percpu to atomic */
+	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+
+	/*
+	 * Non-NULL ->confirm_switch is used to indicate that switching is
+	 * in progress. Use noop one if unspecified.
+	 */
+	ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
+
+	percpu_ref_get(ref);	/* put after confirmation */
+	call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
 }
 
 static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
@@ -233,8 +194,6 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
 		return;
 
-	wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-
 	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
 
 	/*
@@ -250,6 +209,58 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
 }
 
+static void __percpu_ref_switch_mode(struct percpu_ref *ref,
+				     percpu_ref_func_t *confirm_switch)
+{
+	lockdep_assert_held(&percpu_ref_switch_lock);
+
+	/*
+	 * If the previous ATOMIC switching hasn't finished yet, wait for
+	 * its completion. If the caller ensures that ATOMIC switching
+	 * isn't in progress, this function can be called from any context.
+	 */
+	wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
+			    percpu_ref_switch_lock);
+
+	if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+		__percpu_ref_switch_to_atomic(ref, confirm_switch);
+	else
+		__percpu_ref_switch_to_percpu(ref);
+}
+
+/**
+ * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
+ * @ref: percpu_ref to switch to atomic mode
+ * @confirm_switch: optional confirmation callback
+ *
+ * There's no reason to use this function for the usual reference counting.
+ * Use percpu_ref_kill[_and_confirm]().
+ *
+ * Schedule switching of @ref to atomic mode. All its percpu counts will
+ * be collected to the main atomic counter. On completion, when all CPUs
+ * are guaranteed to be in atomic mode, @confirm_switch, which may not
+ * block, is invoked. This function may be invoked concurrently with all
+ * the get/put operations and can safely be mixed with kill and reinit
+ * operations. Note that @ref will stay in atomic mode across kill/reinit
+ * cycles until percpu_ref_switch_to_percpu() is called.
+ *
+ * This function may block if @ref is in the process of switching to atomic
+ * mode. If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
+ */
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+				 percpu_ref_func_t *confirm_switch)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
+	ref->force_atomic = true;
+	__percpu_ref_switch_mode(ref, confirm_switch);
+
+	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+}
+
 /**
  * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
  * @ref: percpu_ref to switch to percpu mode
@@ -264,17 +275,20 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
  * dying or dead, the actual switching takes place on the following
  * percpu_ref_reinit().
  *
- * This function normally doesn't block and can be called from any context
- * but it may block if @ref is in the process of switching to atomic mode
- * by percpu_ref_switch_atomic().
+ * This function may block if @ref is in the process of switching to atomic
+ * mode. If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
  */
 void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
 	ref->force_atomic = false;
+	__percpu_ref_switch_mode(ref, NULL);
 
-	/* a dying or dead ref can't be switched to percpu mode w/o reinit */
-	if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
-		__percpu_ref_switch_to_percpu(ref);
+	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
 
 /**
@@ -290,21 +304,23 @@ void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
  *
  * This function normally doesn't block and can be called from any context
  * but it may block if @confirm_kill is specified and @ref is in the
- * process of switching to atomic mode by percpu_ref_switch_atomic().
- *
- * Due to the way percpu_ref is implemented, @confirm_switch will be called
- * after at least one full sched RCU grace period has passed but this is an
- * implementation detail and must not be depended upon.
+ * process of switching to atomic mode by percpu_ref_switch_to_atomic().
  */
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
 	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
 		  "%s called more than once on %pf!", __func__, ref->release);
 
 	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
-	__percpu_ref_switch_to_atomic(ref, confirm_kill);
+	__percpu_ref_switch_mode(ref, confirm_kill);
 	percpu_ref_put(ref);
+
+	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
 
@@ -321,11 +337,16 @@ EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
  */
 void percpu_ref_reinit(struct percpu_ref *ref)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
 	WARN_ON_ONCE(!percpu_ref_is_zero(ref));
 
 	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
 	percpu_ref_get(ref);
-	if (!ref->force_atomic)
-		__percpu_ref_switch_to_percpu(ref);
+	__percpu_ref_switch_mode(ref, NULL);
+
+	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_reinit);
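
For context, here is a minimal sketch of the conventional percpu_ref lifecycle that the functions patched above serve. Everything named foo_* is hypothetical; only the percpu_ref_*, completion, and slab calls are real kernel interfaces, and the flow shown (init in percpu mode, kill, wait for release, exit) is ordinary usage rather than anything this patch changes.

#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/slab.h>

/* hypothetical object whose lifetime is managed by a percpu_ref */
struct foo {
	struct percpu_ref ref;
	struct completion release_done;
};

/* called once the refcount has dropped to zero */
static void foo_release(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	complete(&foo->release_done);
}

static struct foo *foo_create(void)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;
	init_completion(&foo->release_done);
	/* starts in percpu mode; pass PERCPU_REF_INIT_ATOMIC to start atomic */
	if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL)) {
		kfree(foo);
		return NULL;
	}
	return foo;
}

static void foo_destroy(struct foo *foo)
{
	/* drops the initial ref; foo_release() runs once all refs are gone */
	percpu_ref_kill(&foo->ref);
	wait_for_completion(&foo->release_done);
	percpu_ref_exit(&foo->ref);
	kfree(foo);
}

percpu_ref_kill() is the no-confirmation wrapper around percpu_ref_kill_and_confirm(), so after this patch every kill, reinit, and explicit mode switch on a ref is serialized by percpu_ref_switch_lock.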
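Where a caller needs the confirmation semantics that the percpu_ref_switch_to_atomic() and percpu_ref_kill_and_confirm() kdoc above describe, the usual pattern pairs the callback with a completion. A hedged sketch, extending the hypothetical struct foo from the previous example with a `struct completion confirm_done;` field:

/*
 * Runs from the sched-RCU callback path once every CPU is guaranteed to
 * see the ref as dead; per the kdoc above it may not block, so it only
 * signals a completion for the caller to wait on.
 */
static void foo_confirm_kill(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	complete(&foo->confirm_done);
}

static void foo_freeze(struct foo *foo)
{
	/* mark dead and wait until no CPU can take a new percpu reference */
	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
	wait_for_completion(&foo->confirm_done);
	/* from here on, percpu_ref_tryget_live(&foo->ref) always fails */
}

This split, an atomic-context callback plus a sleeping waiter, is what lets the switch machinery stay usable from any context while still giving callers a point after which no percpu get can succeed.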