@@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
 
 void static_key_slow_inc(struct static_key *key)
 {
+	int v, v1;
+
 	STATIC_KEY_CHECK_USE();
-	if (atomic_inc_not_zero(&key->enabled))
-		return;
+
+	/*
+	 * Careful if we get concurrent static_key_slow_inc() calls;
+	 * later calls must wait for the first one to _finish_ the
+	 * jump_label_update() process. At the same time, however,
+	 * the jump_label_update() call below wants to see
+	 * static_key_enabled(&key) for jumps to be updated properly.
+	 *
+	 * So give a special meaning to negative key->enabled: it sends
+	 * static_key_slow_inc() down the slow path, and it is non-zero
+	 * so it counts as "enabled" in jump_label_update(). Note that
+	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
+	 */
+	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
+		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
+		if (likely(v1 == v))
+			return;
+	}
 
 	jump_label_lock();
-	if (atomic_inc_return(&key->enabled) == 1)
+	if (atomic_read(&key->enabled) == 0) {
+		atomic_set(&key->enabled, -1);
 		jump_label_update(key);
+		atomic_set(&key->enabled, 1);
+	} else {
+		atomic_inc(&key->enabled);
+	}
 	jump_label_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
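Note for review: the core of this hunk is the open-coded "increment only while strictly positive" cmpxchg loop plus the temporary -1 sentinel taken under the lock. Below is a minimal userspace model of that scheme, assuming C11 <stdatomic.h> and a pthread mutex standing in for the kernel's atomic_t and jump_label_mutex; all fake_* names are illustrative, not kernel APIs, and this is a sketch rather than the actual implementation.

#include <stdatomic.h>
#include <pthread.h>

struct fake_key {
	/* > 0: enabled; 0: disabled; -1: first enable still patching */
	atomic_int enabled;
};

static pthread_mutex_t fake_jump_label_mutex = PTHREAD_MUTEX_INITIALIZER;

static void fake_jump_label_update(struct fake_key *key)
{
	/* stand-in for patching the jump sites; it observes enabled != 0 */
}

void fake_key_slow_inc(struct fake_key *key)
{
	int v = atomic_load(&key->enabled);

	/*
	 * Fast path: bump the count only while it is strictly positive,
	 * mirroring the patch's atomic_cmpxchg() loop. Both 0 and -1
	 * fall through to the locked slow path below.
	 */
	while (v > 0) {
		/* on failure, v is reloaded with the current value */
		if (atomic_compare_exchange_weak(&key->enabled, &v, v + 1))
			return;
	}

	pthread_mutex_lock(&fake_jump_label_mutex);
	if (atomic_load(&key->enabled) == 0) {
		/*
		 * -1 means "counts as enabled, but the update has not
		 * finished": concurrent incrementers serialize on the
		 * mutex instead of returning before the sites are patched.
		 */
		atomic_store(&key->enabled, -1);
		fake_jump_label_update(key);
		atomic_store(&key->enabled, 1);
	} else {
		atomic_fetch_add(&key->enabled, 1);
	}
	pthread_mutex_unlock(&fake_jump_label_mutex);
}

Only the first enabler runs the update; every later caller either wins the cmpxchg race on an already-positive count or waits on the mutex until enabled is 1.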
@@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
 static void __static_key_slow_dec(struct static_key *key,
 		unsigned long rate_limit, struct delayed_work *work)
 {
+	/*
+	 * The negative count check is valid even when a negative
+	 * key->enabled is in use by static_key_slow_inc(); a
+	 * __static_key_slow_dec() before the first static_key_slow_inc()
+	 * returns is unbalanced, because all other static_key_slow_inc()
+	 * instances block while the update is in progress.
+	 */
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 		WARN(atomic_read(&key->enabled) < 0,
 		     "jump label: negative count!\n");
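Note for review: the new comment leans on atomic_dec_and_mutex_lock() decrementing without the mutex whenever the count is not exactly 1, so an unbalanced dec during the -1 window goes negative and trips the WARN(). A rough userspace equivalent of that helper, under the same C11/pthread assumptions as above (fake_dec_and_mutex_lock is a made-up name, not a kernel API):

#include <stdatomic.h>
#include <pthread.h>
#include <stdbool.h>

/*
 * Decrement *cnt; return true with *lock held iff the count reached
 * zero. Mirrors atomic_dec_and_mutex_lock() semantics as a sketch.
 */
bool fake_dec_and_mutex_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	int v = atomic_load(cnt);

	/*
	 * Fast path: decrement unless the count is exactly 1 (like
	 * atomic_add_unless(cnt, -1, 1)). A 0 or negative count is
	 * decremented here without the lock, which is how an
	 * unbalanced dec ends up negative for the caller's WARN().
	 */
	while (v != 1) {
		if (atomic_compare_exchange_weak(cnt, &v, v - 1))
			return false;
	}

	/* possibly the last reference: take the lock, then re-check */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(cnt, 1) != 1) {
		pthread_mutex_unlock(lock);
		return false;
	}
	return true;	/* hit zero: caller keeps the lock */
}

With that behaviour, a dec issued while the first inc still holds the mutex (enabled == -1) decrements to -2 on the fast path and fires the negative-count warning, which is exactly the "unbalanced" case the new comment describes.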