@@ -18,6 +18,7 @@
 #include <linux/seq_file.h>
 #include <linux/radix-tree.h>
 #include <linux/blkdev.h>
+#include <linux/atomic.h>
 
 /* Max limits for throttle policy */
 #define THROTL_IOPS_MAX		UINT_MAX
@@ -104,7 +105,7 @@ struct blkcg_gq {
 	struct request_list		rl;
 
 	/* reference count */
-	int				refcnt;
+	atomic_t			refcnt;
 
 	/* is this blkg online? protected by both blkcg and q locks */
 	bool				online;
@@ -145,7 +146,7 @@ void blkcg_drain_queue(struct request_queue *q);
 void blkcg_exit_queue(struct request_queue *q);
 
 /* Blkio controller policy registration */
-int __init blkcg_policy_register(struct blkcg_policy *pol);
+int blkcg_policy_register(struct blkcg_policy *pol);
 void blkcg_policy_unregister(struct blkcg_policy *pol);
 int blkcg_activate_policy(struct request_queue *q,
 			  const struct blkcg_policy *pol);
@@ -257,13 +258,12 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
  * blkg_get - get a blkg reference
  * @blkg: blkg to get
  *
- * The caller should be holding queue_lock and an existing reference.
+ * The caller should be holding an existing reference.
  */
 static inline void blkg_get(struct blkcg_gq *blkg)
 {
-	lockdep_assert_held(blkg->q->queue_lock);
-	WARN_ON_ONCE(!blkg->refcnt);
-	blkg->refcnt++;
+	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+	atomic_inc(&blkg->refcnt);
 }
 
 void __blkg_release_rcu(struct rcu_head *rcu);
@@ -271,14 +271,11 @@ void __blkg_release_rcu(struct rcu_head *rcu);
 /**
  * blkg_put - put a blkg reference
  * @blkg: blkg to put
- *
- * The caller should be holding queue_lock.
  */
 static inline void blkg_put(struct blkcg_gq *blkg)
 {
-	lockdep_assert_held(blkg->q->queue_lock);
-	WARN_ON_ONCE(blkg->refcnt <= 0);
-	if (!--blkg->refcnt)
+	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+	if (atomic_dec_and_test(&blkg->refcnt))
 		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
 }
 
@@ -580,7 +577,7 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { ret
 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
 static inline void blkcg_drain_queue(struct request_queue *q) { }
 static inline void blkcg_exit_queue(struct request_queue *q) { }
-static inline int __init blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
 static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
 static inline int blkcg_activate_policy(struct request_queue *q,
 					const struct blkcg_policy *pol) { return 0; }
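
Illustrative note (not part of the patch): the hunks above replace a queue_lock-protected integer refcount with an atomic_t, so blkg_get()/blkg_put() no longer need the lock and the last put can release the blkg via call_rcu(). The standalone user-space sketch below shows the same get/put pattern using C11 <stdatomic.h> in place of the kernel's atomic_inc()/atomic_dec_and_test(), with a plain free() standing in for the RCU-deferred __blkg_release_rcu(); struct obj and its helpers are hypothetical names used only for illustration.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;	/* hypothetical stand-in for blkcg_gq's atomic_t refcnt */
};

static struct obj *obj_alloc(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (o)
		atomic_init(&o->refcnt, 1);	/* creator starts with one reference */
	return o;
}

static void obj_get(struct obj *o)
{
	/* like blkg_get(): caller must already hold a reference */
	atomic_fetch_add(&o->refcnt, 1);
}

static void obj_put(struct obj *o)
{
	/*
	 * like blkg_put(): atomic_fetch_sub() returns the old value, so 1
	 * means this was the last reference; the kernel defers the actual
	 * release through call_rcu(), here we simply free() right away.
	 */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = obj_alloc();

	if (!o)
		return 1;
	obj_get(o);	/* a second owner takes a reference */
	obj_put(o);	/* first owner drops its reference */
	obj_put(o);	/* last put frees the object */
	puts("object released on final put");
	return 0;
}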