@@ -126,7 +126,7 @@ struct blkcg_gq {
 	struct request_list		rl;
 
 	/* reference count */
-	struct percpu_ref		refcnt;
+	atomic_t			refcnt;
 
 	/* is this blkg online? protected by both blkcg and q locks */
 	bool				online;
@@ -184,8 +184,6 @@ extern struct cgroup_subsys_state * const blkcg_root_css;
 
 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 				      struct request_queue *q, bool update_hint);
-struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
-				      struct request_queue *q);
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 				    struct request_queue *q);
 int blkcg_init_queue(struct request_queue *q);
@@ -232,59 +230,22 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		   char *input, struct blkg_conf_ctx *ctx);
 void blkg_conf_finish(struct blkg_conf_ctx *ctx);
 
-/**
- * blkcg_css - find the current css
- *
- * Find the css associated with either the kthread or the current task.
- * This may return a dying css, so it is up to the caller to use tryget logic
- * to confirm it is alive and well.
- */
-static inline struct cgroup_subsys_state *blkcg_css(void)
-{
-	struct cgroup_subsys_state *css;
-
-	css = kthread_blkcg();
-	if (css)
-		return css;
-	return task_css(current, io_cgrp_id);
-}
 
 static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
 {
 	return css ? container_of(css, struct blkcg, css) : NULL;
 }
 
-/**
- * __bio_blkcg - internal version of bio_blkcg for bfq and cfq
- *
- * DO NOT USE.
- * There is a flaw using this version of the function. In particular, this was
- * used in a broken paradigm where association was called on the given css. It
- * is possible though that the returned css from task_css() is in the process
- * of dying due to migration of the current task. So it is improper to assume
- * *_get() is going to succeed. Both BFQ and CFQ rely on this logic and will
- * take additional work to handle more gracefully.
- */
-static inline struct blkcg *__bio_blkcg(struct bio *bio)
-{
-	if (bio && bio->bi_blkg)
-		return bio->bi_blkg->blkcg;
-	return css_to_blkcg(blkcg_css());
-}
-
-/**
- * bio_blkcg - grab the blkcg associated with a bio
- * @bio: target bio
- *
- * This returns the blkcg associated with a bio, NULL if not associated.
- * Callers are expected to either handle NULL or know association has been
- * done prior to calling this.
- */
 static inline struct blkcg *bio_blkcg(struct bio *bio)
 {
-	if (bio && bio->bi_blkg)
-		return bio->bi_blkg->blkcg;
-	return NULL;
+	struct cgroup_subsys_state *css;
+
+	if (bio && bio->bi_css)
+		return css_to_blkcg(bio->bi_css);
+	css = kthread_blkcg();
+	if (css)
+		return css_to_blkcg(css);
+	return css_to_blkcg(task_css(current, io_cgrp_id));
 }
 
 static inline bool blk_cgroup_congested(void)
@@ -490,35 +451,26 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
  */
 static inline void blkg_get(struct blkcg_gq *blkg)
 {
-	percpu_ref_get(&blkg->refcnt);
+	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+	atomic_inc(&blkg->refcnt);
 }
 
 /**
- * blkg_tryget - try and get a blkg reference
+ * blkg_try_get - try and get a blkg reference
  * @blkg: blkg to get
  *
  * This is for use when doing an RCU lookup of the blkg. We may be in the midst
  * of freeing this blkg, so we can only use it if the refcnt is not zero.
  */
-static inline bool blkg_tryget(struct blkcg_gq *blkg)
+static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
 {
-	return percpu_ref_tryget(&blkg->refcnt);
+	if (atomic_inc_not_zero(&blkg->refcnt))
+		return blkg;
+	return NULL;
 }
 
-/**
- * blkg_tryget_closest - try and get a blkg ref on the closet blkg
- * @blkg: blkg to get
- *
- * This walks up the blkg tree to find the closest non-dying blkg and returns
- * the blkg that it did association with as it may not be the passed in blkg.
- */
-static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
-{
-	while (!percpu_ref_tryget(&blkg->refcnt))
-		blkg = blkg->parent;
-
-	return blkg;
-}
+void __blkg_release_rcu(struct rcu_head *rcu);
 
 /**
  * blkg_put - put a blkg reference
@@ -526,7 +478,9 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
  */
 static inline void blkg_put(struct blkcg_gq *blkg)
 {
-	percpu_ref_put(&blkg->refcnt);
+	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+	if (atomic_dec_and_test(&blkg->refcnt))
+		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
 }
 
 /**
@@ -579,36 +533,25 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
 
 	rcu_read_lock();
 
-	if (bio && bio->bi_blkg) {
-		blkcg = bio->bi_blkg->blkcg;
-		if (blkcg == &blkcg_root)
-			goto rl_use_root;
-
-		blkg_get(bio->bi_blkg);
-		rcu_read_unlock();
-		return &bio->bi_blkg->rl;
-	}
+	blkcg = bio_blkcg(bio);
 
-	blkcg = css_to_blkcg(blkcg_css());
+	/* bypass blkg lookup and use @q->root_rl directly for root */
 	if (blkcg == &blkcg_root)
-		goto rl_use_root;
+		goto root_rl;
 
+	/*
+	 * Try to use blkg->rl. blkg lookup may fail under memory pressure
+	 * or if either the blkcg or queue is going away. Fall back to
+	 * root_rl in such cases.
+	 */
 	blkg = blkg_lookup(blkcg, q);
 	if (unlikely(!blkg))
-		blkg = __blkg_lookup_create(blkcg, q);
-
-	if (blkg->blkcg == &blkcg_root || !blkg_tryget(blkg))
-		goto rl_use_root;
+		goto root_rl;
 
+	blkg_get(blkg);
 	rcu_read_unlock();
 	return &blkg->rl;
-
-	/*
-	 * Each blkg has its own request_list, however, the root blkcg
-	 * uses the request_queue's root_rl. This is to avoid most
-	 * overhead for the root blkcg.
-	 */
-rl_use_root:
+root_rl:
 	rcu_read_unlock();
 	return &q->root_rl;
 }
@@ -854,26 +797,32 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg
 					  struct bio *bio) { return false; }
 #endif
 
-
-static inline void blkcg_bio_issue_init(struct bio *bio)
-{
-	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
-}
-
 static inline bool blkcg_bio_issue_check(struct request_queue *q,
 					 struct bio *bio)
 {
+	struct blkcg *blkcg;
 	struct blkcg_gq *blkg;
 	bool throtl = false;
 
 	rcu_read_lock();
+	blkcg = bio_blkcg(bio);
+
+	/* associate blkcg if bio hasn't attached one */
+	bio_associate_blkcg(bio, &blkcg->css);
 
-	bio_associate_create_blkg(q, bio);
-	blkg = bio->bi_blkg;
+	blkg = blkg_lookup(blkcg, q);
+	if (unlikely(!blkg)) {
+		spin_lock_irq(q->queue_lock);
+		blkg = blkg_lookup_create(blkcg, q);
+		if (IS_ERR(blkg))
+			blkg = NULL;
+		spin_unlock_irq(q->queue_lock);
+	}
 
 	throtl = blk_throtl_bio(q, blkg, bio);
 
 	if (!throtl) {
+		blkg = blkg ?: q->root_blkg;
 		/*
 		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
 		 * is a split bio and we would have already accounted for the
@@ -885,8 +834,6 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
 	}
 
-	blkcg_bio_issue_init(bio);
-
 	rcu_read_unlock();
 	return !throtl;
 }
@@ -983,7 +930,6 @@ static inline int blkcg_activate_policy(struct request_queue *q,
 static inline void blkcg_deactivate_policy(struct request_queue *q,
 					   const struct blkcg_policy *pol) { }
 
-static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
 
 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
@@ -999,7 +945,6 @@ static inline void blk_put_rl(struct request_list *rl) { }
 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
 static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
 
-static inline void blkcg_bio_issue_init(struct bio *bio) { }
 static inline bool blkcg_bio_issue_check(struct request_queue *q,
 					 struct bio *bio) { return true; }
 
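
For reference, a minimal standalone sketch (plain C11 with stdatomic; every fake_* name is hypothetical and not part of the patch) of the refcounting pattern the hunks above restore: a plain integer refcount with blkg_get()/blkg_try_get()/blkg_put() semantics, except that the kernel's blkg_put() defers the final release through call_rcu(&blkg->rcu_head, __blkg_release_rcu) rather than freeing inline as done here.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_blkg {
	atomic_int refcnt;			/* stands in for blkg->refcnt */
};

static struct fake_blkg *fake_blkg_alloc(void)
{
	struct fake_blkg *blkg = malloc(sizeof(*blkg));

	if (!blkg)
		abort();
	atomic_init(&blkg->refcnt, 1);		/* caller owns the initial reference */
	return blkg;
}

/* Like blkg_get(): only legal while a reference is already held. */
static void fake_blkg_get(struct fake_blkg *blkg)
{
	assert(atomic_load(&blkg->refcnt) > 0);
	atomic_fetch_add(&blkg->refcnt, 1);
}

/*
 * Like blkg_try_get(): may race with the final put, so it must not
 * resurrect a counter that has already dropped to zero (the
 * atomic_inc_not_zero() idiom, written as a CAS loop here).
 */
static struct fake_blkg *fake_blkg_try_get(struct fake_blkg *blkg)
{
	int old = atomic_load(&blkg->refcnt);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&blkg->refcnt, &old, old + 1))
			return blkg;
	}
	return NULL;
}

/*
 * Like blkg_put(): drop a reference, release on zero.  The kernel helper
 * queues __blkg_release_rcu() via call_rcu(); freeing inline keeps this
 * sketch self-contained.
 */
static void fake_blkg_put(struct fake_blkg *blkg)
{
	if (atomic_fetch_sub(&blkg->refcnt, 1) == 1)
		free(blkg);
}

int main(void)
{
	struct fake_blkg *blkg = fake_blkg_alloc();	/* refcnt == 1 */

	fake_blkg_get(blkg);				/* 1 -> 2 */
	if (fake_blkg_try_get(blkg))			/* 2 -> 3 */
		fake_blkg_put(blkg);			/* 3 -> 2 */
	fake_blkg_put(blkg);				/* 2 -> 1 */
	fake_blkg_put(blkg);				/* 1 -> 0, freed */

	printf("refcount sketch done\n");
	return 0;
}

(Build with e.g. cc -std=c11 refcnt_sketch.c.)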