@@ -132,7 +132,7 @@ struct htb_class {
 		struct htb_class_inner {
 			struct htb_prio clprio[TC_HTB_NUMPRIO];
 		} inner;
-	} un;
+	};
 	s64	pq_key;
 
 	int	prio_activity;	/* for which prios are we active */
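Note: the only structural change above is dropping the union's name, `} un;` becoming `};`, which turns it into an anonymous union so `leaf` and `inner` are addressed directly (`cl->leaf.q` instead of `cl->un.leaf.q`). A minimal stand-alone sketch of that C idiom (type and field names here are invented, not from sch_htb):

```c
#include <stdio.h>

struct demo_class {
	int level;
	union {					/* anonymous union: no trailing name */
		struct { int deficit; } leaf;	/* valid when level == 0 */
		struct { int count; } inner;	/* valid otherwise */
	};
};

int main(void)
{
	struct demo_class cl = { .level = 0 };

	/* members are reached without the old "un." step */
	cl.leaf.deficit = 1500;
	printf("%d\n", cl.leaf.deficit);
	return 0;
}
```

Since only the member paths change, every hunk below is mechanical; the union's layout and size are untouched.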
@@ -411,13 +411,13 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
 			int prio = ffz(~m);
 			m &= ~(1 << prio);
 
-			if (p->un.inner.clprio[prio].feed.rb_node)
+			if (p->inner.clprio[prio].feed.rb_node)
 				/* parent already has its feed in use so that
 				 * reset bit in mask as parent is already ok
 				 */
 				mask &= ~(1 << prio);
 
-			htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);
+			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
 		}
 		p->prio_activity |= mask;
 		cl = p;
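Aside: the `prio = ffz(~m); m &= ~(1 << prio);` pair in this loop walks the set bits of `m` lowest-first; `ffz` finds the first zero bit, so `ffz(~m)` is the index of the lowest set bit of `m`. A userspace sketch of the same idiom, with `__builtin_ctz` standing in for the kernel's `ffz(~m)`:

```c
#include <stdio.h>

int main(void)
{
	unsigned int m = 0x29;	/* priorities 0, 3 and 5 pending */

	while (m) {	/* guard m != 0 before asking for a set bit */
		int prio = __builtin_ctz(m);	/* lowest set bit of m */
		m &= ~(1U << prio);		/* clear it, as in the hunk */
		printf("activate prio %d\n", prio);
	}
	return 0;
}
```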
@@ -447,19 +447,19 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 			int prio = ffz(~m);
 			m &= ~(1 << prio);
 
-			if (p->un.inner.clprio[prio].ptr == cl->node + prio) {
+			if (p->inner.clprio[prio].ptr == cl->node + prio) {
 				/* we are removing child which is pointed to from
 				 * parent feed - forget the pointer but remember
 				 * classid
 				 */
-				p->un.inner.clprio[prio].last_ptr_id = cl->common.classid;
-				p->un.inner.clprio[prio].ptr = NULL;
+				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
+				p->inner.clprio[prio].ptr = NULL;
 			}
 
 			htb_safe_rb_erase(cl->node + prio,
-					  &p->un.inner.clprio[prio].feed);
+					  &p->inner.clprio[prio].feed);
 
-			if (!p->un.inner.clprio[prio].feed.rb_node)
+			if (!p->inner.clprio[prio].feed.rb_node)
 				mask |= 1 << prio;
 		}
 
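The deactivate path above also shows the feed-cursor trick this patch leaves intact: if the class being removed is the one the parent's round-robin pointer currently rests on, the pointer is forgotten but its classid is kept in `last_ptr_id` so a later lookup can resume near the same spot. A toy illustration of that invalidate-but-remember pattern (all names invented):

```c
#include <stddef.h>

struct cursor {
	int *ptr;	/* current position, or NULL once invalidated */
	int last_id;	/* id remembered so iteration can resume nearby */
};

static void invalidate_if_current(struct cursor *cur, int *victim, int id)
{
	if (cur->ptr == victim) {
		cur->last_id = id;	/* remember the classid ... */
		cur->ptr = NULL;	/* ... but forget the pointer */
	}
}

int main(void)
{
	int ids[2] = { 10, 11 };
	struct cursor cur = { .ptr = &ids[1], .last_id = -1 };

	invalidate_if_current(&cur, &ids[1], 11);
	return !(cur.ptr == NULL && cur.last_id == 11);	/* 0 on success */
}
```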
@@ -555,7 +555,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
  */
 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 {
-	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
+	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
 
 	if (!cl->prio_activity) {
 		cl->prio_activity = 1 << cl->prio;
@@ -599,7 +599,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		__qdisc_drop(skb, to_free);
 		return ret;
 #endif
-	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q,
+	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
 					to_free)) != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(ret)) {
 			qdisc_qstats_drop(sch);
@@ -807,7 +807,7 @@ static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
 		cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
 		if (!cl->level)
 			return cl;
-		clp = &cl->un.inner.clprio[prio];
+		clp = &cl->inner.clprio[prio];
 		(++sp)->root = clp->feed.rb_node;
 		sp->pptr = &clp->ptr;
 		sp->pid = &clp->last_ptr_id;
@@ -841,7 +841,7 @@ next:
 		 * graft operation on the leaf since last dequeue;
 		 * simply deactivate and skip such class
 		 */
-		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
+		if (unlikely(cl->leaf.q->q.qlen == 0)) {
 			struct htb_class *next;
 			htb_deactivate(q, cl);
 
@@ -857,12 +857,12 @@ next:
 			goto next;
 		}
 
-		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
+		skb = cl->leaf.q->dequeue(cl->leaf.q);
 		if (likely(skb != NULL))
 			break;
 
-		qdisc_warn_nonwc("htb", cl->un.leaf.q);
-		htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr:
+		qdisc_warn_nonwc("htb", cl->leaf.q);
+		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
 				  &q->hlevel[0].hprio[prio].ptr);
 		cl = htb_lookup_leaf(hprio, prio);
 
@@ -870,16 +870,16 @@ next:
 
 	if (likely(skb != NULL)) {
 		bstats_update(&cl->bstats, skb);
-		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
-		if (cl->un.leaf.deficit[level] < 0) {
-			cl->un.leaf.deficit[level] += cl->quantum;
-			htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
+		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
+		if (cl->leaf.deficit[level] < 0) {
+			cl->leaf.deficit[level] += cl->quantum;
+			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
 					 &q->hlevel[0].hprio[prio].ptr);
 		}
 		/* this used to be after charge_class but this constelation
 		 * gives us slightly better performance
 		 */
-		if (!cl->un.leaf.q->q.qlen)
+		if (!cl->leaf.q->q.qlen)
 			htb_deactivate(q, cl);
 		htb_charge_class(q, cl, level, skb);
 	}
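This dequeue hunk is the deficit-round-robin core: each dequeued packet is charged against `cl->leaf.deficit[level]`, and once the deficit goes negative the class is topped up with one `quantum` and the round-robin pointer advances. A compact sketch of that accounting (illustrative names, not the sch_htb API):

```c
#include <stdio.h>

struct drr_class {
	int deficit;
	int quantum;
};

/* Returns 1 when the RR pointer should move on, mirroring the
 * htb_next_rb_node() call in the hunk above. */
static int drr_charge(struct drr_class *cl, int pkt_len)
{
	cl->deficit -= pkt_len;		/* cl->leaf.deficit[level] -= ... */
	if (cl->deficit < 0) {
		cl->deficit += cl->quantum;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct drr_class cl = { .deficit = 1000, .quantum = 1500 };

	/* a 1200-byte packet overdraws the deficit: recharge and advance */
	printf("advance=%d deficit=%d\n", drr_charge(&cl, 1200), cl.deficit);
	return 0;
}
```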
@@ -956,10 +956,10 @@ static void htb_reset(struct Qdisc *sch)
 	for (i = 0; i < q->clhash.hashsize; i++) {
 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 			if (cl->level)
-				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
+				memset(&cl->inner, 0, sizeof(cl->inner));
 			else {
-				if (cl->un.leaf.q)
-					qdisc_reset(cl->un.leaf.q);
+				if (cl->leaf.q)
+					qdisc_reset(cl->leaf.q);
 			}
 			cl->prio_activity = 0;
 			cl->cmode = HTB_CAN_SEND;
@@ -1082,8 +1082,8 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 	 */
 	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
 	tcm->tcm_handle = cl->common.classid;
-	if (!cl->level && cl->un.leaf.q)
-		tcm->tcm_info = cl->un.leaf.q->handle;
+	if (!cl->level && cl->leaf.q)
+		tcm->tcm_info = cl->leaf.q->handle;
 
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (nest == NULL)
@@ -1126,9 +1126,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 	};
 	__u32 qlen = 0;
 
-	if (!cl->level && cl->un.leaf.q) {
-		qlen = cl->un.leaf.q->q.qlen;
-		qs.backlog = cl->un.leaf.q->qstats.backlog;
+	if (!cl->level && cl->leaf.q) {
+		qlen = cl->leaf.q->q.qlen;
+		qs.backlog = cl->leaf.q->qstats.backlog;
 	}
 	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
 				    INT_MIN, INT_MAX);
@@ -1156,14 +1156,14 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 				     cl->common.classid, extack)) == NULL)
 		return -ENOBUFS;
 
-	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
+	*old = qdisc_replace(sch, new, &cl->leaf.q);
 	return 0;
 }
 
 static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
 {
 	struct htb_class *cl = (struct htb_class *)arg;
-	return !cl->level ? cl->un.leaf.q : NULL;
+	return !cl->level ? cl->leaf.q : NULL;
 }
 
 static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
@@ -1189,15 +1189,15 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
 {
 	struct htb_class *parent = cl->parent;
 
-	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
+	WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
 
 	if (parent->cmode != HTB_CAN_SEND)
 		htb_safe_rb_erase(&parent->pq_node,
 				  &q->hlevel[parent->level].wait_pq);
 
 	parent->level = 0;
-	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
-	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
+	memset(&parent->inner, 0, sizeof(parent->inner));
+	parent->leaf.q = new_q ? new_q : &noop_qdisc;
 	parent->tokens = parent->buffer;
 	parent->ctokens = parent->cbuffer;
 	parent->t_c = ktime_get_ns();
@@ -1207,8 +1207,8 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 {
 	if (!cl->level) {
-		WARN_ON(!cl->un.leaf.q);
-		qdisc_destroy(cl->un.leaf.q);
+		WARN_ON(!cl->leaf.q);
+		qdisc_destroy(cl->leaf.q);
 	}
 	gen_kill_estimator(&cl->rate_est);
 	tcf_block_put(cl->block);
@@ -1270,11 +1270,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 	sch_tree_lock(sch);
 
 	if (!cl->level) {
-		unsigned int qlen = cl->un.leaf.q->q.qlen;
-		unsigned int backlog = cl->un.leaf.q->qstats.backlog;
+		unsigned int qlen = cl->leaf.q->q.qlen;
+		unsigned int backlog = cl->leaf.q->qstats.backlog;
 
-		qdisc_reset(cl->un.leaf.q);
-		qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
+		qdisc_reset(cl->leaf.q);
+		qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog);
 	}
 
 	/* delete from hash and active; remainder in destroy_class */
@@ -1403,13 +1403,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 					  classid, NULL);
 		sch_tree_lock(sch);
 		if (parent && !parent->level) {
-			unsigned int qlen = parent->un.leaf.q->q.qlen;
-			unsigned int backlog = parent->un.leaf.q->qstats.backlog;
+			unsigned int qlen = parent->leaf.q->q.qlen;
+			unsigned int backlog = parent->leaf.q->qstats.backlog;
 
 			/* turn parent into inner node */
-			qdisc_reset(parent->un.leaf.q);
-			qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
-			qdisc_destroy(parent->un.leaf.q);
+			qdisc_reset(parent->leaf.q);
+			qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog);
+			qdisc_destroy(parent->leaf.q);
 			if (parent->prio_activity)
 				htb_deactivate(q, parent);
 
@@ -1420,10 +1420,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		}
 		parent->level = (parent->parent ? parent->parent->level
 				 : TC_HTB_MAXDEPTH) - 1;
-		memset(&parent->un.inner, 0, sizeof(parent->un.inner));
+		memset(&parent->inner, 0, sizeof(parent->inner));
 	}
 	/* leaf (we) needs elementary qdisc */
-	cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
+	cl->leaf.q = new_q ? new_q : &noop_qdisc;
 
 	cl->common.classid = classid;
 	cl->parent = parent;
@@ -1439,8 +1439,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		qdisc_class_hash_insert(&q->clhash, &cl->common);
 		if (parent)
 			parent->children++;
-		if (cl->un.leaf.q != &noop_qdisc)
-			qdisc_hash_add(cl->un.leaf.q, true);
+		if (cl->leaf.q != &noop_qdisc)
+			qdisc_hash_add(cl->leaf.q, true);
 	} else {
 		if (tca[TCA_RATE]) {
 			err = gen_replace_estimator(&cl->bstats, NULL,
@@ -1462,7 +1462,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
 
 	/* it used to be a nasty bug here, we have to check that node
-	 * is really leaf before changing cl->un.leaf !
+	 * is really leaf before changing cl->leaf !
 	 */
 	if (!cl->level) {
 		u64 quantum = cl->rate.rate_bytes_ps;