@@ -3967,22 +3967,22 @@ void rcu_scheduler_starting(void)
  * Compute the per-level fanout, either using the exact fanout specified
  * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
  */
-static void __init rcu_init_levelspread(struct rcu_state *rsp)
+static void __init rcu_init_levelspread(int *levelspread, const int *levelcnt)
 {
 	int i;
 
 	if (rcu_fanout_exact) {
-		rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
+		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
 		for (i = rcu_num_lvls - 2; i >= 0; i--)
-			rsp->levelspread[i] = RCU_FANOUT;
+			levelspread[i] = RCU_FANOUT;
 	} else {
 		int ccur;
 		int cprv;
 
 		cprv = nr_cpu_ids;
 		for (i = rcu_num_lvls - 1; i >= 0; i--) {
-			ccur = rsp->levelcnt[i];
-			rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
+			ccur = levelcnt[i];
+			levelspread[i] = (cprv + ccur - 1) / ccur;
 			cprv = ccur;
 		}
 	}
@@ -4005,6 +4005,9 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 					    "rcu_node_fqs_2",
 					    "rcu_node_fqs_3" };
 	static u8 fl_mask = 0x1;
+
+	int levelcnt[RCU_NUM_LVLS];		/* # nodes in each level. */
+	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
 	int cpustride = 1;
 	int i;
 	int j;
@@ -4019,19 +4022,19 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 	/* Initialize the level-tracking arrays. */
 
 	for (i = 0; i < rcu_num_lvls; i++)
-		rsp->levelcnt[i] = num_rcu_lvl[i];
+		levelcnt[i] = num_rcu_lvl[i];
 	for (i = 1; i < rcu_num_lvls; i++)
-		rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
-	rcu_init_levelspread(rsp);
+		rsp->level[i] = rsp->level[i - 1] + levelcnt[i - 1];
+	rcu_init_levelspread(levelspread, levelcnt);
 	rsp->flavor_mask = fl_mask;
 	fl_mask <<= 1;
 
 	/* Initialize the elements themselves, starting from the leaves. */
 
 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
-		cpustride *= rsp->levelspread[i];
+		cpustride *= levelspread[i];
 		rnp = rsp->level[i];
-		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
+		for (j = 0; j < levelcnt[i]; j++, rnp++) {
 			raw_spin_lock_init(&rnp->lock);
 			lockdep_set_class_and_name(&rnp->lock,
 						   &rcu_node_class[i], buf[i]);
@@ -4051,10 +4054,10 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 			rnp->grpmask = 0;
 			rnp->parent = NULL;
 		} else {
-			rnp->grpnum = j % rsp->levelspread[i - 1];
+			rnp->grpnum = j % levelspread[i - 1];
 			rnp->grpmask = 1UL << rnp->grpnum;
 			rnp->parent = rsp->level[i - 1] +
-				      j / rsp->levelspread[i - 1];
+				      j / levelspread[i - 1];
 		}
 		rnp->level = i;
 		INIT_LIST_HEAD(&rnp->blkd_tasks);
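
The hunks above stop storing levelcnt[] and levelspread[] in struct rcu_state: rcu_init_one() now keeps them in on-stack arrays and passes them to rcu_init_levelspread() as explicit parameters, since they are only needed while the rcu_node tree is being laid out. The following is a stand-alone user-space sketch of the balanced branch of that computation, not kernel code; NUM_LVLS, NR_CPUS, and the levelcnt[] example values are made-up stand-ins for RCU_NUM_LVLS, nr_cpu_ids, and num_rcu_lvl[].

#include <stdio.h>

#define NUM_LVLS 2	/* stand-in for RCU_NUM_LVLS */
#define NR_CPUS  40	/* stand-in for nr_cpu_ids */

/* Mirrors the "balanced tree" branch of rcu_init_levelspread(). */
static void init_levelspread(int *levelspread, const int *levelcnt)
{
	int cprv = NR_CPUS;
	int i;

	for (i = NUM_LVLS - 1; i >= 0; i--) {
		int ccur = levelcnt[i];

		/* Ceiling division: children each node at this level must cover. */
		levelspread[i] = (cprv + ccur - 1) / ccur;
		cprv = ccur;
	}
}

int main(void)
{
	int levelcnt[NUM_LVLS] = { 1, 3 };	/* one root node, three leaf nodes */
	int levelspread[NUM_LVLS];
	int i;

	init_levelspread(levelspread, levelcnt);
	for (i = 0; i < NUM_LVLS; i++)
		printf("level %d: %d node(s), spread %d\n",
		       i, levelcnt[i], levelspread[i]);
	return 0;
}

For these example numbers (40 CPUs spread over three leaf nodes under a single root), the ceiling division yields a spread of 14 CPUs per leaf node and 3 leaf nodes under the root.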