@@ -156,6 +156,10 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 
+/* rcuc/rcub kthread realtime priority */
+static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
+module_param(kthread_prio, int, 0644);
+
 /*
  * Track the rcutorture test sequence number and the update version
  * number within a given test.  The rcutorture_testseq is incremented
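
module_param() on built-in code such as this makes the value settable on the
kernel command line and exposes it under /sys/module/; with mode 0644 it is
world-readable and root-writable at runtime, though kthread_prio is consulted
when kthreads are spawned, so a later write does not change already-running
kthreads. A minimal sketch of the idiom (the demo_* names are hypothetical,
not from the patch):

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	/* Hypothetical knob mirroring kthread_prio above. */
	static int demo_prio = 1;		/* stand-in for CONFIG_RCU_KTHREAD_PRIO */
	module_param(demo_prio, int, 0644);	/* world-readable, root-writable */
	MODULE_PARM_DESC(demo_prio, "SCHED_FIFO priority for demo kthreads");

Assuming the usual "rcutree." parameter prefix for this file, the parameter
added by the patch would be set at boot as rcutree.kthread_prio=N.
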
@@ -3597,17 +3601,35 @@ static int rcu_pm_notify(struct notifier_block *self,
 static int __init rcu_spawn_gp_kthread(void)
 {
 	unsigned long flags;
+	int kthread_prio_in = kthread_prio;
 	struct rcu_node *rnp;
 	struct rcu_state *rsp;
+	struct sched_param sp;
 	struct task_struct *t;
 
+	/* Force priority into range. */
+	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
+		kthread_prio = 1;
+	else if (kthread_prio < 0)
+		kthread_prio = 0;
+	else if (kthread_prio > 99)
+		kthread_prio = 99;
+	if (kthread_prio != kthread_prio_in)
+		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
+			 kthread_prio, kthread_prio_in);
+
 	rcu_scheduler_fully_active = 1;
 	for_each_rcu_flavor(rsp) {
-		t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
+		t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
 		BUG_ON(IS_ERR(t));
 		rnp = rcu_get_root(rsp);
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		rsp->gp_kthread = t;
+		if (kthread_prio) {
+			sp.sched_priority = kthread_prio;
+			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+		}
+		wake_up_process(t);
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 	rcu_spawn_nocb_kthreads();
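
The key change in this hunk is kthread_run() -> kthread_create(): the
grace-period kthread is now created in the stopped state, its SCHED_FIFO
priority is applied before it has ever run, and only then is it woken.
A minimal self-contained sketch of that pattern (a hypothetical demo_*
module, not part of the patch; sched_setscheduler_nocheck() is meant for
in-kernel callers such as this built-in code):

	#include <linux/err.h>
	#include <linux/kernel.h>
	#include <linux/kthread.h>
	#include <linux/module.h>
	#include <linux/sched.h>

	static int demo_prio = 1;	/* hypothetical priority knob */
	module_param(demo_prio, int, 0644);

	static struct task_struct *demo_task;

	static int demo_threadfn(void *unused)
	{
		/* Sleep in one-second chunks until asked to stop. */
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);
		return 0;
	}

	static int __init demo_init(void)
	{
		struct sched_param sp;

		/* Created stopped, so the priority applies before first run. */
		demo_task = kthread_create(demo_threadfn, NULL, "demo");
		if (IS_ERR(demo_task))
			return PTR_ERR(demo_task);
		if (demo_prio) {
			/* Keep within SCHED_FIFO's valid 1..99 range, as the patch does. */
			sp.sched_priority = clamp(demo_prio, 1, 99);
			sched_setscheduler_nocheck(demo_task, SCHED_FIFO, &sp);
		}
		wake_up_process(demo_task);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		kthread_stop(demo_task);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");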