@@ -355,6 +355,7 @@ rcu_perf_writer(void *arg)
 	int i = 0;
 	int i_max;
 	long me = (long)arg;
+	struct sched_param sp;
 	bool started = false, done = false, alldone = false;
 	u64 t;
 	u64 *wdp;
@@ -365,6 +366,8 @@ rcu_perf_writer(void *arg)
 	WARN_ON(rcu_gp_is_normal() && gp_exp);
 	WARN_ON(!wdpp);
 	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
+	sp.sched_priority = 1;
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 	t = ktime_get_mono_fast_ns();
 	if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
 		t_rcu_perf_writer_started = t;
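The two added lines promote each rcu_perf_writer() kthread to the SCHED_FIFO
real-time class at priority 1, presumably to keep ordinary CFS tasks from
perturbing the grace-period timing measurements. Below is a minimal sketch
(not part of the patch) of the same self-promotion pattern in a standalone
kthread; the module and thread names are hypothetical, and it assumes a
kernel of roughly the same vintage, where struct sched_param is visible via
<linux/sched.h> and sched_setscheduler_nocheck() is callable from the code
in question (whether it is exported to modules depends on the kernel version).

/*
 * Sketch only: a kthread that raises itself to SCHED_FIFO priority 1,
 * mirroring the two lines added to rcu_perf_writer() above.  The names
 * (demo_thread_fn, fifo_demo) are hypothetical.
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *demo_task;

static int demo_thread_fn(void *arg)
{
	struct sched_param sp;

	/* Same pattern as the patch: FIFO class, lowest RT priority. */
	sp.sched_priority = 1;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);

	while (!kthread_should_stop())
		msleep(100);	/* stand-in for the measured workload */
	return 0;
}

static int __init demo_init(void)
{
	demo_task = kthread_run(demo_thread_fn, NULL, "fifo_demo");
	return PTR_ERR_OR_ZERO(demo_task);
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");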