@@ -536,7 +536,6 @@ void synchronize_rcu(void)
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-static unsigned long sync_rcu_preempt_exp_count;
 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
 
 /*
@@ -704,12 +703,10 @@ void synchronize_rcu_expedited(void)
 {
 	struct rcu_node *rnp;
 	struct rcu_state *rsp = rcu_state_p;
-	unsigned long snap;
+	unsigned long s;
 	int trycount = 0;
 
-	smp_mb(); /* Caller's modifications seen first by other CPUs. */
-	snap = READ_ONCE(sync_rcu_preempt_exp_count) + 1;
-	smp_mb(); /* Above access cannot bleed into critical section. */
+	s = rcu_exp_gp_seq_snap(rsp);
 
 	/*
 	 * Acquire lock, falling back to synchronize_rcu() if too many
@@ -717,8 +714,7 @@ void synchronize_rcu_expedited(void)
 	 * expedited grace period for us, just leave.
 	 */
 	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
-		if (ULONG_CMP_LT(snap,
-				 READ_ONCE(sync_rcu_preempt_exp_count)))
+		if (rcu_exp_gp_seq_done(rsp, s))
 			goto mb_ret; /* Others did our work for us. */
 		if (trycount++ < 10) {
 			udelay(trycount * num_online_cpus());
@@ -727,8 +723,9 @@ void synchronize_rcu_expedited(void)
 			return;
 		}
 	}
-	if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count)))
+	if (rcu_exp_gp_seq_done(rsp, s))
 		goto unlock_mb_ret; /* Others did our work for us. */
+	rcu_exp_gp_seq_start(rsp);
 
 	/* force all RCU readers onto ->blkd_tasks lists. */
 	synchronize_sched_expedited();
@@ -750,8 +747,7 @@ void synchronize_rcu_expedited(void)
 					   sync_rcu_preempt_exp_done(rnp));
 
 	/* Clean up and exit. */
-	smp_mb(); /* ensure expedited GP seen before counter increment. */
-	WRITE_ONCE(sync_rcu_preempt_exp_count, sync_rcu_preempt_exp_count + 1);
+	rcu_exp_gp_seq_end(rsp);
 unlock_mb_ret:
 	mutex_unlock(&sync_rcu_preempt_exp_mutex);
 mb_ret:
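
Note on the helpers this patch switches to: the open-coded sync_rcu_preempt_exp_count
counter and its explicit smp_mb() pairs are replaced by the rcu_exp_gp_seq_snap(),
rcu_exp_gp_seq_start(), rcu_exp_gp_seq_end(), and rcu_exp_gp_seq_done() wrappers around a
shared expedited sequence counter. The sketch below illustrates the underlying
sequence-snapshot idiom under simplified assumptions; the names (exp_sequence, exp_seq_*)
are invented for illustration, and the real kernel helpers additionally use
READ_ONCE()/WRITE_ONCE() plus memory barriers and operate on the rcu_state structure.

	/* Illustrative sketch only -- not the kernel implementation. */
	#include <stdbool.h>

	static unsigned long exp_sequence;  /* low bit set: a grace period is in flight */

	static void exp_seq_start(void)
	{
		exp_sequence++;             /* counter becomes odd: GP in progress */
	}

	static void exp_seq_end(void)
	{
		exp_sequence++;             /* counter becomes even: GP complete */
	}

	/*
	 * Return the counter value that, once reached, guarantees a full grace
	 * period has elapsed after this snapshot.  If a GP is already in flight
	 * (odd counter) it might not cover our updates, so we must wait for the
	 * one after it; "+ 3" then rounding down to even handles both cases.
	 */
	static unsigned long exp_seq_snap(void)
	{
		return (exp_sequence + 3) & ~0x1UL;
	}

	/* Has someone already driven the counter past our snapshot s? */
	static bool exp_seq_done(unsigned long s)
	{
		return (long)(exp_sequence - s) >= 0;  /* wrap-safe, like ULONG_CMP_GE() */
	}

A caller takes a snapshot before trying to acquire the expedited mutex and, on each failed
attempt, rechecks exp_seq_done(); once it returns true, some other task has completed a
full expedited grace period on the caller's behalf and the caller can return without doing
the work itself, which is exactly what the two rcu_exp_gp_seq_done() checks in the patch do.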