@@ -28,7 +28,7 @@ enum {
 };

 struct call_function_data {
-	struct call_single_data __percpu *csd;
+	call_single_data_t __percpu *csd;
 	cpumask_var_t cpumask;
 	cpumask_var_t cpumask_ipi;
 };
@@ -51,7 +51,7 @@ int smpcfd_prepare_cpu(unsigned int cpu)
 		free_cpumask_var(cfd->cpumask);
 		return -ENOMEM;
 	}
-	cfd->csd = alloc_percpu(struct call_single_data);
+	cfd->csd = alloc_percpu(call_single_data_t);
 	if (!cfd->csd) {
 		free_cpumask_var(cfd->cpumask);
 		free_cpumask_var(cfd->cpumask_ipi);
@@ -103,12 +103,12 @@ void __init call_function_init(void)
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static __always_inline void csd_lock_wait(struct call_single_data *csd)
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
 {
 	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
 }

-static __always_inline void csd_lock(struct call_single_data *csd)
+static __always_inline void csd_lock(call_single_data_t *csd)
 {
 	csd_lock_wait(csd);
 	csd->flags |= CSD_FLAG_LOCK;
@@ -116,12 +116,12 @@ static __always_inline void csd_lock(struct call_single_data *csd)
 	/*
 	 * prevent CPU from reordering the above assignment
 	 * to ->flags with any subsequent assignments to other
-	 * fields of the specified call_single_data structure:
+	 * fields of the specified call_single_data_t structure:
 	 */
 	smp_wmb();
 }

-static __always_inline void csd_unlock(struct call_single_data *csd)
+static __always_inline void csd_unlock(call_single_data_t *csd)
 {
 	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

@@ -131,14 +131,14 @@ static __always_inline void csd_unlock(struct call_single_data *csd)
 	smp_store_release(&csd->flags, 0);
 }

-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
+static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

 /*
- * Insert a previously allocated call_single_data element
+ * Insert a previously allocated call_single_data_t element
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
  */
-static int generic_exec_single(int cpu, struct call_single_data *csd,
+static int generic_exec_single(int cpu, call_single_data_t *csd,
 			       smp_call_func_t func, void *info)
 {
 	if (cpu == smp_processor_id()) {
@@ -210,7 +210,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 {
 	struct llist_head *head;
 	struct llist_node *entry;
-	struct call_single_data *csd, *csd_next;
+	call_single_data_t *csd, *csd_next;
 	static bool warned;

 	WARN_ON(!irqs_disabled());
@@ -268,8 +268,10 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 			     int wait)
 {
-	struct call_single_data *csd;
-	struct call_single_data csd_stack = { .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS };
+	call_single_data_t *csd;
+	call_single_data_t csd_stack = {
+		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
+	};
 	int this_cpu;
 	int err;

@@ -321,7 +323,7 @@ EXPORT_SYMBOL(smp_call_function_single);
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
-int smp_call_function_single_async(int cpu, struct call_single_data *csd)
+int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 {
 	int err = 0;

@@ -444,7 +446,7 @@ void smp_call_function_many(const struct cpumask *mask,

 	cpumask_clear(cfd->cpumask_ipi);
 	for_each_cpu(cpu, cfd->cpumask) {
-		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
+		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

 		csd_lock(csd);
 		if (wait)
@@ -460,7 +462,7 @@ void smp_call_function_many(const struct cpumask *mask,

 	if (wait) {
 		for_each_cpu(cpu, cfd->cpumask) {
-			struct call_single_data *csd;
+			call_single_data_t *csd;

 			csd = per_cpu_ptr(cfd->csd, cpu);
 			csd_lock_wait(csd);