@@ -631,15 +631,11 @@ EXPORT_TRACEPOINT_SYMBOL(module_get);
 /* Init the unload section of the module. */
 static int module_unload_init(struct module *mod)
 {
-	mod->refptr = alloc_percpu(struct module_ref);
-	if (!mod->refptr)
-		return -ENOMEM;
-
 	INIT_LIST_HEAD(&mod->source_list);
 	INIT_LIST_HEAD(&mod->target_list);
 
 	/* Hold reference count during initialization. */
-	raw_cpu_write(mod->refptr->incs, 1);
+	atomic_set(&mod->refcnt, 1);
 
 	return 0;
 }
@@ -721,8 +717,6 @@ static void module_unload_free(struct module *mod)
 		kfree(use);
 	}
 	mutex_unlock(&module_mutex);
-
-	free_percpu(mod->refptr);
 }
 
 #ifdef CONFIG_MODULE_FORCE_UNLOAD
@@ -772,28 +766,7 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
 
 unsigned long module_refcount(struct module *mod)
 {
-	unsigned long incs = 0, decs = 0;
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		decs += per_cpu_ptr(mod->refptr, cpu)->decs;
-	/*
-	 * ensure the incs are added up after the decs.
-	 * module_put ensures incs are visible before decs with smp_wmb.
-	 *
-	 * This 2-count scheme avoids the situation where the refcount
-	 * for CPU0 is read, then CPU0 increments the module refcount,
-	 * then CPU1 drops that refcount, then the refcount for CPU1 is
-	 * read. We would record a decrement but not its corresponding
-	 * increment so we would see a low count (disaster).
-	 *
-	 * Rare situation? But module_refcount can be preempted, and we
-	 * might be tallying up 4096+ CPUs. So it is not impossible.
-	 */
-	smp_rmb();
-	for_each_possible_cpu(cpu)
-		incs += per_cpu_ptr(mod->refptr, cpu)->incs;
-	return incs - decs;
+	return (unsigned long)atomic_read(&mod->refcnt);
 }
 EXPORT_SYMBOL(module_refcount);
 
@@ -935,7 +908,7 @@ void __module_get(struct module *module)
 {
 	if (module) {
 		preempt_disable();
-		__this_cpu_inc(module->refptr->incs);
+		atomic_inc(&module->refcnt);
 		trace_module_get(module, _RET_IP_);
 		preempt_enable();
 	}
@@ -950,7 +923,7 @@ bool try_module_get(struct module *module)
 		preempt_disable();
 
 		if (likely(module_is_live(module))) {
-			__this_cpu_inc(module->refptr->incs);
+			atomic_inc(&module->refcnt);
 			trace_module_get(module, _RET_IP_);
 		} else
 			ret = false;
@@ -965,9 +938,7 @@ void module_put(struct module *module)
 {
 	if (module) {
 		preempt_disable();
-		smp_wmb(); /* see comment in module_refcount */
-		__this_cpu_inc(module->refptr->decs);
-
+		atomic_dec(&module->refcnt);
 		trace_module_put(module, _RET_IP_);
 		preempt_enable();
 	}