@@ -96,7 +96,7 @@ static DEFINE_SPINLOCK(uidhash_lock);
 
 /* root_user.__count is 1, for init task cred */
 struct user_struct root_user = {
-	.__count	= ATOMIC_INIT(1),
+	.__count	= REFCOUNT_INIT(1),
 	.processes	= ATOMIC_INIT(1),
 	.sigpending	= ATOMIC_INIT(0),
 	.locked_shm	= 0,
@@ -123,7 +123,7 @@ static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
 
 	hlist_for_each_entry(user, hashent, uidhash_node) {
 		if (uid_eq(user->uid, uid)) {
-			atomic_inc(&user->__count);
+			refcount_inc(&user->__count);
 			return user;
 		}
 	}
@@ -170,7 +170,7 @@ void free_uid(struct user_struct *up)
 		return;
 
 	local_irq_save(flags);
-	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+	if (refcount_dec_and_lock(&up->__count, &uidhash_lock))
 		free_user(up, flags);
 	else
 		local_irq_restore(flags);
@@ -191,7 +191,7 @@ struct user_struct *alloc_uid(kuid_t uid)
 		goto out_unlock;
 
 	new->uid = uid;
-	atomic_set(&new->__count, 1);
+	refcount_set(&new->__count, 1);
 	ratelimit_state_init(&new->ratelimit, HZ, 100);
 	ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);
 
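
For context, the pattern the hunks above adopt is the generic refcount_t API from <linux/refcount.h>. The sketch below is illustrative only and is not part of the patch: "my_obj", "my_obj_alloc", "my_obj_get" and "my_obj_put" are hypothetical names, while REFCOUNT_INIT, refcount_set, refcount_inc and refcount_dec_and_test are the real kernel primitives being substituted for the open-coded atomic_t reference count.

/* Minimal sketch of the refcount_t lifecycle (illustrative names only). */
#include <linux/refcount.h>
#include <linux/slab.h>

struct my_obj {
	refcount_t refs;
	/* ... payload ... */
};

static struct my_obj *my_obj_alloc(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		refcount_set(&obj->refs, 1);	/* first reference held by the creator */
	return obj;
}

static struct my_obj *my_obj_get(struct my_obj *obj)
{
	refcount_inc(&obj->refs);	/* saturates and warns instead of wrapping on overflow */
	return obj;
}

static void my_obj_put(struct my_obj *obj)
{
	if (refcount_dec_and_test(&obj->refs))	/* true once the count drops to zero */
		kfree(obj);
}

Where the final put must also take a lock that protects a lookup structure, as free_uid() does with uidhash_lock above, refcount_dec_and_lock() combines the decrement with acquiring the spinlock only when the count reaches zero.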