@@ -2142,12 +2142,14 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
  *
  * Remember policies even when nobody has shared memory mapped.
  * The policies are kept in Red-Black tree linked from the inode.
- * They are protected by the sp->lock spinlock, which should be held
+ * They are protected by the sp->lock rwlock, which should be held
  * for any accesses to the tree.
  */
 
-/* lookup first element intersecting start-end */
-/* Caller holds sp->lock */
+/*
+ * lookup first element intersecting start-end. Caller holds sp->lock for
+ * reading or for writing
+ */
 static struct sp_node *
 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
 {
@@ -2178,8 +2180,10 @@ sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
 	return rb_entry(n, struct sp_node, nd);
 }
 
-/* Insert a new shared policy into the list. */
-/* Caller holds sp->lock */
+/*
+ * Insert a new shared policy into the list. Caller holds sp->lock for
+ * writing.
+ */
 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
 {
 	struct rb_node **p = &sp->root.rb_node;
@@ -2211,13 +2215,13 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
 
 	if (!sp->root.rb_node)
 		return NULL;
-	spin_lock(&sp->lock);
+	read_lock(&sp->lock);
 	sn = sp_lookup(sp, idx, idx+1);
 	if (sn) {
 		mpol_get(sn->policy);
 		pol = sn->policy;
 	}
-	spin_unlock(&sp->lock);
+	read_unlock(&sp->lock);
 	return pol;
 }
 
@@ -2360,7 +2364,7 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
 	int ret = 0;
 
 restart:
-	spin_lock(&sp->lock);
+	write_lock(&sp->lock);
 	n = sp_lookup(sp, start, end);
 	/* Take care of old policies in the same range. */
 	while (n && n->start < end) {
@@ -2393,7 +2397,7 @@ restart:
 	}
 	if (new)
 		sp_insert(sp, new);
-	spin_unlock(&sp->lock);
+	write_unlock(&sp->lock);
 	ret = 0;
 
 err_out:
@@ -2405,7 +2409,7 @@ err_out:
 	return ret;
 
 alloc_new:
-	spin_unlock(&sp->lock);
+	write_unlock(&sp->lock);
 	ret = -ENOMEM;
 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
 	if (!n_new)
@@ -2431,7 +2435,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 	int ret;
 
 	sp->root = RB_ROOT;	/* empty tree == default mempolicy */
-	spin_lock_init(&sp->lock);
+	rwlock_init(&sp->lock);
 
 	if (mpol) {
 		struct vm_area_struct pvma;
@@ -2497,14 +2501,14 @@ void mpol_free_shared_policy(struct shared_policy *p)
 
 	if (!p->root.rb_node)
 		return;
-	spin_lock(&p->lock);
+	write_lock(&p->lock);
 	next = rb_first(&p->root);
 	while (next) {
 		n = rb_entry(next, struct sp_node, nd);
 		next = rb_next(&n->nd);
 		sp_delete(p, n);
 	}
-	spin_unlock(&p->lock);
+	write_unlock(&p->lock);
 }
 
 #ifdef CONFIG_NUMA_BALANCING