@@ -848,6 +848,46 @@ static inline gfp_t gfp_exact_node(gfp_t flags)
 }
 #endif
 
+static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
+{
+        struct kmem_cache_node *n;
+
+        /*
+         * Set up the kmem_cache_node for cpu before we can
+         * begin anything. Make sure some other cpu on this
+         * node has not already allocated this
+         */
+        n = get_node(cachep, node);
+        if (n) {
+                spin_lock_irq(&n->list_lock);
+                n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
+                                cachep->num;
+                spin_unlock_irq(&n->list_lock);
+
+                return 0;
+        }
+
+        n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
+        if (!n)
+                return -ENOMEM;
+
+        kmem_cache_node_init(n);
+        n->next_reap = jiffies + REAPTIMEOUT_NODE +
+                ((unsigned long)cachep) % REAPTIMEOUT_NODE;
+
+        n->free_limit =
+                (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
+
+        /*
+         * The kmem_cache_nodes don't come and go as CPUs
+         * come and go. slab_mutex is sufficient
+         * protection here.
+         */
+        cachep->node[node] = n;
+
+        return 0;
+}
+
 /*
  * Allocates and initializes node for a node on each slab cache, used for
  * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
@@ -859,39 +899,15 @@ static inline gfp_t gfp_exact_node(gfp_t flags)
  */
 static int init_cache_node_node(int node)
 {
+        int ret;
         struct kmem_cache *cachep;
-        struct kmem_cache_node *n;
-        const size_t memsize = sizeof(struct kmem_cache_node);
 
         list_for_each_entry(cachep, &slab_caches, list) {
-                /*
-                 * Set up the kmem_cache_node for cpu before we can
-                 * begin anything. Make sure some other cpu on this
-                 * node has not already allocated this
-                 */
-                n = get_node(cachep, node);
-                if (!n) {
-                        n = kmalloc_node(memsize, GFP_KERNEL, node);
-                        if (!n)
-                                return -ENOMEM;
-                        kmem_cache_node_init(n);
-                        n->next_reap = jiffies + REAPTIMEOUT_NODE +
-                            ((unsigned long)cachep) % REAPTIMEOUT_NODE;
-
-                        /*
-                         * The kmem_cache_nodes don't come and go as CPUs
-                         * come and go. slab_mutex is sufficient
-                         * protection here.
-                         */
-                        cachep->node[node] = n;
-                }
-
-                spin_lock_irq(&n->list_lock);
-                n->free_limit =
-                        (1 + nr_cpus_node(node)) *
-                        cachep->batchcount + cachep->num;
-                spin_unlock_irq(&n->list_lock);
+                ret = init_cache_node(cachep, node, GFP_KERNEL);
+                if (ret)
+                        return ret;
         }
+
         return 0;
 }
 
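A note on the limit both branches of init_cache_node() compute: free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num is the per-node cap on cached free objects before empty slabs become eligible for reclaim. As a worked example with made-up numbers: with 4 CPUs on the node, batchcount = 16, and num = 30 objects per slab, the cap is (1 + 4) * 16 + 30 = 110 free objects.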
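For readers who want the shape of the new helper without the slab internals, here is a minimal userspace sketch of the same check-then-allocate-then-publish pattern. It is an illustration under assumptions, not kernel code: cache, cache_node, table_mutex, init_node(), and node_free_limit() are hypothetical stand-ins for kmem_cache, kmem_cache_node, slab_mutex, init_cache_node(), and the formula above.

/*
 * Illustrative userspace sketch of the pattern init_cache_node()
 * factors out; all names are hypothetical stand-ins, not kernel APIs.
 */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

#define MAX_NODES 8

struct cache_node {
        pthread_spinlock_t list_lock;   /* stand-in for n->list_lock */
        unsigned long free_limit;       /* cap on cached free objects */
};

struct cache {
        unsigned int batchcount;
        unsigned int num;               /* objects per slab */
        struct cache_node *node[MAX_NODES];     /* guarded by table_mutex */
};

/* Stand-in for slab_mutex: serializes node setup and teardown. */
static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long node_free_limit(const struct cache *c, int cpus_on_node)
{
        /* Same shape as (1 + nr_cpus_node(node)) * batchcount + num. */
        return (1UL + cpus_on_node) * c->batchcount + c->num;
}

/* Caller must hold table_mutex. Returns 0 on success or -ENOMEM. */
static int init_node(struct cache *c, int node, int cpus_on_node)
{
        struct cache_node *n = c->node[node];

        if (n) {
                /*
                 * Another CPU already set this node up; only refresh
                 * the limit, under the node's own lock because other
                 * CPUs may be freeing into it concurrently.
                 */
                pthread_spin_lock(&n->list_lock);
                n->free_limit = node_free_limit(c, cpus_on_node);
                pthread_spin_unlock(&n->list_lock);
                return 0;
        }

        n = calloc(1, sizeof(*n));
        if (!n)
                return -ENOMEM;

        pthread_spin_init(&n->list_lock, PTHREAD_PROCESS_PRIVATE);
        /* No lock needed: the node is not visible to anyone yet. */
        n->free_limit = node_free_limit(c, cpus_on_node);

        /* Publish; table_mutex keeps concurrent writers out. */
        c->node[node] = n;
        return 0;
}

The locking asymmetry mirrors the patch: a node that is already published can be touched concurrently by frees on other CPUs, so refreshing its limit takes the node's own lock, while the freshly allocated node is invisible until the final pointer store, which the table-wide mutex serializes, just as slab_mutex protects cachep->node[node] in the kernel.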