@@ -85,32 +85,10 @@ enum mem_cgroup_events_target {
 	MEM_CGROUP_NTARGETS,
 };
 
-/*
- * Bits in struct cg_proto.flags
- */
-enum cg_proto_flags {
-	/* Currently active and new sockets should be assigned to cgroups */
-	MEMCG_SOCK_ACTIVE,
-	/* It was ever activated; we must disarm static keys on destruction */
-	MEMCG_SOCK_ACTIVATED,
-};
-
 struct cg_proto {
 	struct page_counter	memory_allocated;	/* Current allocated memory. */
-	struct percpu_counter	sockets_allocated;	/* Current number of sockets. */
 	int			memory_pressure;
-	long			sysctl_mem[3];
-	unsigned long		flags;
-	/*
-	 * memcg field is used to find which memcg we belong directly
-	 * Each memcg struct can hold more than one cg_proto, so container_of
-	 * won't really cut.
-	 *
-	 * The elegant solution would be having an inverse function to
-	 * proto_cgroup in struct proto, but that means polluting the structure
-	 * for everybody, instead of just for memcg users.
-	 */
-	struct mem_cgroup	*memcg;
+	bool			active;
 };
 
 #ifdef CONFIG_MEMCG
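
With the flag bits, socket counter, sysctl snapshot and memcg back-pointer gone,
activation state collapses to a single bool. A hypothetical sketch of how an
activation path could pair that bool with the memcg_sockets_enabled_key declared
further down in this header (the helper name is invented for illustration, not
part of the patch):

	/* Sketch only: helper name invented; not part of the patch. */
	static void cg_proto_activate(struct cg_proto *cg_proto)
	{
		if (!cg_proto->active) {
			static_branch_inc(&memcg_sockets_enabled_key);
			cg_proto->active = true;
		}
	}
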
@@ -192,6 +170,9 @@ struct mem_cgroup {
 	unsigned long low;
 	unsigned long high;
 
+	/* Range enforcement for interrupt charges */
+	struct work_struct high_work;
+
 	unsigned long soft_limit;
 
 	/* vmpressure notifications */
@@ -268,6 +249,10 @@ struct mem_cgroup {
 	struct wb_domain cgwb_domain;
 #endif
 
+#ifdef CONFIG_INET
+	unsigned long		socket_pressure;
+#endif
+
 	/* List of events which userspace want to receive */
 	struct list_head event_list;
 	spinlock_t event_list_lock;
@@ -275,7 +260,8 @@ struct mem_cgroup {
 	struct mem_cgroup_per_node *nodeinfo[0];
 	/* WARNING: nodeinfo must be the last member here */
 };
-extern struct cgroup_subsys_state *mem_cgroup_root_css;
+
+extern struct mem_cgroup *root_mem_cgroup;
 
 /**
  * mem_cgroup_events - count memory events against a cgroup
@@ -308,18 +294,34 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
 
 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
-struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
 
 static inline
 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
 }
 
+#define mem_cgroup_from_counter(counter, member)	\
+	container_of(counter, struct mem_cgroup, member)
+
 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
 				   struct mem_cgroup *,
 				   struct mem_cgroup_reclaim_cookie *);
 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
 
+/**
+ * parent_mem_cgroup - find the accounting parent of a memcg
+ * @memcg: memcg whose parent to find
+ *
+ * Returns the parent memcg, or NULL if this is the root or the memory
+ * controller is in legacy no-hierarchy mode.
+ */
+static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
+{
+	if (!memcg->memory.parent)
+		return NULL;
+	return mem_cgroup_from_counter(memcg->memory.parent, memory);
+}
+
 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
 					    struct mem_cgroup *root)
 {
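
parent_mem_cgroup() becomes an inline that climbs the page_counter hierarchy, and
mem_cgroup_from_counter() is just container_of() over the embedded counter. A
self-contained userspace sketch of that pattern (struct and macro names are
invented here) showing how the member argument recovers the owning object:

	#include <stddef.h>
	#include <stdio.h>

	/* Stand-ins for page_counter / mem_cgroup; names are illustrative only. */
	struct counter { long usage; struct counter *parent; };
	struct group   { int id; struct counter memory; };

	/* Same shape as the kernel's container_of() / mem_cgroup_from_counter(). */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))
	#define group_from_counter(c, member) container_of(c, struct group, member)

	int main(void)
	{
		struct group parent = { .id = 1 };
		struct group child  = { .id = 2, .memory = { .parent = &parent.memory } };

		/* Walk from the child's embedded counter back to the owning group. */
		struct group *p = group_from_counter(child.memory.parent, memory);
		printf("parent id = %d\n", p->id);	/* prints 1 */
		return 0;
	}
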
@@ -671,12 +673,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
 }
 #endif /* CONFIG_MEMCG */
 
-enum {
-	UNDER_LIMIT,
-	SOFT_LIMIT,
-	OVER_LIMIT,
-};
-
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
@@ -703,20 +699,35 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
 #endif	/* CONFIG_CGROUP_WRITEBACK */
 
 struct sock;
-#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
 void sock_update_memcg(struct sock *sk);
 void sock_release_memcg(struct sock *sk);
-#else
-static inline void sock_update_memcg(struct sock *sk)
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
+void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
+#if defined(CONFIG_MEMCG) && defined(CONFIG_INET)
+extern struct static_key_false memcg_sockets_enabled_key;
+#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
+static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
+#ifdef CONFIG_MEMCG_KMEM
+	if (memcg->tcp_mem.memory_pressure)
+		return true;
+#endif
+	do {
+		if (time_before(jiffies, memcg->socket_pressure))
+			return true;
+	} while ((memcg = parent_mem_cgroup(memcg)));
+	return false;
 }
-static inline void sock_release_memcg(struct sock *sk)
+#else
+#define mem_cgroup_sockets_enabled 0
+static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
+	return false;
 }
-#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */
+#endif
 
 #ifdef CONFIG_MEMCG_KMEM
-extern struct static_key memcg_kmem_enabled_key;
+extern struct static_key_false memcg_kmem_enabled_key;
 
 extern int memcg_nr_cache_ids;
 void memcg_get_cache_ids(void);
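
mem_cgroup_charge_skmem()/mem_cgroup_uncharge_skmem() are the new entry points for
charging socket buffer pages, and mem_cgroup_under_socket_pressure() treats
socket_pressure as a jiffies deadline, walking toward the root with
parent_mem_cgroup() so that a pressured ancestor throttles every descendant's
sockets until the window expires. A standalone sketch of that
deadline-plus-ancestor-walk idea (names and the fake clock are invented; the
kernel uses time_before() to cope with jiffies wraparound, which the plain
comparison below does not):

	#include <stdbool.h>
	#include <stdio.h>

	struct cgrp {
		struct cgrp *parent;
		unsigned long pressure_deadline;	/* "under pressure" until this tick */
	};

	static unsigned long now;			/* stands in for jiffies */

	/* Reclaim side: open a pressure window for the next 100 ticks. */
	static void note_pressure(struct cgrp *cg)
	{
		cg->pressure_deadline = now + 100;
	}

	/* Charge side: pressured if any ancestor's window is still open. */
	static bool under_pressure(struct cgrp *cg)
	{
		do {
			if (now < cg->pressure_deadline)
				return true;
		} while ((cg = cg->parent));
		return false;
	}

	int main(void)
	{
		struct cgrp root = { 0 }, child = { .parent = &root };

		note_pressure(&root);
		printf("%d\n", under_pressure(&child));	/* 1: inherited from root */
		now += 200;
		printf("%d\n", under_pressure(&child));	/* 0: window expired */
		return 0;
	}
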
@@ -732,7 +743,7 @@ void memcg_put_cache_ids(void);
 
 static inline bool memcg_kmem_enabled(void)
 {
-	return static_key_false(&memcg_kmem_enabled_key);
+	return static_branch_unlikely(&memcg_kmem_enabled_key);
 }
 
 static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
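
memcg_kmem_enabled() switches to the updated jump-label API: the key is declared
as a struct static_key_false and tested with static_branch_unlikely(), i.e. off by
default and patched in only when enabled. A minimal sketch of that API with an
invented key name:

	#include <linux/jump_label.h>

	/* Sketch only: a feature flag that defaults to off. */
	static DEFINE_STATIC_KEY_FALSE(my_feature_key);

	static void my_feature_enable(void)
	{
		static_branch_enable(&my_feature_key);	/* patch the branch in */
	}

	static bool my_feature_enabled(void)
	{
		/* Compiles to a fall-through no-op until the key is enabled. */
		return static_branch_unlikely(&my_feature_key);
	}
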
@@ -766,15 +777,13 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
 	return memcg ? memcg->kmemcg_id : -1;
 }
 
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
+struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
 void __memcg_kmem_put_cache(struct kmem_cache *cachep);
 
-static inline bool __memcg_kmem_bypass(gfp_t gfp)
+static inline bool __memcg_kmem_bypass(void)
 {
 	if (!memcg_kmem_enabled())
 		return true;
-	if (gfp & __GFP_NOACCOUNT)
-		return true;
 	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
 		return true;
 	return false;
@@ -791,7 +800,9 @@ static inline bool __memcg_kmem_bypass(gfp_t gfp)
 static __always_inline int memcg_kmem_charge(struct page *page,
 					     gfp_t gfp, int order)
 {
-	if (__memcg_kmem_bypass(gfp))
+	if (__memcg_kmem_bypass())
+		return 0;
+	if (!(gfp & __GFP_ACCOUNT))
 		return 0;
 	return __memcg_kmem_charge(page, gfp, order);
 }
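
The gfp test also moves out of __memcg_kmem_bypass() and flips polarity: kmem
charging becomes opt-in via __GFP_ACCOUNT rather than opt-out via __GFP_NOACCOUNT,
so only allocations that explicitly request accounting are charged. A caller-side
sketch, assuming the GFP_KERNEL_ACCOUNT shorthand (GFP_KERNEL | __GFP_ACCOUNT):

	#include <linux/slab.h>

	/* Sketch only: an allocation meant to be charged to the caller's memcg. */
	static void *alloc_accounted(size_t size)
	{
		/* Plain GFP_KERNEL would now bypass kmem accounting entirely. */
		return kmalloc(size, GFP_KERNEL_ACCOUNT);
	}
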
@@ -810,16 +821,15 @@ static __always_inline void memcg_kmem_uncharge(struct page *page, int order)
 /**
  * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
  * @cachep: the original global kmem cache
- * @gfp: allocation flags.
  *
  * All memory allocated from a per-memcg cache is charged to the owner memcg.
  */
 static __always_inline struct kmem_cache *
 memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
-	if (__memcg_kmem_bypass(gfp))
+	if (__memcg_kmem_bypass())
 		return cachep;
-	return __memcg_kmem_get_cache(cachep);
+	return __memcg_kmem_get_cache(cachep, gfp);
 }
 
 static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)