@@ -166,6 +166,15 @@ enum memcg_kmem_state {
 	KMEM_ONLINE,
 };
 
+#if defined(CONFIG_SMP)
+struct memcg_padding {
+	char x[0];
+} ____cacheline_internodealigned_in_smp;
+#define MEMCG_PADDING(name)      struct memcg_padding name;
+#else
+#define MEMCG_PADDING(name)
+#endif
+
 /*
  * The memory controller data structure. The memory controller controls both
  * page cache and RSS per cgroup. We would eventually like to provide
@@ -212,7 +221,6 @@ struct mem_cgroup {
 	int		oom_kill_disable;
 
 	/* memory.events */
-	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
 	struct cgroup_file events_file;
 
 	/* handle for "memory.swap.events" */
@@ -235,19 +243,26 @@ struct mem_cgroup {
 	 * mem_cgroup ? And what type of charges should we move ?
 	 */
 	unsigned long move_charge_at_immigrate;
+	/* taken only while moving_account > 0 */
+	spinlock_t		move_lock;
+	unsigned long		move_lock_flags;
+
+	MEMCG_PADDING(_pad1_);
+
 	/*
 	 * set > 0 if pages under this cgroup are moving to other cgroup.
 	 */
 	atomic_t		moving_account;
-	/* taken only while moving_account > 0 */
-	spinlock_t		move_lock;
 	struct task_struct	*move_lock_task;
-	unsigned long		move_lock_flags;
 
 	/* memory.stat */
 	struct mem_cgroup_stat_cpu __percpu *stat_cpu;
+
+	MEMCG_PADDING(_pad2_);
+
 	atomic_long_t		stat[MEMCG_NR_STAT];
 	atomic_long_t		events[NR_VM_EVENT_ITEMS];
+	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
 
 	unsigned long		socket_pressure;
 
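For reference, below is a minimal userspace sketch of the trick the MEMCG_PADDING() macro relies on. It is not part of the patch: the names example_pad, example, cold_a/cold_b/hot_counter and the hard-coded 64-byte line size are illustrative assumptions (the kernel uses ____cacheline_internodealigned_in_smp instead of a literal alignment). A zero-size member aligned to the cache line boundary contributes no data of its own, but it forces every member declared after it onto a fresh cache line, so frequently written fields stop sharing a line with the mostly read-only fields placed before the pad.

/* build: gcc -Wall example.c && ./a.out */
#include <stdio.h>
#include <stddef.h>

#define EXAMPLE_CACHELINE 64			/* assumed cache line size */

struct example_pad {
	char x[0];				/* zero-length array: alignment only, no data */
} __attribute__((aligned(EXAMPLE_CACHELINE)));

struct example {
	long cold_a;				/* rarely written fields */
	long cold_b;

	struct example_pad _pad1_;		/* next member starts on a new cache line */

	long hot_counter;			/* frequently written field */
};

int main(void)
{
	/* with 8-byte long: cold_a at offset 0, hot_counter at offset 64 */
	printf("cold_a=%zu hot_counter=%zu sizeof=%zu\n",
	       offsetof(struct example, cold_a),
	       offsetof(struct example, hot_counter),
	       sizeof(struct example));
	return 0;
}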