@@ -87,32 +87,14 @@ unsigned long task_statm(struct mm_struct *mm,

#ifdef CONFIG_NUMA
/*
- * These functions are for numa_maps but called in generic **maps seq_file
- * ->start(), ->stop() ops.
- *
- * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
- * Each mempolicy object is controlled by reference counting. The problem here
- * is how to avoid accessing dead mempolicy object.
- *
- * Because we're holding mmap_sem while reading seq_file, it's safe to access
- * each vma's mempolicy, no vma objects will never drop refs to mempolicy.
- *
- * A task's mempolicy (task->mempolicy) has different behavior. task->mempolicy
- * is set and replaced under mmap_sem but unrefed and cleared under task_lock().
- * So, without task_lock(), we cannot trust get_vma_policy() because we cannot
- * gurantee the task never exits under us. But taking task_lock() around
- * get_vma_plicy() causes lock order problem.
- *
- * To access task->mempolicy without lock, we hold a reference count of an
- * object pointed by task->mempolicy and remember it. This will guarantee
- * that task->mempolicy points to an alive object or NULL in numa_maps accesses.
+ * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
struct task_struct *task = priv->task;

task_lock(task);
- priv->task_mempolicy = task->mempolicy;
+ priv->task_mempolicy = get_task_policy(task);
mpol_get(priv->task_mempolicy);
task_unlock(task);
}
@@ -129,124 +111,154 @@ static void release_task_mempolicy(struct proc_maps_private *priv)
}
#endif

-static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
+static void vma_stop(struct proc_maps_private *priv)
{
- if (vma && vma != priv->tail_vma) {
- struct mm_struct *mm = vma->vm_mm;
- release_task_mempolicy(priv);
- up_read(&mm->mmap_sem);
- mmput(mm);
- }
+ struct mm_struct *mm = priv->mm;
+
+ release_task_mempolicy(priv);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+}
+
+static struct vm_area_struct *
+m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
+{
+ if (vma == priv->tail_vma)
+ return NULL;
+ return vma->vm_next ?: priv->tail_vma;
+}
+
+static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
+{
+ if (m->count < m->size) /* vma is copied successfully */
+ m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL;
}

-static void *m_start(struct seq_file *m, loff_t *pos)
+static void *m_start(struct seq_file *m, loff_t *ppos)
{
struct proc_maps_private *priv = m->private;
unsigned long last_addr = m->version;
struct mm_struct *mm;
- struct vm_area_struct *vma, *tail_vma = NULL;
- loff_t l = *pos;
-
- /* Clear the per syscall fields in priv */
- priv->task = NULL;
- priv->tail_vma = NULL;
-
- /*
- * We remember last_addr rather than next_addr to hit with
- * vmacache most of the time. We have zero last_addr at
- * the beginning and also after lseek. We will have -1 last_addr
- * after the end of the vmas.
- */
+ struct vm_area_struct *vma;
+ unsigned int pos = *ppos;

+ /* See m_cache_vma(). Zero at the start or after lseek. */
if (last_addr == -1UL)
return NULL;

- priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
+ priv->task = get_proc_task(priv->inode);
if (!priv->task)
return ERR_PTR(-ESRCH);

- mm = mm_access(priv->task, PTRACE_MODE_READ);
- if (!mm || IS_ERR(mm))
- return mm;
- down_read(&mm->mmap_sem);
+ mm = priv->mm;
+ if (!mm || !atomic_inc_not_zero(&mm->mm_users))
+ return NULL;

- tail_vma = get_gate_vma(priv->task->mm);
- priv->tail_vma = tail_vma;
+ down_read(&mm->mmap_sem);
hold_task_mempolicy(priv);
- /* Start with last addr hint */
- vma = find_vma(mm, last_addr);
- if (last_addr && vma) {
- vma = vma->vm_next;
- goto out;
+ priv->tail_vma = get_gate_vma(mm);
+
+ if (last_addr) {
+ vma = find_vma(mm, last_addr);
+ if (vma && (vma = m_next_vma(priv, vma)))
+ return vma;
}

- /*
- * Check the vma index is within the range and do
- * sequential scan until m_index.
- */
- vma = NULL;
- if ((unsigned long)l < mm->map_count) {
- vma = mm->mmap;
- while (l-- && vma)
+ m->version = 0;
+ if (pos < mm->map_count) {
+ for (vma = mm->mmap; pos; pos--) {
+ m->version = vma->vm_start;
vma = vma->vm_next;
- goto out;
+ }
+ return vma;
}

- if (l != mm->map_count)
- tail_vma = NULL; /* After gate vma */
-
-out:
- if (vma)
- return vma;
+ /* we do not bother to update m->version in this case */
+ if (pos == mm->map_count && priv->tail_vma)
+ return priv->tail_vma;

- release_task_mempolicy(priv);
- /* End of vmas has been reached */
- m->version = (tail_vma != NULL)? 0: -1UL;
- up_read(&mm->mmap_sem);
- mmput(mm);
- return tail_vma;
+ vma_stop(priv);
+ return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
struct proc_maps_private *priv = m->private;
- struct vm_area_struct *vma = v;
- struct vm_area_struct *tail_vma = priv->tail_vma;
+ struct vm_area_struct *next;

(*pos)++;
- if (vma && (vma != tail_vma) && vma->vm_next)
- return vma->vm_next;
- vma_stop(priv, vma);
- return (vma != tail_vma)? tail_vma: NULL;
+ next = m_next_vma(priv, v);
+ if (!next)
+ vma_stop(priv);
+ return next;
}

static void m_stop(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
- struct vm_area_struct *vma = v;

- if (!IS_ERR(vma))
- vma_stop(priv, vma);
- if (priv->task)
+ if (!IS_ERR_OR_NULL(v))
+ vma_stop(priv);
+ if (priv->task) {
put_task_struct(priv->task);
+ priv->task = NULL;
+ }
+}
+
+static int proc_maps_open(struct inode *inode, struct file *file,
+ const struct seq_operations *ops, int psize)
+{
+ struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
+
+ if (!priv)
+ return -ENOMEM;
+
+ priv->inode = inode;
+ priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
+ if (IS_ERR(priv->mm)) {
+ int err = PTR_ERR(priv->mm);
+
+ seq_release_private(inode, file);
+ return err;
+ }
+
+ return 0;
+}
+
+static int proc_map_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct proc_maps_private *priv = seq->private;
+
+ if (priv->mm)
+ mmdrop(priv->mm);
+
+ return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
const struct seq_operations *ops)
{
- struct proc_maps_private *priv;
- int ret = -ENOMEM;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (priv) {
- priv->pid = proc_pid(inode);
- ret = seq_open(file, ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = priv;
- } else {
- kfree(priv);
- }
+ return proc_maps_open(inode, file, ops,
+ sizeof(struct proc_maps_private));
+}
+
+static pid_t pid_of_stack(struct proc_maps_private *priv,
+ struct vm_area_struct *vma, bool is_pid)
+{
+ struct inode *inode = priv->inode;
+ struct task_struct *task;
+ pid_t ret = 0;
+
+ rcu_read_lock();
+ task = pid_task(proc_pid(inode), PIDTYPE_PID);
+ if (task) {
+ task = task_of_stack(task, vma, is_pid);
+ if (task)
+ ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
}
+ rcu_read_unlock();
+
return ret;
}
@@ -256,7 +268,6 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
struct mm_struct *mm = vma->vm_mm;
struct file *file = vma->vm_file;
struct proc_maps_private *priv = m->private;
- struct task_struct *task = priv->task;
vm_flags_t flags = vma->vm_flags;
unsigned long ino = 0;
unsigned long long pgoff = 0;
@@ -321,8 +332,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
goto done;
}

- tid = vm_is_stack(task, vma, is_pid);
-
+ tid = pid_of_stack(priv, vma, is_pid);
if (tid != 0) {
/*
* Thread stack in /proc/PID/task/TID/maps or
@@ -349,15 +359,8 @@ done:

static int show_map(struct seq_file *m, void *v, int is_pid)
{
- struct vm_area_struct *vma = v;
- struct proc_maps_private *priv = m->private;
- struct task_struct *task = priv->task;
-
- show_map_vma(m, vma, is_pid);
-
- if (m->count < m->size) /* vma is copied successfully */
- m->version = (vma != get_gate_vma(task->mm))
- ? vma->vm_start : 0;
+ show_map_vma(m, v, is_pid);
+ m_cache_vma(m, v);
return 0;
}
@@ -399,14 +402,14 @@ const struct file_operations proc_pid_maps_operations = {
.open = pid_maps_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
+ .release = proc_map_release,
};

const struct file_operations proc_tid_maps_operations = {
.open = tid_maps_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
+ .release = proc_map_release,
};

/*
@@ -583,8 +586,6 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
- struct proc_maps_private *priv = m->private;
- struct task_struct *task = priv->task;
struct vm_area_struct *vma = v;
struct mem_size_stats mss;
struct mm_walk smaps_walk = {
@@ -637,10 +638,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
mss.nonlinear >> 10);

show_smap_vma_flags(m, vma);
-
- if (m->count < m->size) /* vma is copied successfully */
- m->version = (vma != get_gate_vma(task->mm))
- ? vma->vm_start : 0;
+ m_cache_vma(m, vma);
return 0;
}
@@ -682,14 +680,14 @@ const struct file_operations proc_pid_smaps_operations = {
.open = pid_smaps_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
+ .release = proc_map_release,
};

const struct file_operations proc_tid_smaps_operations = {
.open = tid_smaps_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
+ .release = proc_map_release,
};

/*
@@ -1029,7 +1027,6 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
spinlock_t *ptl;
pte_t *pte;
int err = 0;
- pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));

/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -1043,6 +1040,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,

for (; addr != end; addr += PAGE_SIZE) {
unsigned long offset;
+ pagemap_entry_t pme;

offset = (addr & ~PAGEMAP_WALK_MASK) >>
PAGE_SHIFT;
@@ -1057,32 +1055,51 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,

if (pmd_trans_unstable(pmd))
return 0;
- for (; addr != end; addr += PAGE_SIZE) {
- int flags2;
-
- /* check to see if we've left 'vma' behind
- * and need a new, higher one */
- if (vma && (addr >= vma->vm_end)) {
- vma = find_vma(walk->mm, addr);
- if (vma && (vma->vm_flags & VM_SOFTDIRTY))
- flags2 = __PM_SOFT_DIRTY;
- else
- flags2 = 0;
- pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
+
+ while (1) {
+ /* End of address space hole, which we mark as non-present. */
+ unsigned long hole_end;
+
+ if (vma)
+ hole_end = min(end, vma->vm_start);
+ else
+ hole_end = end;
+
+ for (; addr < hole_end; addr += PAGE_SIZE) {
+ pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
+
+ err = add_to_pagemap(addr, &pme, pm);
+ if (err)
+ return err;
}

- /* check that 'vma' actually covers this address,
- * and that it isn't a huge page vma */
- if (vma && (vma->vm_start <= addr) &&
- !is_vm_hugetlb_page(vma)) {
+ if (!vma || vma->vm_start >= end)
+ break;
+ /*
+ * We can't possibly be in a hugetlb VMA. In general,
+ * for a mm_walk with a pmd_entry and a hugetlb_entry,
+ * the pmd_entry can only be called on addresses in a
+ * hugetlb if the walk starts in a non-hugetlb VMA and
+ * spans a hugepage VMA. Since pagemap_read walks are
+ * PMD-sized and PMD-aligned, this will never be true.
+ */
+ BUG_ON(is_vm_hugetlb_page(vma));
+
+ /* Addresses in the VMA. */
+ for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
+ pagemap_entry_t pme;
pte = pte_offset_map(pmd, addr);
pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
- /* unmap before userspace copy */
pte_unmap(pte);
+ err = add_to_pagemap(addr, &pme, pm);
+ if (err)
+ return err;
}
- err = add_to_pagemap(addr, &pme, pm);
- if (err)
- return err;
+
+ if (addr == end)
+ break;
+
+ vma = find_vma(walk->mm, addr);
}

cond_resched();
@@ -1415,7 +1432,6 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
struct vm_area_struct *vma = v;
struct numa_maps *md = &numa_priv->md;
struct file *file = vma->vm_file;
- struct task_struct *task = proc_priv->task;
struct mm_struct *mm = vma->vm_mm;
struct mm_walk walk = {};
struct mempolicy *pol;
@@ -1435,9 +1451,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
walk.private = md;
walk.mm = mm;

- pol = get_vma_policy(task, vma, vma->vm_start);
- mpol_to_str(buffer, sizeof(buffer), pol);
- mpol_cond_put(pol);
+ pol = __get_vma_policy(vma, vma->vm_start);
+ if (pol) {
+ mpol_to_str(buffer, sizeof(buffer), pol);
+ mpol_cond_put(pol);
+ } else {
+ mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
+ }

seq_printf(m, "%08lx %s", vma->vm_start, buffer);
@@ -1447,7 +1467,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
seq_puts(m, " heap");
} else {
- pid_t tid = vm_is_stack(task, vma, is_pid);
+ pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
if (tid != 0) {
/*
* Thread stack in /proc/PID/task/TID/maps or
@@ -1495,9 +1515,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
seq_printf(m, " N%d=%lu", nid, md->node[nid]);
out:
seq_putc(m, '\n');
-
- if (m->count < m->size)
- m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
+ m_cache_vma(m, vma);
return 0;
}
@@ -1528,20 +1546,8 @@ static const struct seq_operations proc_tid_numa_maps_op = {
static int numa_maps_open(struct inode *inode, struct file *file,
const struct seq_operations *ops)
{
- struct numa_maps_private *priv;
- int ret = -ENOMEM;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (priv) {
- priv->proc_maps.pid = proc_pid(inode);
- ret = seq_open(file, ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = priv;
- } else {
- kfree(priv);
- }
- }
- return ret;
+ return proc_maps_open(inode, file, ops,
+ sizeof(struct numa_maps_private));
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
@@ -1558,13 +1564,13 @@ const struct file_operations proc_pid_numa_maps_operations = {
.open = pid_numa_maps_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
+ .release = proc_map_release,
};

const struct file_operations proc_tid_numa_maps_operations = {
.open = tid_numa_maps_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
+ .release = proc_map_release,
};
#endif /* CONFIG_NUMA */