@@ -1794,6 +1794,11 @@ static void task_numa_placement(struct task_struct *p)
 	u64 runtime, period;
 	spinlock_t *group_lock = NULL;
 
+	/*
+	 * The p->mm->numa_scan_seq field gets updated without
+	 * exclusive access. Use READ_ONCE() here to ensure
+	 * that the field is read in a single access:
+	 */
 	seq = READ_ONCE(p->mm->numa_scan_seq);
 	if (p->numa_scan_seq == seq)
 		return;
@@ -2107,6 +2112,14 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 
 static void reset_ptenuma_scan(struct task_struct *p)
 {
+	/*
+	 * We only did a read acquisition of the mmap sem, so
+	 * p->mm->numa_scan_seq is written to without exclusive access
+	 * and the update is not guaranteed to be atomic. That's not
+	 * much of an issue though, since this is just used for
+	 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
+	 * expensive, to avoid any form of compiler optimizations:
+	 */
 	WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
 	p->mm->numa_scan_offset = 0;
 }
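
For illustration only, here is a minimal sketch of the same pattern outside the patch: a sequence counter that, like mm->numa_scan_seq, is bumped and read without exclusive access because it is only used for statistical sampling. The struct and function names (scan_state, bump_scan_seq, scan_seq_changed) are hypothetical and not part of the kernel; only READ_ONCE()/WRITE_ONCE() from <linux/compiler.h> are the real interface being demonstrated.

	#include <linux/compiler.h>
	#include <linux/types.h>

	struct scan_state {
		/* Hypothetical field; updated by several tasks with no lock held. */
		unsigned int scan_seq;
	};

	/*
	 * Writer side: a plain read-modify-write done without exclusive
	 * access. READ_ONCE()/WRITE_ONCE() keep the compiler from tearing,
	 * fusing, or re-loading the accesses; lost increments are tolerated
	 * because the value is only a sampling hint.
	 */
	static void bump_scan_seq(struct scan_state *s)
	{
		WRITE_ONCE(s->scan_seq, READ_ONCE(s->scan_seq) + 1);
	}

	/*
	 * Reader side: take one snapshot of the counter and report whether
	 * it moved since the caller's last observation.
	 */
	static bool scan_seq_changed(struct scan_state *s, unsigned int *last_seen)
	{
		unsigned int seq = READ_ONCE(s->scan_seq);

		if (seq == *last_seen)
			return false;

		*last_seen = seq;
		return true;
	}

The design choice mirrors the comments added by the patch: correctness here does not require atomicity, only that each access is a single, non-optimized load or store, which READ_ONCE()/WRITE_ONCE() provide at essentially no cost.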