@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
-	int	mode;		/* FALLOC_FL mode currently operating */
+	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
 	pgoff_t start;		/* start of range currently being fallocated */
 	pgoff_t next;		/* the next page offset to be fallocated */
 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
@@ -760,7 +760,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 			spin_lock(&inode->i_lock);
 			shmem_falloc = inode->i_private;
 			if (shmem_falloc &&
-			    !shmem_falloc->mode &&
+			    !shmem_falloc->waitq &&
 			    index >= shmem_falloc->start &&
 			    index < shmem_falloc->next)
 				shmem_falloc->nr_unswapped++;
@@ -1248,38 +1248,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	 * Trinity finds that probing a hole which tmpfs is punching can
	 * prevent the hole-punch from ever completing: which in turn
	 * locks writers out with its hold on i_mutex.  So refrain from
-	 * faulting pages into the hole while it's being punched, and
-	 * wait on i_mutex to be released if vmf->flags permits.
+	 * faulting pages into the hole while it's being punched.  Although
+	 * shmem_undo_range() does remove the additions, it may be unable to
+	 * keep up, as each new page needs its own unmap_mapping_range() call,
+	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
+	 *
+	 * It does not matter if we sometimes reach this check just before the
+	 * hole-punch begins, so that one fault then races with the punch:
+	 * we just need to make racing faults a rare case.
+	 *
+	 * The implementation below would be much simpler if we just used a
+	 * standard mutex or completion: but we cannot take i_mutex in fault,
+	 * and bloating every shmem inode for this unlikely case would be sad.
	 */
	if (unlikely(inode->i_private)) {
		struct shmem_falloc *shmem_falloc;

		spin_lock(&inode->i_lock);
		shmem_falloc = inode->i_private;
-		if (!shmem_falloc ||
-		    shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
-		    vmf->pgoff < shmem_falloc->start ||
-		    vmf->pgoff >= shmem_falloc->next)
-			shmem_falloc = NULL;
-		spin_unlock(&inode->i_lock);
-		/*
-		 * i_lock has protected us from taking shmem_falloc seriously
-		 * once return from shmem_fallocate() went back up that stack.
-		 * i_lock does not serialize with i_mutex at all, but it does
-		 * not matter if sometimes we wait unnecessarily, or sometimes
-		 * miss out on waiting: we just need to make those cases rare.
-		 */
-		if (shmem_falloc) {
+		if (shmem_falloc &&
+		    shmem_falloc->waitq &&
+		    vmf->pgoff >= shmem_falloc->start &&
+		    vmf->pgoff < shmem_falloc->next) {
+			wait_queue_head_t *shmem_falloc_waitq;
+			DEFINE_WAIT(shmem_fault_wait);
+
+			ret = VM_FAULT_NOPAGE;
			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
			    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+				/* It's polite to up mmap_sem if we can */
				up_read(&vma->vm_mm->mmap_sem);
-				mutex_lock(&inode->i_mutex);
-				mutex_unlock(&inode->i_mutex);
-				return VM_FAULT_RETRY;
+				ret = VM_FAULT_RETRY;
			}
-			/* cond_resched? Leave that to GUP or return to user */
-			return VM_FAULT_NOPAGE;
+
+			shmem_falloc_waitq = shmem_falloc->waitq;
+			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+					TASK_UNINTERRUPTIBLE);
+			spin_unlock(&inode->i_lock);
+			schedule();
+
+			/*
+			 * shmem_falloc_waitq points into the shmem_fallocate()
+			 * stack of the hole-punching task: shmem_falloc_waitq
+			 * is usually invalid by the time we reach here, but
+			 * finish_wait() does not dereference it in that case;
+			 * though i_lock needed lest racing with wake_up_all().
+			 */
+			spin_lock(&inode->i_lock);
+			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+			spin_unlock(&inode->i_lock);
+			return ret;
		}
+		spin_unlock(&inode->i_lock);
	}

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
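
The fault-side hunk above is the delicate half of the pattern: the faulting task queues itself on a wait_queue_head_t that lives on the hole-punching task's stack, and may call finish_wait() after that stack frame is gone. Below is a minimal kernel-style sketch of that waiter discipline, not taken from the patch: the hypothetical state_lock/punch_waitq pair stands in for inode->i_lock and the waitq field published through inode->i_private.

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

/* Hypothetical stand-ins for inode->i_lock and inode->i_private->waitq */
static DEFINE_SPINLOCK(state_lock);
static wait_queue_head_t *punch_waitq;	/* non-NULL while a punch runs */

/* Waiter side, in the style of shmem_fault() above */
static void wait_for_punch_to_end(void)
{
	wait_queue_head_t *waitq;
	DEFINE_WAIT(wait);

	spin_lock(&state_lock);
	waitq = punch_waitq;
	if (!waitq) {			/* no punch in progress */
		spin_unlock(&state_lock);
		return;
	}
	prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&state_lock);	/* drop the lock before sleeping */
	schedule();			/* woken by the owner's wake_up_all() */

	/*
	 * waitq points into the punching task's stack and may already be
	 * invalid here; but wake_up_all()'s autoremove dequeued us, so
	 * finish_wait() sees an empty entry and never dereferences waitq.
	 * The lock serializes against a wake_up_all() still in progress.
	 */
	spin_lock(&state_lock);
	finish_wait(waitq, &wait);
	spin_unlock(&state_lock);
}

Because we queued under state_lock, the owner cannot clear the pointer and wake everyone between our check and our prepare_to_wait(): either we see NULL and skip waiting, or our entry is on the list before the wakeup runs.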
@@ -1774,13 +1794,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,

	mutex_lock(&inode->i_mutex);

-	shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
-
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		struct address_space *mapping = file->f_mapping;
		loff_t unmap_start = round_up(offset, PAGE_SIZE);
		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);

+		shmem_falloc.waitq = &shmem_falloc_waitq;
		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
		spin_lock(&inode->i_lock);
@@ -1792,8 +1812,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
					1 + unmap_end - unmap_start, 0);
		shmem_truncate_range(inode, offset, offset + len - 1);
		/* No need to unmap again: hole-punching leaves COWed pages */
+
+		spin_lock(&inode->i_lock);
+		inode->i_private = NULL;
+		wake_up_all(&shmem_falloc_waitq);
+		spin_unlock(&inode->i_lock);
		error = 0;
-		goto undone;
+		goto out;
	}

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
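
The fallocate-side hunks show the other half: the owner publishes its on-stack waitqueue, then must retract the pointer and drain all waiters under the same lock before returning, or sleepers would be left queued on dead stack memory. A matching sketch, reusing the hypothetical state_lock/punch_waitq declarations from the waiter sketch above:

/* Owner side, in the style of shmem_fallocate() above: the waitqueue
 * lives on this function's stack, so it must be retracted and drained
 * before the function returns. */
static void punch_hole(void)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	spin_lock(&state_lock);
	punch_waitq = &waitq;		/* publish: new faults now wait */
	spin_unlock(&state_lock);

	/* ... the unmap_mapping_range()/shmem_truncate_range() work ... */

	spin_lock(&state_lock);
	punch_waitq = NULL;		/* retract before the stack dies */
	wake_up_all(&waitq);		/* still under the lock: no waiter
					 * can requeue behind our back */
	spin_unlock(&state_lock);
}

Holding the lock across wake_up_all() is what makes the waiters' locked finish_wait() safe: once the owner drops the lock, no waiter can still be inside a wakeup that touches the on-stack structure.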
@@ -1809,6 +1834,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
		goto out;
	}

+	shmem_falloc.waitq = NULL;
	shmem_falloc.start = start;
	shmem_falloc.next  = start;
	shmem_falloc.nr_falloced  = 0;