@@ -3778,8 +3778,7 @@ retry:
 			 * handling userfault. Reacquire after handling
 			 * fault to make calling code simpler.
 			 */
-			hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
-							idx, haddr);
+			hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -3887,21 +3886,14 @@ backout_unlocked:
 }
 
 #ifdef CONFIG_SMP
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
-			     struct vm_area_struct *vma,
-			     struct address_space *mapping,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
 			     pgoff_t idx, unsigned long address)
 {
 	unsigned long key[2];
 	u32 hash;
 
-	if (vma->vm_flags & VM_SHARED) {
-		key[0] = (unsigned long) mapping;
-		key[1] = idx;
-	} else {
-		key[0] = (unsigned long) mm;
-		key[1] = address >> huge_page_shift(h);
-	}
+	key[0] = (unsigned long) mapping;
+	key[1] = idx;
 
 	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
 
@@ -3912,9 +3904,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
  * For uniprocesor systems we always use a single mutex, so just
  * return 0 and avoid the hashing overhead.
  */
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
-			     struct vm_area_struct *vma,
-			     struct address_space *mapping,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
 			     pgoff_t idx, unsigned long address)
 {
 	return 0;
@@ -3959,7 +3949,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * get spurious allocation failures if two CPUs race to instantiate
 	 * the same page in the page cache.
 	 */
-	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
+	hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 	entry = huge_ptep_get(ptep);
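
The functional core of the change is in the second hunk: the vma->vm_flags & VM_SHARED branch is gone, and both shared and private mappings now derive the mutex key from the same (mapping, idx) pair, so concurrent faults on the same page of the same file always contend on the same mutex, and callers that hold only the mapping and index can compute the hash without an mm or vma. Below is a minimal userspace sketch of that keying scheme, for illustration only: mix() is a stand-in for the kernel's jhash2(), NUM_FAULT_MUTEXES is an assumed table size standing in for the boot-sized num_fault_mutexes, and the void *mapping argument stands in for struct address_space *.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_FAULT_MUTEXES 64u	/* assumed; the kernel sizes its table at boot */

/*
 * Toy 32-bit mixer standing in for the kernel's jhash2(); any reasonable
 * integer hash serves the purpose of this illustration.
 */
static uint32_t mix(const uint32_t *k, size_t n)
{
	uint32_t h = 0;

	for (size_t i = 0; i < n; i++) {
		h ^= k[i];
		h *= 0x9e3779b1u;	/* golden-ratio odd constant */
		h ^= h >> 16;
	}
	return h;
}

/*
 * After the patch, the key is (mapping, idx) for all mappings, shared and
 * private alike: same file page, same mutex slot, regardless of which VMA
 * or mm takes the fault.
 */
static uint32_t fault_mutex_hash(const void *mapping, uint64_t idx)
{
	uint64_t m = (uintptr_t)mapping;
	uint32_t key[4];

	key[0] = (uint32_t)m;
	key[1] = (uint32_t)(m >> 32);
	key[2] = (uint32_t)idx;
	key[3] = (uint32_t)(idx >> 32);

	/* Power-of-two table size lets a mask reduce the hash to a slot. */
	return mix(key, 4) & (NUM_FAULT_MUTEXES - 1);
}

int main(void)
{
	int file_a, file_b;	/* stand-ins for two struct address_space * */

	/* Two faults on the same (mapping, idx) pick the same slot... */
	printf("file_a idx 42: slot %u\n", fault_mutex_hash(&file_a, 42));
	printf("file_a idx 42: slot %u\n", fault_mutex_hash(&file_a, 42));
	/* ...while a different mapping usually lands elsewhere. */
	printf("file_b idx 42: slot %u\n", fault_mutex_hash(&file_b, 42));
	return 0;
}

Running the sketch prints the same slot for both faults on file_a at index 42 and, barring a hash collision, a different slot for file_b. That is the property the simplified signature buys: anyone holding just the mapping and page index can take the same mutex a faulting task would, which is exactly what the shortened hugetlb_fault_mutex_hash() call sites above rely on.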