@@ -1370,12 +1370,11 @@ void unmap_vmas(struct mmu_gather *tlb,
  * @vma: vm_area_struct holding the applicable pages
  * @start: starting address of pages to zap
  * @size: number of bytes to zap
- * @details: details of shared cache invalidation
  *
  * Caller must protect the VMA list
  */
 void zap_page_range(struct vm_area_struct *vma, unsigned long start,
-		unsigned long size, struct zap_details *details)
+		unsigned long size)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
@@ -1386,7 +1385,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, start, end);
 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
-		unmap_single_vma(&tlb, vma, start, end, details);
+		unmap_single_vma(&tlb, vma, start, end, NULL);
 	mmu_notifier_invalidate_range_end(mm, start, end);
 	tlb_finish_mmu(&tlb, start, end);
 }
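
For context, a minimal sketch of how a call site changes under the new
signature. This is illustrative only (the variables vma and address are
hypothetical, not taken from this patch); the point is that callers that
previously passed NULL for the zap_details argument simply drop it, since
zap_page_range() now passes NULL to unmap_single_vma() internally:

	/* Before this patch: callers had to pass the unused details arg. */
	zap_page_range(vma, address, PAGE_SIZE, NULL);

	/* After this patch: the zap_details parameter is gone from the API. */
	zap_page_range(vma, address, PAGE_SIZE);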