|
@@ -57,7 +57,8 @@ enum scan_result {
|
|
|
SCAN_SWAP_CACHE_PAGE,
|
|
|
SCAN_DEL_PAGE_LRU,
|
|
|
SCAN_ALLOC_HUGE_PAGE_FAIL,
|
|
|
- SCAN_CGROUP_CHARGE_FAIL
|
|
|
+ SCAN_CGROUP_CHARGE_FAIL,
|
|
|
+ SCAN_EXCEED_SWAP_PTE
|
|
|
};
|
|
|
|
|
|
#define CREATE_TRACE_POINTS
|
|
@@ -100,6 +101,7 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
|
|
|
* fault.
|
|
|
*/
|
|
|
static unsigned int khugepaged_max_ptes_none __read_mostly;
|
|
|
+static unsigned int khugepaged_max_ptes_swap __read_mostly;
|
|
|
|
|
|
static int khugepaged(void *none);
|
|
|
static int khugepaged_slab_init(void);
|
|
@@ -598,6 +600,33 @@ static struct kobj_attribute khugepaged_max_ptes_none_attr =
|
|
|
__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
|
|
|
khugepaged_max_ptes_none_store);
|
|
|
|
|
|
+/*
+ * sysfs 'show' handler for the khugepaged max_ptes_swap tunable:
+ * prints the current limit as a decimal integer followed by '\n'.
+ */
+static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
|
|
|
+ struct kobj_attribute *attr,
|
|
|
+ char *buf)
|
|
|
+{
|
|
|
+ return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
|
|
|
+}
|
|
|
+
|
|
|
+/*
+ * sysfs 'store' handler for max_ptes_swap: parses a base-10 value and
+ * updates the tunable.  Returns 'count' on success, -EINVAL on a parse
+ * error or an out-of-range value.
+ */
+static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
|
|
|
+ struct kobj_attribute *attr,
|
|
|
+ const char *buf, size_t count)
|
|
|
+{
|
|
|
+ int err;
|
|
|
+ unsigned long max_ptes_swap;
|
|
|
+
|
|
|
+ err = kstrtoul(buf, 10, &max_ptes_swap);
|
|
|
+ /* valid range is 0..HPAGE_PMD_NR-1, mirroring the max_ptes_none check */
|
|
|
+ if (err || max_ptes_swap > HPAGE_PMD_NR-1)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ khugepaged_max_ptes_swap = max_ptes_swap;
|
|
|
+
|
|
|
+ return count;
|
|
|
+}
|
|
|
+
|
|
|
+/* sysfs attribute wiring max_ptes_swap to its show/store handlers (mode 0644) */
+static struct kobj_attribute khugepaged_max_ptes_swap_attr =
|
|
|
+ __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
|
|
|
+ khugepaged_max_ptes_swap_store);
|
|
|
+
|
|
|
static struct attribute *khugepaged_attr[] = {
|
|
|
&khugepaged_defrag_attr.attr,
|
|
|
&khugepaged_max_ptes_none_attr.attr,
|
|
@@ -606,6 +635,7 @@ static struct attribute *khugepaged_attr[] = {
|
|
|
&full_scans_attr.attr,
|
|
|
&scan_sleep_millisecs_attr.attr,
|
|
|
&alloc_sleep_millisecs_attr.attr,
|
|
|
+ &khugepaged_max_ptes_swap_attr.attr,
|
|
|
NULL,
|
|
|
};
|
|
|
|
|
@@ -674,6 +704,7 @@ static int __init hugepage_init(void)
|
|
|
|
|
|
khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
|
|
|
khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
|
|
|
+ khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
|
|
|
/*
|
|
|
* hugepages can't be allocated by the buddy allocator
|
|
|
*/
|
|
@@ -2507,7 +2538,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
|
|
|
struct page *page = NULL;
|
|
|
unsigned long _address;
|
|
|
spinlock_t *ptl;
|
|
|
- int node = NUMA_NO_NODE;
|
|
|
+ int node = NUMA_NO_NODE, unmapped = 0;
|
|
|
bool writable = false, referenced = false;
|
|
|
|
|
|
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
|
|
@@ -2523,6 +2554,14 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
|
|
|
for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
|
|
|
_pte++, _address += PAGE_SIZE) {
|
|
|
pte_t pteval = *_pte;
|
|
|
+ if (is_swap_pte(pteval)) {
|
|
|
+ if (++unmapped <= khugepaged_max_ptes_swap) {
|
|
|
+ continue;
|
|
|
+ } else {
|
|
|
+ result = SCAN_EXCEED_SWAP_PTE;
|
|
|
+ goto out_unmap;
|
|
|
+ }
|
|
|
+ }
|
|
|
if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
|
|
|
if (!userfaultfd_armed(vma) &&
|
|
|
++none_or_zero <= khugepaged_max_ptes_none) {
|
|
@@ -2609,7 +2648,7 @@ out_unmap:
|
|
|
}
|
|
|
out:
|
|
|
trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
|
|
|
- none_or_zero, result);
|
|
|
+ none_or_zero, result, unmapped);
|
|
|
return ret;
|
|
|
}
|
|
|
|