@@ -142,42 +142,6 @@ static struct shrinker huge_zero_page_shrinker = {
 };
 
 #ifdef CONFIG_SYSFS
-
-static ssize_t triple_flag_store(struct kobject *kobj,
-				 struct kobj_attribute *attr,
-				 const char *buf, size_t count,
-				 enum transparent_hugepage_flag enabled,
-				 enum transparent_hugepage_flag deferred,
-				 enum transparent_hugepage_flag req_madv)
-{
-	if (!memcmp("defer", buf,
-		    min(sizeof("defer")-1, count))) {
-		if (enabled == deferred)
-			return -EINVAL;
-		clear_bit(enabled, &transparent_hugepage_flags);
-		clear_bit(req_madv, &transparent_hugepage_flags);
-		set_bit(deferred, &transparent_hugepage_flags);
-	} else if (!memcmp("always", buf,
-		   min(sizeof("always")-1, count))) {
-		clear_bit(deferred, &transparent_hugepage_flags);
-		clear_bit(req_madv, &transparent_hugepage_flags);
-		set_bit(enabled, &transparent_hugepage_flags);
-	} else if (!memcmp("madvise", buf,
-			   min(sizeof("madvise")-1, count))) {
-		clear_bit(enabled, &transparent_hugepage_flags);
-		clear_bit(deferred, &transparent_hugepage_flags);
-		set_bit(req_madv, &transparent_hugepage_flags);
-	} else if (!memcmp("never", buf,
-			   min(sizeof("never")-1, count))) {
-		clear_bit(enabled, &transparent_hugepage_flags);
-		clear_bit(req_madv, &transparent_hugepage_flags);
-		clear_bit(deferred, &transparent_hugepage_flags);
-	} else
-		return -EINVAL;
-
-	return count;
-}
-
 static ssize_t enabled_show(struct kobject *kobj,
 			    struct kobj_attribute *attr, char *buf)
 {
@@ -193,19 +157,28 @@ static ssize_t enabled_store(struct kobject *kobj,
 			     struct kobj_attribute *attr,
 			     const char *buf, size_t count)
 {
-	ssize_t ret;
+	ssize_t ret = count;
 
-	ret = triple_flag_store(kobj, attr, buf, count,
-				TRANSPARENT_HUGEPAGE_FLAG,
-				TRANSPARENT_HUGEPAGE_FLAG,
-				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
+	if (!memcmp("always", buf,
+		    min(sizeof("always")-1, count))) {
+		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+	} else if (!memcmp("madvise", buf,
+		   min(sizeof("madvise")-1, count))) {
+		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+	} else if (!memcmp("never", buf,
+		   min(sizeof("never")-1, count))) {
+		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+	} else
+		ret = -EINVAL;
 
 	if (ret > 0) {
 		int err = start_stop_khugepaged();
 		if (err)
 			ret = err;
 	}
-
 	return ret;
 }
 static struct kobj_attribute enabled_attr =
@@ -241,32 +214,58 @@ ssize_t single_hugepage_flag_store(struct kobject *kobj,
 	return count;
 }
 
-/*
- * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
- * __GFP_REPEAT is too aggressive, it's never worth swapping tons of
- * memory just to allocate one more hugepage.
- */
 static ssize_t defrag_show(struct kobject *kobj,
 			   struct kobj_attribute *attr, char *buf)
 {
 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
-		return sprintf(buf, "[always] defer madvise never\n");
+		return sprintf(buf, "[always] defer defer+madvise madvise never\n");
 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
-		return sprintf(buf, "always [defer] madvise never\n");
-	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
-		return sprintf(buf, "always defer [madvise] never\n");
-	else
-		return sprintf(buf, "always defer madvise [never]\n");
-
+		return sprintf(buf, "always [defer] defer+madvise madvise never\n");
+	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
+		return sprintf(buf, "always defer [defer+madvise] madvise never\n");
+	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
+		return sprintf(buf, "always defer defer+madvise [madvise] never\n");
+	return sprintf(buf, "always defer defer+madvise madvise [never]\n");
 }
+
 static ssize_t defrag_store(struct kobject *kobj,
 			    struct kobj_attribute *attr,
 			    const char *buf, size_t count)
 {
-	return triple_flag_store(kobj, attr, buf, count,
-				 TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
-				 TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
-				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
+	if (!memcmp("always", buf,
+		    min(sizeof("always")-1, count))) {
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+	} else if (!memcmp("defer+madvise", buf,
+		    min(sizeof("defer+madvise")-1, count))) {
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+	} else if (!memcmp("defer", buf,
+		    min(sizeof("defer")-1, count))) {
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+	} else if (!memcmp("madvise", buf,
+		    min(sizeof("madvise")-1, count))) {
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+	} else if (!memcmp("never", buf,
+		    min(sizeof("never")-1, count))) {
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+	} else
+		return -EINVAL;
+
+	return count;
 }
 static struct kobj_attribute defrag_attr =
 	__ATTR(defrag, 0644, defrag_show, defrag_store);
@@ -612,25 +611,28 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 }
 
 /*
- * If THP defrag is set to always then directly reclaim/compact as necessary
- * If set to defer then do only background reclaim/compact and defer to khugepaged
- * If set to madvise and the VMA is flagged then directly reclaim/compact
- * When direct reclaim/compact is allowed, don't retry except for flagged VMA's
+ * always: directly stall for all thp allocations
+ * defer: wake kswapd and fail if not immediately available
+ * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
+ *		  fail if not immediately available
+ * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
+ *	    available
+ * never: never stall for any thp allocation
  */
 static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
 {
-	bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
+	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
 
-	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
-				&transparent_hugepage_flags) && vma_madvised)
-		return GFP_TRANSHUGE;
-	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
-				&transparent_hugepage_flags))
-		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
-	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
-				&transparent_hugepage_flags))
+	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
 		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
-
+	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
+		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
+	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
+		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
+							     __GFP_KSWAPD_RECLAIM);
+	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
+		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
+							     0);
 	return GFP_TRANSHUGE_LIGHT;
 }
 
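
For reference, a minimal userspace sketch (not part of the patch itself) showing how the new defer+madvise mode would be selected and verified through the sysfs attribute backed by defrag_store()/defrag_show() above. It assumes the conventional /sys/kernel/mm/transparent_hugepage/defrag path and a kernel carrying this change; the write requires root.

/*
 * Hypothetical usage sketch: switch THP defrag to "defer+madvise" and
 * read the setting back. The read-back line shows the active mode in
 * brackets, e.g. "always defer [defer+madvise] madvise never".
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define DEFRAG_PATH "/sys/kernel/mm/transparent_hugepage/defrag"

int main(void)
{
	const char mode[] = "defer+madvise";
	char buf[128];
	ssize_t len;
	int fd;

	/* Write the new mode; defrag_store() matches it by prefix. */
	fd = open(DEFRAG_PATH, O_WRONLY);
	if (fd < 0 || write(fd, mode, strlen(mode)) < 0) {
		perror(DEFRAG_PATH);
		return 1;
	}
	close(fd);

	/* Read the setting back via defrag_show(). */
	fd = open(DEFRAG_PATH, O_RDONLY);
	if (fd < 0) {
		perror(DEFRAG_PATH);
		return 1;
	}
	len = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (len > 0) {
		buf[len] = '\0';
		fputs(buf, stdout);
	}
	return 0;
}

Note that the full string only selects the new mode because defrag_store() tests the longer "defer+madvise" prefix before "defer"; with the opposite ordering the min(sizeof()-1, count) prefix match would stop at "defer".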