@@ -22,6 +22,7 @@
 #include <linux/resource.h>
 #include <linux/page_ext.h>
 #include <linux/err.h>
+#include <linux/page_ref.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -82,6 +83,27 @@ extern int mmap_rnd_compat_bits __read_mostly;
 #define mm_forbids_zeropage(X)	(0)
 #endif
 
+/*
+ * Default maximum number of active map areas; this limits the number of vmas
+ * per mm struct. Users can override this number via sysctl, but there is a
+ * problem.
+ *
+ * When a program's coredump is generated in ELF format, a section is created
+ * per vma. In ELF, the number of sections is stored in an unsigned short,
+ * which means the number of sections must be smaller than 65535 at coredump.
+ * Because the kernel adds some informative sections to the program image
+ * when generating a coredump, we need some margin. The number of extra
+ * sections is 1-3 now, depending on the arch; we use 5 as a safe margin here.
+ *
+ * ELF extended numbering allows more than 65535 sections, so the 16-bit
+ * bound is no longer a hard limit, although some userspace tools can be
+ * surprised by it.
+ */
+#define MAPCOUNT_ELF_CORE_MARGIN	(5)
+#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
+extern int sysctl_max_map_count;
+
 extern unsigned long sysctl_user_reserve_kbytes;
 extern unsigned long sysctl_admin_reserve_kbytes;
 
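
The arithmetic above is easy to spell out: USHRT_MAX is 65535, so DEFAULT_MAX_MAP_COUNT comes out to 65530 VMAs per mm by default, leaving the five-section margin the comment describes. Below is a minimal userspace sketch of that calculation, assuming nothing beyond standard libc and the usual /proc/sys/vm/max_map_count sysctl path; it is an illustration, not part of the patch:

	/* Print the compile-time default alongside the running system's value. */
	#include <limits.h>
	#include <stdio.h>

	#define MAPCOUNT_ELF_CORE_MARGIN	5
	#define DEFAULT_MAX_MAP_COUNT		(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

	int main(void)
	{
		char buf[32];
		FILE *f = fopen("/proc/sys/vm/max_map_count", "r");

		/* 65535 - 5 = 65530 */
		printf("compile-time default: %d\n", DEFAULT_MAX_MAP_COUNT);
		if (f && fgets(buf, sizeof(buf), f))
			printf("current sysctl value: %s", buf);
		if (f)
			fclose(f);
		return 0;
	}
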
@@ -122,6 +144,7 @@ extern unsigned int kobjsize(const void *objp);
 
 /*
  * vm_flags in vm_area_struct, see mm_types.h.
+ * When changing, also update include/trace/events/mmflags.h
  */
 #define VM_NONE		0x00000000
 
@@ -364,8 +387,8 @@ static inline int pmd_devmap(pmd_t pmd)
  */
 static inline int put_page_testzero(struct page *page)
 {
-	VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
-	return atomic_dec_and_test(&page->_count);
+	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
+	return page_ref_dec_and_test(page);
 }
 
 /*
@@ -376,7 +399,7 @@ static inline int put_page_testzero(struct page *page)
  */
 static inline int get_page_unless_zero(struct page *page)
 {
-	return atomic_inc_not_zero(&page->_count);
+	return page_ref_add_unless(page, 1, 0);
 }
 
 extern int page_is_ram(unsigned long pfn);
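
The page_ref_count()/page_ref_dec_and_test()/page_ref_add_unless() helpers used in the two hunks above come from the newly included <linux/page_ref.h>. They are expected to be thin wrappers around the same atomics on page->_count (plus optional tracepoints), which is also presumably why page_count() and init_page_count() disappear from this header a few hunks further down. A rough sketch of the wrappers' shape, under that assumption rather than as a copy of the real header:

	/* Sketch only: a 1:1 mapping onto the old atomic_* calls, hooks omitted. */
	static inline int page_ref_count(struct page *page)
	{
		return atomic_read(&page->_count);
	}

	static inline void page_ref_inc(struct page *page)
	{
		atomic_inc(&page->_count);
	}

	static inline int page_ref_dec_and_test(struct page *page)
	{
		return atomic_dec_and_test(&page->_count);
	}

	static inline int page_ref_add_unless(struct page *page, int nr, int u)
	{
		return atomic_add_unless(&page->_count, nr, u);
	}
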
@@ -464,11 +487,6 @@ static inline int total_mapcount(struct page *page)
 }
 #endif
 
-static inline int page_count(struct page *page)
-{
-	return atomic_read(&compound_head(page)->_count);
-}
-
 static inline struct page *virt_to_head_page(const void *x)
 {
 	struct page *page = virt_to_page(x);
@@ -476,15 +494,6 @@ static inline struct page *virt_to_head_page(const void *x)
 	return compound_head(page);
 }
 
-/*
- * Setup the page count before being freed into the page allocator for
- * the first time (boot or memory hotplug)
- */
-static inline void init_page_count(struct page *page)
-{
-	atomic_set(&page->_count, 1);
-}
-
 void __put_page(struct page *page);
 
 void put_pages_list(struct list_head *pages);
@@ -694,8 +703,8 @@ static inline void get_page(struct page *page)
 	 * Getting a normal page or the head of a compound page
 	 * requires to already have an elevated page->_count.
 	 */
-	VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
-	atomic_inc(&page->_count);
+	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
+	page_ref_inc(page);
 
 	if (unlikely(is_zone_device_page(page)))
 		get_zone_device_page(page);
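
The context lines above state the contract: get_page() may only be called on a page whose refcount is already elevated, while get_page_unless_zero() (earlier in this file) is the variant for speculative lookups where the page may be racing with its final put_page(). A hedged usage sketch of that pattern; lookup_candidate_page() is a hypothetical helper, not a real API:

	/* Speculative reference: the page was found without holding a reference,
	 * so it may be freed under us; get_page_unless_zero() either pins it or
	 * reports that it is already on its way back to the allocator. */
	static struct page *grab_candidate(void)
	{
		struct page *page = lookup_candidate_page();	/* hypothetical */

		if (!page || !get_page_unless_zero(page))
			return NULL;

		/* Caller now owns a reference and must put_page() when done. */
		return page;
	}
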
@@ -1043,8 +1052,6 @@ static inline void clear_page_pfmemalloc(struct page *page)
  * just gets major/minor fault counters bumped up.
  */
 
-#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */
-
 #define VM_FAULT_OOM	0x0001
 #define VM_FAULT_SIGBUS	0x0002
 #define VM_FAULT_MAJOR	0x0004
@@ -1523,8 +1530,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm)
 }
 #endif
 
-int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
-		pmd_t *pmd, unsigned long address);
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
 
 /*
@@ -1650,15 +1656,15 @@ static inline void pgtable_page_dtor(struct page *page)
 		pte_unmap(pte);					\
 	} while (0)
 
-#define pte_alloc_map(mm, vma, pmd, address)			\
-	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,	\
-							pmd, address))?	\
-	 NULL: pte_offset_map(pmd, address))
+#define pte_alloc(mm, pmd, address)			\
+	(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))
+
+#define pte_alloc_map(mm, pmd, address)			\
+	(pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))
 
 #define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
-	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,	\
-							pmd, address))?	\
-	 NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
+	(pte_alloc(mm, pmd, address) ?			\
+	 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
 
 #define pte_alloc_kernel(pmd, address)			\
 	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
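
The refactor above pulls the "allocate a PTE page if the pmd is empty" check out into a standalone pte_alloc(), reuses it in pte_alloc_map() and pte_alloc_map_lock(), and drops the vma argument to match the new __pte_alloc() prototype from the previous hunk. A sketch of how a typical caller looks after the change (a simplified fault-path style helper, for illustration only):

	/* Allocate the PTE page if needed, map and lock the entry, then install
	 * a PTE. Returns VM_FAULT_OOM when __pte_alloc() cannot get a page. */
	static int example_install_pte(struct mm_struct *mm, pmd_t *pmd,
				       unsigned long address, pte_t entry)
	{
		spinlock_t *ptl;
		pte_t *pte;

		pte = pte_alloc_map_lock(mm, pmd, address, &ptl);
		if (!pte)
			return VM_FAULT_OOM;

		set_pte_at(mm, address, pte, entry);
		pte_unmap_unlock(pte, ptl);
		return 0;
	}
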
@@ -1853,6 +1859,7 @@ extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);
 extern void __init mmap_init(void);
 extern void show_mem(unsigned int flags);
+extern long si_mem_available(void);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
 
@@ -1867,6 +1874,7 @@ extern void zone_pcp_reset(struct zone *zone);
 
 /* page_alloc.c */
 extern int min_free_kbytes;
+extern int watermark_scale_factor;
 
 /* nommu.c */
 extern atomic_long_t mmap_pages_allocated;