@@ -29,10 +29,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	unsigned long committed;
 	long cached;
 	long available;
-	unsigned long pagecache;
-	unsigned long wmark_low = 0;
 	unsigned long pages[NR_LRU_LISTS];
-	struct zone *zone;
 	int lru;
 
 	/*
@@ -51,33 +48,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
 		pages[lru] = global_page_state(NR_LRU_BASE + lru);
 
-	for_each_zone(zone)
-		wmark_low += zone->watermark[WMARK_LOW];
-
-	/*
-	 * Estimate the amount of memory available for userspace allocations,
-	 * without causing swapping.
-	 */
-	available = i.freeram - totalreserve_pages;
-
-	/*
-	 * Not all the page cache can be freed, otherwise the system will
-	 * start swapping. Assume at least half of the page cache, or the
-	 * low watermark worth of cache, needs to stay.
-	 */
-	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
-	pagecache -= min(pagecache / 2, wmark_low);
-	available += pagecache;
-
-	/*
-	 * Part of the reclaimable slab consists of items that are in use,
-	 * and cannot be freed. Cap this estimate at the low watermark.
-	 */
-	available += global_page_state(NR_SLAB_RECLAIMABLE) -
-		     min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
-
-	if (available < 0)
-		available = 0;
+	available = si_mem_available();
 
 	/*
 	 * Tagged format, for easy grepping and expansion.
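
The hunk above drops the open-coded MemAvailable estimate in favour of a single call to si_mem_available(). As a reading aid, here is a minimal userspace sketch of the arithmetic the removed lines perform; the function and parameter names (estimate_available, freeram, wmark_low, and so on) and the sample numbers in main() are hypothetical stand-ins for the kernel counters, not kernel or libc APIs.

/*
 * Sketch of the estimate the removed code open-codes (and which
 * si_mem_available() now provides in-kernel). Inputs are plain page
 * counts standing in for: free pages, totalreserve_pages, the file LRU
 * sizes, reclaimable slab pages, and the sum of the zones' low watermarks.
 */
#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static long estimate_available(unsigned long freeram,
			       unsigned long totalreserve,
			       unsigned long active_file,
			       unsigned long inactive_file,
			       unsigned long slab_reclaimable,
			       unsigned long wmark_low)
{
	/* Start from free memory minus the kernel's reserved pages. */
	long available = (long)freeram - (long)totalreserve;

	/*
	 * Not all page cache is freeable without swapping: keep at least
	 * half of it, or the low watermark's worth, whichever is smaller.
	 */
	unsigned long pagecache = active_file + inactive_file;
	pagecache -= min_ul(pagecache / 2, wmark_low);
	available += pagecache;

	/* Apply the same cap to the reclaimable slab. */
	available += slab_reclaimable -
		     min_ul(slab_reclaimable / 2, wmark_low);

	return available < 0 ? 0 : available;
}

int main(void)
{
	/* Made-up page counts, purely to show the arithmetic. */
	printf("estimated available: %ld pages\n",
	       estimate_available(50000, 8000, 120000, 90000, 30000, 10000));
	return 0;
}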