@@ -3713,6 +3713,49 @@ static inline void show_node(struct zone *zone)
 		printk("Node %d ", zone_to_nid(zone));
 }
 
+long si_mem_available(void)
+{
+	long available;
+	unsigned long pagecache;
+	unsigned long wmark_low = 0;
+	unsigned long pages[NR_LRU_LISTS];
+	struct zone *zone;
+	int lru;
+
+	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
+		pages[lru] = global_page_state(NR_LRU_BASE + lru);
+
+	for_each_zone(zone)
+		wmark_low += zone->watermark[WMARK_LOW];
+
+	/*
+	 * Estimate the amount of memory available for userspace allocations,
+	 * without causing swapping.
+	 */
+	available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
+
+	/*
+	 * Not all the page cache can be freed, otherwise the system will
+	 * start swapping. Assume at least half of the page cache, or the
+	 * low watermark worth of cache, needs to stay.
+	 */
+	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
+	pagecache -= min(pagecache / 2, wmark_low);
+	available += pagecache;
+
+	/*
+	 * Part of the reclaimable slab consists of items that are in use,
+	 * and cannot be freed. Cap this estimate at the low watermark.
+	 */
+	available += global_page_state(NR_SLAB_RECLAIMABLE) -
+		     min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
+
+	if (available < 0)
+		available = 0;
+	return available;
+}
+EXPORT_SYMBOL_GPL(si_mem_available);
+
 void si_meminfo(struct sysinfo *val)
 {
 	val->totalram = totalram_pages;
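
Since the function is exported GPL-only, modules can call it directly; it returns the estimate as a page count, never negative. Below is a minimal sketch of a consumer, assuming the declaration lands in <linux/mm.h> alongside si_meminfo(). The module name and the kB conversion are illustrative, not part of the patch:

#include <linux/module.h>
#include <linux/mm.h>	/* assumed to declare si_mem_available() */

static int __init memavail_example_init(void)
{
	/* Estimated pages userspace could allocate without swapping. */
	long avail = si_mem_available();

	/* Convert pages to KiB: a page is (1 << PAGE_SHIFT) bytes. */
	pr_info("memavail_example: ~%ld kB available\n",
		avail << (PAGE_SHIFT - 10));
	return 0;
}

static void __exit memavail_example_exit(void)
{
}

module_init(memavail_example_init);
module_exit(memavail_example_exit);
MODULE_LICENSE("GPL");	/* required: si_mem_available is EXPORT_SYMBOL_GPL */

Note the value is only a heuristic snapshot: it combines free pages above the reserves, the freeable half (or low-watermark-capped share) of the file page cache, and a similarly capped share of reclaimable slab, so callers should treat it as advisory rather than a guarantee.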