@@ -988,6 +988,12 @@ void do_page_add_anon_rmap(struct page *page,
 {
 	int first = atomic_inc_and_test(&page->_mapcount);
 	if (first) {
+		/*
+		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+		 * these counters are not modified in interrupt context, and
+		 * pte lock(a spinlock) is held, which implies preemption
+		 * disabled.
+		 */
 		if (PageTransHuge(page))
 			__inc_zone_page_state(page,
 					      NR_ANON_TRANSPARENT_HUGEPAGES);
@@ -1079,6 +1085,10 @@ void page_remove_rmap(struct page *page)
 	/*
 	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
 	 * and not charged by memcg for now.
+	 *
+	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+	 * these counters are not modified in interrupt context, and
+	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
 	if (unlikely(PageHuge(page)))
 		goto out;
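
For context on the reasoning in the added comments, here is a small userspace sketch, an analogy under stated assumptions rather than kernel code. __inc_zone_page_state() performs a plain, non-atomic read-modify-write of a per-CPU vmstat delta, which is only safe when nothing can interleave with the update; in the patch that follows from the counters never being modified in interrupt context and the pte spinlock implying preemption is disabled. The thread-local counter and the __inc_stat()/inc_stat() helpers below are hypothetical stand-ins invented for illustration; the irq-safe kernel variants exclude interrupting writers by disabling interrupts, whereas the sketch simply uses an atomic for its "safe" variant.

/*
 * Userspace analogy only (hypothetical helpers, not kernel code): a plain
 * increment of a thread-local counter is safe as long as no other context
 * (here, a signal handler standing in for an interrupt) can modify the same
 * slot in the middle of the read-modify-write.
 */
#include <stdio.h>

static __thread long vm_stat_delta;	/* stand-in for a per-CPU vmstat delta */

static void __inc_stat(void)		/* cheap variant, like __inc_zone_page_state */
{
	vm_stat_delta++;		/* plain RMW: relies on no interleaving */
}

static void inc_stat(void)		/* "safe" variant of the analogy: atomic RMW */
{
	__atomic_fetch_add(&vm_stat_delta, 1, __ATOMIC_RELAXED);
}

int main(void)
{
	__inc_stat();	/* what the patch relies on: preemption off, no irq writers */
	inc_stat();	/* what would be needed if another context could race */
	printf("delta = %ld\n", vm_stat_delta);
	return 0;
}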