@@ -148,7 +148,7 @@ static inline int page_cache_get_speculative(struct page *page)
 
 #ifdef CONFIG_TINY_RCU
 # ifdef CONFIG_PREEMPT_COUNT
-	VM_BUG_ON(!in_atomic());
+	VM_BUG_ON(!in_atomic() && !irqs_disabled());
 # endif
 	/*
 	 * Preempt must be disabled here - we rely on rcu_read_lock doing
@@ -186,7 +186,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
 
 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
 # ifdef CONFIG_PREEMPT_COUNT
-	VM_BUG_ON(!in_atomic());
+	VM_BUG_ON(!in_atomic() && !irqs_disabled());
 # endif
 	VM_BUG_ON_PAGE(page_count(page) == 0, page);
 	page_ref_add(page, count);
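
For context, a minimal standalone C sketch of what the relaxed assertion accepts. This is not kernel source: in_atomic_ctx and irqs_off are hypothetical stand-ins for the kernel's in_atomic() and irqs_disabled(). The point of the change above is that on these !SMP configurations, running with local interrupts disabled pins the RCU read-side critical section just as effectively as running with preemption disabled, so callers in irq-disabled context should not trip the debug check.

	/*
	 * Minimal sketch, assuming the semantics described above.
	 * assert_rcu_safe_context() mirrors the relaxed check:
	 * VM_BUG_ON(cond) fires when cond is true, so it corresponds
	 * to assert(!cond) here.
	 */
	#include <assert.h>
	#include <stdbool.h>

	static bool in_atomic_ctx; /* stand-in: preemption disabled / atomic context */
	static bool irqs_off;      /* stand-in: local interrupts disabled */

	static void assert_rcu_safe_context(void)
	{
		/* Old check: assert(in_atomic_ctx); rejected irq-disabled callers. */
		/* New check: either condition keeps the RCU read side pinned.     */
		assert(!(!in_atomic_ctx && !irqs_off));
	}

	int main(void)
	{
		irqs_off = true;           /* e.g. caller runs with irqs disabled  */
		assert_rcu_safe_context(); /* passes under the relaxed check       */
		return 0;
	}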