@@ -446,6 +446,12 @@ static inline struct page *compound_head_by_tail(struct page *tail)
 	return tail;
 }
 
+/*
+ * Since a compound page can be dismantled asynchronously by a THP split,
+ * or an arbitrarily positioned struct page may be accessed asynchronously,
+ * the tail flag check can race. To handle this race, call smp_rmb()
+ * before checking the tail flag, as compound_head_by_tail() does.
+ */
 static inline struct page *compound_head(struct page *page)
 {
 	if (unlikely(PageTail(page)))
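
To make the race described in the comment above concrete, here is a minimal
userspace sketch using C11 fences in place of the kernel's smp_wmb()/smp_rmb()
pair. The mock_page type, split_compound() and mock_compound_head() are
hypothetical names for illustration, and the split ordering modeled here
(clear the tail flag, then invalidate first_page) is an assumption of the
sketch, not the kernel's actual split path:

#include <stdatomic.h>
#include <stddef.h>

struct mock_page {
	atomic_int tail;			/* models the PageTail() flag */
	_Atomic(struct mock_page *) first_page;	/* valid while tail is set    */
};

/* Writer, e.g. a THP split: clear the tail flag first, then invalidate
 * first_page.  The release fence stands in for smp_wmb().
 */
static void split_compound(struct mock_page *p)
{
	atomic_store_explicit(&p->tail, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&p->first_page, NULL, memory_order_relaxed);
}

/* Reader, modeling compound_head_by_tail(): snapshot first_page, issue the
 * read barrier, then re-check the tail flag.  If the flag is still set, the
 * snapshot was taken before any split invalidated it, so it is safe to use.
 */
static struct mock_page *mock_compound_head(struct mock_page *p)
{
	struct mock_page *head =
		atomic_load_explicit(&p->first_page, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	if (atomic_load_explicit(&p->tail, memory_order_relaxed))
		return head;		/* still a tail page: head is valid */
	return p;			/* not (or no longer) a tail page   */
}

int main(void)
{
	static struct mock_page head, tail_pg;

	atomic_store(&tail_pg.first_page, &head);
	atomic_store(&tail_pg.tail, 1);

	struct mock_page *h = mock_compound_head(&tail_pg);	/* &head    */
	split_compound(&tail_pg);
	h = mock_compound_head(&tail_pg);			/* &tail_pg */
	return h == &tail_pg ? 0 : 1;
}

The pairing guarantees that a reader which still observes the tail flag set
after the barrier cannot have read the invalidated first_page value.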
@@ -453,6 +459,18 @@ static inline struct page *compound_head(struct page *page)
 	return page;
 }
 
+/*
+ * If we access a compound page synchronously, such as through an already
+ * allocated page, there is no tail flag race to handle, so we can check
+ * the tail flag directly without any synchronization primitive.
+ */
+static inline struct page *compound_head_fast(struct page *page)
+{
+	if (unlikely(PageTail(page)))
+		return page->first_page;
+	return page;
+}
+
 /*
  * The atomic page->_mapcount, starts from -1: so that transitions
  * both from it and to it can be tracked, using atomic_inc_and_test
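
Why is a barrier-free variant ever safe? A minimal userspace sketch of the
caller contract, assuming a split path that refuses to run while the page
holds a reference; mock_page, try_split() and mock_compound_head_fast() are
hypothetical names, not kernel APIs:

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

struct mock_page {
	atomic_int refcount;		/* > 0 means the page is pinned    */
	int tail;			/* stable while the page is pinned */
	struct mock_page *first_page;	/* stable while the page is pinned */
};

/* A split may only proceed when nobody holds a reference. */
static int try_split(struct mock_page *p)
{
	if (atomic_load(&p->refcount) > 0)
		return 0;			/* pinned: refuse to split */
	p->tail = 0;
	p->first_page = NULL;
	return 1;
}

/* No barrier needed: the caller's pin excludes any concurrent split, so
 * tail and first_page cannot change underneath us.
 */
static struct mock_page *mock_compound_head_fast(struct mock_page *p)
{
	assert(atomic_load(&p->refcount) > 0);
	return p->tail ? p->first_page : p;
}

int main(void)
{
	static struct mock_page head, tail_pg;

	tail_pg.tail = 1;
	tail_pg.first_page = &head;

	atomic_fetch_add(&tail_pg.refcount, 1);	/* pin, like get_page()   */
	assert(!try_split(&tail_pg));		/* split cannot intervene */
	assert(mock_compound_head_fast(&tail_pg) == &head);
	atomic_fetch_sub(&tail_pg.refcount, 1);	/* unpin, like put_page() */
	return 0;
}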
@@ -531,7 +549,14 @@ static inline void get_page(struct page *page)
 static inline struct page *virt_to_head_page(const void *x)
 {
 	struct page *page = virt_to_page(x);
-	return compound_head(page);
+
+	/*
+	 * We don't need to worry about synchronization of the tail flag
+	 * here, since virt_to_head_page() is only called on an already
+	 * allocated page, and that page won't be freed until this call
+	 * has finished. So use the _fast variant.
+	 */
+	return compound_head_fast(page);
 }
 
 /*
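
Finally, a sketch of the patched virt_to_head_page() path end to end, with
virt_to_page() modeled as an index into a flat array of page descriptors.
The mock_* names, the 4 KiB page size and the flat mem_map layout are all
assumptions made for illustration:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define MOCK_PAGE_SHIFT	12		/* 4 KiB pages */
#define MOCK_NR_PAGES	4

struct mock_page {
	int tail;
	struct mock_page *first_page;
};

static struct mock_page mock_mem_map[MOCK_NR_PAGES];
static char mock_mem[MOCK_NR_PAGES << MOCK_PAGE_SHIFT];

/* Stand-in for virt_to_page(): index the flat mem_map by page frame. */
static struct mock_page *mock_virt_to_page(const void *x)
{
	size_t pfn = ((uintptr_t)x - (uintptr_t)mock_mem) >> MOCK_PAGE_SHIFT;

	return &mock_mem_map[pfn];
}

/* Stand-in for the patched virt_to_head_page(): no barrier, because the
 * caller guarantees the page stays allocated for the duration of the call.
 */
static struct mock_page *mock_virt_to_head_page(const void *x)
{
	struct mock_page *page = mock_virt_to_page(x);

	return page->tail ? page->first_page : page;
}

int main(void)
{
	/* Pages 0..3 form one compound page: 0 is head, 1..3 are tails. */
	for (int i = 1; i < MOCK_NR_PAGES; i++) {
		mock_mem_map[i].tail = 1;
		mock_mem_map[i].first_page = &mock_mem_map[0];
	}

	/* An address inside page 2 resolves to the head page. */
	void *obj = &mock_mem[(2 << MOCK_PAGE_SHIFT) + 128];

	assert(mock_virt_to_head_page(obj) == &mock_mem_map[0]);
	return 0;
}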