|
|
@@ -723,4 +723,54 @@ static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
|
|
|
#define RESYNC_BLOCK_SIZE (64*1024)
|
|
|
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
|
|
|
|
|
|
/*
 * for managing resync I/O pages
 *
 * One resync_pages holds the backing pages for a single
 * RESYNC_BLOCK_SIZE-sized resync block (RESYNC_PAGES of them).
 */
struct resync_pages {
	unsigned idx;		/* for get/put page from the pool */
	void *raid_bio;		/* back-pointer to the owning raid bio structure —
				 * presumably an r1bio/r10bio; TODO confirm against callers */
	struct page *pages[RESYNC_PAGES];	/* pages backing one resync block */
};
|
|
|
+
|
|
|
+static inline int resync_alloc_pages(struct resync_pages *rp,
|
|
|
+ gfp_t gfp_flags)
|
|
|
+{
|
|
|
+ int i;
|
|
|
+
|
|
|
+ for (i = 0; i < RESYNC_PAGES; i++) {
|
|
|
+ rp->pages[i] = alloc_page(gfp_flags);
|
|
|
+ if (!rp->pages[i])
|
|
|
+ goto out_free;
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+
|
|
|
+out_free:
|
|
|
+ while (--i >= 0)
|
|
|
+ put_page(rp->pages[i]);
|
|
|
+ return -ENOMEM;
|
|
|
+}
|
|
|
+
|
|
|
+static inline void resync_free_pages(struct resync_pages *rp)
|
|
|
+{
|
|
|
+ int i;
|
|
|
+
|
|
|
+ for (i = 0; i < RESYNC_PAGES; i++)
|
|
|
+ put_page(rp->pages[i]);
|
|
|
+}
|
|
|
+
|
|
|
+static inline void resync_get_all_pages(struct resync_pages *rp)
|
|
|
+{
|
|
|
+ int i;
|
|
|
+
|
|
|
+ for (i = 0; i < RESYNC_PAGES; i++)
|
|
|
+ get_page(rp->pages[i]);
|
|
|
+}
|
|
|
+
|
|
|
+static inline struct page *resync_fetch_page(struct resync_pages *rp,
|
|
|
+ unsigned idx)
|
|
|
+{
|
|
|
+ if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
|
|
|
+ return NULL;
|
|
|
+ return rp->pages[idx];
|
|
|
+}
|
|
|
+
|
|
|
#endif /* _MD_MD_H */
|