@@ -74,6 +74,22 @@ void __init hibernate_image_size_init(void)
  */
 struct pbe *restore_pblist;
 
+/* struct linked_page is used to build chains of pages */
+
+#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
+
+struct linked_page {
+	struct linked_page *next;
+	char data[LINKED_PAGE_DATA_SIZE];
+} __packed;
+
+/*
+ * List of "safe" pages (ie. pages that were not used by the image kernel
+ * before hibernation) that may be used as temporary storage for image kernel
+ * memory contents.
+ */
+static struct linked_page *safe_pages_list;
+
 /* Pointer to an auxiliary buffer (1 page) */
 static void *buffer;
 
@@ -113,9 +129,21 @@ static void *get_image_page(gfp_t gfp_mask, int safe_needed)
 	return res;
 }
 
+static void *__get_safe_page(gfp_t gfp_mask)
+{
+	if (safe_pages_list) {
+		void *ret = safe_pages_list;
+
+		safe_pages_list = safe_pages_list->next;
+		memset(ret, 0, PAGE_SIZE);
+		return ret;
+	}
+	return get_image_page(gfp_mask, PG_SAFE);
+}
+
 unsigned long get_safe_page(gfp_t gfp_mask)
 {
-	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
+	return (unsigned long)__get_safe_page(gfp_mask);
 }
 
 static struct page *alloc_image_page(gfp_t gfp_mask)
@@ -130,6 +158,14 @@ static struct page *alloc_image_page(gfp_t gfp_mask)
 	return page;
 }
 
+static void recycle_safe_page(void *page_address)
+{
+	struct linked_page *lp = page_address;
+
+	lp->next = safe_pages_list;
+	safe_pages_list = lp;
+}
+
 /**
  * free_image_page - free page represented by @addr, allocated with
  * get_image_page (page flags set by it must be cleared)
@@ -150,15 +186,6 @@ static inline void free_image_page(void *addr, int clear_nosave_free)
 	__free_page(page);
 }
 
-/* struct linked_page is used to build chains of pages */
-
-#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
-
-struct linked_page {
-	struct linked_page *next;
-	char data[LINKED_PAGE_DATA_SIZE];
-} __packed;
-
 static inline void
 free_list_of_pages(struct linked_page *list, int clear_page_nosave)
 {
@@ -208,7 +235,8 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
 	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
 		struct linked_page *lp;
 
-		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
+		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
+			get_image_page(ca->gfp_mask, PG_ANY);
 		if (!lp)
 			return NULL;
 
@@ -832,6 +860,34 @@ struct nosave_region {
 
 static LIST_HEAD(nosave_regions);
 
+static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
+{
+	struct rtree_node *node;
+
+	list_for_each_entry(node, &zone->nodes, list)
+		recycle_safe_page(node->data);
+
+	list_for_each_entry(node, &zone->leaves, list)
+		recycle_safe_page(node->data);
+}
+
+static void memory_bm_recycle(struct memory_bitmap *bm)
+{
+	struct mem_zone_bm_rtree *zone;
+	struct linked_page *p_list;
+
+	list_for_each_entry(zone, &bm->zones, list)
+		recycle_zone_bm_rtree(zone);
+
+	p_list = bm->p_list;
+	while (p_list) {
+		struct linked_page *lp = p_list;
+
+		p_list = lp->next;
+		recycle_safe_page(lp);
+	}
+}
+
 /**
  * register_nosave_region - register a range of page frames the contents
  * of which should not be saved during the suspend (to be used in the early
@@ -1999,53 +2055,41 @@ int snapshot_read_next(struct snapshot_handle *handle)
 		return PAGE_SIZE;
 }
 
+static void duplicate_memory_bitmap(struct memory_bitmap *dst,
+				    struct memory_bitmap *src)
+{
+	unsigned long pfn;
+
+	memory_bm_position_reset(src);
+	pfn = memory_bm_next_pfn(src);
+	while (pfn != BM_END_OF_MAP) {
+		memory_bm_set_bit(dst, pfn);
+		pfn = memory_bm_next_pfn(src);
+	}
+}
+
 /**
  * mark_unsafe_pages - mark the pages that cannot be used for storing
  * the image during resume, because they conflict with the pages that
  * had been used before suspend
  */
 
-static int mark_unsafe_pages(struct memory_bitmap *bm)
+static void mark_unsafe_pages(struct memory_bitmap *bm)
 {
-	struct zone *zone;
-	unsigned long pfn, max_zone_pfn;
+	unsigned long pfn;
 
-	/* Clear page flags */
-	for_each_populated_zone(zone) {
-		max_zone_pfn = zone_end_pfn(zone);
-		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if (pfn_valid(pfn))
-				swsusp_unset_page_free(pfn_to_page(pfn));
+	/* Clear the "free"/"unsafe" bit for all PFNs */
+	memory_bm_position_reset(free_pages_map);
+	pfn = memory_bm_next_pfn(free_pages_map);
+	while (pfn != BM_END_OF_MAP) {
+		memory_bm_clear_current(free_pages_map);
+		pfn = memory_bm_next_pfn(free_pages_map);
 	}
 
-	/* Mark pages that correspond to the "original" pfns as "unsafe" */
-	memory_bm_position_reset(bm);
-	do {
-		pfn = memory_bm_next_pfn(bm);
-		if (likely(pfn != BM_END_OF_MAP)) {
-			if (likely(pfn_valid(pfn)))
-				swsusp_set_page_free(pfn_to_page(pfn));
-			else
-				return -EFAULT;
-		}
-	} while (pfn != BM_END_OF_MAP);
+	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
+	duplicate_memory_bitmap(free_pages_map, bm);
 
 	allocated_unsafe_pages = 0;
-
-	return 0;
-}
-
-static void
-duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
-{
-	unsigned long pfn;
-
-	memory_bm_position_reset(src);
-	pfn = memory_bm_next_pfn(src);
-	while (pfn != BM_END_OF_MAP) {
-		memory_bm_set_bit(dst, pfn);
-		pfn = memory_bm_next_pfn(src);
-	}
 }
 
 static int check_header(struct swsusp_info *info)
@@ -2095,7 +2139,7 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
 		/* Extract and buffer page key for data page (s390 only). */
 		page_key_memorize(buf + j);
 
-		if (memory_bm_pfn_present(bm, buf[j]))
+		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
 			memory_bm_set_bit(bm, buf[j]);
 		else
 			return -EFAULT;
@@ -2104,11 +2148,6 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
 	return 0;
 }
 
-/* List of "safe" pages that may be used to store data loaded from the suspend
- * image
- */
-static struct linked_page *safe_pages_list;
-
 #ifdef CONFIG_HIGHMEM
 /* struct highmem_pbe is used for creating the list of highmem pages that
  * should be restored atomically during the resume from disk, because the page
@@ -2334,7 +2373,7 @@ static int
 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 {
 	unsigned int nr_pages, nr_highmem;
-	struct linked_page *sp_list, *lp;
+	struct linked_page *lp;
 	int error;
 
 	/* If there is no highmem, the buffer will not be necessary */
@@ -2342,9 +2381,7 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 		buffer = NULL;
 
 	nr_highmem = count_highmem_image_pages(bm);
-	error = mark_unsafe_pages(bm);
-	if (error)
-		goto Free;
+	mark_unsafe_pages(bm);
 
 	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
 	if (error)
@@ -2362,9 +2399,9 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 	 * NOTE: This way we make sure there will be enough safe pages for the
 	 * chain_alloc() in get_buffer(). It is a bit wasteful, but
 	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
+	 *
+	 * nr_copy_pages cannot be less than allocated_unsafe_pages too.
 	 */
-	sp_list = NULL;
-	/* nr_copy_pages cannot be lesser than allocated_unsafe_pages */
 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
 	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
 	while (nr_pages > 0) {
@@ -2373,12 +2410,11 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 			error = -ENOMEM;
 			goto Free;
 		}
-		lp->next = sp_list;
-		sp_list = lp;
+		lp->next = safe_pages_list;
+		safe_pages_list = lp;
 		nr_pages--;
 	}
 	/* Preallocate memory for the image */
-	safe_pages_list = NULL;
 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
 	while (nr_pages > 0) {
 		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
@@ -2396,12 +2432,6 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 		swsusp_set_page_free(virt_to_page(lp));
 		nr_pages--;
 	}
-	/* Free the reserved safe pages so that chain_alloc() can use them */
-	while (sp_list) {
-		lp = sp_list->next;
-		free_image_page(sp_list, PG_UNSAFE_CLEAR);
-		sp_list = lp;
-	}
 	return 0;
 
 Free:
@@ -2491,6 +2521,8 @@ int snapshot_write_next(struct snapshot_handle *handle)
 	if (error)
 		return error;
 
+	safe_pages_list = NULL;
+
 	error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
 	if (error)
 		return error;
@@ -2546,9 +2578,9 @@ void snapshot_write_finalize(struct snapshot_handle *handle)
 	/* Restore page key for data page (s390 only). */
 	page_key_write(handle->buffer);
 	page_key_free();
-	/* Free only if we have loaded the image entirely */
+	/* Do that only if we have loaded the image entirely */
 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
-		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
+		memory_bm_recycle(&orig_bm);
 		free_highmem_data();
 	}
 }
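
The machinery above reduces to a LIFO list threaded through the free pages themselves: __get_safe_page() pops (and zeroes) the head of safe_pages_list, and recycle_safe_page() pushes a page back so the next allocation can reuse it instead of hunting for another safe page frame. The following is a minimal userspace sketch of that mechanism, not kernel code: calloc() stands in for page allocation, the gfp_mask plumbing and the PG_SAFE fallback are simplified away, and the names mirror the patch only for readability.

/*
 * Userspace sketch only; malloc()/calloc() stand in for page
 * allocation and the gfp_mask plumbing is dropped.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096
#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
};

static struct linked_page *safe_pages_list;

/* Pop the head of the chain and zero it, as __get_safe_page() does;
 * fall back to a fresh allocation when the chain is empty (the patch
 * falls back to get_image_page(gfp_mask, PG_SAFE) at this point). */
static void *get_safe_page(void)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return calloc(1, PAGE_SIZE);
}

/* Push a page that is no longer needed back onto the chain, as
 * recycle_safe_page() does once the image has been restored. */
static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

int main(void)
{
	void *a = get_safe_page();	/* chain empty: fresh zeroed block */

	recycle_safe_page(a);		/* push it back onto the chain */

	void *b = get_safe_page();	/* pops and re-zeroes the same block */

	printf("block reused: %s\n", a == b ? "yes" : "no");
	free(b);
	return 0;
}

Threading the next pointer through the chained page itself is what keeps the scheme free of bookkeeping overhead, and it is exactly why LINKED_PAGE_DATA_SIZE is defined as PAGE_SIZE minus sizeof(void *): the first pointer-sized slot of each chained page holds the link, and the remainder is payload.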