@@ -59,7 +59,7 @@ struct virtio_balloon
	 * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
	 * to num_pages above.
	 */
-	struct balloon_dev_info *vb_dev_info;
+	struct balloon_dev_info vb_dev_info;
 
	/* Synchronize access/update to this struct virtio_balloon elements */
	struct mutex balloon_lock;
@@ -127,7 +127,7 @@ static void set_page_pfns(u32 pfns[], struct page *page)
 
 static void fill_balloon(struct virtio_balloon *vb, size_t num)
 {
-	struct balloon_dev_info *vb_dev_info = vb->vb_dev_info;
+	struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
 
	/* We can only do one array worth at a time. */
	num = min(num, ARRAY_SIZE(vb->pfns));
@@ -171,7 +171,7 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
 static void leak_balloon(struct virtio_balloon *vb, size_t num)
 {
 	struct page *page;
-	struct balloon_dev_info *vb_dev_info = vb->vb_dev_info;
+	struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
 
	/* We can only do one array worth at a time. */
	num = min(num, ARRAY_SIZE(vb->pfns));
@@ -353,12 +353,11 @@ static int init_vqs(struct virtio_balloon *vb)
	return 0;
 }
 
-static const struct address_space_operations virtio_balloon_aops;
 #ifdef CONFIG_BALLOON_COMPACTION
 /*
  * virtballoon_migratepage - perform the balloon page migration on behalf of
  *			     a compation thread.            (called under page lock)
- * @mapping: the page->mapping which will be assigned to the new migrated page.
+ * @vb_dev_info: the balloon device
  * @newpage: page that will replace the isolated page after migration finishes.
  * @page   : the isolated (old) page that is about to be migrated to newpage.
  * @mode   : compaction mode -- not used for balloon page migration.
@@ -373,17 +372,13 @@ static const struct address_space_operations virtio_balloon_aops;
  * This function preforms the balloon page migration task.
  * Called through balloon_mapping->a_ops->migratepage
  */
-static int virtballoon_migratepage(struct address_space *mapping,
+static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
		struct page *newpage, struct page *page, enum migrate_mode mode)
 {
-	struct balloon_dev_info *vb_dev_info = balloon_page_device(page);
-	struct virtio_balloon *vb;
+	struct virtio_balloon *vb = container_of(vb_dev_info,
+			struct virtio_balloon, vb_dev_info);
	unsigned long flags;
 
-	BUG_ON(!vb_dev_info);
-
-	vb = vb_dev_info->balloon_device;
-
	/*
	 * In order to avoid lock contention while migrating pages concurrently
	 * to leak_balloon() or fill_balloon() we just give up the balloon_lock
@@ -399,7 +394,7 @@ static int virtballoon_migratepage(struct address_space *mapping,
 
	/* balloon's page migration 1st step -- inflate "newpage" */
	spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
-	balloon_page_insert(newpage, mapping, &vb_dev_info->pages);
+	balloon_page_insert(vb_dev_info, newpage);
	vb_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
@@ -418,18 +413,11 @@ static int virtballoon_migratepage(struct address_space *mapping,
 
	return MIGRATEPAGE_SUCCESS;
 }
-
-/* define the balloon_mapping->a_ops callback to allow balloon page migration */
-static const struct address_space_operations virtio_balloon_aops = {
-	.migratepage = virtballoon_migratepage,
-};
 #endif /* CONFIG_BALLOON_COMPACTION */
 
 static int virtballoon_probe(struct virtio_device *vdev)
 {
	struct virtio_balloon *vb;
-	struct address_space *vb_mapping;
-	struct balloon_dev_info *vb_devinfo;
	int err;
 
	vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
@@ -445,30 +433,14 @@ static int virtballoon_probe(struct virtio_device *vdev)
	vb->vdev = vdev;
	vb->need_stats_update = 0;
 
-	vb_devinfo = balloon_devinfo_alloc(vb);
-	if (IS_ERR(vb_devinfo)) {
-		err = PTR_ERR(vb_devinfo);
-		goto out_free_vb;
-	}
-
-	vb_mapping = balloon_mapping_alloc(vb_devinfo,
-					   (balloon_compaction_check()) ?
-					   &virtio_balloon_aops : NULL);
-	if (IS_ERR(vb_mapping)) {
-		/*
-		 * IS_ERR(vb_mapping) && PTR_ERR(vb_mapping) == -EOPNOTSUPP
-		 * This means !CONFIG_BALLOON_COMPACTION, otherwise we get off.
-		 */
-		err = PTR_ERR(vb_mapping);
-		if (err != -EOPNOTSUPP)
-			goto out_free_vb_devinfo;
-	}
-
-	vb->vb_dev_info = vb_devinfo;
+	balloon_devinfo_init(&vb->vb_dev_info);
+#ifdef CONFIG_BALLOON_COMPACTION
+	vb->vb_dev_info.migratepage = virtballoon_migratepage;
+#endif
 
	err = init_vqs(vb);
	if (err)
-		goto out_free_vb_mapping;
+		goto out_free_vb;
 
	vb->thread = kthread_run(balloon, vb, "vballoon");
	if (IS_ERR(vb->thread)) {
@@ -480,10 +452,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
 
 out_del_vqs:
	vdev->config->del_vqs(vdev);
-out_free_vb_mapping:
-	balloon_mapping_free(vb_mapping);
-out_free_vb_devinfo:
-	balloon_devinfo_free(vb_devinfo);
 out_free_vb:
	kfree(vb);
 out:
@@ -509,8 +477,6 @@ static void virtballoon_remove(struct virtio_device *vdev)
 
	kthread_stop(vb->thread);
	remove_common(vb);
-	balloon_mapping_free(vb->vb_dev_info->mapping);
-	balloon_devinfo_free(vb->vb_dev_info);
	kfree(vb);
 }
 