@@ -342,7 +342,6 @@ struct rbd_device {
 
 	struct list_head	rq_queue;	/* incoming rq queue */
 	spinlock_t		lock;		/* queue, flags, open_count */
-	struct workqueue_struct	*rq_wq;
 	struct work_struct	rq_work;
 
 	struct rbd_image_header	header;
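A note on the fields that remain: they implement a simple hand-off. rbd_request_fn() pushes incoming requests onto rq_queue under lock, then kicks rq_work, whose handler drains the queue in process context. A minimal sketch of that pattern follows; the mydev/myreq names are hypothetical, not the actual rbd code:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct myreq {
	struct list_head	node;
};

struct mydev {
	struct list_head	rq_queue;	/* incoming requests */
	spinlock_t		lock;		/* protects rq_queue */
	struct work_struct	rq_work;	/* drains rq_queue */
};

static void mydev_rq_work_fn(struct work_struct *work)
{
	struct mydev *dev = container_of(work, struct mydev, rq_work);
	struct myreq *req;

	for (;;) {
		spin_lock_irq(&dev->lock);
		req = list_first_entry_or_null(&dev->rq_queue,
					       struct myreq, node);
		if (req)
			list_del_init(&req->node);
		spin_unlock_irq(&dev->lock);

		if (!req)
			break;
		/* ... service req in process context ... */
	}
}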
@@ -402,6 +401,8 @@ static struct kmem_cache	*rbd_segment_name_cache;
 static int rbd_major;
 static DEFINE_IDA(rbd_dev_id_ida);
 
+static struct workqueue_struct *rbd_wq;
+
 /*
  * Default to false for now, as single-major requires >= 0.75 version of
  * userspace rbd utility.
@@ -3452,7 +3453,7 @@ static void rbd_request_fn(struct request_queue *q)
 	}
 
 	if (queued)
-		queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work);
+		queue_work(rbd_wq, &rbd_dev->rq_work);
 }
 
 /*
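Switching every device to the shared rbd_wq does not change per-device semantics: each rbd_device keeps its own rq_work, and a given work_struct can be pending at most once, so the number of active work items stays bounded by the number of mapped devices. A sketch of that queue_work() property, with hypothetical demo names:

#include <linux/printk.h>
#include <linux/workqueue.h>

/* Sketch: queue_work() refuses to double-queue a pending work item. */
static void demo_requeue(struct workqueue_struct *wq, struct work_struct *w)
{
	bool first = queue_work(wq, w);	/* true: newly queued */
	bool again = queue_work(wq, w);	/* false: already pending */

	pr_info("first=%d again=%d\n", first, again);
}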
@@ -3532,7 +3533,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
 	page_count = (u32) calc_pages_for(offset, length);
 	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
 	if (IS_ERR(pages))
-		ret = PTR_ERR(pages);
+		return PTR_ERR(pages);
 
 	ret = -ENOMEM;
 	obj_request = rbd_obj_request_create(object_name, offset, length,
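Beyond the workqueue rework, this hunk fixes a real bug: ceph_alloc_page_vector() returns an ERR_PTR-encoded pointer on failure, and the old code stored the errno in ret but then fell through and kept using the poisoned pages pointer. Returning immediately is the standard idiom. A self-contained sketch of the convention, where struct foo and foo_alloc() are hypothetical:

#include <linux/err.h>
#include <linux/slab.h>

struct foo {
	int val;
};

/* Hypothetical allocator that encodes an errno in the pointer itself. */
static struct foo *foo_alloc(gfp_t gfp)
{
	struct foo *f = kzalloc(sizeof(*f), gfp);

	if (!f)
		return ERR_PTR(-ENOMEM);
	return f;
}

static int foo_use(void)
{
	struct foo *f = foo_alloc(GFP_KERNEL);

	if (IS_ERR(f))
		return PTR_ERR(f);	/* decode the errno and bail out */

	/* ... use f ... */
	kfree(f);
	return 0;
}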
@@ -5242,16 +5243,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
 	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
 
-	rbd_dev->rq_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
-					 rbd_dev->disk->disk_name);
-	if (!rbd_dev->rq_wq) {
-		ret = -ENOMEM;
-		goto err_out_mapping;
-	}
-
 	ret = rbd_bus_add_dev(rbd_dev);
 	if (ret)
-		goto err_out_workqueue;
+		goto err_out_mapping;
 
 	/* Everything's ready.  Announce the disk to the world. */
 
@@ -5263,9 +5257,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 
 	return ret;
 
-err_out_workqueue:
-	destroy_workqueue(rbd_dev->rq_wq);
-	rbd_dev->rq_wq = NULL;
 err_out_mapping:
 	rbd_dev_mapping_clear(rbd_dev);
 err_out_disk:
@@ -5512,7 +5503,6 @@ static void rbd_dev_device_release(struct device *dev)
 {
 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
 
-	destroy_workqueue(rbd_dev->rq_wq);
 	rbd_free_disk(rbd_dev);
 	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
 	rbd_dev_mapping_clear(rbd_dev);
@@ -5716,11 +5706,21 @@ static int __init rbd_init(void)
 	if (rc)
 		return rc;
 
+	/*
+	 * The number of active work items is limited by the number of
+	 * rbd devices, so leave @max_active at default.
+	 */
+	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
+	if (!rbd_wq) {
+		rc = -ENOMEM;
+		goto err_out_slab;
+	}
+
 	if (single_major) {
 		rbd_major = register_blkdev(0, RBD_DRV_NAME);
 		if (rbd_major < 0) {
 			rc = rbd_major;
-			goto err_out_slab;
+			goto err_out_wq;
 		}
 	}
 
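Two details worth noting here. alloc_workqueue() takes a printf-style name (the removed per-device call formatted the disk name into it), WQ_* flags, and max_active; WQ_MEM_RECLAIM guarantees a rescuer thread so the queue keeps making forward progress under memory pressure, which a block driver sitting in the writeback path needs. And the new err_out_wq label slots into the usual init-time unwind chain. A minimal sketch of both, with hypothetical demo_* names:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int demo_register(void)
{
	return 0;	/* stand-in for e.g. register_blkdev() */
}

static int __init demo_init(void)
{
	int rc;

	/* WQ_MEM_RECLAIM: a rescuer keeps work running under pressure. */
	demo_wq = alloc_workqueue("demo", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;

	rc = demo_register();
	if (rc)
		goto err_out_wq;

	return 0;

err_out_wq:
	destroy_workqueue(demo_wq);
	return rc;
}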
@@ -5738,6 +5738,8 @@ static int __init rbd_init(void)
 err_out_blkdev:
 	if (single_major)
 		unregister_blkdev(rbd_major, RBD_DRV_NAME);
+err_out_wq:
+	destroy_workqueue(rbd_wq);
 err_out_slab:
 	rbd_slab_exit();
 	return rc;
@@ -5749,6 +5751,7 @@ static void __exit rbd_exit(void)
 	rbd_sysfs_cleanup();
 	if (single_major)
 		unregister_blkdev(rbd_major, RBD_DRV_NAME);
+	destroy_workqueue(rbd_wq);
 	rbd_slab_exit();
 }
 
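Teardown mirrors init in reverse: destroy_workqueue() drains and waits for any still-queued work, so it has to run after the last possible queue_work() caller is gone (sysfs and blkdev are torn down first) and before rbd_slab_exit() frees the caches the work items touch. Continuing the demo sketch from above:

static void demo_unregister(void)
{
	/* stand-in for e.g. unregister_blkdev() */
}

static void __exit demo_exit(void)
{
	demo_unregister();
	destroy_workqueue(demo_wq);	/* drains remaining work first */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");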