@@ -3047,13 +3047,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
 	mutex_unlock(&rbd_dev->watch_mutex);
 }
 
+static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
+{
+	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
+
+	strcpy(rbd_dev->lock_cookie, cookie);
+	rbd_set_owner_cid(rbd_dev, &cid);
+	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+}
+
 /*
  * lock_rwsem must be held for write
  */
 static int rbd_lock(struct rbd_device *rbd_dev)
 {
 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
-	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
 	char cookie[32];
 	int ret;
 
@@ -3068,9 +3076,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
 		return ret;
 
 	rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
-	strcpy(rbd_dev->lock_cookie, cookie);
-	rbd_set_owner_cid(rbd_dev, &cid);
-	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+	__rbd_lock(rbd_dev, cookie);
 	return 0;
 }
 
@@ -3856,7 +3862,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
 			queue_delayed_work(rbd_dev->task_wq,
 					   &rbd_dev->lock_dwork, 0);
 	} else {
-		strcpy(rbd_dev->lock_cookie, cookie);
+		__rbd_lock(rbd_dev, cookie);
 	}
 }
 
@@ -4381,7 +4387,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	segment_size = rbd_obj_bytes(&rbd_dev->header);
 	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
 	q->limits.max_sectors = queue_max_hw_sectors(q);
-	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
+	blk_queue_max_segments(q, USHRT_MAX);
 	blk_queue_max_segment_size(q, segment_size);
 	blk_queue_io_min(q, segment_size);
 	blk_queue_io_opt(q, segment_size);