@@ -583,6 +583,7 @@ static int make_resync_request(struct drbd_device *const device, int cancel)
 	int number, rollback_i, size;
 	int align, requeue = 0;
 	int i = 0;
+	int discard_granularity = 0;
 
 	if (unlikely(cancel))
 		return 0;
@@ -602,6 +603,12 @@ static int make_resync_request(struct drbd_device *const device, int cancel)
 		return 0;
 	}
 
+	if (connection->agreed_features & FF_THIN_RESYNC) {
+		rcu_read_lock();
+		discard_granularity = rcu_dereference(device->ldev->disk_conf)->rs_discard_granularity;
+		rcu_read_unlock();
+	}
+
 	max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9;
 	number = drbd_rs_number_requests(device);
 	if (number <= 0)
@@ -666,6 +673,9 @@ next_sector:
 			if (sector & ((1<<(align+3))-1))
 				break;
 
+			if (discard_granularity && size == discard_granularity)
+				break;
+
 			/* do not cross extent boundaries */
 			if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
 				break;
@@ -712,7 +722,8 @@ next_sector:
 			int err;
 
 			inc_rs_pending(device);
-			err = drbd_send_drequest(peer_device, P_RS_DATA_REQUEST,
+			err = drbd_send_drequest(peer_device,
+				       size == discard_granularity ? P_RS_THIN_REQ : P_RS_DATA_REQUEST,
 				       sector, size, ID_SYNCER);
 			if (err) {
 				drbd_err(device, "drbd_send_drequest() failed, aborting...\n");