@@ -1178,7 +1178,7 @@ static void drbd_flush(struct drbd_connection *connection)
 	struct drbd_peer_device *peer_device;
 	int vnr;
 
-	if (connection->resource->write_ordering >= WO_bdev_flush) {
+	if (connection->resource->write_ordering >= WO_BDEV_FLUSH) {
 		rcu_read_lock();
 		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
 			struct drbd_device *device = peer_device->device;
@@ -1203,7 +1203,7 @@ static void drbd_flush(struct drbd_connection *connection)
 				/* would rather check on EOPNOTSUPP, but that is not reliable.
 				 * don't try again for ANY return value != 0
 				 * if (rv == -EOPNOTSUPP) */
-				drbd_bump_write_ordering(connection->resource, NULL, WO_drain_io);
+				drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
 			}
 			put_ldev(device);
 			kref_put(&device->kref, drbd_destroy_device);
@@ -1299,10 +1299,10 @@ max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo)
 
 	dc = rcu_dereference(bdev->disk_conf);
 
-	if (wo == WO_bdev_flush && !dc->disk_flushes)
-		wo = WO_drain_io;
-	if (wo == WO_drain_io && !dc->disk_drain)
-		wo = WO_none;
+	if (wo == WO_BDEV_FLUSH && !dc->disk_flushes)
+		wo = WO_DRAIN_IO;
+	if (wo == WO_DRAIN_IO && !dc->disk_drain)
+		wo = WO_NONE;
 
 	return wo;
 }
@@ -1319,13 +1319,13 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin
 	enum write_ordering_e pwo;
 	int vnr;
 	static char *write_ordering_str[] = {
-		[WO_none] = "none",
-		[WO_drain_io] = "drain",
-		[WO_bdev_flush] = "flush",
+		[WO_NONE] = "none",
+		[WO_DRAIN_IO] = "drain",
+		[WO_BDEV_FLUSH] = "flush",
 	};
 
 	pwo = resource->write_ordering;
-	if (wo != WO_bdev_flush)
+	if (wo != WO_BDEV_FLUSH)
 		wo = min(pwo, wo);
 	rcu_read_lock();
 	idr_for_each_entry(&resource->devices, device, vnr) {
@@ -1343,7 +1343,7 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin
 	rcu_read_unlock();
 
 	resource->write_ordering = wo;
-	if (pwo != resource->write_ordering || wo == WO_bdev_flush)
+	if (pwo != resource->write_ordering || wo == WO_BDEV_FLUSH)
 		drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]);
 }
 
@@ -1533,7 +1533,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
 	 * Therefore we must send the barrier_ack after the barrier request was
 	 * completed. */
 	switch (connection->resource->write_ordering) {
-	case WO_none:
+	case WO_NONE:
 		if (rv == FE_RECYCLED)
 			return 0;
 
@@ -1546,8 +1546,8 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
 			drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
 			/* Fall through */
 
-	case WO_bdev_flush:
-	case WO_drain_io:
+	case WO_BDEV_FLUSH:
+	case WO_DRAIN_IO:
 		conn_wait_active_ee_empty(connection);
 		drbd_flush(connection);
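
Note for readers of the hunks above: the patch only renames the enum write_ordering_e members to their uppercase forms (WO_NONE, WO_DRAIN_IO, WO_BDEV_FLUSH); the logic is unchanged. The designated-initializer table write_ordering_str[] depends on those enumerators forming a small, dense set starting at 0. Below is a minimal user-space sketch of that pattern, illustrative only: it is not the kernel's actual enum definition, and the per-value comments are my reading of the "none"/"drain"/"flush" semantics visible in the hunks.

#include <stdio.h>

/* Sketch of the assumed enumerator layout; the real definition lives in the
 * DRBD headers and may differ in detail. */
enum write_ordering_e {
	WO_NONE,	/* no extra write-ordering guarantees */
	WO_DRAIN_IO,	/* drain in-flight I/O before continuing */
	WO_BDEV_FLUSH,	/* issue a backing-device flush */
};

/* Same designated-initializer lookup pattern as write_ordering_str[] above. */
static const char * const write_ordering_str[] = {
	[WO_NONE]	= "none",
	[WO_DRAIN_IO]	= "drain",
	[WO_BDEV_FLUSH]	= "flush",
};

int main(void)
{
	enum write_ordering_e wo = WO_BDEV_FLUSH;

	/* Mirrors the drbd_info() message in drbd_bump_write_ordering(). */
	printf("Method to ensure write ordering: %s\n", write_ordering_str[wo]);
	return 0;
}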