@@ -36,6 +36,7 @@
 #include "drbd_int.h"
 #include "drbd_protocol.h"
 #include "drbd_req.h"
+#include "drbd_state_change.h"
 #include <asm/unaligned.h>
 #include <linux/drbd_limits.h>
 #include <linux/kthread.h>
@@ -75,11 +76,17 @@ int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
 /* .dumpit */
 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
+int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);

 #include <linux/drbd_genl_api.h>
 #include "drbd_nla.h"
 #include <linux/genl_magic_func.h>

+static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
+static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */
+
+DEFINE_MUTEX(notification_mutex);
+
 /* used blkdev_get_by_path, to claim our meta data device(s) */
 static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

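
The two counters above serve different consumers: drbd_genl_seq keeps numbering the existing SIB broadcasts (a later hunk removes the function-local copy from drbd_bcast_event()), while notify_genl_seq independently numbers the new state-change events. Each event draws its sequence number from an atomic increment, so a listener that sees a gap knows it missed a broadcast. A minimal userspace model of that scheme using C11 atomics; all names here are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdio.h>

/* Models "static atomic_t notify_genl_seq = ATOMIC_INIT(2)": every event
 * gets the next number, and gaps reveal lost broadcasts to a listener. */
static atomic_int model_notify_seq = 2;

static int next_seq(void)
{
	/* atomic_inc_return() equivalent: add one, report the new value */
	return atomic_fetch_add(&model_notify_seq, 1) + 1;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("event seq=%d\n", next_seq());
	return 0;
}
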
@@ -349,6 +356,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
 	sib.sib_reason = SIB_HELPER_PRE;
 	sib.helper_name = cmd;
 	drbd_bcast_event(device, &sib);
+	notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
 	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
 	if (ret)
 		drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
@@ -361,6 +369,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
 	sib.sib_reason = SIB_HELPER_POST;
 	sib.helper_exit_code = ret;
 	drbd_bcast_event(device, &sib);
+	notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);

 	if (current == connection->worker.task)
 		clear_bit(CALLBACK_PENDING, &connection->flags);
@@ -388,6 +397,7 @@ static int conn_khelper(struct drbd_connection *connection, char *cmd)

 	drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name);
 	/* TODO: conn_bcast_event() ?? */
+	notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);

 	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
 	if (ret)
@@ -399,6 +409,7 @@ static int conn_khelper(struct drbd_connection *connection, char *cmd)
 			usermode_helper, cmd, resource_name,
 			(ret >> 8) & 0xff, ret);
 	/* TODO: conn_bcast_event() ?? */
+	notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);

 	if (ret < 0) /* Ignore any ERRNOs we got. */
 		ret = 0;
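
Each helper invocation now produces a matched pair of events: NOTIFY_CALL is multicast before the blocking call_usermodehelper(), NOTIFY_RESPONSE after it, carrying the raw wait status (the log line above extracts the conventional exit code as (ret >> 8) & 0xff). A rough userspace model of the pairing; the enum values and names are stand-ins, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

enum { NOTIFY_CALL, NOTIFY_RESPONSE };	/* stand-ins for the patch's enum */

static void model_notify_helper(int type, const char *name, int status)
{
	printf("%s helper=%s status=%d exit=%d\n",
	       type == NOTIFY_CALL ? "CALL" : "RESPONSE",
	       name, status, (status >> 8) & 0xff);
}

int main(void)
{
	model_notify_helper(NOTIFY_CALL, "fence-peer", 0);
	/* system() stands in for call_usermodehelper(); on POSIX its
	 * return value is a wait status, hence the >> 8 above. */
	int ret = system("exit 7");
	model_notify_helper(NOTIFY_RESPONSE, "fence-peer", ret);
	return 0;
}
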
@@ -2248,8 +2259,31 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
 	return 0;
 }

+static void connection_to_info(struct connection_info *info,
+			       struct drbd_connection *connection)
+{
+	info->conn_connection_state = connection->cstate;
+	info->conn_role = conn_highest_peer(connection);
+}
+
+static void peer_device_to_info(struct peer_device_info *info,
+				struct drbd_peer_device *peer_device)
+{
+	struct drbd_device *device = peer_device->device;
+
+	info->peer_repl_state =
+		max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
+	info->peer_disk_state = device->state.pdsk;
+	info->peer_resync_susp_user = device->state.user_isp;
+	info->peer_resync_susp_peer = device->state.peer_isp;
+	info->peer_resync_susp_dependency = device->state.aftr_isp;
+}
+
 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
 {
+	struct connection_info connection_info;
+	enum drbd_notification_type flags;
+	unsigned int peer_devices = 0;
 	struct drbd_config_context adm_ctx;
 	struct drbd_peer_device *peer_device;
 	struct net_conf *old_net_conf, *new_net_conf = NULL;
@@ -2350,6 +2384,22 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
 	connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
 	memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);

+	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+		peer_devices++;
+	}
+
+	connection_to_info(&connection_info, connection);
+	flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
+	mutex_lock(&notification_mutex);
+	notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
+	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+		struct peer_device_info peer_device_info;
+
+		peer_device_to_info(&peer_device_info, peer_device);
+		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
+		notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
+	}
+	mutex_unlock(&notification_mutex);
 	mutex_unlock(&adm_ctx.resource->conf_update);

 	rcu_read_lock();
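
The counting above is what groups the broadcast: peer_devices is pre-counted, and flags = (peer_devices--) ? NOTIFY_CONTINUES : 0 sets NOTIFY_CONTINUES on every event of the CREATE group except the last, so a listener treats the connection event plus all peer-device events as one batch that ends at the first event without the flag. A compilable sketch of just that counting; the flag's value is a placeholder, the real one comes from the drbd headers:

#include <stdio.h>

#define NOTIFY_CONTINUES (1u << 15)	/* placeholder value for this sketch */

int main(void)
{
	unsigned int peer_devices = 2;	/* as pre-counted by the idr loop */
	unsigned int flags;

	/* the connection event opens the group ... */
	flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
	printf("connection: continues=%d\n", !!(flags & NOTIFY_CONTINUES));

	/* ... and the last peer-device event (flags == 0) closes it; the
	 * final post-decrement wraps, but the value is never used again */
	for (int i = 0; i < 2; i++) {
		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
		printf("peer device %d: continues=%d\n", i, !!(flags & NOTIFY_CONTINUES));
	}
	return 0;
}
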
@@ -2431,6 +2481,8 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection
 			drbd_err(connection,
 				"unexpected rv2=%d in conn_try_disconnect()\n",
 				rv2);
+			/* Unlike in DRBD 9, the state engine has generated
+			 * NOTIFY_DESTROY events before clearing connection->net_conf. */
 	}
 	return rv;
 }
@@ -3417,8 +3469,18 @@ drbd_check_resource_name(struct drbd_config_context *adm_ctx)
 	return NO_ERROR;
 }

+static void resource_to_info(struct resource_info *info,
+			     struct drbd_resource *resource)
+{
+	info->res_role = conn_highest_role(first_connection(resource));
+	info->res_susp = resource->susp;
+	info->res_susp_nod = resource->susp_nod;
+	info->res_susp_fen = resource->susp_fen;
+}
+
 int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
 {
+	struct drbd_connection *connection;
 	struct drbd_config_context adm_ctx;
 	enum drbd_ret_code retcode;
 	struct res_opts res_opts;
@@ -3453,14 +3515,32 @@ int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)

 	/* not yet safe for genl_family.parallel_ops */
 	mutex_lock(&resources_mutex);
-	if (!conn_create(adm_ctx.resource_name, &res_opts))
-		retcode = ERR_NOMEM;
+	connection = conn_create(adm_ctx.resource_name, &res_opts);
 	mutex_unlock(&resources_mutex);
+
+	if (connection) {
+		struct resource_info resource_info;
+
+		mutex_lock(&notification_mutex);
+		resource_to_info(&resource_info, connection->resource);
+		notify_resource_state(NULL, 0, connection->resource,
+				      &resource_info, NOTIFY_CREATE);
+		mutex_unlock(&notification_mutex);
+	} else
+		retcode = ERR_NOMEM;
+
 out:
 	drbd_adm_finish(&adm_ctx, info, retcode);
 	return 0;
 }

+static void device_to_info(struct device_info *info,
+			   struct drbd_device *device)
+{
+	info->dev_disk_state = device->state.disk;
+}
+
+
 int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
 {
 	struct drbd_config_context adm_ctx;
@@ -3495,6 +3575,36 @@ int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)

 	mutex_lock(&adm_ctx.resource->adm_mutex);
 	retcode = drbd_create_device(&adm_ctx, dh->minor);
+	if (retcode == NO_ERROR) {
+		struct drbd_device *device;
+		struct drbd_peer_device *peer_device;
+		struct device_info info;
+		unsigned int peer_devices = 0;
+		enum drbd_notification_type flags;
+
+		device = minor_to_device(dh->minor);
+		for_each_peer_device(peer_device, device) {
+			if (!has_net_conf(peer_device->connection))
+				continue;
+			peer_devices++;
+		}
+
+		device_to_info(&info, device);
+		mutex_lock(&notification_mutex);
+		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
+		notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
+		for_each_peer_device(peer_device, device) {
+			struct peer_device_info peer_device_info;
+
+			if (!has_net_conf(peer_device->connection))
+				continue;
+			peer_device_to_info(&peer_device_info, peer_device);
+			flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
+			notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
+						 NOTIFY_CREATE | flags);
+		}
+		mutex_unlock(&notification_mutex);
+	}
 	mutex_unlock(&adm_ctx.resource->adm_mutex);
 out:
 	drbd_adm_finish(&adm_ctx, info, retcode);
@@ -3503,13 +3613,35 @@ int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)

 static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
 {
+	struct drbd_peer_device *peer_device;
+
 	if (device->state.disk == D_DISKLESS &&
 	    /* no need to be device->state.conn == C_STANDALONE &&
 	     * we may want to delete a minor from a live replication group.
 	     */
 	    device->state.role == R_SECONDARY) {
+		struct drbd_connection *connection =
+			first_connection(device->resource);
+
 		_drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
 				    CS_VERBOSE + CS_WAIT_COMPLETE);
+
+		/* If the state engine hasn't stopped the sender thread yet, we
+		 * need to flush the sender work queue before generating the
+		 * DESTROY events here. */
+		if (get_t_state(&connection->worker) == RUNNING)
+			drbd_flush_workqueue(&connection->sender_work);
+
+		mutex_lock(&notification_mutex);
+		for_each_peer_device(peer_device, device) {
+			if (!has_net_conf(peer_device->connection))
+				continue;
+			notify_peer_device_state(NULL, 0, peer_device, NULL,
+						 NOTIFY_DESTROY | NOTIFY_CONTINUES);
+		}
+		notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
+		mutex_unlock(&notification_mutex);
+
 		drbd_delete_device(device);
 		return NO_ERROR;
 	} else
@@ -3546,6 +3678,13 @@ static int adm_del_resource(struct drbd_resource *resource)
 	if (!idr_is_empty(&resource->devices))
 		return ERR_RES_IN_USE;

+	/* The state engine has stopped the sender thread, so we don't
+	 * need to flush the sender work queue before generating the
+	 * DESTROY event here. */
+	mutex_lock(&notification_mutex);
+	notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
+	mutex_unlock(&notification_mutex);
+
 	mutex_lock(&resources_mutex);
 	list_del_rcu(&resource->resources);
 	mutex_unlock(&resources_mutex);
@@ -3644,7 +3783,6 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)

 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
 {
-	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
 	struct sk_buff *msg;
 	struct drbd_genlmsghdr *d_out;
 	unsigned seq;
@@ -3679,3 +3817,484 @@ void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
 		 "Event seq:%u sib_reason:%u\n",
 		 err, seq, sib->sib_reason);
 }
+
+static void device_to_statistics(struct device_statistics *s,
+				 struct drbd_device *device)
+{
+	memset(s, 0, sizeof(*s));
+	s->dev_upper_blocked = !may_inc_ap_bio(device);
+	if (get_ldev(device)) {
+		struct drbd_md *md = &device->ldev->md;
+		u64 *history_uuids = (u64 *)s->history_uuids;
+		struct request_queue *q;
+		int n;
+
+		spin_lock_irq(&md->uuid_lock);
+		s->dev_current_uuid = md->uuid[UI_CURRENT];
+		BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
+		for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
+			history_uuids[n] = md->uuid[UI_HISTORY_START + n];
+		for (; n < HISTORY_UUIDS; n++)
+			history_uuids[n] = 0;
+		s->history_uuids_len = HISTORY_UUIDS;
+		spin_unlock_irq(&md->uuid_lock);
+
+		s->dev_disk_flags = md->flags;
+		q = bdev_get_queue(device->ldev->backing_bdev);
+		s->dev_lower_blocked =
+			bdi_congested(&q->backing_dev_info,
+				      (1 << WB_async_congested) |
+				      (1 << WB_sync_congested));
+		put_ldev(device);
+	}
+	s->dev_size = drbd_get_capacity(device->this_bdev);
+	s->dev_read = device->read_cnt;
+	s->dev_write = device->writ_cnt;
+	s->dev_al_writes = device->al_writ_cnt;
+	s->dev_bm_writes = device->bm_writ_cnt;
+	s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
+	s->dev_lower_pending = atomic_read(&device->local_cnt);
+	s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
+	s->dev_exposed_data_uuid = device->ed_uuid;
+}
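
The history-UUID copy above snapshots however many UUIDs the metadata holds under uuid_lock and zero-pads the rest, so the netlink attribute always has the fixed length HISTORY_UUIDS. The same copy-then-pad shape in isolation; HISTORY_UUIDS here is a placeholder, the real constant lives in the drbd headers:

#include <assert.h>
#include <stdint.h>

#define HISTORY_UUIDS 32	/* placeholder; see the drbd headers */

/* Same copy-then-zero-pad shape as the history-UUID snapshot above. */
static void fill_history(uint64_t *out, const uint64_t *md, int md_len)
{
	int n;

	for (n = 0; n < md_len; n++)
		out[n] = md[n];
	for (; n < HISTORY_UUIDS; n++)
		out[n] = 0;
}

int main(void)
{
	uint64_t md[2] = { 0xdead, 0xbeef };
	uint64_t out[HISTORY_UUIDS];

	fill_history(out, md, 2);
	assert(out[0] == 0xdead && out[1] == 0xbeef && out[2] == 0);
	return 0;
}
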
+
+enum mdf_peer_flag {
+	MDF_PEER_CONNECTED = 1 << 0,
+	MDF_PEER_OUTDATED = 1 << 1,
+	MDF_PEER_FENCING = 1 << 2,
+	MDF_PEER_FULL_SYNC = 1 << 3,
+};
+
+static void peer_device_to_statistics(struct peer_device_statistics *s,
+				      struct drbd_peer_device *peer_device)
+{
+	struct drbd_device *device = peer_device->device;
+
+	memset(s, 0, sizeof(*s));
+	s->peer_dev_received = device->recv_cnt;
+	s->peer_dev_sent = device->send_cnt;
+	s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
+			      atomic_read(&device->rs_pending_cnt);
+	s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
+	s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
+	s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
+	if (get_ldev(device)) {
+		struct drbd_md *md = &device->ldev->md;
+
+		spin_lock_irq(&md->uuid_lock);
+		s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
+		spin_unlock_irq(&md->uuid_lock);
+		s->peer_dev_flags =
+			(drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
+			 MDF_PEER_CONNECTED : 0) +
+			(drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
+			 !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
+			 MDF_PEER_OUTDATED : 0) +
+			/* FIXME: MDF_PEER_FENCING? */
+			(drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
+			 MDF_PEER_FULL_SYNC : 0);
+		put_ldev(device);
+	}
+}
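
Two details above are worth spelling out: the flags are combined with '+', which only works because the values are disjoint single bits (making it equivalent to '|'), and "consistent but not marked was-up-to-date" is what maps to MDF_PEER_OUTDATED. A tiny checkable version of that composition; the enum is copied from the patch, the rest is mine:

#include <assert.h>

enum mdf_peer_flag {	/* values copied from the patch */
	MDF_PEER_CONNECTED = 1 << 0,
	MDF_PEER_OUTDATED = 1 << 1,
	MDF_PEER_FENCING = 1 << 2,
	MDF_PEER_FULL_SYNC = 1 << 3,
};

int main(void)
{
	int connected = 1, consistent = 1, was_up_to_date = 0, full_sync = 0;
	unsigned int flags =
		(connected ? MDF_PEER_CONNECTED : 0) +
		(consistent && !was_up_to_date ? MDF_PEER_OUTDATED : 0) +
		(full_sync ? MDF_PEER_FULL_SYNC : 0);

	/* '+' over disjoint single bits behaves exactly like '|' */
	assert(flags == (MDF_PEER_CONNECTED | MDF_PEER_OUTDATED));
	return 0;
}
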
+
+static int nla_put_notification_header(struct sk_buff *msg,
+				       enum drbd_notification_type type)
+{
+	struct drbd_notification_header nh = {
+		.nh_type = type,
+	};
+
+	return drbd_notification_header_to_skb(msg, &nh, true);
+}
+
+void notify_resource_state(struct sk_buff *skb,
+			   unsigned int seq,
+			   struct drbd_resource *resource,
+			   struct resource_info *resource_info,
+			   enum drbd_notification_type type)
+{
+	struct resource_statistics resource_statistics;
+	struct drbd_genlmsghdr *dh;
+	bool multicast = false;
+	int err;
+
+	if (!skb) {
+		seq = atomic_inc_return(&notify_genl_seq);
+		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
+		err = -ENOMEM;
+		if (!skb)
+			goto failed;
+		multicast = true;
+	}
+
+	err = -EMSGSIZE;
+	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
+	if (!dh)
+		goto nla_put_failure;
+	dh->minor = -1U;
+	dh->ret_code = NO_ERROR;
+	if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
+	    nla_put_notification_header(skb, type) ||
+	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
+	     resource_info_to_skb(skb, resource_info, true)))
+		goto nla_put_failure;
+	resource_statistics.res_stat_write_ordering = resource->write_ordering;
+	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
+	if (err)
+		goto nla_put_failure;
+	genlmsg_end(skb, dh);
+	if (multicast) {
+		err = drbd_genl_multicast_events(skb, 0);
+		/* skb has been consumed or freed in netlink_broadcast() */
+		if (err && err != -ESRCH)
+			goto failed;
+	}
+	return;
+
+nla_put_failure:
+	nlmsg_free(skb);
+failed:
+	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
+		 err, seq);
+}
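
notify_resource_state() and the three siblings below share one calling convention: with skb == NULL they allocate a buffer, draw a fresh sequence number from notify_genl_seq and multicast the result; with a caller-supplied skb (the dumpit path) they only append, reusing the caller's seq. DESTROY events skip the info payload entirely, which is what the (type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY guard is for. A toy of just that dual-mode convention; names and buffer handling are mine:

#include <stdio.h>
#include <stdlib.h>

static int model_notify(char *buf, size_t len, const char *payload)
{
	int broadcast = 0;

	if (!buf) {			/* no caller buffer: broadcast mode */
		len = 128;
		buf = malloc(len);
		if (!buf)
			return -1;	/* like the "goto failed" path */
		broadcast = 1;
	}
	snprintf(buf, len, "event: %s", payload);
	if (broadcast) {
		puts(buf);		/* stands in for drbd_genl_multicast_events() */
		free(buf);
	}
	return 0;
}

int main(void)
{
	char reply[128];

	model_notify(NULL, 0, "create resource");	/* broadcast path */
	model_notify(reply, sizeof(reply), "resource exists");	/* dumpit path */
	puts(reply);
	return 0;
}
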
+
+void notify_device_state(struct sk_buff *skb,
+			 unsigned int seq,
+			 struct drbd_device *device,
+			 struct device_info *device_info,
+			 enum drbd_notification_type type)
+{
+	struct device_statistics device_statistics;
+	struct drbd_genlmsghdr *dh;
+	bool multicast = false;
+	int err;
+
+	if (!skb) {
+		seq = atomic_inc_return(&notify_genl_seq);
+		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
+		err = -ENOMEM;
+		if (!skb)
+			goto failed;
+		multicast = true;
+	}
+
+	err = -EMSGSIZE;
+	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
+	if (!dh)
+		goto nla_put_failure;
+	dh->minor = device->minor;
+	dh->ret_code = NO_ERROR;
+	if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
+	    nla_put_notification_header(skb, type) ||
+	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
+	     device_info_to_skb(skb, device_info, true)))
+		goto nla_put_failure;
+	device_to_statistics(&device_statistics, device);
+	device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
+	genlmsg_end(skb, dh);
+	if (multicast) {
+		err = drbd_genl_multicast_events(skb, 0);
+		/* skb has been consumed or freed in netlink_broadcast() */
+		if (err && err != -ESRCH)
+			goto failed;
+	}
+	return;
+
+nla_put_failure:
+	nlmsg_free(skb);
+failed:
+	drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
+		 err, seq);
+}
+
+void notify_connection_state(struct sk_buff *skb,
+			     unsigned int seq,
+			     struct drbd_connection *connection,
+			     struct connection_info *connection_info,
+			     enum drbd_notification_type type)
+{
+	struct connection_statistics connection_statistics;
+	struct drbd_genlmsghdr *dh;
+	bool multicast = false;
+	int err;
+
+	if (!skb) {
+		seq = atomic_inc_return(&notify_genl_seq);
+		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
+		err = -ENOMEM;
+		if (!skb)
+			goto failed;
+		multicast = true;
+	}
+
+	err = -EMSGSIZE;
+	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
+	if (!dh)
+		goto nla_put_failure;
+	dh->minor = -1U;
+	dh->ret_code = NO_ERROR;
+	if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
+	    nla_put_notification_header(skb, type) ||
+	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
+	     connection_info_to_skb(skb, connection_info, true)))
+		goto nla_put_failure;
+	connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
+	connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
+	genlmsg_end(skb, dh);
+	if (multicast) {
+		err = drbd_genl_multicast_events(skb, 0);
+		/* skb has been consumed or freed in netlink_broadcast() */
+		if (err && err != -ESRCH)
+			goto failed;
+	}
+	return;
+
+nla_put_failure:
+	nlmsg_free(skb);
+failed:
+	drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
+		 err, seq);
+}
+
+void notify_peer_device_state(struct sk_buff *skb,
+			      unsigned int seq,
+			      struct drbd_peer_device *peer_device,
+			      struct peer_device_info *peer_device_info,
+			      enum drbd_notification_type type)
+{
+	struct peer_device_statistics peer_device_statistics;
+	struct drbd_resource *resource = peer_device->device->resource;
+	struct drbd_genlmsghdr *dh;
+	bool multicast = false;
+	int err;
+
+	if (!skb) {
+		seq = atomic_inc_return(&notify_genl_seq);
+		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
+		err = -ENOMEM;
+		if (!skb)
+			goto failed;
+		multicast = true;
+	}
+
+	err = -EMSGSIZE;
+	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
+	if (!dh)
+		goto nla_put_failure;
+	dh->minor = -1U;
+	dh->ret_code = NO_ERROR;
+	if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
+	    nla_put_notification_header(skb, type) ||
+	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
+	     peer_device_info_to_skb(skb, peer_device_info, true)))
+		goto nla_put_failure;
+	peer_device_to_statistics(&peer_device_statistics, peer_device);
+	peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
+	genlmsg_end(skb, dh);
+	if (multicast) {
+		err = drbd_genl_multicast_events(skb, 0);
+		/* skb has been consumed or freed in netlink_broadcast() */
+		if (err && err != -ESRCH)
+			goto failed;
+	}
+	return;
+
+nla_put_failure:
+	nlmsg_free(skb);
+failed:
+	drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
+		 err, seq);
+}
+
+void notify_helper(enum drbd_notification_type type,
+		   struct drbd_device *device, struct drbd_connection *connection,
+		   const char *name, int status)
+{
+	struct drbd_resource *resource = device ? device->resource : connection->resource;
+	struct drbd_helper_info helper_info;
+	unsigned int seq = atomic_inc_return(&notify_genl_seq);
+	struct sk_buff *skb = NULL;
+	struct drbd_genlmsghdr *dh;
+	int err;
+
+	strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
+	helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
+	helper_info.helper_status = status;
+
+	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
+	err = -ENOMEM;
+	if (!skb)
+		goto fail;
+
+	err = -EMSGSIZE;
+	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
+	if (!dh)
+		goto fail;
+	dh->minor = device ? device->minor : -1;
+	dh->ret_code = NO_ERROR;
+	mutex_lock(&notification_mutex);
+	if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
+	    nla_put_notification_header(skb, type) ||
+	    drbd_helper_info_to_skb(skb, &helper_info, true))
+		goto unlock_fail;
+	genlmsg_end(skb, dh);
+	err = drbd_genl_multicast_events(skb, 0);
+	skb = NULL;
+	/* skb has been consumed or freed in netlink_broadcast() */
+	if (err && err != -ESRCH)
+		goto unlock_fail;
+	mutex_unlock(&notification_mutex);
+	return;
+
+unlock_fail:
+	mutex_unlock(&notification_mutex);
+fail:
+	nlmsg_free(skb);
+	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
+		 err, seq);
+}
+
+static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
+{
+	struct drbd_genlmsghdr *dh;
+	int err;
+
+	err = -EMSGSIZE;
+	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
+	if (!dh)
+		goto nla_put_failure;
+	dh->minor = -1U;
+	dh->ret_code = NO_ERROR;
+	if (nla_put_notification_header(skb, NOTIFY_EXISTS))
+		goto nla_put_failure;
+	genlmsg_end(skb, dh);
+	return;
+
+nla_put_failure:
+	nlmsg_free(skb);
+	pr_err("Error %d sending event. Event seq:%u\n", err, seq);
+}
+
+static void free_state_changes(struct list_head *list)
+{
+	while (!list_empty(list)) {
+		struct drbd_state_change *state_change =
+			list_first_entry(list, struct drbd_state_change, list);
+		list_del(&state_change->list);
+		forget_state_change(state_change);
+	}
+}
+
+static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
+{
+	return 1 +
+	       state_change->n_connections +
+	       state_change->n_devices +
+	       state_change->n_devices * state_change->n_connections;
+}
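
The count is one resource event, one per connection, one per device, and one per (device, connection) pair; for example a resource with two connections and three devices yields 1 + 2 + 3 + 6 = 12 notifications. The same arithmetic as a trivially checkable standalone function:

#include <assert.h>

/* 1 resource + n_connections + n_devices + n_devices * n_connections */
static unsigned int notifications_for(unsigned int n_connections,
				      unsigned int n_devices)
{
	return 1 + n_connections + n_devices + n_devices * n_connections;
}

int main(void)
{
	assert(notifications_for(2, 3) == 12);
	assert(notifications_for(0, 1) == 2);	/* unconnected, one minor */
	return 0;
}
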
+
+static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
+	unsigned int seq = cb->args[2];
+	unsigned int n;
+	enum drbd_notification_type flags = 0;
+
+	/* There is no need for taking notification_mutex here: it doesn't
+	   matter if the initial state events mix with later state change
+	   events; we can always tell the events apart by the NOTIFY_EXISTS
+	   flag. */
+
+	cb->args[5]--;
+	if (cb->args[5] == 1) {
+		notify_initial_state_done(skb, seq);
+		goto out;
+	}
+	n = cb->args[4]++;
+	if (cb->args[4] < cb->args[3])
+		flags |= NOTIFY_CONTINUES;
+	if (n < 1) {
+		notify_resource_state_change(skb, seq, state_change->resource,
+					     NOTIFY_EXISTS | flags);
+		goto next;
+	}
+	n--;
+	if (n < state_change->n_connections) {
+		notify_connection_state_change(skb, seq, &state_change->connections[n],
+					       NOTIFY_EXISTS | flags);
+		goto next;
+	}
+	n -= state_change->n_connections;
+	if (n < state_change->n_devices) {
+		notify_device_state_change(skb, seq, &state_change->devices[n],
+					   NOTIFY_EXISTS | flags);
+		goto next;
+	}
+	n -= state_change->n_devices;
+	if (n < state_change->n_devices * state_change->n_connections) {
+		notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
+						NOTIFY_EXISTS | flags);
+		goto next;
+	}
+
+next:
+	if (cb->args[4] == cb->args[3]) {
+		struct drbd_state_change *next_state_change =
+			list_entry(state_change->list.next,
+				   struct drbd_state_change, list);
+		cb->args[0] = (long)next_state_change;
+		cb->args[3] = notifications_for_state_change(next_state_change);
+		cb->args[4] = 0;
+	}
+out:
+	return skb->len;
+}
+
+int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct drbd_resource *resource;
+	LIST_HEAD(head);
+
+	if (cb->args[5] >= 1) {
+		if (cb->args[5] > 1)
+			return get_initial_state(skb, cb);
+		if (cb->args[0]) {
+			struct drbd_state_change *state_change =
+				(struct drbd_state_change *)cb->args[0];
+
+			/* connect list to head */
+			list_add(&head, &state_change->list);
+			free_state_changes(&head);
+		}
+		return 0;
+	}
+
+	cb->args[5] = 2;	/* number of iterations */
+	mutex_lock(&resources_mutex);
+	for_each_resource(resource, &drbd_resources) {
+		struct drbd_state_change *state_change;
+
+		state_change = remember_old_state(resource, GFP_KERNEL);
+		if (!state_change) {
+			if (!list_empty(&head))
+				free_state_changes(&head);
+			mutex_unlock(&resources_mutex);
+			return -ENOMEM;
+		}
+		copy_old_to_new_state_change(state_change);
+		list_add_tail(&state_change->list, &head);
+		cb->args[5] += notifications_for_state_change(state_change);
+	}
+	mutex_unlock(&resources_mutex);
+
+	if (!list_empty(&head)) {
+		struct drbd_state_change *state_change =
+			list_entry(head.next, struct drbd_state_change, list);
+		cb->args[0] = (long)state_change;
+		cb->args[3] = notifications_for_state_change(state_change);
+		list_del(&head);	/* detach list from head */
+	}
+
+	cb->args[2] = cb->nlh->nlmsg_seq;
+	return get_initial_state(skb, cb);
+}
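
drbd_adm_get_initial_state() uses the netlink_callback args[] slots as its dump cursor: args[0] points at the current drbd_state_change snapshot, args[2] caches the request's sequence number, args[3] and args[4] are the size of and position within the current snapshot, and args[5] counts remaining calls. It is primed to 2 + total so that the decrement to 1 triggers DRBD_INITIAL_STATE_DONE, and the call after that frees the snapshots and ends the dump. A compilable single-snapshot simulation of that countdown; everything here is a model, not kernel code:

#include <stdio.h>

/* args[3]: notifications in the snapshot, args[4]: index into it,
 * args[5]: remaining dump calls, primed to 2 + total. */
static long args[6];

static int dump_one(void)
{
	args[5]--;
	if (args[5] == 1) {
		puts("initial-state-done");
		return 0;	/* the next dump call cleans up and returns 0 */
	}
	printf("notification %ld%s\n", args[4],
	       args[4] + 1 < args[3] ? " (continues)" : "");
	args[4]++;
	return 1;
}

int main(void)
{
	long total = 4;	/* e.g. one resource, connection, device, peer device */

	args[3] = total;
	args[5] = 2 + total;
	while (dump_one())
		;
	return 0;
}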