Przeglądaj źródła

drbd: prepare for more than 32 bit flags

 - struct drbd_conf { ... unsigned long flags; ... }
 + struct drbd_conf { ... unsigned long drbd_flags[N]; ... }

And introduce wrapper functions for test/set/clear bit operations
on this member.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Lars Ellenberg 13 lat temu
rodzic
commit
06f10adbdb

+ 2 - 2
drivers/block/drbd/drbd_actlog.c

@@ -90,7 +90,7 @@ void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backi
 		dt = MAX_SCHEDULE_TIMEOUT;
 
 	dt = wait_event_timeout(mdev->misc_wait,
-			*done || test_bit(FORCE_DETACH, &mdev->flags), dt);
+			*done || drbd_test_flag(mdev, FORCE_DETACH), dt);
 	if (dt == 0) {
 		dev_err(DEV, "meta-data IO operation timed out\n");
 		drbd_chk_io_error(mdev, 1, DRBD_FORCE_DETACH);
@@ -108,7 +108,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	mdev->md_io.done = 0;
 	mdev->md_io.error = -ENODEV;
 
-	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+	if ((rw & WRITE) && !drbd_test_flag(mdev, MD_NO_FUA))
 		rw |= REQ_FUA | REQ_FLUSH;
 	rw |= REQ_SYNC;
 

+ 44 - 16
drivers/block/drbd/drbd_int.h

@@ -808,7 +808,7 @@ enum {
 #define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
 
 /* global flag bits */
-enum {
+enum drbd_flag {
 	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
 	SIGNAL_ASENDER,		/* whether asender wants to be interrupted */
 	SEND_PING,		/* whether asender should send a ping asap */
@@ -858,6 +858,9 @@ enum {
 				 * and potentially deadlock on, this drbd worker.
 				 */
 	DISCONNECT_SENT,	/* Currently the last bit in this 32bit word */
+
+	/* keep last */
+	DRBD_N_FLAGS,
 };
 
 struct drbd_bitmap; /* opaque for drbd_conf */
@@ -970,8 +973,7 @@ struct fifo_buffer {
 };
 
 struct drbd_conf {
-	/* things that are stored as / read from meta data on disk */
-	unsigned long flags;
+	unsigned long drbd_flags[(DRBD_N_FLAGS + BITS_PER_LONG -1)/BITS_PER_LONG];
 
 	/* configured by drbdsetup */
 	struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
@@ -1143,6 +1145,31 @@ struct drbd_conf {
 	unsigned int local_max_bio_size;
 };
 
+static inline void drbd_set_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+	set_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline void drbd_clear_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+	clear_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline int drbd_test_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+	return test_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline int drbd_test_and_set_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+	return test_and_set_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline int drbd_test_and_clear_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+	return test_and_clear_bit(f, &mdev->drbd_flags[0]);
+}
+
 static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
 {
 	struct drbd_conf *mdev;
@@ -1812,12 +1839,12 @@ static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
 static inline void drbd_state_lock(struct drbd_conf *mdev)
 {
 	wait_event(mdev->misc_wait,
-		   !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
+		   !drbd_test_and_set_flag(mdev, CLUSTER_ST_CHANGE));
 }
 
 static inline void drbd_state_unlock(struct drbd_conf *mdev)
 {
-	clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
+	drbd_clear_flag(mdev, CLUSTER_ST_CHANGE);
 	wake_up(&mdev->misc_wait);
 }
 
@@ -1874,9 +1901,9 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
 		/* NOTE fall through to detach case if forcedetach set */
 	case EP_DETACH:
 	case EP_CALL_HELPER:
-		set_bit(WAS_IO_ERROR, &mdev->flags);
+		drbd_set_flag(mdev, WAS_IO_ERROR);
 		if (forcedetach == DRBD_FORCE_DETACH)
-			set_bit(FORCE_DETACH, &mdev->flags);
+			drbd_set_flag(mdev, FORCE_DETACH);
 		if (mdev->state.disk > D_FAILED) {
 			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
 			dev_err(DEV,
@@ -2037,13 +2064,13 @@ drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
 
 static inline void wake_asender(struct drbd_conf *mdev)
 {
-	if (test_bit(SIGNAL_ASENDER, &mdev->flags))
+	if (drbd_test_flag(mdev, SIGNAL_ASENDER))
 		force_sig(DRBD_SIG, mdev->asender.task);
 }
 
 static inline void request_ping(struct drbd_conf *mdev)
 {
-	set_bit(SEND_PING, &mdev->flags);
+	drbd_set_flag(mdev, SEND_PING);
 	wake_asender(mdev);
 }
 
@@ -2374,7 +2401,7 @@ static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
 
 	if (is_susp(mdev->state))
 		return false;
-	if (test_bit(SUSPEND_IO, &mdev->flags))
+	if (drbd_test_flag(mdev, SUSPEND_IO))
 		return false;
 
 	/* to avoid potential deadlock or bitmap corruption,
@@ -2389,7 +2416,7 @@ static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
 	 * and we are within the spinlock anyways, we have this workaround.  */
 	if (atomic_read(&mdev->ap_bio_cnt) > mxb)
 		return false;
-	if (test_bit(BITMAP_IO, &mdev->flags))
+	if (drbd_test_flag(mdev, BITMAP_IO))
 		return false;
 	return true;
 }
@@ -2427,8 +2454,8 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
 
 	D_ASSERT(ap_bio >= 0);
 
-	if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
-		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
+	if (ap_bio == 0 && drbd_test_flag(mdev, BITMAP_IO)) {
+		if (!drbd_test_and_set_flag(mdev, BITMAP_IO_QUEUED))
 			drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
 	}
 
@@ -2477,7 +2504,7 @@ static inline void drbd_update_congested(struct drbd_conf *mdev)
 {
 	struct sock *sk = mdev->data.socket->sk;
 	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
-		set_bit(NET_CONGESTED, &mdev->flags);
+		drbd_set_flag(mdev, NET_CONGESTED);
 }
 
 static inline int drbd_queue_order_type(struct drbd_conf *mdev)
@@ -2494,14 +2521,15 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
 {
 	int r;
 
-	if (test_bit(MD_NO_FUA, &mdev->flags))
+	if (drbd_test_flag(mdev, MD_NO_FUA))
 		return;
 
 	r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_NOIO, NULL);
 	if (r) {
-		set_bit(MD_NO_FUA, &mdev->flags);
+		drbd_set_flag(mdev, MD_NO_FUA);
 		dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
 	}
 }
 
+
 #endif

+ 41 - 41
drivers/block/drbd/drbd_main.c

@@ -322,7 +322,7 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
 	list_splice_init(&b->requests, &mdev->barrier_acked_requests);
 
 	nob = b->next;
-	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
+	if (drbd_test_and_clear_flag(mdev, CREATE_BARRIER)) {
 		_tl_add_barrier(mdev, b);
 		if (nob)
 			mdev->oldest_tle = nob;
@@ -381,7 +381,7 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
 				if (b->w.cb == NULL) {
 					b->w.cb = w_send_barrier;
 					inc_ap_pending(mdev);
-					set_bit(CREATE_BARRIER, &mdev->flags);
+					drbd_set_flag(mdev, CREATE_BARRIER);
 				}
 
 				drbd_queue_work(&mdev->data.work, &b->w);
@@ -464,7 +464,7 @@ static void _tl_clear(struct drbd_conf *mdev)
 	}
 
 	/* ensure bit indicating barrier is required is clear */
-	clear_bit(CREATE_BARRIER, &mdev->flags);
+	drbd_clear_flag(mdev, CREATE_BARRIER);
 
 	memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
 
@@ -582,10 +582,10 @@ _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
 	unsigned long flags;
 	enum drbd_state_rv rv;
 
-	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
+	if (drbd_test_and_clear_flag(mdev, CL_ST_CHG_SUCCESS))
 		return SS_CW_SUCCESS;
 
-	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
+	if (drbd_test_and_clear_flag(mdev, CL_ST_CHG_FAIL))
 		return SS_CW_FAILED_BY_PEER;
 
 	rv = 0;
@@ -660,7 +660,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
 		}
 
 		if (mask.conn == C_MASK && val.conn == C_DISCONNECTING)
-			set_bit(DISCONNECT_SENT, &mdev->flags);
+			drbd_set_flag(mdev, DISCONNECT_SENT);
 
 		wait_event(mdev->state_wait,
 			(rv = _req_st_cond(mdev, mask, val)));
@@ -850,7 +850,7 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
 
 	/* While establishing a connection only allow cstate to change.
 	   Delay/refuse role changes, detach attach etc... */
-	if (test_bit(STATE_SENT, &mdev->flags) &&
+	if (drbd_test_flag(mdev, STATE_SENT) &&
 	    !(os.conn == C_WF_REPORT_PARAMS ||
 	      (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
 		rv = SS_IN_TRANSIENT_STATE;
@@ -1109,7 +1109,7 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
 
 static void drbd_resume_al(struct drbd_conf *mdev)
 {
-	if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
+	if (drbd_test_and_clear_flag(mdev, AL_SUSPENDED))
 		dev_info(DEV, "Resumed AL updates\n");
 }
 
@@ -1215,8 +1215,8 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
 	if (ns.disk == D_DISKLESS &&
 	    ns.conn == C_STANDALONE &&
 	    ns.role == R_SECONDARY &&
-	    !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
-		set_bit(DEVICE_DYING, &mdev->flags);
+	    !drbd_test_and_set_flag(mdev, CONFIG_PENDING))
+		drbd_set_flag(mdev, DEVICE_DYING);
 
 	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
 	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
@@ -1291,7 +1291,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
 						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
 						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
 
-		if (test_bit(CRASHED_PRIMARY, &mdev->flags))
+		if (drbd_test_flag(mdev, CRASHED_PRIMARY))
 			mdf |= MDF_CRASHED_PRIMARY;
 		if (mdev->state.role == R_PRIMARY ||
 		    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
@@ -1316,7 +1316,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
 	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
 	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
 	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
-		set_bit(CONSIDER_RESYNC, &mdev->flags);
+		drbd_set_flag(mdev, CONSIDER_RESYNC);
 
 	/* Receiver should clean up itself */
 	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
@@ -1400,7 +1400,7 @@ int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
 	D_ASSERT(current == mdev->worker.task);
 
 	/* open coded non-blocking drbd_suspend_io(mdev); */
-	set_bit(SUSPEND_IO, &mdev->flags);
+	drbd_set_flag(mdev, SUSPEND_IO);
 
 	drbd_bm_lock(mdev, why, flags);
 	rv = io_fn(mdev);
@@ -1426,7 +1426,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 	union drbd_state nsm = (union drbd_state){ .i = -1 };
 
 	if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
-		clear_bit(CRASHED_PRIMARY, &mdev->flags);
+		drbd_clear_flag(mdev, CRASHED_PRIMARY);
 		if (mdev->p_uuid)
 			mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
 	}
@@ -1466,9 +1466,9 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 	if (ns.susp_fen) {
 		/* case1: The outdate peer handler is successful: */
 		if (os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) {
-			if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
+			if (drbd_test_flag(mdev, NEW_CUR_UUID)) {
 				drbd_uuid_new_current(mdev);
-				clear_bit(NEW_CUR_UUID, &mdev->flags);
+				drbd_clear_flag(mdev, NEW_CUR_UUID);
 			}
 			spin_lock_irq(&mdev->req_lock);
 			_tl_clear(mdev);
@@ -1477,7 +1477,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 		}
 		/* case2: The connection was established again: */
 		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
-			clear_bit(NEW_CUR_UUID, &mdev->flags);
+			drbd_clear_flag(mdev, NEW_CUR_UUID);
 			what = resend;
 			nsm.susp_fen = 0;
 		}
@@ -1534,7 +1534,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
 			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
 				if (is_susp(mdev->state)) {
-					set_bit(NEW_CUR_UUID, &mdev->flags);
+					drbd_set_flag(mdev, NEW_CUR_UUID);
 				} else {
 					drbd_uuid_new_current(mdev);
 					drbd_send_uuids(mdev);
@@ -1625,7 +1625,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 		 * we might come from an failed Attach before ldev was set. */
 		if (mdev->ldev) {
 			eh = mdev->ldev->dc.on_io_error;
-			was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+			was_io_error = drbd_test_and_clear_flag(mdev, WAS_IO_ERROR);
 
 			if (was_io_error && eh == EP_CALL_HELPER)
 				drbd_khelper(mdev, "local-io-error");
@@ -1643,7 +1643,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 			 * So aborting local requests may cause crashes,
 			 * or even worse, silent data corruption.
 			 */
-			if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
+			if (drbd_test_and_clear_flag(mdev, FORCE_DETACH))
 				tl_abort_disk_io(mdev);
 
 			/* current state still has to be D_FAILED,
@@ -1692,7 +1692,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 
 	/* Disks got bigger while they were detached */
 	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
-	    test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
+	    drbd_test_and_clear_flag(mdev, RESYNC_AFTER_NEG)) {
 		if (ns.conn == C_CONNECTED)
 			resync_after_online_grow(mdev);
 	}
@@ -1717,7 +1717,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 
 	/* Wake up role changes, that were delayed because of connection establishing */
 	if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS) {
-		clear_bit(STATE_SENT, &mdev->flags);
+		drbd_clear_flag(mdev, STATE_SENT);
 		wake_up(&mdev->state_wait);
 	}
 
@@ -1750,7 +1750,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 		if (os.aftr_isp != ns.aftr_isp)
 			resume_next_sg(mdev);
 		/* set in __drbd_set_state, unless CONFIG_PENDING was set */
-		if (test_bit(DEVICE_DYING, &mdev->flags))
+		if (drbd_test_flag(mdev, DEVICE_DYING))
 			drbd_thread_stop_nowait(&mdev->worker);
 	}
 
@@ -2145,7 +2145,7 @@ int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
 	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
 	p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
 	uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
-	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
+	uuid_flags |= drbd_test_flag(mdev, CRASHED_PRIMARY) ? 2 : 0;
 	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
 	p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
 
@@ -2775,7 +2775,7 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
 		offset += sent;
 	} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
 	set_fs(oldfs);
-	clear_bit(NET_CONGESTED, &mdev->flags);
+	drbd_clear_flag(mdev, NET_CONGESTED);
 
 	ok = (len == 0);
 	if (likely(ok))
@@ -2877,7 +2877,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
 		dp_flags |= DP_MAY_SET_IN_SYNC;
 
 	p.dp_flags = cpu_to_be32(dp_flags);
-	set_bit(UNPLUG_REMOTE, &mdev->flags);
+	drbd_set_flag(mdev, UNPLUG_REMOTE);
 	ok = (sizeof(p) ==
 		drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
 	if (ok && dgs) {
@@ -3056,7 +3056,7 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
 	} while (sent < size);
 
 	if (sock == mdev->data.socket)
-		clear_bit(NET_CONGESTED, &mdev->flags);
+		drbd_clear_flag(mdev, NET_CONGESTED);
 
 	if (rv <= 0) {
 		if (rv != -EAGAIN) {
@@ -3263,7 +3263,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
 	}
 
 	drbd_free_resources(mdev);
-	clear_bit(AL_SUSPENDED, &mdev->flags);
+	drbd_clear_flag(mdev, AL_SUSPENDED);
 
 	/*
 	 * currently we drbd_init_ee only on module load, so
@@ -3556,7 +3556,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
 		goto out;
 	}
 
-	if (test_bit(CALLBACK_PENDING, &mdev->flags)) {
+	if (drbd_test_flag(mdev, CALLBACK_PENDING)) {
 		r |= (1 << BDI_async_congested);
 		/* Without good local data, we would need to read from remote,
 		 * and that would need the worker thread as well, which is
@@ -3580,7 +3580,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
 			reason = 'b';
 	}
 
-	if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
+	if (bdi_bits & (1 << BDI_async_congested) && drbd_test_flag(mdev, NET_CONGESTED)) {
 		r |= (1 << BDI_async_congested);
 		reason = reason == 'b' ? 'a' : 'n';
 	}
@@ -3867,7 +3867,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
 
 	del_timer(&mdev->md_sync_timer);
 	/* timer may be rearmed by drbd_md_mark_dirty() now. */
-	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
+	if (!drbd_test_and_clear_flag(mdev, MD_DIRTY))
 		return;
 
 	/* We use here D_FAILED and not D_ATTACHING because we try to write
@@ -4011,7 +4011,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
#ifdef DEBUG
 void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
 {
-	if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
+	if (!drbd_test_and_set_flag(mdev, MD_DIRTY)) {
 		mod_timer(&mdev->md_sync_timer, jiffies + HZ);
 		mdev->last_md_mark_dirty.line = line;
 		mdev->last_md_mark_dirty.func = func;
@@ -4020,7 +4020,7 @@ void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *
#else
 void drbd_md_mark_dirty(struct drbd_conf *mdev)
 {
-	if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
+	if (!drbd_test_and_set_flag(mdev, MD_DIRTY))
 		mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
 }
#endif
@@ -4182,14 +4182,14 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 		put_ldev(mdev);
 	}
 
-	clear_bit(BITMAP_IO, &mdev->flags);
+	drbd_clear_flag(mdev, BITMAP_IO);
 	smp_mb__after_clear_bit();
 	wake_up(&mdev->misc_wait);
 
 	if (work->done)
 		work->done(mdev, rv);
 
-	clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
+	drbd_clear_flag(mdev, BITMAP_IO_QUEUED);
 	work->why = NULL;
 	work->flags = 0;
 
@@ -4210,7 +4210,7 @@ void drbd_ldev_destroy(struct drbd_conf *mdev)
 		__free_page(mdev->md_io_tmpp);
 		mdev->md_io_tmpp = NULL;
 	}
-	clear_bit(GO_DISKLESS, &mdev->flags);
+	drbd_clear_flag(mdev, GO_DISKLESS);
 }
 
 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
@@ -4227,7 +4227,7 @@ static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused
 void drbd_go_diskless(struct drbd_conf *mdev)
 {
 	D_ASSERT(mdev->state.disk == D_FAILED);
-	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
+	if (!drbd_test_and_set_flag(mdev, GO_DISKLESS))
 		drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
 }
 
@@ -4250,8 +4250,8 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
 {
 	D_ASSERT(current == mdev->worker.task);
 
-	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
-	D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
+	D_ASSERT(!drbd_test_flag(mdev, BITMAP_IO_QUEUED));
+	D_ASSERT(!drbd_test_flag(mdev, BITMAP_IO));
 	D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
 	if (mdev->bm_io_work.why)
 		dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
@@ -4263,9 +4263,9 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
 	mdev->bm_io_work.flags = flags;
 
 	spin_lock_irq(&mdev->req_lock);
-	set_bit(BITMAP_IO, &mdev->flags);
+	drbd_set_flag(mdev, BITMAP_IO);
 	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
-		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
+		if (!drbd_test_and_set_flag(mdev, BITMAP_IO_QUEUED))
 			drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
 	}
 	spin_unlock_irq(&mdev->req_lock);

+ 28 - 28
drivers/block/drbd/drbd_nl.c

@@ -148,7 +148,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
 	int ret;
 
 	if (current == mdev->worker.task)
-		set_bit(CALLBACK_PENDING, &mdev->flags);
+		drbd_set_flag(mdev, CALLBACK_PENDING);
 
 	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
 
@@ -193,7 +193,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
 				(ret >> 8) & 0xff, ret);
 
 	if (current == mdev->worker.task)
-		clear_bit(CALLBACK_PENDING, &mdev->flags);
+		drbd_clear_flag(mdev, CALLBACK_PENDING);
 
 	if (ret < 0) /* Ignore any ERRNOs we got. */
 		ret = 0;
@@ -295,7 +295,7 @@ static int _try_outdate_peer_async(void *data)
 	*/
 	spin_lock_irq(&mdev->req_lock);
 	ns = mdev->state;
-	if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
+	if (ns.conn < C_WF_REPORT_PARAMS && !drbd_test_flag(mdev, STATE_SENT)) {
 		ns.pdsk = nps;
 		_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
 	}
@@ -583,7 +583,7 @@ char *ppsize(char *buf, unsigned long long size)
  */
 void drbd_suspend_io(struct drbd_conf *mdev)
 {
-	set_bit(SUSPEND_IO, &mdev->flags);
+	drbd_set_flag(mdev, SUSPEND_IO);
 	if (is_susp(mdev->state))
 		return;
 	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
@@ -591,7 +591,7 @@ void drbd_suspend_io(struct drbd_conf *mdev)
 
 void drbd_resume_io(struct drbd_conf *mdev)
 {
-	clear_bit(SUSPEND_IO, &mdev->flags);
+	drbd_clear_flag(mdev, SUSPEND_IO);
 	wake_up(&mdev->misc_wait);
 }
 
@@ -881,8 +881,8 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
  */
 static void drbd_reconfig_start(struct drbd_conf *mdev)
 {
-	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
-	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
+	wait_event(mdev->state_wait, !drbd_test_and_set_flag(mdev, CONFIG_PENDING));
+	wait_event(mdev->state_wait, !drbd_test_flag(mdev, DEVICE_DYING));
 	drbd_thread_start(&mdev->worker);
 	drbd_flush_workqueue(mdev);
 }
@@ -896,10 +896,10 @@ static void drbd_reconfig_done(struct drbd_conf *mdev)
 	if (mdev->state.disk == D_DISKLESS &&
 	    mdev->state.conn == C_STANDALONE &&
 	    mdev->state.role == R_SECONDARY) {
-		set_bit(DEVICE_DYING, &mdev->flags);
+		drbd_set_flag(mdev, DEVICE_DYING);
 		drbd_thread_stop_nowait(&mdev->worker);
 	} else
-		clear_bit(CONFIG_PENDING, &mdev->flags);
+		drbd_clear_flag(mdev, CONFIG_PENDING);
 	spin_unlock_irq(&mdev->req_lock);
 	wake_up(&mdev->state_wait);
 }
@@ -919,7 +919,7 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
 
 	spin_lock_irq(&mdev->req_lock);
 	if (mdev->state.conn < C_CONNECTED)
-		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
+		s = !drbd_test_and_set_flag(mdev, AL_SUSPENDED);
 
 	spin_unlock_irq(&mdev->req_lock);
 
@@ -958,7 +958,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 
 
 	/* make sure there is no leftover from previous force-detach attempts */
 	/* make sure there is no leftover from previous force-detach attempts */
-	clear_bit(FORCE_DETACH, &mdev->flags);
+	drbd_clear_flag(mdev, FORCE_DETACH);
 
 
 	/* and no leftover from previously aborted resync or verify, either */
 	/* and no leftover from previously aborted resync or verify, either */
 	mdev->rs_total = 0;
 	mdev->rs_total = 0;
@@ -1168,9 +1168,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	/* Reset the "barriers don't work" bits here, then force meta data to
 	/* Reset the "barriers don't work" bits here, then force meta data to
 	 * be written, to ensure we determine if barriers are supported. */
 	 * be written, to ensure we determine if barriers are supported. */
 	if (nbc->dc.no_md_flush)
 	if (nbc->dc.no_md_flush)
-		set_bit(MD_NO_FUA, &mdev->flags);
+		drbd_set_flag(mdev, MD_NO_FUA);
 	else
 	else
-		clear_bit(MD_NO_FUA, &mdev->flags);
+		drbd_clear_flag(mdev, MD_NO_FUA);
 
 
 	/* Point of no return reached.
 	/* Point of no return reached.
 	 * Devices and memory are no longer released by error cleanup below.
 	 * Devices and memory are no longer released by error cleanup below.
@@ -1186,13 +1186,13 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	drbd_bump_write_ordering(mdev, WO_bdev_flush);
 	drbd_bump_write_ordering(mdev, WO_bdev_flush);
 
 
 	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
 	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
-		set_bit(CRASHED_PRIMARY, &mdev->flags);
+		drbd_set_flag(mdev, CRASHED_PRIMARY);
 	else
 	else
-		clear_bit(CRASHED_PRIMARY, &mdev->flags);
+		drbd_clear_flag(mdev, CRASHED_PRIMARY);
 
 
 	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
 	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
 	    !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
 	    !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
-		set_bit(CRASHED_PRIMARY, &mdev->flags);
+		drbd_set_flag(mdev, CRASHED_PRIMARY);
 		cp_discovered = 1;
 		cp_discovered = 1;
 	}
 	}
 
 
@@ -1217,18 +1217,18 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	 * so we can automatically recover from a crash of a
 	 * so we can automatically recover from a crash of a
 	 * degraded but active "cluster" after a certain timeout.
 	 * degraded but active "cluster" after a certain timeout.
 	 */
 	 */
-	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
+	drbd_clear_flag(mdev, USE_DEGR_WFC_T);
 	if (mdev->state.role != R_PRIMARY &&
 	if (mdev->state.role != R_PRIMARY &&
 	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
 	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
 	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
 	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
-		set_bit(USE_DEGR_WFC_T, &mdev->flags);
+		drbd_set_flag(mdev, USE_DEGR_WFC_T);
 
 
 	dd = drbd_determine_dev_size(mdev, 0);
 	dd = drbd_determine_dev_size(mdev, 0);
 	if (dd == dev_size_error) {
 	if (dd == dev_size_error) {
 		retcode = ERR_NOMEM_BITMAP;
 		retcode = ERR_NOMEM_BITMAP;
 		goto force_diskless_dec;
 		goto force_diskless_dec;
 	} else if (dd == grew)
 	} else if (dd == grew)
-		set_bit(RESYNC_AFTER_NEG, &mdev->flags);
+		drbd_set_flag(mdev, RESYNC_AFTER_NEG);
 
 
 	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
 	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
 		dev_info(DEV, "Assuming that all blocks are out of sync "
 		dev_info(DEV, "Assuming that all blocks are out of sync "
@@ -1362,7 +1362,7 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	}
 	}
 
 
 	if (dt.detach_force) {
 	if (dt.detach_force) {
-		set_bit(FORCE_DETACH, &mdev->flags);
+		drbd_set_flag(mdev, FORCE_DETACH);
 		drbd_force_state(mdev, NS(disk, D_FAILED));
 		drbd_force_state(mdev, NS(disk, D_FAILED));
 		reply->ret_code = SS_SUCCESS;
 		reply->ret_code = SS_SUCCESS;
 		goto out;
 		goto out;
@@ -1707,7 +1707,7 @@ void resync_after_online_grow(struct drbd_conf *mdev)
 	if (mdev->state.role != mdev->state.peer)
 	if (mdev->state.role != mdev->state.peer)
 		iass = (mdev->state.role == R_PRIMARY);
 		iass = (mdev->state.role == R_PRIMARY);
 	else
 	else
-		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+		iass = drbd_test_flag(mdev, DISCARD_CONCURRENT);
 
 
 	if (iass)
 	if (iass)
 		drbd_start_resync(mdev, C_SYNC_SOURCE);
 		drbd_start_resync(mdev, C_SYNC_SOURCE);
@@ -1765,7 +1765,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 
 
 	if (mdev->state.conn == C_CONNECTED) {
 	if (mdev->state.conn == C_CONNECTED) {
 		if (dd == grew)
 		if (dd == grew)
-			set_bit(RESIZE_PENDING, &mdev->flags);
+			drbd_set_flag(mdev, RESIZE_PENDING);
 
 
 		drbd_send_uuids(mdev);
 		drbd_send_uuids(mdev);
 		drbd_send_sizes(mdev, 1, ddsf);
 		drbd_send_sizes(mdev, 1, ddsf);
@@ -1983,7 +1983,7 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 	 * resync just being finished, wait for it before requesting a new resync.
 	 * resync just being finished, wait for it before requesting a new resync.
 	 * Also wait for it's after_state_ch(). */
 	 * Also wait for it's after_state_ch(). */
 	drbd_suspend_io(mdev);
 	drbd_suspend_io(mdev);
-	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
 	drbd_flush_workqueue(mdev);
 	drbd_flush_workqueue(mdev);
 
 
 	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
 	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
@@ -2026,7 +2026,7 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
 	 * resync just being finished, wait for it before requesting a new resync.
 	 * resync just being finished, wait for it before requesting a new resync.
 	 * Also wait for it's after_state_ch(). */
 	 * Also wait for it's after_state_ch(). */
 	drbd_suspend_io(mdev);
 	drbd_suspend_io(mdev);
-	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
 	drbd_flush_workqueue(mdev);
 	drbd_flush_workqueue(mdev);
 
 
 	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
 	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
@@ -2094,9 +2094,9 @@ static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 			     struct drbd_nl_cfg_reply *reply)
 			     struct drbd_nl_cfg_reply *reply)
 {
 {
-	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
+	if (drbd_test_flag(mdev, NEW_CUR_UUID)) {
 		drbd_uuid_new_current(mdev);
 		drbd_uuid_new_current(mdev);
-		clear_bit(NEW_CUR_UUID, &mdev->flags);
+		drbd_clear_flag(mdev, NEW_CUR_UUID);
 	}
 	}
 	drbd_suspend_io(mdev);
 	drbd_suspend_io(mdev);
 	reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
 	reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
@@ -2199,7 +2199,7 @@ static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_r
 	tl = reply->tag_list;
 	tl = reply->tag_list;
 
 
 	rv = mdev->state.pdsk == D_OUTDATED        ? UT_PEER_OUTDATED :
 	rv = mdev->state.pdsk == D_OUTDATED        ? UT_PEER_OUTDATED :
-	  test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;
+	  drbd_test_flag(mdev, USE_DEGR_WFC_T) ? UT_DEGRADED : UT_DEFAULT;
 
 
 	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
 	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
 	put_unaligned(TT_END, tl++); /* Close the tag list */
 	put_unaligned(TT_END, tl++); /* Close the tag list */
@@ -2224,7 +2224,7 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	/* If there is still bitmap IO pending, e.g. previous resync or verify
 	/* If there is still bitmap IO pending, e.g. previous resync or verify
 	 * just being finished, wait for it before requesting a new resync. */
 	 * just being finished, wait for it before requesting a new resync. */
 	drbd_suspend_io(mdev);
 	drbd_suspend_io(mdev);
-	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
 
 
 	/* w_make_ov_request expects start position to be aligned */
 	/* w_make_ov_request expects start position to be aligned */
 	mdev->ov_start_sector = args.start_sector & ~(BM_SECT_PER_BIT-1);
 	mdev->ov_start_sector = args.start_sector & ~(BM_SECT_PER_BIT-1);

+ 1 - 1
drivers/block/drbd/drbd_proc.c

@@ -270,7 +270,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 			   mdev->state.peer_isp ? 'p' : '-',
 			   mdev->state.peer_isp ? 'p' : '-',
 			   mdev->state.user_isp ? 'u' : '-',
 			   mdev->state.user_isp ? 'u' : '-',
 			   mdev->congestion_reason ?: '-',
 			   mdev->congestion_reason ?: '-',
-			   test_bit(AL_SUSPENDED, &mdev->flags) ? 's' : '-',
+			   drbd_test_flag(mdev, AL_SUSPENDED) ? 's' : '-',
 			   mdev->send_cnt/2,
 			   mdev->send_cnt/2,
 			   mdev->recv_cnt/2,
 			   mdev->recv_cnt/2,
 			   mdev->writ_cnt/2,
 			   mdev->writ_cnt/2,

+ 38 - 38
drivers/block/drbd/drbd_receiver.c

@@ -525,7 +525,7 @@ static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
 		else if (rv != -ERESTARTSYS)
 		else if (rv != -ERESTARTSYS)
 			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
 			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
 	} else if (rv == 0) {
 	} else if (rv == 0) {
-		if (test_bit(DISCONNECT_SENT, &mdev->flags)) {
+		if (drbd_test_flag(mdev, DISCONNECT_SENT)) {
 			long t; /* time_left */
 			long t; /* time_left */
 			t = wait_event_timeout(mdev->state_wait, mdev->state.conn < C_CONNECTED,
 			t = wait_event_timeout(mdev->state_wait, mdev->state.conn < C_CONNECTED,
 					       mdev->net_conf->ping_timeo * HZ/10);
 					       mdev->net_conf->ping_timeo * HZ/10);
@@ -749,7 +749,7 @@ static int drbd_connect(struct drbd_conf *mdev)
 
 
 	D_ASSERT(!mdev->data.socket);
 	D_ASSERT(!mdev->data.socket);
 
 
-	clear_bit(DISCONNECT_SENT, &mdev->flags);
+	drbd_clear_flag(mdev, DISCONNECT_SENT);
 	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
 	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
 		return -2;
 		return -2;
 
 
@@ -772,7 +772,7 @@ static int drbd_connect(struct drbd_conf *mdev)
 				sock = s;
 				sock = s;
 				s = NULL;
 				s = NULL;
 			} else if (!msock) {
 			} else if (!msock) {
-				clear_bit(DISCARD_CONCURRENT, &mdev->flags);
+				drbd_clear_flag(mdev, DISCARD_CONCURRENT);
 				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
 				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
 				msock = s;
 				msock = s;
 				s = NULL;
 				s = NULL;
@@ -810,7 +810,7 @@ retry:
 					sock_release(msock);
 					sock_release(msock);
 				}
 				}
 				msock = s;
 				msock = s;
-				set_bit(DISCARD_CONCURRENT, &mdev->flags);
+				drbd_set_flag(mdev, DISCARD_CONCURRENT);
 				break;
 				break;
 			default:
 			default:
 				dev_warn(DEV, "Error receiving initial packet\n");
 				dev_warn(DEV, "Error receiving initial packet\n");
@@ -892,18 +892,18 @@ retry:
 
 
 	if (drbd_send_protocol(mdev) == -1)
 	if (drbd_send_protocol(mdev) == -1)
 		return -1;
 		return -1;
-	set_bit(STATE_SENT, &mdev->flags);
+	drbd_set_flag(mdev, STATE_SENT);
 	drbd_send_sync_param(mdev, &mdev->sync_conf);
 	drbd_send_sync_param(mdev, &mdev->sync_conf);
 	drbd_send_sizes(mdev, 0, 0);
 	drbd_send_sizes(mdev, 0, 0);
 	drbd_send_uuids(mdev);
 	drbd_send_uuids(mdev);
 	drbd_send_current_state(mdev);
 	drbd_send_current_state(mdev);
-	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
-	clear_bit(RESIZE_PENDING, &mdev->flags);
+	drbd_clear_flag(mdev, USE_DEGR_WFC_T);
+	drbd_clear_flag(mdev, RESIZE_PENDING);
 
 
 	spin_lock_irq(&mdev->req_lock);
 	spin_lock_irq(&mdev->req_lock);
 	rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
 	rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
 	if (mdev->state.conn != C_WF_REPORT_PARAMS)
 	if (mdev->state.conn != C_WF_REPORT_PARAMS)
-		clear_bit(STATE_SENT, &mdev->flags);
+		drbd_clear_flag(mdev, STATE_SENT);
 	spin_unlock_irq(&mdev->req_lock);
 	spin_unlock_irq(&mdev->req_lock);
 
 
 	if (rv < SS_SUCCESS)
 	if (rv < SS_SUCCESS)
@@ -1732,7 +1732,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		/* don't get the req_lock yet,
 		/* don't get the req_lock yet,
 		 * we may sleep in drbd_wait_peer_seq */
 		 * we may sleep in drbd_wait_peer_seq */
 		const int size = e->size;
 		const int size = e->size;
-		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+		const int discard = drbd_test_flag(mdev, DISCARD_CONCURRENT);
 		DEFINE_WAIT(wait);
 		DEFINE_WAIT(wait);
 		struct drbd_request *i;
 		struct drbd_request *i;
 		struct hlist_node *n;
 		struct hlist_node *n;
@@ -2200,7 +2200,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
 		     "Using discard-least-changes instead\n");
 		     "Using discard-least-changes instead\n");
 	case ASB_DISCARD_ZERO_CHG:
 	case ASB_DISCARD_ZERO_CHG:
 		if (ch_peer == 0 && ch_self == 0) {
 		if (ch_peer == 0 && ch_self == 0) {
-			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+			rv = drbd_test_flag(mdev, DISCARD_CONCURRENT)
 				? -1 : 1;
 				? -1 : 1;
 			break;
 			break;
 		} else {
 		} else {
@@ -2216,7 +2216,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
 			rv =  1;
 			rv =  1;
 		else /* ( ch_self == ch_peer ) */
 		else /* ( ch_self == ch_peer ) */
 		     /* Well, then use something else. */
 		     /* Well, then use something else. */
-			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+			rv = drbd_test_flag(mdev, DISCARD_CONCURRENT)
 				? -1 : 1;
 				? -1 : 1;
 		break;
 		break;
 	case ASB_DISCARD_LOCAL:
 	case ASB_DISCARD_LOCAL:
@@ -2420,7 +2420,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
 		}
 		}
 
 
 		/* Common power [off|failure] */
 		/* Common power [off|failure] */
-		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
+		rct = (drbd_test_flag(mdev, CRASHED_PRIMARY) ? 1 : 0) +
 			(mdev->p_uuid[UI_FLAGS] & 2);
 			(mdev->p_uuid[UI_FLAGS] & 2);
 		/* lowest bit is set when we were primary,
 		/* lowest bit is set when we were primary,
 		 * next bit (weight 2) is set when peer was primary */
 		 * next bit (weight 2) is set when peer was primary */
@@ -2431,7 +2431,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
 		case 1: /*  self_pri && !peer_pri */ return 1;
 		case 1: /*  self_pri && !peer_pri */ return 1;
 		case 2: /* !self_pri &&  peer_pri */ return -1;
 		case 2: /* !self_pri &&  peer_pri */ return -1;
 		case 3: /*  self_pri &&  peer_pri */
 		case 3: /*  self_pri &&  peer_pri */
-			dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+			dc = drbd_test_flag(mdev, DISCARD_CONCURRENT);
 			return dc ? -1 : 1;
 			return dc ? -1 : 1;
 		}
 		}
 	}
 	}
@@ -2648,7 +2648,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
 		}
 		}
 	}
 	}
 
 
-	if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
+	if (mdev->net_conf->dry_run || drbd_test_flag(mdev, CONN_DRY_RUN)) {
 		if (hg == 0)
 		if (hg == 0)
 			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
 			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
 		else
 		else
@@ -2716,10 +2716,10 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig
 	cf		= be32_to_cpu(p->conn_flags);
 	cf		= be32_to_cpu(p->conn_flags);
 	p_want_lose = cf & CF_WANT_LOSE;
 	p_want_lose = cf & CF_WANT_LOSE;
 
 
-	clear_bit(CONN_DRY_RUN, &mdev->flags);
+	drbd_clear_flag(mdev, CONN_DRY_RUN);
 
 
 	if (cf & CF_DRY_RUN)
 	if (cf & CF_DRY_RUN)
-		set_bit(CONN_DRY_RUN, &mdev->flags);
+		drbd_set_flag(mdev, CONN_DRY_RUN);
 
 
 	if (p_proto != mdev->net_conf->wire_protocol) {
 	if (p_proto != mdev->net_conf->wire_protocol) {
 		dev_err(DEV, "incompatible communication protocols\n");
 		dev_err(DEV, "incompatible communication protocols\n");
@@ -3051,7 +3051,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 			 * needs to know my new size... */
 			 * needs to know my new size... */
 			drbd_send_sizes(mdev, 0, ddsf);
 			drbd_send_sizes(mdev, 0, ddsf);
 		}
 		}
-		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
+		if (drbd_test_and_clear_flag(mdev, RESIZE_PENDING) ||
 		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
 		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
 			if (mdev->state.pdsk >= D_INCONSISTENT &&
 			if (mdev->state.pdsk >= D_INCONSISTENT &&
 			    mdev->state.disk >= D_INCONSISTENT) {
 			    mdev->state.disk >= D_INCONSISTENT) {
@@ -3060,7 +3060,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 				else
 				else
 					resync_after_online_grow(mdev);
 					resync_after_online_grow(mdev);
 			} else
 			} else
-				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
+				drbd_set_flag(mdev, RESYNC_AFTER_NEG);
 		}
 		}
 	}
 	}
 
 
@@ -3121,7 +3121,7 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	   ongoing cluster wide state change is finished. That is important if
 	   ongoing cluster wide state change is finished. That is important if
 	   we are primary and are detaching from our disk. We need to see the
 	   we are primary and are detaching from our disk. We need to see the
 	   new disk state... */
 	   new disk state... */
-	wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
+	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, CLUSTER_ST_CHANGE));
 	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
 	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
 		updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
 		updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
 
 
@@ -3170,8 +3170,8 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 	mask.i = be32_to_cpu(p->mask);
 	mask.i = be32_to_cpu(p->mask);
 	val.i = be32_to_cpu(p->val);
 	val.i = be32_to_cpu(p->val);
 
 
-	if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
-	    test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
+	if (drbd_test_flag(mdev, DISCARD_CONCURRENT) &&
+	    drbd_test_flag(mdev, CLUSTER_ST_CHANGE)) {
 		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
 		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
 		return true;
 		return true;
 	}
 	}
@@ -3280,7 +3280,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 			os.disk == D_NEGOTIATING));
 			os.disk == D_NEGOTIATING));
 		/* if we have both been inconsistent, and the peer has been
 		/* if we have both been inconsistent, and the peer has been
 		 * forced to be UpToDate with --overwrite-data */
 		 * forced to be UpToDate with --overwrite-data */
-		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
+		cr |= drbd_test_flag(mdev, CONSIDER_RESYNC);
 		/* if we had been plain connected, and the admin requested to
 		/* if we had been plain connected, and the admin requested to
 		 * start a sync by "invalidate" or "invalidate-remote" */
 		 * start a sync by "invalidate" or "invalidate-remote" */
 		cr |= (os.conn == C_CONNECTED &&
 		cr |= (os.conn == C_CONNECTED &&
@@ -3300,7 +3300,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 				peer_state.disk = D_DISKLESS;
 				peer_state.disk = D_DISKLESS;
 				real_peer_disk = D_DISKLESS;
 				real_peer_disk = D_DISKLESS;
 			} else {
 			} else {
-				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
+				if (drbd_test_and_clear_flag(mdev, CONN_DRY_RUN))
 					return false;
 					return false;
 				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
 				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
 				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
 				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
@@ -3312,7 +3312,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	spin_lock_irq(&mdev->req_lock);
 	spin_lock_irq(&mdev->req_lock);
 	if (mdev->state.i != os.i)
 	if (mdev->state.i != os.i)
 		goto retry;
 		goto retry;
-	clear_bit(CONSIDER_RESYNC, &mdev->flags);
+	drbd_clear_flag(mdev, CONSIDER_RESYNC);
 	ns.peer = peer_state.role;
 	ns.peer = peer_state.role;
 	ns.pdsk = real_peer_disk;
 	ns.pdsk = real_peer_disk;
 	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
 	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
@@ -3320,14 +3320,14 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		ns.disk = mdev->new_state_tmp.disk;
 		ns.disk = mdev->new_state_tmp.disk;
 	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
 	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
 	if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
 	if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
-	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
+	    drbd_test_flag(mdev, NEW_CUR_UUID)) {
 		/* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
 		/* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
 		   for temporal network outages! */
 		   for temporal network outages! */
 		spin_unlock_irq(&mdev->req_lock);
 		spin_unlock_irq(&mdev->req_lock);
 		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
 		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
 		tl_clear(mdev);
 		tl_clear(mdev);
 		drbd_uuid_new_current(mdev);
 		drbd_uuid_new_current(mdev);
-		clear_bit(NEW_CUR_UUID, &mdev->flags);
+		drbd_clear_flag(mdev, NEW_CUR_UUID);
 		drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
 		drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
 		return false;
 		return false;
 	}
 	}
@@ -3931,7 +3931,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
 
 
 	/* serialize with bitmap writeout triggered by the state change,
 	/* serialize with bitmap writeout triggered by the state change,
 	 * if any. */
 	 * if any. */
-	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
 
 
 	/* tcp_close and release of sendpage pages can be deferred.  I don't
 	/* tcp_close and release of sendpage pages can be deferred.  I don't
 	 * want to use SO_LINGER, because apparently it can be deferred for
 	 * want to use SO_LINGER, because apparently it can be deferred for
@@ -4267,9 +4267,9 @@ static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
 	int retcode = be32_to_cpu(p->retcode);
 	int retcode = be32_to_cpu(p->retcode);
 
 
 	if (retcode >= SS_SUCCESS) {
 	if (retcode >= SS_SUCCESS) {
-		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
+		drbd_set_flag(mdev, CL_ST_CHG_SUCCESS);
 	} else {
 	} else {
-		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
+		drbd_set_flag(mdev, CL_ST_CHG_FAIL);
 		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
 		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
 		    drbd_set_st_err_str(retcode), retcode);
 		    drbd_set_st_err_str(retcode), retcode);
 	}
 	}
@@ -4288,7 +4288,7 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
 {
 {
 	/* restore idle timeout */
 	/* restore idle timeout */
 	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
 	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
-	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
+	if (!drbd_test_and_set_flag(mdev, GOT_PING_ACK))
 		wake_up(&mdev->misc_wait);
 		wake_up(&mdev->misc_wait);
 
 
 	return true;
 	return true;
@@ -4504,7 +4504,7 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
 
 
 	if (mdev->state.conn == C_AHEAD &&
 	if (mdev->state.conn == C_AHEAD &&
 	    atomic_read(&mdev->ap_in_flight) == 0 &&
 	    atomic_read(&mdev->ap_in_flight) == 0 &&
-	    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
+	    !drbd_test_and_set_flag(mdev, AHEAD_TO_SYNC_SOURCE)) {
 		mdev->start_resync_timer.expires = jiffies + HZ;
 		mdev->start_resync_timer.expires = jiffies + HZ;
 		add_timer(&mdev->start_resync_timer);
 		add_timer(&mdev->start_resync_timer);
 	}
 	}
@@ -4614,7 +4614,7 @@ int drbd_asender(struct drbd_thread *thi)
 
 
 	while (get_t_state(thi) == Running) {
 	while (get_t_state(thi) == Running) {
 		drbd_thread_current_set_cpu(mdev);
 		drbd_thread_current_set_cpu(mdev);
-		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
+		if (drbd_test_and_clear_flag(mdev, SEND_PING)) {
 			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
 			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
 			mdev->meta.socket->sk->sk_rcvtimeo =
 			mdev->meta.socket->sk->sk_rcvtimeo =
 				mdev->net_conf->ping_timeo*HZ/10;
 				mdev->net_conf->ping_timeo*HZ/10;
@@ -4627,12 +4627,12 @@ int drbd_asender(struct drbd_thread *thi)
 			3 < atomic_read(&mdev->unacked_cnt))
 			3 < atomic_read(&mdev->unacked_cnt))
 			drbd_tcp_cork(mdev->meta.socket);
 			drbd_tcp_cork(mdev->meta.socket);
 		while (1) {
 		while (1) {
-			clear_bit(SIGNAL_ASENDER, &mdev->flags);
+			drbd_clear_flag(mdev, SIGNAL_ASENDER);
 			flush_signals(current);
 			flush_signals(current);
 			if (!drbd_process_done_ee(mdev))
 			if (!drbd_process_done_ee(mdev))
 				goto reconnect;
 				goto reconnect;
 			/* to avoid race with newly queued ACKs */
 			/* to avoid race with newly queued ACKs */
-			set_bit(SIGNAL_ASENDER, &mdev->flags);
+			drbd_set_flag(mdev, SIGNAL_ASENDER);
 			spin_lock_irq(&mdev->req_lock);
 			spin_lock_irq(&mdev->req_lock);
 			empty = list_empty(&mdev->done_ee);
 			empty = list_empty(&mdev->done_ee);
 			spin_unlock_irq(&mdev->req_lock);
 			spin_unlock_irq(&mdev->req_lock);
@@ -4652,7 +4652,7 @@ int drbd_asender(struct drbd_thread *thi)
 
 
 		rv = drbd_recv_short(mdev, mdev->meta.socket,
 		rv = drbd_recv_short(mdev, mdev->meta.socket,
 				     buf, expect-received, 0);
 				     buf, expect-received, 0);
-		clear_bit(SIGNAL_ASENDER, &mdev->flags);
+		drbd_clear_flag(mdev, SIGNAL_ASENDER);
 
 
 		flush_signals(current);
 		flush_signals(current);
 
 
@@ -4670,7 +4670,7 @@ int drbd_asender(struct drbd_thread *thi)
 			received += rv;
 			received += rv;
 			buf	 += rv;
 			buf	 += rv;
 		} else if (rv == 0) {
 		} else if (rv == 0) {
-			if (test_bit(DISCONNECT_SENT, &mdev->flags)) {
+			if (drbd_test_flag(mdev, DISCONNECT_SENT)) {
 				long t; /* time_left */
 				long t; /* time_left */
 				t = wait_event_timeout(mdev->state_wait, mdev->state.conn < C_CONNECTED,
 				t = wait_event_timeout(mdev->state_wait, mdev->state.conn < C_CONNECTED,
 						       mdev->net_conf->ping_timeo * HZ/10);
 						       mdev->net_conf->ping_timeo * HZ/10);
@@ -4689,7 +4689,7 @@ int drbd_asender(struct drbd_thread *thi)
 				dev_err(DEV, "PingAck did not arrive in time.\n");
 				dev_err(DEV, "PingAck did not arrive in time.\n");
 				goto reconnect;
 				goto reconnect;
 			}
 			}
-			set_bit(SEND_PING, &mdev->flags);
+			drbd_set_flag(mdev, SEND_PING);
 			continue;
 			continue;
 		} else if (rv == -EINTR) {
 		} else if (rv == -EINTR) {
 			continue;
 			continue;
@@ -4747,7 +4747,7 @@ disconnect:
 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
 		drbd_md_sync(mdev);
 		drbd_md_sync(mdev);
 	}
 	}
-	clear_bit(SIGNAL_ASENDER, &mdev->flags);
+	drbd_clear_flag(mdev, SIGNAL_ASENDER);
 
 
 	D_ASSERT(mdev->state.conn < C_CONNECTED);
 	D_ASSERT(mdev->state.conn < C_CONNECTED);
 	dev_info(DEV, "asender terminated\n");
 	dev_info(DEV, "asender terminated\n");

+ 10 - 10
drivers/block/drbd/drbd_req.c

@@ -118,7 +118,7 @@ static void queue_barrier(struct drbd_conf *mdev)
 	 * barrier/epoch object is added. This is the only place this bit is
 	 * barrier/epoch object is added. This is the only place this bit is
 	 * set. It indicates that the barrier for this epoch is already queued,
 	 * set. It indicates that the barrier for this epoch is already queued,
 	 * and no new epoch has been created yet. */
 	 * and no new epoch has been created yet. */
-	if (test_bit(CREATE_BARRIER, &mdev->flags))
+	if (drbd_test_flag(mdev, CREATE_BARRIER))
 		return;
 		return;
 
 
 	b = mdev->newest_tle;
 	b = mdev->newest_tle;
@@ -129,7 +129,7 @@ static void queue_barrier(struct drbd_conf *mdev)
 	 * or (on connection loss) in tl_clear.  */
 	 * or (on connection loss) in tl_clear.  */
 	inc_ap_pending(mdev);
 	inc_ap_pending(mdev);
 	drbd_queue_work(&mdev->data.work, &b->w);
 	drbd_queue_work(&mdev->data.work, &b->w);
-	set_bit(CREATE_BARRIER, &mdev->flags);
+	drbd_set_flag(mdev, CREATE_BARRIER);
 }
 }
 
 
 static void _about_to_complete_local_write(struct drbd_conf *mdev,
 static void _about_to_complete_local_write(struct drbd_conf *mdev,
@@ -507,7 +507,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		 * corresponding hlist_del is in _req_may_be_done() */
 		 * corresponding hlist_del is in _req_may_be_done() */
 		hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector));
 		hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector));
 
 
-		set_bit(UNPLUG_REMOTE, &mdev->flags);
+		drbd_set_flag(mdev, UNPLUG_REMOTE);
 
 
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
 		req->rq_state |= RQ_NET_QUEUED;
 		req->rq_state |= RQ_NET_QUEUED;
@@ -541,11 +541,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		/* otherwise we may lose an unplug, which may cause some remote
 		/* otherwise we may lose an unplug, which may cause some remote
 		 * io-scheduler timeout to expire, increasing maximum latency,
 		 * io-scheduler timeout to expire, increasing maximum latency,
 		 * hurting performance. */
 		 * hurting performance. */
-		set_bit(UNPLUG_REMOTE, &mdev->flags);
+		drbd_set_flag(mdev, UNPLUG_REMOTE);
 
 
 		/* see drbd_make_request_common,
 		/* see drbd_make_request_common,
 		 * just after it grabs the req_lock */
 		 * just after it grabs the req_lock */
-		D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
+		D_ASSERT(drbd_test_flag(mdev, CREATE_BARRIER) == 0);
 
 
 		req->epoch = mdev->newest_tle->br_number;
 		req->epoch = mdev->newest_tle->br_number;
 
 
@@ -888,7 +888,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
 	 * Empty flushes don't need to go into the activity log, they can only
 	 * Empty flushes don't need to go into the activity log, they can only
 	 * flush data for pending writes which are already in there. */
 	 * flush data for pending writes which are already in there. */
 	if (rw == WRITE && local && size
 	if (rw == WRITE && local && size
-	&& !test_bit(AL_SUSPENDED, &mdev->flags)) {
+	&& !drbd_test_flag(mdev, AL_SUSPENDED)) {
 		req->rq_state |= RQ_IN_ACT_LOG;
 		req->rq_state |= RQ_IN_ACT_LOG;
 		drbd_al_begin_io(mdev, sector);
 		drbd_al_begin_io(mdev, sector);
 	}
 	}
@@ -912,7 +912,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
 	 * if we lost that race, we retry.  */
 	 * if we lost that race, we retry.  */
 	if (rw == WRITE && (remote || send_oos) &&
 	if (rw == WRITE && (remote || send_oos) &&
 	    mdev->unused_spare_tle == NULL &&
 	    mdev->unused_spare_tle == NULL &&
-	    test_bit(CREATE_BARRIER, &mdev->flags)) {
+	    drbd_test_flag(mdev, CREATE_BARRIER)) {
 allocate_barrier:
 allocate_barrier:
 		b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
 		b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
 		if (!b) {
 		if (!b) {
@@ -955,7 +955,7 @@ allocate_barrier:
 	}
 	}
 	if (rw == WRITE && (remote || send_oos) &&
 	if (rw == WRITE && (remote || send_oos) &&
 	    mdev->unused_spare_tle == NULL &&
 	    mdev->unused_spare_tle == NULL &&
-	    test_bit(CREATE_BARRIER, &mdev->flags)) {
+	    drbd_test_flag(mdev, CREATE_BARRIER)) {
 		/* someone closed the current epoch
 		/* someone closed the current epoch
 		 * while we were grabbing the spinlock */
 		 * while we were grabbing the spinlock */
 		spin_unlock_irq(&mdev->req_lock);
 		spin_unlock_irq(&mdev->req_lock);
@@ -977,12 +977,12 @@ allocate_barrier:
 	 * make sure that, if this is a write request and it triggered a
 	 * make sure that, if this is a write request and it triggered a
 	 * barrier packet, this request is queued within the same spinlock. */
 	 * barrier packet, this request is queued within the same spinlock. */
 	if ((remote || send_oos) && mdev->unused_spare_tle &&
 	if ((remote || send_oos) && mdev->unused_spare_tle &&
-	    test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
+	    drbd_test_and_clear_flag(mdev, CREATE_BARRIER)) {
 		_tl_add_barrier(mdev, mdev->unused_spare_tle);
 		_tl_add_barrier(mdev, mdev->unused_spare_tle);
 		mdev->unused_spare_tle = NULL;
 		mdev->unused_spare_tle = NULL;
 	} else {
 	} else {
 		D_ASSERT(!(remote && rw == WRITE &&
 		D_ASSERT(!(remote && rw == WRITE &&
-			   test_bit(CREATE_BARRIER, &mdev->flags)));
+			   drbd_test_flag(mdev, CREATE_BARRIER)));
 	}
 	}
 
 
 	/* NOTE
 	/* NOTE

+ 7 - 7
drivers/block/drbd/drbd_worker.c

@@ -793,7 +793,7 @@ int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	}
 	}
 
 
 	drbd_start_resync(mdev, C_SYNC_SOURCE);
 	drbd_start_resync(mdev, C_SYNC_SOURCE);
-	clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
+	drbd_clear_flag(mdev, AHEAD_TO_SYNC_SOURCE);
 	return 1;
 	return 1;
 }
 }
 
 
@@ -817,10 +817,10 @@ static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int ca
 
 
 static void ping_peer(struct drbd_conf *mdev)
 static void ping_peer(struct drbd_conf *mdev)
 {
 {
-	clear_bit(GOT_PING_ACK, &mdev->flags);
+	drbd_clear_flag(mdev, GOT_PING_ACK);
 	request_ping(mdev);
 	request_ping(mdev);
 	wait_event(mdev->misc_wait,
 	wait_event(mdev->misc_wait,
-		   test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
+		   drbd_test_flag(mdev, GOT_PING_ACK) || mdev->state.conn < C_CONNECTED);
 }
 }
 
 
 int drbd_resync_finished(struct drbd_conf *mdev)
 int drbd_resync_finished(struct drbd_conf *mdev)
@@ -1749,8 +1749,8 @@ int drbd_worker(struct drbd_thread *thi)
 						NS(conn, C_NETWORK_FAILURE));
 						NS(conn, C_NETWORK_FAILURE));
 		}
 		}
 	}
 	}
-	D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
-	D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));
+	D_ASSERT(drbd_test_flag(mdev, DEVICE_DYING));
+	D_ASSERT(drbd_test_flag(mdev, CONFIG_PENDING));
 
 
 	spin_lock_irq(&mdev->data.work.q_lock);
 	spin_lock_irq(&mdev->data.work.q_lock);
 	i = 0;
 	i = 0;
@@ -1783,8 +1783,8 @@ int drbd_worker(struct drbd_thread *thi)
 
 
 	dev_info(DEV, "worker terminated\n");
 	dev_info(DEV, "worker terminated\n");
 
 
-	clear_bit(DEVICE_DYING, &mdev->flags);
-	clear_bit(CONFIG_PENDING, &mdev->flags);
+	drbd_clear_flag(mdev, DEVICE_DYING);
+	drbd_clear_flag(mdev, CONFIG_PENDING);
 	wake_up(&mdev->state_wait);
 	wake_up(&mdev->state_wait);
 
 
 	return 0;
 	return 0;