|
@@ -348,6 +348,36 @@ void blk_sync_queue(struct request_queue *q)
|
|
|
}
|
|
|
EXPORT_SYMBOL(blk_sync_queue);
|
|
|
|
|
|
+/**
|
|
|
+ * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
|
|
|
+ * @q: request queue pointer
|
|
|
+ *
|
|
|
+ * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
|
|
|
+ * set and 1 if the flag was already set.
|
|
|
+ */
|
|
|
+int blk_set_preempt_only(struct request_queue *q)
|
|
|
+{
|
|
|
+ unsigned long flags;
|
|
|
+ int res;
|
|
|
+
|
|
|
+ /*
+  * Flag update is done under q->queue_lock so it is atomic with respect
+  * to other flag writers; irqsave is used, so this appears intended to
+  * be callable from any context (NOTE(review): confirm against callers).
+  */
+ spin_lock_irqsave(q->queue_lock, flags);
|
|
|
+ res = queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
|
|
|
+ spin_unlock_irqrestore(q->queue_lock, flags);
|
|
|
+
|
|
|
+ return res;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(blk_set_preempt_only);
|
|
|
+
|
|
|
+/**
+ * blk_clear_preempt_only - clear QUEUE_FLAG_PREEMPT_ONLY
+ * @q: request queue pointer
+ *
+ * Clears the flag under q->queue_lock, mirroring blk_set_preempt_only().
+ */
+void blk_clear_preempt_only(struct request_queue *q)
|
|
|
+{
|
|
|
+ unsigned long flags;
|
|
|
+
|
|
|
+ spin_lock_irqsave(q->queue_lock, flags);
|
|
|
+ queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
|
|
|
+ spin_unlock_irqrestore(q->queue_lock, flags);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
|
|
|
+
|
|
|
/**
|
|
|
* __blk_run_queue_uncond - run a queue whether or not it has been stopped
|
|
|
* @q: The queue to run
|