path: root/block/blk.h
author     Bart Van Assche <bart.vanassche@wdc.com>    2018-03-07 17:10:12 -0800
committer  Jens Axboe <axboe@kernel.dk>                2018-03-08 14:13:48 -0700
commit     8a0ac14b8da9b86cfbe7aace40c8d485ed5c5b97 (patch)
tree       4c832e2015ec68156744fa4497c0a7531ab3cace /block/blk.h
parent     1db2008b79a32db2ad41338c6c74c4735cf74f6d (diff)
block: Move the queue_flag_*() functions from a public into a private header file
This patch helps prevent new code in block drivers from manipulating queue flags without holding the queue lock when that lock should be held.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
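The locking discipline the patch enforces is easiest to see in a sketch. The following is illustrative only and is not part of the patch: example_stop_queue() is a hypothetical caller inside block/ (where the now-private helpers remain visible), and QUEUE_FLAG_STOPPED simply stands in for whichever flag such a caller would manipulate.

static void example_stop_queue(struct request_queue *q)
{
        unsigned long flags;

        /* Take the queue lock; queue_flag_set() asserts it is held. */
        spin_lock_irqsave(q->queue_lock, flags);
        queue_flag_set(QUEUE_FLAG_STOPPED, q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

Because the helpers now live in block/blk.h rather than a public header, a driver outside block/ can no longer reach queue_flag_set() at all, so this pattern, lock and all, stays confined to the block layer core.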
Diffstat (limited to 'block/blk.h')
-rw-r--r--  block/blk.h  |  69
 1 file changed, 69 insertions(+), 0 deletions(-)
diff --git a/block/blk.h b/block/blk.h
index 46db5dc83dcb..b034fd2460c4 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -41,6 +41,75 @@ extern struct kmem_cache *request_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;
 
+/*
+ * @q->queue_lock is set while a queue is being initialized. Since we know
+ * that no other threads access the queue object before @q->queue_lock has
+ * been set, it is safe to manipulate queue flags without holding the
+ * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
+ * blk_init_allocated_queue().
+ */
+static inline void queue_lockdep_assert_held(struct request_queue *q)
+{
+        if (q->queue_lock)
+                lockdep_assert_held(q->queue_lock);
+}
+
+static inline void queue_flag_set_unlocked(unsigned int flag,
+                                           struct request_queue *q)
+{
+        if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
+            kref_read(&q->kobj.kref))
+                lockdep_assert_held(q->queue_lock);
+        __set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear_unlocked(unsigned int flag,
+                                             struct request_queue *q)
+{
+        if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
+            kref_read(&q->kobj.kref))
+                lockdep_assert_held(q->queue_lock);
+        __clear_bit(flag, &q->queue_flags);
+}
+
+static inline int queue_flag_test_and_clear(unsigned int flag,
+                                            struct request_queue *q)
+{
+        queue_lockdep_assert_held(q);
+
+        if (test_bit(flag, &q->queue_flags)) {
+                __clear_bit(flag, &q->queue_flags);
+                return 1;
+        }
+
+        return 0;
+}
+
+static inline int queue_flag_test_and_set(unsigned int flag,
+                                          struct request_queue *q)
+{
+        queue_lockdep_assert_held(q);
+
+        if (!test_bit(flag, &q->queue_flags)) {
+                __set_bit(flag, &q->queue_flags);
+                return 0;
+        }
+
+        return 1;
+}
+
+static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+        queue_lockdep_assert_held(q);
+        __set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+        queue_lockdep_assert_held(q);
+        __clear_bit(flag, &q->queue_flags);
+}
+
 static inline struct blk_flush_queue *blk_get_flush_queue(
                 struct request_queue *q, struct blk_mq_ctx *ctx)
 {
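Two behaviors of the moved helpers are worth spelling out. First, as the comment above queue_lockdep_assert_held() explains, the assertion deliberately does nothing while @q->queue_lock is still NULL, so queue-initialization code can set flags before the lock is installed. Second, queue_flag_test_and_set() follows the test_and_set_bit() convention of returning the old value, which makes one-shot logic straightforward. Both sketches below are illustrative only: example_queue_init() and example_claim_once() are hypothetical, and QUEUE_FLAG_STOPPED is again just a stand-in flag.

/*
 * During allocation q->queue_lock may still be NULL, so this is
 * legitimately lock-free: no other thread can see the queue yet and
 * queue_lockdep_assert_held() stays silent.
 */
static void example_queue_init(struct request_queue *q)
{
        queue_flag_set(QUEUE_FLAG_STOPPED, q);
}

/*
 * Returns true only for the caller that actually set the flag; every
 * later caller gets the old value 1 back from queue_flag_test_and_set()
 * and backs off.
 */
static bool example_claim_once(struct request_queue *q)
{
        unsigned long flags;
        bool first;

        spin_lock_irqsave(q->queue_lock, flags);
        first = !queue_flag_test_and_set(QUEUE_FLAG_STOPPED, q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        return first;
}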