[3/3] block: hide blk_freeze_queue_start and export a blk-mq alias
blk_freeze_queue_start is used internally for universal queue draining,
where it must handle both blk-mq and bio-based queues, and externally for
blk-mq specific queue freezing. Keep the generically named helper private
to the block layer and export a blk-mq alias for external users.
Signed-off-by: Jinlong Chen <nickyc975@zju.edu.cn>
---
 block/blk-core.c              | 13 +++++++++++++
 block/blk-mq.c                | 27 ++++++++++++++-------------
 block/blk-pm.c                |  2 +-
 block/blk.h                   |  1 +
 drivers/nvme/host/core.c      |  2 +-
 drivers/nvme/host/multipath.c |  2 +-
 include/linux/blk-mq.h        |  2 +-
 7 files changed, 32 insertions(+), 17 deletions(-)
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -269,6 +269,19 @@ void blk_put_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_put_queue);

+void blk_freeze_queue_start(struct request_queue *q)
+{
+ mutex_lock(&q->mq_freeze_lock);
+ if (++q->mq_freeze_depth == 1) {
+ percpu_ref_kill(&q->q_usage_counter);
+ mutex_unlock(&q->mq_freeze_lock);
+ if (queue_is_mq(q))
+ blk_mq_run_hw_queues(q, false);
+ } else {
+ mutex_unlock(&q->mq_freeze_lock);
+ }
+}
+
void blk_queue_start_drain(struct request_queue *q)
{
/*
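
For callers, the depth counter means freezes nest: only the 0 -> 1 transition
kills q_usage_counter, and only the matching final unfreeze brings it back. A
minimal sketch of the nesting behaviour (hypothetical snippet, not part of
this patch; assumes q is a live blk-mq queue):

	blk_mq_freeze_queue(q);		/* depth 0 -> 1: kills q_usage_counter, waits for drain */
	blk_mq_freeze_queue(q);		/* depth 1 -> 2: already frozen, returns at once */
	blk_mq_unfreeze_queue(q);	/* depth 2 -> 1: queue stays frozen */
	blk_mq_unfreeze_queue(q);	/* depth 1 -> 0: resurrects the ref, I/O resumes */
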
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -161,19 +161,20 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
inflight[1] = mi.inflight[1];
}

-void blk_freeze_queue_start(struct request_queue *q)
+void blk_mq_freeze_queue_start(struct request_queue *q)
{
- mutex_lock(&q->mq_freeze_lock);
- if (++q->mq_freeze_depth == 1) {
- percpu_ref_kill(&q->q_usage_counter);
- mutex_unlock(&q->mq_freeze_lock);
- if (queue_is_mq(q))
- blk_mq_run_hw_queues(q, false);
- } else {
- mutex_unlock(&q->mq_freeze_lock);
- }
+ /*
+ * Warn on non-blk-mq use.
+ */
+ WARN_ON_ONCE(!queue_is_mq(q));
+
+ /*
+ * This is just an alias of blk_freeze_queue_start, named to keep
+ * the blk_mq_* namespace consistent.
+ */
+ blk_freeze_queue_start(q);
}
-EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
@@ -196,7 +197,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
*/
void blk_mq_freeze_queue(struct request_queue *q)
{
- blk_freeze_queue_start(q);
+ blk_mq_freeze_queue_start(q);
blk_mq_freeze_queue_wait(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
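
Since blk_mq_freeze_queue is just the start/wait pair above, exporting the
start half separately lets a driver freeze several queues concurrently
instead of serializing full freezes; this is the shape of
nvme_start_freeze()/nvme_wait_freeze() further down. A sketch (hypothetical
helper, not part of this patch; assumes each qs[i] is a blk-mq queue):

	static void example_freeze_queues(struct request_queue **qs, int n)
	{
		int i;

		/* Kick off all the drains first... */
		for (i = 0; i < n; i++)
			blk_mq_freeze_queue_start(qs[i]);

		/* ...then wait, so the queues drain in parallel. */
		for (i = 0; i < n; i++)
			blk_mq_freeze_queue_wait(qs[i]);
	}
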
@@ -1570,7 +1571,7 @@ static void blk_mq_timeout_work(struct work_struct *work)
* percpu_ref_tryget directly, because we need to be able to
* obtain a reference even in the short window between the queue
* starting to freeze, by dropping the first reference in
- * blk_freeze_queue_start, and the moment the last request is
+ * blk_mq_freeze_queue_start, and the moment the last request is
* consumed, marked by the instant q_usage_counter reaches
* zero.
*/
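
This window is also why the timeout path takes a plain percpu_ref_tryget()
on q_usage_counter rather than going through blk_queue_enter(); roughly, as
in the surrounding function:

	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;		/* queue fully drained: no requests left to time out */

	/* ... scan tags for expired requests ... */

	blk_queue_exit(q);
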
--- a/block/blk-pm.c
+++ b/block/blk-pm.c
@@ -80,7 +80,7 @@ int blk_pre_runtime_suspend(struct request_queue *q)
blk_set_pm_only(q);
ret = -EBUSY;
/* Switch q_usage_counter from per-cpu to atomic mode. */
- blk_freeze_queue_start(q);
+ blk_mq_freeze_queue_start(q);
/*
* Wait until atomic mode has been reached. Since that
* involves calling call_rcu(), it is guaranteed that later
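
The hunk ends mid-comment; in the full function the code goes on to wait for
the atomic switch and then checks whether the queue is idle. Condensed from
the code that follows this hunk (6.1-era, error paths elided):

	percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
	if (percpu_ref_is_zero(&q->q_usage_counter))
		ret = 0;		/* no requests in flight: safe to suspend */
	/* Switch back to per-cpu mode. */
	blk_mq_unfreeze_queue(q);
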
--- a/block/blk.h
+++ b/block/blk.h
@@ -37,6 +37,7 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

+void blk_freeze_queue_start(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -5199,7 +5199,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl)

down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
- blk_freeze_queue_start(ns->queue);
+ blk_mq_freeze_queue_start(ns->queue);
up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -77,7 +77,7 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
lockdep_assert_held(&subsys->lock);
list_for_each_entry(h, &subsys->nsheads, entry)
if (h->disk)
- blk_freeze_queue_start(h->disk->queue);
+ blk_mq_freeze_queue_start(h->disk->queue);
}

void nvme_failover_req(struct request *req)
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -887,7 +887,7 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
-void blk_freeze_queue_start(struct request_queue *q);
+void blk_mq_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);