@@ -515,9 +515,9 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
 	percpu_ref_put(&q->q_usage_counter);
 }
 
-static int blk_mq_sched_alloc_tags(struct request_queue *q,
-				   struct blk_mq_hw_ctx *hctx,
-				   unsigned int hctx_idx)
+static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
+					  struct blk_mq_hw_ctx *hctx,
+					  unsigned int hctx_idx)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 	int ret;
@@ -609,15 +609,15 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 				   BLKDEV_DEFAULT_RQ);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		ret = blk_mq_sched_alloc_tags(q, hctx, i);
+		ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
 		if (ret)
-			goto err_free_tags;
+			goto err_free_map_and_rqs;
 	}
 
 	if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) {
 		ret = blk_mq_init_sched_shared_sbitmap(q);
 		if (ret)
-			goto err_free_tags;
+			goto err_free_map_and_rqs;
 	}
 
 	ret = e->ops.init_sched(q, e);
@@ -645,8 +645,8 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 err_free_sbitmap:
 	if (blk_mq_is_sbitmap_shared(q->tag_set->flags))
 		blk_mq_exit_sched_shared_sbitmap(q);
-err_free_tags:
 	blk_mq_sched_free_requests(q);
+err_free_map_and_rqs:
 	blk_mq_sched_tags_teardown(q);
 	q->elevator = NULL;
 	return ret;
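
A note on the resulting unwind order in blk_mq_init_sched(): in the hunks shown, the gotos to err_free_map_and_rqs all occur before e->ops.init_sched() has run, and after this patch that label skips blk_mq_sched_free_requests(), which now sits above it and so runs only on the later fall-through path from err_free_sbitmap. Below is a minimal, compilable sketch of this cascading-label unwind pattern; it is an illustration only, and every name in it (alloc_maps, init_sched, free_requests, teardown_maps, fail_at) is a hypothetical stand-in, not a blk-mq function.

#include <stdio.h>

/*
 * Hypothetical stand-ins for the blk-mq helpers; they only report what
 * they would do. fail_at picks the step that fails so both unwind
 * paths can be exercised.
 */
static int fail_at;

static int alloc_maps(void)		/* stands in for the per-hctx alloc loop */
{
	puts("alloc tag maps and static rqs");
	return fail_at == 1 ? -1 : 0;
}

static int init_sched(void)		/* stands in for e->ops.init_sched() */
{
	puts("init scheduler");
	return fail_at == 2 ? -1 : 0;
}

static void free_requests(void)		/* undoes init_sched()'s allocations */
{
	puts("free scheduler requests");
}

static void teardown_maps(void)		/* undoes alloc_maps(), tolerating partial state */
{
	puts("teardown tag maps");
}

static int init(void)
{
	int ret;

	ret = alloc_maps();
	if (ret)
		goto err_free_map_and_rqs;	/* init_sched() never ran: no requests to free */

	ret = init_sched();
	if (ret)
		goto err_free_requests;		/* unwind in reverse order of setup */

	return 0;

	/* Later-failure labels fall through into earlier teardown steps. */
err_free_requests:
	free_requests();
err_free_map_and_rqs:
	teardown_maps();
	return ret;
}

int main(void)
{
	for (fail_at = 1; fail_at <= 2; fail_at++) {
		printf("-- failure injected at step %d --\n", fail_at);
		init();
	}
	return 0;
}

The invariant is that each label frees exactly what was allocated before its corresponding goto, with later labels falling through into earlier ones; that is why moving blk_mq_sched_free_requests() relative to the label changes which failure paths free requests.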