diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -607,8 +607,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 		if (!new)
 			return -ENOMEM;
 
-		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
-		blk_mq_free_rq_map(*tagsptr, set->flags);
+		blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
 		*tagsptr = new;
 	} else {
 		/*
diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2878,15 +2878,15 @@ static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
 	return set->tags[hctx_idx];
 }
 
-static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
-					 unsigned int hctx_idx)
+void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
+			     struct blk_mq_tags *tags,
+			     unsigned int hctx_idx)
 {
 	unsigned int flags = set->flags;
 
-	if (set->tags && set->tags[hctx_idx]) {
-		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
-		blk_mq_free_rq_map(set->tags[hctx_idx], flags);
-		set->tags[hctx_idx] = NULL;
+	if (tags) {
+		blk_mq_free_rqs(set, tags, hctx_idx);
+		blk_mq_free_rq_map(tags, flags);
 	}
 }
 
@@ -2967,8 +2967,10 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 			 * fallback in case of a new remap fails
 			 * allocation
 			 */
-			if (i && set->tags[i])
-				blk_mq_free_map_and_requests(set, i);
+			if (i && set->tags[i]) {
+				blk_mq_free_map_and_rqs(set, set->tags[i], i);
+				set->tags[i] = NULL;
+			}
 
 			hctx->tags = NULL;
 			continue;
@@ -3264,8 +3266,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx = hctxs[j];
 
 		if (hctx) {
-			if (hctx->tags)
-				blk_mq_free_map_and_requests(set, j);
+			blk_mq_free_map_and_rqs(set, set->tags[j], j);
+			set->tags[j] = NULL;
 			blk_mq_exit_hctx(q, set, hctx, j);
 			hctxs[j] = NULL;
 		}
@@ -3361,8 +3363,10 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 	return 0;
 
 out_unwind:
-	while (--i >= 0)
-		blk_mq_free_map_and_requests(set, i);
+	while (--i >= 0) {
+		blk_mq_free_map_and_rqs(set, set->tags[i], i);
+		set->tags[i] = NULL;
+	}
 
 	return -ENOMEM;
 }
@@ -3557,8 +3561,10 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	return 0;
 
 out_free_mq_rq_maps:
-	for (i = 0; i < set->nr_hw_queues; i++)
-		blk_mq_free_map_and_requests(set, i);
+	for (i = 0; i < set->nr_hw_queues; i++) {
+		blk_mq_free_map_and_rqs(set, set->tags[i], i);
+		set->tags[i] = NULL;
+	}
 out_free_mq_map:
 	for (i = 0; i < set->nr_maps; i++) {
 		kfree(set->map[i].mq_map);
@@ -3590,8 +3596,10 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 {
 	int i, j;
 
-	for (i = 0; i < set->nr_hw_queues; i++)
-		blk_mq_free_map_and_requests(set, i);
+	for (i = 0; i < set->nr_hw_queues; i++) {
+		blk_mq_free_map_and_rqs(set, set->tags[i], i);
+		set->tags[i] = NULL;
+	}
 
 	if (blk_mq_is_sbitmap_shared(set->flags))
 		blk_mq_exit_shared_sbitmap(set);
diff --git a/block/blk-mq.h b/block/blk-mq.h
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -57,7 +57,9 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
 					unsigned int hctx_idx, unsigned int depth);
-
+void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
+			     struct blk_mq_tags *tags,
+			     unsigned int hctx_idx);
 /*
  * Internal helpers for request insertion into sw queues
  */
Refactor blk_mq_free_map_and_requests() such that it can be used at many
sites at which the tag map and rqs are freed. Also rename to
blk_mq_free_map_and_rqs(), which is shorter and matches the alloc
equivalent.

Suggested-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: John Garry <john.garry@huawei.com>
---
 block/blk-mq-tag.c |  3 +--
 block/blk-mq.c     | 40 ++++++++++++++++++++++++----------------
 block/blk-mq.h     |  4 +++-
 3 files changed, 28 insertions(+), 19 deletions(-)
-- 
2.26.2
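
[Editor's note] The ownership change is easy to miss when reading the hunks
one by one: the new blk_mq_free_map_and_rqs() frees whatever tags pointer it
is handed (tolerating NULL), and each call site is now responsible for
clearing its own reference, either set->tags[i] = NULL or overwriting
*tagsptr as in blk_mq_tag_update_depth(). The following is a minimal,
compilable user-space sketch of that contract; the struct fields and the two
low-level free helpers are simplified stand-ins, not the kernel definitions.

/*
 * Sketch of the new ownership contract. Stand-in types only; the real
 * blk_mq_tags and blk_mq_tag_set live in block/blk-mq-tag.h and
 * include/linux/blk-mq.h.
 */
#include <stdlib.h>

struct blk_mq_tags { int nr_tags; };

struct blk_mq_tag_set {
	struct blk_mq_tags *tags[4];	/* one entry per hw queue */
	unsigned int nr_hw_queues;
	unsigned int flags;
};

/* Stand-in: the kernel frees the request pages backing the map here. */
static void blk_mq_free_rqs(struct blk_mq_tag_set *set,
			    struct blk_mq_tags *tags, unsigned int hctx_idx)
{
	(void)set; (void)tags; (void)hctx_idx;
}

/* Stand-in: the kernel tears down the sbitmap and tags struct here. */
static void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags)
{
	(void)flags;
	free(tags);
}

/*
 * Mirrors the refactored helper: free whatever map it is handed, accept
 * NULL, and leave clearing the caller's reference to the caller.
 */
static void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
				    struct blk_mq_tags *tags,
				    unsigned int hctx_idx)
{
	if (tags) {
		blk_mq_free_rqs(set, tags, hctx_idx);
		blk_mq_free_rq_map(tags, set->flags);
	}
}

int main(void)
{
	struct blk_mq_tag_set set = { .nr_hw_queues = 4 };
	unsigned int i;

	for (i = 0; i < set.nr_hw_queues; i++)
		set.tags[i] = calloc(1, sizeof(*set.tags[i]));

	/* The call-site pattern repeated throughout the patch: */
	for (i = 0; i < set.nr_hw_queues; i++) {
		blk_mq_free_map_and_rqs(&set, set.tags[i], i);
		set.tags[i] = NULL;	/* the caller owns the reference */
	}
	return 0;
}

Keeping the NULL assignment at the call sites, rather than inside the helper
as the old blk_mq_free_map_and_requests() did, is what lets
blk_mq_tag_update_depth() reuse it: there the stale map lives behind
*tagsptr (immediately replaced with the new map) rather than in set->tags[].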