@@ -545,10 +545,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
}
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_PAGE_POOL, rq->page_pool);
- if (err) {
- page_pool_free(rq->page_pool);
+ if (err)
goto err_free;
- }
for (i = 0; i < wq_sz; i++) {
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@ -613,6 +611,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
if (rq->xdp_prog)
bpf_prog_put(rq->xdp_prog);
xdp_rxq_info_unreg(&rq->xdp_rxq);
+ if (rq->page_pool)
+ page_pool_free(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
return err;
@@ -643,6 +643,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
}
xdp_rxq_info_unreg(&rq->xdp_rxq);
+ if (rq->page_pool)
+ page_pool_free(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
}
@@ -101,6 +101,7 @@ struct page_pool {
struct ptr_ring ring;
atomic_t pages_state_release_cnt;
+ atomic_t user_cnt;
};
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
@@ -183,6 +184,12 @@ static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
return page->dma_addr;
}
+/* used to prevent pool from deallocation */
+static inline void page_pool_get(struct page_pool *pool)
+{
+ atomic_inc(&pool->user_cnt);
+}
+
static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
@@ -48,6 +48,7 @@ static int page_pool_init(struct page_pool *pool,
return -ENOMEM;
atomic_set(&pool->pages_state_release_cnt, 0);
+ atomic_set(&pool->user_cnt, 0);
if (pool->p.flags & PP_FLAG_DMA_MAP)
get_device(pool->p.dev);
@@ -70,6 +71,8 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
kfree(pool);
return ERR_PTR(err);
}
+
+ page_pool_get(pool);
return pool;
}
EXPORT_SYMBOL(page_pool_create);
@@ -356,6 +359,10 @@ static void __warn_in_flight(struct page_pool *pool)
void __page_pool_free(struct page_pool *pool)
{
+ /* free only if no users */
+ if (!atomic_dec_and_test(&pool->user_cnt))
+ return;
+
WARN(pool->alloc.count, "API usage violation");
WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty");
@@ -372,6 +372,9 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
mutex_unlock(&mem_id_lock);
+ if (type == MEM_TYPE_PAGE_POOL)
+ page_pool_get(xdp_alloc->page_pool);
+
trace_mem_connect(xdp_alloc, xdp_rxq);
return 0;
err:
Add a user counter that allows the pool to be deleted only when it has no remaining users. It does not prevent the pool from being flushed; it only prevents the pool instance from being freed. This helps when there is no need to delete the pool immediately, and it is now the user's responsibility to free it by calling page_pool_free() during the destroy procedure. It also makes the use of page_pool_free() explicit, rather than fully hidden inside the xdp unregister path, which looks more correct as the counterpart of the page pool "create" routine. Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org> --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 +++++--- include/net/page_pool.h | 7 +++++++ net/core/page_pool.c | 7 +++++++ net/core/xdp.c | 3 +++ 4 files changed, 22 insertions(+), 3 deletions(-) -- 2.17.1