@@ -189,6 +189,7 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
 {
 	return heap->priv;
 }
+EXPORT_SYMBOL_GPL(dma_heap_get_drvdata);
 
 struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
 {
@@ -272,6 +273,7 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
 	kfree(heap);
 	return err_ret;
 }
+EXPORT_SYMBOL_GPL(dma_heap_add);
 
 static char *dma_heap_devnode(struct device *dev, umode_t *mode)
 {
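These two exports are what allow a dma-buf heap to be built as a loadable module: the module registers with dma_heap_add() and later recovers its private data from the heap handle via dma_heap_get_drvdata(). The following is only a sketch, not part of this patch; every my_heap_* name is made up, and it assumes the dma_heap_ops variant whose allocate callback returns a struct dma_buf * (that callback's prototype has differed between kernel versions).

/* Hypothetical heap module; every "my_heap" identifier is illustrative. */
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/sizes.h>

struct my_heap_priv {
	unsigned long pool_size;	/* stand-in per-heap driver data */
};

static struct dma_buf *my_heap_allocate(struct dma_heap *heap,
					unsigned long len,
					unsigned long fd_flags,
					unsigned long heap_flags)
{
	/* Recover the pointer handed in as exp_info.priv at registration. */
	struct my_heap_priv *priv = dma_heap_get_drvdata(heap);

	pr_debug("my_heap: %lu byte request, pool of %lu bytes\n",
		 len, priv->pool_size);

	/* A real heap would build and return a dma_buf here. */
	return ERR_PTR(-ENOMEM);
}

static const struct dma_heap_ops my_heap_ops = {
	.allocate = my_heap_allocate,
};

static struct my_heap_priv my_heap_priv = {
	.pool_size = SZ_16M,
};

static int __init my_heap_init(void)
{
	struct dma_heap_export_info exp_info = {
		.name = "my_heap",
		.ops = &my_heap_ops,
		.priv = &my_heap_priv,
	};
	struct dma_heap *heap = dma_heap_add(&exp_info);

	return PTR_ERR_OR_ZERO(heap);
}
module_init(my_heap_init);

MODULE_LICENSE("GPL");

One practical caveat: at the time of this patch the heap core offers no corresponding removal helper, so a module like this could not be unloaded cleanly after registering.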
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -54,6 +54,7 @@ const char *cma_get_name(const struct cma *cma)
 {
 	return cma->name;
 }
+EXPORT_SYMBOL_GPL(cma_get_name);
 
 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
 					     unsigned int align_order)
@@ -498,6 +499,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	pr_debug("%s(): returned %p\n", __func__, page);
 	return page;
 }
+EXPORT_SYMBOL_GPL(cma_alloc);
 
 /*
  * cma_alloc_bulk() - allocate high order bulk pages from contiguous area with
@@ -641,6 +643,7 @@ bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(cma_release);
 
 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
 {
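With cma_get_name(), cma_alloc() and cma_release() exported, a module can carve physically contiguous pages out of a CMA area it has been handed. The wrappers below are a sketch only, not part of this patch: the my_cma_* names are invented, the struct cma pointer is assumed to come from whatever platform code set the area up, and the cma_alloc() prototype used is the one visible in the hunk context above (size_t count, order-based align) with a final bool no_warn argument.

/* Hypothetical wrappers; the "my_cma" names are illustrative only. */
#include <linux/cma.h>
#include <linux/mm.h>
#include <linux/printk.h>

/* Allocate @count contiguous pages, aligned to 2^@order pages. */
static struct page *my_cma_get_pages(struct cma *cma, size_t count,
				     unsigned int order)
{
	struct page *pages = cma_alloc(cma, count, order, false);

	if (!pages)
		pr_warn("my_cma: %zu pages from area %s failed\n",
			count, cma_get_name(cma));
	return pages;
}

/* Hand the pages back; cma_release() verifies they belong to @cma. */
static void my_cma_put_pages(struct cma *cma, struct page *pages,
			     unsigned int count)
{
	if (!cma_release(cma, pages, count))
		pr_warn("my_cma: %p was not allocated from area %s\n",
			pages, cma_get_name(cma));
}

cma_for_each_area(), visible in the context above, is one way such a module could locate a named area if nothing hands it a pointer directly, assuming that iterator is exported as well.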