@@ -227,6 +227,59 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
return -ENOMEM;
}
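
+/*
+ * Round 'start' up to the alignment implied by 'size' (the next power of
+ * two): e.g. for size = 5 pfns the mask is 7, so a start of 0x1003
+ * becomes 0x1008.
+ */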
+static unsigned long
+__iova_get_aligned_start(unsigned long start, unsigned long size)
+{
+	unsigned long mask = __roundup_pow_of_two(size) - 1;
+
+	return (start + mask) & ~mask;
+}
+
+static int __alloc_and_insert_iova_range_forward(struct iova_domain *iovad,
+		unsigned long size, unsigned long limit_pfn,
+		struct iova *new)
+{
+	struct rb_node *curr;
+	unsigned long flags;
+	unsigned long start, limit;
+
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+
+	start = __iova_get_aligned_start(iovad->start_pfn, size);
+
+	/*
+	 * Walk the tree from the lowest entry upwards and take the first
+	 * hole that can fit the allocation. Checking the hole *below* each
+	 * entry also covers the space between start_pfn and the lowest
+	 * entry, and never considers anything above limit_pfn.
+	 */
+	for (curr = rb_first(&iovad->rbroot); curr; curr = rb_next(curr)) {
+		struct iova *curr_iova = rb_entry(curr, struct iova, node);
+
+		if (curr_iova->pfn_lo) {
+			limit = min(curr_iova->pfn_lo - 1, limit_pfn);
+			if (start <= limit && size - 1 <= limit - start)
+				goto found;	/* found a free slot */
+		}
+
+		start = __iova_get_aligned_start(curr_iova->pfn_hi + 1, size);
+		if (start <= curr_iova->pfn_hi || start > limit_pfn)
+			break;	/* wrapped around or passed limit_pfn */
+	}
+
+	/* only the hole above the highest entry (if any) is left to try */
+	if (curr || start > limit_pfn || size - 1 > limit_pfn - start) {
+		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+		return -ENOMEM;
+	}
+
+found:
+	new->pfn_lo = start;
+	new->pfn_hi = new->pfn_lo + size - 1;
+	iova_insert_rbtree(&iovad->rbroot, new, curr);
+
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+	return 0;
+}
+
static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);
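To make the walk above easier to reason about, here is a minimal user-space
model of the same first-fit search. It is not part of the patch: names such
as struct range, align_up() and first_fit() are invented for the sketch, and
overflow handling is omitted. Sorted, inclusive ranges stand in for the
rbtree nodes:

#include <stdio.h>

struct range { unsigned long lo, hi; };	/* inclusive, sorted by 'lo' */

static unsigned long align_up(unsigned long start, unsigned long size)
{
	unsigned long mask = 1;

	while (mask < size)	/* round size up to a power of two */
		mask <<= 1;
	mask -= 1;

	return (start + mask) & ~mask;
}

/* Lowest aligned start for 'size' pfns in [start_pfn, limit_pfn], or ~0UL. */
static unsigned long first_fit(const struct range *used, int nr,
			       unsigned long start_pfn,
			       unsigned long limit_pfn, unsigned long size)
{
	unsigned long start = align_up(start_pfn, size);
	int i;

	for (i = 0; i < nr; i++) {
		/* hole below entry i: [start, used[i].lo - 1] */
		if (used[i].lo && start < used[i].lo &&
		    start + size - 1 < used[i].lo &&
		    start + size - 1 <= limit_pfn)
			return start;
		/* next candidate: just above entry i */
		start = align_up(used[i].hi + 1, size);
	}

	/* hole above the highest entry */
	return (start + size - 1 <= limit_pfn) ? start : ~0UL;
}

int main(void)
{
	struct range used[] = { { 0x10, 0x1f }, { 0x24, 0x2f } };

	/* prints 0x20: four pfns, naturally aligned, fit between the ranges */
	printf("%#lx\n", first_fit(used, 2, 0x10, 0xff, 4));

	return 0;
}

The kernel version performs the same walk over the rbtree under
iova_rbtree_lock and additionally records the winning hole with
iova_insert_rbtree().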
@@ -398,6 +451,31 @@ free_iova(struct iova_domain *iovad, unsigned long pfn)
}
EXPORT_SYMBOL_GPL(free_iova);
+/**
+ * alloc_iova_first_fit - allocates an iova from the beginning of the address space
+ * @iovad: - iova domain in question
+ * @size: - size of page frames to allocate
+ * @limit_pfn: - max limit address
+ *
+ * Allocates the first (lowest) free, properly aligned range of @size page
+ * frames, without using the rcaches. Returns the pfn the allocated iova
+ * starts at, or IOVA_BAD_ADDR on failure.
+ */
+unsigned long
+alloc_iova_first_fit(struct iova_domain *iovad, unsigned long size,
+		     unsigned long limit_pfn)
+{
+	struct iova *new_iova = alloc_iova_mem();
+
+	if (!new_iova)
+		return IOVA_BAD_ADDR;
+
+	if (__alloc_and_insert_iova_range_forward(iovad, size, limit_pfn,
+						  new_iova)) {
+		free_iova_mem(new_iova);
+		return IOVA_BAD_ADDR;
+	}
+	return new_iova->pfn_lo;
+}
+EXPORT_SYMBOL_GPL(alloc_iova_first_fit);
+
/**
* alloc_iova_fast - allocates an iova from rcache
* @iovad: - iova domain in question
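For illustration, a hypothetical caller could look like the sketch below.
It is not part of this patch: low_iova_alloc() is an invented name,
IOVA_BAD_ADDR comes from an earlier patch in this series, and iova_shift(),
iova_align() and free_iova() are the existing helpers from
include/linux/iova.h:

#include <linux/dma-mapping.h>
#include <linux/iova.h>

/* Allocate the lowest suitable IOVA range for a buffer that must stay
 * below 'dma_limit', e.g. to back DMA_ATTR_LOW_ADDRESS.
 */
static dma_addr_t low_iova_alloc(struct iova_domain *iovad, size_t size,
				 u64 dma_limit)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long pfns = iova_align(iovad, size) >> shift;
	unsigned long pfn;

	pfn = alloc_iova_first_fit(iovad, pfns, dma_limit >> shift);
	if (pfn == IOVA_BAD_ADDR)
		return DMA_MAPPING_ERROR;

	/* the range is released later with free_iova(iovad, pfn) */
	return (dma_addr_t)pfn << shift;
}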
@@ -152,6 +152,8 @@ void queue_iova(struct iova_domain *iovad,
		unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn, bool flush_rcache);
+unsigned long alloc_iova_first_fit(struct iova_domain *iovad, unsigned long size,
+				   unsigned long limit_pfn);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
			  unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
Add support for the 'first-fit' allocation algorithm. It will be used for
the special case of implementing DMA_ATTR_LOW_ADDRESS, so this path doesn't
use the IOVA rcache.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
---
 drivers/iommu/iova.c | 78 ++++++++++++++++++++++++++++++++++++++++++++
 include/linux/iova.h |  2 ++
 2 files changed, 80 insertions(+)
-- 
2.17.1