Diffstat (limited to 'drivers/pci/iova.c')
-rw-r--r-- | drivers/pci/iova.c | 63
1 file changed, 50 insertions, 13 deletions
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 717fafaa7e0..a84571c2936 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -57,12 +57,28 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 	iovad->cached32_node = rb_next(&free->node);
 }
 
-static int __alloc_iova_range(struct iova_domain *iovad,
-	unsigned long size, unsigned long limit_pfn, struct iova *new)
+/* Computes the padding size required, to make the
+ * the start address naturally aligned on its size
+ */
+static int
+iova_get_pad_size(int size, unsigned int limit_pfn)
+{
+	unsigned int pad_size = 0;
+	unsigned int order = ilog2(size);
+
+	if (order)
+		pad_size = (limit_pfn + 1) % (1 << order);
+
+	return pad_size;
+}
+
+static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
+	unsigned long limit_pfn, struct iova *new, bool size_aligned)
 {
 	struct rb_node *curr = NULL;
 	unsigned long flags;
 	unsigned long saved_pfn;
+	unsigned int pad_size = 0;
 
 	/* Walk the tree backwards */
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
@@ -72,22 +88,32 @@ static int __alloc_iova_range(struct iova_domain *iovad,
 		struct iova *curr_iova = container_of(curr, struct iova, node);
 		if (limit_pfn < curr_iova->pfn_lo)
 			goto move_left;
-		if (limit_pfn < curr_iova->pfn_hi)
+		else if (limit_pfn < curr_iova->pfn_hi)
 			goto adjust_limit_pfn;
-		if ((curr_iova->pfn_hi + size) <= limit_pfn)
-			break;	/* found a free slot */
+		else {
+			if (size_aligned)
+				pad_size = iova_get_pad_size(size, limit_pfn);
+			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
+				break;	/* found a free slot */
+		}
 adjust_limit_pfn:
 		limit_pfn = curr_iova->pfn_lo - 1;
 move_left:
 		curr = rb_prev(curr);
 	}
 
-	if ((!curr) && !(IOVA_START_PFN + size <= limit_pfn)) {
-		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-		return -ENOMEM;
+	if (!curr) {
+		if (size_aligned)
+			pad_size = iova_get_pad_size(size, limit_pfn);
+		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
+			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+			return -ENOMEM;
+		}
 	}
-	new->pfn_hi = limit_pfn;
-	new->pfn_lo = limit_pfn - size + 1;
+
+	/* pfn_lo will point to size aligned address if size_aligned is set */
+	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
+	new->pfn_hi = new->pfn_lo + size - 1;
 
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 
 	return 0;
@@ -119,12 +145,16 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
  * @iovad - iova domain in question
  * @size - size of page frames to allocate
  * @limit_pfn - max limit address
+ * @size_aligned - set if size_aligned address range is required
  * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
- * looking from limit_pfn instead from IOVA_START_PFN.
+ * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned
+ * flag is set then the allocated address iova->pfn_lo will be naturally
+ * aligned on roundup_power_of_two(size).
  */
 struct iova *
 alloc_iova(struct iova_domain *iovad, unsigned long size,
-	unsigned long limit_pfn)
+	unsigned long limit_pfn,
+	bool size_aligned)
 {
 	unsigned long flags;
 	struct iova *new_iova;
@@ -134,8 +164,15 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (!new_iova)
 		return NULL;
 
+	/* If size aligned is set then round the size to
+	 * to next power of two.
+	 */
+	if (size_aligned)
+		size = __roundup_pow_of_two(size);
+
 	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
-	ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova);
+	ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
+		size_aligned);
 	if (ret) {
 		spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
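The arithmetic behind the new size_aligned path is small enough to check outside the kernel. Below is a minimal user-space sketch of it, not kernel code: the demo_* helpers are hypothetical stand-ins for __roundup_pow_of_two() and iova_get_pad_size(), and the sample pfn values are made up for illustration. It shows how rounding the size up to a power of two and padding down from limit_pfn makes pfn_lo naturally aligned on that size.

/*
 * Sketch (assumption-labelled) of the size-aligned IOVA math:
 *   aligned_size = next power of two >= size
 *   pad          = (limit_pfn + 1) % aligned_size
 *   pfn_lo       = limit_pfn - (aligned_size + pad) + 1
 *   pfn_hi       = pfn_lo + aligned_size - 1
 */
#include <stdio.h>

/* Round v up to the next power of two (v > 0 assumed). */
static unsigned long demo_roundup_pow_of_two(unsigned long v)
{
	unsigned long p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* Mirrors iova_get_pad_size() once size is already a power of two. */
static unsigned long demo_pad_size(unsigned long size, unsigned long limit_pfn)
{
	return (limit_pfn + 1) % size;
}

int main(void)
{
	unsigned long size = 5;			/* pages requested */
	unsigned long limit_pfn = 0xffffd;	/* made-up upper pfn limit */
	unsigned long aligned_size = demo_roundup_pow_of_two(size);
	unsigned long pad = demo_pad_size(aligned_size, limit_pfn);
	unsigned long pfn_lo = limit_pfn - (aligned_size + pad) + 1;
	unsigned long pfn_hi = pfn_lo + aligned_size - 1;

	printf("size=%lu aligned=%lu pad=%lu pfn_lo=%#lx pfn_hi=%#lx\n",
	       size, aligned_size, pad, pfn_lo, pfn_hi);
	printf("pfn_lo %% aligned_size = %lu (0 means naturally aligned)\n",
	       pfn_lo % aligned_size);
	return 0;
}

With size = 5 and limit_pfn = 0xffffd this prints pad = 6 and pfn_lo = 0xffff0, i.e. the start pfn is a multiple of the rounded-up size 8 while pfn_hi stays at or below limit_pfn, which is the alignment property the patch documents for iova->pfn_lo.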