Diffstat (limited to 'drivers/pci/iova.c')
-rw-r--r--   drivers/pci/iova.c   53
1 files changed, 38 insertions, 15 deletions
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 8de7ab6c6d0..3ef4ac06431 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -3,7 +3,8 @@
  *
  * This file is released under the GPLv2.
  *
- * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  */
 
 #include "iova.h"
@@ -72,10 +73,11 @@ iova_get_pad_size(int size, unsigned int limit_pfn)
 	return pad_size;
 }
 
-static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
-		unsigned long limit_pfn, struct iova *new, bool size_aligned)
+static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
+		unsigned long size, unsigned long limit_pfn,
+		struct iova *new, bool size_aligned)
 {
-	struct rb_node *curr = NULL;
+	struct rb_node *prev, *curr = NULL;
 	unsigned long flags;
 	unsigned long saved_pfn;
 	unsigned int pad_size = 0;
@@ -84,8 +86,10 @@ static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	saved_pfn = limit_pfn;
 	curr = __get_cached_rbnode(iovad, &limit_pfn);
+	prev = curr;
 	while (curr) {
 		struct iova *curr_iova = container_of(curr, struct iova, node);
+
 		if (limit_pfn < curr_iova->pfn_lo)
 			goto move_left;
 		else if (limit_pfn < curr_iova->pfn_hi)
@@ -99,6 +103,7 @@ static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
 adjust_limit_pfn:
 		limit_pfn = curr_iova->pfn_lo - 1;
 move_left:
+		prev = curr;
 		curr = rb_prev(curr);
 	}
 
@@ -115,7 +120,33 @@ move_left:
 	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
 	new->pfn_hi = new->pfn_lo + size - 1;
 
+	/* Insert the new_iova into domain rbtree by holding writer lock */
+	/* Add new node and rebalance tree. */
+	{
+		struct rb_node **entry = &((prev)), *parent = NULL;
+		/* Figure out where to put new node */
+		while (*entry) {
+			struct iova *this = container_of(*entry,
+							struct iova, node);
+			parent = *entry;
+
+			if (new->pfn_lo < this->pfn_lo)
+				entry = &((*entry)->rb_left);
+			else if (new->pfn_lo > this->pfn_lo)
+				entry = &((*entry)->rb_right);
+			else
+				BUG(); /* this should not happen */
+		}
+
+		/* Add new node and rebalance tree. */
+		rb_link_node(&new->node, parent, entry);
+		rb_insert_color(&new->node, &iovad->rbroot);
+	}
+	__cached_rbnode_insert_update(iovad, saved_pfn, new);
+
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+
 	return 0;
 }
 
@@ -171,23 +202,15 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 		size = __roundup_pow_of_two(size);
 
 	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
-	ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
-			size_aligned);
+	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
+			new_iova, size_aligned);
 
+	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
 	if (ret) {
-		spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
 		free_iova_mem(new_iova);
 		return NULL;
 	}
 
-	/* Insert the new_iova into domain rbtree by holding writer lock */
-	spin_lock(&iovad->iova_rbtree_lock);
-	iova_insert_rbtree(&iovad->rbroot, new_iova);
-	__cached_rbnode_insert_update(iovad, limit_pfn, new_iova);
-	spin_unlock(&iovad->iova_rbtree_lock);
-
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
-
 	return new_iova;
 }
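Note on the inserted block: what the patch open-codes inside __alloc_and_insert_iova_range() is the standard <linux/rbtree.h> insertion idiom, keyed on pfn_lo: walk down to an empty child link, then attach with rb_link_node() and rebalance with rb_insert_color(). Because this now runs before iova_rbtree_lock is released, the free-range search and the insertion happen under a single hold of that lock. A minimal sketch of the same idiom as a stand-alone helper is shown below; the helper name iova_insert_sketch is hypothetical, it walks from the tree root rather than from 'prev' as the patch does, and it assumes the caller already holds iovad->iova_rbtree_lock and that struct iova (node, pfn_lo) comes from "iova.h".

/* Sketch only, not part of the patch: standard rbtree insertion keyed on
 * pfn_lo.  Caller is assumed to hold iovad->iova_rbtree_lock.
 */
#include <linux/rbtree.h>
#include "iova.h"		/* struct iova: rb_node node, pfn_lo, pfn_hi */

static void iova_insert_sketch(struct rb_root *root, struct iova *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	/* Descend until we reach the empty link where 'new' belongs. */
	while (*link) {
		struct iova *this = container_of(*link, struct iova, node);

		parent = *link;
		if (new->pfn_lo < this->pfn_lo)
			link = &(*link)->rb_left;
		else if (new->pfn_lo > this->pfn_lo)
			link = &(*link)->rb_right;
		else
			BUG();	/* two ranges must never share pfn_lo */
	}

	/* Attach the node, then let the rbtree code recolour/rebalance. */
	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);
}

The change in alloc_iova() follows from this: before the patch, iova_rbtree_lock was taken once in __alloc_iova_range() for the search and again in alloc_iova() for the insertion; afterwards it is taken only once per allocation, and iova_alloc_lock is dropped as soon as the combined helper returns.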