Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--	drivers/net/xen-netback/interface.c | 9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 037f74f0fcf..f38227afe09 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -80,7 +80,7 @@ static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-int xenvif_poll(struct napi_struct *napi, int budget)
+static int xenvif_poll(struct napi_struct *napi, int budget)
 {
 	struct xenvif_queue *queue =
 		container_of(napi, struct xenvif_queue, napi);
@@ -483,9 +483,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 	 * better enable it. The long term solution would be to use just a
 	 * bunch of valid page descriptors, without dependency on ballooning
 	 */
-	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
-				       queue->mmap_pages,
-				       false);
+	err = gnttab_alloc_pages(MAX_PENDING_REQS,
+				 queue->mmap_pages);
 	if (err) {
 		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
 		return -ENOMEM;
@@ -664,7 +663,7 @@ void xenvif_disconnect(struct xenvif *vif)
  */
 void xenvif_deinit_queue(struct xenvif_queue *queue)
 {
-	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
+	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
 }
 
 void xenvif_free(struct xenvif *vif)
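
The change swaps the balloon-driver allocator (alloc_xenballooned_pages/free_xenballooned_pages) for the grant-table helpers gnttab_alloc_pages/gnttab_free_pages, so the pages backing queue->mmap_pages are obtained from the code that will actually use them as grant-mapping targets. The following is a minimal sketch of that allocate/release pattern, assuming the in-tree <xen/grant_table.h> helpers; the wrapper function names (example_reserve_mmap_pages, example_release_mmap_pages) are illustrative, while MAX_PENDING_REQS, queue->mmap_pages and the error handling mirror the diff above.

	/* Sketch only: wrapper names are hypothetical, calls follow the diff. */
	#include <linux/netdevice.h>
	#include <xen/grant_table.h>

	static int example_reserve_mmap_pages(struct xenvif_queue *queue)
	{
		int err;

		/* Reserve MAX_PENDING_REQS pages suitable for grant mappings. */
		err = gnttab_alloc_pages(MAX_PENDING_REQS, queue->mmap_pages);
		if (err) {
			netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
			return -ENOMEM;
		}
		return 0;
	}

	static void example_release_mmap_pages(struct xenvif_queue *queue)
	{
		/* Release the pages once all grant mappings are torn down. */
		gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
	}

Because both helpers take the same (count, struct page **) pair, the call sites in xenvif_init_queue() and xenvif_deinit_queue() change almost one-for-one; only the extra highmem argument of alloc_xenballooned_pages() disappears.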