author | Ben Skeggs <bskeggs@redhat.com> | 2012-07-14 19:09:17 +1000 |
---|---|---|
committer | Ben Skeggs <bskeggs@redhat.com> | 2012-10-03 13:12:52 +1000 |
commit | 3863c9bc887e9638a9d905d55f6038641ece78d6 (patch) | |
tree | 923decce50fc9f0ed28e04d5ad83d6518162bad0 /drivers/gpu/drm/nouveau/nouveau_sgdma.c | |
parent | 8a9b889e668a5bc2f4031015fe4893005c43403d (diff) | |
drm/nouveau/instmem: completely new implementation, as a subdev module
v2 (Ben Skeggs):
- some fixes for 64KiB PAGE_SIZE
- fix porting issues in (currently unused) nv41/nv44 pciegart code
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_sgdma.c | 315 |
1 file changed, 14 insertions, 301 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 38483a042bc..464beda94c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -13,7 +13,7 @@ struct nouveau_sgdma_be {
 	 */
 	struct ttm_dma_tt ttm;
 	struct drm_device *dev;
-	u64 offset;
+	struct nouveau_mem *node;
 };
 
 static void
@@ -32,25 +32,18 @@ static int
 nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_device *dev = nvbe->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-	unsigned i, j, pte;
-
-	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
-
-	nvbe->offset = mem->start << PAGE_SHIFT;
-	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-	for (i = 0; i < ttm->num_pages; i++) {
-		dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
-		uint32_t offset_l = lower_32_bits(dma_offset);
+	struct nouveau_mem *node = mem->mm_node;
+	u64 size = mem->num_pages << 12;
 
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
-			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
-			offset_l += NV_CTXDMA_PAGE_SIZE;
-		}
+	if (ttm->sg) {
+		node->sg = ttm->sg;
+		nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
+	} else {
+		node->pages = nvbe->ttm.dma_address;
+		nouveau_vm_map_sg(&node->vma[0], 0, size, node);
 	}
 
+	nvbe->node = node;
 	return 0;
 }
 
@@ -58,22 +51,7 @@ static int
 nv04_sgdma_unbind(struct ttm_tt *ttm)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_device *dev = nvbe->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-	unsigned i, j, pte;
-
-	NV_DEBUG(dev, "\n");
-
-	if (ttm->state != tt_bound)
-		return 0;
-
-	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-	for (i = 0; i < ttm->num_pages; i++) {
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
-			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-	}
-
+	nouveau_vm_unmap(&nvbe->node->vma[0]);
 	return 0;
 }
 
@@ -83,206 +61,6 @@ static struct ttm_backend_func nv04_sgdma_backend = {
 	.destroy = nouveau_sgdma_destroy
 };
 
-static void
-nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
-{
-	struct drm_device *dev = nvbe->dev;
-
-	nv_wr32(dev, 0x100810, 0x00000022);
-	if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
-		NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
-			 nv_rd32(dev, 0x100810));
-	nv_wr32(dev, 0x100810, 0x00000000);
-}
-
-static int
-nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
-	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	dma_addr_t *list = nvbe->ttm.dma_address;
-	u32 pte = mem->start << 2;
-	u32 cnt = ttm->num_pages;
-
-	nvbe->offset = mem->start << PAGE_SHIFT;
-
-	while (cnt--) {
-		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
-		pte += 4;
-	}
-
-	nv41_sgdma_flush(nvbe);
-	return 0;
-}
-
-static int
-nv41_sgdma_unbind(struct ttm_tt *ttm)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
-	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	u32 pte = (nvbe->offset >> 12) << 2;
-	u32 cnt = ttm->num_pages;
-
-	while (cnt--) {
-		nv_wo32(pgt, pte, 0x00000000);
-		pte += 4;
-	}
-
-	nv41_sgdma_flush(nvbe);
-	return 0;
-}
-
-static struct ttm_backend_func nv41_sgdma_backend = {
-	.bind = nv41_sgdma_bind,
-	.unbind = nv41_sgdma_unbind,
-	.destroy = nouveau_sgdma_destroy
-};
-
-static void
-nv44_sgdma_flush(struct ttm_tt *ttm)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_device *dev = nvbe->dev;
-
-	nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
-	nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
-	if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
-		NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
-			 nv_rd32(dev, 0x100808));
-	nv_wr32(dev, 0x100808, 0x00000000);
-}
-
-static void
-nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
-{
-	struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
-	dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
-	u32 pte, tmp[4];
-
-	pte = base >> 2;
-	base &= ~0x0000000f;
-
-	tmp[0] = nv_ro32(pgt, base + 0x0);
-	tmp[1] = nv_ro32(pgt, base + 0x4);
-	tmp[2] = nv_ro32(pgt, base + 0x8);
-	tmp[3] = nv_ro32(pgt, base + 0xc);
-	while (cnt--) {
-		u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
-		switch (pte++ & 0x3) {
-		case 0:
-			tmp[0] &= ~0x07ffffff;
-			tmp[0] |= addr;
-			break;
-		case 1:
-			tmp[0] &= ~0xf8000000;
-			tmp[0] |= addr << 27;
-			tmp[1] &= ~0x003fffff;
-			tmp[1] |= addr >> 5;
-			break;
-		case 2:
-			tmp[1] &= ~0xffc00000;
-			tmp[1] |= addr << 22;
-			tmp[2] &= ~0x0001ffff;
-			tmp[2] |= addr >> 10;
-			break;
-		case 3:
-			tmp[2] &= ~0xfffe0000;
-			tmp[2] |= addr << 17;
-			tmp[3] &= ~0x00000fff;
-			tmp[3] |= addr >> 15;
-			break;
-		}
-	}
-
-	tmp[3] |= 0x40000000;
-
-	nv_wo32(pgt, base + 0x0, tmp[0]);
-	nv_wo32(pgt, base + 0x4, tmp[1]);
-	nv_wo32(pgt, base + 0x8, tmp[2]);
-	nv_wo32(pgt, base + 0xc, tmp[3]);
-}
-
-static int
-nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
-	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	dma_addr_t *list = nvbe->ttm.dma_address;
-	u32 pte = mem->start << 2, tmp[4];
-	u32 cnt = ttm->num_pages;
-	int i;
-
-	nvbe->offset = mem->start << PAGE_SHIFT;
-
-	if (pte & 0x0000000c) {
-		u32 max = 4 - ((pte >> 2) & 0x3);
-		u32 part = (cnt > max) ? max : cnt;
-		nv44_sgdma_fill(pgt, list, pte, part);
-		pte += (part << 2);
-		list += part;
-		cnt -= part;
-	}
-
-	while (cnt >= 4) {
-		for (i = 0; i < 4; i++)
-			tmp[i] = *list++ >> 12;
-		nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
-		nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
-		nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
-		nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
-		pte += 0x10;
-		cnt -= 4;
-	}
-
-	if (cnt)
-		nv44_sgdma_fill(pgt, list, pte, cnt);
-
-	nv44_sgdma_flush(ttm);
-	return 0;
-}
-
-static int
-nv44_sgdma_unbind(struct ttm_tt *ttm)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
-	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	u32 pte = (nvbe->offset >> 12) << 2;
-	u32 cnt = ttm->num_pages;
-
-	if (pte & 0x0000000c) {
-		u32 max = 4 - ((pte >> 2) & 0x3);
-		u32 part = (cnt > max) ? max : cnt;
-		nv44_sgdma_fill(pgt, NULL, pte, part);
-		pte += (part << 2);
-		cnt -= part;
-	}
-
-	while (cnt >= 4) {
-		nv_wo32(pgt, pte + 0x0, 0x00000000);
-		nv_wo32(pgt, pte + 0x4, 0x00000000);
-		nv_wo32(pgt, pte + 0x8, 0x00000000);
-		nv_wo32(pgt, pte + 0xc, 0x00000000);
-		pte += 0x10;
-		cnt -= 4;
-	}
-
-	if (cnt)
-		nv44_sgdma_fill(pgt, NULL, pte, cnt);
-
-	nv44_sgdma_flush(ttm);
-	return 0;
-}
-
-static struct ttm_backend_func nv44_sgdma_backend = {
-	.bind = nv44_sgdma_bind,
-	.unbind = nv44_sgdma_unbind,
-	.destroy = nouveau_sgdma_destroy
-};
-
 static int
 nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
@@ -337,82 +115,24 @@ int
 nouveau_sgdma_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gpuobj = NULL;
-	u32 aper_size, align;
-	int ret;
+	u32 aper_size;
 
-	if (dev_priv->card_type >= NV_40)
+	if (dev_priv->card_type >= NV_50)
 		aper_size = 512 * 1024 * 1024;
 	else
 		aper_size = 128 * 1024 * 1024;
 
-	/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
-	 * christmas. The cards before it have them, the cards after
-	 * it have them, why is NV44 so unloved?
-	 */
-	dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
-	if (!dev_priv->gart_info.dummy.page)
-		return -ENOMEM;
-
-	dev_priv->gart_info.dummy.addr =
-		pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
-			     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
-		NV_ERROR(dev, "error mapping dummy page\n");
-		__free_page(dev_priv->gart_info.dummy.page);
-		dev_priv->gart_info.dummy.page = NULL;
-		return -ENOMEM;
-	}
-
 	if (dev_priv->card_type >= NV_50) {
 		dev_priv->gart_info.aper_base = 0;
 		dev_priv->gart_info.aper_size = aper_size;
 		dev_priv->gart_info.type = NOUVEAU_GART_HW;
 		dev_priv->gart_info.func = &nv50_sgdma_backend;
-	} else
-	if (0 && pci_is_pcie(dev->pdev) &&
-	    dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
-		if (nv44_graph_class(dev)) {
-			dev_priv->gart_info.func = &nv44_sgdma_backend;
-			align = 512 * 1024;
-		} else {
-			dev_priv->gart_info.func = &nv41_sgdma_backend;
-			align = 16;
-		}
-
-		ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
-					 NVOBJ_FLAG_ZERO_ALLOC |
-					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
-		if (ret) {
-			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
-			return ret;
-		}
-
-		dev_priv->gart_info.sg_ctxdma = gpuobj;
-		dev_priv->gart_info.aper_base = 0;
-		dev_priv->gart_info.aper_size = aper_size;
-		dev_priv->gart_info.type = NOUVEAU_GART_HW;
 	} else {
-		ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
-					 NVOBJ_FLAG_ZERO_ALLOC |
-					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
-		if (ret) {
-			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
-			return ret;
-		}
-
-		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
-				   (1 << 12) /* PT present */ |
-				   (0 << 13) /* PT *not* linear */ |
-				   (0 << 14) /* RW */ |
-				   (2 << 16) /* PCI */);
-		nv_wo32(gpuobj, 4, aper_size - 1);
-
-		dev_priv->gart_info.sg_ctxdma = gpuobj;
 		dev_priv->gart_info.aper_base = 0;
 		dev_priv->gart_info.aper_size = aper_size;
 		dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
 		dev_priv->gart_info.func = &nv04_sgdma_backend;
+		dev_priv->gart_info.sg_ctxdma = nv04vm_refdma(dev);
 	}
 
 	return 0;
@@ -424,13 +144,6 @@ nouveau_sgdma_takedown(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
 	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
-
-	if (dev_priv->gart_info.dummy.page) {
-		pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
-			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		__free_page(dev_priv->gart_info.dummy.page);
-		dev_priv->gart_info.dummy.page = NULL;
-	}
 }
 
 uint32_t
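
For quick orientation, the surviving nv04 backend above no longer writes GART PTEs into the sg ctxdma itself: bind() just records where the backing pages live (either an imported scatter-gather table or TTM's DMA address array) and hands them to the common VMM mapping helpers, and unbind() undoes that through nouveau_vm_unmap(). Below is a commented restatement of exactly those new bind/unbind paths from the hunks above; the comments are editorial, and the 4KiB (1 << 12) shift used for the mapping size is presumably what the 64KiB PAGE_SIZE note in the changelog refers to.

```c
/* Restatement of the new nv04 paths from the diff above, comments added. */
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;
	u64 size = mem->num_pages << 12;	/* mapping size in bytes (num_pages x 4KiB) */

	if (ttm->sg) {
		/* imported scatter-gather table: let the VMM walk it */
		node->sg = ttm->sg;
		nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
	} else {
		/* plain DMA address array provided by TTM */
		node->pages = nvbe->ttm.dma_address;
		nouveau_vm_map_sg(&node->vma[0], 0, size, node);
	}

	nvbe->node = node;	/* remembered so unbind() can find the mapping */
	return 0;
}

static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	/* tear down whatever bind() mapped through the VMM */
	nouveau_vm_unmap(&nvbe->node->vma[0]);
	return 0;
}
```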