Commit 649bf3ca authored by Jerome Glisse, committed by Dave Airlie

drm/ttm: merge ttm_backend and ttm_tt V5



ttm_backend will only exist with a ttm_tt, and ttm_tt
will only be of interest when bound to a backend. Merge them
to avoid code and data duplication.

V2 Rebase on top of memory accounting overhaul
V3 Rebase on top of more memory accounting changes
V4 Rebase on top of no memory accounting changes (where/when is my
   DeLorean when I need it?)
V5 make sure ttm is unbound before destroying, change commit
   message on suggestion from Tormod Volden
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
parent 822c4d9a
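
For orientation, the merged layout that the hunks below implement looks roughly like the following sketch. It mirrors the new radeon code further down: an embedded struct ttm_tt plus a struct ttm_backend_func providing bind/unbind/destroy, allocated by the driver's ttm_tt_create hook. This is an illustrative sketch only, not code from the commit; the my_* names are hypothetical, and it assumes the usual TTM driver headers (ttm_bo_driver.h) plus linux/slab.h.

/* Hypothetical driver-side sketch of the merged ttm_tt/backend object. */
struct my_ttm_tt {
	struct ttm_tt ttm;	/* embedded; nouveau casts directly, radeon uses container_of */
	u64 offset;		/* driver-private GART offset */
};

static int my_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct my_ttm_tt *tt = container_of(ttm, struct my_ttm_tt, ttm);

	tt->offset = (u64)mem->start << PAGE_SHIFT;
	/* program GART entries from ttm->pages / ttm->dma_address here */
	return 0;
}

static int my_unbind(struct ttm_tt *ttm)
{
	/* clear the GART entries written by my_bind() */
	return 0;
}

static void my_destroy(struct ttm_tt *ttm)
{
	kfree(container_of(ttm, struct my_ttm_tt, ttm));
}

static struct ttm_backend_func my_backend_func = {
	.bind    = &my_bind,
	.unbind  = &my_unbind,
	.destroy = &my_destroy,
};

/* The ttm_bo_driver hook now allocates and initializes the ttm_tt itself. */
static struct ttm_tt *my_ttm_tt_create(struct ttm_bo_device *bdev,
				       unsigned long size, uint32_t page_flags,
				       struct page *dummy_read_page)
{
	struct my_ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;
	tt->ttm.func = &my_backend_func;
	if (ttm_tt_init(&tt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(tt);
		return NULL;
	}
	return &tt->ttm;
}

With this shape the separate populate/clear callbacks disappear: the page and DMA-address arrays live in struct ttm_tt itself, which is why the hunks below replace nvbe->pages and nvbe->nr_pages with ttm->pages, ttm->dma_address, and ttm->num_pages.
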
@@ -343,8 +343,10 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
*mem = val;
}
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct drm_device *dev = dev_priv->dev;
@@ -352,11 +354,13 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
case NOUVEAU_GART_AGP:
return ttm_agp_backend_init(bdev, dev->agp->bridge);
return ttm_agp_tt_create(bdev, dev->agp->bridge,
size, page_flags, dummy_read_page);
#endif
case NOUVEAU_GART_PDMA:
case NOUVEAU_GART_HW:
return nouveau_sgdma_init_ttm(dev);
return nouveau_sgdma_create_ttm(bdev, size, page_flags,
dummy_read_page);
default:
NV_ERROR(dev, "Unknown GART type %d\n",
dev_priv->gart_info.type);
@@ -1045,7 +1049,7 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
}
struct ttm_bo_driver nouveau_bo_driver = {
.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
.ttm_tt_create = &nouveau_ttm_tt_create,
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.evict_flags = nouveau_bo_evict_flags,
@@ -1000,7 +1000,10 @@ extern int nouveau_sgdma_init(struct drm_device *);
extern void nouveau_sgdma_takedown(struct drm_device *);
extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
uint32_t offset);
extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
unsigned long size,
uint32_t page_flags,
struct page *dummy_read_page);
/* nouveau_debugfs.c */
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
@@ -8,44 +8,23 @@
#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
struct nouveau_sgdma_be {
struct ttm_backend backend;
struct ttm_tt ttm;
struct drm_device *dev;
dma_addr_t *pages;
unsigned nr_pages;
bool unmap_pages;
u64 offset;
bool bound;
};
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
struct page **pages, struct page *dummy_read_page,
dma_addr_t *dma_addrs)
nouveau_sgdma_dma_map(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
int i;
NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
nvbe->pages = dma_addrs;
nvbe->nr_pages = num_pages;
nvbe->unmap_pages = true;
/* this code path isn't called and is incorrect anyways */
if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
nvbe->unmap_pages = false;
return 0;
}
for (i = 0; i < num_pages; i++) {
nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
nvbe->nr_pages = --i;
be->func->clear(be);
for (i = 0; i < ttm->num_pages; i++) {
ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
return -EFAULT;
}
}
@@ -54,53 +33,52 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
}
static void
nouveau_sgdma_clear(struct ttm_backend *be)
nouveau_sgdma_dma_unmap(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
int i;
if (nvbe->bound)
be->func->unbind(be);
if (nvbe->unmap_pages) {
while (nvbe->nr_pages--) {
pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
for (i = 0; i < ttm->num_pages; i++) {
if (ttm->dma_address[i]) {
pci_unmap_page(dev->pdev, ttm->dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
ttm->dma_address[i] = 0;
}
}
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
if (be) {
if (ttm) {
NV_DEBUG(nvbe->dev, "\n");
if (nvbe) {
if (nvbe->pages)
be->func->clear(be);
kfree(nvbe);
}
kfree(nvbe);
}
}
static int
nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
unsigned i, j, pte;
int r;
NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
r = nouveau_sgdma_dma_map(ttm);
if (r) {
return r;
}
nvbe->offset = mem->start << PAGE_SHIFT;
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
for (i = 0; i < nvbe->nr_pages; i++) {
dma_addr_t dma_offset = nvbe->pages[i];
for (i = 0; i < ttm->num_pages; i++) {
dma_addr_t dma_offset = ttm->dma_address[i];
uint32_t offset_l = lower_32_bits(dma_offset);
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -109,14 +87,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
}
}
nvbe->bound = true;
return 0;
}
static int
nv04_sgdma_unbind(struct ttm_backend *be)
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -124,22 +101,20 @@ nv04_sgdma_unbind(struct ttm_backend *be)
NV_DEBUG(dev, "\n");
if (!nvbe->bound)
if (ttm->state != tt_bound)
return 0;
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
for (i = 0; i < nvbe->nr_pages; i++) {
for (i = 0; i < ttm->num_pages; i++) {
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
}
nvbe->bound = false;
nouveau_sgdma_dma_unmap(ttm);
return 0;
}
static struct ttm_backend_func nv04_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
.bind = nv04_sgdma_bind,
.unbind = nv04_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
@@ -158,16 +133,21 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
}
static int
nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
dma_addr_t *list = nvbe->pages;
dma_addr_t *list = ttm->dma_address;
u32 pte = mem->start << 2;
u32 cnt = nvbe->nr_pages;
u32 cnt = ttm->num_pages;
int r;
nvbe->offset = mem->start << PAGE_SHIFT;
r = nouveau_sgdma_dma_map(ttm);
if (r) {
return r;
}
while (cnt--) {
nv_wo32(pgt, pte, (*list++ >> 7) | 1);
@@ -175,18 +155,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
}
nv41_sgdma_flush(nvbe);
nvbe->bound = true;
return 0;
}
static int
nv41_sgdma_unbind(struct ttm_backend *be)
nv41_sgdma_unbind(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
u32 cnt = nvbe->nr_pages;
u32 cnt = ttm->num_pages;
while (cnt--) {
nv_wo32(pgt, pte, 0x00000000);
@@ -194,24 +173,23 @@ nv41_sgdma_unbind(struct ttm_backend *be)
}
nv41_sgdma_flush(nvbe);
nvbe->bound = false;
nouveau_sgdma_dma_unmap(ttm);
return 0;
}
static struct ttm_backend_func nv41_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
.bind = nv41_sgdma_bind,
.unbind = nv41_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static void
nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
nv44_sgdma_flush(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
@@ -270,17 +248,21 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
}
static int
nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
dma_addr_t *list = nvbe->pages;
dma_addr_t *list = ttm->dma_address;
u32 pte = mem->start << 2, tmp[4];
u32 cnt = nvbe->nr_pages;
int i;
u32 cnt = ttm->num_pages;
int i, r;
nvbe->offset = mem->start << PAGE_SHIFT;
r = nouveau_sgdma_dma_map(ttm);
if (r) {
return r;
}
if (pte & 0x0000000c) {
u32 max = 4 - ((pte >> 2) & 0x3);
@@ -305,19 +287,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
if (cnt)
nv44_sgdma_fill(pgt, list, pte, cnt);
nv44_sgdma_flush(nvbe);
nvbe->bound = true;
nv44_sgdma_flush(ttm);
return 0;
}
static int
nv44_sgdma_unbind(struct ttm_backend *be)
nv44_sgdma_unbind(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
u32 cnt = nvbe->nr_pages;
u32 cnt = ttm->num_pages;
if (pte & 0x0000000c) {
u32 max = 4 - ((pte >> 2) & 0x3);
@@ -339,55 +320,53 @@ nv44_sgdma_unbind(struct ttm_backend *be)
if (cnt)
nv44_sgdma_fill(pgt, NULL, pte, cnt);
nv44_sgdma_flush(nvbe);
nvbe->bound = false;
nv44_sgdma_flush(ttm);
nouveau_sgdma_dma_unmap(ttm);
return 0;
}
static struct ttm_backend_func nv44_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
.bind = nv44_sgdma_bind,
.unbind = nv44_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct nouveau_mem *node = mem->mm_node;
int r;
/* noop: bound in move_notify() */
node->pages = nvbe->pages;
nvbe->pages = (dma_addr_t *)node;
nvbe->bound = true;
r = nouveau_sgdma_dma_map(ttm);
if (r) {
return r;
}
node->pages = ttm->dma_address;
return 0;
}
static int
nv50_sgdma_unbind(struct ttm_backend *be)
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
/* noop: unbound in move_notify() */
nvbe->pages = node->pages;
node->pages = NULL;
nvbe->bound = false;
nouveau_sgdma_dma_unmap(ttm);
return 0;
}
static struct ttm_backend_func nv50_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
.bind = nv50_sgdma_bind,
.unbind = nv50_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct drm_device *dev = dev_priv->dev;
struct nouveau_sgdma_be *nvbe;
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -395,9 +374,12 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
return NULL;
nvbe->dev = dev;
nvbe->ttm.func = dev_priv->gart_info.func;
nvbe->backend.func = dev_priv->gart_info.func;
return &nvbe->backend;
if (ttm_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
return NULL;
}
return &nvbe->ttm;
}
int
@@ -114,24 +114,6 @@ static void radeon_ttm_global_fini(struct radeon_device *rdev)
}
}
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
static struct ttm_backend*
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
struct radeon_device *rdev;
rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
} else
#endif
{
return radeon_ttm_backend_create(rdev);
}
}
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -515,8 +497,90 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}
/*
* TTM backend functions.
*/
struct radeon_ttm_tt {
struct ttm_tt ttm;
struct radeon_device *rdev;
u64 offset;
};
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
struct radeon_ttm_tt *gtt;
int r;
gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
if (!ttm->num_pages) {
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
r = radeon_gart_bind(gtt->rdev, gtt->offset,
ttm->num_pages, ttm->pages, ttm->dma_address);
if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
ttm->num_pages, (unsigned)gtt->offset);
return r;
}
return 0;
}
static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt;
gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
return 0;
}
static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt;
gtt = container_of(ttm, struct radeon_ttm_tt, ttm);
kfree(gtt);
}
static struct ttm_backend_func radeon_backend_func = {
.bind = &radeon_ttm_backend_bind,
.unbind = &radeon_ttm_backend_unbind,
.destroy = &radeon_ttm_backend_destroy,
};
struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt;
rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
size, page_flags, dummy_read_page);
}
#endif
gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
return NULL;
}
gtt->ttm.func = &radeon_backend_func;
gtt->rdev = rdev;
if (ttm_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
return NULL;
}
return &gtt->ttm;
}
static struct ttm_bo_driver radeon_bo_driver = {
.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
.ttm_tt_create = &radeon_ttm_tt_create,
.invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
.evict_flags = &radeon_evict_flags,
@@ -680,123 +744,6 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
}
/*
* TTM backend functions.
*/
struct radeon_ttm_backend {
struct ttm_backend backend;
struct radeon_device *rdev;
unsigned long num_pages;
struct page **pages;
struct page *dummy_read_page;
dma_addr_t *dma_addrs;
bool populated;
bool bound;
unsigned offset;
};
static int radeon_ttm_backend_populate(struct ttm_backend *backend,
unsigned long num_pages,
struct page **pages,
struct page *dummy_read_page,
dma_addr_t *dma_addrs)
{
struct radeon_ttm_backend *gtt;
gtt = container_of(backend, struct radeon_ttm_backend, backend);
gtt->pages = pages;
gtt->dma_addrs = dma_addrs;
gtt->num_pages = num_pages;
gtt->dummy_read_page = dummy_read_page;
gtt->populated = true;
return 0;
}
static void radeon_ttm_backend_clear(struct ttm_backend *backend)
{
struct radeon_ttm_backend *gtt;
gtt = container_of(backend, struct radeon_ttm_backend, backend);
gtt->pages = NULL;
gtt->dma_addrs = NULL;
gtt->num_pages = 0;
gtt->dummy_read_page = NULL;
gtt->populated = false;
gtt->bound = false;
}