Commit 5e120f6e authored by Ben Skeggs

drm/nouveau/fence: convert to exec engine, and improve channel sync



Now have a somewhat simpler semaphore sync implementation for nv17:nv84,
and have switched to using semaphores as fences on nv84+, making use of
the hardware's >= acquire operation.
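
As a rough illustration of the new nv84+ scheme (not part of this patch, using assumed helper and variable names): the emitting channel writes its fence sequence to a semaphore with the WRITE_LONG trigger, and any waiter can then poll the same address with the >= acquire trigger, proceeding as soon as the written value reaches its target.

    /* Sketch: write `sequence` to the semaphore at `offset` from `chan`.
     * On pre-NVC0 chips a DMA_SEMAPHORE object must be selected first (as
     * the removed pre-NVC0 paths further down do); Fermi would use
     * BEGIN_NVC0 instead of BEGIN_NV04.
     */
    static int
    example_sema_write(struct nouveau_channel *chan, u64 offset, u32 sequence)
    {
            int ret = RING_SPACE(chan, 5);
            if (ret)
                    return ret;
            BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
            OUT_RING (chan, upper_32_bits(offset));
            OUT_RING (chan, lower_32_bits(offset));
            OUT_RING (chan, sequence);
            OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
            FIRE_RING(chan);
            return 0;
    }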
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent d375e7d5
@@ -18,6 +18,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv50_fb.o nvc0_fb.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
nve0_fifo.o \
nv04_fence.o nv10_fence.o nv84_fence.o nvc0_fence.o \
nv04_software.o nv50_software.o nvc0_software.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o nvc0_graph.o nve0_graph.o \
@@ -119,6 +119,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
struct drm_file *file_priv,
uint32_t vram_handle, uint32_t gart_handle)
{
struct nouveau_exec_engine *fence = nv_engine(dev, NVOBJ_ENGINE_FENCE);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
@@ -157,8 +158,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
}
NV_DEBUG(dev, "initialising channel %d\n", chan->id);
INIT_LIST_HEAD(&chan->fence.pending);
spin_lock_init(&chan->fence.lock);
/* setup channel's memory and vm */
ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
@@ -188,7 +187,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
chan->user_put = 0x40;
chan->user_get = 0x44;
if (dev_priv->card_type >= NV_50)
chan->user_get_hi = 0x60;
chan->user_get_hi = 0x60;
/* disable the fifo caches */
pfifo->reassign(dev, false);
@@ -211,7 +210,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
OUT_RING (chan, 0x00000000);
FIRE_RING(chan);
ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
if (ret) {
@@ -219,7 +217,21 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
return ret;
}
ret = nouveau_fence_channel_init(chan);
if (dev_priv->card_type < NV_C0) {
ret = RING_SPACE(chan, 2);
if (ret) {
nouveau_channel_put(&chan);
return ret;
}
BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
OUT_RING (chan, NvSw);
FIRE_RING (chan);
}
FIRE_RING(chan);
ret = fence->context_new(chan, NVOBJ_ENGINE_FENCE);
if (ret) {
nouveau_channel_put(&chan);
return ret;
@@ -291,12 +303,6 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
/* give it chance to idle */
nouveau_channel_idle(chan);
/* ensure all outstanding fences are signaled. they should be if the
* above attempts at idling were OK, but if we failed this'll tell TTM
* we're done with the buffers.
*/
nouveau_fence_channel_fini(chan);
/* boot it off the hardware */
pfifo->reassign(dev, false);
@@ -305,6 +311,9 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
if (chan->engctx[i])
dev_priv->eng[i]->context_del(chan, i);
/*XXX: clean this up later, order is important */
if (i == NVOBJ_ENGINE_FENCE)
pfifo->destroy_context(chan);
}
pfifo->reassign(dev, true);
@@ -367,18 +376,14 @@ nouveau_channel_idle(struct nouveau_channel *chan)
struct nouveau_fence *fence = NULL;
int ret;
nouveau_fence_update(chan);
if (chan->fence.sequence != chan->fence.sequence_ack) {
ret = nouveau_fence_new(chan, &fence);
if (!ret) {
ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence);
}
if (ret)
NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
ret = nouveau_fence_new(chan, &fence);
if (!ret) {
ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence);
}
if (ret)
NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
}
/* cleans up all the fifos from file_priv */
@@ -67,8 +67,6 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
nvchan_rd32(chan, 0x8c));
}
seq_printf(m, "last fence : %d\n", chan->fence.sequence);
seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
return 0;
}
@@ -461,7 +461,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
OUT_RING (chan, 0x00000000);
} else {
BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
OUT_RING (chan, ++chan->fence.sequence);
OUT_RING (chan, 0);
BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
}
FIRE_RING (chan);
@@ -165,6 +165,7 @@ enum nouveau_flags {
#define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG
#define NVOBJ_ENGINE_BSP 6
#define NVOBJ_ENGINE_VP 7
#define NVOBJ_ENGINE_FENCE 14
#define NVOBJ_ENGINE_DISPLAY 15
#define NVOBJ_ENGINE_NR 16
@@ -234,17 +235,6 @@ struct nouveau_channel {
uint32_t user_get_hi;
uint32_t user_put;
/* Fencing */
struct {
/* lock protects the pending list only */
spinlock_t lock;
struct list_head pending;
uint32_t sequence;
uint32_t sequence_ack;
atomic_t last_sequence_irq;
struct nouveau_vma vma;
} fence;
/* DMA push buffer */
struct nouveau_gpuobj *pushbuf;
struct nouveau_bo *pushbuf_bo;
@@ -1443,13 +1433,6 @@ extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
struct nouveau_vma *);
extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
/* nouveau_fence.c */
int nouveau_fence_init(struct drm_device *);
void nouveau_fence_fini(struct drm_device *);
int nouveau_fence_channel_init(struct nouveau_channel *);
void nouveau_fence_channel_fini(struct nouveau_channel *);
void nouveau_fence_work(struct nouveau_fence *fence,
void (*work)(void *priv, bool signalled), void *priv);
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, int size, int align,
uint32_t domain, uint32_t tile_mode,
@@ -1746,6 +1729,7 @@ nv44_graph_class(struct drm_device *dev)
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
#define NV10_SUBCHAN_REF_CNT 0x00000050
@@ -36,85 +36,71 @@
#include "nouveau_software.h"
#include "nouveau_dma.h"
#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
struct nouveau_fence *fence, *fnext;
spin_lock(&fctx->lock);
list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
if (fence->work)
fence->work(fence->priv, false);
fence->channel = NULL;
list_del(&fence->head);
nouveau_fence_unref(&fence);
}
spin_unlock(&fctx->lock);
}
void
nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
{
INIT_LIST_HEAD(&fctx->pending);
spin_lock_init(&fctx->lock);
}
void
nouveau_fence_update(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct nouveau_fence *tmp, *fence;
uint32_t sequence;
spin_lock(&chan->fence.lock);
/* Fetch the last sequence if the channel is still up and running */
if (likely(!list_empty(&chan->fence.pending))) {
if (USE_REFCNT(dev))
sequence = nvchan_rd32(chan, 0x48);
else
sequence = atomic_read(&chan->fence.last_sequence_irq);
if (chan->fence.sequence_ack == sequence)
goto out;
chan->fence.sequence_ack = sequence;
}
struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
struct nouveau_fence *fence, *fnext;
list_for_each_entry_safe(fence, tmp, &chan->fence.pending, head) {
if (fence->sequence > chan->fence.sequence_ack)
spin_lock(&fctx->lock);
list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
if (priv->read(chan) < fence->sequence)
break;
fence->channel = NULL;
list_del(&fence->head);
if (fence->work)
fence->work(fence->priv, true);
fence->channel = NULL;
list_del(&fence->head);
nouveau_fence_unref(&fence);
}
out:
spin_unlock(&chan->fence.lock);
spin_unlock(&fctx->lock);
}
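
The loop above now asks the fence engine for the last completed sequence through priv->read(chan). A minimal sketch of that hook (assumed name, not part of this patch): on nv10-class hardware it can simply return the channel's reference-count readback, which is exactly what the removed USE_REFCNT path read directly.

    static u32
    example_fence_read(struct nouveau_channel *chan)
    {
            /* per-channel REF_CNT readback, as the old inline code did */
            return nvchan_rd32(chan, 0x48);
    }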
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
int ret;
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
nouveau_fence_update(chan);
BUG_ON(chan->fence.sequence ==
chan->fence.sequence_ack - 1);
}
fence->sequence = ++chan->fence.sequence;
fence->channel = chan;
kref_get(&fence->kref);
spin_lock(&chan->fence.lock);
list_add_tail(&fence->head, &chan->fence.pending);
spin_unlock(&chan->fence.lock);
fence->channel = chan;
fence->timeout = jiffies + (3 * DRM_HZ);
fence->sequence = ++fctx->sequence;
if (USE_REFCNT(dev)) {
if (dev_priv->card_type < NV_C0)
BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
else
BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
} else {
BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
ret = priv->emit(fence);
if (!ret) {
kref_get(&fence->kref);
spin_lock(&fctx->lock);
list_add_tail(&fence->head, &fctx->pending);
spin_unlock(&fctx->lock);
}
OUT_RING (chan, fence->sequence);
FIRE_RING(chan);
fence->timeout = jiffies + 3 * DRM_HZ;
return 0;
return ret;
}
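
Emission is likewise delegated to the engine's emit() hook. A minimal sketch of a pre-NVC0 hook (assumed name, not part of this patch), mirroring the inline code removed above:

    static int
    example_fence_emit(struct nouveau_fence *fence)
    {
            struct nouveau_channel *chan = fence->channel;
            int ret = RING_SPACE(chan, 2);
            if (ret)
                    return ret;
            /* write the new sequence through the reference-count method */
            BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
            OUT_RING (chan, fence->sequence);
            FIRE_RING(chan);
            return 0;
    }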
bool
@@ -158,6 +144,23 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
return ret;
}
int
nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
struct nouveau_channel *prev = fence ? fence->channel : NULL;
struct drm_device *dev = chan->dev;
struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
int ret = 0;
if (unlikely(prev && prev != chan && !nouveau_fence_done(fence))) {
ret = priv->sync(fence, chan);
if (unlikely(ret))
ret = nouveau_fence_wait(fence, true, false);
}
return ret;
}
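
Cross-channel sync is now a single call into the engine's sync() hook, with a CPU-side wait as fallback. On nv84+ the hook can be built on the hardware >= acquire; a sketch (assumed names, not part of this patch) of the core methods a waiting channel would emit against the emitting channel's semaphore address:

    static int
    example_sema_acquire_geq(struct nouveau_channel *chan, u64 offset, u32 sequence)
    {
            int ret = RING_SPACE(chan, 5);
            if (ret)
                    return ret;
            BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
            OUT_RING (chan, upper_32_bits(offset));
            OUT_RING (chan, lower_32_bits(offset));
            OUT_RING (chan, sequence);
            /* proceed once the semaphore value is >= sequence */
            OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
            FIRE_RING(chan);
            return 0;
    }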
static void
nouveau_fence_del(struct kref *kref)
{
@@ -186,6 +189,9 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
struct nouveau_fence *fence;
int ret = 0;
if (unlikely(!chan->engctx[NVOBJ_ENGINE_FENCE]))
return -ENODEV;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return -ENOMEM;
@@ -200,359 +206,3 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
*pfence = fence;
return ret;
}
struct nouveau_semaphore {
struct kref ref;
struct drm_device *dev;
struct drm_mm_node *mem;
};
void
nouveau_fence_work(struct nouveau_fence *fence,
void (*work)(void *priv, bool signalled),
void *priv)
{
if (!fence->channel) {
work(priv, true);
} else {
fence->work = work;
fence->priv = priv;
}
}
static struct nouveau_semaphore *
semaphore_alloc(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_semaphore *sema;
int size = (dev_priv->chipset < 0x84) ? 4 : 16;
int ret, i;
if (!USE_SEMA(dev))
return NULL;
sema = kmalloc(sizeof(*sema), GFP_KERNEL);
if (!sema)
goto fail;
ret = drm_mm_pre_get(&dev_priv->fence.heap);
if (ret)
goto fail;
spin_lock(&dev_priv->fence.lock);
sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
if (sema->mem)
sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
spin_unlock(&dev_priv->fence.lock);
if (!sema->mem)
goto fail;
kref_init(&sema->ref);
sema->dev = dev;
for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
return sema;
fail:
kfree(sema);
return NULL;
}
static void
semaphore_free(struct kref *ref)
{
struct nouveau_semaphore *sema =
container_of(ref, struct nouveau_semaphore, ref);
struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
spin_lock(&dev_priv->fence.lock);
drm_mm_put_block(sema->mem);
spin_unlock(&dev_priv->fence.lock);
kfree(sema);
}
static void
semaphore_work(void *priv, bool signalled)
{
struct nouveau_semaphore *sema = priv;
struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
if (unlikely(!signalled))
nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
kref_put(&sema->ref, semaphore_free);
}
static int
semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *fence = NULL;
u64 offset = chan->fence.vma.offset + sema->mem->start;
int ret;
if (dev_priv->chipset < 0x84) {
ret = RING_SPACE(chan, 4);
if (ret)
return ret;
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
OUT_RING (chan, NvSema);
OUT_RING (chan, offset);
OUT_RING (chan, 1);
} else
if (dev_priv->chipset < 0xc0) {
ret = RING_SPACE(chan, 7);
if (ret)
return ret;
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
OUT_RING (chan, chan->vram_handle);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 1); /* ACQUIRE_EQ */
} else {
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
}
/* Delay semaphore destruction until its work is done */
ret = nouveau_fence_new(chan, &fence);
if (ret)
return ret;
kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
nouveau_fence_unref(&fence);
return 0;
}
static int
semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *fence = NULL;
u64 offset = chan->fence.vma.offset + sema->mem->start;
int ret;
if (dev_priv->chipset < 0x84) {
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvSema);
OUT_RING (chan, offset);
BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
OUT_RING (chan, 1);
} else
if (dev_priv->chipset < 0xc0) {
ret = RING_SPACE(chan, 7);
if (ret)
return ret;
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
OUT_RING (chan, chan->vram_handle);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 2); /* RELEASE */
} else {
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 0x1002); /* RELEASE */
}
/* Delay semaphore destruction until its work is done */
ret = nouveau_fence_new(chan, &fence);
if (ret)
return ret;
kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
nouveau_fence_unref(&fence);
return 0;
}
int
nouveau_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *wchan)
{
struct nouveau_channel *chan;
struct drm_device *dev = wchan->dev;
struct nouveau_semaphore *sema;
int ret = 0;
chan = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
if (likely(!chan || chan == wchan || nouveau_fence_done(fence)))
goto out;
sema = semaphore_alloc(dev);
if (!sema) {
/* Early card or broken userspace, fall back to
* software sync. */
ret = nouveau_fence_wait(fence, true, false);
goto out;
}
/* try to take chan's mutex, if we can't take it right away
* we have to fallback to software sync to prevent locking
* order issues
*/
if (!mutex_trylock(&chan->mutex)) {
ret = nouveau_fence_wait(fence, true, false);
goto out_unref;
}
/* Make wchan wait until it gets signalled */
ret = semaphore_acquire(wchan, sema);
if (ret)
goto out_unlock;
/* Signal the semaphore from chan */
ret = semaphore_release(chan, sema);
out_unlock:
mutex_unlock(&chan->mutex);
out_unref:
kref_put(&sema->ref, semaphore_free);
out:
if (chan)
nouveau_channel_put_unlocked(&chan);
return ret;
}
int
nouveau_fence_channel_init(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *obj = NULL;
int ret;
if (dev_priv->card_type < NV_C0) {
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
OUT_RING (chan, NvSw);
FIRE_RING (chan);
}
/* Setup area of memory shared between all channels for x-chan sync */
if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;