Commit 7465280c authored by Alex Deucher, committed by Dave Airlie

drm/radeon/kms: add support for multiple fence queues v2



Groundwork for supporting multiple CP ring buffers, async DMA
engines and UVD.  We still need a way to synchronize
between engines.

v2: initialize unused fence driver rings to avoid issues during
    suspend/unload
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 851a6bd9
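
The diff below touches many files, but the shape of the new interface is small: radeon_fence_driver_init() now takes the number of rings (1 for most ASICs, 3 on Cayman), every fence carries a ring index, and the per-ring fence state lives in a fence_drv[] array protected by a single rdev->fence_lock. A minimal caller sketch of the reworked API follows; the helper name and surrounding context are made up for illustration and are not part of this commit:

/*
 * Hypothetical usage sketch (not part of this commit): create, emit and
 * wait on a fence against an explicit ring index.  Assumes "rdev" is an
 * already-initialized radeon_device and work has been queued on the ring.
 */
static int example_gfx_fence(struct radeon_device *rdev)
{
	struct radeon_fence *fence = NULL;
	int r;

	/* fences are now created against a specific ring index */
	r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r)
		return r;

	/* emit the fence behind the previously submitted work */
	r = radeon_fence_emit(rdev, fence);
	if (r) {
		radeon_fence_unref(&fence);
		return r;
	}

	/* waiting is unchanged; the fence remembers which ring it belongs to */
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
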
@@ -3018,11 +3018,11 @@ restart_ih:
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
radeon_fence_process(rdev);
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
radeon_fence_process(rdev);
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
@@ -3221,7 +3221,7 @@ int evergreen_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
r = radeon_fence_driver_init(rdev, 1);
if (r)
return r;
/* initialize AGP */
@@ -1484,7 +1484,7 @@ int cayman_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
r = radeon_fence_driver_init(rdev, 3);
if (r)
return r;
/* initialize memory controller */
@@ -739,7 +739,7 @@ int r100_irq_process(struct radeon_device *rdev)
while (status) {
/* SW interrupt */
if (status & RADEON_SW_INT_TEST) {
radeon_fence_process(rdev);
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
/* gui idle interrupt */
if (status & RADEON_GUI_IDLE_STAT) {
@@ -826,7 +826,7 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
radeon_ring_write(rdev, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(rdev, fence->seq);
radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
@@ -4048,7 +4048,7 @@ int r100_init(struct radeon_device *rdev)
/* initialize VRAM */
r100_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
r = radeon_fence_driver_init(rdev, 1);
if (r)
return r;
r = radeon_irq_kms_init(rdev);
@@ -198,7 +198,7 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
radeon_ring_write(rdev, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(rdev, fence->seq);
radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
@@ -1518,7 +1518,7 @@ int r300_init(struct radeon_device *rdev)
/* initialize memory controller */
r300_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
r = radeon_fence_driver_init(rdev, 1);
if (r)
return r;
r = radeon_irq_kms_init(rdev);
@@ -387,7 +387,7 @@ int r420_init(struct radeon_device *rdev)
r300_mc_init(rdev);
r420_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
r = radeon_fence_driver_init(rdev, 1);
if (r) {
return r;
}
@@ -278,7 +278,7 @@ int r520_init(struct radeon_device *rdev)
r520_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
r = radeon_fence_driver_init(rdev, 1);
if (r)
return r;
r = radeon_irq_kms_init(rdev);
@@ -2316,7 +2316,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
{
if (rdev->wb.use_event) {
u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
(u64)(rdev->fence_drv[fence->ring].scratch_reg - rdev->scratch.reg_base);
/* flush read cache over gart */
radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
@@ -2349,7 +2349,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
@@ -2575,7 +2575,7 @@ int r600_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
r = radeon_fence_driver_init(rdev, 1);
if (r)
return r;
if (rdev->flags & RADEON_IS_AGP) {
@@ -3459,11 +3459,11 @@ restart_ih:
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
radeon_fence_process(rdev);
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
radeon_fence_process(rdev);
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
@@ -197,7 +197,6 @@ struct radeon_fence_driver {
unsigned long last_jiffies;
unsigned long last_timeout;
wait_queue_head_t queue;
rwlock_t lock;
struct list_head created;
struct list_head emitted;
struct list_head signaled;
@@ -212,17 +211,19 @@ struct radeon_fence {
uint32_t seq;
bool emitted;
bool signaled;
/* RB, DMA, etc. */
int ring;
};
int radeon_fence_driver_init(struct radeon_device *rdev);
int radeon_fence_driver_init(struct radeon_device *rdev, int num_rings);
void radeon_fence_driver_fini(struct radeon_device *rdev);
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
void radeon_fence_process(struct radeon_device *rdev);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next(struct radeon_device *rdev);
int radeon_fence_wait_last(struct radeon_device *rdev);
int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
@@ -459,6 +460,18 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
/*
* CP & ring.
*/
/* max number of rings */
#define RADEON_NUM_RINGS 3
/* internal ring indices */
/* r1xx+ has gfx CP ring */
#define RADEON_RING_TYPE_GFX_INDEX 0
/* cayman has 2 compute CP rings */
#define CAYMAN_RING_TYPE_CP1_INDEX 1
#define CAYMAN_RING_TYPE_CP2_INDEX 2
struct radeon_ib {
struct list_head list;
unsigned idx;
@@ -1235,7 +1248,8 @@ struct radeon_device {
struct radeon_mode_info mode_info;
struct radeon_scratch scratch;
struct radeon_mman mman;
struct radeon_fence_driver fence_drv;
rwlock_t fence_lock;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
struct radeon_cp cp;
/* cayman compute rings */
struct radeon_cp cp1;
@@ -43,7 +43,7 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
r = radeon_fence_create(rdev, &fence);
r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
if (r)
return r;
@@ -725,7 +725,7 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->vram_mutex);
rwlock_init(&rdev->fence_drv.lock);
rwlock_init(&rdev->fence_lock);
INIT_LIST_HEAD(&rdev->gem.objects);
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
@@ -857,7 +857,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
struct radeon_device *rdev;
struct drm_crtc *crtc;
struct drm_connector *connector;
int r;
int i, r;
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
@@ -896,7 +896,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
/* evict vram memory */
radeon_bo_evict_vram(rdev);
/* wait for gpu to finish processing current batch */
radeon_fence_wait_last(rdev);
for (i = 0; i < RADEON_NUM_RINGS; i++)
radeon_fence_wait_last(rdev, i);
radeon_save_bios_scratch_regs(rdev);
@@ -40,32 +40,37 @@
#include "radeon.h"
#include "radeon_trace.h"
static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
u32 scratch_index;
if (rdev->wb.enabled) {
u32 scratch_index;
if (rdev->wb.use_event)
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
scratch_index = R600_WB_EVENT_OFFSET +
rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
else
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
scratch_index = RADEON_WB_SCRATCH_OFFSET +
rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
} else
WREG32(rdev->fence_drv.scratch_reg, seq);
WREG32(rdev->fence_drv[ring].scratch_reg, seq);
}
static u32 radeon_fence_read(struct radeon_device *rdev)
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
u32 seq;
u32 seq = 0;
u32 scratch_index;
if (rdev->wb.enabled) {
u32 scratch_index;
if (rdev->wb.use_event)
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
scratch_index = R600_WB_EVENT_OFFSET +
rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
else
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
scratch_index = RADEON_WB_SCRATCH_OFFSET +
rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
} else
seq = RREG32(rdev->fence_drv.scratch_reg);
seq = RREG32(rdev->fence_drv[ring].scratch_reg);
return seq;
}
@@ -73,28 +78,28 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
unsigned long irq_flags;
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
write_lock_irqsave(&rdev->fence_lock, irq_flags);
if (fence->emitted) {
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
if (!rdev->cp.ready)
/* FIXME: cp is not running assume everythings is done right
* away
*/
radeon_fence_write(rdev, fence->seq);
radeon_fence_write(rdev, fence->seq, fence->ring);
else
radeon_fence_ring_emit(rdev, fence);
trace_radeon_fence_emit(rdev->ddev, fence->seq);
fence->emitted = true;
list_move_tail(&fence->list, &rdev->fence_drv.emitted);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
static bool radeon_fence_poll_locked(struct radeon_device *rdev)
static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
{
struct radeon_fence *fence;
struct list_head *i, *n;
@@ -102,34 +107,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
bool wake = false;
unsigned long cjiffies;
seq = radeon_fence_read(rdev);
if (seq != rdev->fence_drv.last_seq) {
rdev->fence_drv.last_seq = seq;
rdev->fence_drv.last_jiffies = jiffies;
rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
seq = radeon_fence_read(rdev, ring);
if (seq != rdev->fence_drv[ring].last_seq) {
rdev->fence_drv[ring].last_seq = seq;
rdev->fence_drv[ring].last_jiffies = jiffies;
rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
} else {
cjiffies = jiffies;
if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
cjiffies -= rdev->fence_drv.last_jiffies;
if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
cjiffies -= rdev->fence_drv[ring].last_jiffies;
if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
/* update the timeout */
rdev->fence_drv.last_timeout -= cjiffies;
rdev->fence_drv[ring].last_timeout -= cjiffies;
} else {
/* the 500ms timeout is elapsed we should test
* for GPU lockup
*/
rdev->fence_drv.last_timeout = 1;
rdev->fence_drv[ring].last_timeout = 1;
}
} else {
/* wrap around update last jiffies, we will just wait
* a little longer
*/
rdev->fence_drv.last_jiffies = cjiffies;
rdev->fence_drv[ring].last_jiffies = cjiffies;
}
return false;
}
n = NULL;
list_for_each(i, &rdev->fence_drv.emitted) {
list_for_each(i, &rdev->fence_drv[ring].emitted) {
fence = list_entry(i, struct radeon_fence, list);
if (fence->seq == seq) {
n = i;
@@ -141,11 +146,11 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
i = n;
do {
n = i->prev;
list_move_tail(i, &rdev->fence_drv.signaled);
list_move_tail(i, &rdev->fence_drv[ring].signaled);
fence = list_entry(i, struct radeon_fence, list);
fence->signaled = true;
i = n;
} while (i != &rdev->fence_drv.emitted);
} while (i != &rdev->fence_drv[ring].emitted);
wake = true;
}
return wake;
@@ -157,14 +162,16 @@ static void radeon_fence_destroy(struct kref *kref)
struct radeon_fence *fence;
fence = container_of(kref, struct radeon_fence, kref);
write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
list_del(&fence->list);
fence->emitted = false;
write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
kfree(fence);
}
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
int radeon_fence_create(struct radeon_device *rdev,
struct radeon_fence **fence,
int ring)
{
unsigned long irq_flags;
@@ -177,15 +184,15 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
(*fence)->emitted = false;
(*fence)->signaled = false;
(*fence)->seq = 0;
(*fence)->ring = ring;
INIT_LIST_HEAD(&(*fence)->list);
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
write_lock_irqsave(&rdev->fence_lock, irq_flags);
list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
bool radeon_fence_signaled(struct radeon_fence *fence)
{
unsigned long irq_flags;
@@ -197,7 +204,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
if (fence->rdev->gpu_lockup)
return true;
write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
signaled = fence->signaled;
/* if we are shuting down report all fence as signaled */
if (fence->rdev->shutdown) {
@@ -208,10 +215,10 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
signaled = true;
}
if (!signaled) {
radeon_fence_poll_locked(fence->rdev);
radeon_fence_poll_locked(fence->rdev, fence->ring);
signaled = fence->signaled;
}
write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
return signaled;
}
@@ -230,14 +237,14 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
if (radeon_fence_signaled(fence)) {
return 0;
}
timeout = rdev->fence_drv.last_timeout;
timeout = rdev->fence_drv[fence->ring].last_timeout;
retry:
/* save current sequence used to check for GPU lockup */
seq = rdev->fence_drv.last_seq;
seq = rdev->fence_drv[fence->ring].last_seq;
trace_radeon_fence_wait_begin(rdev->ddev, seq);
if (intr) {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
radeon_fence_signaled(fence), timeout);
radeon_irq_kms_sw_irq_put(rdev);
if (unlikely(r < 0)) {
@@ -245,7 +252,7 @@ retry:
}
} else {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_timeout(rdev->fence_drv.queue,
r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
radeon_fence_signaled(fence), timeout);
radeon_irq_kms_sw_irq_put(rdev);
}
@@ -258,10 +265,11 @@ retry:
timeout = r;
goto retry;
}
/* don't protect read access to rdev->fence_drv.last_seq
/* don't protect read access to rdev->fence_drv[t].last_seq
* if we experiencing a lockup the value doesn't change
*/
if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
if (seq == rdev->fence_drv[fence->ring].last_seq &&
radeon_gpu_is_lockup(rdev)) {
/* good news we believe it's a lockup */
printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
fence->seq, seq);
@@ -272,20 +280,20 @@ retry:
r = radeon_gpu_reset(rdev);
if (r)
return r;
radeon_fence_write(rdev, fence->seq);
radeon_fence_write(rdev, fence->seq, fence->ring);
rdev->gpu_lockup = false;
}
timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
rdev->fence_drv.last_jiffies = jiffies;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
write_lock_irqsave(&rdev->fence_lock, irq_flags);
rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
rdev->fence_drv[fence->ring].last_jiffies = jiffies;
write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
goto retry;
}
return 0;
}
int radeon_fence_wait_next(struct radeon_device *rdev)
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
struct radeon_fence *fence;
@@ -294,21 +302,21 @@ int radeon_fence_wait_next(struct radeon_device *rdev)
if (rdev->gpu_lockup) {
return 0;
}
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
if (list_empty(&rdev->fence_drv.emitted)) {
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
write_lock_irqsave(&rdev->fence_lock, irq_flags);
if (list_empty(&rdev->fence_drv[ring].emitted)) {
write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
fence = list_entry(rdev->fence_drv.emitted.next,
fence = list_entry(rdev->fence_drv[ring].emitted.next,
struct radeon_fence, list);
radeon_fence_ref(fence);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
r = radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
return r;
}
int radeon_fence_wait_last(struct radeon_device *rdev)
int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
struct radeon_fence *fence;
@@ -317,15 +325,15 @@ int radeon_fence_wait_last(struct radeon_device *rdev)
if (rdev->gpu_lockup) {
return 0;
}
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
if (list_empty(&rdev->fence_drv.emitted)) {
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
write_lock_irqsave(&rdev->fence_lock, irq_flags);
if (list_empty(&rdev->fence_drv[ring].emitted)) {
write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
fence = list_entry(rdev->fence_drv.emitted.prev,
fence = list_entry(rdev->fence_drv[ring].emitted.prev,
struct radeon_fence, list);
radeon_fence_ref(fence);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
r = radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
return r;
@@ -347,39 +355,49 @@ void radeon_fence_unref(struct radeon_fence **fence)
}
}
void radeon_fence_process(struct radeon_device *rdev)
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
bool wake;
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
wake = radeon_fence_poll_locked(rdev);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
write_lock_irqsave(&rdev->fence_lock, irq_flags);
wake = radeon_fence_poll_locked(rdev, ring);
write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
if (wake) {
wake_up_all(&rdev->fence_drv.queue);
wake_up_all(&rdev->fence_drv[ring].queue);
}
}
int radeon_fence_driver_init(struct radeon_device *rdev)
int radeon_fence_driver_init(struct radeon_device *rdev, int num_rings)
{
unsigned long irq_flags;
int r;
int r, ring;
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
if (r) {
dev_err(rdev->dev, "fence failed to get scratch register\n");
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return r;
for (ring = 0; ring < num_rings; ring++) {
write_lock_irqsave(&rdev->fence_lock, irq_flags);
r