Subversion Repositories: KolibriOS

Compare Revisions: Rev 3479 → Rev 3480 (whitespace changes ignored)

/drivers/video/drm/i915/intel_ringbuffer.c
320,6 → 320,7
                 * TLB invalidate requires a post-sync write.
                 */
                flags |= PIPE_CONTROL_QW_WRITE;
+               flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 
                /* Workaround: we must issue a pipe_control with CS-stall bit
                 * set before a pipe_control command that has the state cache
333,7 → 334,7
 
        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
        intel_ring_emit(ring, flags);
-       intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+       intel_ring_emit(ring, scratch_addr);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);
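Note: the two hunks above are one logical change. On Gen6 the post-sync write of a PIPE_CONTROL selects the global GTT through a bit in the address dword (PIPE_CONTROL_GLOBAL_GTT); on Ivybridge (Gen7) that selector moved into the flags dword as PIPE_CONTROL_GLOBAL_GTT_IVB, so the bit is added to flags and dropped from scratch_addr. A minimal sketch of where the bit lands on each generation; emit_pipe_control and its is_gen7 parameter are hypothetical, introduced only for illustration:

    /* Hypothetical helper: sketches where the GGTT selector lives. */
    static void emit_pipe_control(struct intel_ring_buffer *ring,
                                  u32 flags, u32 scratch_addr, bool is_gen7)
    {
            if (is_gen7)
                    flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;    /* dword 1 */
            else
                    scratch_addr |= PIPE_CONTROL_GLOBAL_GTT; /* dword 2 */

            intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
            intel_ring_emit(ring, flags);
            intel_ring_emit(ring, scratch_addr);
            intel_ring_emit(ring, 0);
            intel_ring_advance(ring);
    }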
 
465,6 → 466,9
        if (pc->cpu_page == NULL)
                goto err_unpin;
 
+       DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
+                        ring->name, pc->gtt_offset);
+
        pc->obj = obj;
        ring->private = pc;
        return 0;
556,6 → 560,8
 
 static void render_ring_cleanup(struct intel_ring_buffer *ring)
 {
+       struct drm_device *dev = ring->dev;
+
        if (!ring->private)
                return;
 
605,6 → 611,13
        return 0;
 }
 
+static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+                                             u32 seqno)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       return dev_priv->last_seqno < seqno;
+}
+
 /**
  * intel_ring_sync - sync the waiter to the signaller on seqno
  *
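Note: seqnos are 32-bit values handed out monotonically, and dev_priv->last_seqno tracks the most recently allocated one. A wait target larger than last_seqno can therefore only be a stale value from before a counter wrap, and arming it in hardware would hang the waiter forever. A standalone sketch of the same test, assuming only the semantics visible above:

    #include <stdbool.h>
    #include <stdint.h>

    /* 'last' is the newest allocated seqno. A target above it must
     * predate the wrap, so the semaphore wait has to be skipped. */
    static inline bool seqno_wrapped(uint32_t last, uint32_t target)
    {
            return last < target;
    }

    /* seqno_wrapped(5, 0xfffffff0) -> true:  emit MI_NOOPs instead
     * seqno_wrapped(100, 42)       -> false: emit the real wait    */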
635,11 → 648,20
        if (ret)
                return ret;
 
+       /* If seqno wrap happened, omit the wait with no-ops */
+       if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
                intel_ring_emit(waiter,
-                       dw1 | signaller->semaphore_register[waiter->id]);
+                               dw1 |
+                               signaller->semaphore_register[waiter->id]);
                intel_ring_emit(waiter, seqno);
                intel_ring_emit(waiter, 0);
                intel_ring_emit(waiter, MI_NOOP);
+       } else {
+               intel_ring_emit(waiter, MI_NOOP);
+               intel_ring_emit(waiter, MI_NOOP);
+               intel_ring_emit(waiter, MI_NOOP);
+               intel_ring_emit(waiter, MI_NOOP);
+       }
        intel_ring_advance(waiter);
 
        return 0;
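Note: both branches emit exactly four dwords, the same amount of space the preceding intel_ring_begin call reserved, so the ring tail advances identically whether a real MI_SEMAPHORE wait or a run of MI_NOOPs is written.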
720,6 → 742,12
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
+static void
+ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+       intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+}
+
 static u32
 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
727,6 → 755,13
        return pc->cpu_page[0];
 }
 
+static void
+pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+       struct pipe_control *pc = ring->private;
+       pc->cpu_page[0] = seqno;
+}
+
 static bool
 gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
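Note: the two new setters mirror the existing getters. Most rings keep the current seqno in the hardware status page at slot I915_GEM_HWS_INDEX, while the Gen5 render ring keeps it in the CPU-visible page of its pipe-control scratch object. For context, intel_write_status_page in the matching intel_ringbuffer.h of this era is essentially a plain store into that page (quoted from the upstream header; the KolibriOS port may differ slightly):

    static inline void
    intel_write_status_page(struct intel_ring_buffer *ring,
                            int reg, u32 value)
    {
            ring->status_page.page_addr[reg] = value;
    }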
1156,6 → 1191,10
                return ret;
        }
 
+       obj = NULL;
+       if (!HAS_LLC(dev))
+               obj = i915_gem_object_create_stolen(dev, ring->size);
+       if (obj == NULL)
                obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
1174,7 → 1213,7
                goto err_unpin;
 
        ring->virtual_start =
-               ioremap(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
+               ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
                           ring->size);
        if (ring->virtual_start == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
1197,7 → 1236,7
        return 0;
 
 err_unmap:
-       FreeKernelSpace(ring->virtual_start);
+       iounmap(ring->virtual_start);
 err_unpin:
        i915_gem_object_unpin(obj);
 err_unref:
1225,7 → 1264,7
 
        I915_WRITE_CTL(ring, 0);
 
-//     drm_core_ioremapfree(&ring->map, ring->dev);
+       iounmap(ring->virtual_start);
 
        i915_gem_object_unpin(ring->obj);
        drm_gem_object_unreference(&ring->obj->base);
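Note: this hunk and the err_unmap hunk above are the same fix. Since the mapping is now created with ioremap_wc, both the error path (previously the KolibriOS-specific FreeKernelSpace) and the cleanup path (previously a commented-out drm_core_ioremapfree, i.e. a leaked mapping) must release it with the matching iounmap:

    void __iomem *va = ioremap_wc(phys, size);
    /* ... use the mapping ... */
    iounmap(va);    /* must pair with ioremap_wc */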
1334,7 → 1373,8
 
                msleep(1);
 
-               ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+               ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+                                          dev_priv->mm.interruptible);
                if (ret)
                        return ret;
        } while (!time_after(GetTimerTicks(), end));
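Note: i915_gem_check_wedge now takes the embedded struct i915_gpu_error (&dev_priv->gpu_error) instead of the whole device private, and the stop_rings test in intel_ring_advance further down moves into the same struct. Both follow the upstream refactor that gathered GPU-hang state into one place.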
1396,14 → 1436,35
        return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
 }
 
+static int __intel_ring_begin(struct intel_ring_buffer *ring,
+                             int bytes)
+{
+       int ret;
+
+       if (unlikely(ring->tail + bytes > ring->effective_size)) {
+               ret = intel_wrap_ring_buffer(ring);
+               if (unlikely(ret))
+                       return ret;
+       }
+
+       if (unlikely(ring->space < bytes)) {
+               ret = ring_wait_for_space(ring, bytes);
+               if (unlikely(ret))
+                       return ret;
+       }
+
+       ring->space -= bytes;
+       return 0;
+}
+
 int intel_ring_begin(struct intel_ring_buffer *ring,
                     int num_dwords)
 {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
-       int n = 4*num_dwords;
        int ret;
 
-       ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+       ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+                                  dev_priv->mm.interruptible);
        if (ret)
                return ret;
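Note: the space accounting is factored into __intel_ring_begin, which works in bytes: wrap the tail if the request would run past effective_size, wait until enough space is free, then debit it. intel_ring_begin converts its dword count with num_dwords * sizeof(uint32_t), replacing the hand-written 4*num_dwords. A typical caller, taken from the PIPE_CONTROL hunk earlier in this diff:

    ret = intel_ring_begin(ring, 4);    /* reserve 4 dwords = 16 bytes */
    if (ret)
            return ret;
    intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
    intel_ring_emit(ring, flags);
    intel_ring_emit(ring, scratch_addr);
    intel_ring_emit(ring, 0);
    intel_ring_advance(ring);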
 
1412,20 → 1473,21
        if (ret)
                return ret;
 
-       if (unlikely(ring->tail + n > ring->effective_size)) {
-               ret = intel_wrap_ring_buffer(ring);
-               if (unlikely(ret))
-                       return ret;
-       }
+       return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+}
 
-       if (unlikely(ring->space < n)) {
-               ret = ring_wait_for_space(ring, n);
-               if (unlikely(ret))
-                       return ret;
-       }
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+       BUG_ON(ring->outstanding_lazy_request);
+
+       if (INTEL_INFO(ring->dev)->gen >= 6) {
+               I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
+               I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
+       }
 
-       ring->space -= n;
-       return 0;
+       ring->set_seqno(ring, seqno);
 }
 
 void intel_ring_advance(struct intel_ring_buffer *ring)
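Note: intel_ring_init_seqno is the consumer of the new set_seqno callbacks. After a seqno wrap the driver forces every ring's counter to a chosen value, and on Gen6+ also zeroes the RING_SYNC_0/RING_SYNC_1 semaphore mailboxes so a stale pre-wrap seqno cannot satisfy a later semaphore wait; the BUG_ON guards against rewriting the counter while a lazy request is still outstanding. A sketch of the expected caller, assuming the usual for_each_ring iterator (the real call site presumably lives in i915_gem.c, outside this file):

    /* Hypothetical caller sketch: restart every ring at 'seqno'. */
    for_each_ring(ring, dev_priv, i)
            intel_ring_init_seqno(ring, seqno);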
1433,7 → 1495,7
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
        ring->tail &= ring->size - 1;
-       if (dev_priv->stop_rings & intel_ring_flag(ring))
+       if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
                return;
        ring->write_tail(ring, ring->tail);
 }
1590,6 → 1652,7
                ring->irq_put = gen6_ring_put_irq;
                ring->irq_enable_mask = GT_USER_INTERRUPT;
                ring->get_seqno = gen6_ring_get_seqno;
+               ring->set_seqno = ring_set_seqno;
                ring->sync_to = gen6_ring_sync;
                ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
                ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
1600,6 → 1663,7
                ring->add_request = pc_render_add_request;
                ring->flush = gen4_render_ring_flush;
                ring->get_seqno = pc_render_get_seqno;
+               ring->set_seqno = pc_render_set_seqno;
                ring->irq_get = gen5_ring_get_irq;
                ring->irq_put = gen5_ring_put_irq;
                ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
1610,6 → 1674,7
        else
                ring->flush = gen4_render_ring_flush;
        ring->get_seqno = ring_get_seqno;
+       ring->set_seqno = ring_set_seqno;
        if (IS_GEN2(dev)) {
                ring->irq_get = i8xx_ring_get_irq;
                ring->irq_put = i8xx_ring_put_irq;
1682,6 → 1747,7
        else
                ring->flush = gen4_render_ring_flush;
        ring->get_seqno = ring_get_seqno;
+       ring->set_seqno = ring_set_seqno;
        if (IS_GEN2(dev)) {
                ring->irq_get = i8xx_ring_get_irq;
                ring->irq_put = i8xx_ring_put_irq;
1743,6 → 1809,7
                ring->flush = gen6_ring_flush;
                ring->add_request = gen6_add_request;
                ring->get_seqno = gen6_ring_get_seqno;
+               ring->set_seqno = ring_set_seqno;
                ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
                ring->irq_get = gen6_ring_get_irq;
                ring->irq_put = gen6_ring_put_irq;
1758,6 → 1825,7
                ring->flush = bsd_ring_flush;
                ring->add_request = i9xx_add_request;
                ring->get_seqno = ring_get_seqno;
+               ring->set_seqno = ring_set_seqno;
                if (IS_GEN5(dev)) {
                        ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
                        ring->irq_get = gen5_ring_get_irq;
1787,6 → 1855,7
        ring->flush = blt_ring_flush;
        ring->add_request = gen6_add_request;
        ring->get_seqno = gen6_ring_get_seqno;
+       ring->set_seqno = ring_set_seqno;
        ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
        ring->irq_get = gen6_ring_get_irq;
        ring->irq_put = gen6_ring_put_irq;
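Note: the remaining hunks wire set_seqno into every engine constructor: the render ring (Gen6+, Gen5 pc_render, and the legacy paths), the BSD ring, and the blitter. With a setter registered uniformly, intel_ring_init_seqno can reset any ring regardless of whether its seqno lives in the status page or in the pipe-control page.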