Subversion Repositories: KolibriOS


Diff: Rev 3243 → Rev 3480
Line 318... Line 318...
 	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
 		/*
 		 * TLB invalidate requires a post-sync write.
 		 */
 		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
Line 323... Line 324...
 
 		/* Workaround: we must issue a pipe_control with CS-stall bit
 		 * set before a pipe_control command that has the state cache
 		 * invalidate bit set. */
Line 331... Line 332...
 	if (ret)
 		return ret;
Line 333... Line 334...
 
 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
 	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, scratch_addr);
 	intel_ring_emit(ring, 0);
Line 338... Line 339...
 	intel_ring_advance(ring);
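Context for the two hunks above (a sketch, not part of the diff): on gen7 the global-GTT selector for a PIPE_CONTROL post-sync write moved out of the address dword and into the flags dword. The macro values below match the i915 headers of this era, but verify against i915_reg.h in your tree.

/*
 * Where the GGTT/PPGTT selector for a PIPE_CONTROL post-sync write
 * lives, per generation (values as in i915_reg.h of this vintage):
 */
#define PIPE_CONTROL_GLOBAL_GTT      (1 << 2)   /* pre-gen7: tag in the address dword */
#define PIPE_CONTROL_GLOBAL_GTT_IVB  (1 << 24)  /* gen7+: bit in the flags dword */

/* Hence the change: the IVB flush now sets the selector in `flags`
 * and emits the scratch address untagged. */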
 
Line 463... Line 464...
 	pc->gtt_offset = obj->gtt_offset;
 	pc->cpu_page =  (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096, PG_SW);
 	if (pc->cpu_page == NULL)
 		goto err_unpin;
Line -... Line 468...
-
 
468
 
-
 
469
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-
 
470
			 ring->name, pc->gtt_offset);
467
 
471
 
468
	pc->obj = obj;
472
	pc->obj = obj;
469
	ring->private = pc;
473
	ring->private = pc;
Line 470... Line 474...
470
	return 0;
474
	return 0;
Line 554... Line 558...
 	return ret;
 }
Line 556... Line 560...
 
 static void render_ring_cleanup(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
+
 	if (!ring->private)
Line 560... Line 566...
 		return;
 
Line 603... Line 609...
 	intel_ring_advance(ring);
Line 604... Line 610...
 
 	return 0;
Line -... Line 612...
+}
+
+static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+					      u32 seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return dev_priv->last_seqno < seqno;
 }
 
 /**
  * intel_ring_sync - sync the waiter to the signaller on seqno
  *
Line 633... Line 646...
 
 	ret = intel_ring_begin(waiter, 4);
 	if (ret)
 		return ret;
 
+	/* If seqno wrap happened, omit the wait with no-ops */
+	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
 	intel_ring_emit(waiter,
-			dw1 | signaller->semaphore_register[waiter->id]);
+				dw1 |
+				signaller->semaphore_register[waiter->id]);
 	intel_ring_emit(waiter, seqno);
-	intel_ring_emit(waiter, 0);
-	intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, 0);
+		intel_ring_emit(waiter, MI_NOOP);
+	} else {
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+	}
 	intel_ring_advance(waiter);
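The helper added above detects a stale wait: a seqno numerically greater than the last one handed out can only have been issued before the 32-bit counter wrapped, so the semaphore wait is padded out with no-ops instead. A minimal, self-contained illustration of the predicate (demo code, not from the driver):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors i915_gem_has_seqno_wrapped(): true when the seqno we are
 * asked to wait on predates a wrap of the 32-bit counter. */
static bool seqno_wrapped(uint32_t last_seqno, uint32_t seqno)
{
	return last_seqno < seqno;
}

int main(void)
{
	/* Normal case: waiting on 100 when 150 have been assigned. */
	printf("%d\n", seqno_wrapped(150, 100));        /* 0 -> real wait */

	/* Wrap case: counter restarted near zero; a stale wait on
	 * 0xfffffff0 from before the wrap must be skipped. */
	printf("%d\n", seqno_wrapped(5, 0xfffffff0u));  /* 1 -> emit no-ops */
	return 0;
}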
Line 718... Line 740...
 ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
Line -... Line 744...
+
+static void
+ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+}
 
 static u32
 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
 	struct pipe_control *pc = ring->private;
 	return pc->cpu_page[0];
Line -... Line 756...
+}
+
+static void
+pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	struct pipe_control *pc = ring->private;
+	pc->cpu_page[0] = seqno;
 }
 
 static bool
 gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
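The new set_seqno hooks are the write-side twins of the existing get_seqno readers; the wrap-handling path (see intel_ring_init_seqno further down) uses them to force the stored seqno without caring whether it lives in the hardware status page or the pipe-control scratch page. A toy model of that symmetry (illustrative only; the index constant is a stand-in for I915_GEM_HWS_INDEX):

#include <stdint.h>
#include <stdio.h>

#define HWS_INDEX 0x20                 /* stand-in for I915_GEM_HWS_INDEX */
static uint32_t status_page[1024];     /* model of the hardware status page */

static uint32_t get_seqno(void)           { return status_page[HWS_INDEX]; }
static void     set_seqno(uint32_t seqno) { status_page[HWS_INDEX] = seqno; }

int main(void)
{
	set_seqno(0xfffffff0u);  /* e.g. forcing a near-wrap value for testing */
	printf("0x%08x\n", get_seqno());
	return 0;
}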
Line 1154... Line 1189...
 		ret = init_phys_hws_pga(ring);
 		if (ret)
 			return ret;
 	}
Line -... Line 1193...
 
+	obj = NULL;
+	if (!HAS_LLC(dev))
+		obj = i915_gem_object_create_stolen(dev, ring->size);
+	if (obj == NULL)
     obj = i915_gem_alloc_object(dev, ring->size);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate ringbuffer\n");
 		ret = -ENOMEM;
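A note on the new allocation order above: stolen memory is carved out of system RAM for the GPU and is not normally CPU-mapped, but the ringbuffer is only ever written through the GTT aperture, so it can live there; on parts without a shared last-level cache there is no cacheable mapping to give up, hence the !HAS_LLC gate. That reading is inferred from the code, not stated in the diff. A generic model of the try-then-fall-back shape (names illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stub: the dedicated pool is often exhausted or absent. */
static void *alloc_from_stolen(size_t size) { (void)size; return NULL; }

static void *alloc_ring(size_t size, bool has_llc)
{
	void *p = NULL;

	if (!has_llc)              /* no cacheable-mapping benefit to lose */
		p = alloc_from_stolen(size);
	if (p == NULL)             /* stolen unavailable, or an LLC part */
		p = malloc(size);
	return p;
}

int main(void)
{
	void *ring = alloc_ring(32 * 1024, /*has_llc=*/false);
	printf("ring at %p\n", ring);
	free(ring);
	return 0;
}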
Line 1172... Line 1211...
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
 		goto err_unpin;
Line 1175... Line 1214...
 
 	ring->virtual_start =
-        ioremap(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
+		ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
 			   ring->size);
 	if (ring->virtual_start == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
 		ret = -EINVAL;
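Two things change in the mapping above: the base address now comes from the reorganised gtt struct (dev_priv->gtt.mappable_base instead of dev_priv->mm.gtt->gma_bus_addr), and the mapping becomes write-combining. A sketch of why that matters:

/*
 * ioremap(addr, size)    - uncached: every CPU write to the ring is an
 *                          individual bus transaction.
 * ioremap_wc(addr, size) - write-combining: CPU writes are buffered and
 *                          flushed in bursts, which suits streaming
 *                          command dwords through the GTT aperture.
 * The matching unmap in both cases is iounmap(), which the err_unmap
 * and cleanup hunks below now use.
 */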
Line 1195... Line 1234...
 		ring->effective_size -= 128;
Line 1196... Line 1235...
 
Line 1197... Line 1236...
 	return 0;
 
 err_unmap:
-    FreeKernelSpace(ring->virtual_start);
+	iounmap(ring->virtual_start);
 err_unpin:
 	i915_gem_object_unpin(obj);
 err_unref:
Line 1223... Line 1262...
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  ring->name, ret);
Line 1225... Line 1264...
 
Line 1226... Line 1265...
 	I915_WRITE_CTL(ring, 0);
Line 1227... Line 1266...
 
-//   drm_core_ioremapfree(&ring->map, ring->dev);
+	iounmap(ring->virtual_start);
 
Line 1332... Line 1371...
 			return 0;
 		}
Line 1334... Line 1373...
 
Line 1335... Line 1374...
 		msleep(1);
 
-		ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+					   dev_priv->mm.interruptible);
 		if (ret)
 			return ret;
     } while (!time_after(GetTimerTicks(), end));
Line 1394... Line 1434...
 		return 0;
 
 	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
 }
 
+static int __intel_ring_begin(struct intel_ring_buffer *ring,
+			      int bytes)
+{
+	int ret;
+
+	if (unlikely(ring->tail + bytes > ring->effective_size)) {
+		ret = intel_wrap_ring_buffer(ring);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if (unlikely(ring->space < bytes)) {
+		ret = ring_wait_for_space(ring, bytes);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	ring->space -= bytes;
+	return 0;
+}
+
 int intel_ring_begin(struct intel_ring_buffer *ring,
 		     int num_dwords)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	int n = 4*num_dwords;
 	int ret;
 
-	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+				   dev_priv->mm.interruptible);
 	if (ret)
 		return ret;
 
 	/* Preallocate the olr before touching the ring */
 	ret = intel_ring_alloc_seqno(ring);
 	if (ret)
 		return ret;
 
-	if (unlikely(ring->tail + n > ring->effective_size)) {
-		ret = intel_wrap_ring_buffer(ring);
-		if (unlikely(ret))
-			return ret;
-	}
-
-	if (unlikely(ring->space < n)) {
-		ret = ring_wait_for_space(ring, n);
-		if (unlikely(ret))
-			return ret;
-	}
-
-	ring->space -= n;
-	return 0;
-}
+	return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+}
+
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	BUG_ON(ring->outstanding_lazy_request);
+
+	if (INTEL_INFO(ring->dev)->gen >= 6) {
+		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
+		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
+	}
+
+	ring->set_seqno(ring, seqno);
+}
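The refactor above splits intel_ring_begin() in two: the public entry point keeps the wedge check and seqno preallocation and still counts dwords, while the new __intel_ring_begin() does the wrap and free-space bookkeeping in bytes, hence the num_dwords * sizeof(uint32_t) conversion. A compact model of the byte-counting half (simplified: the real code wraps or waits instead of failing):

#include <stdint.h>
#include <stdio.h>

/* Illustrative ring state; field meanings as in the driver. */
struct ring {
	int tail;            /* write offset, bytes */
	int space;           /* free bytes before the read pointer */
	int effective_size;  /* usable bytes (tail of buffer kept in reserve) */
};

/* Shape of __intel_ring_begin(): every check is in bytes. */
static int ring_begin_bytes(struct ring *r, int bytes)
{
	if (r->tail + bytes > r->effective_size)
		return -1;       /* driver would intel_wrap_ring_buffer() */
	if (r->space < bytes)
		return -1;       /* driver would ring_wait_for_space() */
	r->space -= bytes;
	return 0;
}

int main(void)
{
	struct ring r = { .tail = 4000, .space = 64, .effective_size = 4096 };
	int num_dwords = 4;  /* callers still speak dwords */

	printf("%d\n", ring_begin_bytes(&r, num_dwords * (int)sizeof(uint32_t)));
	return 0;
}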
 
Line 1588... Line 1650...
 		ring->flush = gen6_render_ring_flush;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		ring->sync_to = gen6_ring_sync;
 		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
 		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
 		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
 		ring->signal_mbox[0] = GEN6_VRSYNC;
 		ring->signal_mbox[1] = GEN6_BRSYNC;
 	} else if (IS_GEN5(dev)) {
        ring->add_request = pc_render_add_request;
 		ring->flush = gen4_render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
+		ring->set_seqno = pc_render_set_seqno;
 		ring->irq_get = gen5_ring_get_irq;
 		ring->irq_put = gen5_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
 	} else {
 		ring->add_request = i9xx_add_request;
 		if (INTEL_INFO(dev)->gen < 4)
 			ring->flush = gen2_render_ring_flush;
 		else
 			ring->flush = gen4_render_ring_flush;
 		ring->get_seqno = ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		if (IS_GEN2(dev)) {
 			ring->irq_get = i8xx_ring_get_irq;
 			ring->irq_put = i8xx_ring_put_irq;
 		} else {
 			ring->irq_get = i9xx_ring_get_irq;
1680
	if (INTEL_INFO(dev)->gen < 4)
1745
	if (INTEL_INFO(dev)->gen < 4)
1681
		ring->flush = gen2_render_ring_flush;
1746
		ring->flush = gen2_render_ring_flush;
1682
	else
1747
	else
1683
		ring->flush = gen4_render_ring_flush;
1748
		ring->flush = gen4_render_ring_flush;
1684
	ring->get_seqno = ring_get_seqno;
1749
	ring->get_seqno = ring_get_seqno;
-
 
1750
	ring->set_seqno = ring_set_seqno;
1685
	if (IS_GEN2(dev)) {
1751
	if (IS_GEN2(dev)) {
1686
		ring->irq_get = i8xx_ring_get_irq;
1752
		ring->irq_get = i8xx_ring_get_irq;
1687
		ring->irq_put = i8xx_ring_put_irq;
1753
		ring->irq_put = i8xx_ring_put_irq;
1688
	} else {
1754
	} else {
1689
		ring->irq_get = i9xx_ring_get_irq;
1755
		ring->irq_get = i9xx_ring_get_irq;
Line 1741... Line 1807...
 		if (IS_GEN6(dev))
 			ring->write_tail = gen6_bsd_ring_write_tail;
 		ring->flush = gen6_ring_flush;
 		ring->add_request = gen6_add_request;
 		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 		ring->sync_to = gen6_ring_sync;
1756
	} else {
1823
	} else {
1757
		ring->mmio_base = BSD_RING_BASE;
1824
		ring->mmio_base = BSD_RING_BASE;
1758
		ring->flush = bsd_ring_flush;
1825
		ring->flush = bsd_ring_flush;
1759
		ring->add_request = i9xx_add_request;
1826
		ring->add_request = i9xx_add_request;
1760
		ring->get_seqno = ring_get_seqno;
1827
		ring->get_seqno = ring_get_seqno;
-
 
1828
		ring->set_seqno = ring_set_seqno;
1761
		if (IS_GEN5(dev)) {
1829
		if (IS_GEN5(dev)) {
1762
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1830
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1763
			ring->irq_get = gen5_ring_get_irq;
1831
			ring->irq_get = gen5_ring_get_irq;
1764
			ring->irq_put = gen5_ring_put_irq;
1832
			ring->irq_put = gen5_ring_put_irq;
1765
		} else {
1833
		} else {
Line 1785... Line 1853...
1785
	ring->mmio_base = BLT_RING_BASE;
1853
	ring->mmio_base = BLT_RING_BASE;
1786
	ring->write_tail = ring_write_tail;
1854
	ring->write_tail = ring_write_tail;
1787
	ring->flush = blt_ring_flush;
1855
	ring->flush = blt_ring_flush;
1788
	ring->add_request = gen6_add_request;
1856
	ring->add_request = gen6_add_request;
1789
	ring->get_seqno = gen6_ring_get_seqno;
1857
	ring->get_seqno = gen6_ring_get_seqno;
-
 
1858
	ring->set_seqno = ring_set_seqno;
1790
	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
1859
	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
1791
	ring->irq_get = gen6_ring_get_irq;
1860
	ring->irq_get = gen6_ring_get_irq;
1792
	ring->irq_put = gen6_ring_put_irq;
1861
	ring->irq_put = gen6_ring_put_irq;
1793
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1862
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1794
	ring->sync_to = gen6_ring_sync;
1863
	ring->sync_to = gen6_ring_sync;