Subversion Repositories Kolibri OS


Comparison of i915_gem.c between Rev 3482 and Rev 3746 (KolibriOS port of the Intel i915 DRM driver). In each hunk below, lines removed in Rev 3746 are prefixed with "-", added lines with "+", and unchanged context with a space; the "Line X... Line Y..." headers give the old and new line numbers.
Line 31... Line 31...

 #include "i915_trace.h"
 #include "intel_drv.h"
 #include <...>
 #include <...>
 //#include <...>
+#include <...>
 #include <...>
 
 extern int x86_clflush_size;
Line 445... Line 446...

 	loff_t offset;
 	int shmem_page_offset, page_length, ret = 0;
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 	int prefaulted = 0;
 	int needs_clflush = 0;
-	struct scatterlist *sg;
-	int i;
+	struct sg_page_iter sg_iter;
 
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	user_data = to_user_ptr(args->data_ptr);
 	remain = args->size;
 
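Both the pread and pwrite paths stop open-coding the double cast of the ioctl's u64 data_ptr and switch to the to_user_ptr() helper. The helper is not defined in this file; upstream adds it to i915_drv.h in essentially this form (a sketch for reference, not part of this diff):

static inline void __user *to_user_ptr(u64 address)
{
	/* Cast through uintptr_t so 32-bit builds do not warn about
	 * truncating a 64-bit value into a pointer. */
	return (void __user *)(uintptr_t)address;
}
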
Line 475... Line 475...

 
 	i915_gem_object_pin_pages(obj);
 
 	offset = args->offset;
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page;
-
-		if (i < offset >> PAGE_SHIFT)
-			continue;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+			 offset >> PAGE_SHIFT) {
+		struct page *page = sg_page_iter_page(&sg_iter);
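This is the sg_page_iter conversion from upstream kernel 3.9: instead of walking scatterlist entries with for_each_sg() and manually skipping ahead to the first page of interest, for_each_sg_page() iterates page-by-page and takes the starting page offset as its fourth argument. A minimal sketch of the pattern, assuming an already-populated sg_table:

#include <linux/scatterlist.h>

/* Count the pages reachable from a populated sg_table, starting at
 * page offset first_page -- the same iteration shape the driver now
 * uses in its pread/pwrite loops. */
static unsigned int count_pages_from(struct sg_table *st,
				     unsigned long first_page)
{
	struct sg_page_iter sg_iter;
	unsigned int n = 0;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, first_page) {
		struct page *page = sg_page_iter_page(&sg_iter);
		(void)page;	/* a real caller would touch the page here */
		n++;
	}
	return n;
}

Besides being shorter, this removes the off-by-one hazards of the manual "if (i < offset >> PAGE_SHIFT) continue;" skip, and it keeps working once scatterlist entries are coalesced (see the i915_gem_object_get_pages_gtt() hunk below).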
Line 494... Line 492...

 		shmem_page_offset = offset_in_page(offset);
 		page_length = remain;
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - shmem_page_offset;
 
-		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
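The per-page swizzle test survives the conversion unchanged: page now comes from the iterator instead of sg_page(sg), but the predicate still just looks at bit 17 of the page's physical address, that is, which 128 KiB-aligned half of a 256 KiB span the page lives in. A standalone restatement, for illustration only:

#include <linux/mm.h>
#include <linux/io.h>

/* True when physical address bit 17 is set, which is what flips the
 * tiling swizzle pattern on affected machines. */
static bool page_has_bit17_set(struct page *page)
{
	return (page_to_phys(page) & (1 << 17)) != 0;
}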
Line 556... Line 553...

 
 	if (args->size == 0)
 		return 0;
 
 	if (!access_ok(VERIFY_WRITE,
-		       (char __user *)(uintptr_t)args->data_ptr,
+		       to_user_ptr(args->data_ptr),
 		       args->size))
 		return -EFAULT;
Line 775... Line 772...

 	int shmem_page_offset, page_length, ret = 0;
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 	int hit_slowpath = 0;
 	int needs_clflush_after = 0;
 	int needs_clflush_before = 0;
-	int i;
-	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
 
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	user_data = to_user_ptr(args->data_ptr);
 	remain = args->size;
 
Line 811... Line 807...

 	i915_gem_object_pin_pages(obj);
 
 	offset = args->offset;
 	obj->dirty = 1;
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page;
-		int partial_cacheline_write;
-
-		if (i < offset >> PAGE_SHIFT)
-			continue;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+			 offset >> PAGE_SHIFT) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+		int partial_cacheline_write;
 
 		if (remain <= 0)
Line 839... Line 833...

 		 * overcomplicate things and flush the entire patch. */
 		partial_cacheline_write = needs_clflush_before &&
 			((shmem_page_offset | page_length)
 				& (x86_clflush_size - 1));
 
-		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
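The partial_cacheline_write expression, untouched by this revision, is worth unpacking: OR-ing the start offset with the length and masking by (x86_clflush_size - 1) is non-zero exactly when either value is not a multiple of the clflush line size, i.e. when the write would only partially cover a cacheline that has to be flushed beforehand. A worked restatement, assuming the usual 64-byte line:

/* Illustration: non-zero iff offset or len is misaligned to the
 * cacheline size (which must be a power of two). */
static int partial_cacheline(int offset, int len, int clflush_size)
{
	return (offset | len) & (clflush_size - 1);
}

/* partial_cacheline(128, 96, 64) == 32  -> partial, flush needed
 * partial_cacheline(128, 64, 64) == 0   -> fully aligned        */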
Line 1100... Line 1093...

 	case -EIO:
 	case -EAGAIN: /* Wedged */
 	case -ERESTARTSYS: /* Signal */
 		return (int)end;
 	case 0: /* Timeout */
-		if (timeout)
-			set_normalized_timespec(timeout, 0, 0);
 		return -ETIME;
 	default: /* Completed */
 		WARN_ON(end < 0); /* We're not aware of other errors */
 		return 0;
 	}
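One port-specific change hides here: upstream zeroes the caller's remaining struct timespec via set_normalized_timespec() when the wait times out, but the KolibriOS port drops that call and simply returns -ETIME. Callers in this port therefore cannot rely on the timespec being updated on timeout.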
Line 1588... Line 1579...

 }
 
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
-	int page_count = obj->base.size / PAGE_SIZE;
-	struct scatterlist *sg;
-	int ret, i;
+	struct sg_page_iter sg_iter;
+	int ret;
 
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
Line 1607... Line 1597...

 	}
 
 	if (obj->madv == I915_MADV_DONTNEED)
 		obj->dirty = 0;
 
-	for_each_sg(obj->pages->sgl, sg, page_count, i) {
-		struct page *page = sg_page(sg);
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
 
Line 1659... Line 1649...

 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
     int page_count, i;
-	struct address_space *mapping;
 	struct sg_table *st;
 	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
 	struct page *page;
+	unsigned long last_pfn = 0;	/* suppress gcc warning */
 	gfp_t gfp;
 
 	/* Assert that the object is not currently in any GPU domain. As it
 	 * wasn't in the GTT, there shouldn't be any way it could have been in
Line 1680... Line 1671...

 
 	page_count = obj->base.size / PAGE_SIZE;
 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
 		sg_free_table(st);
 		kfree(st);
+        FAIL();
 		return -ENOMEM;
 	}
 
 	/* Get the list of pages out of our struct file.  They'll be pinned
 	 * at this point until we release them.
 	 *
 	 * Fail silently without starting the shrinker
 	 */
-	for_each_sg(st->sgl, sg, page_count, i) {
+	sg = st->sgl;
+	st->nents = 0;
+	for (i = 0; i < page_count; i++) {
 		page = shmem_read_mapping_page_gfp(obj->base.filp, i, gfp);
 		if (IS_ERR(page)) {
             dbgprintf("%s invalid page %p\n", __FUNCTION__, page);
 			goto err_pages;
 
 		}
 
-		sg_set_page(sg, page, PAGE_SIZE, 0);
+		if (!i || page_to_pfn(page) != last_pfn + 1) {
+			if (i)
+				sg = sg_next(sg);
+			st->nents++;
+			sg_set_page(sg, page, PAGE_SIZE, 0);
+		} else {
+			sg->length += PAGE_SIZE;
+		}
+		last_pfn = page_to_pfn(page);
 	}
 
-	obj->pages = st;
-
-//    DRM_DEBUG_KMS("%s alloc %d pages\n", __FUNCTION__, page_count);
+	sg_mark_end(sg);
+	obj->pages = st;
 
 	return 0;
 
 err_pages:
-	for_each_sg(st->sgl, sg, i, page_count)
-		page_cache_release(sg_page(sg));
+	sg_mark_end(sg);
+	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+		page_cache_release(sg_page_iter_page(&sg_iter));
 	sg_free_table(st);
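The rebuilt allocation loop implements the upstream 3.9 change that coalesces physically contiguous pages into a single scatterlist entry: a new entry is opened only when the current page's PFN does not directly follow the previous one; otherwise the current entry's length simply grows by PAGE_SIZE. A condensed sketch of that pattern, assuming pages[] holds page_count pages and st was sized with sg_alloc_table(st, page_count, ...):

#include <linux/mm.h>
#include <linux/scatterlist.h>

static void build_coalesced_sg(struct sg_table *st,
			       struct page **pages, int page_count)
{
	struct scatterlist *sg = st->sgl;
	unsigned long last_pfn = 0;
	int i;

	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		if (!i || page_to_pfn(pages[i]) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);	/* open a new entry */
			st->nents++;
			sg_set_page(sg, pages[i], PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;	/* extend the run */
		}
		last_pfn = page_to_pfn(pages[i]);
	}
	sg_mark_end(sg);	/* terminate at the last entry actually used */
}

Because nents can now be smaller than page_count, the error path (and every other consumer in this file) iterates with for_each_sg_page() over st->nents instead of assuming one page per entry.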
Line 1995... Line 1999...

 
 		i915_gem_object_move_to_inactive(obj);
 	}
 }
 
-static void i915_gem_reset_fences(struct drm_device *dev)
+void i915_gem_restore_fences(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-
-		i915_gem_write_fence(dev, i, NULL);
-
-		if (reg->obj)
-			i915_gem_object_fence_lost(reg->obj);
-
-		reg->pin_count = 0;
-		reg->obj = NULL;
-		INIT_LIST_HEAD(&reg->lru_list);
+		i915_gem_write_fence(dev, i, reg->obj);
 	}
-
-	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 }
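This mirrors the upstream rework of fence handling across GPU reset and resume: the destructive i915_gem_reset_fences(), which cleared every register and severed the objects attached to them, becomes i915_gem_restore_fences(), which rewrites each register from the driver's own bookkeeping (reg->obj) so that objects keep their fences. The static qualifier is dropped because the function is now called from outside this file as well; the reset path in the next hunk calls it in place of the old clear-out.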
Line 2036... Line 2030...

 			    mm_list)
 	{
 		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
 	}
 
-	/* The fence registers are invalidated so clear them out */
-	i915_gem_reset_fences(dev);
+	i915_gem_restore_fences(dev);
 }
 
 /**
Line 2508... Line 2501...

 			       struct drm_i915_fence_reg *fence)
 {
 	return fence - dev_priv->fence_regs;
 }
 
+static void i915_gem_write_fence__ipi(void *data)
+{
+    asm volatile("wbinvd");
+}
+
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	int reg = fence_number(dev_priv, fence);
-
-	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int fence_reg = fence_number(dev_priv, fence);
+
+	/* In order to fully serialize access to the fenced region and
+	 * the update to the fence register we need to take extreme
+	 * measures on SNB+. In theory, the write to the fence register
+	 * flushes all memory transactions before, and coupled with the
+	 * mb() placed around the register write we serialise all memory
+	 * operations with respect to the changes in the tiler. Yet, on
+	 * SNB+ we need to take a step further and emit an explicit wbinvd()
+	 * on each processor in order to manually flush all memory
+	 * transactions before updating the fence register.
+	 */
+	if (HAS_LLC(obj->base.dev))
+		on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
+	i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
 
 	if (enable) {
-		obj->fence_reg = reg;
+		obj->fence_reg = fence_reg;
 		fence->obj = obj;
 		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
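This backports the upstream workaround for incoherence between fences and LLC across multiple CPUs: a fence register write is only ordered against memory traffic the caches have actually emitted, so on LLC machines the driver first forces a write-back-and-invalidate on every processor. on_each_cpu(func, info, wait) is the stock kernel primitive used for that; the callback receives the opaque info pointer, and wait=1 blocks until every CPU has run it. The pattern in isolation:

#include <linux/smp.h>

static void flush_cpu_caches_ipi(void *info)
{
	/* WBINVD: write back and invalidate this CPU's caches. */
	asm volatile("wbinvd");
}

static void flush_all_cpu_caches(void)
{
	on_each_cpu(flush_cpu_caches_ipi, NULL, 1);	/* 1 = wait for completion */
}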
Line 2547... Line 2559...

 
 int
 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_fence_reg *fence;
 	int ret;
 
 	ret = i915_gem_object_wait_fence(obj);
 	if (ret)
 		return ret;
 
 	if (obj->fence_reg == I915_FENCE_REG_NONE)
 		return 0;
 
-	i915_gem_object_update_fence(obj,
-				     &dev_priv->fence_regs[obj->fence_reg],
-				     false);
+	fence = &dev_priv->fence_regs[obj->fence_reg];
+
+	i915_gem_object_fence_lost(obj);
Line 2772... Line 2785...

 	 * before evicting everything in a vain attempt to find space.
 	 */
 	if (obj->base.size >
 	    (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
 		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+        FAIL();
 		return -E2BIG;
 	}
 
 	ret = i915_gem_object_get_pages(obj);
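FAIL() here, and in several other error paths in this diff, is KolibriOS-port instrumentation rather than upstream code; the port drops it into failure branches alongside its dbgprintf() logging. Its definition lives in the port's own headers, outside this diff; purely as an assumption about its shape, it is likely a trace macro along these lines:

/* Hypothetical sketch only -- the real macro is defined elsewhere
 * in the KolibriOS port, not in this file. */
#define FAIL() dbgprintf("%s:%d FAIL\n", __FUNCTION__, __LINE__)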
Line 3631... Line 3645...

 {
 	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
 	gfp_t mask;
+
+	obj = i915_gem_object_alloc(dev);
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (obj == NULL)
+    {
+        FAIL();
 		return NULL;
+    };
 
 	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
 		kfree(obj);
+        FAIL();
 		return NULL;
Line 3744... Line 3764...

 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_gem_evict_everything(dev);
 
-	i915_gem_reset_fences(dev);
-
 	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
 	 * We need to replace this with a semaphore, or something.
 	 * And not confound mm.suspended!
Line 3885... Line 3903...

 		return -EIO;
 
 	if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
 		I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
 
+	if (HAS_PCH_NOP(dev)) {
+		u32 temp = I915_READ(GEN7_MSG_CTL);
+		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
+		I915_WRITE(GEN7_MSG_CTL, temp);
+	}
+
 	i915_gem_l3_remap(dev);
	i915_gem_l3_remap(dev);
Line 3898... Line 3922...
3898
	/*
3922
	/*
3899
	 * XXX: There was some w/a described somewhere suggesting loading
3923
	 * XXX: There was some w/a described somewhere suggesting loading
3900
	 * contexts before PPGTT.
3924
	 * contexts before PPGTT.
3901
	 */
3925
	 */
3902
	i915_gem_context_init(dev);
3926
	i915_gem_context_init(dev);
-
 
3927
	if (dev_priv->mm.aliasing_ppgtt) {
-
 
3928
		ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
-
 
3929
		if (ret) {
3903
	i915_gem_init_ppgtt(dev);
3930
			i915_gem_cleanup_aliasing_ppgtt(dev);
-
 
3931
			DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
-
 
3932
		}
-
 
3933
	}
Line 3904... Line 3934...
3904
 
3934
 
3905
	return 0;
3935
	return 0;
Line 3906... Line 3936...
3906
}
3936
}
Line 3911... Line 3941...
3911
{
3941
{
3912
	struct drm_i915_private *dev_priv = dev->dev_private;
3942
	struct drm_i915_private *dev_priv = dev->dev_private;
3913
	int ret;
3943
	int ret;
Line 3914... Line 3944...
3914
 
3944
 
-
 
3945
	mutex_lock(&dev->struct_mutex);
-
 
3946
 
-
 
3947
	if (IS_VALLEYVIEW(dev)) {
-
 
3948
		/* VLVA0 (potential hack), BIOS isn't actually waking us */
-
 
3949
		I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
-
 
3950
		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
-
 
3951
			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
-
 
3952
	}
3915
	mutex_lock(&dev->struct_mutex);
3953
 
-
 
3954
	i915_gem_init_global_gtt(dev);
3916
	i915_gem_init_global_gtt(dev);
3955
 
3917
	ret = i915_gem_init_hw(dev);
3956
	ret = i915_gem_init_hw(dev);
3918
	mutex_unlock(&dev->struct_mutex);
3957
	mutex_unlock(&dev->struct_mutex);
3919
	if (ret) {
3958
	if (ret) {
3920
		i915_gem_cleanup_aliasing_ppgtt(dev);
3959
		i915_gem_cleanup_aliasing_ppgtt(dev);
3921
		return ret;
3960
		return ret;
Line -... Line 3961...
-
 
3961
	}
3922
	}
3962
 
3923
 
3963
 
Line 3924... Line 3964...
3924
    return 0;
3964
    return 0;
3925
}
3965
}
Line 4036... Line 4076...
4036
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4076
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4037
    }
4077
    }
Line 4038... Line 4078...
4038
 
4078
 
Line -... Line 4079...
-
 
4079
    dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
-
 
4080
 
4039
    dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4081
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4040
 
4082
		dev_priv->num_fence_regs = 32;
4041
    if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4083
	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4042
        dev_priv->num_fence_regs = 16;
4084
        dev_priv->num_fence_regs = 16;
Line 4043... Line 4085...
4043
    else
4085
    else
-
 
4086
        dev_priv->num_fence_regs = 8;
4044
        dev_priv->num_fence_regs = 8;
4087
 
Line 4045... Line 4088...
4045
 
4088
    /* Initialize fence registers to zero */
Line 4046... Line 4089...
4046
    /* Initialize fence registers to zero */
4089
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);