Subversion Repositories Kolibri OS


Diff: Rev 5346 → Rev 6104. In each hunk below, lines prefixed with '-' were removed, lines prefixed with '+' were added, and unprefixed lines are unchanged context.
Line 115... Line 115...
 extern int radeon_vm_block_size;
 extern int radeon_deep_color;
 extern int radeon_use_pflipirq;
 extern int radeon_bapm;
 extern int radeon_backlight;
-
-
-typedef struct pm_message {
-    int event;
-} pm_message_t;
-
-typedef struct
-{
-  int width;
-  int height;
-  int bpp;
-  int freq;
-}videomode_t;
-
+extern int radeon_auxch;
+extern int radeon_mst;
Line 134... Line 122...
 
 
 static inline u32 ioread32(const volatile void __iomem *addr)
Line 272... Line 260...
 
 /*
  * Dummy page
  */
 struct radeon_dummy_page {
+	uint64_t	entry;
 	struct page	*page;
 	dma_addr_t	addr;
 };
 int radeon_dummy_page_init(struct radeon_device *rdev);
Line 533... Line 522...
 	struct drm_gem_object		gem_base;
 
 	pid_t				pid;
 
 	struct radeon_mn		*mn;
+	struct list_head		mn_list;
 };
Line 673... Line 663...
 	void				*ptr;
-    unsigned            num_gpu_pages;
-    unsigned            num_cpu_pages;
-    unsigned            table_size;
-    struct page         **pages;
-    dma_addr_t          *pages_addr;
-    bool                ready;
+	unsigned			num_gpu_pages;
+	unsigned			num_cpu_pages;
+	unsigned			table_size;
+	struct page			**pages;
+	uint64_t			*pages_entry;
+	bool				ready;
 };
 
 int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
Line 744... Line 734...
 	/* doorbell mmio */
-	resource_size_t			base;
-	resource_size_t			size;
+	resource_size_t		base;
+	resource_size_t		size;
 	u32 __iomem		*ptr;
 	u32			num_doorbells;	/* Number of doorbells actually reserved for radeon. */
-	unsigned long		used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)];
+	DECLARE_BITMAP(used, RADEON_MAX_DOORBELLS);
 };
 
 int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
 void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
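The hunk above switches the doorbell allocation mask from an open-coded unsigned long array to DECLARE_BITMAP. The two declarations describe the same storage; below is a small self-contained sketch of that equivalence, with the kernel helpers replaced by local stand-ins that mirror their usual definitions (RADEON_MAX_DOORBELLS is given an illustrative value here, not one taken from this diff).

#include <stdio.h>
#include <limits.h>

/* Local stand-ins for the kernel helpers; values mirror the usual definitions. */
#define BITS_PER_LONG              (CHAR_BIT * sizeof(long))
#define DIV_ROUND_UP(n, d)         (((n) + (d) - 1) / (d))
#define BITS_TO_LONGS(nr)          DIV_ROUND_UP(nr, BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#define RADEON_MAX_DOORBELLS 1024  /* illustrative value only */

int main(void)
{
	/* Old style and new style declare storage of the same size. */
	unsigned long used_old[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)];
	DECLARE_BITMAP(used_new, RADEON_MAX_DOORBELLS);

	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(used_old), sizeof(used_new));
	return 0;
}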
Line 954... Line 944...
-	struct list_head		invalidated;
+	struct list_head	invalidated;
 
 	/* BOs freed, but not yet updated in the PT */
-	struct list_head		freed;
+	struct list_head	freed;
 
+	/* BOs cleared in the PT */
+	struct list_head	cleared;
+
 	/* contains the page directory */
-	struct radeon_bo		*page_directory;
+	struct radeon_bo	*page_directory;
Line 1580... Line 1573...
 	void                    *priv;
 	u32			new_active_crtcs;
 	int			new_active_crtc_count;
 	u32			current_active_crtcs;
 	int			current_active_crtc_count;
+	bool single_display;
 	struct radeon_dpm_dynamic_state dyn_state;
 	struct radeon_dpm_fan fan;
 	u32 tdp_limit;
 	u32 near_tdp_limit;
 	u32 near_tdp_limit_adjusted;
Line 1668... Line 1662...
 	u8                      fan_pulses_per_revolution;
 	u8                      fan_min_rpm;
 	u8                      fan_max_rpm;
 	/* dpm */
 	bool                    dpm_enabled;
+	bool                    sysfs_initialized;
 	struct radeon_dpm       dpm;
 };
 
 int radeon_pm_get_type_index(struct radeon_device *rdev,
Line 1685... Line 1680...
 
 struct radeon_uvd {
 	struct radeon_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
-	void			*saved_bo;
 	atomic_t		handles[RADEON_MAX_UVD_HANDLES];
 	struct drm_file		*filp[RADEON_MAX_UVD_HANDLES];
 	unsigned		img_size[RADEON_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
Line 1722... Line 1716...
 
 /*
  * VCE
  */
 #define RADEON_MAX_VCE_HANDLES	16
-#define RADEON_VCE_STACK_SIZE	(1024*1024)
-#define RADEON_VCE_HEAP_SIZE	(4*1024*1024)
 
 struct radeon_vce {
 	struct radeon_bo	*vcpu_bo;
 	uint64_t		gpu_addr;
 	unsigned		fw_version;
 	unsigned		fb_version;
 	atomic_t		handles[RADEON_MAX_VCE_HANDLES];
 	struct drm_file		*filp[RADEON_MAX_VCE_HANDLES];
 	unsigned		img_size[RADEON_MAX_VCE_HANDLES];
 	struct delayed_work	idle_work;
+	uint32_t		keyselect;
 };
 
Line 1773... Line 1766...
 
 struct r600_audio {
 	bool enabled;
 	struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS];
 	int num_pins;
+	struct radeon_audio_funcs *hdmi_funcs;
+	struct radeon_audio_funcs *dp_funcs;
+	struct radeon_audio_basic_funcs *funcs;
 };
 
 /*
Line 1793... Line 1789...
 void radeon_test_syncing(struct radeon_device *rdev);
 
 /*
  * MMU Notifier
  */
+#if defined(CONFIG_MMU_NOTIFIER)
 int radeon_mn_register(struct radeon_bo *bo, unsigned long addr);
 void radeon_mn_unregister(struct radeon_bo *bo);
+#else
+static inline int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
+{
+	return -ENODEV;
+}
+static inline void radeon_mn_unregister(struct radeon_bo *bo) {}
+#endif
 
 /*
  * Debugfs
Line 1860... Line 1864...
 	int (*mc_wait_for_idle)(struct radeon_device *rdev);
 	/* get the reference clock */
 	u32 (*get_xclk)(struct radeon_device *rdev);
 	/* get the gpu clock counter */
 	uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
+	/* get register for info ioctl */
+	int (*get_allowed_info_register)(struct radeon_device *rdev, u32 reg, u32 *val);
 	/* gart */
 	struct {
 		void (*tlb_flush)(struct radeon_device *rdev);
+		uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
 		void (*set_page)(struct radeon_device *rdev, unsigned i,
-				 uint64_t addr, uint32_t flags);
+				 uint64_t entry);
 	} gart;
 	struct {
 		int (*init)(struct radeon_device *rdev);
 		void (*fini)(struct radeon_device *rdev);
 		void (*copy_pages)(struct radeon_device *rdev,
Line 1983... Line 1990...
 		void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
 		int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
 		bool (*vblank_too_short)(struct radeon_device *rdev);
 		void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
 		void (*enable_bapm)(struct radeon_device *rdev, bool enable);
+		void (*fan_ctrl_set_mode)(struct radeon_device *rdev, u32 mode);
+		u32 (*fan_ctrl_get_mode)(struct radeon_device *rdev);
+		int (*set_fan_speed_percent)(struct radeon_device *rdev, u32 speed);
+		int (*get_fan_speed_percent)(struct radeon_device *rdev, u32 *speed);
+		u32 (*get_current_sclk)(struct radeon_device *rdev);
+		u32 (*get_current_mclk)(struct radeon_device *rdev);
 	} dpm;
 	/* pageflipping */
 	struct {
 		void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
 		bool (*page_flip_pending)(struct radeon_device *rdev, int crtc);
Line 2392... Line 2405...
 	struct mutex			gpu_clock_mutex;
 	/* memory stats */
 	atomic64_t			vram_usage;
 	atomic64_t			gtt_usage;
 	atomic64_t			num_bytes_moved;
+	atomic_t			gpu_reset_counter;
 	/* ACPI interface */
 	struct radeon_atif		atif;
 	struct radeon_atcs		atcs;
 	/* srbm instance registers */
 	struct mutex			srbm_mutex;
2423
void radeon_device_fini(struct radeon_device *rdev);
2437
void radeon_device_fini(struct radeon_device *rdev);
2424
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
2438
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
Line 2425... Line 2439...
2425
 
2439
 
Line -... Line 2440...
-
 
2440
#define RADEON_MIN_MMIO_SIZE 0x10000
-
 
2441
 
2426
#define RADEON_MIN_MMIO_SIZE 0x10000
2442
uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg);
2427
 
2443
void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v);
2428
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
2444
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
2429
				    bool always_indirect)
2445
				    bool always_indirect)
2430
{
2446
{
2431
	/* The mmio size is 64kb at minimum. Allows the if to be optimized out. */
2447
	/* The mmio size is 64kb at minimum. Allows the if to be optimized out. */
2432
	if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
2448
	if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
2433
		return readl(((void __iomem *)rdev->rmmio) + reg);
-
 
2434
	else {
-
 
2435
		unsigned long flags;
-
 
2436
		uint32_t ret;
-
 
2437
 
-
 
2438
		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
-
 
2439
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
2449
		return readl(((void __iomem *)rdev->rmmio) + reg);
2440
		ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
-
 
2441
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
-
 
2442
 
2450
	else
2443
		return ret;
-
 
2444
	}
-
 
2445
}
2451
		return r100_mm_rreg_slow(rdev, reg);
2446
 
2452
}
2447
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
2453
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
2448
				bool always_indirect)
2454
				bool always_indirect)
2449
{
2455
{
2450
	if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
2456
	if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
2451
		writel(v, ((void __iomem *)rdev->rmmio) + reg);
-
 
2452
	else {
-
 
2453
		unsigned long flags;
-
 
2454
 
-
 
2455
		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
-
 
2456
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
2457
		writel(v, ((void __iomem *)rdev->rmmio) + reg);
2457
		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
-
 
2458
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
2458
	else
Line 2459... Line 2459...
2459
	}
2459
		r100_mm_wreg_slow(rdev, reg, v);
2460
}
2460
}
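With this change only the direct-MMIO fast path stays inline; the indexed RADEON_MM_INDEX/RADEON_MM_DATA path moves behind r100_mm_rreg_slow()/r100_mm_wreg_slow(), which are only declared in this header. A plausible out-of-line body for the read side, reconstructed from the inline else-branch removed above (the real definition lives in a .c file that is not part of this diff, so treat this as a sketch):

uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t ret;

	/* Same index/data sequence the removed inline else-branch used. */
	spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
	writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
	ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
	spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
	return ret;
}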
Line 2530... Line 2530...
 		uint32_t tmp_ = RREG32_PLL(reg);		\
 		tmp_ &= (mask);					\
 		tmp_ |= ((val) & ~(mask));			\
 		WREG32_PLL(reg, tmp_);				\
 	} while (0)
+#define WREG32_SMC_P(reg, val, mask)				\
+	do {							\
+		uint32_t tmp_ = RREG32_SMC(reg);		\
+		tmp_ &= (mask);					\
+		tmp_ |= ((val) & ~(mask));			\
+		WREG32_SMC(reg, tmp_);				\
+	} while (0)
 #define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
 #define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
 #define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
 
 #define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index))
 #define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v))
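The new WREG32_SMC_P macro follows the same read-modify-write convention as the other *_P macros above it: mask selects the bits of the current register value to preserve, and the remaining bits are taken from val. A tiny self-contained illustration of that bit arithmetic (plain C, with the SMC register access mocked by a local variable; all names here are illustrative only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg  = 0xAABBCCDD;   /* pretend current SMC register value */
	uint32_t val  = 0x11223344;   /* new value to merge in */
	uint32_t mask = 0x0000FFFF;   /* bits to PRESERVE from the old value */

	uint32_t tmp = reg;
	tmp &= mask;                  /* keep the masked bits of the old value */
	tmp |= (val & ~mask);         /* take the remaining bits from val */

	printf("result: 0x%08X\n", tmp);  /* prints 0x1122CCDD */
	return 0;
}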
Line 2541... Line 2548...
 
-/*
- * Indirect registers accessor
- */
-static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
-{
-	unsigned long flags;
-	uint32_t r;
-
-	spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
-	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
-	r = RREG32(RADEON_PCIE_DATA);
-	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
-	return r;
-}
-
-static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
-	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
-	WREG32(RADEON_PCIE_DATA, (v));
-	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
-}
-
-static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
-{
-	unsigned long flags;
-	u32 r;
-
-	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
-	WREG32(TN_SMC_IND_INDEX_0, (reg));
-	r = RREG32(TN_SMC_IND_DATA_0);
-	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
-	return r;
-}
-
-static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
-	WREG32(TN_SMC_IND_INDEX_0, (reg));
-	WREG32(TN_SMC_IND_DATA_0, (v));
-	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
-}
-
-static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
-{
-	unsigned long flags;
-	u32 r;
-
-	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
-	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
-	r = RREG32(R600_RCU_DATA);
-	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
-	return r;
-}
-
-static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
-	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
-	WREG32(R600_RCU_DATA, (v));
-	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
-}
-
-static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
-{
-	unsigned long flags;
-	u32 r;
-
-	spin_lock_irqsave(&rdev->cg_idx_lock, flags);
-	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
-	r = RREG32(EVERGREEN_CG_IND_DATA);
-	spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
-	return r;
-}
-
-static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&rdev->cg_idx_lock, flags);
-	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
-	WREG32(EVERGREEN_CG_IND_DATA, (v));
-	spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
-}
-
-static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
-{
-	unsigned long flags;
-	u32 r;
-
-	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
-	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
-	r = RREG32(EVERGREEN_PIF_PHY0_DATA);
-	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
-	return r;
-}
-
-static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
-	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
-	WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
-	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
-}
-
-static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
-{
-	unsigned long flags;
-	u32 r;
-
-	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
-	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
-	r = RREG32(EVERGREEN_PIF_PHY1_DATA);
-	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
-	return r;
-}
-
-static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
-	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
-	WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
-	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
-}
-
-static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
-{
-	unsigned long flags;
-	u32 r;
-
-	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
-	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
-	r = RREG32(R600_UVD_CTX_DATA);
-	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
-	return r;
-}
-
-static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
-	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
-	WREG32(R600_UVD_CTX_DATA, (v));
-	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
-}
-
-
-static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
-{
-	unsigned long flags;
-	u32 r;
-
-	spin_lock_irqsave(&rdev->didt_idx_lock, flags);
-	WREG32(CIK_DIDT_IND_INDEX, (reg));
-	r = RREG32(CIK_DIDT_IND_DATA);
-	spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
-	return r;
-}
-
-static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&rdev->didt_idx_lock, flags);
-	WREG32(CIK_DIDT_IND_INDEX, (reg));
-	WREG32(CIK_DIDT_IND_DATA, (v));
-	spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
-}
+/*
+ * Indirect registers accessors.
+ * They used to be inlined, but this increases code size by ~65 kbytes.
+ * Since each performs a pair of MMIO ops
+ * within a spin_lock_irqsave/spin_unlock_irqrestore region,
+ * the cost of call+ret is almost negligible. MMIO and locking
+ * costs several dozens of cycles each at best, call+ret is ~5 cycles.
+ */
+uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
+void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg);
+void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg);
+void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg);
+void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg);
+void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg);
+void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg);
+void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg);
+void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2827
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
2679
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
2828
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
2680
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
2829
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
2681
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
2830
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
2682
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
2831
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
2683
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-
 
2684
#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
2832
#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
2685
#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
2833
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
2686
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
2834
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
2687
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
2835
#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
2688
#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
2836
#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
2689
#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
2837
#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
2690
#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
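With the reshaped gart callbacks, a GART mapping update becomes a two-step operation: encode the hardware page-table entry once with get_page_entry(), then program it with set_page(). A hedged sketch of a caller using the wrapper macros defined in this hunk (dma_addr, flags and gart_idx are illustrative placeholders, and rdev is assumed to be in scope, as these macros expect):

/* Sketch only: assumes the radeon_gart_get_page_entry()/radeon_gart_set_page()
 * wrappers above and a struct radeon_device *rdev in scope. */
uint64_t entry = radeon_gart_get_page_entry(dma_addr, flags);  /* encode the PTE once */
radeon_gart_set_page(rdev, gart_idx, entry);                   /* program the GART slot */
radeon_gart_tlb_flush(rdev);                                   /* make it visible to the GPU */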
Line 2888... Line 2741...
 #define radeon_page_flip_pending(rdev, crtc) (rdev)->asic->pflip.page_flip_pending((rdev), (crtc))
 #define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
 #define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
 #define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
 #define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
+#define radeon_get_allowed_info_register(rdev, r, v) (rdev)->asic->get_allowed_info_register((rdev), (r), (v))
 #define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
 #define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
 #define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
 #define radeon_dpm_late_enable(rdev) rdev->asic->dpm.late_enable((rdev))
 #define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev))
Line 2906... Line 2760...
 #define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
 #define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
 #define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
 #define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
 #define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
+#define radeon_dpm_get_current_sclk(rdev) rdev->asic->dpm.get_current_sclk((rdev))
+#define radeon_dpm_get_current_mclk(rdev) rdev->asic->dpm.get_current_mclk((rdev))
Line 2911... Line 2767...
 
 /* Common functions */
 /* AGP */
 extern int radeon_gpu_reset(struct radeon_device *rdev);
Line 3072... Line 2928...
 			       uint32_t *vline_status);
 
 #include "radeon_object.h"
 
 #define PCI_DEVICE_ID_ATI_RADEON_QY     0x5159
+#define PCI_VENDOR_ID_ATI               0x1002
 
 resource_size_t