Subversion Repositories Kolibri OS

Rev

Rev 5078 | Rev 6104 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 5078 Rev 5271
1
/*
1
/*
2
 * Copyright 2008 Advanced Micro Devices, Inc.
2
 * Copyright 2008 Advanced Micro Devices, Inc.
3
 * Copyright 2008 Red Hat Inc.
3
 * Copyright 2008 Red Hat Inc.
4
 * Copyright 2009 Jerome Glisse.
4
 * Copyright 2009 Jerome Glisse.
5
 *
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a
6
 * Permission is hereby granted, free of charge, to any person obtaining a
7
 * copy of this software and associated documentation files (the "Software"),
7
 * copy of this software and associated documentation files (the "Software"),
8
 * to deal in the Software without restriction, including without limitation
8
 * to deal in the Software without restriction, including without limitation
9
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10
 * and/or sell copies of the Software, and to permit persons to whom the
10
 * and/or sell copies of the Software, and to permit persons to whom the
11
 * Software is furnished to do so, subject to the following conditions:
11
 * Software is furnished to do so, subject to the following conditions:
12
 *
12
 *
13
 * The above copyright notice and this permission notice shall be included in
13
 * The above copyright notice and this permission notice shall be included in
14
 * all copies or substantial portions of the Software.
14
 * all copies or substantial portions of the Software.
15
 *
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22
 * OTHER DEALINGS IN THE SOFTWARE.
22
 * OTHER DEALINGS IN THE SOFTWARE.
23
 *
23
 *
24
 * Authors: Dave Airlie
24
 * Authors: Dave Airlie
25
 *          Alex Deucher
25
 *          Alex Deucher
26
 *          Jerome Glisse
26
 *          Jerome Glisse
27
 */
27
 */
28
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
31
 
31
 
32
 
32
 
33
static inline void *
33
static inline void *
34
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
34
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
35
                      addr_t *dma_handle)
35
                      addr_t *dma_handle)
36
{
36
{
37
 
37
 
38
    size = (size + 0x7FFF) & ~0x7FFF;
38
    size = (size + 0x7FFF) & ~0x7FFF;
39
 
39
 
40
    *dma_handle = AllocPages(size >> 12);
40
    *dma_handle = AllocPages(size >> 12);
41
    return (void*)MapIoMem(*dma_handle, size, PG_SW+PG_NOCACHE);
41
    return (void*)MapIoMem(*dma_handle, size, PG_SW+PG_NOCACHE);
42
}
42
}
43
 
43
 
44
/*
44
/*
45
 * GART
45
 * GART
46
 * The GART (Graphics Aperture Remapping Table) is an aperture
46
 * The GART (Graphics Aperture Remapping Table) is an aperture
47
 * in the GPU's address space.  System pages can be mapped into
47
 * in the GPU's address space.  System pages can be mapped into
48
 * the aperture and look like contiguous pages from the GPU's
48
 * the aperture and look like contiguous pages from the GPU's
49
 * perspective.  A page table maps the pages in the aperture
49
 * perspective.  A page table maps the pages in the aperture
50
 * to the actual backing pages in system memory.
50
 * to the actual backing pages in system memory.
51
 *
51
 *
52
 * Radeon GPUs support both an internal GART, as described above,
52
 * Radeon GPUs support both an internal GART, as described above,
53
 * and AGP.  AGP works similarly, but the GART table is configured
53
 * and AGP.  AGP works similarly, but the GART table is configured
54
 * and maintained by the northbridge rather than the driver.
54
 * and maintained by the northbridge rather than the driver.
55
 * Radeon hw has a separate AGP aperture that is programmed to
55
 * Radeon hw has a separate AGP aperture that is programmed to
56
 * point to the AGP aperture provided by the northbridge and the
56
 * point to the AGP aperture provided by the northbridge and the
57
 * requests are passed through to the northbridge aperture.
57
 * requests are passed through to the northbridge aperture.
58
 * Both AGP and internal GART can be used at the same time, however
58
 * Both AGP and internal GART can be used at the same time, however
59
 * that is not currently supported by the driver.
59
 * that is not currently supported by the driver.
60
 *
60
 *
61
 * This file handles the common internal GART management.
61
 * This file handles the common internal GART management.
62
 */
62
 */
63
 
63
 
64
/*
64
/*
65
 * Common GART table functions.
65
 * Common GART table functions.
66
 */
66
 */
67
/**
67
/**
68
 * radeon_gart_table_ram_alloc - allocate system ram for gart page table
68
 * radeon_gart_table_ram_alloc - allocate system ram for gart page table
69
 *
69
 *
70
 * @rdev: radeon_device pointer
70
 * @rdev: radeon_device pointer
71
 *
71
 *
72
 * Allocate system memory for GART page table
72
 * Allocate system memory for GART page table
73
 * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
73
 * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
74
 * gart table to be in system memory.
74
 * gart table to be in system memory.
75
 * Returns 0 for success, -ENOMEM for failure.
75
 * Returns 0 for success, -ENOMEM for failure.
76
 */
76
 */
77
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
77
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
78
{
78
{
79
	void *ptr;
79
	void *ptr;
80
 
80
 
81
    ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
81
    ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
82
                  &rdev->gart.table_addr);
82
                  &rdev->gart.table_addr);
83
	if (ptr == NULL) {
83
	if (ptr == NULL) {
84
		return -ENOMEM;
84
		return -ENOMEM;
85
	}
85
	}
86
#ifdef CONFIG_X86
86
#ifdef CONFIG_X86
87
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
87
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
88
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
88
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
89
		set_memory_uc((unsigned long)ptr,
89
		set_memory_uc((unsigned long)ptr,
90
			      rdev->gart.table_size >> PAGE_SHIFT);
90
			      rdev->gart.table_size >> PAGE_SHIFT);
91
	}
91
	}
92
#endif
92
#endif
93
	rdev->gart.ptr = ptr;
93
	rdev->gart.ptr = ptr;
94
	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
94
	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
95
	return 0;
95
	return 0;
96
}
96
}
97
 
97
 
98
/**
98
/**
99
 * radeon_gart_table_ram_free - free system ram for gart page table
99
 * radeon_gart_table_ram_free - free system ram for gart page table
100
 *
100
 *
101
 * @rdev: radeon_device pointer
101
 * @rdev: radeon_device pointer
102
 *
102
 *
103
 * Free system memory for GART page table
103
 * Free system memory for GART page table
104
 * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
104
 * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
105
 * gart table to be in system memory.
105
 * gart table to be in system memory.
106
 */
106
 */
107
void radeon_gart_table_ram_free(struct radeon_device *rdev)
107
void radeon_gart_table_ram_free(struct radeon_device *rdev)
108
{
108
{
109
	if (rdev->gart.ptr == NULL) {
109
	if (rdev->gart.ptr == NULL) {
110
		return;
110
		return;
111
	}
111
	}
112
#ifdef CONFIG_X86
112
#ifdef CONFIG_X86
113
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
113
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
114
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
114
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
115
		set_memory_wb((unsigned long)rdev->gart.ptr,
115
		set_memory_wb((unsigned long)rdev->gart.ptr,
116
			      rdev->gart.table_size >> PAGE_SHIFT);
116
			      rdev->gart.table_size >> PAGE_SHIFT);
117
	}
117
	}
118
#endif
118
#endif
119
	rdev->gart.ptr = NULL;
119
	rdev->gart.ptr = NULL;
120
	rdev->gart.table_addr = 0;
120
	rdev->gart.table_addr = 0;
121
}
121
}
122
 
122
 
123
/**
123
/**
124
 * radeon_gart_table_vram_alloc - allocate vram for gart page table
124
 * radeon_gart_table_vram_alloc - allocate vram for gart page table
125
 *
125
 *
126
 * @rdev: radeon_device pointer
126
 * @rdev: radeon_device pointer
127
 *
127
 *
128
 * Allocate video memory for GART page table
128
 * Allocate video memory for GART page table
129
 * (pcie r4xx, r5xx+).  These asics require the
129
 * (pcie r4xx, r5xx+).  These asics require the
130
 * gart table to be in video memory.
130
 * gart table to be in video memory.
131
 * Returns 0 for success, error for failure.
131
 * Returns 0 for success, error for failure.
132
 */
132
 */
133
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
133
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
134
{
134
{
135
    int r;
135
    int r;
136
 
136
 
137
	if (rdev->gart.robj == NULL) {
137
	if (rdev->gart.robj == NULL) {
138
		r = radeon_bo_create(rdev, rdev->gart.table_size,
138
		r = radeon_bo_create(rdev, rdev->gart.table_size,
139
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
139
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
140
				     0, NULL, &rdev->gart.robj);
140
				     0, NULL, NULL, &rdev->gart.robj);
141
        if (r) {
141
        if (r) {
142
            return r;
142
            return r;
143
        }
143
        }
144
    }
144
    }
145
	return 0;
145
	return 0;
146
}
146
}
147
 
147
 
148
/**
148
/**
149
 * radeon_gart_table_vram_pin - pin gart page table in vram
149
 * radeon_gart_table_vram_pin - pin gart page table in vram
150
 *
150
 *
151
 * @rdev: radeon_device pointer
151
 * @rdev: radeon_device pointer
152
 *
152
 *
153
 * Pin the GART page table in vram so it will not be moved
153
 * Pin the GART page table in vram so it will not be moved
154
 * by the memory manager (pcie r4xx, r5xx+).  These asics require the
154
 * by the memory manager (pcie r4xx, r5xx+).  These asics require the
155
 * gart table to be in video memory.
155
 * gart table to be in video memory.
156
 * Returns 0 for success, error for failure.
156
 * Returns 0 for success, error for failure.
157
 */
157
 */
158
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
158
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
159
{
159
{
160
	uint64_t gpu_addr;
160
	uint64_t gpu_addr;
161
	int r;
161
	int r;
162
 
162
 
163
	r = radeon_bo_reserve(rdev->gart.robj, false);
163
	r = radeon_bo_reserve(rdev->gart.robj, false);
164
	if (unlikely(r != 0))
164
	if (unlikely(r != 0))
165
		return r;
165
		return r;
166
	r = radeon_bo_pin(rdev->gart.robj,
166
	r = radeon_bo_pin(rdev->gart.robj,
167
                  RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
167
                  RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
168
    if (r) {
168
    if (r) {
169
		radeon_bo_unreserve(rdev->gart.robj);
169
		radeon_bo_unreserve(rdev->gart.robj);
170
        return r;
170
        return r;
171
    }
171
    }
172
	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
172
	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
173
	if (r)
173
	if (r)
174
		radeon_bo_unpin(rdev->gart.robj);
174
		radeon_bo_unpin(rdev->gart.robj);
175
	radeon_bo_unreserve(rdev->gart.robj);
175
	radeon_bo_unreserve(rdev->gart.robj);
176
	rdev->gart.table_addr = gpu_addr;
176
	rdev->gart.table_addr = gpu_addr;
177
    return r;
177
    return r;
178
}
178
}
179
 
179
 
180
/**
180
/**
181
 * radeon_gart_table_vram_unpin - unpin gart page table in vram
181
 * radeon_gart_table_vram_unpin - unpin gart page table in vram
182
 *
182
 *
183
 * @rdev: radeon_device pointer
183
 * @rdev: radeon_device pointer
184
 *
184
 *
185
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
185
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
186
 * These asics require the gart table to be in video memory.
186
 * These asics require the gart table to be in video memory.
187
 */
187
 */
188
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
188
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
189
{
189
{
190
	int r;
190
	int r;
191
 
191
 
192
	if (rdev->gart.robj == NULL) {
192
	if (rdev->gart.robj == NULL) {
193
		return;
193
		return;
194
	}
194
	}
195
	r = radeon_bo_reserve(rdev->gart.robj, false);
195
	r = radeon_bo_reserve(rdev->gart.robj, false);
196
	if (likely(r == 0)) {
196
	if (likely(r == 0)) {
197
		radeon_bo_kunmap(rdev->gart.robj);
197
		radeon_bo_kunmap(rdev->gart.robj);
198
		radeon_bo_unpin(rdev->gart.robj);
198
		radeon_bo_unpin(rdev->gart.robj);
199
		radeon_bo_unreserve(rdev->gart.robj);
199
		radeon_bo_unreserve(rdev->gart.robj);
200
		rdev->gart.ptr = NULL;
200
		rdev->gart.ptr = NULL;
201
	}
201
	}
202
}
202
}
203
 
203
 
204
/**
204
/**
205
 * radeon_gart_table_vram_free - free gart page table vram
205
 * radeon_gart_table_vram_free - free gart page table vram
206
 *
206
 *
207
 * @rdev: radeon_device pointer
207
 * @rdev: radeon_device pointer
208
 *
208
 *
209
 * Free the video memory used for the GART page table
209
 * Free the video memory used for the GART page table
210
 * (pcie r4xx, r5xx+).  These asics require the gart table to
210
 * (pcie r4xx, r5xx+).  These asics require the gart table to
211
 * be in video memory.
211
 * be in video memory.
212
 */
212
 */
213
void radeon_gart_table_vram_free(struct radeon_device *rdev)
213
void radeon_gart_table_vram_free(struct radeon_device *rdev)
214
{
214
{
215
	if (rdev->gart.robj == NULL) {
215
	if (rdev->gart.robj == NULL) {
216
		return;
216
		return;
217
	}
217
	}
218
	radeon_bo_unref(&rdev->gart.robj);
218
	radeon_bo_unref(&rdev->gart.robj);
219
}
219
}
220
 
220
 
221
/*
221
/*
222
 * Common gart functions.
222
 * Common gart functions.
223
 */
223
 */
224
/**
224
/**
225
 * radeon_gart_unbind - unbind pages from the gart page table
225
 * radeon_gart_unbind - unbind pages from the gart page table
226
 *
226
 *
227
 * @rdev: radeon_device pointer
227
 * @rdev: radeon_device pointer
228
 * @offset: offset into the GPU's gart aperture
228
 * @offset: offset into the GPU's gart aperture
229
 * @pages: number of pages to unbind
229
 * @pages: number of pages to unbind
230
 *
230
 *
231
 * Unbinds the requested pages from the gart page table and
231
 * Unbinds the requested pages from the gart page table and
232
 * replaces them with the dummy page (all asics).
232
 * replaces them with the dummy page (all asics).
233
 */
233
 */
234
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages)
{
	unsigned t;	/* index of the next GPU page table entry */
	unsigned p;	/* index into the per-CPU-page bookkeeping arrays */
	int i, j;
	u64 page_base;

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}
	/* translate the byte offset into GPU-page and CPU-page indices */
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		if (rdev->gart.pages[p]) {
			/* forget the CPU page and retarget the slot at the
			 * dummy page so stray GPU accesses stay harmless */
			rdev->gart.pages[p] = NULL;
			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
			page_base = rdev->gart.pages_addr[p];
			/* NOTE(review): t only advances for slots that were
			 * bound, so t and p desynchronize across holes —
			 * presumably callers always unbind fully-bound
			 * ranges; worth confirming. */
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				if (rdev->gart.ptr) {
					/* table may be unmapped (vram table
					 * not pinned); skip the PTE write */
					radeon_gart_set_page(rdev, t, page_base,
							     RADEON_GART_PAGE_DUMMY);
				}
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	/* order the PTE writes before the GPU sees the TLB flush */
	mb();
	radeon_gart_tlb_flush(rdev);
}
265
 
265
 
266
/**
266
/**
267
 * radeon_gart_bind - bind pages into the gart page table
267
 * radeon_gart_bind - bind pages into the gart page table
268
 *
268
 *
269
 * @rdev: radeon_device pointer
269
 * @rdev: radeon_device pointer
270
 * @offset: offset into the GPU's gart aperture
270
 * @offset: offset into the GPU's gart aperture
271
 * @pages: number of pages to bind
271
 * @pages: number of pages to bind
272
 * @pagelist: pages to bind
272
 * @pagelist: pages to bind
273
 * @dma_addr: DMA addresses of pages
273
 * @dma_addr: DMA addresses of pages
274
 * @flags: RADEON_GART_PAGE_* flags
274
 * @flags: RADEON_GART_PAGE_* flags
275
 *
275
 *
276
 * Binds the requested pages to the gart page table
276
 * Binds the requested pages to the gart page table
277
 * (all asics).
277
 * (all asics).
278
 * Returns 0 for success, -EINVAL for failure.
278
 * Returns 0 for success, -EINVAL for failure.
279
 */
279
 */
280
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
280
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
281
		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
281
		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
282
		     uint32_t flags)
282
		     uint32_t flags)
283
{
283
{
284
    unsigned t;
284
    unsigned t;
285
    unsigned p;
285
    unsigned p;
286
    uint64_t page_base;
286
    uint64_t page_base;
287
    int i, j;
287
    int i, j;
288
 
288
 
289
    if (!rdev->gart.ready) {
289
    if (!rdev->gart.ready) {
290
		WARN(1, "trying to bind memory to uninitialized GART !\n");
290
		WARN(1, "trying to bind memory to uninitialized GART !\n");
291
        return -EINVAL;
291
        return -EINVAL;
292
    }
292
    }
293
	t = offset / RADEON_GPU_PAGE_SIZE;
293
	t = offset / RADEON_GPU_PAGE_SIZE;
294
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
294
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
295
 
295
 
296
    for (i = 0; i < pages; i++, p++) {
296
    for (i = 0; i < pages; i++, p++) {
297
		rdev->gart.pages_addr[p] = dma_addr[i];
297
		rdev->gart.pages_addr[p] = dma_addr[i];
298
        rdev->gart.pages[p] = pagelist[i];
298
        rdev->gart.pages[p] = pagelist[i];
299
		if (rdev->gart.ptr) {
299
		if (rdev->gart.ptr) {
300
		page_base = rdev->gart.pages_addr[p];
300
		page_base = rdev->gart.pages_addr[p];
301
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
301
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
302
				radeon_gart_set_page(rdev, t, page_base, flags);
302
				radeon_gart_set_page(rdev, t, page_base, flags);
303
			page_base += RADEON_GPU_PAGE_SIZE;
303
			page_base += RADEON_GPU_PAGE_SIZE;
304
        }
304
        }
305
    }
305
    }
306
	}
306
	}
307
    mb();
307
    mb();
308
    radeon_gart_tlb_flush(rdev);
308
    radeon_gart_tlb_flush(rdev);
309
    return 0;
309
    return 0;
310
}
310
}
311
 
311
 
312
/**
312
/**
313
 * radeon_gart_init - init the driver info for managing the gart
313
 * radeon_gart_init - init the driver info for managing the gart
314
 *
314
 *
315
 * @rdev: radeon_device pointer
315
 * @rdev: radeon_device pointer
316
 *
316
 *
317
 * Allocate the dummy page and init the gart driver info (all asics).
317
 * Allocate the dummy page and init the gart driver info (all asics).
318
 * Returns 0 for success, error for failure.
318
 * Returns 0 for success, error for failure.
319
 */
319
 */
320
int radeon_gart_init(struct radeon_device *rdev)
{
	int r, i;

    /* already initialized — this function is idempotent */
    if (rdev->gart.pages) {
        return 0;
    }
	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
        DRM_ERROR("Page size is smaller than GPU page size!\n");
        return -EINVAL;
    }
	/* dummy page backs every unbound GART slot */
	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
    /* Compute table size */
    rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
    DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
         rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
    /* Allocate pages table */
	rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
    if (rdev->gart.pages == NULL) {
		/* radeon_gart_fini() tolerates the partially-built state */
		radeon_gart_fini(rdev);
        return -ENOMEM;
    }
	rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
					rdev->gart.num_cpu_pages);
    if (rdev->gart.pages_addr == NULL) {
		radeon_gart_fini(rdev);
        return -ENOMEM;
    }
	/* set GART entry to point to the dummy page by default */
	/* NOTE(review): i is int while num_cpu_pages is printed with %u —
	 * a signed/unsigned comparison; harmless for realistic gtt sizes
	 * but worth confirming */
	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
	}
    return 0;
}
358
 
358
 
359
/**
359
/**
360
 * radeon_gart_fini - tear down the driver info for managing the gart
360
 * radeon_gart_fini - tear down the driver info for managing the gart
361
 *
361
 *
362
 * @rdev: radeon_device pointer
362
 * @rdev: radeon_device pointer
363
 *
363
 *
364
 * Tear down the gart driver info and free the dummy page (all asics).
364
 * Tear down the gart driver info and free the dummy page (all asics).
365
 */
365
 */
366
void radeon_gart_fini(struct radeon_device *rdev)
366
void radeon_gart_fini(struct radeon_device *rdev)
367
{
367
{
368
	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
368
	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
369
		/* unbind pages */
369
		/* unbind pages */
370
		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
370
		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
371
	}
371
	}
372
	rdev->gart.ready = false;
372
	rdev->gart.ready = false;
373
	vfree(rdev->gart.pages);
373
	vfree(rdev->gart.pages);
374
	vfree(rdev->gart.pages_addr);
374
	vfree(rdev->gart.pages_addr);
375
	rdev->gart.pages = NULL;
375
	rdev->gart.pages = NULL;
376
	rdev->gart.pages_addr = NULL;
376
	rdev->gart.pages_addr = NULL;
377
 
377
 
378
	radeon_dummy_page_fini(rdev);
378
	radeon_dummy_page_fini(rdev);
379
}
379
}