/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse
 *    Thomas Hellstrom
 *    Dave Airlie
 */
#include <linux/list.h>     /* reconstructed: the bracketed header names were
                               lost; these two match the upstream file */
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
#include <drm_mm.h>         /* assumed header for the drm_mm allocator used below */
#include "radeon_object.h"

int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
                     int pages, u32_t *pagelist);

static struct drm_mm mm_gtt;
static struct drm_mm mm_vram;

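/* Set up the two page-granular allocator heaps used by this port: a VRAM
 * heap that leaves the first 0xC00000 bytes (12 MiB) of video memory
 * untouched, and a GTT heap covering the whole GART aperture. */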
int radeon_object_init(struct radeon_device *rdev)
{
    int r = 0;

    ENTER();

    r = drm_mm_init(&mm_vram, 0xC00000 >> PAGE_SHIFT,
                    ((rdev->mc.real_vram_size - 0xC00000) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing VRAM heap.\n");
        return r;
    }

    r = drm_mm_init(&mm_gtt, 0, ((rdev->mc.gtt_size) >> PAGE_SHIFT));
    if (r) {
        DRM_ERROR("Failed initializing GTT heap.\n");
        return r;
    }

    return r;
//    return radeon_ttm_init(rdev);
}
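
/* Translate a RADEON_GEM_DOMAIN_* mask into TTM placement flags, falling
 * back to system memory when no recognised domain bit is set. */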
static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
{
    uint32_t flags = 0;
    if (domain & RADEON_GEM_DOMAIN_VRAM) {
        flags |= TTM_PL_FLAG_VRAM;
    }
    if (domain & RADEON_GEM_DOMAIN_GTT) {
        flags |= TTM_PL_FLAG_TT;
    }
    if (domain & RADEON_GEM_DOMAIN_CPU) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    if (!flags) {
        flags |= TTM_PL_FLAG_SYSTEM;
    }
    return flags;
}
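
/* Allocate a buffer object and carve out address space for it from the
 * VRAM or GTT heap, depending on the requested domain.  The TTM object
 * itself is not initialised in this port; only the drm_mm node and the
 * page-granular address are recorded. */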
int radeon_object_create(struct radeon_device *rdev,
                         struct drm_gem_object *gobj,
                         unsigned long size,
                         bool kernel,
                         uint32_t domain,
                         bool interruptible,
                         struct radeon_object **robj_ptr)
{
    struct radeon_object *robj;
    enum ttm_bo_type type;
    uint32_t flags;
    int r = 0;      /* nothing below sets r on the pure system-memory path,
                       so start it at "success" */

    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *robj_ptr = NULL;
    robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
    if (robj == NULL) {
        return -ENOMEM;
    }
    robj->rdev = rdev;
//    robj->gobj = gobj;
    INIT_LIST_HEAD(&robj->list);

    flags = radeon_object_flags_from_domain(domain);

    robj->flags = flags;

    if (flags & TTM_PL_FLAG_VRAM)
    {
        size_t num_pages;
        struct drm_mm_node *vm_node;

        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (num_pages == 0) {
            dbgprintf("Illegal buffer object size.\n");
            kfree(robj);        /* don't leak the object on error */
            return -EINVAL;
        }
retry_pre_get:
        r = drm_mm_pre_get(&mm_vram);

        if (unlikely(r != 0)) {
            kfree(robj);
            return r;
        }

        vm_node = drm_mm_search_free(&mm_vram, num_pages, 0, 0);

        if (unlikely(vm_node == NULL)) {
            kfree(robj);
            return -ENOMEM;
        }

        robj->mm_node = drm_mm_get_block_atomic(vm_node, num_pages, 0);

        if (unlikely(robj->mm_node == NULL)) {
            goto retry_pre_get;
        }

        robj->vm_addr = ((uint32_t)robj->mm_node->start);

//        dbgprintf("alloc vram: base %x size %x\n",
//                  robj->vm_addr << PAGE_SHIFT, num_pages << PAGE_SHIFT);
    }

    if (flags & TTM_PL_FLAG_TT)
    {
        size_t num_pages;
        struct drm_mm_node *vm_node;

        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (num_pages == 0) {
            dbgprintf("Illegal buffer object size.\n");
            kfree(robj);
            return -EINVAL;
        }
retry_pre_get1:
        r = drm_mm_pre_get(&mm_gtt);

        if (unlikely(r != 0)) {
            kfree(robj);
            return r;
        }

        vm_node = drm_mm_search_free(&mm_gtt, num_pages, 0, 0);

        if (unlikely(vm_node == NULL)) {
            kfree(robj);
            return -ENOMEM;
        }

        robj->mm_node = drm_mm_get_block_atomic(vm_node, num_pages, 0);

        if (unlikely(robj->mm_node == NULL)) {
            goto retry_pre_get1;
        }

        robj->vm_addr = ((uint32_t)robj->mm_node->start);

//        dbgprintf("alloc gtt: base %x size %x\n",
//                  robj->vm_addr << PAGE_SHIFT, num_pages << PAGE_SHIFT);
    }

//    r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
//                               0, 0, false, NULL, size,
//                               &radeon_ttm_object_object_destroy);
    if (unlikely(r != 0)) {
        /* ttm calls radeon_ttm_object_object_destroy if an error happens */
        DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
                  size, flags, 0);
        return r;
    }
    *robj_ptr = robj;
//    if (gobj) {
//        list_add_tail(&robj->list, &rdev->gem.objects);
//    }
    return 0;
}
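
/*
 * Typical call sequence (a sketch based on the functions in this file;
 * error handling omitted):
 *
 *     struct radeon_object *robj;
 *     uint64_t gpu_addr;
 *     void *ptr;
 *
 *     radeon_object_create(rdev, NULL, 4096, true,
 *                          RADEON_GEM_DOMAIN_VRAM, false, &robj);
 *     radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *     radeon_object_kmap(robj, &ptr);
 */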

/* KolibriOS keeps the CPU page tables mapped at this fixed virtual
 * address; indexing it by virtual page number yields the PTEs (and thus
 * the physical pages) backing any kernel pointer. */
#define page_tabs 0xFDC00000

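/* Pin a buffer object and return its GPU address.  VRAM objects are
 * offset by the VRAM aperture location; GTT objects get backing pages
 * from KernelAlloc() which are then bound into the GART.  Repeated pins
 * only bump the reference count. */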
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
                      uint64_t *gpu_addr)
{
    uint32_t flags;
    uint32_t tmp;
    int r = 0;

//    flags = radeon_object_flags_from_domain(domain);
//    spin_lock(&robj->tobj.lock);
    if (robj->pin_count) {
        robj->pin_count++;
        if (gpu_addr != NULL) {
            *gpu_addr = robj->gpu_addr;
        }
//        spin_unlock(&robj->tobj.lock);
        return 0;
    }
//    spin_unlock(&robj->tobj.lock);
//    r = radeon_object_reserve(robj, false);
//    if (unlikely(r != 0)) {
//        DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
//        return r;
//    }
//    tmp = robj->tobj.mem.placement;
//    ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
//    robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
//    r = ttm_buffer_object_validate(&robj->tobj,
//                                   robj->tobj.proposed_placement,
//                                   false, false);

    robj->gpu_addr = ((u64)robj->vm_addr) << PAGE_SHIFT;

    if (robj->flags & TTM_PL_FLAG_VRAM)
        robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
    else if (robj->flags & TTM_PL_FLAG_TT)
    {
        u32_t *pagelist;
        robj->kptr = KernelAlloc(robj->mm_node->size << PAGE_SHIFT);
        dbgprintf("kernel alloc %x\n", robj->kptr);

        pagelist = &((u32_t*)page_tabs)[(u32_t)robj->kptr >> 12];
        dbgprintf("pagelist %x\n", pagelist);
        radeon_gart_bind(robj->rdev, robj->gpu_addr,
                         robj->mm_node->size, pagelist);
        robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
    }
    else
    {
        DRM_ERROR("Unknown placement %d\n", robj->flags);
        robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
        r = -1;
    }

//    flags & TTM_PL_FLAG_VRAM
    if (gpu_addr != NULL) {
        *gpu_addr = robj->gpu_addr;
    }
    robj->pin_count = 1;
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to pin object.\n");
    }

    return r;
}
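
/* Map a buffer object into kernel space.  Only VRAM objects are mapped
 * here (through the PCI aperture with MapIoMem); a GTT object's kptr is
 * already set at pin time via KernelAlloc. */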
int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
//    spin_lock(&robj->tobj.lock);
    if (robj->kptr) {
        if (ptr) {
            *ptr = robj->kptr;
        }
//        spin_unlock(&robj->tobj.lock);
        return 0;
    }
//    spin_unlock(&robj->tobj.lock);

    if (robj->flags & TTM_PL_FLAG_VRAM)
    {
        robj->cpu_addr = robj->rdev->mc.aper_base +
                         (robj->vm_addr << PAGE_SHIFT);
        robj->kptr = (void*)MapIoMem(robj->cpu_addr,
                                     robj->mm_node->size << 12, PG_SW);
    }
    else
    {
        return -1;
    }

    if (ptr) {
        *ptr = robj->kptr;
    }

    return 0;
}
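
/* Drop a kernel mapping made by radeon_object_kmap().  Only VRAM
 * mappings are released; a GTT object's kptr doubles as its backing
 * store, so it is left alone. */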
void radeon_object_kunmap(struct radeon_object *robj)
{
//    spin_lock(&robj->tobj.lock);
    if (robj->kptr == NULL) {
//        spin_unlock(&robj->tobj.lock);
        return;
    }

    if (robj->flags & TTM_PL_FLAG_VRAM)
    {
        FreeKernelSpace(robj->kptr);
        robj->kptr = NULL;
    }
//    spin_unlock(&robj->tobj.lock);
}
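
/* Drop a pin reference.  When the last reference goes away the object's
 * address-space block is returned to its heap and the object is freed. */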
void radeon_object_unpin(struct radeon_object *robj)
{
//    spin_lock(&robj->tobj.lock);
    if (!robj->pin_count) {
//        spin_unlock(&robj->tobj.lock);
        printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
        return;
    }
    robj->pin_count--;
    if (robj->pin_count) {
//        spin_unlock(&robj->tobj.lock);
        return;
    }
//    spin_unlock(&robj->tobj.lock);

    drm_mm_put_block(robj->mm_node);

    kfree(robj);
}

#if 0

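/* Everything from here to the matching #endif is the TTM-based
 * implementation of these helpers; it is compiled out in this port. */
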
/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
    return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}

static void radeon_object_unreserve(struct radeon_object *robj)
{
    ttm_bo_unreserve(&robj->tobj);
}

static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
{
    struct radeon_object *robj;

    robj = container_of(tobj, struct radeon_object, tobj);
//    list_del_init(&robj->list);
    kfree(robj);
}

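/* Compute the GPU-visible address of a validated TTM object from its
 * mm_node offset plus the aperture base of whichever pool it sits in. */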
static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
    /* Default gpu address */
    robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
    if (robj->tobj.mem.mm_node == NULL) {
        return;
    }
    robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
    switch (robj->tobj.mem.mem_type) {
    case TTM_PL_VRAM:
        robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
        break;
    case TTM_PL_TT:
        robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
        break;
    default:
        DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
        robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
        return;
    }
}

int radeon_object_create(struct radeon_device *rdev,
                         struct drm_gem_object *gobj,
                         unsigned long size,
                         bool kernel,
                         uint32_t domain,
                         bool interruptible,
                         struct radeon_object **robj_ptr)
{
    struct radeon_object *robj;
    enum ttm_bo_type type;
    uint32_t flags;
    int r;

//    if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
//        rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
//    }
    if (kernel) {
        type = ttm_bo_type_kernel;
    } else {
        type = ttm_bo_type_device;
    }
    *robj_ptr = NULL;
    robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
    if (robj == NULL) {
        return -ENOMEM;
    }
    robj->rdev = rdev;
    robj->gobj = gobj;
//    INIT_LIST_HEAD(&robj->list);

    flags = radeon_object_flags_from_domain(domain);
//    r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
//                               0, 0, false, NULL, size,
//                               &radeon_ttm_object_object_destroy);
    if (unlikely(r != 0)) {
        /* ttm calls radeon_ttm_object_object_destroy if an error happens */
        DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
                  size, flags, 0);
        return r;
    }
    *robj_ptr = robj;
//    if (gobj) {
//        list_add_tail(&robj->list, &rdev->gem.objects);
//    }
    return 0;
}

int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
    int r;

//    spin_lock(&robj->tobj.lock);
    if (robj->kptr) {
        if (ptr) {
            *ptr = robj->kptr;
        }
//        spin_unlock(&robj->tobj.lock);
        return 0;
    }
//    spin_unlock(&robj->tobj.lock);
    r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
    if (r) {
        return r;
    }
//    spin_lock(&robj->tobj.lock);
    robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
//    spin_unlock(&robj->tobj.lock);
    if (ptr) {
        *ptr = robj->kptr;
    }
    return 0;
}

void radeon_object_kunmap(struct radeon_object *robj)
{
//    spin_lock(&robj->tobj.lock);
    if (robj->kptr == NULL) {
//        spin_unlock(&robj->tobj.lock);
        return;
    }
    robj->kptr = NULL;
//    spin_unlock(&robj->tobj.lock);
    ttm_bo_kunmap(&robj->kmap);
}

void radeon_object_unref(struct radeon_object **robj)
{
    struct ttm_buffer_object *tobj;

    if ((*robj) == NULL) {
        return;
    }
    tobj = &((*robj)->tobj);
    ttm_bo_unref(&tobj);
    if (tobj == NULL) {
        *robj = NULL;
    }
}

int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
    *offset = robj->tobj.addr_space_offset;
    return 0;
}

int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
                      uint64_t *gpu_addr)
{
    uint32_t flags;
    uint32_t tmp;
    int r;

    flags = radeon_object_flags_from_domain(domain);
//    spin_lock(&robj->tobj.lock);
    if (robj->pin_count) {
        robj->pin_count++;
        if (gpu_addr != NULL) {
            *gpu_addr = robj->gpu_addr;
        }
//        spin_unlock(&robj->tobj.lock);
        return 0;
    }
//    spin_unlock(&robj->tobj.lock);
    r = radeon_object_reserve(robj, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
        return r;
    }
    tmp = robj->tobj.mem.placement;
    ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
    robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
    r = ttm_buffer_object_validate(&robj->tobj,
                                   robj->tobj.proposed_placement,
                                   false, false);
    radeon_object_gpu_addr(robj);
    if (gpu_addr != NULL) {
        *gpu_addr = robj->gpu_addr;
    }
    robj->pin_count = 1;
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to pin object.\n");
    }
    radeon_object_unreserve(robj);
    return r;
}

void radeon_object_unpin(struct radeon_object *robj)
{
    uint32_t flags;
    int r;

//    spin_lock(&robj->tobj.lock);
    if (!robj->pin_count) {
//        spin_unlock(&robj->tobj.lock);
        printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
        return;
    }
    robj->pin_count--;
    if (robj->pin_count) {
//        spin_unlock(&robj->tobj.lock);
        return;
    }
//    spin_unlock(&robj->tobj.lock);
    r = radeon_object_reserve(robj, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
        return;
    }
    flags = robj->tobj.mem.placement;
    robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
    r = ttm_buffer_object_validate(&robj->tobj,
                                   robj->tobj.proposed_placement,
                                   false, false);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to unpin buffer.\n");
    }
    radeon_object_unreserve(robj);
}

int radeon_object_wait(struct radeon_object *robj)
{
    int r = 0;

    /* FIXME: should use block reservation instead */
    r = radeon_object_reserve(robj, true);
    if (unlikely(r != 0)) {
        DRM_ERROR("radeon: failed to reserve object for waiting.\n");
        return r;
    }
//    spin_lock(&robj->tobj.lock);
    if (robj->tobj.sync_obj) {
        r = ttm_bo_wait(&robj->tobj, true, false, false);
    }
//    spin_unlock(&robj->tobj.lock);
    radeon_object_unreserve(robj);
    return r;
}

int radeon_object_evict_vram(struct radeon_device *rdev)
{
    if (rdev->flags & RADEON_IS_IGP) {
        /* Useless to evict on IGP chips */
        return 0;
    }
    return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_object_force_delete(struct radeon_device *rdev)
{
    struct radeon_object *robj, *n;
    struct drm_gem_object *gobj;

    if (list_empty(&rdev->gem.objects)) {
        return;
    }
    DRM_ERROR("Userspace still has active objects !\n");
    list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
        mutex_lock(&rdev->ddev->struct_mutex);
        gobj = robj->gobj;
        DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
                  gobj, robj, (unsigned long)gobj->size,
                  *((unsigned long *)&gobj->refcount));
        list_del_init(&robj->list);
        radeon_object_unref(&robj);
        gobj->driver_private = NULL;
        drm_gem_object_unreference(gobj);
        mutex_unlock(&rdev->ddev->struct_mutex);
    }
}

void radeon_object_fini(struct radeon_device *rdev)
{
    radeon_ttm_fini(rdev);
}

void radeon_object_list_add_object(struct radeon_object_list *lobj,
                                   struct list_head *head)
{
    if (lobj->wdomain) {
        list_add(&lobj->list, head);
    } else {
        list_add_tail(&lobj->list, head);
    }
}

int radeon_object_list_reserve(struct list_head *head)
{
    struct radeon_object_list *lobj;
    struct list_head *i;
    int r;

    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        if (!lobj->robj->pin_count) {
            r = radeon_object_reserve(lobj->robj, true);
            if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to reserve object.\n");
                return r;
            }
        }
    }
    return 0;
}

void radeon_object_list_unreserve(struct list_head *head)
{
    struct radeon_object_list *lobj;
    struct list_head *i;

    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        if (!lobj->robj->pin_count) {
            radeon_object_unreserve(lobj->robj);
        }
    }
}

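/* Reserve and validate every object on a CS reloc list, record each
 * object's GPU offset, and replace its sync object with the new fence. */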
int radeon_object_list_validate(struct list_head *head, void *fence)
{
    struct radeon_object_list *lobj;
    struct radeon_object *robj;
    struct radeon_fence *old_fence = NULL;
    struct list_head *i;
    uint32_t flags;
    int r;

    r = radeon_object_list_reserve(head);
    if (unlikely(r != 0)) {
        radeon_object_list_unreserve(head);
        return r;
    }
    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        robj = lobj->robj;
        if (lobj->wdomain) {
            flags = radeon_object_flags_from_domain(lobj->wdomain);
            flags |= TTM_PL_FLAG_TT;
        } else {
            flags = radeon_object_flags_from_domain(lobj->rdomain);
            flags |= TTM_PL_FLAG_TT;
            flags |= TTM_PL_FLAG_VRAM;
        }
        if (!robj->pin_count) {
            robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
            r = ttm_buffer_object_validate(&robj->tobj,
                                           robj->tobj.proposed_placement,
                                           true, false);
            if (unlikely(r)) {
                radeon_object_list_unreserve(head);
                DRM_ERROR("radeon: failed to validate.\n");
                return r;
            }
            radeon_object_gpu_addr(robj);
        }
        lobj->gpu_offset = robj->gpu_addr;
        if (fence) {
            old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
            robj->tobj.sync_obj = radeon_fence_ref(fence);
            robj->tobj.sync_obj_arg = NULL;
        }
        if (old_fence) {
            radeon_fence_unref(&old_fence);
        }
    }
    return 0;
}

void radeon_object_list_unvalidate(struct list_head *head)
{
    struct radeon_object_list *lobj;
    struct radeon_fence *old_fence = NULL;
    struct list_head *i;

    list_for_each(i, head) {
        lobj = list_entry(i, struct radeon_object_list, list);
        old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
        lobj->robj->tobj.sync_obj = NULL;
        if (old_fence) {
            radeon_fence_unref(&old_fence);
        }
    }
    radeon_object_list_unreserve(head);
}

void radeon_object_list_clean(struct list_head *head)
{
    radeon_object_list_unreserve(head);
}

int radeon_object_fbdev_mmap(struct radeon_object *robj,
                             struct vm_area_struct *vma)
{
    return ttm_fbdev_mmap(vma, &robj->tobj);
}

#endif

unsigned long radeon_object_size(struct radeon_object *robj)
{
    return robj->tobj.num_pages << PAGE_SHIFT;
}