/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU
 * ring progression, what comes after "last" is the oldest bo we
 * allocated and thus the first one that should no longer be in use by
 * the GPU.
 *
 * If that's not the case, we skip over the bo after "last" to the
 * closest done bo, if one exists. If none exists and we are not asked
 * to block, we report failure to allocate.
 *
 * If we are asked to block, we collect the oldest fence of each ring
 * and wait for any one of those fences to complete.
 */
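/*
 * The state the allocator maintains, sketched:
 *
 *   offset 0                                                 size
 *   |  bo A  |  bo B  |  bo C  |.......... free ...............|
 *                              ^ hole (last allocation; new bos
 *                                go right after it)
 *
 * Freed but still fenced bos are kept on per-ring "flist" lists in
 * submission order, so the head of each list carries that ring's
 * oldest fence.
 */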
#include <drm/drmP.h>
#include "radeon.h"

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);

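/**
 * radeon_sa_bo_manager_init - init the suballocator manager
 *
 * @rdev: radeon device
 * @sa_manager: manager to initialize
 * @size: size in bytes of the backing buffer object
 * @domain: memory domain (GTT/VRAM) for the backing bo
 *
 * Initializes the bookkeeping (wait queue, ordered list, per-ring
 * fence lists) and creates the backing buffer object. Returns 0 on
 * success or a negative error code.
 */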
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned size, u32 domain)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
			     domain, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

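/**
 * radeon_sa_bo_manager_fini - tear down the suballocator manager
 *
 * @rdev: radeon device
 * @sa_manager: manager to tear down
 *
 * Frees whatever sub-allocations can still be reclaimed (warning if
 * some remain active) and drops the reference on the backing bo.
 */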
void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		radeon_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		radeon_sa_bo_remove_locked(sa_bo);
	}
	radeon_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
}

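/**
 * radeon_sa_bo_manager_start - make the suballocator usable
 *
 * @rdev: radeon device
 * @sa_manager: manager to start
 *
 * Pins the backing bo into its domain and kmaps it, giving the
 * suballocator a valid GPU address and CPU pointer.
 */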
int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}

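/**
 * radeon_sa_bo_manager_suspend - unmap and unpin the backing bo
 *
 * @rdev: radeon device
 * @sa_manager: manager to suspend
 *
 * Reverse of radeon_sa_bo_manager_start(), e.g. for device suspend.
 */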
int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}

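/*
 * Unlink a sub-allocation from the ordered and fence lists and free
 * it. If "hole" pointed at this bo, move it back to the previous
 * entry so the vacated range becomes allocatable again. Callers are
 * expected to hold sa_manager->wq.lock (hence the _locked suffix).
 */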
static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
	struct radeon_sa_manager *sa_manager = sa_bo->manager;
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	radeon_fence_unref(&sa_bo->fence);
	kfree(sa_bo);
}

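/*
 * Starting right after the hole, free consecutive sub-allocations
 * whose fences have already signaled; stop at the first one still in
 * flight.
 */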
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
			return;
		}
		radeon_sa_bo_remove_locked(sa_bo);
	}
}

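/* Start offset of the hole: the end offset of the bo the hole points
 * at, or 0 when the hole is at the head of the list. */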
static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
	}
	return 0;
}

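/* End offset of the hole: the start offset of the next bo, or the
 * manager size when the hole extends to the end of the buffer. */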
static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

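/*
 * Try to place a sub-allocation of @size bytes (aligned to @align) in
 * the current hole. On success the new bo itself becomes the hole,
 * i.e. the next allocation is attempted right after it; "wasted" is
 * the padding needed to reach the alignment.
 */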
static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
				   struct radeon_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * radeon_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly
 */
static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

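/*
 * Advance the hole past already-signaled sub-allocations. If the hole
 * is at the end of the buffer, wrap around to the beginning.
 * Otherwise pick, across all rings, the signaled bo closest after the
 * hole and remove it. For rings whose oldest bo has not signaled yet,
 * record that fence in @fences so the caller can wait on it. Returns
 * true if the hole moved and allocation is worth retrying.
 */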
static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
				   struct radeon_fence **fences,
				   unsigned *tries)
{
	struct radeon_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all the fence lists and try to find the sa_bo
	 * closest to the current last
	 */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct radeon_sa_bo, flist);

		if (!radeon_fence_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		++tries[best_bo->fence->ring];
		sa_manager->hole = best_bo->olist.prev;

		/* we know that this one is signaled,
		 * so it's safe to remove it
		 */
		radeon_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

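/**
 * radeon_sa_bo_new - allocate a new sub-allocation
 *
 * @rdev: radeon device
 * @sa_manager: manager to allocate from
 * @sa_bo: stores the resulting sub-allocation
 * @size: size of the allocation in bytes
 * @align: required alignment, at most RADEON_GPU_PAGE_SIZE
 * @block: whether to wait on fences when no space is free
 *
 * Tries the fast path (allocate right after the hole), then reclaims
 * signaled sub-allocations, then waits on the oldest fences. Returns
 * 0 on success or a negative error code.
 */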
int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo **sa_bo,
		     unsigned size, unsigned align, bool block)
{
	struct radeon_fence *fences[RADEON_NUM_RINGS];
	unsigned tries[RADEON_NUM_RINGS];
	int i, r;

	BUG_ON(align > RADEON_GPU_PAGE_SIZE);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	do {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			radeon_sa_bo_try_free(sa_manager);

			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));

		spin_unlock(&sa_manager->wq.lock);
		r = radeon_fence_wait_any(rdev, fences, false);
		spin_lock(&sa_manager->wq.lock);
		/* if we have nothing to wait for, block */
		if (r == -ENOENT && block) {
			/* the blocking wait is commented out here, so a
			 * blocking request falls through and the -ENOENT
			 * is returned to the caller
			 */
//			r = wait_event_interruptible_locked(
//				sa_manager->wq,
//				radeon_sa_event(sa_manager, size, align)
//			);

		} else if (r == -ENOENT) {
			r = -ENOMEM;
		}

	} while (!r);

	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

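/**
 * radeon_sa_bo_free - free a sub-allocation
 *
 * @rdev: radeon device
 * @sa_bo: sub-allocation to free, set to NULL on return
 * @fence: fence of the last command stream using the allocation, or NULL
 *
 * If @fence has not signaled yet, the bo is queued on its ring's fence
 * list and reclaimed later; otherwise it is removed immediately.
 */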
void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
		       struct radeon_fence *fence)
{
	struct radeon_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !radeon_fence_signaled(fence)) {
		(*sa_bo)->fence = radeon_fence_ref(fence);
		list_add_tail(&(*sa_bo)->flist,
			      &sa_manager->flist[fence->ring]);
	} else {
		radeon_sa_bo_remove_locked(*sa_bo);
	}
	/* waking up waiters is commented out along with the blocking wait */
//	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}

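/*
 * Dump the ordered list of sub-allocations to a debugfs seq_file; the
 * entry the hole currently points at is prefixed with '>'.
 */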
#if defined(CONFIG_DEBUG_FS)
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct radeon_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%08x 0x%08x] size %8d",
			   i->soffset, i->eoffset, i->eoffset - i->soffset);
		if (i->fence) {
			seq_printf(m, " protected by 0x%016llx on ring %d",
				   i->fence->seq, i->fence->ring);
		}
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif