/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse
 *    Dave Airlie
 */
#include
#include
#include
#include
#include
#include
#include
#include "radeon_reg.h"
#include "radeon.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */
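
/*
 * Aside: a standalone userspace sketch of the scheme described above, not
 * driver code; every name in it is invented for illustration. A fence is
 * just a sequence number per ring: emitting bumps the ring's counter, and
 * the fence counts as signaled once the value the hardware wrote back
 * catches up with it.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring_model {
    uint64_t sync_seq;  /* last sequence number handed out by "emit" */
    uint64_t last_seq;  /* last sequence number the "hardware" wrote */
};

static uint64_t model_fence_emit(struct ring_model *ring)
{
    return ++ring->sync_seq;            /* cf. radeon_fence_emit() */
}

static bool model_fence_signaled(const struct ring_model *ring, uint64_t seq)
{
    return ring->last_seq >= seq;       /* cf. radeon_fence_seq_signaled() */
}

int main(void)
{
    struct ring_model ring = { 0, 0 };
    uint64_t fence = model_fence_emit(&ring);

    printf("before write: %d\n", model_fence_signaled(&ring, fence)); /* 0 */
    ring.last_seq = ring.sync_seq;      /* the GPU writes the fence value back */
    printf("after write:  %d\n", model_fence_signaled(&ring, fence)); /* 1 */
    return 0;
}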

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
    struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
    if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
        *drv->cpu_addr = cpu_to_le32(seq);
    } else {
        WREG32(drv->scratch_reg, seq);
    }
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
    struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
    u32 seq = 0;

    if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
        seq = le32_to_cpu(*drv->cpu_addr);
    } else {
        seq = RREG32(drv->scratch_reg);
    }
    return seq;
}
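
/*
 * Aside: the writeback path above stores the fence value in little-endian
 * byte order (cpu_to_le32 on write, le32_to_cpu on read) so the slot layout
 * matches what the GPU writes regardless of host endianness. A standalone
 * sketch of the same round-trip, using the glibc byte-order helpers:
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t seq = 0x12345678;
    uint32_t slot = htole32(seq);   /* what lands in the writeback slot */
    uint32_t back = le32toh(slot);  /* what the CPU reads out again */

    printf("seq 0x%08x -> slot 0x%08x -> read back 0x%08x\n", seq, slot, back);
    return back == seq ? 0 : 1;
}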

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
              struct radeon_fence **fence,
              int ring)
{
    /* we are protected by the ring emission mutex */
    *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
    if ((*fence) == NULL) {
        return -ENOMEM;
    }
    kref_init(&((*fence)->kref));
    (*fence)->rdev = rdev;
    (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
    (*fence)->ring = ring;
    radeon_fence_ring_emit(rdev, ring, *fence);
    // trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
    return 0;
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
    uint64_t seq, last_seq, last_emitted;
    unsigned count_loop = 0;
    bool wake = false;

    /* Note there is a scenario here for an infinite loop but it's
     * very unlikely to happen. For it to happen, the current polling
     * process needs to be interrupted by another process, and that other
     * process needs to update last_seq between the atomic read and
     * xchg of the current process.
     *
     * Moreover, for this to turn into an infinite loop there would need
     * to be a continuous stream of newly signaled fences, i.e.
     * radeon_fence_read needs to return a different value each time for
     * both the currently polling process and the other process that
     * xchgs last_seq between the atomic read and xchg of the current
     * process. And the value the other process sets as last seq must be
     * higher than the seq value we just read. Which means the current
     * process needs to be interrupted after radeon_fence_read and before
     * the atomic xchg.
     *
     * To be even safer, we count the number of times we loop and bail
     * after 10 loops, just accepting the fact that we might have
     * temporarily set last_seq not to the true last signaled seq but to
     * an older one.
     */
    last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
    do {
        last_emitted = rdev->fence_drv[ring].sync_seq[ring];
        seq = radeon_fence_read(rdev, ring);
        seq |= last_seq & 0xffffffff00000000LL;
        if (seq < last_seq) {
            seq &= 0xffffffff;
            seq |= last_emitted & 0xffffffff00000000LL;
        }

        if (seq <= last_seq || seq > last_emitted) {
            break;
        }
        /* If we loop over, we don't want to return without checking if
         * a fence is signaled, as it means the seq we just read is
         * different from the previous one.
         */
        wake = true;
        last_seq = seq;
        if ((count_loop++) > 10) {
            /* We looped over too many times; leave with the fact that
             * we might have set an older fence seq than the real last
             * seq signaled by the hw.
             */
            break;
        }
    } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

    if (wake) {
        rdev->fence_drv[ring].last_activity = GetTimerTicks();
        wake_up_all(&rdev->fence_queue);
    }
}
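
/*
 * Aside: a runnable standalone model (names invented) of the 32->64 bit
 * sequence extension in radeon_fence_process() above. The hardware only
 * writes the low 32 bits, so the upper half is borrowed from last_seq,
 * and from last_emitted when the low half has wrapped around.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t extend_seq(uint32_t hw_seq, uint64_t last_seq,
                           uint64_t last_emitted)
{
    uint64_t seq = hw_seq | (last_seq & 0xffffffff00000000ULL);
    if (seq < last_seq) {
        /* low 32 bits wrapped: take the upper half of the newest
         * emitted sequence instead */
        seq &= 0xffffffffULL;
        seq |= last_emitted & 0xffffffff00000000ULL;
    }
    return seq;
}

int main(void)
{
    /* no wrap: hardware wrote 0x10 while last_seq was 0x1_00000005 */
    printf("0x%" PRIx64 "\n", extend_seq(0x10, 0x100000005ULL, 0x100000020ULL));
    /* wrap: hardware wrote 0x2 after last_seq reached 0x1_fffffffe */
    printf("0x%" PRIx64 "\n", extend_seq(0x2, 0x1fffffffeULL, 0x200000002ULL));
    return 0;
}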

/**
 * radeon_fence_destroy - destroy a fence
 *
 * @kref: fence kref
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct kref *kref)
{
    struct radeon_fence *fence;

    fence = container_of(kref, struct radeon_fence, kref);
    kfree(fence);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
                      u64 seq, unsigned ring)
{
    if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
        return true;
    }
    /* poll new last sequence at least once */
    radeon_fence_process(rdev, ring);
    if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
        return true;
    }
    return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
    if (!fence) {
        return true;
    }
    if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
        return true;
    }
    if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
        fence->seq = RADEON_FENCE_SIGNALED_SEQ;
        return true;
    }
    return false;
}

/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
                 unsigned ring, bool intr, bool lock_ring)
{
    unsigned long timeout, last_activity;
    uint64_t seq;
    unsigned i;
    bool signaled;
    int r;

    while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
        if (!rdev->ring[ring].ready) {
            return -EBUSY;
        }

        timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
        if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
            /* the normal case: timeout is somewhere before last_activity */
            timeout = rdev->fence_drv[ring].last_activity - timeout;
        } else {
            /* either jiffies wrapped around, or no fence was signaled in
             * the last 500ms; either way, wait for the minimum amount and
             * then check for a lockup
             */
            timeout = 1;
        }
        seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
        /* Save current last activity value, used to check for GPU lockups */
        last_activity = rdev->fence_drv[ring].last_activity;

        // trace_radeon_fence_wait_begin(rdev->ddev, seq);
        radeon_irq_kms_sw_irq_get(rdev, ring);
        if (intr) {
            r = wait_event_interruptible_timeout(rdev->fence_queue,
                (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
                timeout);
        } else {
            r = wait_event_timeout(rdev->fence_queue,
                (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
                timeout);
        }
        radeon_irq_kms_sw_irq_put(rdev, ring);
        if (unlikely(r < 0)) {
            return r;
        }
        // trace_radeon_fence_wait_end(rdev->ddev, seq);

        if (unlikely(!signaled)) {
            /* we were interrupted for some reason and the fence
             * isn't signaled yet; resume waiting */
            if (r) {
                continue;
            }

            /* check if sequence value has changed since last_activity */
            if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
                continue;
            }

            if (lock_ring) {
                mutex_lock(&rdev->ring_lock);
            }

            /* test if somebody else has already decided that this is a lockup */
            if (last_activity != rdev->fence_drv[ring].last_activity) {
                if (lock_ring) {
                    mutex_unlock(&rdev->ring_lock);
                }
                continue;
            }

            if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                /* good news: we believe it's a lockup */
                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
                     target_seq, seq);

                /* change last activity so nobody else thinks there is a lockup */
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                    rdev->fence_drv[i].last_activity = GetTimerTicks();
                }

                /* mark the ring as not ready any more */
                rdev->ring[ring].ready = false;
                if (lock_ring) {
                    mutex_unlock(&rdev->ring_lock);
                }
                return -EDEADLK;
            }

            if (lock_ring) {
                mutex_unlock(&rdev->ring_lock);
            }
        }
    }
    return 0;
}
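
/*
 * Aside: a standalone illustration of the wrap-safe tick comparison the
 * timeout logic above relies on. time_after(a, b) stays correct across
 * counter wrap-around because the comparison is done on the signed
 * difference, and the unsigned subtraction then yields the right window.
 */
#include <limits.h>
#include <stdio.h>

#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
    unsigned long last_activity = 16UL;          /* ticks, just after a wrap */
    unsigned long timeout = ULONG_MAX - 15UL;    /* "now - TIMEOUT", before the wrap */

    if (time_after(last_activity, timeout))
        /* prints 32: the window survives the wrap */
        printf("normal case: wait %lu ticks\n", last_activity - timeout);
    else
        printf("stale: wait the minimum and check for a lockup\n");
    return 0;
}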

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
    int r;

    if (fence == NULL) {
        WARN(1, "Querying an invalid fence : %p !\n", fence);
        return -EINVAL;
    }

    r = radeon_fence_wait_seq(fence->rdev, fence->seq,
                  fence->ring, intr, true);
    if (r) {
        return r;
    }
    fence->seq = RADEON_FENCE_SIGNALED_SEQ;
    return 0;
}

static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
    unsigned i;

    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
        if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
            return true;
        }
    }
    return false;
}

/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
                     u64 *target_seq, bool intr)
{
    unsigned long timeout, last_activity, tmp;
    unsigned i, ring = RADEON_NUM_RINGS;
    bool signaled;
    int r;

    for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
        if (!target_seq[i]) {
            continue;
        }

        /* use the most recent one as indicator */
        if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
            last_activity = rdev->fence_drv[i].last_activity;
        }

        /* For lockup detection just pick the lowest ring we are
         * actively waiting for
         */
        if (i < ring) {
            ring = i;
        }
    }

    /* nothing to wait for ? */
    if (ring == RADEON_NUM_RINGS) {
        return -ENOENT;
    }

    while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
        timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
        if (time_after(last_activity, timeout)) {
            /* the normal case: timeout is somewhere before last_activity */
            timeout = last_activity - timeout;
        } else {
            /* either jiffies wrapped around, or no fence was signaled in
             * the last 500ms; either way, wait for the minimum amount and
             * then check for a lockup
             */
            timeout = 1;
        }

        // trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
            if (target_seq[i]) {
                radeon_irq_kms_sw_irq_get(rdev, i);
            }
        }
        if (intr) {
            r = wait_event_interruptible_timeout(rdev->fence_queue,
                (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
                timeout);
        } else {
            r = wait_event_timeout(rdev->fence_queue,
                (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
                timeout);
        }
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
            if (target_seq[i]) {
                radeon_irq_kms_sw_irq_put(rdev, i);
            }
        }
        if (unlikely(r < 0)) {
            return r;
        }
        // trace_radeon_fence_wait_end(rdev->ddev, seq);

        if (unlikely(!signaled)) {
            /* we were interrupted for some reason and the fence
             * isn't signaled yet; resume waiting */
            if (r) {
                continue;
            }

            mutex_lock(&rdev->ring_lock);
            for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
                if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
                    tmp = rdev->fence_drv[i].last_activity;
                }
            }
            /* test if somebody else has already decided that this is a lockup */
            if (last_activity != tmp) {
                last_activity = tmp;
                mutex_unlock(&rdev->ring_lock);
                continue;
            }

            if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                /* good news: we believe it's a lockup */
                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
                     target_seq[ring]);

                /* change last activity so nobody else thinks there is a lockup */
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                    rdev->fence_drv[i].last_activity = GetTimerTicks();
                }

                /* mark the ring as not ready any more */
                rdev->ring[ring].ready = false;
                mutex_unlock(&rdev->ring_lock);
                return -EDEADLK;
            }
            mutex_unlock(&rdev->ring_lock);
        }
    }
    return 0;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). The fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
              struct radeon_fence **fences,
              bool intr)
{
    uint64_t seq[RADEON_NUM_RINGS];
    unsigned i;
    int r;

    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
        seq[i] = 0;

        if (!fences[i]) {
            continue;
        }

        if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
            /* something was already signaled */
            return 0;
        }

        seq[i] = fences[i]->seq;
    }

    r = radeon_fence_wait_any_seq(rdev, seq, intr);
    if (r) {
        return r;
    }
    return 0;
}

/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
    uint64_t seq;

    seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
    if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
        /* nothing to wait for, last_seq is
           already the last emitted fence */
        return -ENOENT;
    }
    return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
    uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
    int r;

    r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
    if (r) {
        if (r == -EDEADLK) {
            return -EDEADLK;
        }
        dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
            ring, r);
    }
    return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
    kref_get(&fence->kref);
    return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
    struct radeon_fence *tmp = *fence;

    *fence = NULL;
    if (tmp) {
        kref_put(&tmp->kref, radeon_fence_destroy);
    }
}
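
/*
 * Aside: a userspace sketch of the kref pattern behind radeon_fence_ref/
 * radeon_fence_unref above: an atomic counter plus a release callback
 * that runs when the last reference is dropped. All names here are
 * invented for the example.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_fence {
    atomic_int refcount;
    unsigned long seq;
};

static struct toy_fence *toy_fence_ref(struct toy_fence *f)
{
    atomic_fetch_add(&f->refcount, 1);          /* cf. kref_get() */
    return f;
}

static void toy_fence_unref(struct toy_fence **f)
{
    struct toy_fence *tmp = *f;

    *f = NULL;                                  /* caller never keeps a stale pointer */
    if (tmp && atomic_fetch_sub(&tmp->refcount, 1) == 1) {
        /* last reference dropped: cf. radeon_fence_destroy() */
        printf("freeing fence seq %lu\n", tmp->seq);
        free(tmp);
    }
}

int main(void)
{
    struct toy_fence *f = malloc(sizeof(*f));
    atomic_init(&f->refcount, 1);               /* cf. kref_init() */
    f->seq = 42;

    struct toy_fence *extra = toy_fence_ref(f);
    toy_fence_unref(&f);      /* one reference still left */
    toy_fence_unref(&extra);  /* last reference: release runs */
    return 0;
}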

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
    uint64_t emitted;

    /* We are not protected by ring lock when reading the last sequence,
     * but it's ok to report a slightly wrong fence count here.
     */
    radeon_fence_process(rdev, ring);
    emitted = rdev->fence_drv[ring].sync_seq[ring]
        - atomic64_read(&rdev->fence_drv[ring].last_seq);
    /* to avoid 32-bit wrap-around */
    if (emitted > 0x10000000) {
        emitted = 0x10000000;
    }
    return (unsigned)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
    struct radeon_fence_driver *fdrv;

    if (!fence) {
        return false;
    }

    if (fence->ring == dst_ring) {
        return false;
    }

    /* we are protected by the ring mutex */
    fdrv = &fence->rdev->fence_drv[dst_ring];
    if (fence->seq <= fdrv->sync_seq[fence->ring]) {
        return false;
    }

    return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
    struct radeon_fence_driver *dst, *src;
    unsigned i;

    if (!fence) {
        return;
    }

    if (fence->ring == dst_ring) {
        return;
    }

    /* we are protected by the ring mutex */
    src = &fence->rdev->fence_drv[fence->ring];
    dst = &fence->rdev->fence_drv[dst_ring];
    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
        if (i == dst_ring) {
            continue;
        }
        dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
    }
}
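
/*
 * Aside: a simplified standalone model (invented names) of the
 * need_sync/note_sync bookkeeping above. sync_seq[dst][src] records the
 * newest fence on ring "src" that ring "dst" is already known to wait
 * behind, so a semaphore is only needed for fences newer than that. The
 * real note_sync additionally folds in all of the source ring's sync_seq
 * entries; this model tracks only the single pairwise counter.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS 3

static uint64_t sync_seq[NUM_RINGS][NUM_RINGS]; /* [dst_ring][src_ring] */

static bool need_sync(unsigned src, uint64_t seq, unsigned dst)
{
    if (src == dst)
        return false;                   /* a ring orders itself */
    return seq > sync_seq[dst][src];    /* cf. radeon_fence_need_sync() */
}

static void note_sync(unsigned src, uint64_t seq, unsigned dst)
{
    if (sync_seq[dst][src] < seq)       /* cf. radeon_fence_note_sync() */
        sync_seq[dst][src] = seq;
}

int main(void)
{
    printf("first fence needs sync: %d\n", need_sync(0, 5, 1)); /* 1 */
    note_sync(0, 5, 1);                 /* semaphore emitted, record it */
    printf("older fence needs sync: %d\n", need_sync(0, 3, 1)); /* 0 */
    printf("newer fence needs sync: %d\n", need_sync(0, 7, 1)); /* 1 */
    return 0;
}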

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
    uint64_t index;
    int r;

    radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
    if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
        rdev->fence_drv[ring].scratch_reg = 0;
        index = R600_WB_EVENT_OFFSET + ring * 4;
    } else {
        r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
        if (r) {
            dev_err(rdev->dev, "fence failed to get scratch register\n");
            return r;
        }
        index = RADEON_WB_SCRATCH_OFFSET +
            rdev->fence_drv[ring].scratch_reg -
            rdev->scratch.reg_base;
    }
    rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
    rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
    radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
    rdev->fence_drv[ring].initialized = true;
    dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
         ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
    return 0;
}
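
/*
 * Aside: a quick standalone check of the writeback slot arithmetic above.
 * Each ring gets a 4-byte slot at a byte offset into the writeback page,
 * and cpu_addr indexes a u32 array, hence index/4. The offset constant
 * here is a made-up placeholder; the real R600_WB_EVENT_OFFSET comes from
 * radeon.h.
 */
#include <stdint.h>
#include <stdio.h>

#define WB_EVENT_OFFSET 3072 /* placeholder for the real header constant */

int main(void)
{
    uint32_t wb[1024];  /* stands in for rdev->wb.wb */

    for (int ring = 0; ring < 3; ring++) {
        unsigned index = WB_EVENT_OFFSET + ring * 4;   /* byte offset */
        volatile uint32_t *cpu_addr = &wb[index / 4];  /* u32 slot for this ring */
        printf("ring %d: byte offset %u -> wb[%u] (%p)\n",
               ring, index, index / 4, (void *)cpu_addr);
    }
    return 0;
}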

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
    int i;

    rdev->fence_drv[ring].scratch_reg = -1;
    rdev->fence_drv[ring].cpu_addr = NULL;
    rdev->fence_drv[ring].gpu_addr = 0;
    for (i = 0; i < RADEON_NUM_RINGS; ++i)
        rdev->fence_drv[ring].sync_seq[i] = 0;
    atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
    rdev->fence_drv[ring].last_activity = GetTimerTicks();
    rdev->fence_drv[ring].initialized = false;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
    int ring;

    init_waitqueue_head(&rdev->fence_queue);
    for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
        radeon_fence_driver_init_ring(rdev, ring);
    }
    if (radeon_debugfs_fence_init(rdev)) {
        dev_err(rdev->dev, "fence debugfs file creation failed\n");
    }
    return 0;
}
843 | 839 | ||
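/*
 * A usage sketch of the expected bring-up order, assuming (per the
 * comment above) that radeon_fence_driver_init() only resets software
 * state and the asic startup path later calls
 * radeon_fence_driver_start_ring() for each ring it actually has.
 * example_fence_bringup() and the GFX-only ring choice are hypothetical.
 */
#if 0
static int example_fence_bringup(struct radeon_device *rdev)
{
	int r;

	r = radeon_fence_driver_init(rdev);	/* all rings, software state only */
	if (r)
		return r;
	/* per-ring hardware hookup, normally done from the asic startup code */
	return radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
#endif
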
/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty_locked(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev);
		}
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev)
{
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
	}
}

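/*
 * A sketch of the recovery path implied by the comment above: writing
 * the last emitted sequence number back makes every outstanding fence
 * read as completed, so a failed GPU reset can unblock sleeping waiters
 * instead of leaving them waiting forever.  example_handle_failed_reset()
 * is hypothetical; fini above uses the same fallback internally.
 */
#if 0
static void example_handle_failed_reset(struct radeon_device *rdev)
{
	/* make every outstanding fence read back as completed ... */
	radeon_fence_driver_force_completion(rdev);
	/* ... and kick all sleepers so they re-check their sequence numbers */
	wake_up_all(&rdev->fence_queue);
}
#endif
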
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted        0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}
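
/*
 * With CONFIG_DEBUG_FS enabled, the dump produced by
 * radeon_debugfs_fence_info() follows the seq_printf() calls above,
 * e.g. (values illustrative):
 *
 *   --- ring 0 ---
 *   Last signaled fence 0x0000000000000e21
 *   Last emitted        0x0000000000000e23
 *   Last sync to ring 3 0x0000000000000d10
 *
 * The file is registered as "radeon_fence_info"; on Linux it would
 * normally appear under the drm debugfs directory for the device
 * (assumed path: /sys/kernel/debug/dri/<minor>/radeon_fence_info).
 */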