Rev 2005 -> Rev 2997 (most recent: Rev 3031) — radeon fence code, reconstructed from the side-by-side diff.

This revision reworks the fence code from a single shared fence driver (one scratch register, a lock-protected list of created/emitted/signaled fences, radeon_fence_create(), and kernel-event based waits via CreateEvent/WaitEvent/RaiseEvent) to one fence driver per ring: fences are allocated by radeon_fence_emit() itself and carry a ring index, sequence numbers are 64-bit and tracked with atomic64 operations, waiters sleep on a common fence queue, and per-ring lockup detection replaces the global gpu_lockup flag. The resulting file follows.
#include
//#include
#include
#include
#include
#include
#include "radeon_reg.h"
#include "radeon.h"
/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
    struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
    if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
        *drv->cpu_addr = cpu_to_le32(seq);
    } else {
        WREG32(drv->scratch_reg, seq);
    }
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
    struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
    u32 seq = 0;

    if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
        seq = le32_to_cpu(*drv->cpu_addr);
    } else {
        seq = RREG32(drv->scratch_reg);
    }
    return seq;
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
                      struct radeon_fence **fence,
                      int ring)
{
    /* we are protected by the ring emission mutex */
    *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
    if ((*fence) == NULL) {
        return -ENOMEM;
    }
    kref_init(&((*fence)->kref));
    (*fence)->rdev = rdev;
    (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
    (*fence)->ring = ring;
    radeon_fence_ring_emit(rdev, ring, *fence);
//   trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
    return 0;
}

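/* An illustrative sketch (not part of the driver): the typical
 * emit/wait/unref lifecycle a caller goes through with the functions
 * in this file, assuming the usual prototypes from radeon.h.  Error
 * handling is reduced to the minimum and the ring index is assumed
 * valid.
 */
#if 0
static int example_fence_lifecycle(struct radeon_device *rdev, int ring)
{
    struct radeon_fence *fence = NULL;
    int r;

    /* allocate the fence and emit it after the ring commands */
    r = radeon_fence_emit(rdev, &fence, ring);
    if (r)
        return r;
    /* block (non-interruptible) until the GPU writes the sequence */
    r = radeon_fence_wait(fence, false);
    /* drop our reference; the fence is freed on the last unref */
    radeon_fence_unref(&fence);
    return r;
}
#endif
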
/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
    uint64_t seq, last_seq, last_emitted;
    unsigned count_loop = 0;
    bool wake = false;

    /* Note there is a scenario here for an infinite loop but it's
     * very unlikely to happen.  For it to happen, the current polling
     * process needs to be interrupted by another process, and that
     * other process needs to update last_seq between the atomic read
     * and the xchg of the current process.
     *
     * Moreover, for this to become an infinite loop, new fences need
     * to be signaled continuously, i.e. radeon_fence_read needs to
     * return a different value each time for both the currently
     * polling process and the other process that updates last_seq
     * between the atomic read and xchg of the current process.  And
     * the value the other process sets as last_seq must be higher
     * than the seq value we just read, which means the current
     * process must be interrupted after radeon_fence_read and before
     * the atomic xchg.
     *
     * To be even more safe we count the number of times we loop and
     * bail after 10 loops, accepting the fact that we might have
     * temporarily set last_seq not to the true last seq but to an
     * older one.
     */
    last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
    do {
        last_emitted = rdev->fence_drv[ring].sync_seq[ring];
        seq = radeon_fence_read(rdev, ring);
        seq |= last_seq & 0xffffffff00000000LL;
        if (seq < last_seq) {
            seq &= 0xffffffff;
            seq |= last_emitted & 0xffffffff00000000LL;
        }

        if (seq <= last_seq || seq > last_emitted) {
            break;
        }
        /* If we loop over we don't want to return without
         * checking if a fence is signaled, as it means that the
         * seq we just read is different from the previous one.
         */
        wake = true;
        last_seq = seq;
        if ((count_loop++) > 10) {
            /* We looped over too many times, leave with the
             * fact that we might have set an older fence
             * seq than the current real last seq as signaled
             * by the hw.
             */
            break;
        }
    } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

    if (wake) {
        rdev->fence_drv[ring].last_activity = GetTimerTicks();
        wake_up_all(&rdev->fence_queue);
    }
}

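/* An illustrative sketch (not part of the driver) of the 32->64 bit
 * sequence extension performed above: the hardware writes only the low
 * 32 bits, so the upper half is inherited from the last known 64-bit
 * value, and a wrap (read value below the last one) borrows the upper
 * half of the last emitted sequence instead.
 */
#if 0
static uint64_t example_extend_seq(uint64_t last_seq, uint64_t last_emitted,
                                   u32 hw_seq)
{
    uint64_t seq = hw_seq | (last_seq & 0xffffffff00000000LL);

    if (seq < last_seq) {
        /* low 32 bits wrapped: take the upper half of last_emitted */
        seq &= 0xffffffff;
        seq |= last_emitted & 0xffffffff00000000LL;
    }
    /* e.g. last_seq = 0x1fffffff0, hw_seq = 0x5, last_emitted =
     * 0x200000004 -> result is 0x200000005 */
    return seq;
}
#endif
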
/**
 * radeon_fence_destroy - destroy a fence
 *
 * @kref: fence kref
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct kref *kref)
{
    struct radeon_fence *fence;

    fence = container_of(kref, struct radeon_fence, kref);
    kfree(fence);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
                                      u64 seq, unsigned ring)
{
    if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
        return true;
    }
    /* poll new last sequence at least once */
    radeon_fence_process(rdev, ring);
    if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
        return true;
    }
    return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
    if (!fence) {
        return true;
    }
    if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
        return true;
    }
    if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
        fence->seq = RADEON_FENCE_SIGNALED_SEQ;
        return true;
    }
    return false;
}

/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
                                 unsigned ring, bool intr, bool lock_ring)
{
    unsigned long timeout, last_activity;
    uint64_t seq;
    unsigned i;
    bool signaled;
    int r;

    while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
        if (!rdev->ring[ring].ready) {
            return -EBUSY;
        }

        timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
        if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
            /* the normal case, timeout is somewhere before last_activity */
            timeout = rdev->fence_drv[ring].last_activity - timeout;
        } else {
            /* either jiffies wrapped around, or no fence was signaled
             * in the last 500ms; anyway we will just wait for the
             * minimum amount and then check for a lockup
             */
            timeout = 1;
        }
        seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
        /* Save the current last activity value, used to check for GPU lockups */
        last_activity = rdev->fence_drv[ring].last_activity;

//      trace_radeon_fence_wait_begin(rdev->ddev, seq);
        radeon_irq_kms_sw_irq_get(rdev, ring);
//      if (intr) {
//          r = wait_event_interruptible_timeout(rdev->fence_queue,
//              (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
//              timeout);
//      } else {
//          r = wait_event_timeout(rdev->fence_queue,
//              (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
//              timeout);
//      }
        delay(1);
        /* the event-queue waits above are disabled in this port: poll
         * the fence state by hand and treat the sleep as if it had
         * returned with time remaining */
        signaled = radeon_fence_seq_signaled(rdev, target_seq, ring);
        r = 1;

        radeon_irq_kms_sw_irq_put(rdev, ring);
//      if (unlikely(r < 0)) {
//          return r;
//      }
//      trace_radeon_fence_wait_end(rdev->ddev, seq);

        if (unlikely(!signaled)) {
            /* we were interrupted for some reason and fence
             * isn't signaled yet, resume waiting */
            if (r) {
                continue;
            }

            /* check if sequence value has changed since last_activity */
            if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
                continue;
            }

            if (lock_ring) {
                mutex_lock(&rdev->ring_lock);
            }

            /* test if somebody else has already decided that this is a lockup */
            if (last_activity != rdev->fence_drv[ring].last_activity) {
                if (lock_ring) {
                    mutex_unlock(&rdev->ring_lock);
                }
                continue;
            }

            if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                /* good news we believe it's a lockup */
                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
                         target_seq, seq);

                /* change last activity so nobody else thinks there is a lockup */
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                    rdev->fence_drv[i].last_activity = jiffies;
                }

                /* mark the ring as not ready any more */
                rdev->ring[ring].ready = false;
                if (lock_ring) {
                    mutex_unlock(&rdev->ring_lock);
                }
                return -EDEADLK;
            }

            if (lock_ring) {
                mutex_unlock(&rdev->ring_lock);
            }
        }
    }
    return 0;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
    int r;

    if (fence == NULL) {
        WARN(1, "Querying an invalid fence : %p !\n", fence);
        return -EINVAL;
    }

    r = radeon_fence_wait_seq(fence->rdev, fence->seq,
                              fence->ring, intr, true);
    if (r) {
        return r;
    }
    fence->seq = RADEON_FENCE_SIGNALED_SEQ;
    return 0;
}

static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
    unsigned i;

    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
        if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
            return true;
        }
    }
    return false;
}

/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
                                     u64 *target_seq, bool intr)
{
    unsigned long timeout, last_activity, tmp;
    unsigned i, ring = RADEON_NUM_RINGS;
    bool signaled;
    int r;

    for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
        if (!target_seq[i]) {
            continue;
        }

        /* use the most recent one as indicator */
        if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
            last_activity = rdev->fence_drv[i].last_activity;
        }

        /* For lockup detection just pick the lowest ring we are
         * actively waiting for
         */
        if (i < ring) {
            ring = i;
        }
    }

    /* nothing to wait for ? */
    if (ring == RADEON_NUM_RINGS) {
        return -ENOENT;
    }

    while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
        timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
        if (time_after(last_activity, timeout)) {
            /* the normal case, timeout is somewhere before last_activity */
            timeout = last_activity - timeout;
        } else {
            /* either jiffies wrapped around, or no fence was signaled
             * in the last 500ms; anyway we will just wait for the
             * minimum amount and then check for a lockup
             */
            timeout = 1;
        }

//      trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
            if (target_seq[i]) {
                radeon_irq_kms_sw_irq_get(rdev, i);
            }
        }

//      WaitEvent(fence->evnt);

        /* the event wait above is disabled in this port: poll the
         * fences by hand and treat the sleep as if it had returned
         * with time remaining */
        signaled = radeon_fence_any_seq_signaled(rdev, target_seq);
        r = 1;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
            if (target_seq[i]) {
                radeon_irq_kms_sw_irq_put(rdev, i);
            }
        }
        if (unlikely(r < 0)) {
            return r;
        }
//      trace_radeon_fence_wait_end(rdev->ddev, seq);

        if (unlikely(!signaled)) {
            /* we were interrupted for some reason and fence
             * isn't signaled yet, resume waiting */
            if (r) {
                continue;
            }

            mutex_lock(&rdev->ring_lock);
            for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
                if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
                    tmp = rdev->fence_drv[i].last_activity;
                }
            }
            /* test if somebody else has already decided that this is a lockup */
            if (last_activity != tmp) {
                last_activity = tmp;
                mutex_unlock(&rdev->ring_lock);
                continue;
            }

            if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                /* good news we believe it's a lockup */
                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
                         target_seq[ring]);

                /* change last activity so nobody else thinks there is a lockup */
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                    rdev->fence_drv[i].last_activity = GetTimerTicks();
                }

                /* mark the ring as not ready any more */
                rdev->ring[ring].ready = false;
                mutex_unlock(&rdev->ring_lock);
                return -EDEADLK;
            }
            mutex_unlock(&rdev->ring_lock);
        }
    }
    return 0;
}
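
/* An illustrative sketch (not part of the driver) of the relative
 * timeout computed at the top of both wait loops above: the waiter
 * sleeps until RADEON_FENCE_JIFFIES_TIMEOUT ticks after the ring's
 * last recorded activity, and falls back to the minimum wait when the
 * tick counter wrapped or no fence signaled within the window.
 */
#if 0
static unsigned long example_wait_window(unsigned long last_activity)
{
    unsigned long timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;

    if (time_after(last_activity, timeout)) {
        /* normal case: sleep for the part of the window still ahead */
        return last_activity - timeout;
    }
    /* wrap around, or the window already expired: minimum wait,
     * after which the caller checks for a lockup */
    return 1;
}
#endif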

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).  The fence
 * array is indexed by ring id.  @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences.  Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
                          struct radeon_fence **fences,
                          bool intr)
{
    uint64_t seq[RADEON_NUM_RINGS];
    unsigned i;
    int r;

    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
        seq[i] = 0;

        if (!fences[i]) {
            continue;
        }

        if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
            /* something was already signaled */
            return 0;
        }

        seq[i] = fences[i]->seq;
    }

    r = radeon_fence_wait_any_seq(rdev, seq, intr);
    if (r) {
        return r;
    }
    return 0;
}

/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
    uint64_t seq;

    seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
    if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
        /* nothing to wait for, last_seq is
           already the last emitted fence */
        return -ENOENT;
    }
    return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Caller must hold ring lock.
 */
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
    uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];

    while (1) {
        int r;
        r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
        if (r == -EDEADLK) {
            mutex_unlock(&rdev->ring_lock);
            r = radeon_gpu_reset(rdev);
            mutex_lock(&rdev->ring_lock);
            if (!r)
                continue;
        }
        if (r) {
            dev_err(rdev->dev, "error waiting for ring to become"
                    " idle (%d)\n", r);
        }
        return;
    }
}
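
/* An illustrative sketch (not part of the driver): reference counting
 * with the two helpers defined below.  A subsystem that keeps a fence
 * beyond the lifetime of its creator takes its own reference and drops
 * it when done; the kref machinery frees the fence on the last put.
 */
#if 0
static struct radeon_fence *example_keep_fence(struct radeon_fence *fence)
{
    /* take an extra reference before publishing the pointer */
    return radeon_fence_ref(fence);
}

static void example_release_fence(struct radeon_fence **fence)
{
    /* NULLs the pointer and frees the fence if this was the last ref */
    radeon_fence_unref(fence);
}
#endif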

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
    kref_get(&fence->kref);
    return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
    struct radeon_fence *tmp = *fence;

    *fence = NULL;
    if (tmp) {
        kref_put(&tmp->kref, radeon_fence_destroy);
    }
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
    uint64_t emitted;

    /* We are not protected by ring lock when reading the last sequence
     * but it's ok to report slightly wrong fence count here.
     */
    radeon_fence_process(rdev, ring);
    emitted = rdev->fence_drv[ring].sync_seq[ring]
            - atomic64_read(&rdev->fence_drv[ring].last_seq);
    /* to avoid 32bit wrap around */
    if (emitted > 0x10000000) {
        emitted = 0x10000000;
    }
    return (unsigned)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
    struct radeon_fence_driver *fdrv;

    if (!fence) {
        return false;
    }

    if (fence->ring == dst_ring) {
        return false;
    }

    /* we are protected by the ring mutex */
    fdrv = &fence->rdev->fence_drv[dst_ring];
    if (fence->seq <= fdrv->sync_seq[fence->ring]) {
        return false;
    }

    return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
    struct radeon_fence_driver *dst, *src;
    unsigned i;

    if (!fence) {
        return;
    }

    if (fence->ring == dst_ring) {
        return;
    }

    /* we are protected by the ring mutex */
    src = &fence->rdev->fence_drv[fence->ring];
    dst = &fence->rdev->fence_drv[dst_ring];
    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
        if (i == dst_ring) {
            continue;
        }
        dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
    }
}

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
    uint64_t index;
    int r;

    radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
    if (rdev->wb.use_event) {
        rdev->fence_drv[ring].scratch_reg = 0;
        index = R600_WB_EVENT_OFFSET + ring * 4;
    } else {
        r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
        if (r) {
            dev_err(rdev->dev, "fence failed to get scratch register\n");
            return r;
        }
        index = RADEON_WB_SCRATCH_OFFSET +
                rdev->fence_drv[ring].scratch_reg -
                rdev->scratch.reg_base;
    }
    rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
    rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
    radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
    rdev->fence_drv[ring].initialized = true;
    dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
             ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
    return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
    int i;

    rdev->fence_drv[ring].scratch_reg = -1;
    rdev->fence_drv[ring].cpu_addr = NULL;
    rdev->fence_drv[ring].gpu_addr = 0;
    for (i = 0; i < RADEON_NUM_RINGS; ++i)
        rdev->fence_drv[ring].sync_seq[i] = 0;
    atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
    rdev->fence_drv[ring].last_activity = jiffies;
    rdev->fence_drv[ring].initialized = false;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
    int ring;

    init_waitqueue_head(&rdev->fence_queue);
    for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
        radeon_fence_driver_init_ring(rdev, ring);
    }
    if (radeon_debugfs_fence_init(rdev)) {
        dev_err(rdev->dev, "fence debugfs file creation failed\n");
    }
    return 0;
}
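
/* An illustrative sketch (not part of the driver) of the init flow
 * described above: radeon_fence_driver_init() prepares the software
 * state for every possible ring, and each asic later calls
 * radeon_fence_driver_start_ring() only for the rings it actually has
 * (here, only the GFX ring, as an assumed example).
 */
#if 0
static int example_fence_init_flow(struct radeon_device *rdev)
{
    int r;

    r = radeon_fence_driver_init(rdev);   /* software state, all rings */
    if (r)
        return r;
    /* hardware hookup, done per existing ring during asic startup */
    return radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
#endif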

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
    int ring;

    mutex_lock(&rdev->ring_lock);
    for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
        if (!rdev->fence_drv[ring].initialized)
            continue;
        radeon_fence_wait_empty_locked(rdev, ring);
        wake_up_all(&rdev->fence_queue);
        radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
862 | radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); |