Diff of the i915 GPU error state code, Rev 4126 → Rev 4560.
Lines marked "-" exist only in Rev 4126; lines marked "+" exist only in Rev 4560.
/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *    Keith Packard
 *    Mika Kuoppala
 *
 */

#include "i915_drv.h"

#if 0
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

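/*
 * The error state buffer implements a windowed writer over the formatted
 * dump: "start" is the file offset the reader asked for, "pos" tracks the
 * virtual output position (including output skipped before the window),
 * and "bytes" counts what has actually been stored in "buf".
 */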
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}

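/*
 * While still seeking the requested window, each printf is first measured
 * with vsnprintf(NULL, 0, ...) so that whole printfs landing before
 * "start" can be skipped without touching the buffer.
 */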
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

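/*
 * One line per buffer object: gtt_offset, size, read_domains, write_domain
 * and the last read/write seqnos, followed by single-letter flags for the
 * pinned/tiling/dirty/purgeable state, the owning ring and cache level.
 */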
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	err_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, " %08x %8u %02x %02x %x %x",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
+
+static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+{
+	switch (a) {
+	case HANGCHECK_IDLE:
+		return "idle";
+	case HANGCHECK_WAIT:
+		return "wait";
+	case HANGCHECK_ACTIVE:
+		return "active";
+	case HANGCHECK_KICK:
+		return "kick";
+	case HANGCHECK_HUNG:
+		return "hung";
+	}
+
+	return "unknown";
+}

static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  unsigned ring)
{
	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
+	if (!error->ring[ring].valid)
+		return;
+
	err_printf(m, "%s command stream:\n", ring_str(ring));
	err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
	err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
	err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
	err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
	err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
	err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
	err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
-	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
-		err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
-
-	if (INTEL_INFO(dev)->gen >= 4)
-		err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
+	if (INTEL_INFO(dev)->gen >= 4) {
+		err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr[ring]);
+		err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]);
+		err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
+	}
	err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
	err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
		err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
		err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][0],
			   error->semaphore_seqno[ring][0]);
		err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][1],
			   error->semaphore_seqno[ring][1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   error->semaphore_mboxes[ring][2],
				   error->semaphore_seqno[ring][2]);
		}
	}
	err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
	err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
	err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
	err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+	err_printf(m, " hangcheck: %s [%d]\n",
+		   hangcheck_action_to_str(error->hangcheck_action[ring]),
+		   error->hangcheck_score[ring]);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
-	struct intel_ring_buffer *ring;
	int i, j, page, offset, elt;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
-	err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
+	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);
		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

-	for_each_ring(ring, dev_priv, i)
+	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
		i915_ring_error_state(m, dev, error, i);

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo[0],
				    error->active_bo_count[0]);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo[0],
				    error->pinned_bo_count[0]);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		if ((obj = error->ring[i].batchbuffer)) {
			err_printf(m, "%s --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					err_printf(m, "%08x : %08x\n", offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					err_printf(m, "%08x : %08x\n",
						   offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

-		obj = error->ring[i].ctx;
-		if (obj) {
+		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
				offset += 16;
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}

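/*
 * The allocation falls back from the requested size to PAGE_SIZE and
 * finally to 128 bytes, so that at least a truncated report can still be
 * produced under memory pressure.
 */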
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}

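/*
 * Snapshot an object's contents using GFP_ATOMIC allocations, as capture
 * may run in interrupt context. Three copy paths: through the WC GTT
 * mapping when the object is mapped there, straight out of stolen memory,
 * or from the CPU pages with clflushes around the memcpy.
 */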
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), \
				       (src)->base.size>>PAGE_SHIFT)

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, mm_list) {
		capture_bo(err++, vma->obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, global_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

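/*
 * The fence register layout varies with the generation: gen4+ use 64-bit
 * registers, gen2/3 use 32-bit ones, and i945/G33 variants add a second
 * bank of eight (hence the deliberate fallthrough from case 3 to case 2).
 */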
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
+	case 8:
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}

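/*
 * Best-effort guess at the batch that hung: walk the active lists for an
 * unretired command buffer belonging to this ring. On chipsets with the
 * broken CS TLB workaround batches execute from the scratch object, so
 * check whether ACTHD points inside it first.
 */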
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct i915_address_space *vm;
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->scratch.obj;
-		if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+		if (obj != NULL &&
+		    acthd >= i915_gem_obj_ggtt_offset(obj) &&
		    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		list_for_each_entry(vma, &vm->active_list, mm_list) {
			obj = vma->obj;
			if (obj->ring != ring)
				continue;

			if (i915_seqno_passed(seqno, obj->last_read_seqno))
				continue;

			if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
				continue;

			/* We need to copy these to an anonymous buffer as the simplest
			 * method to avoid being overwritten by userspace.
			 */
			return i915_error_object_create(dev_priv, obj);
		}
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (HAS_VEBOX(dev)) {
		error->semaphore_mboxes[ring->id][2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
-		if (ring->id == RCS)
-			error->bbaddr = I915_READ64(BB_ADDR);
+		error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base));
+		if (INTEL_INFO(dev)->gen >= 8)
+			error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+		error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
+
+	error->hangcheck_score[ring->id] = ring->hangcheck.score;
+	error->hangcheck_action[ring->id] = ring->hangcheck.action;
}


static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_object_create_sized(dev_priv,
								    obj, 1);
			break;
		}
	}
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

-	for_each_ring(ring, dev_priv, i) {
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_ring_buffer *ring = &dev_priv->ring[i];
+
+		if (ring->dev == NULL)
+			continue;
+
+		error->ring[i].valid = true;
+
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);


		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
-			kmalloc(count*sizeof(struct drm_i915_error_request),
+			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, mm_list)
		i++;
	error->active_bo_count[ndx] = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
-		active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
+		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}

834 | static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, |
870 | static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, |
835 | struct drm_i915_error_state *error) |
871 | struct drm_i915_error_state *error) |
836 | { |
872 | { |
837 | struct i915_address_space *vm; |
873 | struct i915_address_space *vm; |
838 | int cnt = 0, i = 0; |
874 | int cnt = 0, i = 0; |
839 | 875 | ||
840 | list_for_each_entry(vm, &dev_priv->vm_list, global_link) |
876 | list_for_each_entry(vm, &dev_priv->vm_list, global_link) |
841 | cnt++; |
877 | cnt++; |
842 | 878 | ||
843 | if (WARN(cnt > 1, "Multiple VMs not yet supported\n")) |
879 | if (WARN(cnt > 1, "Multiple VMs not yet supported\n")) |
844 | cnt = 1; |
880 | cnt = 1; |
845 | 881 | ||
846 | vm = &dev_priv->gtt.base; |
882 | vm = &dev_priv->gtt.base; |
847 | 883 | ||
	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		i915_gem_capture_vm(dev_priv, error, vm, i++);
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user-level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

-	DRM_INFO("capturing error event; look for more information in "
-		 "/sys/class/drm/card%d/error\n", dev->primary->index);
+	DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+		 dev->primary->index);
+	DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+	DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+	DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+	DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	if (!HAS_PCH_SPLIT(dev))
		for_each_pipe(pipe)
			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}
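
#if 0
/*
 * Illustrative sketch only: how a hang or error-interrupt handler might
 * drive the capture path above.  In the driver proper this is done by
 * i915_handle_error() in i915_irq.c; example_handle_gpu_error() and its
 * exact sequence are assumptions for illustration, not driver code.
 */
static void example_handle_gpu_error(struct drm_device *dev)
{
	/* Snapshot registers and buffer objects at the time of the error. */
	i915_capture_error_state(dev);

	/* The dump is now available to userspace via debugfs/sysfs. */
	DRM_DEBUG_DRIVER("error state captured for card%d\n",
			 dev->primary->index);
}
#endif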

void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
}

void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}
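
#if 0
/*
 * Illustrative sketch only: the intended get/put pairing, roughly as the
 * i915_error_state debugfs reader uses it.  example_read_error_state()
 * is a made-up name and the struct initialisation is an assumption for
 * illustration.
 */
static void example_read_error_state(struct drm_device *dev)
{
	struct i915_error_state_file_priv error_priv = { .dev = dev };

	i915_error_state_get(dev, &error_priv);	/* takes a reference */
	if (error_priv.error) {
		/* ... format error_priv.error for userspace here ... */
	}
	i915_error_state_put(&error_priv);	/* drops the reference */
}
#endif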

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}

const char *i915_cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped or LLC";
	case I915_CACHE_L3_LLC: return " L3+LLC";
+	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
#endif

/*
 * NB: the memset zeroes all I915_NUM_INSTDONE_REG slots up front, so
 * generations that fill in fewer registers below still hand back
 * well-defined (zero) values for the rest.
 */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
+	case 8:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}
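
#if 0
/*
 * Illustrative sketch only: the typical consumer pattern.  Hang
 * detection samples INSTDONE twice and treats identical readings as
 * "no progress".  example_instdone_stuck() is a made-up helper; the
 * real comparison lives in the driver's hangcheck code.
 */
static bool example_instdone_stuck(struct drm_device *dev,
				   const uint32_t *prev)
{
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	int i;

	i915_get_extra_instdone(dev, instdone);

	for (i = 0; i < I915_NUM_INSTDONE_REG; i++)
		if (instdone[i] != prev[i])
			return false;	/* some unit made progress */

	return true;
}
#endif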