/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
//#include <linux/shmem_fs.h>
#include <linux/slab.h>
//#include <linux/swap.h>
#include <linux/pci.h>

extern int x86_clflush_size;

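/* Override the kernel memory-barrier macros with raw x86 fence instructions;
 * clflush() below flushes the single cacheline containing the given address. */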
#undef mb
#undef rmb
#undef wmb
#define mb()    asm volatile("mfence")
#define rmb()   asm volatile ("lfence")
#define wmb()   asm volatile ("sfence")

static inline void clflush(volatile void *__p)
{
    asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
}

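/* Minimal stand-ins for the kernel's ERR_PTR/PTR_ERR/IS_ERR helpers: a value
 * in the last MAX_ERRNO bytes of the address space is treated as an encoded
 * negative errno rather than a valid pointer. */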
#define MAX_ERRNO       4095

#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

static inline long IS_ERR(const void *ptr)
{
    return IS_ERR_VALUE((unsigned long)ptr);
}

static inline void *ERR_PTR(long error)
{
    return (void *) error;
}

static inline long PTR_ERR(const void *ptr)
{
    return (long) ptr;
}

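/* kref release callback for GEM objects: runs with struct_mutex held and
 * hands the object over to the i915 free path. */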
void
drm_gem_object_free(struct kref *kref)
{
    struct drm_gem_object *obj = (struct drm_gem_object *) kref;
    struct drm_device *dev = obj->dev;

    BUG_ON(!mutex_is_locked(&dev->struct_mutex));

    i915_gem_free_object(obj);
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
            struct drm_gem_object *obj, size_t size)
{
    BUG_ON((size & (PAGE_SIZE - 1)) != 0);

    obj->dev = dev;
    kref_init(&obj->refcount);
    atomic_set(&obj->handle_count, 0);
    obj->size = size;

    return 0;
}

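/* Releasing a GEM object's backing store is a no-op in this port. */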
void
drm_gem_object_release(struct drm_gem_object *obj)
{ }


#define I915_EXEC_CONSTANTS_MASK        (3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE    (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                            unsigned alignment,
                            bool map_and_fenceable,
                            bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
                struct drm_i915_gem_object *obj,
                struct drm_i915_gem_pwrite *args,
                struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
                 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                     struct drm_i915_fence_reg *fence,
                     bool enable);

static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

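/* Drop the bookkeeping for a fence register this object no longer owns. */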
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
    if (obj->tiling_mode)
        i915_gem_release_mmap(obj);

    /* As we do not have an associated fence register, we will force
     * a tiling change if we ever need to acquire one.
     */
    obj->fence_dirty = false;
    obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                  size_t size)
{
    dev_priv->mm.object_count++;
    dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
                     size_t size)
{
    dev_priv->mm.object_count--;
    dev_priv->mm.object_memory -= size;
}

#if 0

static int
i915_gem_wait_for_error(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct completion *x = &dev_priv->error_completion;
    unsigned long flags;
    int ret;

    if (!atomic_read(&dev_priv->mm.wedged))
        return 0;

    /*
     * Only wait 10 seconds for the gpu reset to complete to avoid hanging
     * userspace. If it takes that long something really bad is going on and
     * we should simply try to bail out and fail as gracefully as possible.
     */
    ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
    if (ret == 0) {
        DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
        return -EIO;
    } else if (ret < 0) {
        return ret;
    }

    if (atomic_read(&dev_priv->mm.wedged)) {
        /* GPU is hung, bump the completion count to account for
         * the token we just consumed so that we never hit zero and
         * end up waiting upon a subsequent completion event that
         * will never happen.
         */
        spin_lock_irqsave(&x->wait.lock, flags);
        x->done++;
        spin_unlock_irqrestore(&x->wait.lock, flags);
    }
    return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
    int ret;

    ret = i915_gem_wait_for_error(dev);
    if (ret)
        return ret;

    ret = mutex_lock_interruptible(&dev->struct_mutex);
    if (ret)
        return ret;

    WARN_ON(i915_verify_lists(dev));
    return 0;
}
#endif

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
    return obj->gtt_space && !obj->active;
}


#if 0

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
            struct drm_file *file)
{
    struct drm_i915_gem_init *args = data;

    if (drm_core_check_feature(dev, DRIVER_MODESET))
        return -ENODEV;

    if (args->gtt_start >= args->gtt_end ||
        (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
        return -EINVAL;

    /* GEM with user mode setting was never supported on ilk and later. */
    if (INTEL_INFO(dev)->gen >= 5)
        return -ENODEV;

    mutex_lock(&dev->struct_mutex);
    i915_gem_init_global_gtt(dev, args->gtt_start,
                 args->gtt_end, args->gtt_end);
    mutex_unlock(&dev->struct_mutex);

    return 0;
}
#endif

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                struct drm_file *file)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_i915_gem_get_aperture *args = data;
    struct drm_i915_gem_object *obj;
    size_t pinned;

    pinned = 0;
    mutex_lock(&dev->struct_mutex);
    list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
        if (obj->pin_count)
            pinned += obj->gtt_space->size;
    mutex_unlock(&dev->struct_mutex);

    args->aper_size = dev_priv->mm.gtt_total;
    args->aper_available_size = args->aper_size - pinned;

    return 0;
}

#if 0
static int
i915_gem_create(struct drm_file *file,
        struct drm_device *dev,
        uint64_t size,
        uint32_t *handle_p)
{
    struct drm_i915_gem_object *obj;
    int ret;
    u32 handle;

    size = roundup(size, PAGE_SIZE);
    if (size == 0)
        return -EINVAL;

    /* Allocate the new object */
    obj = i915_gem_alloc_object(dev, size);
    if (obj == NULL)
        return -ENOMEM;

    ret = drm_gem_handle_create(file, &obj->base, &handle);
    if (ret) {
        drm_gem_object_release(&obj->base);
        i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
        kfree(obj);
        return ret;
    }

    /* drop reference from allocate - handle holds it now */
    drm_gem_object_unreference(&obj->base);
    trace_i915_gem_object_create(obj);

    *handle_p = handle;
    return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
             struct drm_device *dev,
             struct drm_mode_create_dumb *args)
{
    /* have to work out size/pitch and return them */
    args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
    args->size = args->pitch * args->height;
    return i915_gem_create(file, dev,
                   args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
              struct drm_device *dev,
              uint32_t handle)
{
    return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
              struct drm_file *file)
{
    struct drm_i915_gem_create *args = data;

    return i915_gem_create(file, dev,
                   args->size, &args->handle);
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
    drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

    return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
        obj->tiling_mode != I915_TILING_NONE;
}

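/* The helpers below copy between a user buffer and a bit-17-swizzled GPU page
 * one 64-byte cacheline at a time, XOR-ing the GPU offset with 64 to swap the
 * two halves of each 128-byte chunk. */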
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
            const char *gpu_vaddr, int gpu_offset,
            int length)
{
    int ret, cpu_offset = 0;

    while (length > 0) {
        int cacheline_end = ALIGN(gpu_offset + 1, 64);
        int this_length = min(cacheline_end - gpu_offset, length);
        int swizzled_gpu_offset = gpu_offset ^ 64;

        ret = __copy_to_user(cpu_vaddr + cpu_offset,
                     gpu_vaddr + swizzled_gpu_offset,
                     this_length);
        if (ret)
            return ret + length;

        cpu_offset += this_length;
        gpu_offset += this_length;
        length -= this_length;
    }

    return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
              const char __user *cpu_vaddr,
              int length)
{
    int ret, cpu_offset = 0;

    while (length > 0) {
        int cacheline_end = ALIGN(gpu_offset + 1, 64);
        int this_length = min(cacheline_end - gpu_offset, length);
        int swizzled_gpu_offset = gpu_offset ^ 64;

        ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
                       cpu_vaddr + cpu_offset,
                       this_length);
        if (ret)
            return ret + length;

        cpu_offset += this_length;
        gpu_offset += this_length;
        length -= this_length;
    }

    return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
         char __user *user_data,
         bool page_do_bit17_swizzling, bool needs_clflush)
{
    char *vaddr;
    int ret;

    if (unlikely(page_do_bit17_swizzling))
        return -EINVAL;

    vaddr = kmap_atomic(page);
    if (needs_clflush)
        drm_clflush_virt_range(vaddr + shmem_page_offset,
                       page_length);
    ret = __copy_to_user_inatomic(user_data,
                      vaddr + shmem_page_offset,
                      page_length);
    kunmap_atomic(vaddr);

    return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
                 bool swizzled)
{
    if (unlikely(swizzled)) {
        unsigned long start = (unsigned long) addr;
        unsigned long end = (unsigned long) addr + length;

        /* For swizzling simply ensure that we always flush both
         * channels. Lame, but simple and it works. Swizzled
         * pwrite/pread is far from a hotpath - current userspace
         * doesn't use it at all. */
        start = round_down(start, 128);
        end = round_up(end, 128);

        drm_clflush_virt_range((void *)start, end - start);
    } else {
        drm_clflush_virt_range(addr, length);
    }

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
         char __user *user_data,
         bool page_do_bit17_swizzling, bool needs_clflush)
{
    char *vaddr;
    int ret;

    vaddr = kmap(page);
    if (needs_clflush)
        shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                         page_length,
                         page_do_bit17_swizzling);

    if (page_do_bit17_swizzling)
        ret = __copy_to_user_swizzled(user_data,
                          vaddr, shmem_page_offset,
                          page_length);
    else
        ret = __copy_to_user(user_data,
                     vaddr + shmem_page_offset,
                     page_length);
    kunmap(page);

    return ret ? -EFAULT : 0;
}

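/* CPU pread path: walk the object's sg list page by page, trying the
 * non-faulting atomic copy first and dropping struct_mutex to fall back to
 * the slower, faultable copy when that fails. */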
static int
i915_gem_shmem_pread(struct drm_device *dev,
             struct drm_i915_gem_object *obj,
             struct drm_i915_gem_pread *args,
             struct drm_file *file)
{
    char __user *user_data;
    ssize_t remain;
    loff_t offset;
    int shmem_page_offset, page_length, ret = 0;
    int obj_do_bit17_swizzling, page_do_bit17_swizzling;
    int hit_slowpath = 0;
    int prefaulted = 0;
    int needs_clflush = 0;
    struct scatterlist *sg;
    int i;

    user_data = (char __user *) (uintptr_t) args->data_ptr;
    remain = args->size;

    obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

    if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
        /* If we're not in the cpu read domain, set ourself into the gtt
         * read domain and manually flush cachelines (if required). This
         * optimizes for the case when the gpu will dirty the data
         * anyway again before the next pread happens. */
        if (obj->cache_level == I915_CACHE_NONE)
            needs_clflush = 1;
        if (obj->gtt_space) {
            ret = i915_gem_object_set_to_gtt_domain(obj, false);
            if (ret)
                return ret;
        }
    }

    ret = i915_gem_object_get_pages(obj);
    if (ret)
        return ret;

    i915_gem_object_pin_pages(obj);

    offset = args->offset;

    for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
        struct page *page;

        if (i < offset >> PAGE_SHIFT)
            continue;

        if (remain <= 0)
            break;

        /* Operation in this page
         *
         * shmem_page_offset = offset within page in shmem file
         * page_length = bytes to copy for this page
         */
        shmem_page_offset = offset_in_page(offset);
        page_length = remain;
        if ((shmem_page_offset + page_length) > PAGE_SIZE)
            page_length = PAGE_SIZE - shmem_page_offset;

        page = sg_page(sg);
        page_do_bit17_swizzling = obj_do_bit17_swizzling &&
            (page_to_phys(page) & (1 << 17)) != 0;

        ret = shmem_pread_fast(page, shmem_page_offset, page_length,
                       user_data, page_do_bit17_swizzling,
                       needs_clflush);
        if (ret == 0)
            goto next_page;

        hit_slowpath = 1;
        mutex_unlock(&dev->struct_mutex);

        if (!prefaulted) {
            ret = fault_in_multipages_writeable(user_data, remain);
            /* Userspace is tricking us, but we've already clobbered
             * its pages with the prefault and promised to write the
             * data up to the first fault. Hence ignore any errors
             * and just continue. */
            (void)ret;
            prefaulted = 1;
        }

        ret = shmem_pread_slow(page, shmem_page_offset, page_length,
                       user_data, page_do_bit17_swizzling,
                       needs_clflush);

        mutex_lock(&dev->struct_mutex);

next_page:
        mark_page_accessed(page);

        if (ret)
            goto out;

        remain -= page_length;
        user_data += page_length;
        offset += page_length;
    }

out:
    i915_gem_object_unpin_pages(obj);

    if (hit_slowpath) {
        /* Fixup: Kill any reinstated backing storage pages */
        if (obj->madv == __I915_MADV_PURGED)
            i915_gem_object_truncate(obj);
    }

    return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
             struct drm_file *file)
{
    struct drm_i915_gem_pread *args = data;
    struct drm_i915_gem_object *obj;
    int ret = 0;

    if (args->size == 0)
        return 0;

    if (!access_ok(VERIFY_WRITE,
               (char __user *)(uintptr_t)args->data_ptr,
               args->size))
        return -EFAULT;

    ret = i915_mutex_lock_interruptible(dev);
    if (ret)
        return ret;

    obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
    if (&obj->base == NULL) {
        ret = -ENOENT;
        goto unlock;
    }

    /* Bounds check source. */
    if (args->offset > obj->base.size ||
        args->size > obj->base.size - args->offset) {
        ret = -EINVAL;
        goto out;
    }

    /* prime objects have no backing filp to GEM pread/pwrite
     * pages from.
     */
    if (!obj->base.filp) {
        ret = -EINVAL;
        goto out;
    }

    trace_i915_gem_object_pread(obj, args->offset, args->size);

    ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
    drm_gem_object_unreference(&obj->base);
unlock:
    mutex_unlock(&dev->struct_mutex);
    return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
        loff_t page_base, int page_offset,
        char __user *user_data,
        int length)
{
    void __iomem *vaddr_atomic;
    void *vaddr;
    unsigned long unwritten;

    vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
    /* We can use the cpu mem copy function because this is X86. */
    vaddr = (void __force*)vaddr_atomic + page_offset;
    unwritten = __copy_from_user_inatomic_nocache(vaddr,
                              user_data, length);
    io_mapping_unmap_atomic(vaddr_atomic);
    return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
             struct drm_i915_gem_object *obj,
             struct drm_i915_gem_pwrite *args,
             struct drm_file *file)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    ssize_t remain;
    loff_t offset, page_base;
    char __user *user_data;
    int page_offset, page_length, ret;

    ret = i915_gem_object_pin(obj, 0, true, true);
    if (ret)
        goto out;

    ret = i915_gem_object_set_to_gtt_domain(obj, true);
    if (ret)
        goto out_unpin;

    ret = i915_gem_object_put_fence(obj);
    if (ret)
        goto out_unpin;

    user_data = (char __user *) (uintptr_t) args->data_ptr;
    remain = args->size;

    offset = obj->gtt_offset + args->offset;

    while (remain > 0) {
        /* Operation in this page
         *
         * page_base = page offset within aperture
         * page_offset = offset within page
         * page_length = bytes to copy for this page
         */
        page_base = offset & PAGE_MASK;
        page_offset = offset_in_page(offset);
        page_length = remain;
        if ((page_offset + remain) > PAGE_SIZE)
            page_length = PAGE_SIZE - page_offset;

        /* If we get a fault while copying data, then (presumably) our
         * source page isn't available. Return the error and we'll
         * retry in the slow path.
         */
        if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
                    page_offset, user_data, page_length)) {
            ret = -EFAULT;
            goto out_unpin;
        }

        remain -= page_length;
        user_data += page_length;
        offset += page_length;
    }

out_unpin:
    i915_gem_object_unpin(obj);
out:
    return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
          char __user *user_data,
          bool page_do_bit17_swizzling,
          bool needs_clflush_before,
          bool needs_clflush_after)
{
    char *vaddr;
    int ret;

    if (unlikely(page_do_bit17_swizzling))
        return -EINVAL;

    vaddr = kmap_atomic(page);
    if (needs_clflush_before)
        drm_clflush_virt_range(vaddr + shmem_page_offset,
                       page_length);
    ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
                        user_data,
                        page_length);
    if (needs_clflush_after)
        drm_clflush_virt_range(vaddr + shmem_page_offset,
                       page_length);
    kunmap_atomic(vaddr);

    return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
          char __user *user_data,
          bool page_do_bit17_swizzling,
          bool needs_clflush_before,
          bool needs_clflush_after)
{
    char *vaddr;
    int ret;

    vaddr = kmap(page);
    if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
        shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                         page_length,
                         page_do_bit17_swizzling);
    if (page_do_bit17_swizzling)
        ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
                        user_data,
                        page_length);
    else
        ret = __copy_from_user(vaddr + shmem_page_offset,
                       user_data,
                       page_length);
    if (needs_clflush_after)
        shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                         page_length,
                         page_do_bit17_swizzling);
    kunmap(page);

    return ret ? -EFAULT : 0;
}

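/* CPU pwrite path: mirrors the pread path above, walking the sg list page by
 * page and flushing cachelines around the copy when the object is uncached. */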
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
              struct drm_i915_gem_object *obj,
              struct drm_i915_gem_pwrite *args,
              struct drm_file *file)
{
    ssize_t remain;
    loff_t offset;
    char __user *user_data;
    int shmem_page_offset, page_length, ret = 0;
    int obj_do_bit17_swizzling, page_do_bit17_swizzling;
    int hit_slowpath = 0;
    int needs_clflush_after = 0;
    int needs_clflush_before = 0;
    int i;
    struct scatterlist *sg;

    user_data = (char __user *) (uintptr_t) args->data_ptr;
    remain = args->size;

    obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

    if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
        /* If we're not in the cpu write domain, set ourself into the gtt
         * write domain and manually flush cachelines (if required). This
         * optimizes for the case when the gpu will use the data
         * right away and we therefore have to clflush anyway. */
        if (obj->cache_level == I915_CACHE_NONE)
            needs_clflush_after = 1;
        if (obj->gtt_space) {
            ret = i915_gem_object_set_to_gtt_domain(obj, true);
            if (ret)
                return ret;
        }
    }
    /* Same trick applies for invalidate partially written cachelines before
     * writing. */
    if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
        && obj->cache_level == I915_CACHE_NONE)
        needs_clflush_before = 1;

    ret = i915_gem_object_get_pages(obj);
    if (ret)
        return ret;

    i915_gem_object_pin_pages(obj);

    offset = args->offset;
    obj->dirty = 1;

    for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
        struct page *page;
        int partial_cacheline_write;

        if (i < offset >> PAGE_SHIFT)
            continue;

        if (remain <= 0)
            break;

        /* Operation in this page
         *
         * shmem_page_offset = offset within page in shmem file
         * page_length = bytes to copy for this page
         */
        shmem_page_offset = offset_in_page(offset);

        page_length = remain;
        if ((shmem_page_offset + page_length) > PAGE_SIZE)
            page_length = PAGE_SIZE - shmem_page_offset;

        /* If we don't overwrite a cacheline completely we need to be
         * careful to have up-to-date data by first clflushing. Don't
         * overcomplicate things and flush the entire patch. */
        partial_cacheline_write = needs_clflush_before &&
            ((shmem_page_offset | page_length)
                & (boot_cpu_data.x86_clflush_size - 1));

        page = sg_page(sg);
        page_do_bit17_swizzling = obj_do_bit17_swizzling &&
            (page_to_phys(page) & (1 << 17)) != 0;

        ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
                    user_data, page_do_bit17_swizzling,
                    partial_cacheline_write,
                    needs_clflush_after);
        if (ret == 0)
            goto next_page;

        hit_slowpath = 1;
        mutex_unlock(&dev->struct_mutex);
        ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
                    user_data, page_do_bit17_swizzling,
                    partial_cacheline_write,
                    needs_clflush_after);

        mutex_lock(&dev->struct_mutex);

next_page:
        set_page_dirty(page);
        mark_page_accessed(page);

        if (ret)
            goto out;

        remain -= page_length;
        user_data += page_length;
        offset += page_length;
    }

out:
    i915_gem_object_unpin_pages(obj);

    if (hit_slowpath) {
        /* Fixup: Kill any reinstated backing storage pages */
        if (obj->madv == __I915_MADV_PURGED)
            i915_gem_object_truncate(obj);
        /* and flush dirty cachelines in case the object isn't in the cpu write
         * domain anymore. */
        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
            i915_gem_clflush_object(obj);
            intel_gtt_chipset_flush();
        }
    }

    if (needs_clflush_after)
        intel_gtt_chipset_flush();

    return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
              struct drm_file *file)
{
    struct drm_i915_gem_pwrite *args = data;
    struct drm_i915_gem_object *obj;
    int ret;

    if (args->size == 0)
        return 0;

    if (!access_ok(VERIFY_READ,
               (char __user *)(uintptr_t)args->data_ptr,
               args->size))
        return -EFAULT;

    ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
                       args->size);
    if (ret)
        return -EFAULT;

    ret = i915_mutex_lock_interruptible(dev);
960 | ret = i915_mutex_lock_interruptible(dev); |
961 | if (ret) |
961 | if (ret) |
962 | return ret; |
962 | return ret; |
963 | 963 | ||
964 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
964 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
965 | if (&obj->base == NULL) { |
965 | if (&obj->base == NULL) { |
966 | ret = -ENOENT; |
966 | ret = -ENOENT; |
967 | goto unlock; |
967 | goto unlock; |
968 | } |
968 | } |
969 | 969 | ||
970 | /* Bounds check destination. */ |
970 | /* Bounds check destination. */ |
971 | if (args->offset > obj->base.size || |
971 | if (args->offset > obj->base.size || |
972 | args->size > obj->base.size - args->offset) { |
972 | args->size > obj->base.size - args->offset) { |
973 | ret = -EINVAL; |
973 | ret = -EINVAL; |
974 | goto out; |
974 | goto out; |
975 | } |
975 | } |
976 | 976 | ||
977 | /* prime objects have no backing filp to GEM pread/pwrite |
977 | /* prime objects have no backing filp to GEM pread/pwrite |
978 | * pages from. |
978 | * pages from. |
979 | */ |
979 | */ |
980 | if (!obj->base.filp) { |
980 | if (!obj->base.filp) { |
981 | ret = -EINVAL; |
981 | ret = -EINVAL; |
982 | goto out; |
982 | goto out; |
983 | } |
983 | } |
984 | 984 | ||
985 | trace_i915_gem_object_pwrite(obj, args->offset, args->size); |
985 | trace_i915_gem_object_pwrite(obj, args->offset, args->size); |
986 | 986 | ||
987 | ret = -EFAULT; |
987 | ret = -EFAULT; |
988 | /* We can only do the GTT pwrite on untiled buffers, as otherwise |
988 | /* We can only do the GTT pwrite on untiled buffers, as otherwise |
989 | * it would end up going through the fenced access, and we'll get |
989 | * it would end up going through the fenced access, and we'll get |
990 | * different detiling behavior between reading and writing. |
990 | * different detiling behavior between reading and writing. |
991 | * pread/pwrite currently are reading and writing from the CPU |
991 | * pread/pwrite currently are reading and writing from the CPU |
992 | * perspective, requiring manual detiling by the client. |
992 | * perspective, requiring manual detiling by the client. |
993 | */ |
993 | */ |
994 | if (obj->phys_obj) { |
994 | if (obj->phys_obj) { |
995 | ret = i915_gem_phys_pwrite(dev, obj, args, file); |
995 | ret = i915_gem_phys_pwrite(dev, obj, args, file); |
996 | goto out; |
996 | goto out; |
997 | } |
997 | } |
998 | 998 | ||
999 | if (obj->cache_level == I915_CACHE_NONE && |
999 | if (obj->cache_level == I915_CACHE_NONE && |
1000 | obj->tiling_mode == I915_TILING_NONE && |
1000 | obj->tiling_mode == I915_TILING_NONE && |
1001 | obj->base.write_domain != I915_GEM_DOMAIN_CPU) { |
1001 | obj->base.write_domain != I915_GEM_DOMAIN_CPU) { |
1002 | ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); |
1002 | ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); |
1003 | /* Note that the gtt paths might fail with non-page-backed user |
1003 | /* Note that the gtt paths might fail with non-page-backed user |
1004 | * pointers (e.g. gtt mappings when moving data between |
1004 | * pointers (e.g. gtt mappings when moving data between |
1005 | * textures). Fall back to the shmem path in that case. */ |
1005 | * textures). Fall back to the shmem path in that case. */ |
1006 | } |
1006 | } |
1007 | 1007 | ||
1008 | if (ret == -EFAULT || ret == -ENOSPC) |
1008 | if (ret == -EFAULT || ret == -ENOSPC) |
1009 | ret = i915_gem_shmem_pwrite(dev, obj, args, file); |
1009 | ret = i915_gem_shmem_pwrite(dev, obj, args, file); |
1010 | 1010 | ||
1011 | out: |
1011 | out: |
1012 | drm_gem_object_unreference(&obj->base); |
1012 | drm_gem_object_unreference(&obj->base); |
1013 | unlock: |
1013 | unlock: |
1014 | mutex_unlock(&dev->struct_mutex); |
1014 | mutex_unlock(&dev->struct_mutex); |
1015 | return ret; |
1015 | return ret; |
1016 | } |
1016 | } |
1017 | 1017 | ||
1018 | #endif |
1018 | #endif |
1019 | 1019 | ||
1020 | int |
1020 | int |
1021 | i915_gem_check_wedge(struct drm_i915_private *dev_priv, |
1021 | i915_gem_check_wedge(struct drm_i915_private *dev_priv, |
1022 | bool interruptible) |
1022 | bool interruptible) |
1023 | { |
1023 | { |
1024 | if (atomic_read(&dev_priv->mm.wedged)) { |
1024 | if (atomic_read(&dev_priv->mm.wedged)) { |
1025 | struct completion *x = &dev_priv->error_completion; |
1025 | struct completion *x = &dev_priv->error_completion; |
1026 | bool recovery_complete; |
1026 | bool recovery_complete; |
1027 | unsigned long flags; |
1027 | unsigned long flags; |
1028 | 1028 | ||
1029 | /* Give the error handler a chance to run. */ |
1029 | /* Give the error handler a chance to run. */ |
1030 | spin_lock_irqsave(&x->wait.lock, flags); |
1030 | spin_lock_irqsave(&x->wait.lock, flags); |
1031 | recovery_complete = x->done > 0; |
1031 | recovery_complete = x->done > 0; |
1032 | spin_unlock_irqrestore(&x->wait.lock, flags); |
1032 | spin_unlock_irqrestore(&x->wait.lock, flags); |
1033 | 1033 | ||
1034 | /* Non-interruptible callers can't handle -EAGAIN, hence return |
1034 | /* Non-interruptible callers can't handle -EAGAIN, hence return |
1035 | * -EIO unconditionally for these. */ |
1035 | * -EIO unconditionally for these. */ |
1036 | if (!interruptible) |
1036 | if (!interruptible) |
1037 | return -EIO; |
1037 | return -EIO; |
1038 | 1038 | ||
1039 | /* Recovery complete, but still wedged means reset failure. */ |
1039 | /* Recovery complete, but still wedged means reset failure. */ |
1040 | if (recovery_complete) |
1040 | if (recovery_complete) |
1041 | return -EIO; |
1041 | return -EIO; |
1042 | 1042 | ||
1043 | return -EAGAIN; |
1043 | return -EAGAIN; |
1044 | } |
1044 | } |
1045 | 1045 | ||
1046 | return 0; |
1046 | return 0; |
1047 | } |
1047 | } |
1048 | 1048 | ||
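/*
 * Editor's note (illustration, not part of the driver): how callers are
 * expected to interpret i915_gem_check_wedge() above.
 *
 *     ret = i915_gem_check_wedge(dev_priv, true);
 *     if (ret == -EAGAIN)        // reset still in progress, safe to retry
 *             ...back off and retry later...
 *     else if (ret == -EIO)      // reset failed, or caller cannot retry
 *             ...give up on the GPU...
 *     else                       // 0: not wedged, carry on
 *             ...proceed with the wait...
 */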
1049 | /* |
1049 | /* |
1050 | * Compare seqno against outstanding lazy request. Emit a request if they are |
1050 | * Compare seqno against outstanding lazy request. Emit a request if they are |
1051 | * equal. |
1051 | * equal. |
1052 | */ |
1052 | */ |
1053 | static int |
1053 | static int |
1054 | i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) |
1054 | i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) |
1055 | { |
1055 | { |
1056 | int ret; |
1056 | int ret; |
1057 | 1057 | ||
1058 | BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); |
1058 | BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); |
1059 | 1059 | ||
1060 | ret = 0; |
1060 | ret = 0; |
1061 | if (seqno == ring->outstanding_lazy_request) |
1061 | if (seqno == ring->outstanding_lazy_request) |
1062 | ret = i915_add_request(ring, NULL, NULL); |
1062 | ret = i915_add_request(ring, NULL, NULL); |
1063 | 1063 | ||
1064 | return ret; |
1064 | return ret; |
1065 | } |
1065 | } |
1066 | 1066 | ||
1067 | /** |
1067 | /** |
1068 | * __wait_seqno - wait until execution of seqno has finished |
1068 | * __wait_seqno - wait until execution of seqno has finished |
1069 | * @ring: the ring expected to report seqno |
1069 | * @ring: the ring expected to report seqno |
1070 | * @seqno: the sequence number to wait for |
1070 | * @seqno: the sequence number to wait for |
1071 | * @interruptible: do an interruptible wait (normally yes) |
1071 | * @interruptible: do an interruptible wait (normally yes) |
1072 | * @timeout: in - how long to wait (NULL forever); out - how much time remaining |
1072 | * @timeout: in - how long to wait (NULL forever); out - how much time remaining |
1073 | * |
1073 | * |
1074 | * Returns 0 if the seqno was found within the allotted time. Else returns the |
1074 | * Returns 0 if the seqno was found within the allotted time. Else returns the |
1075 | * errno with the remaining time filled in the timeout argument. |
1075 | * errno with the remaining time filled in the timeout argument. |
1076 | */ |
1076 | */ |
1077 | static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, |
1077 | static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, |
1078 | bool interruptible, struct timespec *timeout) |
1078 | bool interruptible, struct timespec *timeout) |
1079 | { |
1079 | { |
1080 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1080 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1081 | struct timespec before, now, wait_time={1,0}; |
1081 | struct timespec before, now, wait_time={1,0}; |
1082 | unsigned long timeout_jiffies; |
1082 | unsigned long timeout_jiffies; |
1083 | long end; |
1083 | long end; |
1084 | bool wait_forever = true; |
1084 | bool wait_forever = true; |
1085 | int ret; |
1085 | int ret; |
1086 | 1086 | ||
1087 | if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) |
1087 | if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) |
1088 | return 0; |
1088 | return 0; |
1089 | 1089 | ||
1090 | trace_i915_gem_request_wait_begin(ring, seqno); |
1090 | trace_i915_gem_request_wait_begin(ring, seqno); |
1091 | 1091 | ||
1092 | if (timeout != NULL) { |
1092 | if (timeout != NULL) { |
1093 | wait_time = *timeout; |
1093 | wait_time = *timeout; |
1094 | wait_forever = false; |
1094 | wait_forever = false; |
1095 | } |
1095 | } |
1096 | 1096 | ||
1097 | // timeout_jiffies = timespec_to_jiffies(&wait_time); |
1097 | // timeout_jiffies = timespec_to_jiffies(&wait_time); |
1098 | 1098 | ||
1099 | if (WARN_ON(!ring->irq_get(ring))) |
1099 | if (WARN_ON(!ring->irq_get(ring))) |
1100 | return -ENODEV; |
1100 | return -ENODEV; |
1101 | #if 0 |
1101 | #if 0 |
1102 | 1102 | ||
1103 | /* Record current time in case interrupted by signal, or wedged */ |
1103 | /* Record current time in case interrupted by signal, or wedged */ |
1104 | getrawmonotonic(&before); |
1104 | getrawmonotonic(&before); |
1105 | 1105 | ||
1106 | #define EXIT_COND \ |
1106 | #define EXIT_COND \ |
1107 | (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ |
1107 | (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ |
1108 | atomic_read(&dev_priv->mm.wedged)) |
1108 | atomic_read(&dev_priv->mm.wedged)) |
1109 | do { |
1109 | do { |
1110 | end = wait_event_timeout(ring->irq_queue, EXIT_COND, |
1110 | end = wait_event_timeout(ring->irq_queue, EXIT_COND, |
1111 | timeout_jiffies); |
1111 | timeout_jiffies); |
1112 | 1112 | ||
1113 | ret = i915_gem_check_wedge(dev_priv, interruptible); |
1113 | ret = i915_gem_check_wedge(dev_priv, interruptible); |
1114 | if (ret) |
1114 | if (ret) |
1115 | end = ret; |
1115 | end = ret; |
1116 | } while (end == 0 && wait_forever); |
1116 | } while (end == 0 && wait_forever); |
1117 | 1117 | ||
1118 | getrawmonotonic(&now); |
1118 | getrawmonotonic(&now); |
1119 | 1119 | ||
1120 | ring->irq_put(ring); |
1120 | ring->irq_put(ring); |
1121 | trace_i915_gem_request_wait_end(ring, seqno); |
1121 | trace_i915_gem_request_wait_end(ring, seqno); |
1122 | #undef EXIT_COND |
1122 | #undef EXIT_COND |
1123 | 1123 | ||
1124 | if (timeout) { |
1124 | if (timeout) { |
1125 | // struct timespec sleep_time = timespec_sub(now, before); |
1125 | // struct timespec sleep_time = timespec_sub(now, before); |
1126 | // *timeout = timespec_sub(*timeout, sleep_time); |
1126 | // *timeout = timespec_sub(*timeout, sleep_time); |
1127 | } |
1127 | } |
1128 | 1128 | ||
1129 | switch (end) { |
1129 | switch (end) { |
1130 | case -EIO: |
1130 | case -EIO: |
1131 | case -EAGAIN: /* Wedged */ |
1131 | case -EAGAIN: /* Wedged */ |
1132 | case -ERESTARTSYS: /* Signal */ |
1132 | case -ERESTARTSYS: /* Signal */ |
1133 | return (int)end; |
1133 | return (int)end; |
1134 | case 0: /* Timeout */ |
1134 | case 0: /* Timeout */ |
1135 | // if (timeout) |
1135 | // if (timeout) |
1136 | // set_normalized_timespec(timeout, 0, 0); |
1136 | // set_normalized_timespec(timeout, 0, 0); |
1137 | return -ETIME; |
1137 | return -ETIME; |
1138 | default: /* Completed */ |
1138 | default: /* Completed */ |
1139 | WARN_ON(end < 0); /* We're not aware of other errors */ |
1139 | WARN_ON(end < 0); /* We're not aware of other errors */ |
1140 | return 0; |
1140 | return 0; |
1141 | } |
1141 | } |
1142 | #endif |
1142 | #endif |
1143 | 1143 | ||
1144 | #define EXIT_COND \ |
1144 | #define EXIT_COND \ |
1145 | (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ |
1145 | (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ |
1146 | atomic_read(&dev_priv->mm.wedged)) |
1146 | atomic_read(&dev_priv->mm.wedged)) |
1147 | wait_event(ring->irq_queue, EXIT_COND); |
1147 | wait_event(ring->irq_queue, EXIT_COND); |
1148 | #undef EXIT_COND |
1148 | #undef EXIT_COND |
1149 | ring->irq_put(ring); |
1149 | ring->irq_put(ring); |
1150 | 1150 | ||
1151 | return 0; |
1151 | return 0; |
1152 | } |
1152 | } |
1153 | 1153 | ||
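/*
 * Editor's note (illustrative sketch, not driver code): the timeout
 * contract documented above, assuming the timed path under "#if 0" were
 * enabled. A bounded wait from within this file would look roughly like:
 *
 *     struct timespec ts = { 1, 0 };                  // wait at most 1s
 *     int ret = __wait_seqno(ring, seqno, true, &ts);
 *     // ret == 0      -> seqno completed, ts holds the time left over
 *     // ret == -ETIME -> timed out, ts is set to zero
 *     // otherwise     -> wedged (-EAGAIN/-EIO) or a signal (-ERESTARTSYS)
 *
 * In this port the timed branch is compiled out, so the wait is unbounded
 * and the timeout argument is never updated.
 */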
1154 | /** |
1154 | /** |
1155 | * Waits for a sequence number to be signaled, and cleans up the |
1155 | * Waits for a sequence number to be signaled, and cleans up the |
1156 | * request and object lists appropriately for that event. |
1156 | * request and object lists appropriately for that event. |
1157 | */ |
1157 | */ |
1158 | int |
1158 | int |
1159 | i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) |
1159 | i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) |
1160 | { |
1160 | { |
1161 | struct drm_device *dev = ring->dev; |
1161 | struct drm_device *dev = ring->dev; |
1162 | struct drm_i915_private *dev_priv = dev->dev_private; |
1162 | struct drm_i915_private *dev_priv = dev->dev_private; |
1163 | bool interruptible = dev_priv->mm.interruptible; |
1163 | bool interruptible = dev_priv->mm.interruptible; |
1164 | int ret; |
1164 | int ret; |
1165 | 1165 | ||
1166 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
1166 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
1167 | BUG_ON(seqno == 0); |
1167 | BUG_ON(seqno == 0); |
1168 | 1168 | ||
1169 | ret = i915_gem_check_wedge(dev_priv, interruptible); |
1169 | ret = i915_gem_check_wedge(dev_priv, interruptible); |
1170 | if (ret) |
1170 | if (ret) |
1171 | return ret; |
1171 | return ret; |
1172 | 1172 | ||
1173 | ret = i915_gem_check_olr(ring, seqno); |
1173 | ret = i915_gem_check_olr(ring, seqno); |
1174 | if (ret) |
1174 | if (ret) |
1175 | return ret; |
1175 | return ret; |
1176 | 1176 | ||
1177 | return __wait_seqno(ring, seqno, interruptible, NULL); |
1177 | return __wait_seqno(ring, seqno, interruptible, NULL); |
1178 | } |
1178 | } |
1179 | 1179 | ||
1180 | /** |
1180 | /** |
1181 | * Ensures that all rendering to the object has completed and the object is |
1181 | * Ensures that all rendering to the object has completed and the object is |
1182 | * safe to unbind from the GTT or access from the CPU. |
1182 | * safe to unbind from the GTT or access from the CPU. |
1183 | */ |
1183 | */ |
1184 | static __must_check int |
1184 | static __must_check int |
1185 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, |
1185 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, |
1186 | bool readonly) |
1186 | bool readonly) |
1187 | { |
1187 | { |
1188 | struct intel_ring_buffer *ring = obj->ring; |
1188 | struct intel_ring_buffer *ring = obj->ring; |
1189 | u32 seqno; |
1189 | u32 seqno; |
1190 | int ret; |
1190 | int ret; |
1191 | 1191 | ||
1192 | seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; |
1192 | seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; |
1193 | if (seqno == 0) |
1193 | if (seqno == 0) |
1194 | return 0; |
1194 | return 0; |
1195 | 1195 | ||
1196 | ret = i915_wait_seqno(ring, seqno); |
1196 | ret = i915_wait_seqno(ring, seqno); |
1197 | if (ret) |
1197 | if (ret) |
1198 | return ret; |
1198 | return ret; |
1199 | 1199 | ||
1200 | i915_gem_retire_requests_ring(ring); |
1200 | i915_gem_retire_requests_ring(ring); |
1201 | 1201 | ||
1202 | /* Manually manage the write flush as we may have not yet |
1202 | /* Manually manage the write flush as we may have not yet |
1203 | * retired the buffer. |
1203 | * retired the buffer. |
1204 | */ |
1204 | */ |
1205 | if (obj->last_write_seqno && |
1205 | if (obj->last_write_seqno && |
1206 | i915_seqno_passed(seqno, obj->last_write_seqno)) { |
1206 | i915_seqno_passed(seqno, obj->last_write_seqno)) { |
1207 | obj->last_write_seqno = 0; |
1207 | obj->last_write_seqno = 0; |
1208 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; |
1208 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; |
1209 | } |
1209 | } |
1210 | 1210 | ||
1211 | return 0; |
1211 | return 0; |
1212 | } |
1212 | } |
1213 | 1213 | ||
1214 | 1214 | ||
1215 | 1215 | ||
1216 | 1216 | ||
1217 | 1217 | ||
1218 | 1218 | ||
1219 | 1219 | ||
1220 | 1220 | ||
1221 | 1221 | ||
1222 | 1222 | ||
1223 | 1223 | ||
1224 | 1224 | ||
1225 | 1225 | ||
1226 | 1226 | ||
1227 | 1227 | ||
1228 | 1228 | ||
1229 | 1229 | ||
1230 | 1230 | ||
1231 | 1231 | ||
1232 | 1232 | ||
1233 | 1233 | ||
1234 | 1234 | ||
1235 | 1235 | ||
1236 | 1236 | ||
1237 | 1237 | ||
1238 | 1238 | ||
1239 | 1239 | ||
1240 | 1240 | ||
1241 | 1241 | ||
1242 | 1242 | ||
1243 | 1243 | ||
1244 | 1244 | ||
1245 | 1245 | ||
1246 | 1246 | ||
1247 | 1247 | ||
1248 | 1248 | ||
1249 | 1249 | ||
1250 | 1250 | ||
1251 | 1251 | ||
1252 | 1252 | ||
1253 | 1253 | ||
1254 | 1254 | ||
1255 | 1255 | ||
1256 | /** |
1256 | /** |
1257 | * i915_gem_release_mmap - remove physical page mappings |
1257 | * i915_gem_release_mmap - remove physical page mappings |
1258 | * @obj: obj in question |
1258 | * @obj: obj in question |
1259 | * |
1259 | * |
1260 | * Preserve the reservation of the mmapping with the DRM core code, but |
1260 | * Preserve the reservation of the mmapping with the DRM core code, but |
1261 | * relinquish ownership of the pages back to the system. |
1261 | * relinquish ownership of the pages back to the system. |
1262 | * |
1262 | * |
1263 | * It is vital that we remove the page mapping if we have mapped a tiled |
1263 | * It is vital that we remove the page mapping if we have mapped a tiled |
1264 | * object through the GTT and then lose the fence register due to |
1264 | * object through the GTT and then lose the fence register due to |
1265 | * resource pressure. Similarly if the object has been moved out of the |
1265 | * resource pressure. Similarly if the object has been moved out of the |
1266 | * aperture, then pages mapped into userspace must be revoked. Removing the |
1266 | * aperture, then pages mapped into userspace must be revoked. Removing the |
1267 | * mapping will then trigger a page fault on the next user access, allowing |
1267 | * mapping will then trigger a page fault on the next user access, allowing |
1268 | * fixup by i915_gem_fault(). |
1268 | * fixup by i915_gem_fault(). |
1269 | */ |
1269 | */ |
1270 | void |
1270 | void |
1271 | i915_gem_release_mmap(struct drm_i915_gem_object *obj) |
1271 | i915_gem_release_mmap(struct drm_i915_gem_object *obj) |
1272 | { |
1272 | { |
1273 | if (!obj->fault_mappable) |
1273 | if (!obj->fault_mappable) |
1274 | return; |
1274 | return; |
1275 | 1275 | ||
1276 | if (obj->base.dev->dev_mapping) |
1276 | if (obj->base.dev->dev_mapping) |
1277 | // unmap_mapping_range(obj->base.dev->dev_mapping, |
1277 | // unmap_mapping_range(obj->base.dev->dev_mapping, |
1278 | // (loff_t)obj->base.map_list.hash.key< |
1278 | // (loff_t)obj->base.map_list.hash.key< |
1279 | // obj->base.size, 1); |
1279 | // obj->base.size, 1); |
1280 | 1280 | ||
1281 | obj->fault_mappable = false; |
1281 | obj->fault_mappable = false; |
1282 | } |
1282 | } |
1283 | 1283 | ||
1284 | static uint32_t |
1284 | static uint32_t |
1285 | i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) |
1285 | i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) |
1286 | { |
1286 | { |
1287 | uint32_t gtt_size; |
1287 | uint32_t gtt_size; |
1288 | 1288 | ||
1289 | if (INTEL_INFO(dev)->gen >= 4 || |
1289 | if (INTEL_INFO(dev)->gen >= 4 || |
1290 | tiling_mode == I915_TILING_NONE) |
1290 | tiling_mode == I915_TILING_NONE) |
1291 | return size; |
1291 | return size; |
1292 | 1292 | ||
1293 | /* Previous chips need a power-of-two fence region when tiling */ |
1293 | /* Previous chips need a power-of-two fence region when tiling */ |
1294 | if (INTEL_INFO(dev)->gen == 3) |
1294 | if (INTEL_INFO(dev)->gen == 3) |
1295 | gtt_size = 1024*1024; |
1295 | gtt_size = 1024*1024; |
1296 | else |
1296 | else |
1297 | gtt_size = 512*1024; |
1297 | gtt_size = 512*1024; |
1298 | 1298 | ||
1299 | while (gtt_size < size) |
1299 | while (gtt_size < size) |
1300 | gtt_size <<= 1; |
1300 | gtt_size <<= 1; |
1301 | 1301 | ||
1302 | return gtt_size; |
1302 | return gtt_size; |
1303 | } |
1303 | } |
1304 | 1304 | ||
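/*
 * Editor's note, worked example: for a tiled 1.5 MiB object on gen3 the
 * loop above starts at 1 MiB and doubles once, giving a 2 MiB fence
 * region; a tiled 300 KiB object on gen2 stays at the 512 KiB minimum.
 * On gen4+ (or for untiled objects) the size is returned unchanged.
 */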
1305 | /** |
1305 | /** |
1306 | * i915_gem_get_gtt_alignment - return required GTT alignment for an object |
1306 | * i915_gem_get_gtt_alignment - return required GTT alignment for an object |
1307 | * @obj: object to check |
1307 | * @obj: object to check |
1308 | * |
1308 | * |
1309 | * Return the required GTT alignment for an object, taking into account |
1309 | * Return the required GTT alignment for an object, taking into account |
1310 | * potential fence register mapping. |
1310 | * potential fence register mapping. |
1311 | */ |
1311 | */ |
1312 | static uint32_t |
1312 | static uint32_t |
1313 | i915_gem_get_gtt_alignment(struct drm_device *dev, |
1313 | i915_gem_get_gtt_alignment(struct drm_device *dev, |
1314 | uint32_t size, |
1314 | uint32_t size, |
1315 | int tiling_mode) |
1315 | int tiling_mode) |
1316 | { |
1316 | { |
1317 | /* |
1317 | /* |
1318 | * Minimum alignment is 4k (GTT page size), but might be greater |
1318 | * Minimum alignment is 4k (GTT page size), but might be greater |
1319 | * if a fence register is needed for the object. |
1319 | * if a fence register is needed for the object. |
1320 | */ |
1320 | */ |
1321 | if (INTEL_INFO(dev)->gen >= 4 || |
1321 | if (INTEL_INFO(dev)->gen >= 4 || |
1322 | tiling_mode == I915_TILING_NONE) |
1322 | tiling_mode == I915_TILING_NONE) |
1323 | return 4096; |
1323 | return 4096; |
1324 | 1324 | ||
1325 | /* |
1325 | /* |
1326 | * Previous chips need to be aligned to the size of the smallest |
1326 | * Previous chips need to be aligned to the size of the smallest |
1327 | * fence register that can contain the object. |
1327 | * fence register that can contain the object. |
1328 | */ |
1328 | */ |
1329 | return i915_gem_get_gtt_size(dev, size, tiling_mode); |
1329 | return i915_gem_get_gtt_size(dev, size, tiling_mode); |
1330 | } |
1330 | } |
1331 | 1331 | ||
1332 | /** |
1332 | /** |
1333 | * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an |
1333 | * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an |
1334 | * unfenced object |
1334 | * unfenced object |
1335 | * @dev: the device |
1335 | * @dev: the device |
1336 | * @size: size of the object |
1336 | * @size: size of the object |
1337 | * @tiling_mode: tiling mode of the object |
1337 | * @tiling_mode: tiling mode of the object |
1338 | * |
1338 | * |
1339 | * Return the required GTT alignment for an object, only taking into account |
1339 | * Return the required GTT alignment for an object, only taking into account |
1340 | * unfenced tiled surface requirements. |
1340 | * unfenced tiled surface requirements. |
1341 | */ |
1341 | */ |
1342 | uint32_t |
1342 | uint32_t |
1343 | i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, |
1343 | i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, |
1344 | uint32_t size, |
1344 | uint32_t size, |
1345 | int tiling_mode) |
1345 | int tiling_mode) |
1346 | { |
1346 | { |
1347 | /* |
1347 | /* |
1348 | * Minimum alignment is 4k (GTT page size) for sane hw. |
1348 | * Minimum alignment is 4k (GTT page size) for sane hw. |
1349 | */ |
1349 | */ |
1350 | if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || |
1350 | if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || |
1351 | tiling_mode == I915_TILING_NONE) |
1351 | tiling_mode == I915_TILING_NONE) |
1352 | return 4096; |
1352 | return 4096; |
1353 | 1353 | ||
1354 | /* Previous hardware however needs to be aligned to a power-of-two |
1354 | /* Previous hardware however needs to be aligned to a power-of-two |
1355 | * tile height. The simplest method for determining this is to reuse |
1355 | * tile height. The simplest method for determining this is to reuse |
1356 | * the power-of-two tile object size. |
1356 | * the power-of-two tile object size. |
1357 | */ |
1357 | */ |
1358 | return i915_gem_get_gtt_size(dev, size, tiling_mode); |
1358 | return i915_gem_get_gtt_size(dev, size, tiling_mode); |
1359 | } |
1359 | } |
1360 | 1360 | ||
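/*
 * Editor's note, worked example for the two alignment helpers above: a
 * tiled 1.5 MiB object on gen3 must be bound at a 2 MiB-aligned offset
 * (the power-of-two fence size), whether or not it will be fenced. On
 * gen4+ or for untiled objects 4 KiB suffices, and G33 is the one older
 * part that also gets away with 4 KiB for mappings that are never fenced.
 */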
1361 | /* Immediately discard the backing storage */ |
1361 | /* Immediately discard the backing storage */ |
1362 | static void |
1362 | static void |
1363 | i915_gem_object_truncate(struct drm_i915_gem_object *obj) |
1363 | i915_gem_object_truncate(struct drm_i915_gem_object *obj) |
1364 | { |
1364 | { |
1365 | // struct inode *inode; |
1365 | // struct inode *inode; |
1366 | 1366 | ||
1367 | // i915_gem_object_free_mmap_offset(obj); |
1367 | // i915_gem_object_free_mmap_offset(obj); |
1368 | 1368 | ||
1369 | // if (obj->base.filp == NULL) |
1369 | // if (obj->base.filp == NULL) |
1370 | // return; |
1370 | // return; |
1371 | 1371 | ||
1372 | /* Our goal here is to return as much memory as possible |
1372 | /* Our goal here is to return as much memory as possible |
1373 | * back to the system, as we are called from OOM. |
1373 | * back to the system, as we are called from OOM. |
1374 | * To do this we must instruct the shmfs to drop all of its |
1374 | * To do this we must instruct the shmfs to drop all of its |
1375 | * backing pages, *now*. |
1375 | * backing pages, *now*. |
1376 | */ |
1376 | */ |
1377 | // inode = obj->base.filp->f_path.dentry->d_inode; |
1377 | // inode = obj->base.filp->f_path.dentry->d_inode; |
1378 | // shmem_truncate_range(inode, 0, (loff_t)-1); |
1378 | // shmem_truncate_range(inode, 0, (loff_t)-1); |
1379 | 1379 | ||
1380 | obj->madv = __I915_MADV_PURGED; |
1380 | obj->madv = __I915_MADV_PURGED; |
1381 | } |
1381 | } |
1382 | 1382 | ||
1383 | static inline int |
1383 | static inline int |
1384 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) |
1384 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) |
1385 | { |
1385 | { |
1386 | return obj->madv == I915_MADV_DONTNEED; |
1386 | return obj->madv == I915_MADV_DONTNEED; |
1387 | } |
1387 | } |
1388 | 1388 | ||
1389 | static void |
1389 | static void |
1390 | i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) |
1390 | i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) |
1391 | { |
1391 | { |
1392 | int ret, i; |
1392 | int ret, i; |
1393 | 1393 | ||
1394 | BUG_ON(obj->madv == __I915_MADV_PURGED); |
1394 | BUG_ON(obj->madv == __I915_MADV_PURGED); |
1395 | 1395 | ||
1396 | ret = i915_gem_object_set_to_cpu_domain(obj, true); |
1396 | ret = i915_gem_object_set_to_cpu_domain(obj, true); |
1397 | if (ret) { |
1397 | if (ret) { |
1398 | /* In the event of a disaster, abandon all caches and |
1398 | /* In the event of a disaster, abandon all caches and |
1399 | * hope for the best. |
1399 | * hope for the best. |
1400 | */ |
1400 | */ |
1401 | WARN_ON(ret != -EIO); |
1401 | WARN_ON(ret != -EIO); |
1402 | i915_gem_clflush_object(obj); |
1402 | i915_gem_clflush_object(obj); |
1403 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
1403 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
1404 | } |
1404 | } |
1405 | 1405 | ||
1406 | if (obj->madv == I915_MADV_DONTNEED) |
1406 | if (obj->madv == I915_MADV_DONTNEED) |
1407 | obj->dirty = 0; |
1407 | obj->dirty = 0; |
1408 | 1408 | ||
1409 | for (i = 0; i < obj->pages.nents; i++) |
1409 | for (i = 0; i < obj->pages.nents; i++) |
1410 | FreePage(obj->pages.page[i]); |
1410 | FreePage(obj->pages.page[i]); |
1411 | 1411 | ||
1412 | DRM_DEBUG_KMS("%s free %d pages\n", __FUNCTION__, obj->pages.nents); |
1412 | DRM_DEBUG_KMS("%s release %d pages\n", __FUNCTION__, obj->pages.nents); |
1413 | obj->dirty = 0; |
1413 | obj->dirty = 0; |
1414 | kfree(obj->pages.page); |
1414 | kfree(obj->pages.page); |
1415 | } |
1415 | } |
1416 | 1416 | ||
1417 | static int |
1417 | static int |
1418 | i915_gem_object_put_pages(struct drm_i915_gem_object *obj) |
1418 | i915_gem_object_put_pages(struct drm_i915_gem_object *obj) |
1419 | { |
1419 | { |
1420 | const struct drm_i915_gem_object_ops *ops = obj->ops; |
1420 | const struct drm_i915_gem_object_ops *ops = obj->ops; |
- | 1421 | ||
- | 1422 | // printf("page %x pin count %d\n", |
- | 1423 | // obj->pages.page, obj->pages_pin_count ); |
1421 | 1424 | ||
1422 | if (obj->pages.page == NULL) |
1425 | if (obj->pages.page == NULL) |
1423 | return 0; |
1426 | return 0; |
1424 | 1427 | ||
1425 | BUG_ON(obj->gtt_space); |
1428 | BUG_ON(obj->gtt_space); |
1426 | 1429 | ||
1427 | if (obj->pages_pin_count) |
1430 | if (obj->pages_pin_count) |
1428 | return -EBUSY; |
1431 | return -EBUSY; |
1429 | 1432 | ||
1430 | ops->put_pages(obj); |
1433 | ops->put_pages(obj); |
1431 | obj->pages.page = NULL; |
1434 | obj->pages.page = NULL; |
1432 | 1435 | ||
1433 | list_del(&obj->gtt_list); |
1436 | list_del(&obj->gtt_list); |
1434 | if (i915_gem_object_is_purgeable(obj)) |
1437 | if (i915_gem_object_is_purgeable(obj)) |
1435 | i915_gem_object_truncate(obj); |
1438 | i915_gem_object_truncate(obj); |
1436 | 1439 | ||
1437 | return 0; |
1440 | return 0; |
1438 | } |
1441 | } |
1439 | 1442 | ||
1440 | 1443 | ||
1441 | 1444 | ||
1442 | 1445 | ||
1443 | 1446 | ||
1444 | 1447 | ||
1445 | 1448 | ||
1446 | 1449 | ||
1447 | static int |
1450 | static int |
1448 | i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) |
1451 | i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) |
1449 | { |
1452 | { |
1450 | dma_addr_t page; |
1453 | dma_addr_t page; |
1451 | int page_count, i; |
1454 | int page_count, i; |
1452 | 1455 | ||
1453 | /* Get the list of pages out of our struct file. They'll be pinned |
1456 | /* Get the list of pages out of our struct file. They'll be pinned |
1454 | * at this point until we release them. |
1457 | * at this point until we release them. |
1455 | */ |
1458 | */ |
1456 | page_count = obj->base.size / PAGE_SIZE; |
1459 | page_count = obj->base.size / PAGE_SIZE; |
1457 | BUG_ON(obj->pages.page != NULL); |
1460 | BUG_ON(obj->pages.page != NULL); |
1458 | obj->pages.page = malloc(page_count * sizeof(dma_addr_t)); |
1461 | obj->pages.page = malloc(page_count * sizeof(dma_addr_t)); |
1459 | if (obj->pages.page == NULL) |
1462 | if (obj->pages.page == NULL) |
1460 | return -ENOMEM; |
1463 | return -ENOMEM; |
1461 | 1464 | ||
1462 | for (i = 0; i < page_count; i++) { |
1465 | for (i = 0; i < page_count; i++) { |
1463 | page = AllocPage(); // oh-oh |
1466 | page = AllocPage(); // oh-oh |
1464 | if ( page == 0 ) |
1467 | if ( page == 0 ) |
1465 | goto err_pages; |
1468 | goto err_pages; |
1466 | 1469 | ||
1467 | obj->pages.page[i] = page; |
1470 | obj->pages.page[i] = page; |
1468 | }; |
1471 | }; |
1469 | DRM_DEBUG_KMS("%s alloc %d pages\n", __FUNCTION__, page_count); |
1472 | DRM_DEBUG_KMS("%s alloc %d pages\n", __FUNCTION__, page_count); |
1470 | obj->pages.nents = page_count; |
1473 | obj->pages.nents = page_count; |
1471 | 1474 | ||
1472 | 1475 | ||
1473 | // if (obj->tiling_mode != I915_TILING_NONE) |
1476 | // if (obj->tiling_mode != I915_TILING_NONE) |
1474 | // i915_gem_object_do_bit_17_swizzle(obj); |
1477 | // i915_gem_object_do_bit_17_swizzle(obj); |
1475 | 1478 | ||
1476 | return 0; |
1479 | return 0; |
1477 | 1480 | ||
1478 | err_pages: |
1481 | err_pages: |
1479 | while (i--) |
1482 | while (i--) |
1480 | FreePage(obj->pages.page[i]); |
1483 | FreePage(obj->pages.page[i]); |
1481 | 1484 | ||
1482 | free(obj->pages.page); |
1485 | free(obj->pages.page); |
1483 | obj->pages.page = NULL; |
1486 | obj->pages.page = NULL; |
1484 | obj->pages.nents = 0; |
1487 | obj->pages.nents = 0; |
1485 | 1488 | ||
1486 | return -ENOMEM; |
1489 | return -ENOMEM; |
1487 | } |
1490 | } |
1488 | 1491 | ||
1489 | /* Ensure that the associated pages are gathered from the backing storage |
1492 | /* Ensure that the associated pages are gathered from the backing storage |
1490 | * and pinned into our object. i915_gem_object_get_pages() may be called |
1493 | * and pinned into our object. i915_gem_object_get_pages() may be called |
1491 | * multiple times before they are released by a single call to |
1494 | * multiple times before they are released by a single call to |
1492 | * i915_gem_object_put_pages() - once the pages are no longer referenced |
1495 | * i915_gem_object_put_pages() - once the pages are no longer referenced |
1493 | * either as a result of memory pressure (reaping pages under the shrinker) |
1496 | * either as a result of memory pressure (reaping pages under the shrinker) |
1494 | * or as the object is itself released. |
1497 | * or as the object is itself released. |
1495 | */ |
1498 | */ |
1496 | int |
1499 | int |
1497 | i915_gem_object_get_pages(struct drm_i915_gem_object *obj) |
1500 | i915_gem_object_get_pages(struct drm_i915_gem_object *obj) |
1498 | { |
1501 | { |
1499 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
1502 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
1500 | const struct drm_i915_gem_object_ops *ops = obj->ops; |
1503 | const struct drm_i915_gem_object_ops *ops = obj->ops; |
1501 | int ret; |
1504 | int ret; |
1502 | 1505 | ||
1503 | if (obj->pages.page) |
1506 | if (obj->pages.page) |
1504 | return 0; |
1507 | return 0; |
1505 | 1508 | ||
1506 | BUG_ON(obj->pages_pin_count); |
1509 | BUG_ON(obj->pages_pin_count); |
1507 | 1510 | ||
1508 | ret = ops->get_pages(obj); |
1511 | ret = ops->get_pages(obj); |
1509 | if (ret) |
1512 | if (ret) |
1510 | return ret; |
1513 | return ret; |
1511 | 1514 | ||
1512 | list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); |
1515 | list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); |
1513 | return 0; |
1516 | return 0; |
1514 | } |
1517 | } |
1515 | 1518 | ||
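/*
 * Editor's note (usage sketch, not part of the driver): a caller that
 * needs the backing pages to stay resident brackets its access with the
 * pin helpers; i915_gem_object_pin_pages()/unpin_pages() adjust
 * obj->pages_pin_count, which makes i915_gem_object_put_pages() return
 * -EBUSY until the last user has finished.
 *
 *     ret = i915_gem_object_get_pages(obj);
 *     if (ret)
 *             return ret;
 *     i915_gem_object_pin_pages(obj);
 *     ...access obj->pages...
 *     i915_gem_object_unpin_pages(obj);
 */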
1516 | void |
1519 | void |
1517 | i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
1520 | i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
1518 | struct intel_ring_buffer *ring, |
1521 | struct intel_ring_buffer *ring, |
1519 | u32 seqno) |
1522 | u32 seqno) |
1520 | { |
1523 | { |
1521 | struct drm_device *dev = obj->base.dev; |
1524 | struct drm_device *dev = obj->base.dev; |
1522 | struct drm_i915_private *dev_priv = dev->dev_private; |
1525 | struct drm_i915_private *dev_priv = dev->dev_private; |
1523 | 1526 | ||
1524 | BUG_ON(ring == NULL); |
1527 | BUG_ON(ring == NULL); |
1525 | obj->ring = ring; |
1528 | obj->ring = ring; |
1526 | 1529 | ||
1527 | /* Add a reference if we're newly entering the active list. */ |
1530 | /* Add a reference if we're newly entering the active list. */ |
1528 | if (!obj->active) { |
1531 | if (!obj->active) { |
1529 | drm_gem_object_reference(&obj->base); |
1532 | drm_gem_object_reference(&obj->base); |
1530 | obj->active = 1; |
1533 | obj->active = 1; |
1531 | } |
1534 | } |
1532 | 1535 | ||
1533 | /* Move from whatever list we were on to the tail of execution. */ |
1536 | /* Move from whatever list we were on to the tail of execution. */ |
1534 | list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); |
1537 | list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); |
1535 | list_move_tail(&obj->ring_list, &ring->active_list); |
1538 | list_move_tail(&obj->ring_list, &ring->active_list); |
1536 | 1539 | ||
1537 | obj->last_read_seqno = seqno; |
1540 | obj->last_read_seqno = seqno; |
1538 | 1541 | ||
1539 | if (obj->fenced_gpu_access) { |
1542 | if (obj->fenced_gpu_access) { |
1540 | obj->last_fenced_seqno = seqno; |
1543 | obj->last_fenced_seqno = seqno; |
1541 | 1544 | ||
1542 | /* Bump MRU to take account of the delayed flush */ |
1545 | /* Bump MRU to take account of the delayed flush */ |
1543 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
1546 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
1544 | struct drm_i915_fence_reg *reg; |
1547 | struct drm_i915_fence_reg *reg; |
1545 | 1548 | ||
1546 | reg = &dev_priv->fence_regs[obj->fence_reg]; |
1549 | reg = &dev_priv->fence_regs[obj->fence_reg]; |
1547 | list_move_tail(®->lru_list, |
1550 | list_move_tail(®->lru_list, |
1548 | &dev_priv->mm.fence_list); |
1551 | &dev_priv->mm.fence_list); |
1549 | } |
1552 | } |
1550 | } |
1553 | } |
1551 | } |
1554 | } |
1552 | 1555 | ||
1553 | static void |
1556 | static void |
1554 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) |
1557 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) |
1555 | { |
1558 | { |
1556 | struct drm_device *dev = obj->base.dev; |
1559 | struct drm_device *dev = obj->base.dev; |
1557 | struct drm_i915_private *dev_priv = dev->dev_private; |
1560 | struct drm_i915_private *dev_priv = dev->dev_private; |
1558 | 1561 | ||
1559 | BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); |
1562 | BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); |
1560 | BUG_ON(!obj->active); |
1563 | BUG_ON(!obj->active); |
1561 | 1564 | ||
1562 | if (obj->pin_count) /* are we a framebuffer? */ |
1565 | if (obj->pin_count) /* are we a framebuffer? */ |
1563 | intel_mark_fb_idle(obj); |
1566 | intel_mark_fb_idle(obj); |
1564 | 1567 | ||
1565 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
1568 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
1566 | 1569 | ||
1567 | list_del_init(&obj->ring_list); |
1570 | list_del_init(&obj->ring_list); |
1568 | obj->ring = NULL; |
1571 | obj->ring = NULL; |
1569 | 1572 | ||
1570 | obj->last_read_seqno = 0; |
1573 | obj->last_read_seqno = 0; |
1571 | obj->last_write_seqno = 0; |
1574 | obj->last_write_seqno = 0; |
1572 | obj->base.write_domain = 0; |
1575 | obj->base.write_domain = 0; |
1573 | 1576 | ||
1574 | obj->last_fenced_seqno = 0; |
1577 | obj->last_fenced_seqno = 0; |
1575 | obj->fenced_gpu_access = false; |
1578 | obj->fenced_gpu_access = false; |
1576 | 1579 | ||
1577 | obj->active = 0; |
1580 | obj->active = 0; |
1578 | drm_gem_object_unreference(&obj->base); |
1581 | drm_gem_object_unreference(&obj->base); |
1579 | 1582 | ||
1580 | WARN_ON(i915_verify_lists(dev)); |
1583 | WARN_ON(i915_verify_lists(dev)); |
1581 | } |
1584 | } |
1582 | 1585 | ||
1583 | static u32 |
1586 | static u32 |
1584 | i915_gem_get_seqno(struct drm_device *dev) |
1587 | i915_gem_get_seqno(struct drm_device *dev) |
1585 | { |
1588 | { |
1586 | drm_i915_private_t *dev_priv = dev->dev_private; |
1589 | drm_i915_private_t *dev_priv = dev->dev_private; |
1587 | u32 seqno = dev_priv->next_seqno; |
1590 | u32 seqno = dev_priv->next_seqno; |
1588 | 1591 | ||
1589 | /* reserve 0 for non-seqno */ |
1592 | /* reserve 0 for non-seqno */ |
1590 | if (++dev_priv->next_seqno == 0) |
1593 | if (++dev_priv->next_seqno == 0) |
1591 | dev_priv->next_seqno = 1; |
1594 | dev_priv->next_seqno = 1; |
1592 | 1595 | ||
1593 | return seqno; |
1596 | return seqno; |
1594 | } |
1597 | } |
1595 | 1598 | ||
1596 | u32 |
1599 | u32 |
1597 | i915_gem_next_request_seqno(struct intel_ring_buffer *ring) |
1600 | i915_gem_next_request_seqno(struct intel_ring_buffer *ring) |
1598 | { |
1601 | { |
1599 | if (ring->outstanding_lazy_request == 0) |
1602 | if (ring->outstanding_lazy_request == 0) |
1600 | ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev); |
1603 | ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev); |
1601 | 1604 | ||
1602 | return ring->outstanding_lazy_request; |
1605 | return ring->outstanding_lazy_request; |
1603 | } |
1606 | } |
1604 | 1607 | ||
1605 | int |
1608 | int |
1606 | i915_add_request(struct intel_ring_buffer *ring, |
1609 | i915_add_request(struct intel_ring_buffer *ring, |
1607 | struct drm_file *file, |
1610 | struct drm_file *file, |
1608 | u32 *out_seqno) |
1611 | u32 *out_seqno) |
1609 | { |
1612 | { |
1610 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1613 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1611 | struct drm_i915_gem_request *request; |
1614 | struct drm_i915_gem_request *request; |
1612 | u32 request_ring_position; |
1615 | u32 request_ring_position; |
1613 | u32 seqno; |
1616 | u32 seqno; |
1614 | int was_empty; |
1617 | int was_empty; |
1615 | int ret; |
1618 | int ret; |
1616 | 1619 | ||
1617 | /* |
1620 | /* |
1618 | * Emit any outstanding flushes - execbuf can fail to emit the flush |
1621 | * Emit any outstanding flushes - execbuf can fail to emit the flush |
1619 | * after having emitted the batchbuffer command. Hence we need to fix |
1622 | * after having emitted the batchbuffer command. Hence we need to fix |
1620 | * things up similar to emitting the lazy request. The difference here |
1623 | * things up similar to emitting the lazy request. The difference here |
1621 | * is that the flush _must_ happen before the next request, no matter |
1624 | * is that the flush _must_ happen before the next request, no matter |
1622 | * what. |
1625 | * what. |
1623 | */ |
1626 | */ |
1624 | ret = intel_ring_flush_all_caches(ring); |
1627 | ret = intel_ring_flush_all_caches(ring); |
1625 | if (ret) |
1628 | if (ret) |
1626 | return ret; |
1629 | return ret; |
1627 | 1630 | ||
1628 | request = kmalloc(sizeof(*request), GFP_KERNEL); |
1631 | request = kmalloc(sizeof(*request), GFP_KERNEL); |
1629 | if (request == NULL) |
1632 | if (request == NULL) |
1630 | return -ENOMEM; |
1633 | return -ENOMEM; |
1631 | 1634 | ||
1632 | seqno = i915_gem_next_request_seqno(ring); |
1635 | seqno = i915_gem_next_request_seqno(ring); |
1633 | 1636 | ||
1634 | /* Record the position of the start of the request so that |
1637 | /* Record the position of the start of the request so that |
1635 | * should we detect the updated seqno part-way through the |
1638 | * should we detect the updated seqno part-way through the |
1636 | * GPU processing the request, we never over-estimate the |
1639 | * GPU processing the request, we never over-estimate the |
1637 | * position of the head. |
1640 | * position of the head. |
1638 | */ |
1641 | */ |
1639 | request_ring_position = intel_ring_get_tail(ring); |
1642 | request_ring_position = intel_ring_get_tail(ring); |
1640 | 1643 | ||
1641 | ret = ring->add_request(ring, &seqno); |
1644 | ret = ring->add_request(ring, &seqno); |
1642 | if (ret) { |
1645 | if (ret) { |
1643 | kfree(request); |
1646 | kfree(request); |
1644 | return ret; |
1647 | return ret; |
1645 | } |
1648 | } |
1646 | 1649 | ||
1647 | trace_i915_gem_request_add(ring, seqno); |
1650 | trace_i915_gem_request_add(ring, seqno); |
1648 | 1651 | ||
1649 | request->seqno = seqno; |
1652 | request->seqno = seqno; |
1650 | request->ring = ring; |
1653 | request->ring = ring; |
1651 | request->tail = request_ring_position; |
1654 | request->tail = request_ring_position; |
1652 | request->emitted_jiffies = GetTimerTicks(); |
1655 | request->emitted_jiffies = GetTimerTicks(); |
1653 | was_empty = list_empty(&ring->request_list); |
1656 | was_empty = list_empty(&ring->request_list); |
1654 | list_add_tail(&request->list, &ring->request_list); |
1657 | list_add_tail(&request->list, &ring->request_list); |
1655 | request->file_priv = NULL; |
1658 | request->file_priv = NULL; |
1656 | 1659 | ||
1657 | 1660 | ||
1658 | ring->outstanding_lazy_request = 0; |
1661 | ring->outstanding_lazy_request = 0; |
1659 | 1662 | ||
1660 | if (!dev_priv->mm.suspended) { |
1663 | if (!dev_priv->mm.suspended) { |
1661 | if (i915_enable_hangcheck) { |
1664 | if (i915_enable_hangcheck) { |
1662 | // mod_timer(&dev_priv->hangcheck_timer, |
1665 | // mod_timer(&dev_priv->hangcheck_timer, |
1663 | // jiffies + |
1666 | // jiffies + |
1664 | // msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); |
1667 | // msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); |
1665 | } |
1668 | } |
1666 | if (was_empty) { |
1669 | if (was_empty) { |
1667 | queue_delayed_work(dev_priv->wq, |
1670 | queue_delayed_work(dev_priv->wq, |
1668 | &dev_priv->mm.retire_work, HZ); |
1671 | &dev_priv->mm.retire_work, HZ); |
1669 | intel_mark_busy(dev_priv->dev); |
1672 | intel_mark_busy(dev_priv->dev); |
1670 | } |
1673 | } |
1671 | } |
1674 | } |
1672 | 1675 | ||
1673 | if (out_seqno) |
1676 | if (out_seqno) |
1674 | *out_seqno = seqno; |
1677 | *out_seqno = seqno; |
1675 | return 0; |
1678 | return 0; |
1676 | } |
1679 | } |
1677 | 1680 | ||
1678 | 1681 | ||
1679 | 1682 | ||
1680 | 1683 | ||
1681 | static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, |
1684 | static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, |
1682 | struct intel_ring_buffer *ring) |
1685 | struct intel_ring_buffer *ring) |
1683 | { |
1686 | { |
1684 | while (!list_empty(&ring->request_list)) { |
1687 | while (!list_empty(&ring->request_list)) { |
1685 | struct drm_i915_gem_request *request; |
1688 | struct drm_i915_gem_request *request; |
1686 | 1689 | ||
1687 | request = list_first_entry(&ring->request_list, |
1690 | request = list_first_entry(&ring->request_list, |
1688 | struct drm_i915_gem_request, |
1691 | struct drm_i915_gem_request, |
1689 | list); |
1692 | list); |
1690 | 1693 | ||
1691 | list_del(&request->list); |
1694 | list_del(&request->list); |
1692 | // i915_gem_request_remove_from_client(request); |
1695 | // i915_gem_request_remove_from_client(request); |
1693 | kfree(request); |
1696 | kfree(request); |
1694 | } |
1697 | } |
1695 | 1698 | ||
1696 | while (!list_empty(&ring->active_list)) { |
1699 | while (!list_empty(&ring->active_list)) { |
1697 | struct drm_i915_gem_object *obj; |
1700 | struct drm_i915_gem_object *obj; |
1698 | 1701 | ||
1699 | obj = list_first_entry(&ring->active_list, |
1702 | obj = list_first_entry(&ring->active_list, |
1700 | struct drm_i915_gem_object, |
1703 | struct drm_i915_gem_object, |
1701 | ring_list); |
1704 | ring_list); |
1702 | 1705 | ||
1703 | i915_gem_object_move_to_inactive(obj); |
1706 | i915_gem_object_move_to_inactive(obj); |
1704 | } |
1707 | } |
1705 | } |
1708 | } |
1706 | 1709 | ||
1707 | static void i915_gem_reset_fences(struct drm_device *dev) |
1710 | static void i915_gem_reset_fences(struct drm_device *dev) |
1708 | { |
1711 | { |
1709 | struct drm_i915_private *dev_priv = dev->dev_private; |
1712 | struct drm_i915_private *dev_priv = dev->dev_private; |
1710 | int i; |
1713 | int i; |
1711 | 1714 | ||
1712 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
1715 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
1713 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; |
1716 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; |
1714 | 1717 | ||
1715 | i915_gem_write_fence(dev, i, NULL); |
1718 | i915_gem_write_fence(dev, i, NULL); |
1716 | 1719 | ||
1717 | if (reg->obj) |
1720 | if (reg->obj) |
1718 | i915_gem_object_fence_lost(reg->obj); |
1721 | i915_gem_object_fence_lost(reg->obj); |
1719 | 1722 | ||
1720 | reg->pin_count = 0; |
1723 | reg->pin_count = 0; |
1721 | reg->obj = NULL; |
1724 | reg->obj = NULL; |
1722 | INIT_LIST_HEAD(®->lru_list); |
1725 | INIT_LIST_HEAD(®->lru_list); |
1723 | } |
1726 | } |
1724 | 1727 | ||
1725 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
1728 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
1726 | } |
1729 | } |
1727 | 1730 | ||
1728 | void i915_gem_reset(struct drm_device *dev) |
1731 | void i915_gem_reset(struct drm_device *dev) |
1729 | { |
1732 | { |
1730 | struct drm_i915_private *dev_priv = dev->dev_private; |
1733 | struct drm_i915_private *dev_priv = dev->dev_private; |
1731 | struct drm_i915_gem_object *obj; |
1734 | struct drm_i915_gem_object *obj; |
1732 | struct intel_ring_buffer *ring; |
1735 | struct intel_ring_buffer *ring; |
1733 | int i; |
1736 | int i; |
1734 | 1737 | ||
1735 | for_each_ring(ring, dev_priv, i) |
1738 | for_each_ring(ring, dev_priv, i) |
1736 | i915_gem_reset_ring_lists(dev_priv, ring); |
1739 | i915_gem_reset_ring_lists(dev_priv, ring); |
1737 | 1740 | ||
1738 | /* Move everything out of the GPU domains to ensure we do any |
1741 | /* Move everything out of the GPU domains to ensure we do any |
1739 | * necessary invalidation upon reuse. |
1742 | * necessary invalidation upon reuse. |
1740 | */ |
1743 | */ |
1741 | list_for_each_entry(obj, |
1744 | list_for_each_entry(obj, |
1742 | &dev_priv->mm.inactive_list, |
1745 | &dev_priv->mm.inactive_list, |
1743 | mm_list) |
1746 | mm_list) |
1744 | { |
1747 | { |
1745 | obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; |
1748 | obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; |
1746 | } |
1749 | } |
1747 | 1750 | ||
1748 | /* The fence registers are invalidated so clear them out */ |
1751 | /* The fence registers are invalidated so clear them out */ |
1749 | i915_gem_reset_fences(dev); |
1752 | i915_gem_reset_fences(dev); |
1750 | } |
1753 | } |
1751 | 1754 | ||
1752 | /** |
1755 | /** |
1753 | * This function clears the request list as sequence numbers are passed. |
1756 | * This function clears the request list as sequence numbers are passed. |
1754 | */ |
1757 | */ |
1755 | void |
1758 | void |
1756 | i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) |
1759 | i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) |
1757 | { |
1760 | { |
1758 | uint32_t seqno; |
1761 | uint32_t seqno; |
1759 | int i; |
1762 | int i; |
1760 | 1763 | ||
1761 | if (list_empty(&ring->request_list)) |
1764 | if (list_empty(&ring->request_list)) |
1762 | return; |
1765 | return; |
1763 | 1766 | ||
1764 | WARN_ON(i915_verify_lists(ring->dev)); |
1767 | WARN_ON(i915_verify_lists(ring->dev)); |
1765 | 1768 | ||
1766 | seqno = ring->get_seqno(ring, true); |
1769 | seqno = ring->get_seqno(ring, true); |
1767 | 1770 | ||
1768 | for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) |
1771 | for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) |
1769 | if (seqno >= ring->sync_seqno[i]) |
1772 | if (seqno >= ring->sync_seqno[i]) |
1770 | ring->sync_seqno[i] = 0; |
1773 | ring->sync_seqno[i] = 0; |
1771 | 1774 | ||
1772 | while (!list_empty(&ring->request_list)) { |
1775 | while (!list_empty(&ring->request_list)) { |
1773 | struct drm_i915_gem_request *request; |
1776 | struct drm_i915_gem_request *request; |
1774 | 1777 | ||
1775 | request = list_first_entry(&ring->request_list, |
1778 | request = list_first_entry(&ring->request_list, |
1776 | struct drm_i915_gem_request, |
1779 | struct drm_i915_gem_request, |
1777 | list); |
1780 | list); |
1778 | 1781 | ||
1779 | if (!i915_seqno_passed(seqno, request->seqno)) |
1782 | if (!i915_seqno_passed(seqno, request->seqno)) |
1780 | break; |
1783 | break; |
1781 | 1784 | ||
1782 | trace_i915_gem_request_retire(ring, request->seqno); |
1785 | trace_i915_gem_request_retire(ring, request->seqno); |
1783 | /* We know the GPU must have read the request to have |
1786 | /* We know the GPU must have read the request to have |
1784 | * sent us the seqno + interrupt, so use the position |
1787 | * sent us the seqno + interrupt, so use the position |
1785 | * of tail of the request to update the last known position |
1788 | * of tail of the request to update the last known position |
1786 | * of the GPU head. |
1789 | * of the GPU head. |
1787 | */ |
1790 | */ |
1788 | ring->last_retired_head = request->tail; |
1791 | ring->last_retired_head = request->tail; |
1789 | 1792 | ||
1790 | list_del(&request->list); |
1793 | list_del(&request->list); |
1791 | kfree(request); |
1794 | kfree(request); |
1792 | } |
1795 | } |
1793 | 1796 | ||
1794 | /* Move any buffers on the active list that are no longer referenced |
1797 | /* Move any buffers on the active list that are no longer referenced |
1795 | * by the ringbuffer to the flushing/inactive lists as appropriate. |
1798 | * by the ringbuffer to the flushing/inactive lists as appropriate. |
1796 | */ |
1799 | */ |
1797 | while (!list_empty(&ring->active_list)) { |
1800 | while (!list_empty(&ring->active_list)) { |
1798 | struct drm_i915_gem_object *obj; |
1801 | struct drm_i915_gem_object *obj; |
1799 | 1802 | ||
1800 | obj = list_first_entry(&ring->active_list, |
1803 | obj = list_first_entry(&ring->active_list, |
1801 | struct drm_i915_gem_object, |
1804 | struct drm_i915_gem_object, |
1802 | ring_list); |
1805 | ring_list); |
1803 | 1806 | ||
1804 | if (!i915_seqno_passed(seqno, obj->last_read_seqno)) |
1807 | if (!i915_seqno_passed(seqno, obj->last_read_seqno)) |
1805 | break; |
1808 | break; |
1806 | 1809 | ||
1807 | i915_gem_object_move_to_inactive(obj); |
1810 | i915_gem_object_move_to_inactive(obj); |
1808 | } |
1811 | } |
1809 | 1812 | ||
1810 | if (unlikely(ring->trace_irq_seqno && |
1813 | if (unlikely(ring->trace_irq_seqno && |
1811 | i915_seqno_passed(seqno, ring->trace_irq_seqno))) { |
1814 | i915_seqno_passed(seqno, ring->trace_irq_seqno))) { |
1812 | ring->irq_put(ring); |
1815 | ring->irq_put(ring); |
1813 | ring->trace_irq_seqno = 0; |
1816 | ring->trace_irq_seqno = 0; |
1814 | } |
1817 | } |
1815 | 1818 | ||
1816 | WARN_ON(i915_verify_lists(ring->dev)); |
1819 | WARN_ON(i915_verify_lists(ring->dev)); |
1817 | } |
1820 | } |
1818 | 1821 | ||
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		i915_gem_retire_requests_ring(ring);
}

static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;
	struct intel_ring_buffer *ring;
	bool idle;
	int i;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	/* Come back later if the device is busy... */
	if (!mutex_trylock(&dev->struct_mutex)) {
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
		return;
	}

	i915_gem_retire_requests(dev);

	/* Send a periodic flush down the ring so we don't hold onto GEM
	 * objects indefinitely.
	 */
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		if (ring->gpu_caches_dirty)
			i915_add_request(ring, NULL, NULL);

		idle &= list_empty(&ring->request_list);
	}

	if (!dev_priv->mm.suspended && !idle)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	if (idle)
		intel_mark_idle(dev);

	mutex_unlock(&dev->struct_mutex);
}

/**
 * Ensures that an object will eventually get non-busy by flushing any required
 * write domains, emitting any outstanding lazy request, and retiring any
 * completed requests.
 */
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->active) {
		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
		if (ret)
			return ret;

		i915_gem_retire_requests_ring(obj->ring);
	}

	return 0;
}

/**
 * i915_gem_object_sync - sync an object to a ring.
 *
 * @obj: object which may be in use on another ring.
 * @to: ring we wish to use the object on. May be NULL.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Calling with NULL implies synchronizing the object with the CPU
 * rather than a particular GPU ring.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
		     struct intel_ring_buffer *to)
{
	struct intel_ring_buffer *from = obj->ring;
	u32 seqno;
	int ret, idx;

	if (from == NULL || to == from)
		return 0;

	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
		return i915_gem_object_wait_rendering(obj, false);

	idx = intel_ring_sync_index(from, to);

	seqno = obj->last_read_seqno;
	if (seqno <= from->sync_seqno[idx])
		return 0;

	ret = i915_gem_check_olr(obj->ring, seqno);
	if (ret)
		return ret;

	ret = to->sync_to(to, from, seqno);
	if (!ret)
		from->sync_seqno[idx] = seqno;

	return ret;
}

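/*
 * Illustrative usage sketch only (not part of this file): a caller about to
 * use @obj on the blitter ring first synchronizes against the last ring that
 * wrote it. The ring lookup below is an assumption for the example.
 *
 *	struct intel_ring_buffer *blt = &dev_priv->ring[BCS];
 *	int ret;
 *
 *	ret = i915_gem_object_sync(obj, blt);
 *	if (ret)
 *		return ret;
 *	// blt now waits (via semaphore or CPU wait) for obj's last writer
 */
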
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
	u32 old_write_domain, old_read_domains;

	/* Act as a barrier for all accesses through the GTT */
	mb();

	/* Force a pagefault for domain tracking on next user access */
//	i915_gem_release_mmap(obj);

	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		return;

	old_read_domains = obj->base.read_domains;
	old_write_domain = obj->base.write_domain;

	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);
}

/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
	int ret = 0;

	if (obj->gtt_space == NULL)
		return 0;

	if (obj->pin_count)
		return -EBUSY;

	BUG_ON(obj->pages.page == NULL);

	ret = i915_gem_object_finish_gpu(obj);
	if (ret)
		return ret;
	/* Continue on if we fail due to EIO, the GPU is hung so we
	 * should be safe and we need to clean up or else we might
	 * cause memory corruption through use-after-free.
	 */

	i915_gem_object_finish_gtt(obj);

	/* release the fence reg _after_ flushing */
	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	trace_i915_gem_object_unbind(obj);

	if (obj->has_global_gtt_mapping)
		i915_gem_gtt_unbind_object(obj);
	if (obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
		obj->has_aliasing_ppgtt_mapping = 0;
	}
	i915_gem_gtt_finish_object(obj);

	list_del(&obj->mm_list);
	list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
	/* Avoid an unnecessary call to unbind on rebind. */
	obj->map_and_fenceable = true;

	drm_mm_put_block(obj->gtt_space);
	obj->gtt_space = NULL;
	obj->gtt_offset = 0;

	return 0;
}

static int i915_ring_idle(struct intel_ring_buffer *ring)
{
	if (list_empty(&ring->active_list))
		return 0;

	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}

int i915_gpu_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	/* Flush everything onto the inactive list. */
	for_each_ring(ring, dev_priv, i) {
		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
		if (ret)
			return ret;

		ret = i915_ring_idle(ring);
		if (ret)
			return ret;
	}

	return 0;
}

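/*
 * Usage note (illustrative sketch, not driver code): paths that must see a
 * fully quiescent GPU, such as suspend or whole-GTT eviction, typically hold
 * struct_mutex and drain every ring before touching global state:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	ret = i915_gpu_idle(dev);	// all rings idle on success
 *	if (ret == 0)
 *		i915_gem_retire_requests(dev);
 *	mutex_unlock(&dev->struct_mutex);
 */
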
static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
					struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint64_t val;

	if (obj) {
		u32 size = obj->gtt_space->size;

		val = (uint64_t)((obj->gtt_offset + size - 4096) &
				 0xfffff000) << 32;
		val |= obj->gtt_offset & 0xfffff000;
		val |= (uint64_t)((obj->stride / 128) - 1) <<
			SANDYBRIDGE_FENCE_PITCH_SHIFT;

		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
		val |= I965_FENCE_REG_VALID;
	} else
		val = 0;

	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
	POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
}

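/*
 * Worked example of the 64-bit fence encoding above (all values
 * hypothetical): a Y-tiled object at gtt_offset 0x00100000, size 0x00100000,
 * stride 4096.
 *
 *	upper dword: (0x00100000 + 0x00100000 - 4096) & 0xfffff000 = 0x001ff000
 *	lower dword: 0x00100000 | pitch field | tiling bit | valid bit
 *	pitch field: (4096 / 128) - 1 = 31
 *
 * i.e. the register holds the first and last page of the fenced range plus
 * the pitch in 128-byte units, minus one.
 */
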
static void i965_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint64_t val;

	if (obj) {
		u32 size = obj->gtt_space->size;

		val = (uint64_t)((obj->gtt_offset + size - 4096) &
				 0xfffff000) << 32;
		val |= obj->gtt_offset & 0xfffff000;
		val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
		val |= I965_FENCE_REG_VALID;
	} else
		val = 0;

	I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
	POSTING_READ(FENCE_REG_965_0 + reg * 8);
}

static void i915_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 val;

	if (obj) {
		u32 size = obj->gtt_space->size;
		int pitch_val;
		int tile_width;

		WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
		     (size & -size) != size ||
		     (obj->gtt_offset & (size - 1)),
		     "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
		     obj->gtt_offset, obj->map_and_fenceable, size);

		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
			tile_width = 128;
		else
			tile_width = 512;

		/* Note: pitch better be a power of two tile widths */
		pitch_val = obj->stride / tile_width;
		pitch_val = ffs(pitch_val) - 1;

		val = obj->gtt_offset;
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
		val |= I915_FENCE_SIZE_BITS(size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	} else
		val = 0;

	if (reg < 8)
		reg = FENCE_REG_830_0 + reg * 4;
	else
		reg = FENCE_REG_945_8 + (reg - 8) * 4;

	I915_WRITE(reg, val);
	POSTING_READ(reg);
}

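/*
 * Example of the pitch encoding above (hypothetical object): with a
 * 2048-byte stride and a 512-byte tile width, pitch_val =
 * ffs(2048 / 512) - 1 = 2, i.e. log2 of the stride expressed in tile widths.
 * This is why the comment above insists the pitch be a power-of-two number
 * of tile widths.
 */
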
static void i830_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t val;

	if (obj) {
		u32 size = obj->gtt_space->size;
		uint32_t pitch_val;

		WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
		     (size & -size) != size ||
		     (obj->gtt_offset & (size - 1)),
		     "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
		     obj->gtt_offset, size);

		pitch_val = obj->stride / 128;
		pitch_val = ffs(pitch_val) - 1;

		val = obj->gtt_offset;
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
		val |= I830_FENCE_SIZE_BITS(size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	} else
		val = 0;

	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
	POSTING_READ(FENCE_REG_830_0 + reg * 4);
}

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
	case 5:
	case 4: i965_write_fence_reg(dev, reg, obj); break;
	case 3: i915_write_fence_reg(dev, reg, obj); break;
	case 2: i830_write_fence_reg(dev, reg, obj); break;
	default: break;
	}
}

static inline int fence_number(struct drm_i915_private *dev_priv,
			       struct drm_i915_fence_reg *fence)
{
	return fence - dev_priv->fence_regs;
}

static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int reg = fence_number(dev_priv, fence);

	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);

	if (enable) {
		obj->fence_reg = reg;
		fence->obj = obj;
		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
	} else {
		obj->fence_reg = I915_FENCE_REG_NONE;
		fence->obj = NULL;
		list_del_init(&fence->lru_list);
	}
}

static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
{
	if (obj->last_fenced_seqno) {
		int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
		if (ret)
			return ret;

		obj->last_fenced_seqno = 0;
	}

	/* Ensure that all CPU reads are completed before installing a fence
	 * and all writes before removing the fence.
	 */
	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
		mb();

	obj->fenced_gpu_access = false;
	return 0;
}

int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	ret = i915_gem_object_flush_fence(obj);
	if (ret)
		return ret;

	if (obj->fence_reg == I915_FENCE_REG_NONE)
		return 0;

	i915_gem_object_update_fence(obj,
				     &dev_priv->fence_regs[obj->fence_reg],
				     false);
	i915_gem_object_fence_lost(obj);

	return 0;
}

static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_fence_reg *reg, *avail;
	int i;

	/* First try to find a free reg */
	avail = NULL;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			return reg;

		if (!reg->pin_count)
			avail = reg;
	}

	if (avail == NULL)
		return NULL;

	/* None available, try to steal one or wait for a user to finish */
	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
		if (reg->pin_count)
			continue;

		return reg;
	}

	return NULL;
}

/**
 * i915_gem_object_get_fence - set up fencing for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 */
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool enable = obj->tiling_mode != I915_TILING_NONE;
	struct drm_i915_fence_reg *reg;
	int ret;

	/* Have we updated the tiling parameters upon the object and so
	 * will need to serialise the write to the associated fence register?
	 */
	if (obj->fence_dirty) {
		ret = i915_gem_object_flush_fence(obj);
		if (ret)
			return ret;
	}

	/* Just update our place in the LRU if our fence is getting reused. */
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		reg = &dev_priv->fence_regs[obj->fence_reg];
		if (!obj->fence_dirty) {
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
			return 0;
		}
	} else if (enable) {
		reg = i915_find_fence_reg(dev);
		if (reg == NULL)
			return -EDEADLK;

		if (reg->obj) {
			struct drm_i915_gem_object *old = reg->obj;

			ret = i915_gem_object_flush_fence(old);
			if (ret)
				return ret;

			i915_gem_object_fence_lost(old);
		}
	} else
		return 0;

	i915_gem_object_update_fence(obj, reg, enable);
	obj->fence_dirty = false;

	return 0;
}

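/*
 * Illustrative caller sketch (assumptions noted): a tiled object is normally
 * pinned into the mappable aperture before a fence is requested, e.g.:
 *
 *	ret = i915_gem_object_pin(obj, 0, true, false);	// map_and_fenceable
 *	if (ret == 0)
 *		ret = i915_gem_object_get_fence(obj);
 *
 * The pin call and its signature are assumptions for the example; the point
 * is that obj->gtt_offset must satisfy the per-generation fence constraints
 * checked by the write_fence_reg helpers above.
 */
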
static bool i915_gem_valid_gtt_space(struct drm_device *dev,
				     struct drm_mm_node *gtt_space,
				     unsigned long cache_level)
{
	struct drm_mm_node *other;

	/* On non-LLC machines we have to be careful when putting differing
	 * types of snoopable memory together to avoid the prefetcher
	 * crossing memory domains and dying.
	 */
	if (HAS_LLC(dev))
		return true;

	if (gtt_space == NULL)
		return true;

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !other->hole_follows && other->color != cache_level)
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
		return false;

	return true;
}

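/*
 * Concrete scenario (illustrative): on a non-LLC machine, placing an
 * I915_CACHE_LLC (snooped) node directly against an I915_CACHE_NONE
 * neighbour with no hole between them fails the checks above, because the
 * prefetcher could stray from one node into the differently-cached other.
 * A hole_follows gap, or matching colors on both sides, makes the placement
 * valid.
 */
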
static void i915_gem_verify_gtt(struct drm_device *dev)
{
#if WATCH_GTT
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int err = 0;

	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		if (obj->gtt_space == NULL) {
			printk(KERN_ERR "object found on GTT list with no space reserved\n");
			err++;
			continue;
		}

		if (obj->cache_level != obj->gtt_space->color) {
			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
			       obj->gtt_space->start,
			       obj->gtt_space->start + obj->gtt_space->size,
			       obj->cache_level,
			       obj->gtt_space->color);
			err++;
			continue;
		}

		if (!i915_gem_valid_gtt_space(dev,
					      obj->gtt_space,
					      obj->cache_level)) {
			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
			       obj->gtt_space->start,
			       obj->gtt_space->start + obj->gtt_space->size,
			       obj->cache_level);
			err++;
			continue;
		}
	}

	WARN_ON(err);
#endif
}

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
			    unsigned alignment,
			    bool map_and_fenceable,
			    bool nonblocking)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *free_space;
	u32 size, fence_size, fence_alignment, unfenced_alignment;
	bool mappable, fenceable;
	int ret;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to bind a purgeable object\n");
		return -EINVAL;
	}

	fence_size = i915_gem_get_gtt_size(dev,
					   obj->base.size,
					   obj->tiling_mode);
	fence_alignment = i915_gem_get_gtt_alignment(dev,
						     obj->base.size,
						     obj->tiling_mode);
	unfenced_alignment =
		i915_gem_get_unfenced_gtt_alignment(dev,
						    obj->base.size,
						    obj->tiling_mode);

	if (alignment == 0)
		alignment = map_and_fenceable ? fence_alignment :
						unfenced_alignment;
	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

	size = map_and_fenceable ? fence_size : obj->base.size;

	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
	 */
	if (obj->base.size >
	    (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
		return -E2BIG;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

search_free:
	if (map_and_fenceable)
		free_space =
			drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
							  size, alignment, obj->cache_level,
							  0, dev_priv->mm.gtt_mappable_end,
							  false);
	else
		free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
						      size, alignment, obj->cache_level,
						      false);

	if (free_space != NULL) {
		if (map_and_fenceable)
			obj->gtt_space =
				drm_mm_get_block_range_generic(free_space,
							       size, alignment, obj->cache_level,
							       0, dev_priv->mm.gtt_mappable_end,
							       false);
		else
			obj->gtt_space =
				drm_mm_get_block_generic(free_space,
							 size, alignment, obj->cache_level,
							 false);
	}
	if (obj->gtt_space == NULL) {
		ret = 1; //i915_gem_evict_something(dev, size, alignment,
			 //			  map_and_fenceable);
		if (ret)
			return ret;

		goto search_free;
	}
	if (WARN_ON(!i915_gem_valid_gtt_space(dev,
					      obj->gtt_space,
					      obj->cache_level))) {
		drm_mm_put_block(obj->gtt_space);
		obj->gtt_space = NULL;
		return -EINVAL;
	}

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		drm_mm_put_block(obj->gtt_space);
		obj->gtt_space = NULL;
		return ret;
	}

	if (!dev_priv->mm.aliasing_ppgtt)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->gtt_offset = obj->gtt_space->start;

	fenceable =
		obj->gtt_space->size == fence_size &&
		(obj->gtt_space->start & (fence_alignment - 1)) == 0;

	mappable =
		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;

	obj->map_and_fenceable = mappable && fenceable;

	trace_i915_gem_object_bind(obj, map_and_fenceable);
	i915_gem_verify_gtt(dev);
	return 0;
}

void
i915_gem_clflush_object(struct drm_i915_gem_object *obj)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages.page == NULL)
		return;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (obj->cache_level != I915_CACHE_NONE)
		return;

	if (obj->mapped != NULL) {
		uint8_t *page_virtual;
		unsigned int i;

		page_virtual = obj->mapped;
		asm volatile("mfence");
		for (i = 0; i < obj->base.size; i += x86_clflush_size)
			clflush(page_virtual + i);
		asm volatile("mfence");
	} else {
		uint8_t *page_virtual;
		unsigned int i;

		page_virtual = AllocKernelSpace(obj->base.size);
		if (page_virtual != NULL) {
			dma_addr_t *src, *dst;
			u32 count;

#define page_tabs  0xFDC00000		/* really dirty hack */

			src = obj->pages.page;
			dst = &((dma_addr_t*)page_tabs)[(u32_t)page_virtual >> 12];
			count = obj->base.size / 4096;

			while (count--)
				*dst++ = (0xFFFFF000 & *src++) | 0x001;

			asm volatile("mfence");
			for (i = 0; i < obj->base.size; i += x86_clflush_size)
				clflush(page_virtual + i);
			asm volatile("mfence");
			FreeKernelSpace(page_virtual);
		} else {
			asm volatile (
			"mfence	\n"
			"wbinvd	\n"		/* this is really ugly */
			"mfence");
		}
	}
}

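/*
 * Sizing note (illustrative): x86_clflush_size is the CPU's cache-line
 * size, typically 64 bytes, so flushing one 4096-byte page in the loops
 * above issues 4096 / 64 = 64 clflush instructions. The wbinvd fallback
 * instead writes back and invalidates the entire cache hierarchy, which is
 * far more expensive, hence the "really ugly" comment.
 */
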
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain. Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush. It also doesn't land in the render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 */
	wmb();

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

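/*
 * Ordering sketch (hypothetical pointers, for illustration only): without
 * the wmb() above, a later MMIO write that kicks the GPU could overtake
 * buffered write-combining stores made through the GTT mapping:
 *
 *	iowrite32(data, gtt_vaddr + offset);	// WC write, may be buffered
 *	wmb();					// drain WC buffers first
 *	I915_WRITE_TAIL(ring, ring->tail);	// only now tell the GPU
 */
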
/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	intel_gtt_chipset_flush();
	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

2661 | /** |
2664 | /** |
2662 | * Moves a single object to the GTT read, and possibly write domain. |
2665 | * Moves a single object to the GTT read, and possibly write domain. |
2663 | * |
2666 | * |
2664 | * This function returns when the move is complete, including waiting on |
2667 | * This function returns when the move is complete, including waiting on |
2665 | * flushes to occur. |
2668 | * flushes to occur. |
2666 | */ |
2669 | */ |
2667 | int |
2670 | int |
2668 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) |
2671 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) |
2669 | { |
2672 | { |
2670 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
2673 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
2671 | uint32_t old_write_domain, old_read_domains; |
2674 | uint32_t old_write_domain, old_read_domains; |
2672 | int ret; |
2675 | int ret; |
2673 | 2676 | ||
2674 | /* Not valid to be called on unbound objects. */ |
2677 | /* Not valid to be called on unbound objects. */ |
2675 | if (obj->gtt_space == NULL) |
2678 | if (obj->gtt_space == NULL) |
2676 | return -EINVAL; |
2679 | return -EINVAL; |
2677 | 2680 | ||
2678 | if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) |
2681 | if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) |
2679 | return 0; |
2682 | return 0; |
2680 | 2683 | ||
2681 | ret = i915_gem_object_wait_rendering(obj, !write); |
2684 | ret = i915_gem_object_wait_rendering(obj, !write); |
2682 | if (ret) |
2685 | if (ret) |
2683 | return ret; |
2686 | return ret; |
2684 | 2687 | ||
2685 | i915_gem_object_flush_cpu_write_domain(obj); |
2688 | i915_gem_object_flush_cpu_write_domain(obj); |
2686 | 2689 | ||
2687 | old_write_domain = obj->base.write_domain; |
2690 | old_write_domain = obj->base.write_domain; |
2688 | old_read_domains = obj->base.read_domains; |
2691 | old_read_domains = obj->base.read_domains; |
2689 | 2692 | ||
2690 | /* It should now be out of any other write domains, and we can update |
2693 | /* It should now be out of any other write domains, and we can update |
2691 | * the domain values for our changes. |
2694 | * the domain values for our changes. |
2692 | */ |
2695 | */ |
2693 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); |
2696 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); |
2694 | obj->base.read_domains |= I915_GEM_DOMAIN_GTT; |
2697 | obj->base.read_domains |= I915_GEM_DOMAIN_GTT; |
2695 | if (write) { |
2698 | if (write) { |
2696 | obj->base.read_domains = I915_GEM_DOMAIN_GTT; |
2699 | obj->base.read_domains = I915_GEM_DOMAIN_GTT; |
2697 | obj->base.write_domain = I915_GEM_DOMAIN_GTT; |
2700 | obj->base.write_domain = I915_GEM_DOMAIN_GTT; |
2698 | obj->dirty = 1; |
2701 | obj->dirty = 1; |
2699 | } |
2702 | } |
2700 | 2703 | ||
2701 | trace_i915_gem_object_change_domain(obj, |
2704 | trace_i915_gem_object_change_domain(obj, |
2702 | old_read_domains, |
2705 | old_read_domains, |
2703 | old_write_domain); |
2706 | old_write_domain); |
2704 | 2707 | ||
2705 | /* And bump the LRU for this access */ |
2708 | /* And bump the LRU for this access */ |
2706 | if (i915_gem_object_is_inactive(obj)) |
2709 | if (i915_gem_object_is_inactive(obj)) |
2707 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
2710 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
2708 | 2711 | ||
2709 | return 0; |
2712 | return 0; |
2710 | } |
2713 | } |
2711 | 2714 | ||
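#if 0
/* Illustrative sketch only, not part of the driver: a minimal caller
 * pattern for the function above, using a hypothetical helper name.
 * It assumes dev->struct_mutex is already held, as it is for all callers
 * of the domain-change helpers.  The object is pinned first so it stays
 * bound while the GTT write domain is set up for aperture writes.
 */
static int example_prepare_gtt_write(struct drm_i915_gem_object *obj)
{
    int ret;

    /* Bind into the mappable aperture; 4096 is an assumed alignment. */
    ret = i915_gem_object_pin(obj, 4096, true, false);
    if (ret)
        return ret;

    /* Wait for rendering and move the object to the GTT write domain. */
    ret = i915_gem_object_set_to_gtt_domain(obj, true);
    if (ret)
        i915_gem_object_unpin(obj);

    return ret;
}
#endif
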
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level)
{
    struct drm_device *dev = obj->base.dev;
    drm_i915_private_t *dev_priv = dev->dev_private;
    int ret;

    if (obj->cache_level == cache_level)
        return 0;

    if (obj->pin_count) {
        DRM_DEBUG("can not change the cache level of pinned objects\n");
        return -EBUSY;
    }

    if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
        ret = i915_gem_object_unbind(obj);
        if (ret)
            return ret;
    }

    if (obj->gtt_space) {
        ret = i915_gem_object_finish_gpu(obj);
        if (ret)
            return ret;

        i915_gem_object_finish_gtt(obj);

        /* Before SandyBridge, you could not use tiling or fence
         * registers with snooped memory, so relinquish any fences
         * currently pointing to our region in the aperture.
         */
        if (INTEL_INFO(dev)->gen < 6) {
            ret = i915_gem_object_put_fence(obj);
            if (ret)
                return ret;
        }

        if (obj->has_global_gtt_mapping)
            i915_gem_gtt_bind_object(obj, cache_level);
        if (obj->has_aliasing_ppgtt_mapping)
            i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
                                   obj, cache_level);

        obj->gtt_space->color = cache_level;
    }

    if (cache_level == I915_CACHE_NONE) {
        u32 old_read_domains, old_write_domain;

        /* If we're coming from LLC cached, then we haven't
         * actually been tracking whether the data is in the
         * CPU cache or not, since we only allow one bit set
         * in obj->write_domain and have been skipping the clflushes.
         * Just set it to the CPU cache for now.
         */
        WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
        WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);

        old_read_domains = obj->base.read_domains;
        old_write_domain = obj->base.write_domain;

        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;

        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            old_write_domain);
    }

    obj->cache_level = cache_level;
    i915_gem_verify_gtt(dev);
    return 0;
}

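#if 0
/* Illustrative sketch only, not part of the driver: cache-level changes
 * are rejected while an object is pinned (see the -EBUSY check above),
 * so a hypothetical caller verifies the object is unpinned, switches the
 * PTE caching mode to uncached, and only then pins it for scanout-style
 * access.  Assumes dev->struct_mutex is held.
 */
static int example_make_uncached(struct drm_i915_gem_object *obj)
{
    int ret;

    if (obj->pin_count)
        return -EBUSY;  /* mirror the check made by set_cache_level */

    ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
    if (ret)
        return ret;

    return i915_gem_object_pin(obj, 4096, true, false);
}
#endif
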
/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     struct intel_ring_buffer *pipelined)
{
    u32 old_read_domains, old_write_domain;
    int ret;

    if (pipelined != obj->ring) {
        ret = i915_gem_object_sync(obj, pipelined);
        if (ret)
            return ret;
    }

    /* The display engine is not coherent with the LLC cache on gen6.  As
     * a result, we make sure that the pinning that is about to occur is
     * done with uncached PTEs. This is lowest common denominator for all
     * chipsets.
     *
     * However for gen6+, we could do better by using the GFDT bit instead
     * of uncaching, which would allow us to flush all the LLC-cached data
     * with that bit in the PTE to main memory with just one PIPE_CONTROL.
     */
    ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
    if (ret)
        return ret;

    /* As the user may map the buffer once pinned in the display plane
     * (e.g. libkms for the bootup splash), we have to ensure that we
     * always use map_and_fenceable for all scanout buffers.
     */
    ret = i915_gem_object_pin(obj, alignment, true, false);
    if (ret)
        return ret;

    i915_gem_object_flush_cpu_write_domain(obj);

    old_write_domain = obj->base.write_domain;
    old_read_domains = obj->base.read_domains;

    /* It should now be out of any other write domains, and we can update
     * the domain values for our changes.
     */
    obj->base.write_domain = 0;
    obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

    trace_i915_gem_object_change_domain(obj,
                                        old_read_domains,
                                        old_write_domain);

    return 0;
}

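#if 0
/* Illustrative sketch only, not part of the driver: the rough shape of a
 * hypothetical flip path.  The new scanout buffer is pinned with
 * uncached, fenceable GTT space via the helper above; the previous
 * buffer is unpinned here for brevity (real modeset code defers that
 * until the flip has actually completed).  Assumes dev->struct_mutex is
 * held; `pipelined` may be NULL for a fully synchronous flip.
 */
static int example_flip_prepare(struct drm_i915_gem_object *new_bo,
                                struct drm_i915_gem_object *old_bo,
                                struct intel_ring_buffer *pipelined)
{
    int ret;

    ret = i915_gem_object_pin_to_display_plane(new_bo, 0, pipelined);
    if (ret)
        return ret;

    if (old_bo)
        i915_gem_object_unpin(old_bo);

    return 0;
}
#endif
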
int
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
    int ret;

    if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
        return 0;

    ret = i915_gem_object_wait_rendering(obj, false);
    if (ret)
        return ret;

    /* Ensure that we invalidate the GPU's caches and TLBs. */
    obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
    return 0;
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
    uint32_t old_write_domain, old_read_domains;
    int ret;

    if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
        return 0;

    ret = i915_gem_object_wait_rendering(obj, !write);
    if (ret)
        return ret;

    i915_gem_object_flush_gtt_write_domain(obj);

    old_write_domain = obj->base.write_domain;
    old_read_domains = obj->base.read_domains;

    /* Flush the CPU cache if it's still invalid. */
    if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
        i915_gem_clflush_object(obj);

        obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
    }

    /* It should now be out of any other write domains, and we can update
     * the domain values for our changes.
     */
    BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

    /* If we're writing through the CPU, then the GPU read domains will
     * need to be invalidated at next use.
     */
    if (write) {
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
    }

    trace_i915_gem_object_change_domain(obj,
                                        old_read_domains,
                                        old_write_domain);

    return 0;
}

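#if 0
/* Illustrative sketch only, not part of the driver: bracketing a CPU
 * write with the two domain helpers defined in this file.  Moving to the
 * CPU domain clflushes stale cachelines and waits for the GPU; moving
 * back to the GTT domain afterwards flushes the CPU write domain so the
 * writes become visible to later GPU or aperture access.  Assumes
 * dev->struct_mutex is held and that the object is already bound into
 * the GTT (set_to_gtt_domain returns -EINVAL otherwise).
 */
static int example_cpu_write_window(struct drm_i915_gem_object *obj)
{
    int ret;

    ret = i915_gem_object_set_to_cpu_domain(obj, true);
    if (ret)
        return ret;

    /* ... CPU writes to the object's backing pages would go here ... */

    return i915_gem_object_set_to_gtt_domain(obj, false);
}
#endif
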
#if 0
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_i915_file_private *file_priv = file->driver_priv;
    unsigned long recent_enough = GetTimerTics() - msecs_to_jiffies(20);
    struct drm_i915_gem_request *request;
    struct intel_ring_buffer *ring = NULL;
    u32 seqno = 0;
    int ret;

    if (atomic_read(&dev_priv->mm.wedged))
        return -EIO;

    spin_lock(&file_priv->mm.lock);
    list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
        if (time_after_eq(request->emitted_jiffies, recent_enough))
            break;

        ring = request->ring;
        seqno = request->seqno;
    }
    spin_unlock(&file_priv->mm.lock);

    if (seqno == 0)
        return 0;

    ret = __wait_seqno(ring, seqno, true, NULL);
    if (ret == 0)
        queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

    return ret;
}
#endif

int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
                    uint32_t alignment,
                    bool map_and_fenceable,
                    bool nonblocking)
{
    int ret;

    if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
        return -EBUSY;

#if 0
    if (obj->gtt_space != NULL) {
        if ((alignment && obj->gtt_offset & (alignment - 1)) ||
            (map_and_fenceable && !obj->map_and_fenceable)) {
            WARN(obj->pin_count,
                 "bo is already pinned with incorrect alignment:"
                 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
                 " obj->map_and_fenceable=%d\n",
                 obj->gtt_offset, alignment,
                 map_and_fenceable,
                 obj->map_and_fenceable);
            ret = i915_gem_object_unbind(obj);
            if (ret)
                return ret;
        }
    }
#endif

    if (obj->gtt_space == NULL) {
        ret = i915_gem_object_bind_to_gtt(obj, alignment,
                                          map_and_fenceable,
                                          nonblocking);
        if (ret)
            return ret;
    }

    if (!obj->has_global_gtt_mapping && map_and_fenceable)
        i915_gem_gtt_bind_object(obj, obj->cache_level);

    obj->pin_count++;
    obj->pin_mappable |= map_and_fenceable;

    return 0;
}

void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
    BUG_ON(obj->pin_count == 0);
    BUG_ON(obj->gtt_space == NULL);

    if (--obj->pin_count == 0)
        obj->pin_mappable = false;
}

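#if 0
/* Illustrative sketch only, not part of the driver: pin_count is a
 * simple nesting counter, so every successful i915_gem_object_pin()
 * must be balanced by exactly one i915_gem_object_unpin().  Assumes
 * dev->struct_mutex is held.
 */
static int example_pin_balanced(struct drm_i915_gem_object *obj)
{
    int ret;

    ret = i915_gem_object_pin(obj, 0, false, false);
    if (ret)
        return ret;

    /* ... the object cannot be evicted while it remains pinned ... */

    i915_gem_object_unpin(obj);
    return 0;
}
#endif
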
#if 0
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file)
{
    struct drm_i915_gem_pin *args = data;
    struct drm_i915_gem_object *obj;
    int ret;

    ret = i915_mutex_lock_interruptible(dev);
    if (ret)
        return ret;

    obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
    if (&obj->base == NULL) {
        ret = -ENOENT;
        goto unlock;
    }

    if (obj->madv != I915_MADV_WILLNEED) {
        DRM_ERROR("Attempting to pin a purgeable buffer\n");
        ret = -EINVAL;
        goto out;
    }

    if (obj->pin_filp != NULL && obj->pin_filp != file) {
        DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
                  args->handle);
        ret = -EINVAL;
        goto out;
    }

    obj->user_pin_count++;
    obj->pin_filp = file;
    if (obj->user_pin_count == 1) {
        ret = i915_gem_object_pin(obj, args->alignment, true, false);
        if (ret)
            goto out;
    }

    /* XXX - flush the CPU caches for pinned objects
     * as the X server doesn't manage domains yet
     */
    i915_gem_object_flush_cpu_write_domain(obj);
    args->offset = obj->gtt_offset;
out:
    drm_gem_object_unreference(&obj->base);
unlock:
    mutex_unlock(&dev->struct_mutex);
    return ret;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
    struct drm_i915_gem_pin *args = data;
    struct drm_i915_gem_object *obj;
    int ret;

    ret = i915_mutex_lock_interruptible(dev);
    if (ret)
        return ret;

    obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
    if (&obj->base == NULL) {
        ret = -ENOENT;
        goto unlock;
    }

    if (obj->pin_filp != file) {
        DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
                  args->handle);
        ret = -EINVAL;
        goto out;
    }
    obj->user_pin_count--;
    if (obj->user_pin_count == 0) {
        obj->pin_filp = NULL;
        i915_gem_object_unpin(obj);
    }

out:
    drm_gem_object_unreference(&obj->base);
unlock:
    mutex_unlock(&dev->struct_mutex);
    return ret;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
    struct drm_i915_gem_busy *args = data;
    struct drm_i915_gem_object *obj;
    int ret;

    ret = i915_mutex_lock_interruptible(dev);
    if (ret)
        return ret;

    obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
    if (&obj->base == NULL) {
        ret = -ENOENT;
        goto unlock;
    }

    /* Count all active objects as busy, even if they are currently not used
     * by the gpu. Users of this interface expect objects to eventually
     * become non-busy without any further actions, therefore emit any
     * necessary flushes here.
     */
    ret = i915_gem_object_flush_active(obj);

    args->busy = obj->active;
    if (obj->ring) {
        BUILD_BUG_ON(I915_NUM_RINGS > 16);
        args->busy |= intel_ring_flag(obj->ring) << 16;
    }

    drm_gem_object_unreference(&obj->base);
unlock:
    mutex_unlock(&dev->struct_mutex);
    return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
    return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
    struct drm_i915_gem_madvise *args = data;
    struct drm_i915_gem_object *obj;
    int ret;

    switch (args->madv) {
    case I915_MADV_DONTNEED:
    case I915_MADV_WILLNEED:
        break;
    default:
        return -EINVAL;
    }

    ret = i915_mutex_lock_interruptible(dev);
    if (ret)
        return ret;

    obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
    if (&obj->base == NULL) {
        ret = -ENOENT;
        goto unlock;
    }

    if (obj->pin_count) {
        ret = -EINVAL;
        goto out;
    }

    if (obj->madv != __I915_MADV_PURGED)
        obj->madv = args->madv;

    /* if the object is no longer attached, discard its backing storage */
    if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
        i915_gem_object_truncate(obj);

    args->retained = obj->madv != __I915_MADV_PURGED;

out:
    drm_gem_object_unreference(&obj->base);
unlock:
    mutex_unlock(&dev->struct_mutex);
    return ret;
}
#endif

void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops)
{
    INIT_LIST_HEAD(&obj->mm_list);
    INIT_LIST_HEAD(&obj->gtt_list);
    INIT_LIST_HEAD(&obj->ring_list);
    INIT_LIST_HEAD(&obj->exec_list);

    obj->ops = ops;

    obj->fence_reg = I915_FENCE_REG_NONE;
    obj->madv = I915_MADV_WILLNEED;
    /* Avoid an unnecessary call to unbind on the first bind. */
    obj->map_and_fenceable = true;

    i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
    .get_pages = i915_gem_object_get_pages_gtt,
    .put_pages = i915_gem_object_put_pages_gtt,
};

struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size)
{
    struct drm_i915_gem_object *obj;
    struct address_space *mapping;
    u32 mask;

    obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    if (obj == NULL)
        return NULL;

    if (drm_gem_object_init(dev, &obj->base, size) != 0) {
        kfree(obj);
        return NULL;
    }

    i915_gem_object_init(obj, &i915_gem_object_ops);

    obj->base.write_domain = I915_GEM_DOMAIN_CPU;
    obj->base.read_domains = I915_GEM_DOMAIN_CPU;

    if (HAS_LLC(dev)) {
        /* On some devices, we can have the GPU use the LLC (the CPU
         * cache) for about a 10% performance improvement
         * compared to uncached.  Graphics requests other than
         * display scanout are coherent with the CPU in
         * accessing this cache.  This means in this mode we
         * don't need to clflush on the CPU side, and on the
         * GPU side we only need to flush internal caches to
         * get data visible to the CPU.
         *
         * However, we maintain the display planes as UC, and so
         * need to rebind when first used as such.
         */
        obj->cache_level = I915_CACHE_LLC;
    } else
        obj->cache_level = I915_CACHE_NONE;

    return obj;
}

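#if 0
/* Illustrative sketch only, not part of the driver: allocating a GEM
 * object and dropping the reference again.  The allocator above picks
 * I915_CACHE_LLC on LLC platforms and I915_CACHE_NONE otherwise, so a
 * hypothetical caller only has to check for allocation failure; the
 * final unreference funnels into i915_gem_free_object() below.  Assumes
 * dev->struct_mutex is held.
 */
static int example_alloc_scratch(struct drm_device *dev)
{
    struct drm_i915_gem_object *obj;

    obj = i915_gem_alloc_object(dev, 4096);
    if (obj == NULL)
        return -ENOMEM;

    /* ... use the object ... */

    drm_gem_object_unreference(&obj->base);
    return 0;
}
#endif
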
int i915_gem_init_object(struct drm_gem_object *obj)
{
    BUG();

    return 0;
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
    struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
    struct drm_device *dev = obj->base.dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

    trace_i915_gem_object_destroy(obj);

//    if (obj->phys_obj)
//        i915_gem_detach_phys_object(dev, obj);

    obj->pin_count = 0;
    if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
        bool was_interruptible;

        was_interruptible = dev_priv->mm.interruptible;
        dev_priv->mm.interruptible = false;

        WARN_ON(i915_gem_object_unbind(obj));

        dev_priv->mm.interruptible = was_interruptible;
    }

    obj->pages_pin_count = 0;
    i915_gem_object_put_pages(obj);
//    i915_gem_object_free_mmap_offset(obj);

    BUG_ON(obj->pages.page);

//    if (obj->base.import_attach)
//        drm_prime_gem_destroy(&obj->base, NULL);

    drm_gem_object_release(&obj->base);
    i915_gem_info_remove_obj(dev_priv, obj->base.size);

    kfree(obj->bit_17);
    kfree(obj);
}

#if 0
int
i915_gem_idle(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    int ret;

    mutex_lock(&dev->struct_mutex);

    if (dev_priv->mm.suspended) {
        mutex_unlock(&dev->struct_mutex);
        return 0;
    }

    ret = i915_gpu_idle(dev);
    if (ret) {
        mutex_unlock(&dev->struct_mutex);
        return ret;
    }
    i915_gem_retire_requests(dev);

    i915_gem_reset_fences(dev);

    /* Hack!  Don't let anybody do execbuf while we don't control the chip.
     * We need to replace this with a semaphore, or something.
     * And not confound mm.suspended!
     */
    dev_priv->mm.suspended = 1;
    del_timer_sync(&dev_priv->hangcheck_timer);

    i915_kernel_lost_context(dev);
    i915_gem_cleanup_ringbuffer(dev);

    mutex_unlock(&dev->struct_mutex);

    /* Cancel the retire work handler, which should be idle now. */
//    cancel_delayed_work_sync(&dev_priv->mm.retire_work);

    return 0;
}
#endif

void i915_gem_l3_remap(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    u32 misccpctl;
    int i;

    if (!IS_IVYBRIDGE(dev))
        return;

    if (!dev_priv->mm.l3_remap_info)
        return;

    misccpctl = I915_READ(GEN7_MISCCPCTL);
    I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
    POSTING_READ(GEN7_MISCCPCTL);

    for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
        u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
        if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
            DRM_DEBUG("0x%x was already programmed to %x\n",
                      GEN7_L3LOG_BASE + i, remap);
        if (remap && !dev_priv->mm.l3_remap_info[i/4])
            DRM_DEBUG_DRIVER("Clearing remapped register\n");
        I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
    }

    /* Make sure all the writes land before disabling dop clock gating */
    POSTING_READ(GEN7_L3LOG_BASE);

    I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}

void i915_gem_init_swizzling(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;

    if (INTEL_INFO(dev)->gen < 5 ||
        dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
        return;

    I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
               DISP_TILE_SURFACE_SWIZZLING);

    if (IS_GEN5(dev))
        return;

    I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
    if (IS_GEN6(dev))
        I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
    else
        I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
}

void i915_gem_init_ppgtt(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    uint32_t pd_offset;
    struct intel_ring_buffer *ring;
    struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
    uint32_t __iomem *pd_addr;
    uint32_t pd_entry;
    int i;

    if (!dev_priv->mm.aliasing_ppgtt)
        return;

    pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
    for (i = 0; i < ppgtt->num_pd_entries; i++) {
        dma_addr_t pt_addr;

        if (dev_priv->mm.gtt->needs_dmar)
            pt_addr = ppgtt->pt_dma_addr[i];
        else
            pt_addr = ppgtt->pt_pages[i];

        pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
        pd_entry |= GEN6_PDE_VALID;

        writel(pd_entry, pd_addr + i);
    }
    readl(pd_addr);

    pd_offset = ppgtt->pd_offset;
    pd_offset /= 64; /* in cachelines, */
    pd_offset <<= 16;

    if (INTEL_INFO(dev)->gen == 6) {
        uint32_t ecochk, gab_ctl, ecobits;

        ecobits = I915_READ(GAC_ECO_BITS);
        I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

        gab_ctl = I915_READ(GAB_CTL);
        I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

        ecochk = I915_READ(GAM_ECOCHK);
        I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
                   ECOCHK_PPGTT_CACHE64B);
        I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
    } else if (INTEL_INFO(dev)->gen >= 7) {
        I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
        /* GFX_MODE is per-ring on gen7+ */
    }

    for_each_ring(ring, dev_priv, i) {
        if (INTEL_INFO(dev)->gen >= 7)
            I915_WRITE(RING_MODE_GEN7(ring),
                       _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

        I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
        I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
    }
}

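#if 0
/* Illustrative sketch only, not part of the driver: the PP_DIR_BASE
 * encoding computed above.  The page-directory offset within the GGTT
 * is expressed in 64-byte cachelines and shifted into the upper half of
 * the register.  For a hypothetical pd_offset of 0x40000 (256 KiB) the
 * programmed value is (0x40000 / 64) << 16 = 0x1000 << 16 = 0x10000000.
 */
static uint32_t example_pp_dir_base(uint32_t pd_offset)
{
    return (pd_offset / 64) << 16;  /* cachelines, placed in bits 31:16 */
}
#endif
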
static bool
intel_enable_blt(struct drm_device *dev)
{
    if (!HAS_BLT(dev))
        return false;

    /* The blitter was dysfunctional on early prototypes */
    if (IS_GEN6(dev) && dev->pdev->revision < 8) {
        DRM_INFO("BLT not supported on this pre-production hardware;"
                 " graphics performance will be degraded.\n");
        return false;
    }

    return true;
}

int
i915_gem_init_hw(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    int ret;

    if (!intel_enable_gtt())
        return -EIO;

    if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
        I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);

    i915_gem_l3_remap(dev);

    i915_gem_init_swizzling(dev);

    ret = intel_init_render_ring_buffer(dev);
    if (ret)
        return ret;

    if (HAS_BSD(dev)) {
        ret = intel_init_bsd_ring_buffer(dev);
        if (ret)
            goto cleanup_render_ring;
    }

    if (intel_enable_blt(dev)) {
        ret = intel_init_blt_ring_buffer(dev);
        if (ret)
            goto cleanup_bsd_ring;
    }

    dev_priv->next_seqno = 1;

    /*
     * XXX: There was some w/a described somewhere suggesting loading
     * contexts before PPGTT.
     */
    i915_gem_context_init(dev);
    i915_gem_init_ppgtt(dev);

    return 0;

cleanup_bsd_ring:
    intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
    intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
    return ret;
}

static bool
intel_enable_ppgtt(struct drm_device *dev)
{
    if (i915_enable_ppgtt >= 0)
        return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
    /* Disable ppgtt on SNB if VT-d is on. */
    if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
        return false;
#endif

    return true;
}

#define LFB_SIZE 0xC00000

int i915_gem_init(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    unsigned long gtt_size, mappable_size;
    int ret;

    gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
    mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

    mutex_lock(&dev->struct_mutex);
    if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
        /* PPGTT pdes are stolen from global gtt ptes, so shrink the
         * aperture accordingly when using aliasing ppgtt. */
        gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;

        i915_gem_init_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size - LFB_SIZE);

        ret = i915_gem_init_aliasing_ppgtt(dev);
        if (ret) {
            mutex_unlock(&dev->struct_mutex);
            return ret;
        }
    } else {
        /* Let GEM Manage all of the aperture.
         *
         * However, leave one page at the end still bound to the scratch
         * page.  There are a number of places where the hardware
         * apparently prefetches past the end of the object, and we've
         * seen multiple hangs with the GPU head pointer stuck in a
         * batchbuffer bound at the last page of the aperture.  One page
         * should be enough to keep any prefetching inside of the
         * aperture.
         */
        i915_gem_init_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size - LFB_SIZE);
    }

    ret = i915_gem_init_hw(dev);
    mutex_unlock(&dev->struct_mutex);
    if (ret) {
        i915_gem_cleanup_aliasing_ppgtt(dev);
        return ret;
    }

    return 0;
}

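#if 0
/* Illustrative sketch only, not part of the driver: the aperture
 * arithmetic used above, with hypothetical numbers.  A GGTT with
 * 524288 PTEs covers 524288 << PAGE_SHIFT = 2 GiB; with aliasing PPGTT
 * enabled, I915_PPGTT_PD_ENTRIES pages of PTEs are carved out of that
 * range, and LFB_SIZE (0xC00000, 12 MiB) stays reserved for the kernel
 * framebuffer at the start of the range handed to GEM.
 */
static unsigned long example_gem_range_end(unsigned long total_entries,
                                           bool aliasing_ppgtt)
{
    unsigned long gtt_size = total_entries << PAGE_SHIFT;

    if (aliasing_ppgtt)
        gtt_size -= I915_PPGTT_PD_ENTRIES * PAGE_SIZE;

    return gtt_size - LFB_SIZE; /* end of the range handed to GEM */
}
#endif
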
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int i;

        for_each_ring(ring, dev_priv, i)
                intel_cleanup_ring_buffer(ring);
}

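/* The VT-switch entry points below are compiled out in this port; they are
 * only meaningful when kernel modesetting is not in use, which is why each
 * of them returns immediately for DRIVER_MODESET. */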
#if 0

int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;

        if (atomic_read(&dev_priv->mm.wedged)) {
                DRM_ERROR("Reenabling wedged hardware, good luck\n");
                atomic_set(&dev_priv->mm.wedged, 0);
        }

        mutex_lock(&dev->struct_mutex);
        dev_priv->mm.suspended = 0;

        ret = i915_gem_init_hw(dev);
        if (ret != 0) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        BUG_ON(!list_empty(&dev_priv->mm.active_list));
        mutex_unlock(&dev->struct_mutex);

        ret = drm_irq_install(dev);
        if (ret)
                goto cleanup_ringbuffer;

        return 0;

cleanup_ringbuffer:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        dev_priv->mm.suspended = 1;
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;

        drm_irq_uninstall(dev);
        return i915_gem_idle(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        ret = i915_gem_idle(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
}
#endif

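/* Reset a ring's bookkeeping lists to an empty state: no active objects and
 * no outstanding requests. */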
static void
init_ring_lists(struct intel_ring_buffer *ring)
{
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
}

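/* One-time GEM state setup at driver load: initialise the memory-management
 * lists, the retire work handler, fence registers and bit-6 swizzle
 * detection before any GEM objects exist. */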
void
i915_gem_load(struct drm_device *dev)
{
        int i;
        drm_i915_private_t *dev_priv = dev->dev_private;

        INIT_LIST_HEAD(&dev_priv->mm.active_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        for (i = 0; i < I915_NUM_RINGS; i++)
                init_ring_lists(&dev_priv->ring[i]);
        for (i = 0; i < I915_MAX_NUM_FENCES; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);

        /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
        if (IS_GEN3(dev)) {
                I915_WRITE(MI_ARB_STATE,
                           _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
        }

        dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

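        /* Gen4+ hardware, as well as 945G/945GM/G33, provides 16 fence
         * registers; older chipsets only have 8. */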
        if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                dev_priv->num_fence_regs = 16;
        else
                dev_priv->num_fence_regs = 8;

        /* Initialize fence registers to zero */
        i915_gem_reset_fences(dev);

        i915_gem_detect_bit_6_swizzle(dev);

        dev_priv->mm.interruptible = true;

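        /* The inactive-object shrinker is left unregistered in this port; on
         * Linux it would allow the VM to reclaim idle GEM objects under
         * memory pressure. */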
//      dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
//      dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
//      register_shrinker(&dev_priv->mm.inactive_shrinker);
}