Rev 5078 | Rev 5346 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 5078 | Rev 5271 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. |
4 | * Copyright 2009 Jerome Glisse. |
5 | * |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
14 | * all copies or substantial portions of the Software. |
15 | * |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
23 | * |
24 | * Authors: Dave Airlie |
24 | * Authors: Dave Airlie |
25 | * Alex Deucher |
25 | * Alex Deucher |
26 | * Jerome Glisse |
26 | * Jerome Glisse |
27 | */ |
27 | */ |
28 | #include |
28 | #include |
29 | #include |
29 | #include |
30 | #include "radeon.h" |
30 | #include "radeon.h" |
31 | 31 | ||
/* Final GEM free callback: drop the driver-side reference on the
 * backing radeon BO once the last GEM reference is gone.
 */
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj)
		radeon_bo_unref(&robj);
}
40 | 40 | ||
41 | int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size, |
41 | int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size, |
42 | int alignment, int initial_domain, |
42 | int alignment, int initial_domain, |
43 | u32 flags, bool kernel, |
43 | u32 flags, bool kernel, |
44 | struct drm_gem_object **obj) |
44 | struct drm_gem_object **obj) |
45 | { |
45 | { |
46 | struct radeon_bo *robj; |
46 | struct radeon_bo *robj; |
47 | unsigned long max_size; |
47 | unsigned long max_size; |
48 | int r; |
48 | int r; |
49 | 49 | ||
50 | *obj = NULL; |
50 | *obj = NULL; |
51 | /* At least align on page size */ |
51 | /* At least align on page size */ |
52 | if (alignment < PAGE_SIZE) { |
52 | if (alignment < PAGE_SIZE) { |
53 | alignment = PAGE_SIZE; |
53 | alignment = PAGE_SIZE; |
54 | } |
54 | } |
55 | 55 | ||
56 | /* Maximum bo size is the unpinned gtt size since we use the gtt to |
56 | /* Maximum bo size is the unpinned gtt size since we use the gtt to |
57 | * handle vram to system pool migrations. |
57 | * handle vram to system pool migrations. |
58 | */ |
58 | */ |
59 | max_size = rdev->mc.gtt_size - rdev->gart_pin_size; |
59 | max_size = rdev->mc.gtt_size - rdev->gart_pin_size; |
60 | if (size > max_size) { |
60 | if (size > max_size) { |
61 | DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n", |
61 | DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n", |
62 | size >> 20, max_size >> 20); |
62 | size >> 20, max_size >> 20); |
63 | return -ENOMEM; |
63 | return -ENOMEM; |
64 | } |
64 | } |
65 | 65 | ||
66 | retry: |
66 | retry: |
67 | r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, |
67 | r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, |
68 | flags, NULL, &robj); |
68 | flags, NULL, NULL, &robj); |
69 | if (r) { |
69 | if (r) { |
70 | if (r != -ERESTARTSYS) { |
70 | if (r != -ERESTARTSYS) { |
71 | if (initial_domain == RADEON_GEM_DOMAIN_VRAM) { |
71 | if (initial_domain == RADEON_GEM_DOMAIN_VRAM) { |
72 | initial_domain |= RADEON_GEM_DOMAIN_GTT; |
72 | initial_domain |= RADEON_GEM_DOMAIN_GTT; |
73 | goto retry; |
73 | goto retry; |
74 | } |
74 | } |
75 | DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n", |
75 | DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n", |
76 | size, initial_domain, alignment, r); |
76 | size, initial_domain, alignment, r); |
77 | } |
77 | } |
78 | return r; |
78 | return r; |
79 | } |
79 | } |
80 | *obj = &robj->gem_base; |
80 | *obj = &robj->gem_base; |
81 | 81 | ||
82 | mutex_lock(&rdev->gem.mutex); |
82 | mutex_lock(&rdev->gem.mutex); |
83 | list_add_tail(&robj->list, &rdev->gem.objects); |
83 | list_add_tail(&robj->list, &rdev->gem.objects); |
84 | mutex_unlock(&rdev->gem.mutex); |
84 | mutex_unlock(&rdev->gem.mutex); |
85 | 85 | ||
86 | return 0; |
86 | return 0; |
87 | } |
87 | } |
88 | 88 | ||
89 | static int radeon_gem_set_domain(struct drm_gem_object *gobj, |
89 | static int radeon_gem_set_domain(struct drm_gem_object *gobj, |
90 | uint32_t rdomain, uint32_t wdomain) |
90 | uint32_t rdomain, uint32_t wdomain) |
91 | { |
91 | { |
92 | struct radeon_bo *robj; |
92 | struct radeon_bo *robj; |
93 | uint32_t domain; |
93 | uint32_t domain; |
94 | int r; |
94 | long r; |
95 | 95 | ||
96 | /* FIXME: reeimplement */ |
96 | /* FIXME: reeimplement */ |
97 | robj = gem_to_radeon_bo(gobj); |
97 | robj = gem_to_radeon_bo(gobj); |
98 | /* work out where to validate the buffer to */ |
98 | /* work out where to validate the buffer to */ |
99 | domain = wdomain; |
99 | domain = wdomain; |
100 | if (!domain) { |
100 | if (!domain) { |
101 | domain = rdomain; |
101 | domain = rdomain; |
102 | } |
102 | } |
103 | if (!domain) { |
103 | if (!domain) { |
104 | /* Do nothings */ |
104 | /* Do nothings */ |
105 | printk(KERN_WARNING "Set domain without domain !\n"); |
105 | printk(KERN_WARNING "Set domain without domain !\n"); |
106 | return 0; |
106 | return 0; |
107 | } |
107 | } |
108 | if (domain == RADEON_GEM_DOMAIN_CPU) { |
108 | if (domain == RADEON_GEM_DOMAIN_CPU) { |
109 | /* Asking for cpu access wait for object idle */ |
109 | /* Asking for cpu access wait for object idle */ |
110 | // r = radeon_bo_wait(robj, NULL, false); |
110 | // r = radeon_bo_wait(robj, NULL, false); |
111 | // if (r) { |
111 | // if (r) { |
112 | // printk(KERN_ERR "Failed to wait for object !\n"); |
112 | // printk(KERN_ERR "Failed to wait for object !\n"); |
113 | // return r; |
113 | // return r; |
114 | // } |
114 | // } |
115 | } |
115 | } |
116 | return 0; |
116 | return 0; |
117 | } |
117 | } |
118 | 118 | ||
119 | int radeon_gem_init(struct radeon_device *rdev) |
119 | int radeon_gem_init(struct radeon_device *rdev) |
120 | { |
120 | { |
121 | INIT_LIST_HEAD(&rdev->gem.objects); |
121 | INIT_LIST_HEAD(&rdev->gem.objects); |
122 | return 0; |
122 | return 0; |
123 | } |
123 | } |
124 | 124 | ||
/* GEM teardown counterpart of radeon_gem_init(); currently nothing
 * is torn down here (forced object deletion is stubbed out).
 */
void radeon_gem_fini(struct radeon_device *rdev)
{
	// radeon_object_force_delete(rdev);
}
129 | 129 | ||
130 | #if 0 |
130 | #if 0 |
131 | /* |
131 | /* |
132 | * GEM ioctls. |
132 | * GEM ioctls. |
133 | */ |
133 | */ |
134 | int radeon_gem_info_ioctl(struct drm_device *dev, void *data, |
134 | int radeon_gem_info_ioctl(struct drm_device *dev, void *data, |
135 | struct drm_file *filp) |
135 | struct drm_file *filp) |
136 | { |
136 | { |
137 | struct radeon_device *rdev = dev->dev_private; |
137 | struct radeon_device *rdev = dev->dev_private; |
138 | struct drm_radeon_gem_info *args = data; |
138 | struct drm_radeon_gem_info *args = data; |
139 | struct ttm_mem_type_manager *man; |
139 | struct ttm_mem_type_manager *man; |
140 | 140 | ||
141 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; |
141 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; |
142 | 142 | ||
143 | args->vram_size = rdev->mc.real_vram_size; |
143 | args->vram_size = rdev->mc.real_vram_size; |
144 | args->vram_visible = (u64)man->size << PAGE_SHIFT; |
144 | args->vram_visible = (u64)man->size << PAGE_SHIFT; |
145 | args->vram_visible -= rdev->vram_pin_size; |
145 | args->vram_visible -= rdev->vram_pin_size; |
146 | args->gart_size = rdev->mc.gtt_size; |
146 | args->gart_size = rdev->mc.gtt_size; |
147 | args->gart_size -= rdev->gart_pin_size; |
147 | args->gart_size -= rdev->gart_pin_size; |
148 | 148 | ||
149 | return 0; |
149 | return 0; |
150 | } |
150 | } |
151 | 151 | ||
152 | int radeon_gem_pread_ioctl(struct drm_device *dev, void *data, |
152 | int radeon_gem_pread_ioctl(struct drm_device *dev, void *data, |
153 | struct drm_file *filp) |
153 | struct drm_file *filp) |
154 | { |
154 | { |
155 | /* TODO: implement */ |
155 | /* TODO: implement */ |
156 | DRM_ERROR("unimplemented %s\n", __func__); |
156 | DRM_ERROR("unimplemented %s\n", __func__); |
157 | return -ENOSYS; |
157 | return -ENOSYS; |
158 | } |
158 | } |
159 | 159 | ||
160 | int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data, |
160 | int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data, |
161 | struct drm_file *filp) |
161 | struct drm_file *filp) |
162 | { |
162 | { |
163 | /* TODO: implement */ |
163 | /* TODO: implement */ |
164 | DRM_ERROR("unimplemented %s\n", __func__); |
164 | DRM_ERROR("unimplemented %s\n", __func__); |
165 | return -ENOSYS; |
165 | return -ENOSYS; |
166 | } |
166 | } |
167 | 167 | ||
168 | int radeon_gem_create_ioctl(struct drm_device *dev, void *data, |
168 | int radeon_gem_create_ioctl(struct drm_device *dev, void *data, |
169 | struct drm_file *filp) |
169 | struct drm_file *filp) |
170 | { |
170 | { |
171 | struct radeon_device *rdev = dev->dev_private; |
171 | struct radeon_device *rdev = dev->dev_private; |
172 | struct drm_radeon_gem_create *args = data; |
172 | struct drm_radeon_gem_create *args = data; |
173 | struct drm_gem_object *gobj; |
173 | struct drm_gem_object *gobj; |
174 | uint32_t handle; |
174 | uint32_t handle; |
175 | int r; |
175 | int r; |
176 | 176 | ||
177 | down_read(&rdev->exclusive_lock); |
177 | down_read(&rdev->exclusive_lock); |
178 | /* create a gem object to contain this object in */ |
178 | /* create a gem object to contain this object in */ |
179 | args->size = roundup(args->size, PAGE_SIZE); |
179 | args->size = roundup(args->size, PAGE_SIZE); |
180 | r = radeon_gem_object_create(rdev, args->size, args->alignment, |
180 | r = radeon_gem_object_create(rdev, args->size, args->alignment, |
181 | args->initial_domain, args->flags, |
181 | args->initial_domain, args->flags, |
182 | false, &gobj); |
182 | false, &gobj); |
183 | if (r) { |
183 | if (r) { |
184 | up_read(&rdev->exclusive_lock); |
184 | up_read(&rdev->exclusive_lock); |
185 | r = radeon_gem_handle_lockup(rdev, r); |
185 | r = radeon_gem_handle_lockup(rdev, r); |
186 | return r; |
186 | return r; |
187 | } |
187 | } |
188 | r = drm_gem_handle_create(filp, gobj, &handle); |
188 | r = drm_gem_handle_create(filp, gobj, &handle); |
189 | /* drop reference from allocate - handle holds it now */ |
189 | /* drop reference from allocate - handle holds it now */ |
190 | drm_gem_object_unreference_unlocked(gobj); |
190 | drm_gem_object_unreference_unlocked(gobj); |
191 | if (r) { |
191 | if (r) { |
192 | up_read(&rdev->exclusive_lock); |
192 | up_read(&rdev->exclusive_lock); |
193 | r = radeon_gem_handle_lockup(rdev, r); |
193 | r = radeon_gem_handle_lockup(rdev, r); |
194 | return r; |
194 | return r; |
195 | } |
195 | } |
196 | args->handle = handle; |
196 | args->handle = handle; |
197 | up_read(&rdev->exclusive_lock); |
197 | up_read(&rdev->exclusive_lock); |
198 | return 0; |
198 | return 0; |
199 | } |
199 | } |
200 | 200 | ||
201 | int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
201 | int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
202 | struct drm_file *filp) |
202 | struct drm_file *filp) |
203 | { |
203 | { |
204 | /* transition the BO to a domain - |
204 | /* transition the BO to a domain - |
205 | * just validate the BO into a certain domain */ |
205 | * just validate the BO into a certain domain */ |
206 | struct radeon_device *rdev = dev->dev_private; |
206 | struct radeon_device *rdev = dev->dev_private; |
207 | struct drm_radeon_gem_set_domain *args = data; |
207 | struct drm_radeon_gem_set_domain *args = data; |
208 | struct drm_gem_object *gobj; |
208 | struct drm_gem_object *gobj; |
209 | struct radeon_bo *robj; |
209 | struct radeon_bo *robj; |
210 | int r; |
210 | int r; |
211 | 211 | ||
212 | /* for now if someone requests domain CPU - |
212 | /* for now if someone requests domain CPU - |
213 | * just make sure the buffer is finished with */ |
213 | * just make sure the buffer is finished with */ |
214 | down_read(&rdev->exclusive_lock); |
214 | down_read(&rdev->exclusive_lock); |
215 | 215 | ||
216 | /* just do a BO wait for now */ |
216 | /* just do a BO wait for now */ |
217 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
217 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
218 | if (gobj == NULL) { |
218 | if (gobj == NULL) { |
219 | up_read(&rdev->exclusive_lock); |
219 | up_read(&rdev->exclusive_lock); |
220 | return -ENOENT; |
220 | return -ENOENT; |
221 | } |
221 | } |
222 | robj = gem_to_radeon_bo(gobj); |
222 | robj = gem_to_radeon_bo(gobj); |
223 | 223 | ||
224 | r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); |
224 | r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); |
225 | 225 | ||
226 | drm_gem_object_unreference_unlocked(gobj); |
226 | drm_gem_object_unreference_unlocked(gobj); |
227 | up_read(&rdev->exclusive_lock); |
227 | up_read(&rdev->exclusive_lock); |
228 | r = radeon_gem_handle_lockup(robj->rdev, r); |
228 | r = radeon_gem_handle_lockup(robj->rdev, r); |
229 | return r; |
229 | return r; |
230 | } |
230 | } |
231 | 231 | ||
232 | int radeon_mode_dumb_mmap(struct drm_file *filp, |
232 | static int radeon_mode_mmap(struct drm_file *filp, |
- | 233 | struct drm_device *dev, |
|
233 | struct drm_device *dev, |
234 | uint32_t handle, bool dumb, |
234 | uint32_t handle, uint64_t *offset_p) |
235 | uint64_t *offset_p) |
235 | { |
236 | { |
236 | struct drm_gem_object *gobj; |
237 | struct drm_gem_object *gobj; |
237 | struct radeon_bo *robj; |
238 | struct radeon_bo *robj; |
238 | 239 | ||
239 | gobj = drm_gem_object_lookup(dev, filp, handle); |
240 | gobj = drm_gem_object_lookup(dev, filp, handle); |
240 | if (gobj == NULL) { |
241 | if (gobj == NULL) { |
241 | return -ENOENT; |
242 | return -ENOENT; |
242 | } |
243 | } |
- | 244 | ||
- | 245 | /* |
|
- | 246 | * We don't allow dumb mmaps on objects created using another |
|
- | 247 | * interface. |
|
- | 248 | */ |
|
- | 249 | WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach), |
|
- | 250 | "Illegal dumb map of GPU buffer.\n"); |
|
- | 251 | ||
243 | robj = gem_to_radeon_bo(gobj); |
252 | robj = gem_to_radeon_bo(gobj); |
244 | *offset_p = radeon_bo_mmap_offset(robj); |
253 | *offset_p = radeon_bo_mmap_offset(robj); |
245 | drm_gem_object_unreference_unlocked(gobj); |
254 | drm_gem_object_unreference_unlocked(gobj); |
246 | return 0; |
255 | return 0; |
247 | } |
256 | } |
248 | 257 | ||
249 | int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, |
258 | int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, |
250 | struct drm_file *filp) |
259 | struct drm_file *filp) |
251 | { |
260 | { |
252 | struct drm_radeon_gem_mmap *args = data; |
261 | struct drm_radeon_gem_mmap *args = data; |
253 | 262 | ||
- | 263 | return radeon_mode_mmap(filp, dev, args->handle, false, |
|
254 | return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr); |
264 | &args->addr_ptr); |
255 | } |
265 | } |
256 | 266 | ||
257 | int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, |
267 | int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, |
258 | struct drm_file *filp) |
268 | struct drm_file *filp) |
259 | { |
269 | { |
260 | struct radeon_device *rdev = dev->dev_private; |
270 | struct radeon_device *rdev = dev->dev_private; |
261 | struct drm_radeon_gem_busy *args = data; |
271 | struct drm_radeon_gem_busy *args = data; |
262 | struct drm_gem_object *gobj; |
272 | struct drm_gem_object *gobj; |
263 | struct radeon_bo *robj; |
273 | struct radeon_bo *robj; |
264 | int r; |
274 | int r; |
265 | uint32_t cur_placement = 0; |
275 | uint32_t cur_placement = 0; |
266 | 276 | ||
267 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
277 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
268 | if (gobj == NULL) { |
278 | if (gobj == NULL) { |
269 | return -ENOENT; |
279 | return -ENOENT; |
270 | } |
280 | } |
271 | robj = gem_to_radeon_bo(gobj); |
281 | robj = gem_to_radeon_bo(gobj); |
272 | r = radeon_bo_wait(robj, &cur_placement, true); |
282 | r = radeon_bo_wait(robj, &cur_placement, true); |
273 | args->domain = radeon_mem_type_to_domain(cur_placement); |
283 | args->domain = radeon_mem_type_to_domain(cur_placement); |
274 | drm_gem_object_unreference_unlocked(gobj); |
284 | drm_gem_object_unreference_unlocked(gobj); |
275 | r = radeon_gem_handle_lockup(rdev, r); |
285 | r = radeon_gem_handle_lockup(rdev, r); |
276 | return r; |
286 | return r; |
277 | } |
287 | } |
278 | 288 | ||
279 | int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, |
289 | int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, |
280 | struct drm_file *filp) |
290 | struct drm_file *filp) |
281 | { |
291 | { |
282 | struct radeon_device *rdev = dev->dev_private; |
292 | struct radeon_device *rdev = dev->dev_private; |
283 | struct drm_radeon_gem_wait_idle *args = data; |
293 | struct drm_radeon_gem_wait_idle *args = data; |
284 | struct drm_gem_object *gobj; |
294 | struct drm_gem_object *gobj; |
285 | struct radeon_bo *robj; |
295 | struct radeon_bo *robj; |
286 | int r; |
296 | int r = 0; |
287 | uint32_t cur_placement = 0; |
297 | uint32_t cur_placement = 0; |
- | 298 | long ret; |
|
288 | 299 | ||
289 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
300 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
290 | if (gobj == NULL) { |
301 | if (gobj == NULL) { |
291 | return -ENOENT; |
302 | return -ENOENT; |
292 | } |
303 | } |
293 | robj = gem_to_radeon_bo(gobj); |
304 | robj = gem_to_radeon_bo(gobj); |
294 | r = radeon_bo_wait(robj, &cur_placement, false); |
305 | r = radeon_bo_wait(robj, &cur_placement, false); |
295 | /* Flush HDP cache via MMIO if necessary */ |
306 | /* Flush HDP cache via MMIO if necessary */ |
296 | if (rdev->asic->mmio_hdp_flush && |
307 | if (rdev->asic->mmio_hdp_flush && |
297 | radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) |
308 | radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) |
298 | robj->rdev->asic->mmio_hdp_flush(rdev); |
309 | robj->rdev->asic->mmio_hdp_flush(rdev); |
299 | drm_gem_object_unreference_unlocked(gobj); |
310 | drm_gem_object_unreference_unlocked(gobj); |
300 | r = radeon_gem_handle_lockup(rdev, r); |
311 | r = radeon_gem_handle_lockup(rdev, r); |
301 | return r; |
312 | return r; |
302 | } |
313 | } |
303 | 314 | ||
304 | int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, |
315 | int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, |
305 | struct drm_file *filp) |
316 | struct drm_file *filp) |
306 | { |
317 | { |
307 | struct drm_radeon_gem_set_tiling *args = data; |
318 | struct drm_radeon_gem_set_tiling *args = data; |
308 | struct drm_gem_object *gobj; |
319 | struct drm_gem_object *gobj; |
309 | struct radeon_bo *robj; |
320 | struct radeon_bo *robj; |
310 | int r = 0; |
321 | int r = 0; |
311 | 322 | ||
312 | DRM_DEBUG("%d \n", args->handle); |
323 | DRM_DEBUG("%d \n", args->handle); |
313 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
324 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
314 | if (gobj == NULL) |
325 | if (gobj == NULL) |
315 | return -ENOENT; |
326 | return -ENOENT; |
316 | robj = gem_to_radeon_bo(gobj); |
327 | robj = gem_to_radeon_bo(gobj); |
317 | r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); |
328 | r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); |
318 | drm_gem_object_unreference_unlocked(gobj); |
329 | drm_gem_object_unreference_unlocked(gobj); |
319 | return r; |
330 | return r; |
320 | } |
331 | } |
321 | 332 | ||
322 | #endif><>> |
333 | #endif><>> |