Rev 6104 | Details | Compare with Previous | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
1126 | serge | 1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
||
3 | * Copyright 2008 Red Hat Inc. |
||
4 | * Copyright 2009 Jerome Glisse. |
||
5 | * |
||
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
||
7 | * copy of this software and associated documentation files (the "Software"), |
||
8 | * to deal in the Software without restriction, including without limitation |
||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
||
10 | * and/or sell copies of the Software, and to permit persons to whom the |
||
11 | * Software is furnished to do so, subject to the following conditions: |
||
12 | * |
||
13 | * The above copyright notice and this permission notice shall be included in |
||
14 | * all copies or substantial portions of the Software. |
||
15 | * |
||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
||
22 | * OTHER DEALINGS IN THE SOFTWARE. |
||
23 | * |
||
24 | * Authors: Dave Airlie |
||
25 | * Alex Deucher |
||
26 | * Jerome Glisse |
||
27 | */ |
||
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
31 | |||
/*
 * GEM free callback: drop the driver-side reference on the backing
 * radeon buffer object, if one is attached to this GEM object.
 */
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (!robj)
		return;

	radeon_bo_unref(&robj);
}
||
40 | |||
/**
 * radeon_gem_object_create - allocate a radeon BO wrapped in a GEM object
 * @rdev:           radeon device
 * @size:           requested size in bytes
 * @alignment:      requested alignment in bytes; bumped to at least PAGE_SIZE
 * @initial_domain: RADEON_GEM_DOMAIN_* placement to try first; on VRAM
 *                  allocation failure GTT is added and the create is retried
 * @flags:          RADEON_GEM_* creation flags passed to radeon_bo_create()
 * @kernel:         true for kernel-internal BOs
 * @obj:            out parameter; receives the embedded GEM object on
 *                  success, NULL on failure
 *
 * Returns 0 on success, -ENOMEM when the request exceeds the unpinned GTT
 * size, or the error returned by radeon_bo_create().
 */
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			/* A pure-VRAM request that failed is retried once
			 * with GTT added as an allowed fallback domain; the
			 * |= guarantees the retry condition cannot match a
			 * second time, so the loop terminates. */
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	/* track the new BO on the device-global object list */
	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}
||
88 | |||
5078 | serge | 89 | static int radeon_gem_set_domain(struct drm_gem_object *gobj, |
1126 | serge | 90 | uint32_t rdomain, uint32_t wdomain) |
91 | { |
||
1404 | serge | 92 | struct radeon_bo *robj; |
1126 | serge | 93 | uint32_t domain; |
5271 | serge | 94 | long r; |
1126 | serge | 95 | |
96 | /* FIXME: reeimplement */ |
||
1986 | serge | 97 | robj = gem_to_radeon_bo(gobj); |
1126 | serge | 98 | /* work out where to validate the buffer to */ |
99 | domain = wdomain; |
||
100 | if (!domain) { |
||
101 | domain = rdomain; |
||
102 | } |
||
103 | if (!domain) { |
||
104 | /* Do nothings */ |
||
2997 | Serge | 105 | printk(KERN_WARNING "Set domain without domain !\n"); |
1126 | serge | 106 | return 0; |
107 | } |
||
108 | if (domain == RADEON_GEM_DOMAIN_CPU) { |
||
109 | /* Asking for cpu access wait for object idle */ |
||
1404 | serge | 110 | // r = radeon_bo_wait(robj, NULL, false); |
111 | // if (r) { |
||
112 | // printk(KERN_ERR "Failed to wait for object !\n"); |
||
113 | // return r; |
||
114 | // } |
||
1126 | serge | 115 | } |
116 | return 0; |
||
117 | } |
||
118 | |||
/* One-time GEM state init: start with an empty device-global BO list.
 * Always returns 0. */
int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}
||
124 | |||
/* GEM teardown. Stubbed out in this port: the force-delete of any
 * remaining objects is disabled below. */
void radeon_gem_fini(struct radeon_device *rdev)
{
//	radeon_object_force_delete(rdev);
}
||
129 | |||
130 | #if 0 |
||
131 | /* |
||
132 | * GEM ioctls. |
||
133 | */ |
||
/*
 * GEM_INFO ioctl: report memory sizes to userspace.  The "visible" /
 * usable figures have the currently pinned amounts subtracted.
 * Always returns 0.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	/* visible VRAM = pages managed by TTM, minus what is pinned */
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}
||
151 | |||
/* GEM_PREAD ioctl: not implemented; always fails with -ENOSYS. */
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}
||
159 | |||
/* GEM_PWRITE ioctl: not implemented; always fails with -ENOSYS. */
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}
||
167 | |||
/*
 * GEM_CREATE ioctl: allocate a BO of the requested size / alignment /
 * domain and hand the caller a handle to it.  Runs under the read side
 * of exclusive_lock so it cannot race a GPU reset.
 */
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		/* translate the error if it was caused by a GPU lockup */
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}
||
200 | |||
201 | int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
||
202 | struct drm_file *filp) |
||
203 | { |
||
204 | /* transition the BO to a domain - |
||
205 | * just validate the BO into a certain domain */ |
||
2997 | Serge | 206 | struct radeon_device *rdev = dev->dev_private; |
1126 | serge | 207 | struct drm_radeon_gem_set_domain *args = data; |
208 | struct drm_gem_object *gobj; |
||
1404 | serge | 209 | struct radeon_bo *robj; |
1126 | serge | 210 | int r; |
211 | |||
212 | /* for now if someone requests domain CPU - |
||
213 | * just make sure the buffer is finished with */ |
||
2997 | Serge | 214 | down_read(&rdev->exclusive_lock); |
1126 | serge | 215 | |
216 | /* just do a BO wait for now */ |
||
217 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
||
218 | if (gobj == NULL) { |
||
2997 | Serge | 219 | up_read(&rdev->exclusive_lock); |
1963 | serge | 220 | return -ENOENT; |
1126 | serge | 221 | } |
1986 | serge | 222 | robj = gem_to_radeon_bo(gobj); |
1126 | serge | 223 | |
224 | r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); |
||
225 | |||
1963 | serge | 226 | drm_gem_object_unreference_unlocked(gobj); |
2997 | Serge | 227 | up_read(&rdev->exclusive_lock); |
228 | r = radeon_gem_handle_lockup(robj->rdev, r); |
||
1126 | serge | 229 | return r; |
230 | } |
||
231 | |||
5346 | serge | 232 | int radeon_mode_dumb_mmap(struct drm_file *filp, |
1986 | serge | 233 | struct drm_device *dev, |
5346 | serge | 234 | uint32_t handle, uint64_t *offset_p) |
1126 | serge | 235 | { |
236 | struct drm_gem_object *gobj; |
||
1404 | serge | 237 | struct radeon_bo *robj; |
1126 | serge | 238 | |
1986 | serge | 239 | gobj = drm_gem_object_lookup(dev, filp, handle); |
1126 | serge | 240 | if (gobj == NULL) { |
1963 | serge | 241 | return -ENOENT; |
1126 | serge | 242 | } |
1986 | serge | 243 | robj = gem_to_radeon_bo(gobj); |
244 | *offset_p = radeon_bo_mmap_offset(robj); |
||
1963 | serge | 245 | drm_gem_object_unreference_unlocked(gobj); |
1404 | serge | 246 | return 0; |
1126 | serge | 247 | } |
248 | |||
/* GEM_MMAP ioctl: thin wrapper translating a handle into a fake mmap
 * offset via radeon_mode_dumb_mmap(). */
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
256 | |||
1126 | serge | 257 | int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, |
258 | struct drm_file *filp) |
||
259 | { |
||
1404 | serge | 260 | struct drm_radeon_gem_busy *args = data; |
261 | struct drm_gem_object *gobj; |
||
262 | struct radeon_bo *robj; |
||
263 | int r; |
||
264 | uint32_t cur_placement = 0; |
||
265 | |||
266 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
||
267 | if (gobj == NULL) { |
||
1963 | serge | 268 | return -ENOENT; |
1404 | serge | 269 | } |
1986 | serge | 270 | robj = gem_to_radeon_bo(gobj); |
1404 | serge | 271 | r = radeon_bo_wait(robj, &cur_placement, true); |
5078 | serge | 272 | args->domain = radeon_mem_type_to_domain(cur_placement); |
1963 | serge | 273 | drm_gem_object_unreference_unlocked(gobj); |
1404 | serge | 274 | return r; |
1126 | serge | 275 | } |
276 | |||
277 | int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, |
||
278 | struct drm_file *filp) |
||
279 | { |
||
2997 | Serge | 280 | struct radeon_device *rdev = dev->dev_private; |
1126 | serge | 281 | struct drm_radeon_gem_wait_idle *args = data; |
282 | struct drm_gem_object *gobj; |
||
1404 | serge | 283 | struct radeon_bo *robj; |
5271 | serge | 284 | int r = 0; |
5078 | serge | 285 | uint32_t cur_placement = 0; |
5271 | serge | 286 | long ret; |
1126 | serge | 287 | |
288 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
||
289 | if (gobj == NULL) { |
||
1963 | serge | 290 | return -ENOENT; |
1126 | serge | 291 | } |
1986 | serge | 292 | robj = gem_to_radeon_bo(gobj); |
5078 | serge | 293 | r = radeon_bo_wait(robj, &cur_placement, false); |
294 | /* Flush HDP cache via MMIO if necessary */ |
||
295 | if (rdev->asic->mmio_hdp_flush && |
||
296 | radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) |
||
297 | robj->rdev->asic->mmio_hdp_flush(rdev); |
||
1963 | serge | 298 | drm_gem_object_unreference_unlocked(gobj); |
2997 | Serge | 299 | r = radeon_gem_handle_lockup(rdev, r); |
1126 | serge | 300 | return r; |
301 | } |
||
302 | |||
1404 | serge | 303 | int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, |
304 | struct drm_file *filp) |
||
305 | { |
||
306 | struct drm_radeon_gem_set_tiling *args = data; |
||
307 | struct drm_gem_object *gobj; |
||
308 | struct radeon_bo *robj; |
||
309 | int r = 0; |
||
310 | |||
311 | DRM_DEBUG("%d \n", args->handle); |
||
312 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
||
313 | if (gobj == NULL) |
||
1963 | serge | 314 | return -ENOENT; |
1986 | serge | 315 | robj = gem_to_radeon_bo(gobj); |
1404 | serge | 316 | r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); |
1963 | serge | 317 | drm_gem_object_unreference_unlocked(gobj); |
1404 | serge | 318 | return r; |
319 | } |
||
320 | |||
1126 | serge | 321 | #endif><>> |