Subversion Repositories Kolibri OS


Diff of Rev 6104 against Rev 7146. The two revisions are identical except in radeon_gem_busy_ioctl(); lines marked with a leading "-" below exist only in Rev 6104 and were removed in Rev 7146.
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

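/* Final-free callback for a GEM object: drop the reference to the
 * backing radeon_bo.
 */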
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_bo_unref(&robj);
	}
}

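/* Create a radeon_bo and the GEM object wrapping it.  The size is capped
 * at the unpinned GTT size, and a failed VRAM allocation (other than
 * -ERESTARTSYS) is retried with GTT added to the allowed domains.
 *
 * Hypothetical usage sketch (not part of this file), e.g. for a 1 MiB
 * kernel-owned GTT buffer:
 *
 *	struct drm_gem_object *gobj;
 *	int r = radeon_gem_object_create(rdev, 1024 * 1024, PAGE_SIZE,
 *					 RADEON_GEM_DOMAIN_GTT, 0,
 *					 true, &gobj);
 */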
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

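/* Helper for the set_domain ioctl: use the write domain if one was
 * requested, otherwise the read domain.  In this port the CPU-domain
 * wait is stubbed out (see the commented-out radeon_bo_wait() below).
 */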
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for CPU access, wait for the object to be idle */
//		r = radeon_bo_wait(robj, NULL, false);
//		if (r) {
//			printk(KERN_ERR "Failed to wait for object !\n");
//			return r;
//		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
 //  radeon_object_force_delete(rdev);
}

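/* Everything from here to the matching #endif is compiled out in this
 * port: these are the GEM ioctl handlers of the upstream Linux driver.
 */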
#if 0
/*
 * GEM ioctls.
 */
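/* Report memory sizes to userspace; pinned VRAM and GTT are subtracted
 * from the figures the application can actually use.
 */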
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

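/* GEM_CREATE ioctl: round the size up to a whole page, allocate the BO,
 * and hand the caller a handle.  The allocation reference is dropped as
 * soon as the handle owns the object.
 */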
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

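/* Translate a GEM handle into the fake offset userspace passes to mmap();
 * shared by the dumb-buffer path and radeon_gem_mmap_ioctl() below.
 */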
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

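/* GEM_BUSY ioctl: poll the BO without blocking (no_wait == true) and
 * report its current placement domain to userspace.
 */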
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
-	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
-	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

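/* GEM_WAIT_IDLE ioctl: block until the BO is idle, then flush the HDP
 * read cache through MMIO if the BO ended up in VRAM and the ASIC
 * provides an mmio_hdp_flush hook.
 */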
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, false);
	/* Flush HDP cache via MMIO if necessary */
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

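/* GEM_SET_TILING ioctl: record the requested tiling flags and pitch on
 * the BO.
 */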
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

#endif