Subversion Repositories: KolibriOS

Rev 4075
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};


static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}


/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

/*
 * Called through kref_put() with the resource lock write-held; note that
 * it drops the lock around the backup-buffer teardown and re-acquires it
 * before returning to vmw_resource_unreference().
 */
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, 0);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}
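
/*
 * Illustrative sketch, not driver code (disabled like the other #if 0
 * blocks in this file): how the two refcounting helpers above pair up.
 * vmw_resource_unreference() clears the caller's pointer, so a stale
 * pointer cannot be used after the final put.
 */
#if 0
static void example_resource_ref_cycle(struct vmw_resource *res)
{
	struct vmw_resource *dup = vmw_resource_reference(res);

	/* ... use dup for as long as the extra reference is needed ... */

	vmw_resource_unreference(&dup);	/* dup == NULL from here on */
}
#endif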


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
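
/*
 * Sketch (illustrative only): the id lifecycle driven by the two helpers
 * above. The idr_preload()/idr_alloc(GFP_NOWAIT) pairing is the stock
 * idiom for allocating an idr slot while a spinning lock is held.
 */
#if 0
static int example_id_lifecycle(struct vmw_resource *res)
{
	int ret = vmw_resource_alloc_id(res);	/* sets res->id >= 1 */

	if (unlikely(ret != 0))
		return ret;

	/* ... create and use the hardware object under res->id ... */

	vmw_resource_release_id(res);		/* res->id is -1 again */
	return 0;
}
#endif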

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
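
/*
 * Sketch of the intended init -> activate sequence; my_res_free and
 * my_hw_destroy are hypothetical callbacks named here for illustration
 * only (vmw_stream_init() further down is the real in-file user).
 */
#if 0
static int example_resource_setup(struct vmw_private *dev_priv,
				  struct vmw_resource *res)
{
	int ret = vmw_resource_init(dev_priv, res, false,
				    my_res_free, &vmw_stream_func);

	if (unlikely(ret != 0))
		return ret;

	/* ... make the device aware of the resource ... */

	vmw_resource_activate(res, my_hw_destroy);
	return 0;	/* vmw_resource_lookup() can find it now */
}
#endif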

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
	return ret;
}
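
/*
 * Sketch (illustrative only) of a caller that accepts either object
 * kind, mirroring how callers elsewhere in the driver consume this
 * helper. Both out pointers must be NULL on entry, as the BUG_ON above
 * enforces.
 */
#if 0
static int example_handle_lookup(struct vmw_private *dev_priv,
				 struct ttm_object_file *tfile,
				 uint32_t handle)
{
	struct vmw_surface *surf = NULL;
	struct vmw_dma_buffer *buf = NULL;
	int ret;

	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
	if (unlikely(ret != 0))
		return ret;

	if (surf) {
		/* ... use the surface, then drop its reference ... */
	} else {
		/* ... use the buffer ... */
		vmw_dmabuf_unreference(&buf);
	}
	return 0;
}
#endif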

/**
 * Buffer management.
 */
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, bo_free);
	return ret;
}
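
/*
 * Sketch (illustrative only): struct vmw_dma_buffer is always embedded
 * and initialized in place, and @bo_free must free the embedding
 * object; that is why a NULL bo_free trips the BUG_ON above. On error,
 * ttm_bo_init() has already invoked bo_free on the object.
 */
#if 0
static int example_dmabuf_create(struct vmw_private *dev_priv,
				 size_t size,
				 struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	int ret;

	if (unlikely(buf == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_vram_sys_placement,
			      true, &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;	/* buf already freed via bo_free */

	*out = buf;
	return 0;
}
#endif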

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

//   ttm_base_object_kfree(vmw_user_bo, base);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_base_object_init(tfile,
				   &user_bo->base,
				   shareable,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	*handle = user_bo->base.hash.key;

out_no_base_object:
	return ret;
}
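
/*
 * Sketch (illustrative only): pairing allocation with handle release.
 * The TTM base object holds its own buffer reference, so the local
 * reference can be dropped while the handle stays valid.
 */
#if 0
static int example_user_alloc(struct vmw_private *dev_priv,
			      struct ttm_object_file *tfile)
{
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	int ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, tfile, 4096, false,
				    &handle, &dma_buf);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&dma_buf);	/* handle keeps the bo alive */

	/* ... hand the handle to user space; later it is dropped with: */
	return ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
}
#endif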

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);
	return (vmw_user_bo->base.tfile == tfile ||
		vmw_user_bo->base.shareable) ? 0 : -EPERM;
}

#if 0
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}
#endif

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
	return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
}
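
/*
 * Sketch (illustrative only): the lookup above hands back one buffer
 * reference that the caller owns; vmw_dumb_map_offset() below follows
 * the same pattern.
 */
#if 0
static int example_user_bo_use(struct ttm_object_file *tfile,
			       uint32_t handle)
{
	struct vmw_dma_buffer *buf;
	int ret = vmw_user_dmabuf_lookup(tfile, handle, &buf);

	if (unlikely(ret != 0))
		return ret;

	/* ... validate, map or fence the buffer ... */

	vmw_dmabuf_unreference(&buf);
	return 0;
}
#endif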

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

//   ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

#if 0
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;


	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_unlock;
	}


	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
#endif

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}
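
/*
 * Sketch (illustrative only): the overlay ioctl path is the expected
 * caller; *inout_id is translated in place from the user-visible handle
 * to the device stream id.
 */
#if 0
static int example_stream_use(struct vmw_private *dev_priv,
			      struct ttm_object_file *tfile,
			      uint32_t user_id)
{
	struct vmw_resource *res;
	uint32_t stream_id = user_id;
	int ret = vmw_user_stream_lookup(dev_priv, tfile, &stream_id, &res);

	if (unlikely(ret != 0))
		return ret;

	/* ... program the overlay unit using stream_id ... */

	vmw_resource_unreference(&res);
	return 0;
}
#endif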

#if 0
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (vmw_user_bo == NULL)
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (ret != 0) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (ret != 0)
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;

	args->handle = vmw_user_bo->base.hash.key;

out_no_base_object:
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
#endif

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}

int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {

		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		lockdep_assert_held(&new_backup->base.resv->lock.base);
		list_add_tail(&res->mob_head, &new_backup->res_list);
	}
	if (new_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @ticket:         The ww acquire context used when reserving the buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  struct ww_acquire_ctx *ticket,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @no_backup:      Whether to skip allocating a backup buffer even for
 *                  resources that need one.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, true);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
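
/*
 * Sketch (illustrative only) of the reserve -> validate -> unreserve
 * cycle that command submission drives. In the real execbuf path the
 * backup buffers are reserved and fenced between these steps; that
 * locking is elided here.
 */
#if 0
static int example_submission_cycle(struct vmw_resource *res)
{
	int ret = vmw_resource_reserve(res, false);

	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_validate(res);
	if (unlikely(ret != 0))
		return ret;

	/* ... emit device commands that reference res->id ... */

	vmw_resource_unreserve(res, NULL, 0);	/* back on the LRU if evictable */
	return 0;
}
#endif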

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire context used when reserving the buffer.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 */
int vmw_resource_do_evict(struct vmw_resource *res)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	struct ww_acquire_ctx ticket;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&ticket, &val_buf);

	return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;

	if (likely(!res->func->may_evict))
		return 0;

	val_buf.bo = NULL;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device id entries "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);
		vmw_resource_do_evict(evict_res);
		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct vmw_fence_obj *old_fence_obj;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		driver->sync_obj_ref(fence);

	spin_lock(&bdev->fence_lock);

	old_fence_obj = bo->sync_obj;
	bo->sync_obj = fence;

	spin_unlock(&bdev->fence_lock);

	if (old_fence_obj)
		vmw_fence_obj_unreference(&old_fence_obj);
}
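
/*
 * Sketch (illustrative only): typical post-submission fencing of a
 * resource's backup buffer. Passing fence == NULL makes the function
 * above emit a fence into the command stream itself.
 */
#if 0
static void example_fence_backup(struct vmw_resource *res)
{
	if (res->backup)
		vmw_fence_single_bo(&res->backup->base, NULL);
}
#endif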

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo:             The TTM buffer object about to move.
 * @mem:            The struct ttm_mem_reg indicating to what memory
 *                  region the move is taking place.
 *
 * For now does nothing.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);
		vmw_resource_do_evict(evict_res);
		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}