/**************************************************************************
 *
 * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

/*
 * If we set up the screen target otable, screen objects stop working.
 */

#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)

#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
#else
#define VMW_PPN_SIZE 4
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
#endif
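
/*
 * Worked sketch (illustrative note, not part of the original source):
 * each page table page holds PAGE_SIZE / VMW_PPN_SIZE entries, i.e. with
 * 4 KiB pages 512 entries for 8-byte PPNs or 1024 entries for 4-byte
 * PPNs, so each extra level of indirection multiplies the addressable
 * buffer size by that factor:
 *
 *	depth 0: 1 data page       =   4 KiB
 *	depth 1: 512 data pages    =   2 MiB   (8-byte PPNs)
 *	depth 2: 512^2 data pages  =   1 GiB
 */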

/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @num_pages:      Number of pages that make up the page table.
 * @pt_level:       The indirection level of the page table. 0-2.
 * @pt_root_page:   DMA address of the level 0 page of the page table.
 */
struct vmw_mob {
	struct ttm_buffer_object *pt_bo;
	unsigned long num_pages;
	unsigned pt_level;
	dma_addr_t pt_root_page;
	uint32_t id;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size:           Size of the table (page-aligned).
 * @page_table:     Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages);

/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 * @offset:         Start of table offset into dev_priv::otable_bo
 * @otable:         Pointer to otable metadata.
 *
 * This function returns -ENOMEM if it fails to reserve fifo space,
 * and may block waiting for fifo space.
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
				 SVGAOTableType type,
				 unsigned long offset,
				 struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase64 body;
	} *cmd;
	struct vmw_mob *mob;
	const struct vmw_sg_table *vsgt;
	struct vmw_piter iter;
	int ret;

	BUG_ON(otable->page_table != NULL);

	vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
	WARN_ON(!vmw_piter_next(&iter));

	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
	if (unlikely(mob == NULL)) {
		DRM_ERROR("Failed creating OTable page table.\n");
		return -ENOMEM;
	}

	if (otable->size <= PAGE_SIZE) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			goto out_no_populate;

		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
	cmd->body.sizeInBytes = otable->size;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = mob->pt_level;

	/*
	 * The device doesn't support this, but the otable size is
	 * determined at compile-time, so this BUG shouldn't trigger
	 * randomly.
	 */
	BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	otable->page_table = mob;

	return 0;

out_no_fifo:
out_no_populate:
	vmw_mob_destroy(mob);
	return ret;
}

/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 * @otable:         Pointer to otable metadata.
 */
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
				     SVGAOTableType type,
				     struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase body;
	} *cmd;
	struct ttm_buffer_object *bo;

	if (otable->page_table == NULL)
		return;

	bo = otable->page_table->pt_bo;
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable takedown.\n");
	} else {
		memset(cmd, 0, sizeof(*cmd));
		cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.type = type;
		cmd->body.baseAddress = 0;
		cmd->body.sizeInBytes = 0;
		cmd->body.validSizeInBytes = 0;
		cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}

	if (bo) {
		int ret;

		ret = ttm_bo_reserve(bo, false, true, false, NULL);
		BUG_ON(ret != 0);

		vmw_fence_single_bo(bo, NULL);
		ttm_bo_unreserve(bo);
	}

	vmw_mob_destroy(otable->page_table);
	otable->page_table = NULL;
}

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Takes care of the device guest backed surface initialization,
 * by setting up the guest backed memory object tables.
 * Returns 0 on success and various error codes on failure. A successful return
 * means the object tables can be taken down using the vmw_otables_takedown
 * function.
 */
int vmw_otables_setup(struct vmw_private *dev_priv)
{
	unsigned long offset;
	unsigned long bo_size;
	struct vmw_otable *otables;
	SVGAOTableType i;
	int ret;

	otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
			  GFP_KERNEL);
	if (unlikely(otables == NULL)) {
		DRM_ERROR("Failed to allocate space for otable "
			  "metadata.\n");
		return -ENOMEM;
	}

	otables[SVGA_OTABLE_MOB].size =
		VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
	otables[SVGA_OTABLE_SURFACE].size =
		VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
	otables[SVGA_OTABLE_CONTEXT].size =
		VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
	otables[SVGA_OTABLE_SHADER].size =
		VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
	otables[SVGA_OTABLE_SCREEN_TARGET].size =
		VMWGFX_NUM_GB_SCREEN_TARGET *
		SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;

	bo_size = 0;
	for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
		otables[i].size =
			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
		bo_size += otables[i].size;
	}

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL,
			    &dev_priv->otable_bo);

	if (unlikely(ret != 0))
		goto out_no_bo;

	ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(dev_priv->otable_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(dev_priv->otable_bo);

	offset = 0;
	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
		ret = vmw_setup_otable_base(dev_priv, i, offset,
					    &otables[i]);
		if (unlikely(ret != 0))
			goto out_no_setup;
		offset += otables[i].size;
	}

	dev_priv->otables = otables;
	return 0;

out_unreserve:
	ttm_bo_unreserve(dev_priv->otable_bo);
out_no_setup:
	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
		vmw_takedown_otable_base(dev_priv, i, &otables[i]);

	ttm_bo_unref(&dev_priv->otable_bo);
out_no_bo:
	kfree(otables);
	return ret;
}
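
/*
 * Pairing sketch (illustrative only): per the comment above, the driver
 * is expected to call setup and takedown once each, around the lifetime
 * of guest-backed object support.
 *
 *	ret = vmw_otables_setup(dev_priv);
 *	if (ret != 0)
 *		return ret;
 *	...
 *	vmw_otables_takedown(dev_priv);
 */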

/*
 * vmw_otables_takedown - Take down guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Take down the Guest Memory Object tables.
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
	SVGAOTableType i;
	struct ttm_buffer_object *bo = dev_priv->otable_bo;
	int ret;

	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
		vmw_takedown_otable_base(dev_priv, i,
					 &dev_priv->otables[i]);

	ret = ttm_bo_reserve(bo, false, true, false, NULL);
	BUG_ON(ret != 0);

	vmw_fence_single_bo(bo, NULL);
	ttm_bo_unreserve(bo);

	ttm_bo_unref(&dev_priv->otable_bo);
	kfree(dev_priv->otables);
	dev_priv->otables = NULL;
}

/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.
 *
 * @data_pages:  Number of data pages in the memory object buffer.
 */
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;

	while (likely(data_size > PAGE_SIZE)) {
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		data_size *= VMW_PPN_SIZE;
		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
	}

	return tot_size >> PAGE_SHIFT;
}
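
/*
 * Worked example (illustrative, assuming 4 KiB pages and VMW_PPN_SIZE 8):
 * a 4 MiB buffer has 1024 data pages.
 *
 *	pass 1: DIV_ROUND_UP(4 MiB, 4 KiB) = 1024 entries * 8 bytes
 *	        = 8192 bytes, rounded up to 2 page table pages
 *	pass 2: DIV_ROUND_UP(8192, 4096) = 2 entries * 8 bytes
 *	        = 16 bytes, rounded up to 1 page table page
 *
 * 16 bytes <= PAGE_SIZE ends the loop, so vmw_mob_calculate_pt_pages()
 * returns 3.
 */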

/*
 * vmw_mob_create - Create a mob, but don't populate it.
 *
 * @data_pages:  Number of data pages of the underlying buffer object.
 */
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

	if (unlikely(mob == NULL))
		return NULL;

	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);

	return mob;
}

/*
 * vmw_mob_pt_populate - Populate the mob pagetable
 *
 * @mob:         Pointer to the mob whose pagetable we want to populate.
 *
 * This function allocates memory to be used for the pagetable, and
 * adjusts TTM memory accounting accordingly. Returns -ENOMEM if
 * memory resources aren't sufficient; through the TTM memory accounting
 * it may cause other TTM buffer objects to be swapped out.
 */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob)
{
	int ret;
	BUG_ON(mob->pt_bo != NULL);

	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL, &mob->pt_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL);

	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(mob->pt_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(mob->pt_bo);

	return 0;

out_unreserve:
	ttm_bo_unreserve(mob->pt_bo);
	ttm_bo_unref(&mob->pt_bo);

	return ret;
}

/**
 * vmw_mob_assign_ppn - Assign a value to a page table entry
 *
 * @addr: Pointer to pointer to page table entry.
 * @val: The page table entry
 *
 * Assigns a value to a page table entry pointed to by *@addr and increments
 * *@addr according to the page table entry size.
 */
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
{
	*((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
	*addr += 2;
}
#else
static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
{
	*(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
}
#endif
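
/*
 * Usage sketch (illustrative only; 'entry' and the DMA addresses are
 * placeholders): filling two consecutive entries. On a 64-bit build each
 * call stores one little-endian 64-bit PPN and advances the __le32 cursor
 * by two slots; on a 32-bit build it stores and advances one slot.
 *
 *	__le32 *entry = mapped_pt_page;
 *	vmw_mob_assign_ppn(&entry, page0_dma_addr);
 *	vmw_mob_assign_ppn(&entry, page1_dma_addr);
 */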

/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_iter:      Page iterator over the DMA addresses of the underlying
 *                  buffer object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_iter:        Page iterator over the page table pages.
 *
 * Returns the number of page table pages actually used.
 * Maps each page table page into a temporary kernel window while it is
 * being filled.
 */
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
				      unsigned long num_data_pages,
				      struct vmw_piter *pt_iter)
{
	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
	unsigned long pt_page;
	__le32 *addr, *save_addr;
	unsigned long i;
	struct page *page;

	save_addr = addr = AllocKernelSpace(4096);

	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
		page = vmw_piter_page(pt_iter);

		MapPage(save_addr, (addr_t)page, 3);

		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
			vmw_mob_assign_ppn(&addr,
					   vmw_piter_dma_addr(data_iter));
			if (unlikely(--num_data_pages == 0))
				break;
			WARN_ON(!vmw_piter_next(data_iter));
		}
		vmw_piter_next(pt_iter);
	}
	FreeKernelSpace(save_addr);
	return num_pt_pages;
}

/*
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob:            Pointer to a mob whose page table needs setting up.
 * @data_iter:      Page iterator over the DMA addresses of the buffer
 *                  object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Builds a multilevel mob page table iteratively: each pass writes one
 * table level whose entries point at the pages produced by the previous
 * pass.
 */
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages)
{
	unsigned long num_pt_pages = 0;
	struct ttm_buffer_object *bo = mob->pt_bo;
	struct vmw_piter save_pt_iter;
	struct vmw_piter pt_iter;
	const struct vmw_sg_table *vsgt;
	int ret;

	ret = ttm_bo_reserve(bo, false, true, false, NULL);
	BUG_ON(ret != 0);

	vsgt = vmw_bo_sg_table(bo);
	vmw_piter_start(&pt_iter, vsgt, 0);
	BUG_ON(!vmw_piter_next(&pt_iter));
	mob->pt_level = 0;
	while (likely(num_data_pages > 1)) {
		++mob->pt_level;
		BUG_ON(mob->pt_level > 2);
		save_pt_iter = pt_iter;
		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
						&pt_iter);
		data_iter = save_pt_iter;
		num_data_pages = num_pt_pages;
	}

	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
	ttm_bo_unreserve(bo);
}
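
/*
 * Worked example (illustrative, 4 KiB pages and 8-byte PPNs, i.e. 512
 * entries per page table page): for 100000 data pages the loop above
 * runs twice.
 *
 *	pass 1: ceil(100000 / 512) = 196 page table pages, pt_level = 1
 *	pass 2: ceil(196 / 512)    =   1 page table page,  pt_level = 2
 *
 * num_data_pages is then 1, the loop ends, and pt_root_page is the DMA
 * address of the single level-2 page.
 */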

/*
 * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
 *
 * @mob:            Pointer to a mob to destroy.
 */
void vmw_mob_destroy(struct vmw_mob *mob)
{
	if (mob->pt_bo)
		ttm_bo_unref(&mob->pt_bo);
	kfree(mob);
}

/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob:            Pointer to the mob to unbind.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
		    struct vmw_mob *mob)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBMob body;
	} *cmd;
	int ret;
	struct ttm_buffer_object *bo = mob->pt_bo;

	if (bo) {
		ret = ttm_bo_reserve(bo, false, true, false, NULL);
		/*
		 * No one else should be using this buffer.
		 */
		BUG_ON(ret != 0);
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory "
			  "Object unbinding.\n");
	} else {
		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.mobid = mob->id;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}
	if (bo) {
		vmw_fence_single_bo(bo, NULL);
		ttm_bo_unreserve(bo);
	}
	vmw_3d_resource_dec(dev_priv, false);
}

/*
 * vmw_mob_bind - Make a mob visible to the device after first
 *                populating it if necessary.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob:            Pointer to the mob we're making visible.
 * @vsgt:           Pointer to a struct vmw_sg_table describing the data
 *                  pages of the underlying buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 *                  object.
 * @mob_id:         Device id of the mob to bind
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */
int vmw_mob_bind(struct vmw_private *dev_priv,
		 struct vmw_mob *mob,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_data_pages,
		 int32_t mob_id)
{
	int ret;
	bool pt_set_up = false;
	struct vmw_piter data_iter;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBMob64 body;
	} *cmd;

	mob->id = mob_id;
	vmw_piter_start(&data_iter, vsgt, 0);
	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	if (likely(num_data_pages == 1)) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (unlikely(mob->pt_bo == NULL)) {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			return ret;

		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
		pt_set_up = true;
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	(void) vmw_3d_resource_inc(dev_priv, false);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory "
			  "Object binding.\n");
		goto out_no_cmd_space;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob_id;
	cmd->body.ptDepth = mob->pt_level;
	cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;

out_no_cmd_space:
	vmw_3d_resource_dec(dev_priv, false);
	if (pt_set_up)
		ttm_bo_unref(&mob->pt_bo);

	return -ENOMEM;
}
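
/*
 * Lifecycle sketch (illustrative only; 'vsgt', 'num_pages' and 'mob_id'
 * are placeholders): how the ttm_tt backend glue is expected to drive
 * the mob API in this file.
 *
 *	struct vmw_mob *mob = vmw_mob_create(num_pages);
 *	if (mob == NULL)
 *		return -ENOMEM;
 *	ret = vmw_mob_bind(dev_priv, mob, vsgt, num_pages, mob_id);
 *	if (ret != 0) {
 *		vmw_mob_destroy(mob);
 *		return ret;
 *	}
 *	...
 *	vmw_mob_unbind(dev_priv, mob);
 *	vmw_mob_destroy(mob);
 */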