/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

void vmw_du_cleanup(struct vmw_display_unit *du)
{
//	if (du->cursor_surface)
//		vmw_surface_unreference(&du->cursor_surface);
//	if (du->cursor_dmabuf)
//		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}
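
/*
 * Editorial note (KolibriOS port): upstream vmw_du_cleanup() also drops the
 * references held in du->cursor_surface and du->cursor_dmabuf; that teardown
 * is commented out above, presumably because this port never takes those
 * references in the first place (the cursor_set2/cursor_move paths further
 * down are compiled out with #if 0).
 */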

/*
 * Display Unit Cursor functions
 */

int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;
	u32 *dst;
	int i, j;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	dst = (u32 *)&cmd[1];

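	/*
	 * Editorial note: the loops below copy a 32x32 source image into the
	 * top-left corner of the command payload and zero-fill the remaining
	 * columns and rows, apparently because the KolibriOS system cursor is
	 * 32x32 while this function is called with a 64x64 cursor. This
	 * assumes width == height == 64; with other sizes the reserved
	 * cmd_size and the amount written here would disagree.
	 */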
	for (i = 0; i < 32; i++) {
		for (j = 0; j < 32; j++)
			*dst++ = *image++;
		for (; j < 64; j++)
			*dst++ = 0;
	}
	for (i = 0; i < 64 * (64 - 32); i++)
		*dst++ = 0;

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_fifo_commit_flush(dev_priv, cmd_size);

	return 0;
}

#if 0
int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
			     struct vmw_dma_buffer *dmabuf,
			     u32 width, u32 height,
			     u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width * height * 4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&dmabuf->base);

	return ret;
}
#endif

void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}
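
/*
 * Editorial note: the cursor position is not queued through the command
 * FIFO; it is written to dedicated words in the FIFO memory area
 * (SVGA_FIFO_CURSOR_ON/_X/_Y). Incrementing SVGA_FIFO_CURSOR_COUNT last
 * acts as the doorbell that tells the host a complete new position has
 * been posted, so the write ordering above matters.
 */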

#if 0
/*
 * vmw_du_crtc_cursor_set2 - Driver cursor_set2 callback.
 */
int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
			    uint32_t handle, uint32_t width, uint32_t height,
			    int32_t hot_x, int32_t hot_y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	s32 hotspot_x, hotspot_y;
	int ret;

	/*
	 * FIXME: Unclear whether there's any global state touched by the
	 * cursor_set function, especially vmw_cursor_update_position looks
	 * suspicious. For now take the easy route and reacquire all locks. We
	 * can do this since the caller in the drm core doesn't check anything
	 * which is protected by any locks.
	 */
	drm_modeset_unlock_crtc(crtc);
	drm_modeset_lock_all(dev_priv->dev);
	hotspot_x = hot_x + du->hotspot_x;
	hotspot_y = hot_y + du->hotspot_y;

	/* A lot of the code assumes this */
	if (handle && (width != 64 || height != 64)) {
		ret = -EINVAL;
		goto out;
	}

	if (handle) {
		struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

		ret = vmw_user_lookup_handle(dev_priv, tfile,
					     handle, &surface, &dmabuf);
		if (ret) {
			DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
			ret = -EINVAL;
			goto out;
		}
	}

	/* need to do this before taking down old image */
	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		vmw_surface_unreference(&surface);
		ret = -EINVAL;
		goto out;
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	ret = 0;
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
					      64, 64, hotspot_x, hotspot_y);
	} else if (dmabuf) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
					       hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		goto out;
	}

	if (!ret) {
		vmw_cursor_update_position(dev_priv, true,
					   du->cursor_x + hotspot_x,
					   du->cursor_y + hotspot_y);
		du->core_hotspot_x = hot_x;
		du->core_hotspot_y = hot_y;
	}

out:
	drm_modeset_unlock_all(dev_priv->dev);
	drm_modeset_lock_crtc(crtc, crtc->cursor);

	return ret;
}

int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;

	du->cursor_x = x + crtc->x;
	du->cursor_y = y + crtc->y;

	/*
	 * FIXME: Unclear whether there's any global state touched by the
	 * cursor_set function, especially vmw_cursor_update_position looks
	 * suspicious. For now take the easy route and reacquire all locks. We
	 * can do this since the caller in the drm core doesn't check anything
	 * which is protected by any locks.
	 */
	drm_modeset_unlock_crtc(crtc);
	drm_modeset_lock_all(dev_priv->dev);

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x + du->hotspot_x +
				   du->core_hotspot_x,
				   du->cursor_y + du->hotspot_y +
				   du->core_hotspot_y);

	drm_modeset_unlock_all(dev_priv->dev);
	drm_modeset_lock_crtc(crtc, crtc->cursor);

	return 0;
}

void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
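
/*
 * Editorial note: "snooping" exists because the cursor image for a
 * surface-backed cursor lives in a device surface the kernel cannot read
 * back cheaply. vmw_kms_cursor_snoop() is called during command submission
 * when a SurfaceDMA targeting a cursor surface passes by, and it copies
 * the 64x64 ARGB image into srf->snooper.image so that later
 * vmw_cursor_update_image() calls (e.g. from vmw_kms_cursor_post_execbuf()
 * below) can re-emit the current image.
 */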

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}
#endif

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	if (vfbs->base.user_obj)
		ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}

static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  struct drm_file *file_priv,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct drm_clip_rect norect;
	int ret, inc = 1;

	/* Legacy Display Unit does not support 3D */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -EINVAL;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	if (dev_priv->active_display_unit == vmw_du_screen_object)
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
						   clips, NULL, NULL, 0, 0,
						   num_clips, inc, NULL);
	else
		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
						 clips, NULL, NULL, 0, 0,
						 num_clips, inc, NULL);

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return 0;
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}

static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd
					   *mode_cmd,
					   bool is_dmabuf_proxy)

{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->scanout))
		return -EINVAL;

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->base_size.width < mode_cmd->width ||
		     surface->base_size.height < mode_cmd->height ||
		     surface->base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->depth) {
	case 32:
		format = SVGA3D_A8R8G8B8;
		break;
	case 24:
		format = SVGA3D_X8R8G8B8;
		break;
	case 16:
		format = SVGA3D_R5G6B5;
		break;
	case 15:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!dev_priv->has_dx && format != surface->format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	/* XXX get the first 3 from the surface info */
	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbs->base.base.pitches[0] = mode_cmd->pitch;
	vfbs->base.base.depth = mode_cmd->depth;
	vfbs->base.base.width = mode_cmd->width;
	vfbs->base.base.height = mode_cmd->height;
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handle;
	vfbs->is_dmabuf_proxy = is_dmabuf_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Dmabuf framebuffer code
 */

static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);
	if (vfbd->base.user_obj)
		ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}

static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
				 struct drm_file *file_priv,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
				       clips, NULL, num_clips, increment,
				       true, true);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
						  clips, num_clips, increment,
						  true,
						  NULL);
		break;
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
						  clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}

static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
};

/**
 * Pin the dma buffer backing the framebuffer. Only the legacy display unit
 * actually requires it at the start of VRAM; see the placement switch below.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;
	int ret;

	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->dmabuf)
			return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
							     false);

		return vmw_dmabuf_pin_in_placement(dev_priv, buf,
						   &vmw_mob_placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}
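
/*
 * Editorial note on placement (rationale as read from the code above): the
 * Legacy Display Unit scans out directly from guest VRAM, so its
 * framebuffer is pinned at the start of VRAM, with overlays paused while it
 * moves. Screen Object/Screen Target units can also scan out of a GMR, so
 * a dmabuf-backed framebuffer may sit in VRAM or a GMR, while a
 * surface-backed one is pinned via the MOB placement.
 */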

static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;

	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_dmabuf_unpin(dev_priv, buf, false);
}

/**
 * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @dmabuf_mob: MOB backing the DMA buf
 * @srf_out: newly created surface
 *
 * When the content FB is a DMA buf, we create a surface as a proxy to the
 * same buffer. This way we can do a surface copy rather than a surface DMA,
 * which is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_dmabuf_proxy(struct drm_device *dev,
				   const struct drm_mode_fb_cmd *mode_cmd,
				   struct vmw_dma_buffer *dmabuf_mob,
				   struct vmw_surface **srf_out)
{
	uint32_t format;
	struct drm_vmw_size content_base_size;
	struct vmw_resource *res;
	int ret;

	switch (mode_cmd->depth) {
	case 32:
	case 24:
		format = SVGA3D_X8R8G8B8;
		break;

	case 16:
	case 15:
		format = SVGA3D_R5G6B5;
		break;

	case 8:
		format = SVGA3D_P8;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
		return -EINVAL;
	}

	content_base_size.width  = mode_cmd->width;
	content_base_size.height = mode_cmd->height;
	content_base_size.depth  = 1;

	ret = vmw_surface_gb_priv_define(dev,
			0, /* kernel visible only */
			0, /* flags */
			format,
			true, /* can be a scanout buffer */
			1, /* num of mip levels */
			0,
			0,
			content_base_size,
			srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_dmabuf_unreference(&res->backup);
	res->backup = vmw_dmabuf_reference(dmabuf_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}
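
/*
 * Editorial note: the proxy trick works because a guest-backed surface is
 * essentially metadata plus a backing MOB. By swapping the freshly created
 * surface's backup buffer for the framebuffer's own dmabuf (under
 * cmdbuf_mutex, with the resource reserved), the surface aliases the very
 * same memory. Presents can then use SurfaceCopy instead of SurfaceDMA,
 * which vmw_kms_new_framebuffer() below relies on for 2D-only
 * screen-target setups.
 */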
834
 
6296 serge 835
 
836
 
4075 Serge 837
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
838
					  struct vmw_dma_buffer *dmabuf,
839
					  struct vmw_framebuffer **out,
840
					  const struct drm_mode_fb_cmd
841
					  *mode_cmd)
842
 
843
{
844
	struct drm_device *dev = dev_priv->dev;
845
	struct vmw_framebuffer_dmabuf *vfbd;
846
	unsigned int requested_size;
847
	int ret;
848
 
849
	requested_size = mode_cmd->height * mode_cmd->pitch;
850
	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
851
		DRM_ERROR("Screen buffer object size is too small "
852
			  "for requested mode.\n");
853
		return -EINVAL;
854
	}
855
 
856
	/* Limited framebuffer color depth support for screen objects */
6296 serge 857
	if (dev_priv->active_display_unit == vmw_du_screen_object) {
4075 Serge 858
		switch (mode_cmd->depth) {
859
		case 32:
860
		case 24:
861
			/* Only support 32 bpp for 32 and 24 depth fbs */
862
			if (mode_cmd->bpp == 32)
863
				break;
864
 
865
			DRM_ERROR("Invalid color depth/bbp: %d %d\n",
866
				  mode_cmd->depth, mode_cmd->bpp);
867
			return -EINVAL;
868
		case 16:
869
		case 15:
870
			/* Only support 16 bpp for 16 and 15 depth fbs */
871
			if (mode_cmd->bpp == 16)
872
				break;
873
 
874
			DRM_ERROR("Invalid color depth/bbp: %d %d\n",
875
				  mode_cmd->depth, mode_cmd->bpp);
876
			return -EINVAL;
877
		default:
878
			DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
879
			return -EINVAL;
880
		}
881
	}
882
 
883
	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
884
	if (!vfbd) {
885
		ret = -ENOMEM;
886
		goto out_err1;
887
	}
888
 
889
	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
890
	vfbd->base.base.pitches[0] = mode_cmd->pitch;
891
	vfbd->base.base.depth = mode_cmd->depth;
892
	vfbd->base.base.width = mode_cmd->width;
893
	vfbd->base.base.height = mode_cmd->height;
894
	vfbd->base.dmabuf = true;
6296 serge 895
	vfbd->buffer = vmw_dmabuf_reference(dmabuf);
4075 Serge 896
	vfbd->base.user_handle = mode_cmd->handle;
897
	*out = &vfbd->base;
898
 
899
	ret = drm_framebuffer_init(dev, &vfbd->base.base,
900
				   &vmw_framebuffer_dmabuf_funcs);
901
	if (ret)
6296 serge 902
		goto out_err2;
4075 Serge 903
 
904
	return 0;
905
 
6296 serge 906
out_err2:
4075 Serge 907
	vmw_dmabuf_unreference(&dmabuf);
908
	kfree(vfbd);
909
out_err1:
910
	return ret;
911
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @only_2d: No presents will occur to this dma buffer based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_dma_buffer *dmabuf,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_dmabuf_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the DMA buf in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (dmabuf && only_2d &&
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
					      dmabuf, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_dmabuf_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_dmabuf_proxy);

		/*
		 * vmw_create_dmabuf_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_dmabuf_proxy)
			vmw_surface_unreference(&surface);
	} else if (dmabuf) {
		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
						     mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}
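
/*
 * Illustrative call sequence (editorial sketch, not driver code; names as
 * used in this file). A caller holding a dmabuf-backed buffer object would
 * typically do something like:
 *
 *	vfb = vmw_kms_new_framebuffer(dev_priv, bo, NULL,
 *				      !(dev_priv->capabilities & SVGA_CAP_3D),
 *				      &mode_cmd);
 *	if (IS_ERR(vfb))
 *		return PTR_ERR(vfb);
 *	ret = vfb->pin(vfb);	// place the backing store for scanout
 *
 * which is essentially what vmw_kms_fb_create() below does, minus the pin;
 * the display-unit code appears to perform that at modeset time.
 */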

/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 struct drm_mode_fb_cmd2 *mode_cmd2)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	struct ttm_base_object *user_obj;
	struct drm_mode_fb_cmd mode_cmd;
	int ret;

	mode_cmd.width = mode_cmd2->width;
	mode_cmd.height = mode_cmd2->height;
	mode_cmd.pitch = mode_cmd2->pitches[0];
	mode_cmd.handle = mode_cmd2->handles[0];
	drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
			     &mode_cmd.bpp);

	/**
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */

	if (!vmw_kms_validate_mode_vram(dev_priv,
					mode_cmd.pitch,
					mode_cmd.height)) {
		DRM_ERROR("Requested mode exceeds bounding box limit.\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */

	user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/**
	 * End conditioned code.
	 */

	/* returns either a dmabuf or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd.handle,
				     &surface, &bo);
	if (ret)
		goto err_out;

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      &mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_dmabuf_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};

static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL);
}

int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_fifo_flush(dev_priv, false);

	return 0;
}
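
/*
 * Editorial note: the explicit vmw_fifo_flush() after a successful present
 * makes sure commands buffered by the command submission layer actually
 * reach the device; the boolean argument appears to select interruptible
 * waiting in this kernel version, and false is used here since present is
 * not restartable at this point.
 */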

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret;

	/*
	 * Docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	if (dev_priv->active_display_unit == vmw_du_screen_object)
		ret = vmw_kms_sou_close_display(dev_priv);
	else if (dev_priv->active_display_unit == vmw_du_screen_target)
		ret = vmw_kms_stdu_close_display(dev_priv);
	else
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}

#if 0
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
#endif

int vmw_kms_write_svga(struct vmw_private *vmw_priv,
			unsigned width, unsigned height, unsigned pitch,
			unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
			       SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}
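
/*
 * Editorial note: the pitch lock can live in one of two places depending on
 * device capabilities, a dedicated register (SVGA_CAP_PITCHLOCK) or a word
 * in FIFO memory (SVGA_FIFO_PITCHLOCK), which is why vmw_kms_write_svga()
 * above and the save/restore helpers below probe the capability bit first
 * and fall back to the FIFO variant.
 */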

int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
							SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}

int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(vmw_priv->vga_pitchlock,
			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}

bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->prim_bb_mem : dev_priv->vram_size);
}
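
/*
 * Worked example (editorial): for a 1920x1080 mode at 32 bpp the pitch is
 * 1920 * 4 = 7680 bytes, so the check above computes 7680 * 1080 =
 * 8,294,400 bytes (~7.9 MiB), which must be smaller than vram_size (or
 * prim_bb_mem on screen-target hardware) for the mode to be accepted.
 */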

/**
 * Function called by DRM code called with vbl_lock held.
 */
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	return 0;
}

/**
 * Function called by DRM code called with vbl_lock held.
 */
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	return -ENOSYS;
}

/**
 * Function called by DRM code called with vbl_lock held.
 */
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
}

/*
 * Small shared kms functions.
 */

static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
			 struct drm_vmw_rect *rects)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_connector *con;

	mutex_lock(&dev->mode_config.mutex);

#if 0
	{
		unsigned int i;

		DRM_INFO("%s: new layout ", __func__);
		for (i = 0; i < num; i++)
			DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
				 rects[i].w, rects[i].h);
		DRM_INFO("\n");
	}
#endif

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num > du->unit) {
			du->pref_width = rects[du->unit].w;
			du->pref_height = rects[du->unit].h;
			du->pref_active = true;
			du->gui_x = rects[du->unit].x;
			du->gui_y = rects[du->unit].y;
		} else {
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

void vmw_du_crtc_save(struct drm_crtc *crtc)
{
}

void vmw_du_crtc_restore(struct drm_crtc *crtc)
{
}

void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			   u16 *r, u16 *g, u16 *b,
			   uint32_t start, uint32_t size)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}
}

int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}

void vmw_du_connector_save(struct drm_connector *connector)
{
}

void vmw_du_connector_restore(struct drm_connector *connector)
{
}

enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

	return ((vmw_connector_to_du(connector)->unit < num_displays &&
		 du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}

static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1080@60Hz */
	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
		   .vrefresh = 60, },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
/*	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, */
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
	mode->vrefresh = drm_mode_vrefresh(mode);
}
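
/*
 * Worked example (editorial): for a 1024x768 preferred mode the paddings
 * above give htotal = 1024 + 150 = 1174 and vtotal = 768 + 150 = 918, so
 * clock = 1174 * 918 / 100 * 6 = 64662 kHz, and drm_mode_vrefresh() then
 * recovers roughly 64662000 / (1174 * 918) ~= 60 Hz, which is the point of
 * the formula.
 */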

int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 2;

	/*
	 * If using screen objects, then assume 32-bpp because that's what the
	 * SVGA device is assuming
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_object)
		assumed_bpp = 4;

	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width  = min(max_width,  dev_priv->stdu_max_width);
		max_height = min(max_height, dev_priv->stdu_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
				       mode->hdisplay * assumed_bpp,
				       mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode might be null here, this is intended */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	drm_mode_connector_list_update(connector, true);
	/* Move the preferred mode first, help apps pick the right mode. */
	drm_mode_sort(&connector->modes);

	return 1;
}

int vmw_du_connector_set_property(struct drm_connector *connector,
				  struct drm_property *property,
				  uint64_t val)
{
	return 0;
}

1629
#if 0
1630
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1631
				struct drm_file *file_priv)
1632
{
1633
	struct vmw_private *dev_priv = vmw_priv(dev);
1634
	struct drm_vmw_update_layout_arg *arg =
1635
		(struct drm_vmw_update_layout_arg *)data;
1636
	void __user *user_rects;
1637
	struct drm_vmw_rect *rects;
1638
	unsigned rects_size;
1639
	int ret;
1640
	int i;
6296 serge 1641
	u64 total_pixels = 0;
4075 Serge 1642
	struct drm_mode_config *mode_config = &dev->mode_config;
6296 serge 1643
	struct drm_vmw_rect bounding_box = {0};
4075 Serge 1644
 
1645
	if (!arg->num_outputs) {
1646
		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
1647
		vmw_du_update_layout(dev_priv, 1, &def_rect);
6296 serge 1648
		return 0;
4075 Serge 1649
	}
1650
 
1651
	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
1652
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
1653
			GFP_KERNEL);
6296 serge 1654
	if (unlikely(!rects))
1655
		return -ENOMEM;
4075 Serge 1656
 
1657
	user_rects = (void __user *)(unsigned long)arg->rects;
1658
	ret = copy_from_user(rects, user_rects, rects_size);
1659
	if (unlikely(ret != 0)) {
1660
		DRM_ERROR("Failed to get rects.\n");
1661
		ret = -EFAULT;
1662
		goto out_free;
1663
	}
1664
 
1665
	for (i = 0; i < arg->num_outputs; ++i) {
1666
		if (rects[i].x < 0 ||
1667
		    rects[i].y < 0 ||
1668
		    rects[i].x + rects[i].w > mode_config->max_width ||
1669
		    rects[i].y + rects[i].h > mode_config->max_height) {
1670
			DRM_ERROR("Invalid GUI layout.\n");
1671
			ret = -EINVAL;
1672
			goto out_free;
1673
		}
6296 serge 1674
 
1675
		/*
1676
		 * bounding_box.w and bunding_box.h are used as
1677
		 * lower-right coordinates
1678
		 */
1679
		if (rects[i].x + rects[i].w > bounding_box.w)
1680
			bounding_box.w = rects[i].x + rects[i].w;
1681
 
1682
		if (rects[i].y + rects[i].h > bounding_box.h)
1683
			bounding_box.h = rects[i].y + rects[i].h;
1684
 
1685
		total_pixels += (u64) rects[i].w * (u64) rects[i].h;
4075 Serge 1686
	}
1687
 
6296 serge 1688
	if (dev_priv->active_display_unit == vmw_du_screen_target) {
1689
		/*
1690
		 * For Screen Targets, the limits for a toplogy are:
1691
		 *	1. Bounding box (assuming 32bpp) must be < prim_bb_mem
1692
		 *      2. Total pixels (assuming 32bpp) must be < prim_bb_mem
1693
		 */
1694
		u64 bb_mem    = bounding_box.w * bounding_box.h * 4;
1695
		u64 pixel_mem = total_pixels * 4;
1696
 
1697
		if (bb_mem > dev_priv->prim_bb_mem) {
1698
			DRM_ERROR("Topology is beyond supported limits.\n");
1699
			ret = -EINVAL;
1700
			goto out_free;
1701
		}
1702
 
1703
		if (pixel_mem > dev_priv->prim_bb_mem) {
1704
			DRM_ERROR("Combined output size too large\n");
1705
			ret = -EINVAL;
1706
			goto out_free;
1707
		}
1708
	}
1709
 
4075 Serge 1710
	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
1711
 
1712
out_free:
1713
	kfree(rects);
6296 serge 1714
	return ret;
1715
}
1716
#endif
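
/*
 * Worked example for the Screen Target limits above (illustration only):
 * two 1920x1080 outputs side by side give a 3840x1080 bounding box, so
 * bb_mem = 3840 * 1080 * 4 and pixel_mem = 2 * 1920 * 1080 * 4, both
 * 16588800 bytes (~15.8 MiB); the layout is rejected as soon as either
 * value exceeds prim_bb_mem.
 */
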
/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->primary->fb != &framebuffer->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = vmw_fifo_reserve(dev_priv,
						      dirty->fifo_reserve_size);
			if (!dirty->cmd) {
				DRM_ERROR("Couldn't reserve fifo space "
					  "for dirty blits.\n");
				return -ENOMEM;
			}
			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		       vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select clip array type. Note that integer type
			 * in @clips is unsigned short, whereas in @vclips
			 * it's 32-bit.
			 */
			if (clips) {
				dirty->fb_x = (s32) clips_ptr->x1;
				dirty->fb_y = (s32) clips_ptr->y1;
				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
					crtc_x;
				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
					crtc_y;
			} else {
				dirty->fb_x = vclips_ptr->x;
				dirty->fb_y = vclips_ptr->y;
				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
					dest_x - crtc_x;
				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
					dest_y - crtc_y;
			}

			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

			/* Skip this clip if it's outside the crtc region */
			if (dirty->unit_x1 >= crtc_width ||
			    dirty->unit_y1 >= crtc_height ||
			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
				continue;

			/* Clip right and bottom to crtc limits */
			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
					       crtc_width);
			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
					       crtc_height);

			/* Clip left and top to crtc limits */
			clip_left = min_t(s32, dirty->unit_x1, 0);
			clip_top = min_t(s32, dirty->unit_y1, 0);
			dirty->unit_x1 -= clip_left;
			dirty->unit_y1 -= clip_top;
			dirty->fb_x -= clip_left;
			dirty->fb_y -= clip_top;

			dirty->clip(dirty);
		}

		dirty->fifo_commit(dirty);
	}

	return 0;
}
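
/*
 * Example (sketch only, not part of the original file): a minimal closure
 * for vmw_kms_helper_dirty(). Real callers embed struct vmw_kms_dirty in
 * a command-specific structure; the per-blit size below is a hypothetical
 * placeholder.
 */
#if 0
#define EXAMPLE_BLIT_SIZE 32	/* hypothetical encoded-command size */

static void example_dirty_clip(struct vmw_kms_dirty *dirty)
{
	/*
	 * Called once per clip rect that intersects the display unit:
	 * dirty->unit_x1/y1/x2/y2 are already clipped to the crtc and
	 * dirty->fb_x/fb_y give the matching framebuffer origin. A real
	 * implementation would encode one blit into dirty->cmd here.
	 */
	dirty->num_hits++;
}

static void example_dirty_commit(struct vmw_kms_dirty *dirty)
{
	/* Commit only the fifo space the emitted blits actually used. */
	vmw_fifo_commit(dirty->dev_priv, dirty->num_hits * EXAMPLE_BLIT_SIZE);
}
#endif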

/**
 * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
 * command submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @buf: The buffer object.
 * @interruptible: Whether to perform waits as interruptible.
 * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
 * the buffer will be validated as a GMR. Already pinned buffers will not be
 * validated.
 *
 * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
 * interrupted by a signal.
 */
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible,
				  bool validate_as_mob)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ttm_bo_reserve(bo, false, false, interruptible, NULL);
	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
					 validate_as_mob);
	if (ret)
		ttm_bo_unreserve(bo);

	return ret;
}

/**
 * vmw_kms_helper_buffer_revert - Undo the actions of
 * vmw_kms_helper_buffer_prepare.
 *
 * @buf: Pointer to the buffer object.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_buffer_prepare.
 */
void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
{
	if (buf)
		ttm_bo_unreserve(&buf->base);
}

/**
 * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
 * kms command submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @file_priv: Pointer to a struct drm_file representing the caller's
 * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
 * if non-NULL, @user_fence_rep must be non-NULL.
 * @buf: The buffer object.
 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 * @user_fence_rep: Optional pointer to a user-space provided struct
 * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
 * function copies fence data to user-space in a fail-safe manner.
 */
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
				  struct drm_file *file_priv,
				  struct vmw_dma_buffer *buf,
				  struct vmw_fence_obj **out_fence,
				  struct drm_vmw_fence_rep __user *
				  user_fence_rep)
{
	struct vmw_fence_obj *fence;
	uint32_t handle;
	int ret;

	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 file_priv ? &handle : NULL);
	if (buf)
		vmw_fence_single_bo(&buf->base, fence);
	if (file_priv)
		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
					    ret, user_fence_rep, fence,
					    handle);
	if (out_fence)
		*out_fence = fence;
	else
		vmw_fence_obj_unreference(&fence);

	vmw_kms_helper_buffer_revert(buf);
}
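
/*
 * Example (sketch only, not part of the original file): the intended
 * pairing of the buffer helpers around a fifo submission;
 * example_encode_blits() is hypothetical and stands in for the actual
 * command encoding.
 */
#if 0
static int example_buffer_blit(struct vmw_private *dev_priv,
			       struct vmw_dma_buffer *buf)
{
	int ret;

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
	if (ret)
		return ret;

	ret = example_encode_blits(dev_priv, buf);	/* hypothetical */
	if (ret) {
		/* Undo the reservation without fencing anything. */
		vmw_kms_helper_buffer_revert(buf);
		return ret;
	}

	/* Kernel-internal use: no file_priv, no user or kernel fence out. */
	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, NULL, NULL);
	return 0;
}
#endif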


/**
 * vmw_kms_helper_resource_revert - Undo the actions of
 * vmw_kms_helper_resource_prepare.
 *
 * @res: Pointer to the resource. Typically a surface.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_resource_prepare.
 */
void vmw_kms_helper_resource_revert(struct vmw_resource *res)
{
	vmw_kms_helper_buffer_revert(res->backup);
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}

/**
 * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
 * command submission.
 *
 * @res: Pointer to the resource. Typically a surface.
 * @interruptible: Whether to perform waits as interruptible.
 *
 * Also reserves and validates the backup buffer if the resource is
 * guest-backed. Returns 0 on success, negative error code on failure.
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
				    bool interruptible)
{
	int ret = 0;

	if (interruptible)
		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
	else
		mutex_lock(&res->dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_unlock;

	if (res->backup) {
		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
						    interruptible,
						    res->dev_priv->has_mob);
		if (ret)
			goto out_unreserve;
	}
	ret = vmw_resource_validate(res);
	if (ret)
		goto out_revert;
	return 0;

out_revert:
	vmw_kms_helper_buffer_revert(res->backup);
out_unreserve:
	vmw_resource_unreserve(res, false, NULL, 0);
out_unlock:
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
	return ret;
}

/**
 * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
 * kms command submission.
 *
 * @res: Pointer to the resource. Typically a surface.
 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 */
void vmw_kms_helper_resource_finish(struct vmw_resource *res,
				    struct vmw_fence_obj **out_fence)
{
	if (res->backup || out_fence)
		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
					     out_fence, NULL);

	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
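
/*
 * Example (sketch only, not part of the original file): the resource
 * helpers follow the same prepare / revert-on-error / finish pattern as
 * the buffer helpers; example_encode_surface_cmds() is hypothetical.
 */
#if 0
static int example_surface_update(struct vmw_resource *res)
{
	int ret;

	ret = vmw_kms_helper_resource_prepare(res, true);
	if (ret)
		return ret;

	ret = example_encode_surface_cmds(res);	/* hypothetical */
	if (ret) {
		vmw_kms_helper_resource_revert(res);
		return ret;
	}

	/* Fence the backup buffer (if any) and drop the reservation. */
	vmw_kms_helper_resource_finish(res, NULL);
	return 0;
}
#endif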

/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource.
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB need to be reserved and validated on call.
 */
int vmw_kms_update_proxy(struct vmw_resource *res,
			 const struct drm_clip_rect *clips,
			 unsigned num_clips,
			 int increment)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;
	SVGA3dBox *box;
	size_t copy_size = 0;
	int i;

	if (!clips)
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (!cmd) {
		DRM_ERROR("Couldn't reserve fifo space for proxy surface "
			  "update.\n");
		return -ENOMEM;
	}

	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
		box = &cmd->body.box;

		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.image.sid = res->id;
		cmd->body.image.face = 0;
		cmd->body.image.mipmap = 0;

		if (clips->x1 > size->width || clips->x2 > size->width ||
		    clips->y1 > size->height || clips->y2 > size->height) {
			DRM_ERROR("Invalid clips outside of framebuffer.\n");
			return -EINVAL;
		}

		box->x = clips->x1;
		box->y = clips->y1;
		box->z = 0;
		box->w = clips->x2 - clips->x1;
		box->h = clips->y2 - clips->y1;
		box->d = 1;

		copy_size += sizeof(*cmd);
	}

	vmw_fifo_commit(dev_priv, copy_size);

	return 0;
}

int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
			    unsigned unit,
			    u32 max_width,
			    u32 max_height,
			    struct drm_connector **p_con,
			    struct drm_crtc **p_crtc,
			    struct drm_display_mode **p_mode)
{
	struct drm_connector *con;
	struct vmw_display_unit *du;
	struct drm_display_mode *mode;
	int i = 0;

	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
			    head) {
		if (i == unit)
			break;

		++i;
	}

	if (i != unit) {
		DRM_ERROR("Could not find initial display unit.\n");
		return -EINVAL;
	}

	if (list_empty(&con->modes))
		(void) vmw_du_connector_fill_modes(con, max_width, max_height);

	if (list_empty(&con->modes)) {
		DRM_ERROR("Could not find initial display mode.\n");
		return -EINVAL;
	}

	du = vmw_connector_to_du(con);
	*p_con = con;
	*p_crtc = &du->crtc;

	list_for_each_entry(mode, &con->modes, head) {
		if (mode->type & DRM_MODE_TYPE_PREFERRED)
			break;
	}

	if (mode->type & DRM_MODE_TYPE_PREFERRED)
		*p_mode = mode;
	else {
		WARN_ONCE(true, "Could not find initial preferred mode.\n");
		*p_mode = list_first_entry(&con->modes,
					   struct drm_display_mode,
					   head);
	}

	return 0;
}
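
/*
 * Example (sketch only, not part of the original file): how an fbdev
 * setup path might consume the data returned above; the probe function
 * and the 1920x1080 bound are hypothetical.
 */
#if 0
static void example_fbdev_probe(struct vmw_private *dev_priv)
{
	struct drm_connector *con;
	struct drm_crtc *crtc;
	struct drm_display_mode *mode;

	if (vmw_kms_fbdev_init_data(dev_priv, 0, 1920, 1080,
				    &con, &crtc, &mode) == 0)
		DRM_INFO("fbdev unit 0: using mode %dx%d\n",
			 mode->hdisplay, mode->vdisplay);
}
#endif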