/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"


/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)


struct vmw_clip_rect {
	int x1, x2, y1, y2;
};

/**
 * Clip @num_rects number of @rects against @clip storing the
 * results in @out_rects and the number of passed rects in @out_num.
 */
static void vmw_clip_cliprects(struct drm_clip_rect *rects,
			int num_rects,
			struct vmw_clip_rect clip,
			SVGASignedRect *out_rects,
			int *out_num)
{
	int i, k;

	for (i = 0, k = 0; i < num_rects; i++) {
		int x1 = max_t(int, clip.x1, rects[i].x1);
		int y1 = max_t(int, clip.y1, rects[i].y1);
		int x2 = min_t(int, clip.x2, rects[i].x2);
		int y2 = min_t(int, clip.y2, rects[i].y2);

		if (x1 >= x2)
			continue;
		if (y1 >= y2)
			continue;

		out_rects[k].left   = x1;
		out_rects[k].top    = y1;
		out_rects[k].right  = x2;
		out_rects[k].bottom = y2;
		k++;
	}

	*out_num = k;
}
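
/*
 * Illustrative usage sketch (added comment, not part of the original
 * driver code): a caller builds a clip rectangle in destination
 * coordinates and lets vmw_clip_cliprects() intersect the dirty
 * rectangles with it, dropping any that end up empty:
 *
 *	struct vmw_clip_rect clip = { .x1 = 0, .x2 = hdisplay,
 *				      .y1 = 0, .y2 = vdisplay };
 *	SVGASignedRect out[16];
 *	int num;
 *
 *	vmw_clip_cliprects(rects, num_rects, clip, out, &num);
 *
 * do_surface_dirty_sou() and vmw_kms_present() below use this pattern,
 * with out pointing into the reserved command buffer.
 */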

void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
//   if (du->cursor_surface)
//       vmw_surface_unreference(&du->cursor_surface);
//   if (du->cursor_dmabuf)
//       vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;
    u32 *dst;
    int i, j;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

    dst = (u32*)&cmd[1];

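	/*
	 * Port note (added comment, not from the original sources): the loop
	 * below appears to assume a 32x32 ARGB source image and pads it out
	 * to the 64x64 layout the SVGA alpha-cursor command expects: 32
	 * copied texels plus 32 zero texels per row for the first 32 rows,
	 * then 64 * (64 - 32) zero texels for the remaining rows. Callers
	 * are expected to pass width == height == 64 so that the reservation
	 * above (width * height * 4 bytes of image data) covers the full
	 * 64 * 64 * 4 bytes written here.
	 */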
    for(i = 0; i < 32; i++)
    {
        for(j = 0; j < 32; j++)
            *dst++ = *image++;
        for( ; j < 64; j++)
            *dst++ = 0;
    }
    for(i = 0; i < 64*(64-32); i++)
        *dst++ = 0;

	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
	cmd->cursor.id = cpu_to_le32(0);
	cmd->cursor.width = cpu_to_le32(width);
	cmd->cursor.height = cpu_to_le32(height);
	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}

#if 0
int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
			     struct vmw_dma_buffer *dmabuf,
			     u32 width, u32 height,
			     u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&dmabuf->base);

	return ret;
}
#endif

void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}

#if 0
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	/*
	 * FIXME: Unclear whether there's any global state touched by the
	 * cursor_set function, especially vmw_cursor_update_position looks
	 * suspicious. For now take the easy route and reacquire all locks. We
	 * can do this since the caller in the drm core doesn't check anything
	 * which is protected by any locks.
	 */
	drm_modeset_unlock(&crtc->mutex);
	drm_modeset_lock_all(dev_priv->dev);

	/* A lot of the code assumes this */
	if (handle && (width != 64 || height != 64)) {
		ret = -EINVAL;
		goto out;
	}

	if (handle) {
		struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

		ret = vmw_user_lookup_handle(dev_priv, tfile,
					     handle, &surface, &dmabuf);
		if (ret) {
			DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
			ret = -EINVAL;
			goto out;
		}
	}

	/* need to do this before taking down old image */
	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		vmw_surface_unreference(&surface);
		ret = -EINVAL;
		goto out;
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
					       du->hotspot_x, du->hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		ret = 0;
		goto out;
	}

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + du->hotspot_x,
				   du->cursor_y + du->hotspot_y);

	ret = 0;
out:
	drm_modeset_unlock_all(dev_priv->dev);
	drm_modeset_lock(&crtc->mutex, NULL);

	return ret;
}

int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;

	du->cursor_x = x + crtc->x;
	du->cursor_y = y + crtc->y;

	/*
	 * FIXME: Unclear whether there's any global state touched by the
	 * cursor_set function, especially vmw_cursor_update_position looks
	 * suspicious. For now take the easy route and reacquire all locks. We
	 * can do this since the caller in the drm core doesn't check anything
	 * which is protected by any locks.
	 */
	drm_modeset_unlock(&crtc->mutex);
	drm_modeset_lock_all(dev_priv->dev);

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x + du->hotspot_x,
				   du->cursor_y + du->hotspot_y);

	drm_modeset_unlock_all(dev_priv->dev);
	drm_modeset_lock(&crtc->mutex, NULL);

	return 0;
}

void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	/* we can't call this function from this function since execbuf has
	 * reserved fifo space.
	 *
	 * if (srf->snooper.crtc)
	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
	 *					 srf->snooper.image, 64, 64,
	 *					 du->hotspot_x, du->hotspot_y);
	 */

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}
#endif

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

#define vmw_framebuffer_to_vfbs(x) \
	container_of(x, struct vmw_framebuffer_surface, base.base)

struct vmw_framebuffer_surface {
	struct vmw_framebuffer base;
	struct vmw_surface *surface;
	struct vmw_dma_buffer *buffer;
	struct list_head head;
	struct drm_master *master;
};

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_master *vmaster = vmw_master(vfbs->master);


	mutex_lock(&vmaster->fb_surf_mutex);
	list_del(&vfbs->head);
	mutex_unlock(&vmaster->fb_surf_mutex);


	kfree(vfbs);
}

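/*
 * do_surface_dirty_sou - flush dirty regions of a surface-backed
 * framebuffer to the screen objects scanning it out.
 *
 * Summary added for readability (not part of the original sources): one
 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN command is built to cover the bounding
 * box of all clip rects; for each display unit showing the framebuffer the
 * clips are re-clipped against that unit with vmw_clip_cliprects() and the
 * command is submitted through vmw_execbuf_process(), optionally returning
 * the fence of the last submission in @out_fence.
 */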
static int do_surface_dirty_sou(struct vmw_private *dev_priv,
				struct drm_file *file_priv,
				struct vmw_framebuffer *framebuffer,
				unsigned flags, unsigned color,
				struct drm_clip_rect *clips,
				unsigned num_clips, int inc,
				struct vmw_fence_obj **out_fence)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_clip_rect *clips_ptr;
	struct drm_clip_rect *tmp;
	struct drm_crtc *crtc;
	size_t fifo_size;
	int i, num_units;
	int ret = 0; /* silence warning */
	int left, right, top, bottom;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;
	SVGASignedRect *blits;

	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
			    head) {
		if (crtc->primary->fb != &framebuffer->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	BUG_ON(!clips || !num_clips);

	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
	if (unlikely(tmp == NULL)) {
		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
		return -ENOMEM;
	}

	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
	cmd = kzalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Temporary fifo memory alloc failed.\n");
		ret = -ENOMEM;
		goto out_free_tmp;
	}

	/* setup blits pointer */
	blits = (SVGASignedRect *)&cmd[1];

	/* initial clip region */
	left = clips->x1;
	right = clips->x2;
	top = clips->y1;
	bottom = clips->y2;

	/* skip the first clip rect */
	for (i = 1, clips_ptr = clips + inc;
	     i < num_clips; i++, clips_ptr += inc) {
		left = min_t(int, left, (int)clips_ptr->x1);
		right = max_t(int, right, (int)clips_ptr->x2);
		top = min_t(int, top, (int)clips_ptr->y1);
		bottom = max_t(int, bottom, (int)clips_ptr->y2);
	}

	/* only need to do this once */
	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
	cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));

	cmd->body.srcRect.left = left;
	cmd->body.srcRect.right = right;
	cmd->body.srcRect.top = top;
	cmd->body.srcRect.bottom = bottom;

	clips_ptr = clips;
	for (i = 0; i < num_clips; i++, clips_ptr += inc) {
		tmp[i].x1 = clips_ptr->x1 - left;
		tmp[i].x2 = clips_ptr->x2 - left;
		tmp[i].y1 = clips_ptr->y1 - top;
		tmp[i].y2 = clips_ptr->y2 - top;
	}

	/* do per unit writing, reuse fifo for each */
	for (i = 0; i < num_units; i++) {
		struct vmw_display_unit *unit = units[i];
		struct vmw_clip_rect clip;
		int num;

		clip.x1 = left - unit->crtc.x;
		clip.y1 = top - unit->crtc.y;
		clip.x2 = right - unit->crtc.x;
		clip.y2 = bottom - unit->crtc.y;

		/* skip any crtcs that miss the clip region */
		if (clip.x1 >= unit->crtc.mode.hdisplay ||
		    clip.y1 >= unit->crtc.mode.vdisplay ||
		    clip.x2 <= 0 || clip.y2 <= 0)
			continue;

		/*
		 * In order for the clip rects to be correctly scaled
		 * the src and dest rects need to be the same size.
		 */
		cmd->body.destRect.left = clip.x1;
		cmd->body.destRect.right = clip.x2;
		cmd->body.destRect.top = clip.y1;
		cmd->body.destRect.bottom = clip.y2;

		/* create a clip rect of the crtc in dest coords */
		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
		clip.x1 = 0 - clip.x1;
		clip.y1 = 0 - clip.y1;

		/* need to reset sid as it is changed by execbuf */
		cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
		cmd->body.destScreenId = unit->unit;

		/* clip and write blits to cmd stream */
		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);

		/* if no cliprects hit skip this */
		if (num == 0)
			continue;

		/* only return the last fence */
		if (out_fence && *out_fence)
			vmw_fence_obj_unreference(out_fence);

		/* recalculate package length */
		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
					  fifo_size, 0, NULL, out_fence);

		if (unlikely(ret != 0))
			break;
	}


	kfree(cmd);
out_free_tmp:
	kfree(tmp);

	return ret;
}

static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  struct drm_file *file_priv,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct drm_clip_rect norect;
	int ret, inc = 1;

	if (unlikely(vfbs->master != file_priv->master))
		return -EINVAL;

	/* Require ScreenObject support for 3D */
	if (!dev_priv->sou_priv)
		return -EINVAL;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
				   flags, color,
				   clips, num_clips, inc, NULL);

	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return 0;
}

static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct drm_file *file_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd
					   *mode_cmd)

{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/* 3D is only supported on HWv8 hosts which support screen objects */
	if (!dev_priv->sou_priv)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->scanout))
		return -EINVAL;

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->base_size.width < mode_cmd->width ||
		     surface->base_size.height < mode_cmd->height ||
		     surface->base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->depth) {
	case 32:
		format = SVGA3D_A8R8G8B8;
		break;
	case 24:
		format = SVGA3D_X8R8G8B8;
		break;
	case 16:
		format = SVGA3D_R5G6B5;
		break;
	case 15:
		format = SVGA3D_A1R5G5B5;
		break;
	case 8:
		format = SVGA3D_LUMINANCE8;
		break;
	default:
		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
		return -EINVAL;
	}

	if (unlikely(format != surface->format)) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	if (!vmw_surface_reference(surface)) {
		DRM_ERROR("failed to reference surface %p\n", surface);
		ret = -EINVAL;
		goto out_err2;
	}

	/* XXX get the first 3 from the surface info */
	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbs->base.base.pitches[0] = mode_cmd->pitch;
	vfbs->base.base.depth = mode_cmd->depth;
	vfbs->base.base.width = mode_cmd->width;
	vfbs->base.base.height = mode_cmd->height;
	vfbs->surface = surface;
	vfbs->base.user_handle = mode_cmd->handle;
//   vfbs->master = drm_master_get(file_priv->master);

	mutex_lock(&vmaster->fb_surf_mutex);
	list_add_tail(&vfbs->head, &vmaster->fb_surf);
	mutex_unlock(&vmaster->fb_surf_mutex);

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err3;

	return 0;

out_err3:
	vmw_surface_unreference(&surface);
out_err2:
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Dmabuf framebuffer code
 */

#define vmw_framebuffer_to_vfbd(x) \
	container_of(x, struct vmw_framebuffer_dmabuf, base.base)

struct vmw_framebuffer_dmabuf {
	struct vmw_framebuffer base;
	struct vmw_dma_buffer *buffer;
};

static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

//   drm_framebuffer_cleanup(framebuffer);
//   vmw_dmabuf_unreference(&vfbd->buffer);
//   ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}

static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
			       struct vmw_framebuffer *framebuffer,
			       unsigned flags, unsigned color,
			       struct drm_clip_rect *clips,
			       unsigned num_clips, int increment)
{
	size_t fifo_size;
	int i;

	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	fifo_size = sizeof(*cmd) * num_clips;
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	for (i = 0; i < num_clips; i++, clips += increment) {
		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
		cmd[i].body.x = cpu_to_le32(clips->x1);
		cmd[i].body.y = cpu_to_le32(clips->y1);
		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, fifo_size);
	return 0;
}

static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
				  struct vmw_private *dev_priv,
				  struct vmw_framebuffer *framebuffer)
{
	int depth = framebuffer->base.depth;
	size_t fifo_size;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;

	/* Emulate RGBA support, contrary to svga_reg.h this is not
	 * supported by hosts. This is only a problem if we are reading
	 * this value later and expecting what we uploaded back.
	 */
	if (depth == 32)
		depth = 24;

	fifo_size = sizeof(*cmd);
	cmd = kmalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
	cmd->body.format.colorDepth = depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
	cmd->body.ptr.gmrId = framebuffer->user_handle;
	cmd->body.ptr.offset = 0;

	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
				  fifo_size, 0, NULL, NULL);

	kfree(cmd);

	return ret;
}

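/*
 * do_dmabuf_dirty_sou - flush dirty regions of a dma-buffer backed
 * framebuffer when screen objects are in use.
 *
 * Summary added for readability (not part of the original sources): the
 * GMRFB is first pointed at the framebuffer via do_dmabuf_define_gmrfb(),
 * then for every display unit showing the framebuffer the clip rectangles
 * are translated into that unit's coordinate space and emitted as
 * SVGA_CMD_BLIT_GMRFB_TO_SCREEN blits, optionally returning the fence of
 * the last submission in @out_fence.
 */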
static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_framebuffer *framebuffer,
			       unsigned flags, unsigned color,
			       struct drm_clip_rect *clips,
			       unsigned num_clips, int increment,
			       struct vmw_fence_obj **out_fence)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_clip_rect *clips_ptr;
	int i, k, num_units, ret;
	struct drm_crtc *crtc;
	size_t fifo_size;

	struct {
		uint32_t header;
		SVGAFifoCmdBlitGMRFBToScreen body;
	} *blits;

	ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
	if (unlikely(ret != 0))
		return ret; /* define_gmrfb prints warnings */

	fifo_size = sizeof(*blits) * num_clips;
	blits = kmalloc(fifo_size, GFP_KERNEL);
	if (unlikely(blits == NULL)) {
		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
		return -ENOMEM;
	}

	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->primary->fb != &framebuffer->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		int hit_num = 0;

		clips_ptr = clips;
		for (i = 0; i < num_clips; i++, clips_ptr += increment) {
			int clip_x1 = clips_ptr->x1 - unit->crtc.x;
			int clip_y1 = clips_ptr->y1 - unit->crtc.y;
			int clip_x2 = clips_ptr->x2 - unit->crtc.x;
			int clip_y2 = clips_ptr->y2 - unit->crtc.y;
			int move_x, move_y;

			/* skip any crtcs that miss the clip region */
			if (clip_x1 >= unit->crtc.mode.hdisplay ||
			    clip_y1 >= unit->crtc.mode.vdisplay ||
			    clip_x2 <= 0 || clip_y2 <= 0)
				continue;

			/* clip size to crtc size */
			clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
			clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);

			/* translate both src and dest to bring clip into screen */
			move_x = min_t(int, clip_x1, 0);
			move_y = min_t(int, clip_y1, 0);

			/* actual translate done here */
			blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
			blits[hit_num].body.destScreenId = unit->unit;
			blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
			blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
			blits[hit_num].body.destRect.left = clip_x1 - move_x;
			blits[hit_num].body.destRect.top = clip_y1 - move_y;
			blits[hit_num].body.destRect.right = clip_x2;
			blits[hit_num].body.destRect.bottom = clip_y2;
			hit_num++;
		}

		/* no clips hit the crtc */
		if (hit_num == 0)
			continue;

		/* only return the last fence */
		if (out_fence && *out_fence)
			vmw_fence_obj_unreference(out_fence);

		fifo_size = sizeof(*blits) * hit_num;
		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
					  fifo_size, 0, NULL, out_fence);

		if (unlikely(ret != 0))
			break;
	}

	kfree(blits);

	return ret;
}

static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
				 struct drm_file *file_priv,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	if (dev_priv->ldu_priv) {
		ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
					  flags, color,
					  clips, num_clips, increment);
	} else {
		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
					  flags, color,
					  clips, num_clips, increment, NULL);
	}

	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}

static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
};

/**
 * Pin the dmabuffer to the start of vram.
 */
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	int ret;

	/* This code should not be used with screen objects */
	BUG_ON(dev_priv->sou_priv);

//   vmw_overlay_pause_all(dev_priv);

	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);

//   vmw_overlay_resume_all(dev_priv);

	WARN_ON(ret != 0);

	return 0;
}

static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);

	if (!vfbd->buffer) {
		WARN_ON(!vfbd->buffer);
		return 0;
	}

	return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
}

#if 0
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
					  struct vmw_dma_buffer *dmabuf,
					  struct vmw_framebuffer **out,
					  const struct drm_mode_fb_cmd
					  *mode_cmd)

{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitch;
	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->sou_priv) {
		switch (mode_cmd->depth) {
		case 32:
		case 24:
			/* Only support 32 bpp for 32 and 24 depth fbs */
			if (mode_cmd->bpp == 32)
				break;

			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
				  mode_cmd->depth, mode_cmd->bpp);
			return -EINVAL;
		case 16:
		case 15:
			/* Only support 16 bpp for 16 and 15 depth fbs */
			if (mode_cmd->bpp == 16)
				break;

			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
				  mode_cmd->depth, mode_cmd->bpp);
			return -EINVAL;
		default:
			DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	if (!vmw_dmabuf_reference(dmabuf)) {
		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
		ret = -EINVAL;
		goto out_err2;
	}

	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbd->base.base.pitches[0] = mode_cmd->pitch;
	vfbd->base.base.depth = mode_cmd->depth;
	vfbd->base.base.width = mode_cmd->width;
	vfbd->base.base.height = mode_cmd->height;
	if (!dev_priv->sou_priv) {
		vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
		vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
	}
	vfbd->base.dmabuf = true;
	vfbd->buffer = dmabuf;
	vfbd->base.user_handle = mode_cmd->handle;
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err3;

	return 0;

out_err3:
	vmw_dmabuf_unreference(&dmabuf);
out_err2:
	kfree(vfbd);
out_err1:
	return ret;
}
#endif

/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 struct drm_mode_fb_cmd2 *mode_cmd2)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	struct ttm_base_object *user_obj;
	struct drm_mode_fb_cmd mode_cmd;
	int ret;

	mode_cmd.width = mode_cmd2->width;
	mode_cmd.height = mode_cmd2->height;
	mode_cmd.pitch = mode_cmd2->pitches[0];
	mode_cmd.handle = mode_cmd2->handles[0];
	drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
				    &mode_cmd.bpp);

	/**
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */

	if (!vmw_kms_validate_mode_vram(dev_priv,
					mode_cmd.pitch,
					mode_cmd.height)) {
		DRM_ERROR("VRAM size is too small for requested mode.\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */

	user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/**
	 * End conditioned code.
	 */

	/* returns either a dmabuf or surface */
//   ret = vmw_user_lookup_handle(dev_priv, tfile,
//                    mode_cmd.handle,
//                    &surface, &bo);
//   if (ret)
//       goto err_out;

	/* Create the new framebuffer depending on what we got back */
//   if (bo)
//       ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
//                            &mode_cmd);
//   else if (surface)
		ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
						      surface, &vfb, &mode_cmd);
//   else
//       BUG();

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
//   if (bo)
//       vmw_dmabuf_unreference(&bo);
//   if (surface)
//       vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
//       ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};

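/*
 * vmw_kms_present - present a surface at (destX, destY) on all screen
 * objects touched by @clips.
 *
 * Summary added for readability (not part of the original sources): this
 * works much like do_surface_dirty_sou(), except that the rectangles come
 * from user space as struct drm_vmw_rect and the surface is referenced by
 * the caller-supplied id @sid rather than by the framebuffer handle.
 */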
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_clip_rect *tmp;
	struct drm_crtc *crtc;
	size_t fifo_size;
	int i, k, num_units;
	int ret = 0; /* silence warning */
	int left, right, top, bottom;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;
	SVGASignedRect *blits;

	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->primary->fb != &vfb->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	BUG_ON(surface == NULL);
	BUG_ON(!clips || !num_clips);

	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
	if (unlikely(tmp == NULL)) {
		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
		return -ENOMEM;
	}

	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
	cmd = kmalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
		ret = -ENOMEM;
		goto out_free_tmp;
	}

	left = clips->x;
	right = clips->x + clips->w;
	top = clips->y;
	bottom = clips->y + clips->h;

	for (i = 1; i < num_clips; i++) {
		left = min_t(int, left, (int)clips[i].x);
		right = max_t(int, right, (int)clips[i].x + clips[i].w);
		top = min_t(int, top, (int)clips[i].y);
		bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
	}

	/* only need to do this once */
	memset(cmd, 0, fifo_size);
	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);

	blits = (SVGASignedRect *)&cmd[1];

	cmd->body.srcRect.left = left;
	cmd->body.srcRect.right = right;
	cmd->body.srcRect.top = top;
	cmd->body.srcRect.bottom = bottom;

	for (i = 0; i < num_clips; i++) {
		tmp[i].x1 = clips[i].x - left;
		tmp[i].x2 = clips[i].x + clips[i].w - left;
		tmp[i].y1 = clips[i].y - top;
		tmp[i].y2 = clips[i].y + clips[i].h - top;
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		struct vmw_clip_rect clip;
		int num;

		clip.x1 = left + destX - unit->crtc.x;
		clip.y1 = top + destY - unit->crtc.y;
		clip.x2 = right + destX - unit->crtc.x;
		clip.y2 = bottom + destY - unit->crtc.y;

		/* skip any crtcs that miss the clip region */
		if (clip.x1 >= unit->crtc.mode.hdisplay ||
		    clip.y1 >= unit->crtc.mode.vdisplay ||
		    clip.x2 <= 0 || clip.y2 <= 0)
			continue;

		/*
		 * In order for the clip rects to be correctly scaled
		 * the src and dest rects need to be the same size.
		 */
		cmd->body.destRect.left = clip.x1;
		cmd->body.destRect.right = clip.x2;
		cmd->body.destRect.top = clip.y1;
		cmd->body.destRect.bottom = clip.y2;

		/* create a clip rect of the crtc in dest coords */
		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
		clip.x1 = 0 - clip.x1;
		clip.y1 = 0 - clip.y1;

		/* need to reset sid as it is changed by execbuf */
		cmd->body.srcImage.sid = sid;
		cmd->body.destScreenId = unit->unit;

		/* clip and write blits to cmd stream */
		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);

		/* if no cliprects hit skip this */
		if (num == 0)
			continue;

		/* recalculate package length */
		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
					  fifo_size, 0, NULL, NULL);

		if (unlikely(ret != 0))
			break;
	}

	kfree(cmd);
out_free_tmp:
	kfree(tmp);

	return ret;
}

int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *clips,
		     uint32_t num_clips)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	struct vmw_dma_buffer *dmabuf = vfbd->buffer;
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	size_t fifo_size;
	int i, k, ret, num_units, blits_pos;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;
	struct {
		uint32_t header;
		SVGAFifoCmdBlitScreenToGMRFB body;
	} *blits;

	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->primary->fb != &vfb->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	BUG_ON(dmabuf == NULL);
	BUG_ON(!clips || !num_clips);

	/* take a safe guess at fifo size */
	fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
	cmd = kmalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
	cmd->body.format.colorDepth = vfb->base.depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = vfb->base.pitches[0];
	cmd->body.ptr.gmrId = vfb->user_handle;
	cmd->body.ptr.offset = 0;

	blits = (void *)&cmd[1];
	blits_pos = 0;
	for (i = 0; i < num_units; i++) {
		struct drm_vmw_rect *c = clips;
		for (k = 0; k < num_clips; k++, c++) {
			/* transform clip coords to crtc origin based coords */
			int clip_x1 = c->x - units[i]->crtc.x;
			int clip_x2 = c->x - units[i]->crtc.x + c->w;
			int clip_y1 = c->y - units[i]->crtc.y;
			int clip_y2 = c->y - units[i]->crtc.y + c->h;
			int dest_x = c->x;
			int dest_y = c->y;

			/* compensate for clipping, we negate
			 * a negative number and add that.
			 */
			if (clip_x1 < 0)
				dest_x += -clip_x1;
			if (clip_y1 < 0)
				dest_y += -clip_y1;

			/* clip */
			clip_x1 = max(clip_x1, 0);
			clip_y1 = max(clip_y1, 0);
			clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
			clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);

			/* and cull any rects that miss the crtc */
			if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
			    clip_y1 >= units[i]->crtc.mode.vdisplay ||
			    clip_x2 <= 0 || clip_y2 <= 0)
				continue;

			blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
			blits[blits_pos].body.srcScreenId = units[i]->unit;
			blits[blits_pos].body.destOrigin.x = dest_x;
			blits[blits_pos].body.destOrigin.y = dest_y;

			blits[blits_pos].body.srcRect.left = clip_x1;
			blits[blits_pos].body.srcRect.top = clip_y1;
			blits[blits_pos].body.srcRect.right = clip_x2;
			blits[blits_pos].body.srcRect.bottom = clip_y2;
			blits_pos++;
		}
	}
	/* reset size here and use calculated exact size from loops */
	fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;

	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
				  0, user_fence_rep, NULL);

	kfree(cmd);

	return ret;
}

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	/* assumed largest fb size */
	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	ret = vmw_kms_init_screen_object_display(dev_priv);

	return 0;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	/*
	 * Docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
//   drm_mode_config_cleanup(dev_priv->dev);
//   if (dev_priv->sou_priv)
//       vmw_kms_close_screen_object_display(dev_priv);
//   else
//       vmw_kms_close_legacy_display_system(dev_priv);
	return 0;
}

#if 0
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;


	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
#endif

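/*
 * vmw_kms_write_svga - program the legacy SVGA registers for a mode.
 *
 * Illustrative call (added comment, not part of the original sources),
 * assuming the host reports depth 24 for a 32 bpp mode:
 *
 *	unsigned pitch = 1024 * (32 / 8);	(4096 bytes per scanline)
 *	vmw_kms_write_svga(vmw_priv, 1024, 768, pitch, 32, 24);
 *
 * The final register read verifies that the host's notion of depth matches
 * the requested one and fails with -EINVAL otherwise.
 */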
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
			unsigned width, unsigned height, unsigned pitch,
			unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}

int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
						       SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}

int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(vmw_priv->vga_pitchlock,
			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}

bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem;
}


/**
 * Function called by DRM code with vbl_lock held.
 */
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
{
	return 0;
}

/**
 * Function called by DRM code with vbl_lock held.
 */
int vmw_enable_vblank(struct drm_device *dev, int crtc)
{
	return -ENOSYS;
}

/**
 * Function called by DRM code with vbl_lock held.
 */
void vmw_disable_vblank(struct drm_device *dev, int crtc)
{
}


/*
 * Small shared kms functions.
 */

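/*
 * vmw_du_update_layout - propagate a new GUI layout to the display units.
 *
 * Summary added for readability (not part of the original sources): every
 * connector whose unit index is covered by @rects gets its preferred size
 * and GUI position updated and is marked active; the remaining units fall
 * back to 800x600 and are marked inactive, after which the connector status
 * is re-detected via vmw_du_connector_detect().
 */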
static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
4075 Serge 1689
			 struct drm_vmw_rect *rects)
1690
{
1691
	struct drm_device *dev = dev_priv->dev;
1692
	struct vmw_display_unit *du;
1693
	struct drm_connector *con;
1694
 
1695
	mutex_lock(&dev->mode_config.mutex);
1696
 
1697
#if 0
1698
	{
1699
		unsigned int i;
1700
 
1701
		DRM_INFO("%s: new layout ", __func__);
1702
		for (i = 0; i < num; i++)
1703
			DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
1704
				 rects[i].w, rects[i].h);
1705
		DRM_INFO("\n");
1706
	}
1707
#endif
1708
 
1709
	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
1710
		du = vmw_connector_to_du(con);
1711
		if (num > du->unit) {
1712
			du->pref_width = rects[du->unit].w;
1713
			du->pref_height = rects[du->unit].h;
1714
			du->pref_active = true;
1715
			du->gui_x = rects[du->unit].x;
1716
			du->gui_y = rects[du->unit].y;
1717
		} else {
1718
			du->pref_width = 800;
1719
			du->pref_height = 600;
1720
			du->pref_active = false;
1721
		}
1722
		con->status = vmw_du_connector_detect(con, true);
1723
	}
1724
 
1725
	mutex_unlock(&dev->mode_config.mutex);
1726
 
1727
	return 0;
1728
}
1729
 
#if 0
int vmw_du_page_flip(struct drm_crtc *crtc,
		     struct drm_framebuffer *fb,
		     struct drm_pending_vblank_event *event,
		     uint32_t page_flip_flags)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
	struct drm_file *file_priv;
	struct vmw_fence_obj *fence = NULL;
	struct drm_clip_rect clips;
	int ret;

	if (event == NULL)
		return -EINVAL;

	/* require ScreenObject support for page flipping */
	if (!dev_priv->sou_priv)
		return -ENOSYS;

	file_priv = event->base.file_priv;
	if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
		return -EINVAL;

	crtc->primary->fb = fb;

	/* do a full screen dirty update */
	clips.x1 = clips.y1 = 0;
	clips.x2 = fb->width;
	clips.y2 = fb->height;

	if (vfb->dmabuf)
		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
					  0, 0, &clips, 1, 1, &fence);
	else
		ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
					   0, 0, &clips, 1, 1, &fence);

	if (ret != 0)
		goto out_no_fence;
	if (!fence) {
		ret = -EINVAL;
		goto out_no_fence;
	}

	ret = vmw_event_fence_action_queue(file_priv, fence,
					   &event->base,
					   &event->event.tv_sec,
					   &event->event.tv_usec,
					   true);

	/*
	 * No need to hold on to this now. The only cleanup
	 * we need to do if we fail is unref the fence.
	 */
	vmw_fence_obj_unreference(&fence);

	if (vmw_crtc_to_du(crtc)->is_implicit)
		vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);

	return ret;

out_no_fence:
	crtc->primary->fb = old_fb;
	return ret;
}
#endif
 
void vmw_du_crtc_save(struct drm_crtc *crtc)
{
}

void vmw_du_crtc_restore(struct drm_crtc *crtc)
{
}

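/*
 * vmw_du_crtc_gamma_set - Load a gamma ramp into the device palette.
 *
 * Each 16-bit per-channel value handed in by DRM is truncated to its high
 * byte and written to the legacy SVGA palette registers; entry i occupies
 * the three consecutive registers starting at SVGA_PALETTE_BASE + i * 3.
 * Note that @start is currently ignored and the ramp is written from
 * index 0.
 */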
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			   u16 *r, u16 *g, u16 *b,
			   uint32_t start, uint32_t size)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}
}
 
void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
}

void vmw_du_connector_save(struct drm_connector *connector)
{
}

void vmw_du_connector_restore(struct drm_connector *connector)
{
}

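/*
 * vmw_du_connector_detect - Report connector status.
 *
 * A connector is reported as connected when its display unit index is
 * below the number of displays exposed by the device in
 * SVGA_REG_NUM_DISPLAYS and the unit has been marked active by the
 * current GUI layout (du->pref_active).
 */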
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	mutex_lock(&dev_priv->hw_mutex);
	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
	mutex_unlock(&dev_priv->hw_mutex);

	return ((vmw_connector_to_du(connector)->unit < num_displays &&
		 du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}

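/*
 * Built-in mode list offered to userspace in addition to the preferred
 * mode.  The DRM_MODE() arguments are, in order: name, type, pixel clock
 * in kHz, hdisplay, hsync_start, hsync_end, htotal, hskew, vdisplay,
 * vsync_start, vsync_end, vtotal, vscan and flags (see drm_crtc.h for
 * the authoritative definition).  The terminating entry (type 0) ends
 * the table scan in vmw_du_connector_fill_modes().
 */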
static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1080@60Hz */
	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
		   .vrefresh = 60, },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
/*	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, */
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};
 
/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
static void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
	mode->vrefresh = drm_mode_vrefresh(mode);
}
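
/*
 * Worked example of the guessed timings (values derived from the code
 * above): for a 1024x768 preferred mode, htotal = 1024 + 150 = 1174 and
 * vtotal = 768 + 150 = 918, so clock = 1174 * 918 / 100 * 6 = 64662 kHz,
 * giving a refresh rate of 64662000 / (1174 * 918) ~= 60 Hz.
 */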
 
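/*
 * vmw_du_connector_fill_modes - Build the probed mode list for a connector.
 *
 * A preferred mode sized du->pref_width x du->pref_height with guessed
 * 60Hz timings is added first, provided it passes the
 * vmw_kms_validate_mode_vram() check.  Every built-in mode that fits
 * within @max_width x @max_height and passes the same check is added as
 * well, and the preferred mode is finally moved to the head of the
 * probed list so applications pick it up first.
 */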
int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;

	/* Add preferred mode */
	{
		mode = drm_mode_duplicate(dev, &prefmode);
		if (!mode)
			return 0;
		mode->hdisplay = du->pref_width;
		mode->vdisplay = du->pref_height;
		vmw_guess_mode_timing(mode);

		if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
					       mode->vdisplay)) {
			drm_mode_probed_add(connector, mode);
		} else {
			drm_mode_destroy(dev, mode);
			mode = NULL;
		}

		if (du->pref_mode) {
			list_del_init(&du->pref_mode->head);
			drm_mode_destroy(dev, du->pref_mode);
		}

		/* mode might be null here, this is intended */
		du->pref_mode = mode;
	}

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	/* Move the preferred mode first to help apps pick the right mode. */
	if (du->pref_mode)
		list_move(&du->pref_mode->head, &connector->probed_modes);

	drm_mode_connector_list_update(connector, true);

	return 1;
}
 
int vmw_du_connector_set_property(struct drm_connector *connector,
				  struct drm_property *property,
				  uint64_t val)
{
	return 0;
}

#if 0
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	unsigned rects_size;
	int ret;
	int i;
	struct drm_mode_config *mode_config = &dev->mode_config;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	if (!arg->num_outputs) {
		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		goto out_unlock;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	for (i = 0; i < arg->num_outputs; ++i) {
		if (rects[i].x < 0 ||
		    rects[i].y < 0 ||
		    rects[i].x + rects[i].w > mode_config->max_width ||
		    rects[i].y + rects[i].h > mode_config->max_height) {
			DRM_ERROR("Invalid GUI layout.\n");
			ret = -EINVAL;
			goto out_free;
		}
	}

	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);

out_free:
	kfree(rects);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
#endif
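
/*
 * Illustrative sketch, not part of the build: how a hypothetical caller
 * might fill the update-layout argument that the handler above consumes.
 * The field names follow the drm_vmw_update_layout_arg usage visible in
 * the handler (num_outputs plus a user-space pointer packed into rects);
 * see vmwgfx_drm.h for the authoritative layout.  The libdrm submission
 * call shown at the end is an assumption about how such a request would
 * typically be issued from user space.
 */
#if 0
static int example_send_layout(int fd)
{
	/* Two 1024x768 outputs side by side, as in the layout example above. */
	struct drm_vmw_rect rects[2] = {
		{ .x = 0,    .y = 0, .w = 1024, .h = 768 },
		{ .x = 1024, .y = 0, .w = 1024, .h = 768 },
	};
	struct drm_vmw_update_layout_arg arg = {
		.num_outputs = 2,
		.rects = (unsigned long)rects,
	};

	return drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
}
#endif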