/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
//#include <drm/ttm/ttm_module.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

struct drm_device *main_device;

struct drm_file *drm_file_handlers[256];
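/*
 * The upstream ioctl plumbing below is kept for reference but compiled
 * out (#if 0): this port does not register the table with the DRM core
 * (see the commented-out .ioctls hook in the driver struct further down).
 */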
#if 0
/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
    DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
             struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
    DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
             union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
    DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
            struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
    DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
            struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
    DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
            struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
    DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
            struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
    DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
            struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
    DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
            struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
    DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
            struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
    DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
             union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
    DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
            struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
    DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
             union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
    DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
            struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
    DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
            struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
    DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
             struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
    DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
             struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
    DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
            struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
    DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
            struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
    DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
            struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
    DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
            struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
    DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
            struct drm_vmw_update_layout_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
    [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
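/*
 * For example, the VMW_GET_PARAM entry below expands (roughly) to
 *
 *    [DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *        {DRM_VMW_GET_PARAM, DRM_AUTH | DRM_UNLOCKED,
 *         vmw_getparam_ioctl, DRM_IOCTL_VMW_GET_PARAM},
 *
 * so each descriptor lands at its command offset within the table.
 */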

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
    VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                  DRM_AUTH | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
                  DRM_AUTH | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
                  DRM_AUTH | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                  vmw_kms_cursor_bypass_ioctl,
                  DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

    VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                  DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                  DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                  DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

    VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                  DRM_AUTH | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                  DRM_AUTH | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                  DRM_AUTH | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                  DRM_AUTH | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                  DRM_AUTH | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
                  DRM_AUTH | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                  DRM_AUTH | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                  vmw_fence_obj_signaled_ioctl,
                  DRM_AUTH | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                  DRM_AUTH | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_FENCE_EVENT,
                  vmw_fence_event_ioctl,
                  DRM_AUTH | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                  DRM_AUTH | DRM_UNLOCKED),

    /* these allow direct access to the framebuffers; mark as master only */
    VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
                  DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                  vmw_present_readback_ioctl,
                  DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
    VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                  vmw_kms_update_layout_ioctl,
                  DRM_MASTER | DRM_UNLOCKED),
};
#endif

static struct pci_device_id vmw_pci_id_list[] = {
    {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
    {0, 0, 0}
};

static int enable_fbdev = 1;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
static void vmw_print_capabilities(uint32_t capabilities)
{
    DRM_INFO("Capabilities:\n");
    if (capabilities & SVGA_CAP_RECT_COPY)
        DRM_INFO("  Rect copy.\n");
    if (capabilities & SVGA_CAP_CURSOR)
        DRM_INFO("  Cursor.\n");
    if (capabilities & SVGA_CAP_CURSOR_BYPASS)
        DRM_INFO("  Cursor bypass.\n");
    if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
        DRM_INFO("  Cursor bypass 2.\n");
    if (capabilities & SVGA_CAP_8BIT_EMULATION)
        DRM_INFO("  8bit emulation.\n");
    if (capabilities & SVGA_CAP_ALPHA_CURSOR)
        DRM_INFO("  Alpha cursor.\n");
    if (capabilities & SVGA_CAP_3D)
        DRM_INFO("  3D.\n");
    if (capabilities & SVGA_CAP_EXTENDED_FIFO)
        DRM_INFO("  Extended Fifo.\n");
    if (capabilities & SVGA_CAP_MULTIMON)
        DRM_INFO("  Multimon.\n");
    if (capabilities & SVGA_CAP_PITCHLOCK)
        DRM_INFO("  Pitchlock.\n");
    if (capabilities & SVGA_CAP_IRQMASK)
        DRM_INFO("  Irq mask.\n");
    if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
        DRM_INFO("  Display Topology.\n");
    if (capabilities & SVGA_CAP_GMR)
        DRM_INFO("  GMR.\n");
    if (capabilities & SVGA_CAP_TRACES)
        DRM_INFO("  Traces.\n");
    if (capabilities & SVGA_CAP_GMR2)
        DRM_INFO("  GMR2.\n");
    if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
        DRM_INFO("  Screen Object 2.\n");
}

/**
 * vmw_dummy_query_bo_prepare - Initialize a query result structure at
 * the start of a buffer object.
 *
 * @dev_priv: The device private structure.
 *
 * This function will idle the buffer using an uninterruptible wait, then
 * map the first page and initialize a pending occlusion query result
 * structure. Finally it will unmap the buffer.
 *
 * TODO: Since we're only mapping a single page, we should optimize the map
 * to use kmap_atomic / iomap_atomic.
 */
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
    struct ttm_bo_kmap_obj map;
    volatile SVGA3dQueryResult *result;
    bool dummy;
    int ret;
    struct ttm_bo_device *bdev = &dev_priv->bdev;
    struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

    ttm_bo_reserve(bo, false, false, false, 0);
    spin_lock(&bdev->fence_lock);
    ret = 0; //ttm_bo_wait(bo, false, false, false);
    spin_unlock(&bdev->fence_lock);
    if (unlikely(ret != 0))
        (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
                                 10*HZ);
/*
    ret = ttm_bo_kmap(bo, 0, 1, &map);
    if (likely(ret == 0)) {
        result = ttm_kmap_obj_virtual(&map, &dummy);
        result->totalSize = sizeof(*result);
        result->state = SVGA3D_QUERYSTATE_PENDING;
        result->result32 = 0xff;
        ttm_bo_kunmap(&map);
    } else
        DRM_ERROR("Dummy query buffer map failed.\n");
*/
    ttm_bo_unreserve(bo);
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
    return ttm_bo_create(&dev_priv->bdev,
                         PAGE_SIZE,
                         ttm_bo_type_device,
                         &vmw_vram_sys_placement,
                         0, false, NULL,
                         &dev_priv->dummy_query_bo);
}
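
/*
 * Note: in this port only the FIFO is brought up here; the fence manager
 * and the dummy query bo setup from the upstream driver are still
 * commented out below, so the out_no_query_bo unwind path is unreachable
 * for now.
 */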
static int vmw_request_device(struct vmw_private *dev_priv)
{
    int ret;
    ENTER();

    ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
    if (unlikely(ret != 0)) {
        DRM_ERROR("Unable to initialize FIFO.\n");
        return ret;
    }
//    vmw_fence_fifo_up(dev_priv->fman);
//    ret = vmw_dummy_query_bo_create(dev_priv);
//    if (unlikely(ret != 0))
//        goto out_no_query_bo;
//    vmw_dummy_query_bo_prepare(dev_priv);

    LEAVE();

    return 0;

out_no_query_bo:
    vmw_fence_fifo_down(dev_priv->fman);
    vmw_fifo_release(dev_priv, &dev_priv->fifo);
    return ret;
}
static void vmw_release_device(struct vmw_private *dev_priv)
{
    /*
     * Previous destructions should've released
     * the pinned bo.
     */

    BUG_ON(dev_priv->pinned_bo != NULL);

    ttm_bo_unref(&dev_priv->dummy_query_bo);
    vmw_fence_fifo_down(dev_priv->fman);
    vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
                        bool unhide_svga)
{
    int ret = 0;

    ENTER();

    mutex_lock(&dev_priv->release_mutex);
    if (unlikely(dev_priv->num_3d_resources++ == 0)) {
        ret = vmw_request_device(dev_priv);
        if (unlikely(ret != 0))
            --dev_priv->num_3d_resources;
    } else if (unhide_svga) {
        mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_ENABLE,
                  vmw_read(dev_priv, SVGA_REG_ENABLE) &
                  ~SVGA_REG_ENABLE_HIDE);
        mutex_unlock(&dev_priv->hw_mutex);
    }

    mutex_unlock(&dev_priv->release_mutex);
    LEAVE();
    return ret;
}
/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
                         bool hide_svga)
{
    int32_t n3d;

    mutex_lock(&dev_priv->release_mutex);
    if (unlikely(--dev_priv->num_3d_resources == 0))
        vmw_release_device(dev_priv);
    else if (hide_svga) {
        mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_ENABLE,
                  vmw_read(dev_priv, SVGA_REG_ENABLE) |
                  SVGA_REG_ENABLE_HIDE);
        mutex_unlock(&dev_priv->hw_mutex);
    }

    n3d = (int32_t) dev_priv->num_3d_resources;
    mutex_unlock(&dev_priv->release_mutex);

    BUG_ON(n3d < 0);
}
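/*
 * Typical vmw_3d_resource_inc()/vmw_3d_resource_dec() usage (sketch):
 * callers bracket work that needs the device in SVGA mode, e.g.
 *
 *    ret = vmw_3d_resource_inc(dev_priv, false);
 *    if (unlikely(ret != 0))
 *        return ret;
 *    ... emit commands through the FIFO ...
 *    vmw_3d_resource_dec(dev_priv, false);
 */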
/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
    uint32_t width;
    uint32_t height;

    width = vmw_read(dev_priv, SVGA_REG_WIDTH);
    height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

    width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
    height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

    if (width > dev_priv->fb_max_width ||
        height > dev_priv->fb_max_height) {

        /*
         * This is a host error and shouldn't occur.
         */

        width = VMW_MIN_INITIAL_WIDTH;
        height = VMW_MIN_INITIAL_HEIGHT;
    }

    dev_priv->initial_width = width;
    dev_priv->initial_height = height;
}
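/*
 * Example of the clamping above (sketch): a host-reported 640x480 comes
 * out as 800x600 (raised to the VMW_MIN_INITIAL_* floor), while a value
 * exceeding fb_max_width/fb_max_height is treated as a host error and
 * also falls back to 800x600.
 */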
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
    struct vmw_private *dev_priv;
    int ret;
    uint32_t svga_id;
    enum vmw_res_type i;

    ENTER();

    dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
    if (unlikely(dev_priv == NULL)) {
        DRM_ERROR("Failed allocating a device private struct.\n");
        return -ENOMEM;
    }

    pci_set_master(dev->pdev);

    dev_priv->dev = dev;
    dev_priv->vmw_chipset = chipset;
    dev_priv->last_read_seqno = (uint32_t) -100;
    mutex_init(&dev_priv->hw_mutex);
    mutex_init(&dev_priv->cmdbuf_mutex);
    mutex_init(&dev_priv->release_mutex);
    rwlock_init(&dev_priv->resource_lock);

    for (i = vmw_res_context; i < vmw_res_max; ++i) {
        idr_init(&dev_priv->res_idr[i]);
        INIT_LIST_HEAD(&dev_priv->res_lru[i]);
    }

    mutex_init(&dev_priv->init_mutex);
    init_waitqueue_head(&dev_priv->fence_queue);
    init_waitqueue_head(&dev_priv->fifo_queue);
    dev_priv->fence_queue_waiters = 0;
    atomic_set(&dev_priv->fifo_queue_waiters, 0);

    dev_priv->used_memory_size = 0;
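    /*
     * SVGA II PCI BARs, as used below: BAR 0 is the I/O register
     * window, BAR 1 is VRAM (the framebuffer), BAR 2 is the FIFO
     * MMIO region.
     */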
    dev_priv->io_start = pci_resource_start(dev->pdev, 0);
    dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
    dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

    printk("io: %x vram: %x mmio: %x\n", dev_priv->io_start,
           dev_priv->vram_start, dev_priv->mmio_start);

    dev_priv->enable_fb = enable_fbdev;

    mutex_lock(&dev_priv->hw_mutex);

    vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
    svga_id = vmw_read(dev_priv, SVGA_REG_ID);
    if (svga_id != SVGA_ID_2) {
        ret = -ENOSYS;
        DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
        mutex_unlock(&dev_priv->hw_mutex);
        goto out_err0;
    }

    dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

    dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
    dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
    dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
    dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

    vmw_get_initial_size(dev_priv);

    if (dev_priv->capabilities & SVGA_CAP_GMR) {
        dev_priv->max_gmr_descriptors =
            vmw_read(dev_priv,
                     SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
        dev_priv->max_gmr_ids =
            vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
    }
    if (dev_priv->capabilities & SVGA_CAP_GMR2) {
        dev_priv->max_gmr_pages =
            vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
        dev_priv->memory_size =
            vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
        dev_priv->memory_size -= dev_priv->vram_size;
    } else {
        /*
         * An arbitrary limit of 512MiB on surface
         * memory. But all HWV8 hardware supports GMR2.
         */
        dev_priv->memory_size = 512*1024*1024;
    }

    mutex_unlock(&dev_priv->hw_mutex);

    vmw_print_capabilities(dev_priv->capabilities);

    if (dev_priv->capabilities & SVGA_CAP_GMR) {
        DRM_INFO("Max GMR ids is %u\n",
                 (unsigned)dev_priv->max_gmr_ids);
        DRM_INFO("Max GMR descriptors is %u\n",
                 (unsigned)dev_priv->max_gmr_descriptors);
    }
    if (dev_priv->capabilities & SVGA_CAP_GMR2) {
        DRM_INFO("Max number of GMR pages is %u\n",
                 (unsigned)dev_priv->max_gmr_pages);
        DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                 (unsigned)dev_priv->memory_size / 1024);
    }
    DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
             dev_priv->vram_start, dev_priv->vram_size / 1024);
    DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
             dev_priv->mmio_start, dev_priv->mmio_size / 1024);

    ret = vmw_ttm_global_init(dev_priv);
    if (unlikely(ret != 0))
        goto out_err0;

    ret = ttm_bo_device_init(&dev_priv->bdev,
                             dev_priv->bo_global_ref.ref.object,
                             &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
                             false);
    if (unlikely(ret != 0)) {
        DRM_ERROR("Failed initializing TTM buffer object driver.\n");
        goto out_err1;
    }

    ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                         (dev_priv->vram_size >> PAGE_SHIFT));
    if (unlikely(ret != 0)) {
        DRM_ERROR("Failed initializing memory manager for VRAM.\n");
        goto out_err2;
    }

    dev_priv->has_gmr = true;
    if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                       dev_priv->max_gmr_ids) != 0) {
        DRM_INFO("No GMR memory available. "
                 "Graphics memory resources are very limited.\n");
        dev_priv->has_gmr = false;
    }

    dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
                                     dev_priv->mmio_size);

    if (unlikely(dev_priv->mmio_virt == NULL)) {
        ret = -ENOMEM;
        DRM_ERROR("Failed mapping MMIO.\n");
        goto out_err3;
    }

    /* Need mmio memory to check for fifo pitchlock cap. */
    if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
        !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
        !vmw_fifo_have_pitchlock(dev_priv)) {
        ret = -ENOSYS;
        DRM_ERROR("Hardware has no pitchlock\n");
        goto out_err4;
    }

    dev_priv->tdev = ttm_object_device_init
        (dev_priv->mem_global_ref.object, 12);

    if (unlikely(dev_priv->tdev == NULL)) {
        DRM_ERROR("Unable to initialize TTM object management.\n");
        ret = -ENOMEM;
        goto out_err4;
    }

    dev->dev_private = dev_priv;

#if 0

    if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
        ret = drm_irq_install(dev);
        if (ret != 0) {
            DRM_ERROR("Failed installing irq: %d\n", ret);
            goto out_no_irq;
        }
    }

    dev_priv->fman = vmw_fence_manager_init(dev_priv);
    if (unlikely(dev_priv->fman == NULL))
        goto out_no_fman;

    vmw_kms_save_vga(dev_priv);
#endif

    /* Start kms and overlay systems, needs fifo. */
    ret = vmw_kms_init(dev_priv);
    if (unlikely(ret != 0))
        goto out_no_kms;

    if (dev_priv->enable_fb) {
        ret = vmw_3d_resource_inc(dev_priv, true);
        if (unlikely(ret != 0))
            goto out_no_fifo;
//        vmw_fb_init(dev_priv);
    }

    LEAVE();
    return 0;

out_no_fifo:
//    vmw_overlay_close(dev_priv);
//    vmw_kms_close(dev_priv);
out_no_kms:
//    vmw_kms_restore_vga(dev_priv);
//    vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
//    if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
//        drm_irq_uninstall(dev_priv->dev);
out_no_irq:
//    if (dev_priv->stealth)
//        pci_release_region(dev->pdev, 2);
//    else
//        pci_release_regions(dev->pdev);
out_no_device:
//    ttm_object_device_release(&dev_priv->tdev);
out_err4:
//    iounmap(dev_priv->mmio_virt);
out_err3:
//    arch_phys_wc_del(dev_priv->mmio_mtrr);
//    if (dev_priv->has_gmr)
//        (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
//    (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
//    (void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
//    vmw_ttm_global_release(dev_priv);
out_err0:
//    for (i = vmw_res_context; i < vmw_res_max; ++i)
//        idr_destroy(&dev_priv->res_idr[i]);

    kfree(dev_priv);
    return ret;
}

#if 0
static int vmw_driver_unload(struct drm_device *dev)
{
    struct vmw_private *dev_priv = vmw_priv(dev);
    enum vmw_res_type i;

    unregister_pm_notifier(&dev_priv->pm_nb);

    if (dev_priv->ctx.res_ht_initialized)
        drm_ht_remove(&dev_priv->ctx.res_ht);
    if (dev_priv->ctx.cmd_bounce)
        vfree(dev_priv->ctx.cmd_bounce);
    if (dev_priv->enable_fb) {
        vmw_fb_close(dev_priv);
        vmw_kms_restore_vga(dev_priv);
        vmw_3d_resource_dec(dev_priv, false);
    }
    vmw_kms_close(dev_priv);
    vmw_overlay_close(dev_priv);
    vmw_fence_manager_takedown(dev_priv->fman);
    if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
        drm_irq_uninstall(dev_priv->dev);
    if (dev_priv->stealth)
        pci_release_region(dev->pdev, 2);
    else
        pci_release_regions(dev->pdev);

    ttm_object_device_release(&dev_priv->tdev);
    iounmap(dev_priv->mmio_virt);
    arch_phys_wc_del(dev_priv->mmio_mtrr);
    if (dev_priv->has_gmr)
        (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
    (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
    (void)ttm_bo_device_release(&dev_priv->bdev);
    vmw_ttm_global_release(dev_priv);

    for (i = vmw_res_context; i < vmw_res_max; ++i)
        idr_destroy(&dev_priv->res_idr[i]);

    kfree(dev_priv);

    return 0;
}

static void vmw_preclose(struct drm_device *dev,
                         struct drm_file *file_priv)
{
    struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
    struct vmw_private *dev_priv = vmw_priv(dev);

    vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
                          struct drm_file *file_priv)
{
    struct vmw_fpriv *vmw_fp;

    vmw_fp = vmw_fpriv(file_priv);
    ttm_object_file_release(&vmw_fp->tfile);
    if (vmw_fp->locked_master)
        drm_master_put(&vmw_fp->locked_master);
    kfree(vmw_fp);
}
#endif
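
/*
 * In this port there is no file-descriptor path: drm_get_dev() below
 * invokes this open hook directly with a statically allocated drm_file,
 * and TTM object files are not yet set up (see the commented-out
 * ttm_object_file_init() call).
 */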
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
    struct vmw_private *dev_priv = vmw_priv(dev);
    struct vmw_fpriv *vmw_fp;
    int ret = -ENOMEM;

    vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
    if (unlikely(vmw_fp == NULL))
        return ret;

    INIT_LIST_HEAD(&vmw_fp->fence_events);
//    vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
//    if (unlikely(vmw_fp->tfile == NULL))
//        goto out_no_tfile;

    file_priv->driver_priv = vmw_fp;
//    dev_priv->bdev.dev_mapping = dev->dev_mapping;

    return 0;

out_no_tfile:
    kfree(vmw_fp);
    return ret;
}

#if 0
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
    struct drm_file *file_priv = filp->private_data;
    struct drm_device *dev = file_priv->minor->dev;
    unsigned int nr = DRM_IOCTL_NR(cmd);

    /*
     * Do extra checking on driver private ioctls.
     */

    if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
        && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
        struct drm_ioctl_desc *ioctl =
            &vmw_ioctls[nr - DRM_COMMAND_BASE];

        if (unlikely(ioctl->cmd_drv != cmd)) {
            DRM_ERROR("Invalid command format, ioctl %d\n",
                      nr - DRM_COMMAND_BASE);
            return -EINVAL;
        }
    }

    return drm_ioctl(filp, cmd, arg);
}

static int vmw_firstopen(struct drm_device *dev)
{
    struct vmw_private *dev_priv = vmw_priv(dev);
    dev_priv->is_opened = true;

    return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
    struct vmw_private *dev_priv = vmw_priv(dev);
    struct drm_crtc *crtc;
    struct drm_mode_set set;
    int ret;

    /**
     * Do nothing on the lastclose call from drm_unload.
     */

    if (!dev_priv->is_opened)
        return;

    dev_priv->is_opened = false;
    set.x = 0;
    set.y = 0;
    set.fb = NULL;
    set.mode = NULL;
    set.connectors = NULL;
    set.num_connectors = 0;

    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        set.crtc = crtc;
        ret = drm_mode_set_config_internal(&set);
        WARN_ON(ret != 0);
    }

}

static void vmw_master_init(struct vmw_master *vmaster)
{
    ttm_lock_init(&vmaster->lock);
    INIT_LIST_HEAD(&vmaster->fb_surf);
    mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
                             struct drm_master *master)
{
    struct vmw_master *vmaster;

    vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
    if (unlikely(vmaster == NULL))
        return -ENOMEM;

    vmw_master_init(vmaster);
    ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
    master->driver_priv = vmaster;

    return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
                               struct drm_master *master)
{
    struct vmw_master *vmaster = vmw_master(master);

    master->driver_priv = NULL;
    kfree(vmaster);
}


static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
    struct vmw_private *dev_priv = vmw_priv(dev);
    struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
    struct vmw_master *active = dev_priv->active_master;
    struct vmw_master *vmaster = vmw_master(file_priv->master);
    int ret = 0;

    if (!dev_priv->enable_fb) {
        ret = vmw_3d_resource_inc(dev_priv, true);
        if (unlikely(ret != 0))
            return ret;
        vmw_kms_save_vga(dev_priv);
        mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_TRACES, 0);
        mutex_unlock(&dev_priv->hw_mutex);
    }

    if (active) {
        BUG_ON(active != &dev_priv->fbdev_master);
        ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
        if (unlikely(ret != 0))
            goto out_no_active_lock;

        ttm_lock_set_kill(&active->lock, true, SIGTERM);
        ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
        if (unlikely(ret != 0)) {
            DRM_ERROR("Unable to clean VRAM on "
                      "master drop.\n");
        }

        dev_priv->active_master = NULL;
    }

    ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
    if (!from_open) {
        ttm_vt_unlock(&vmaster->lock);
        BUG_ON(vmw_fp->locked_master != file_priv->master);
        drm_master_put(&vmw_fp->locked_master);
    }

    dev_priv->active_master = vmaster;

    return 0;

out_no_active_lock:
    if (!dev_priv->enable_fb) {
        vmw_kms_restore_vga(dev_priv);
        vmw_3d_resource_dec(dev_priv, true);
        mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_TRACES, 1);
        mutex_unlock(&dev_priv->hw_mutex);
    }
    return ret;
}

static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv,
                            bool from_release)
{
    struct vmw_private *dev_priv = vmw_priv(dev);
    struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
    struct vmw_master *vmaster = vmw_master(file_priv->master);
    int ret;

    /**
     * Make sure the master doesn't disappear while we have
     * it locked.
     */

    vmw_fp->locked_master = drm_master_get(file_priv->master);
    ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
    vmw_execbuf_release_pinned_bo(dev_priv);

    if (unlikely(ret != 0)) {
        DRM_ERROR("Unable to lock TTM at VT switch.\n");
        drm_master_put(&vmw_fp->locked_master);
    }

    ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

    if (!dev_priv->enable_fb) {
        ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
        if (unlikely(ret != 0))
            DRM_ERROR("Unable to clean VRAM on master drop.\n");
        vmw_kms_restore_vga(dev_priv);
        vmw_3d_resource_dec(dev_priv, true);
        mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_TRACES, 1);
        mutex_unlock(&dev_priv->hw_mutex);
    }

    dev_priv->active_master = &dev_priv->fbdev_master;
    ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
    ttm_vt_unlock(&dev_priv->fbdev_master.lock);

    if (dev_priv->enable_fb)
        vmw_fb_on(dev_priv);
}


static void vmw_remove(struct pci_dev *pdev)
{
    struct drm_device *dev = pci_get_drvdata(pdev);

    drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr)
{
    struct vmw_private *dev_priv =
        container_of(nb, struct vmw_private, pm_nb);
    struct vmw_master *vmaster = dev_priv->active_master;

    switch (val) {
    case PM_HIBERNATION_PREPARE:
    case PM_SUSPEND_PREPARE:
        ttm_suspend_lock(&vmaster->lock);

        /**
         * This empties VRAM and unbinds all GMR bindings.
         * Buffer contents are moved to swappable memory.
         */
        vmw_execbuf_release_pinned_bo(dev_priv);
        vmw_resource_evict_all(dev_priv);
        ttm_bo_swapout_all(&dev_priv->bdev);

        break;
    case PM_POST_HIBERNATION:
    case PM_POST_SUSPEND:
    case PM_POST_RESTORE:
        ttm_suspend_unlock(&vmaster->lock);

        break;
    case PM_RESTORE_PREPARE:
        break;
    default:
        break;
    }
    return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
    struct drm_device *dev = pci_get_drvdata(pdev);
    struct vmw_private *dev_priv = vmw_priv(dev);

    if (dev_priv->num_3d_resources != 0) {
        DRM_INFO("Can't suspend or hibernate "
                 "while 3D resources are active.\n");
        return -EBUSY;
    }

    pci_save_state(pdev);
    pci_disable_device(pdev);
    pci_set_power_state(pdev, PCI_D3hot);
    return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
    pci_set_power_state(pdev, PCI_D0);
    pci_restore_state(pdev);
    return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
    struct pci_dev *pdev = to_pci_dev(kdev);
    struct pm_message dummy;

    dummy.event = 0;

    return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
    struct pci_dev *pdev = to_pci_dev(kdev);

    return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
    struct pci_dev *pdev = to_pci_dev(kdev);
    struct drm_device *dev = pci_get_drvdata(pdev);
    struct vmw_private *dev_priv = vmw_priv(dev);

    /**
     * Release 3d reference held by fbdev and potentially
     * stop fifo.
     */
    dev_priv->suspended = true;
    if (dev_priv->enable_fb)
        vmw_3d_resource_dec(dev_priv, true);

    if (dev_priv->num_3d_resources != 0) {

        DRM_INFO("Can't suspend or hibernate "
                 "while 3D resources are active.\n");

        if (dev_priv->enable_fb)
            vmw_3d_resource_inc(dev_priv, true);
        dev_priv->suspended = false;
        return -EBUSY;
    }

    return 0;
}

#endif




static struct drm_driver driver = {
    .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
                       DRIVER_MODESET,
//    .load = vmw_driver_load,
//    .unload = vmw_driver_unload,
//    .firstopen = vmw_firstopen,
//    .lastclose = vmw_lastclose,
    .irq_preinstall = vmw_irq_preinstall,
    .irq_postinstall = vmw_irq_postinstall,
//    .irq_uninstall = vmw_irq_uninstall,
    .irq_handler = vmw_irq_handler,
//    .get_vblank_counter = vmw_get_vblank_counter,
//    .enable_vblank = vmw_enable_vblank,
//    .disable_vblank = vmw_disable_vblank,
//    .ioctls = vmw_ioctls,
//    .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
//    .dma_quiescent = NULL,    /*vmw_dma_quiescent, */
//    .master_create = vmw_master_create,
//    .master_destroy = vmw_master_destroy,
//    .master_set = vmw_master_set,
//    .master_drop = vmw_master_drop,
    .open = vmw_driver_open,
//    .preclose = vmw_preclose,
//    .postclose = vmw_postclose,

//    .dumb_create = vmw_dumb_create,
//    .dumb_map_offset = vmw_dumb_map_offset,
//    .dumb_destroy = vmw_dumb_destroy,

//    .fops = &vmwgfx_driver_fops,
//    .name = VMWGFX_DRIVER_NAME,
//    .desc = VMWGFX_DRIVER_DESC,
//    .date = VMWGFX_DRIVER_DATE,
//    .major = VMWGFX_DRIVER_MAJOR,
//    .minor = VMWGFX_DRIVER_MINOR,
//    .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};
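
/*
 * Port entry point: scans the PCI bus for an SVGA II adapter from
 * vmw_pci_id_list and, if found, hands it to drm_get_dev() below.
 */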
int vmw_init(void)
{
    static pci_dev_t device;
    const struct pci_device_id *ent;
    int err;

    ENTER();

    ent = find_pci_device(&device, vmw_pci_id_list);
    if (unlikely(ent == NULL)) {
        dbgprintf("device not found\n");
        return -ENODEV;
    }

    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
             device.pci_dev.device);

    drm_global_init();

    err = drm_get_dev(&device.pci_dev, ent);
    LEAVE();

    return err;
}



//module_init(vmwgfx_init);
//module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
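
/*
 * Stand-in for the DRM core's PCI device registration: allocates the
 * drm_device and drm_file statically, skips minor/sysfs setup, then runs
 * the driver open hook, vmw_driver_load() and kms_init().
 */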
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    static struct drm_device drm_dev;
    static struct drm_file drm_file;

    struct drm_device *dev;
    struct drm_file *priv;

    int ret;

    dev = &drm_dev;
    priv = &drm_file;

    drm_file_handlers[0] = priv;

//    ret = pci_enable_device(pdev);
//    if (ret)
//        goto err_g1;

    pci_set_master(pdev);

//    if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
//        printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
//        goto err_g2;
//    }

    dev->pdev = pdev;
    dev->pci_device = pdev->device;
    dev->pci_vendor = pdev->vendor;

    INIT_LIST_HEAD(&dev->filelist);
    INIT_LIST_HEAD(&dev->ctxlist);
    INIT_LIST_HEAD(&dev->vmalist);
    INIT_LIST_HEAD(&dev->maplist);

    spin_lock_init(&dev->count_lock);
    mutex_init(&dev->struct_mutex);
    mutex_init(&dev->ctxlist_mutex);

    INIT_LIST_HEAD(&priv->lhead);
    INIT_LIST_HEAD(&priv->fbs);
    INIT_LIST_HEAD(&priv->event_list);
    init_waitqueue_head(&priv->event_wait);
    priv->event_space = 4096; /* set aside 4k for event buffer */

    idr_init(&priv->object_idr);
    spin_lock_init(&priv->table_lock);

    dev->driver = &driver;

    if (dev->driver->open) {
        ret = dev->driver->open(dev, priv);
        if (ret < 0)
            goto err_g4;
    }

    ret = vmw_driver_load(dev, ent->driver_data);

    if (ret)
        goto err_g4;

    ret = kms_init(dev);

    if (ret)
        goto err_g4;

    return 0;

err_g4:
//err_g3:
//    if (drm_core_check_feature(dev, DRIVER_MODESET))
//        drm_put_minor(&dev->control);
//err_g2:
//    pci_disable_device(pdev);
//err_g1:

    return ret;
}