Rev 4111 | Rev 4570 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 4111 | Rev 4569 | ||
---|---|---|---|
1 | /************************************************************************** |
1 | /************************************************************************** |
2 | * |
2 | * |
3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA |
3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA |
4 | * All Rights Reserved. |
4 | * All Rights Reserved. |
5 | * |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the |
7 | * copy of this software and associated documentation files (the |
8 | * "Software"), to deal in the Software without restriction, including |
8 | * "Software"), to deal in the Software without restriction, including |
9 | * without limitation the rights to use, copy, modify, merge, publish, |
9 | * without limitation the rights to use, copy, modify, merge, publish, |
10 | * distribute, sub license, and/or sell copies of the Software, and to |
10 | * distribute, sub license, and/or sell copies of the Software, and to |
11 | * permit persons to whom the Software is furnished to do so, subject to |
11 | * permit persons to whom the Software is furnished to do so, subject to |
12 | * the following conditions: |
12 | * the following conditions: |
13 | * |
13 | * |
14 | * The above copyright notice and this permission notice (including the |
14 | * The above copyright notice and this permission notice (including the |
15 | * next paragraph) shall be included in all copies or substantial portions |
15 | * next paragraph) shall be included in all copies or substantial portions |
16 | * of the Software. |
16 | * of the Software. |
17 | * |
17 | * |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
25 | * |
25 | * |
26 | **************************************************************************/ |
26 | **************************************************************************/ |
27 | #include |
27 | #include |
28 | 28 | ||
29 | #include |
29 | #include |
30 | #include "vmwgfx_drv.h" |
30 | #include "vmwgfx_drv.h" |
31 | #include |
31 | #include |
32 | #include |
32 | #include |
33 | #include |
33 | #include |
34 | //#include |
34 | //#include |
- | 35 | #include |
|
35 | 36 | ||
36 | #define VMWGFX_DRIVER_NAME "vmwgfx" |
37 | #define VMWGFX_DRIVER_NAME "vmwgfx" |
37 | #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" |
38 | #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" |
38 | #define VMWGFX_CHIP_SVGAII 0 |
39 | #define VMWGFX_CHIP_SVGAII 0 |
39 | #define VMW_FB_RESERVATION 0 |
40 | #define VMW_FB_RESERVATION 0 |
40 | 41 | ||
41 | #define VMW_MIN_INITIAL_WIDTH 800 |
42 | #define VMW_MIN_INITIAL_WIDTH 800 |
42 | #define VMW_MIN_INITIAL_HEIGHT 600 |
43 | #define VMW_MIN_INITIAL_HEIGHT 600 |
43 | 44 | ||
44 | #if 0 |
45 | #if 0 |
45 | /** |
46 | /** |
46 | * Fully encoded drm commands. Might move to vmw_drm.h |
47 | * Fully encoded drm commands. Might move to vmw_drm.h |
47 | */ |
48 | */ |
48 | 49 | ||
49 | #define DRM_IOCTL_VMW_GET_PARAM \ |
50 | #define DRM_IOCTL_VMW_GET_PARAM \ |
50 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \ |
51 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \ |
51 | struct drm_vmw_getparam_arg) |
52 | struct drm_vmw_getparam_arg) |
52 | #define DRM_IOCTL_VMW_ALLOC_DMABUF \ |
53 | #define DRM_IOCTL_VMW_ALLOC_DMABUF \ |
53 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \ |
54 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \ |
54 | union drm_vmw_alloc_dmabuf_arg) |
55 | union drm_vmw_alloc_dmabuf_arg) |
55 | #define DRM_IOCTL_VMW_UNREF_DMABUF \ |
56 | #define DRM_IOCTL_VMW_UNREF_DMABUF \ |
56 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \ |
57 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \ |
57 | struct drm_vmw_unref_dmabuf_arg) |
58 | struct drm_vmw_unref_dmabuf_arg) |
58 | #define DRM_IOCTL_VMW_CURSOR_BYPASS \ |
59 | #define DRM_IOCTL_VMW_CURSOR_BYPASS \ |
59 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \ |
60 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \ |
60 | struct drm_vmw_cursor_bypass_arg) |
61 | struct drm_vmw_cursor_bypass_arg) |
61 | 62 | ||
62 | #define DRM_IOCTL_VMW_CONTROL_STREAM \ |
63 | #define DRM_IOCTL_VMW_CONTROL_STREAM \ |
63 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \ |
64 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \ |
64 | struct drm_vmw_control_stream_arg) |
65 | struct drm_vmw_control_stream_arg) |
65 | #define DRM_IOCTL_VMW_CLAIM_STREAM \ |
66 | #define DRM_IOCTL_VMW_CLAIM_STREAM \ |
66 | DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \ |
67 | DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \ |
67 | struct drm_vmw_stream_arg) |
68 | struct drm_vmw_stream_arg) |
68 | #define DRM_IOCTL_VMW_UNREF_STREAM \ |
69 | #define DRM_IOCTL_VMW_UNREF_STREAM \ |
69 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \ |
70 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \ |
70 | struct drm_vmw_stream_arg) |
71 | struct drm_vmw_stream_arg) |
71 | 72 | ||
72 | #define DRM_IOCTL_VMW_CREATE_CONTEXT \ |
73 | #define DRM_IOCTL_VMW_CREATE_CONTEXT \ |
73 | DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \ |
74 | DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \ |
74 | struct drm_vmw_context_arg) |
75 | struct drm_vmw_context_arg) |
75 | #define DRM_IOCTL_VMW_UNREF_CONTEXT \ |
76 | #define DRM_IOCTL_VMW_UNREF_CONTEXT \ |
76 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \ |
77 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \ |
77 | struct drm_vmw_context_arg) |
78 | struct drm_vmw_context_arg) |
78 | #define DRM_IOCTL_VMW_CREATE_SURFACE \ |
79 | #define DRM_IOCTL_VMW_CREATE_SURFACE \ |
79 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \ |
80 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \ |
80 | union drm_vmw_surface_create_arg) |
81 | union drm_vmw_surface_create_arg) |
81 | #define DRM_IOCTL_VMW_UNREF_SURFACE \ |
82 | #define DRM_IOCTL_VMW_UNREF_SURFACE \ |
82 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \ |
83 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \ |
83 | struct drm_vmw_surface_arg) |
84 | struct drm_vmw_surface_arg) |
84 | #define DRM_IOCTL_VMW_REF_SURFACE \ |
85 | #define DRM_IOCTL_VMW_REF_SURFACE \ |
85 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \ |
86 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \ |
86 | union drm_vmw_surface_reference_arg) |
87 | union drm_vmw_surface_reference_arg) |
87 | #define DRM_IOCTL_VMW_EXECBUF \ |
88 | #define DRM_IOCTL_VMW_EXECBUF \ |
88 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ |
89 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ |
89 | struct drm_vmw_execbuf_arg) |
90 | struct drm_vmw_execbuf_arg) |
90 | #define DRM_IOCTL_VMW_GET_3D_CAP \ |
91 | #define DRM_IOCTL_VMW_GET_3D_CAP \ |
91 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \ |
92 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \ |
92 | struct drm_vmw_get_3d_cap_arg) |
93 | struct drm_vmw_get_3d_cap_arg) |
93 | #define DRM_IOCTL_VMW_FENCE_WAIT \ |
94 | #define DRM_IOCTL_VMW_FENCE_WAIT \ |
94 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ |
95 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ |
95 | struct drm_vmw_fence_wait_arg) |
96 | struct drm_vmw_fence_wait_arg) |
96 | #define DRM_IOCTL_VMW_FENCE_SIGNALED \ |
97 | #define DRM_IOCTL_VMW_FENCE_SIGNALED \ |
97 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \ |
98 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \ |
98 | struct drm_vmw_fence_signaled_arg) |
99 | struct drm_vmw_fence_signaled_arg) |
99 | #define DRM_IOCTL_VMW_FENCE_UNREF \ |
100 | #define DRM_IOCTL_VMW_FENCE_UNREF \ |
100 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \ |
101 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \ |
101 | struct drm_vmw_fence_arg) |
102 | struct drm_vmw_fence_arg) |
102 | #define DRM_IOCTL_VMW_FENCE_EVENT \ |
103 | #define DRM_IOCTL_VMW_FENCE_EVENT \ |
103 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \ |
104 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \ |
104 | struct drm_vmw_fence_event_arg) |
105 | struct drm_vmw_fence_event_arg) |
105 | #define DRM_IOCTL_VMW_PRESENT \ |
106 | #define DRM_IOCTL_VMW_PRESENT \ |
106 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \ |
107 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \ |
107 | struct drm_vmw_present_arg) |
108 | struct drm_vmw_present_arg) |
108 | #define DRM_IOCTL_VMW_PRESENT_READBACK \ |
109 | #define DRM_IOCTL_VMW_PRESENT_READBACK \ |
109 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ |
110 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ |
110 | struct drm_vmw_present_readback_arg) |
111 | struct drm_vmw_present_readback_arg) |
111 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ |
112 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ |
112 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ |
113 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ |
113 | struct drm_vmw_update_layout_arg) |
114 | struct drm_vmw_update_layout_arg) |
- | 115 | #define DRM_IOCTL_VMW_CREATE_SHADER \ |
|
- | 116 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ |
|
- | 117 | struct drm_vmw_shader_create_arg) |
|
- | 118 | #define DRM_IOCTL_VMW_UNREF_SHADER \ |
|
- | 119 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ |
|
- | 120 | struct drm_vmw_shader_arg) |
|
- | 121 | #define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ |
|
- | 122 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ |
|
- | 123 | union drm_vmw_gb_surface_create_arg) |
|
- | 124 | #define DRM_IOCTL_VMW_GB_SURFACE_REF \ |
|
- | 125 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ |
|
- | 126 | union drm_vmw_gb_surface_reference_arg) |
|
- | 127 | #define DRM_IOCTL_VMW_SYNCCPU \ |
|
- | 128 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ |
|
- | 129 | struct drm_vmw_synccpu_arg) |
|
114 | 130 | ||
115 | /** |
131 | /** |
116 | * The core DRM version of this macro doesn't account for |
132 | * The core DRM version of this macro doesn't account for |
117 | * DRM_COMMAND_BASE. |
133 | * DRM_COMMAND_BASE. |
118 | */ |
134 | */ |
119 | 135 | ||
120 | #define VMW_IOCTL_DEF(ioctl, func, flags) \ |
136 | #define VMW_IOCTL_DEF(ioctl, func, flags) \ |
121 | [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl} |
137 | [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl} |
122 | 138 | ||
123 | /** |
139 | /** |
124 | * Ioctl definitions. |
140 | * Ioctl definitions. |
125 | */ |
141 | */ |
126 | 142 | ||
127 | static const struct drm_ioctl_desc vmw_ioctls[] = { |
143 | static const struct drm_ioctl_desc vmw_ioctls[] = { |
128 | VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, |
144 | VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, |
129 | DRM_AUTH | DRM_UNLOCKED), |
145 | DRM_AUTH | DRM_UNLOCKED), |
130 | VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, |
146 | VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, |
131 | DRM_AUTH | DRM_UNLOCKED), |
147 | DRM_AUTH | DRM_UNLOCKED), |
132 | VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, |
148 | VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, |
133 | DRM_AUTH | DRM_UNLOCKED), |
149 | DRM_AUTH | DRM_UNLOCKED), |
134 | VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, |
150 | VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, |
135 | vmw_kms_cursor_bypass_ioctl, |
151 | vmw_kms_cursor_bypass_ioctl, |
136 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
152 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
137 | 153 | ||
138 | VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, |
154 | VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, |
139 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
155 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
140 | VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, |
156 | VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, |
141 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
157 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
142 | VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, |
158 | VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, |
143 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
159 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
144 | 160 | ||
145 | VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, |
161 | VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, |
146 | DRM_AUTH | DRM_UNLOCKED), |
162 | DRM_AUTH | DRM_UNLOCKED), |
147 | VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, |
163 | VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, |
148 | DRM_AUTH | DRM_UNLOCKED), |
164 | DRM_AUTH | DRM_UNLOCKED), |
149 | VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, |
165 | VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, |
150 | DRM_AUTH | DRM_UNLOCKED), |
166 | DRM_AUTH | DRM_UNLOCKED), |
151 | VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, |
167 | VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, |
152 | DRM_AUTH | DRM_UNLOCKED), |
168 | DRM_AUTH | DRM_UNLOCKED), |
153 | VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, |
169 | VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, |
154 | DRM_AUTH | DRM_UNLOCKED), |
170 | DRM_AUTH | DRM_UNLOCKED), |
155 | VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, |
171 | VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, |
156 | DRM_AUTH | DRM_UNLOCKED), |
172 | DRM_AUTH | DRM_UNLOCKED), |
157 | VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, |
173 | VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, |
158 | DRM_AUTH | DRM_UNLOCKED), |
174 | DRM_AUTH | DRM_UNLOCKED), |
159 | VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, |
175 | VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, |
160 | vmw_fence_obj_signaled_ioctl, |
176 | vmw_fence_obj_signaled_ioctl, |
161 | DRM_AUTH | DRM_UNLOCKED), |
177 | DRM_AUTH | DRM_UNLOCKED), |
162 | VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, |
178 | VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, |
163 | DRM_AUTH | DRM_UNLOCKED), |
179 | DRM_AUTH | DRM_UNLOCKED), |
164 | VMW_IOCTL_DEF(VMW_FENCE_EVENT, |
180 | VMW_IOCTL_DEF(VMW_FENCE_EVENT, |
165 | vmw_fence_event_ioctl, |
181 | vmw_fence_event_ioctl, |
166 | DRM_AUTH | DRM_UNLOCKED), |
182 | DRM_AUTH | DRM_UNLOCKED), |
167 | VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, |
183 | VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, |
168 | DRM_AUTH | DRM_UNLOCKED), |
184 | DRM_AUTH | DRM_UNLOCKED), |
169 | 185 | ||
170 | /* these allow direct access to the framebuffers mark as master only */ |
186 | /* these allow direct access to the framebuffers mark as master only */ |
171 | VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, |
187 | VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, |
172 | DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), |
188 | DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), |
173 | VMW_IOCTL_DEF(VMW_PRESENT_READBACK, |
189 | VMW_IOCTL_DEF(VMW_PRESENT_READBACK, |
174 | vmw_present_readback_ioctl, |
190 | vmw_present_readback_ioctl, |
175 | DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), |
191 | DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), |
176 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, |
192 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, |
177 | vmw_kms_update_layout_ioctl, |
193 | vmw_kms_update_layout_ioctl, |
178 | DRM_MASTER | DRM_UNLOCKED), |
194 | DRM_MASTER | DRM_UNLOCKED), |
- | 195 | VMW_IOCTL_DEF(VMW_CREATE_SHADER, |
|
- | 196 | vmw_shader_define_ioctl, |
|
- | 197 | DRM_AUTH | DRM_UNLOCKED), |
|
- | 198 | VMW_IOCTL_DEF(VMW_UNREF_SHADER, |
|
- | 199 | vmw_shader_destroy_ioctl, |
|
- | 200 | DRM_AUTH | DRM_UNLOCKED), |
|
- | 201 | VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, |
|
- | 202 | vmw_gb_surface_define_ioctl, |
|
- | 203 | DRM_AUTH | DRM_UNLOCKED), |
|
- | 204 | VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, |
|
- | 205 | vmw_gb_surface_reference_ioctl, |
|
- | 206 | DRM_AUTH | DRM_UNLOCKED), |
|
- | 207 | VMW_IOCTL_DEF(VMW_SYNCCPU, |
|
- | 208 | vmw_user_dmabuf_synccpu_ioctl, |
|
- | 209 | DRM_AUTH | DRM_UNLOCKED), |
|
179 | }; |
210 | }; |
180 | #endif |
211 | #endif |
181 | 212 | ||
182 | static struct pci_device_id vmw_pci_id_list[] = { |
213 | static struct pci_device_id vmw_pci_id_list[] = { |
183 | {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, |
214 | {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, |
184 | {0, 0, 0} |
215 | {0, 0, 0} |
185 | }; |
216 | }; |
186 | 217 | ||
187 | static int enable_fbdev = 1; |
218 | static int enable_fbdev = 1; |
- | 219 | static int vmw_force_iommu; |
|
- | 220 | static int vmw_restrict_iommu; |
|
- | 221 | static int vmw_force_coherent; |
|
- | 222 | static int vmw_restrict_dma_mask; |
|
188 | 223 | ||
189 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
224 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
190 | static void vmw_master_init(struct vmw_master *); |
225 | static void vmw_master_init(struct vmw_master *); |
191 | 226 | ||
192 | MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); |
227 | MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); |
193 | module_param_named(enable_fbdev, enable_fbdev, int, 0600); |
228 | module_param_named(enable_fbdev, enable_fbdev, int, 0600); |
- | 229 | MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages"); |
|
- | 230 | module_param_named(force_dma_api, vmw_force_iommu, int, 0600); |
|
- | 231 | MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); |
|
- | 232 | module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); |
|
- | 233 | MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); |
|
- | 234 | module_param_named(force_coherent, vmw_force_coherent, int, 0600); |
|
- | 235 | MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); |
|
- | 236 | module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); |
|
- | 237 | ||
194 | 238 | ||
195 | static void vmw_print_capabilities(uint32_t capabilities) |
239 | static void vmw_print_capabilities(uint32_t capabilities) |
196 | { |
240 | { |
197 | DRM_INFO("Capabilities:\n"); |
241 | DRM_INFO("Capabilities:\n"); |
198 | if (capabilities & SVGA_CAP_RECT_COPY) |
242 | if (capabilities & SVGA_CAP_RECT_COPY) |
199 | DRM_INFO(" Rect copy.\n"); |
243 | DRM_INFO(" Rect copy.\n"); |
200 | if (capabilities & SVGA_CAP_CURSOR) |
244 | if (capabilities & SVGA_CAP_CURSOR) |
201 | DRM_INFO(" Cursor.\n"); |
245 | DRM_INFO(" Cursor.\n"); |
202 | if (capabilities & SVGA_CAP_CURSOR_BYPASS) |
246 | if (capabilities & SVGA_CAP_CURSOR_BYPASS) |
203 | DRM_INFO(" Cursor bypass.\n"); |
247 | DRM_INFO(" Cursor bypass.\n"); |
204 | if (capabilities & SVGA_CAP_CURSOR_BYPASS_2) |
248 | if (capabilities & SVGA_CAP_CURSOR_BYPASS_2) |
205 | DRM_INFO(" Cursor bypass 2.\n"); |
249 | DRM_INFO(" Cursor bypass 2.\n"); |
206 | if (capabilities & SVGA_CAP_8BIT_EMULATION) |
250 | if (capabilities & SVGA_CAP_8BIT_EMULATION) |
207 | DRM_INFO(" 8bit emulation.\n"); |
251 | DRM_INFO(" 8bit emulation.\n"); |
208 | if (capabilities & SVGA_CAP_ALPHA_CURSOR) |
252 | if (capabilities & SVGA_CAP_ALPHA_CURSOR) |
209 | DRM_INFO(" Alpha cursor.\n"); |
253 | DRM_INFO(" Alpha cursor.\n"); |
210 | if (capabilities & SVGA_CAP_3D) |
254 | if (capabilities & SVGA_CAP_3D) |
211 | DRM_INFO(" 3D.\n"); |
255 | DRM_INFO(" 3D.\n"); |
212 | if (capabilities & SVGA_CAP_EXTENDED_FIFO) |
256 | if (capabilities & SVGA_CAP_EXTENDED_FIFO) |
213 | DRM_INFO(" Extended Fifo.\n"); |
257 | DRM_INFO(" Extended Fifo.\n"); |
214 | if (capabilities & SVGA_CAP_MULTIMON) |
258 | if (capabilities & SVGA_CAP_MULTIMON) |
215 | DRM_INFO(" Multimon.\n"); |
259 | DRM_INFO(" Multimon.\n"); |
216 | if (capabilities & SVGA_CAP_PITCHLOCK) |
260 | if (capabilities & SVGA_CAP_PITCHLOCK) |
217 | DRM_INFO(" Pitchlock.\n"); |
261 | DRM_INFO(" Pitchlock.\n"); |
218 | if (capabilities & SVGA_CAP_IRQMASK) |
262 | if (capabilities & SVGA_CAP_IRQMASK) |
219 | DRM_INFO(" Irq mask.\n"); |
263 | DRM_INFO(" Irq mask.\n"); |
220 | if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) |
264 | if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) |
221 | DRM_INFO(" Display Topology.\n"); |
265 | DRM_INFO(" Display Topology.\n"); |
222 | if (capabilities & SVGA_CAP_GMR) |
266 | if (capabilities & SVGA_CAP_GMR) |
223 | DRM_INFO(" GMR.\n"); |
267 | DRM_INFO(" GMR.\n"); |
224 | if (capabilities & SVGA_CAP_TRACES) |
268 | if (capabilities & SVGA_CAP_TRACES) |
225 | DRM_INFO(" Traces.\n"); |
269 | DRM_INFO(" Traces.\n"); |
226 | if (capabilities & SVGA_CAP_GMR2) |
270 | if (capabilities & SVGA_CAP_GMR2) |
227 | DRM_INFO(" GMR2.\n"); |
271 | DRM_INFO(" GMR2.\n"); |
228 | if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) |
272 | if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) |
229 | DRM_INFO(" Screen Object 2.\n"); |
273 | DRM_INFO(" Screen Object 2.\n"); |
- | 274 | if (capabilities & SVGA_CAP_COMMAND_BUFFERS) |
|
- | 275 | DRM_INFO(" Command Buffers.\n"); |
|
- | 276 | if (capabilities & SVGA_CAP_CMD_BUFFERS_2) |
|
- | 277 | DRM_INFO(" Command Buffers 2.\n"); |
|
- | 278 | if (capabilities & SVGA_CAP_GBOBJECTS) |
|
- | 279 | DRM_INFO(" Guest Backed Resources.\n"); |
|
230 | } |
280 | } |
231 | - | ||
232 | 281 | ||
233 | /** |
282 | /** |
234 | * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at |
- | |
235 | * the start of a buffer object. |
283 | * vmw_dummy_query_bo_create - create a bo to hold a dummy query result |
236 | * |
284 | * |
237 | * @dev_priv: The device private structure. |
285 | * @dev_priv: A device private structure. |
238 | * |
286 | * |
- | 287 | * This function creates a small buffer object that holds the query |
|
239 | * This function will idle the buffer using an uninterruptible wait, then |
288 | * result for dummy queries emitted as query barriers. |
240 | * map the first page and initialize a pending occlusion query result structure, |
289 | * The function will then map the first page and initialize a pending |
- | 290 | * occlusion query result structure, Finally it will unmap the buffer. |
|
241 | * Finally it will unmap the buffer. |
291 | * No interruptible waits are done within this function. |
242 | * |
- | |
243 | * TODO: Since we're only mapping a single page, we should optimize the map |
292 | * |
244 | * to use kmap_atomic / iomap_atomic. |
293 | * Returns an error if bo creation or initialization fails. |
245 | */ |
294 | */ |
246 | static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv) |
295 | static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) |
- | 296 | { |
|
- | 297 | int ret; |
|
247 | { |
298 | struct ttm_buffer_object *bo; |
248 | struct ttm_bo_kmap_obj map; |
299 | struct ttm_bo_kmap_obj map; |
249 | volatile SVGA3dQueryResult *result; |
300 | volatile SVGA3dQueryResult *result; |
250 | bool dummy; |
301 | bool dummy; |
251 | int ret; |
- | |
252 | struct ttm_bo_device *bdev = &dev_priv->bdev; |
- | |
253 | struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; |
- | |
254 | - | ||
255 | ttm_bo_reserve(bo, false, false, false, 0); |
- | |
256 | spin_lock(&bdev->fence_lock); |
- | |
257 | ret = 0; //ttm_bo_wait(bo, false, false, false); |
- | |
258 | spin_unlock(&bdev->fence_lock); |
- | |
259 | if (unlikely(ret != 0)) |
- | |
260 | (void) vmw_fallback_wait(dev_priv, false, true, 0, false, |
- | |
261 | 10*HZ); |
302 | |
- | 303 | /* |
|
- | 304 | * Create the bo as pinned, so that a tryreserve will |
|
- | 305 | * immediately succeed. This is because we're the only |
|
- | 306 | * user of the bo currently. |
|
- | 307 | */ |
|
- | 308 | ret = ttm_bo_create(&dev_priv->bdev, |
|
- | 309 | PAGE_SIZE, |
|
- | 310 | ttm_bo_type_device, |
|
- | 311 | &vmw_sys_ne_placement, |
|
- | 312 | 0, false, NULL, |
|
- | 313 | &bo); |
|
- | 314 | ||
- | 315 | if (unlikely(ret != 0)) |
|
- | 316 | return ret; |
|
- | 317 | ||
- | 318 | ret = ttm_bo_reserve(bo, false, true, false, 0); |
|
- | 319 | BUG_ON(ret != 0); |
|
262 | /* |
320 | |
263 | ret = ttm_bo_kmap(bo, 0, 1, &map); |
321 | ret = ttm_bo_kmap(bo, 0, 1, &map); |
264 | if (likely(ret == 0)) { |
322 | if (likely(ret == 0)) { |
265 | result = ttm_kmap_obj_virtual(&map, &dummy); |
323 | result = ttm_kmap_obj_virtual(&map, &dummy); |
266 | result->totalSize = sizeof(*result); |
324 | result->totalSize = sizeof(*result); |
267 | result->state = SVGA3D_QUERYSTATE_PENDING; |
325 | result->state = SVGA3D_QUERYSTATE_PENDING; |
268 | result->result32 = 0xff; |
326 | result->result32 = 0xff; |
269 | ttm_bo_kunmap(&map); |
327 | ttm_bo_kunmap(&map); |
270 | } else |
- | |
271 | DRM_ERROR("Dummy query buffer map failed.\n"); |
- | |
272 | */ |
- | |
273 | ttm_bo_unreserve(bo); |
- | |
274 | } |
328 | } |
- | 329 | vmw_bo_pin(bo, false); |
|
- | 330 | ttm_bo_unreserve(bo); |
|
- | 331 | ||
- | 332 | if (unlikely(ret != 0)) { |
|
- | 333 | DRM_ERROR("Dummy query buffer map failed.\n"); |
|
- | 334 | ttm_bo_unref(&bo); |
|
- | 335 | } else |
|
275 | - | ||
276 | - | ||
277 | /** |
- | |
278 | * vmw_dummy_query_bo_create - create a bo to hold a dummy query result |
- | |
279 | * |
- | |
280 | * @dev_priv: A device private structure. |
- | |
281 | * |
- | |
282 | * This function creates a small buffer object that holds the query |
- | |
283 | * result for dummy queries emitted as query barriers. |
- | |
284 | * No interruptible waits are done within this function. |
- | |
285 | * |
- | |
286 | * Returns an error if bo creation fails. |
- | |
287 | */ |
- | |
288 | static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) |
- | |
289 | { |
336 | dev_priv->dummy_query_bo = bo; |
290 | return ttm_bo_create(&dev_priv->bdev, |
- | |
291 | PAGE_SIZE, |
- | |
292 | ttm_bo_type_device, |
- | |
293 | &vmw_vram_sys_placement, |
- | |
294 | 0, false, NULL, |
337 | |
295 | &dev_priv->dummy_query_bo); |
- | |
296 | } |
338 | return ret; |
297 | 339 | } |
|
298 | 340 | ||
299 | static int vmw_request_device(struct vmw_private *dev_priv) |
341 | static int vmw_request_device(struct vmw_private *dev_priv) |
300 | { |
342 | { |
301 | int ret; |
343 | int ret; |
302 | ENTER(); |
344 | ENTER(); |
303 | 345 | ||
304 | ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); |
346 | ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); |
305 | if (unlikely(ret != 0)) { |
347 | if (unlikely(ret != 0)) { |
306 | DRM_ERROR("Unable to initialize FIFO.\n"); |
348 | DRM_ERROR("Unable to initialize FIFO.\n"); |
307 | return ret; |
349 | return ret; |
308 | } |
350 | } |
309 | // vmw_fence_fifo_up(dev_priv->fman); |
351 | // vmw_fence_fifo_up(dev_priv->fman); |
310 | // ret = vmw_dummy_query_bo_create(dev_priv); |
352 | // ret = vmw_dummy_query_bo_create(dev_priv); |
311 | // if (unlikely(ret != 0)) |
353 | // if (unlikely(ret != 0)) |
312 | // goto out_no_query_bo; |
354 | // goto out_no_query_bo; |
313 | // vmw_dummy_query_bo_prepare(dev_priv); |
355 | // vmw_dummy_query_bo_prepare(dev_priv); |
314 | 356 | ||
315 | LEAVE(); |
357 | LEAVE(); |
316 | 358 | ||
317 | return 0; |
359 | return 0; |
318 | 360 | ||
319 | out_no_query_bo: |
361 | out_no_query_bo: |
320 | vmw_fence_fifo_down(dev_priv->fman); |
362 | vmw_fence_fifo_down(dev_priv->fman); |
321 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
363 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
322 | return ret; |
364 | return ret; |
323 | } |
365 | } |
324 | 366 | ||
325 | static void vmw_release_device(struct vmw_private *dev_priv) |
367 | static void vmw_release_device(struct vmw_private *dev_priv) |
326 | { |
368 | { |
327 | /* |
369 | /* |
328 | * Previous destructions should've released |
370 | * Previous destructions should've released |
329 | * the pinned bo. |
371 | * the pinned bo. |
330 | */ |
372 | */ |
331 | 373 | ||
332 | BUG_ON(dev_priv->pinned_bo != NULL); |
374 | BUG_ON(dev_priv->pinned_bo != NULL); |
333 | 375 | ||
334 | ttm_bo_unref(&dev_priv->dummy_query_bo); |
376 | ttm_bo_unref(&dev_priv->dummy_query_bo); |
335 | vmw_fence_fifo_down(dev_priv->fman); |
377 | vmw_fence_fifo_down(dev_priv->fman); |
336 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
378 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
337 | } |
379 | } |
- | 380 | ||
338 | 381 | ||
339 | /** |
382 | /** |
340 | * Increase the 3d resource refcount. |
383 | * Increase the 3d resource refcount. |
341 | * If the count was prevously zero, initialize the fifo, switching to svga |
384 | * If the count was prevously zero, initialize the fifo, switching to svga |
342 | * mode. Note that the master holds a ref as well, and may request an |
385 | * mode. Note that the master holds a ref as well, and may request an |
343 | * explicit switch to svga mode if fb is not running, using @unhide_svga. |
386 | * explicit switch to svga mode if fb is not running, using @unhide_svga. |
344 | */ |
387 | */ |
345 | int vmw_3d_resource_inc(struct vmw_private *dev_priv, |
388 | int vmw_3d_resource_inc(struct vmw_private *dev_priv, |
346 | bool unhide_svga) |
389 | bool unhide_svga) |
347 | { |
390 | { |
348 | int ret = 0; |
391 | int ret = 0; |
349 | 392 | ||
350 | ENTER(); |
393 | ENTER(); |
351 | 394 | ||
352 | mutex_lock(&dev_priv->release_mutex); |
395 | mutex_lock(&dev_priv->release_mutex); |
353 | if (unlikely(dev_priv->num_3d_resources++ == 0)) { |
396 | if (unlikely(dev_priv->num_3d_resources++ == 0)) { |
354 | ret = vmw_request_device(dev_priv); |
397 | ret = vmw_request_device(dev_priv); |
355 | if (unlikely(ret != 0)) |
398 | if (unlikely(ret != 0)) |
356 | --dev_priv->num_3d_resources; |
399 | --dev_priv->num_3d_resources; |
357 | } else if (unhide_svga) { |
400 | } else if (unhide_svga) { |
358 | mutex_lock(&dev_priv->hw_mutex); |
401 | mutex_lock(&dev_priv->hw_mutex); |
359 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
402 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
360 | vmw_read(dev_priv, SVGA_REG_ENABLE) & |
403 | vmw_read(dev_priv, SVGA_REG_ENABLE) & |
361 | ~SVGA_REG_ENABLE_HIDE); |
404 | ~SVGA_REG_ENABLE_HIDE); |
362 | mutex_unlock(&dev_priv->hw_mutex); |
405 | mutex_unlock(&dev_priv->hw_mutex); |
363 | } |
406 | } |
364 | 407 | ||
365 | mutex_unlock(&dev_priv->release_mutex); |
408 | mutex_unlock(&dev_priv->release_mutex); |
366 | LEAVE(); |
409 | LEAVE(); |
367 | return ret; |
410 | return ret; |
368 | } |
411 | } |
369 | 412 | ||
/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 *
 * @dev_priv:  Pointer to the device private structure.
 * @hide_svga: If users remain, set SVGA_REG_ENABLE_HIDE to hide the
 *             svga output instead of releasing the device.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
			 bool hide_svga)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		/* Last user gone: release the device (back to vga). */
		vmw_release_device(dev_priv);
	else if (hide_svga) {
		/* Users remain: read-modify-write to set the HIDE bit. */
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
			  SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	/* Snapshot the count under the lock; negative means underflow. */
	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}
399 | 442 | ||
/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 *
 * @dev_priv: Pointer to the device private structure; fb_max_width and
 *            fb_max_height must already have been read from the device.
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	/* Never go below the driver's minimum supported size. */
	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}
- | 477 | ||
/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private.
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * Restrict DMA to 44 bits when the Intel IOMMU is enabled and we
	 * are either a 32-bit kernel or the vmw_restrict_dma_mask option
	 * was given.
	 */
	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
/* No Intel IOMMU support compiled in: no restriction needed. */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif
434 | 504 | ||
/**
 * vmw_driver_load - DRM driver load callback for the vmwgfx device.
 *
 * @dev:     The drm device being loaded.
 * @chipset: Chipset id passed through by the DRM core.
 *
 * Allocates and initializes the per-device private structure, negotiates
 * the SVGA device id, reads capability and size registers, sets up the
 * TTM memory managers (VRAM, GMR, MOB), maps the MMIO/FIFO region and
 * starts the KMS subsystem. Several upstream steps (IRQ, fence manager,
 * TTM object device) are compiled out or commented out in this port.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	/*
	 * Stays false here: the vmw_dma_select_mode() call that could set
	 * it is commented out below.
	 */
	bool refuse_dma = false;


	ENTER();

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	/* Start well below zero so any real seqno reads as "newer". */
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	rwlock_init(&dev_priv->resource_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	atomic_set(&dev_priv->fifo_queue_waiters, 0);

	dev_priv->used_memory_size = 0;

	/* BAR 0: I/O ports, BAR 1: VRAM aperture, BAR 2: MMIO/FIFO. */
	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	/*
	 * NOTE(review): %x assumes these fields are 32-bit; confirm against
	 * the declarations in the driver header.
	 */
	printk("io: %x vram: %x mmio: %x\n",dev_priv->io_start,
            dev_priv->vram_start,dev_priv->mmio_start);

	dev_priv->enable_fb = enable_fbdev;

	mutex_lock(&dev_priv->hw_mutex);

	/* Negotiate the SVGA device version; only SVGA_ID_2 is supported. */
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
//	ret = vmw_dma_select_mode(dev_priv);
//	if (unlikely(ret != 0)) {
//		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
//		refuse_dma = true;
//	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/* The register reports KiB; convert to pages. */
		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
	} else
		dev_priv->prim_bb_mem = dev_priv->vram_size;

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0)) {
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	/* The primary bounding box memory can never be below VRAM size. */
	if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
		dev_priv->prim_bb_mem = dev_priv->vram_size;

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	dev_priv->active_master = &dev_priv->fbdev_master;


	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->has_gmr = true;
	/*
	 * NOTE(review): VMW_PL_GMR is passed as the size argument here;
	 * presumably the GMR placement's manager ignores it — confirm
	 * against the gmrid manager implementation.
	 */
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}
	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	/* TTM object device init is disabled in this port. */
//	dev_priv->tdev = ttm_object_device_init
//		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

//	if (unlikely(dev_priv->tdev == NULL)) {
//		DRM_ERROR("Unable to initialize TTM object management.\n");
//		ret = -ENOMEM;
//		goto out_err4;
//	}

	dev->dev_private = dev_priv;

#if 0

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	vmw_kms_save_vga(dev_priv);
#endif

	/* Start kms and overlay systems, needs fifo. */
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;

	if (dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			goto out_no_fifo;
//		vmw_fb_init(dev_priv);
	}

	main_device = dev;

	LEAVE();
	return 0;

	/*
	 * Error unwind. Most cleanup steps are commented out in this port,
	 * so only the private struct itself is freed.
	 */
out_no_fifo:
//	vmw_overlay_close(dev_priv);
//	vmw_kms_close(dev_priv);
out_no_kms:
//	vmw_kms_restore_vga(dev_priv);
//	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
//	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
//		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
//	if (dev_priv->stealth)
//		pci_release_region(dev->pdev, 2);
//	else
//		pci_release_regions(dev->pdev);
out_no_device:
//	ttm_object_device_release(&dev_priv->tdev);
out_err4:
//	iounmap(dev_priv->mmio_virt);
out_err3:
//	arch_phys_wc_del(dev_priv->mmio_mtrr);
//	if (dev_priv->has_gmr)
//		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
//	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
//	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
//	vmw_ttm_global_release(dev_priv);
out_err0:
//	for (i = vmw_res_context; i < vmw_res_max; ++i)
//		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
	return ret;
}
674 | 778 | ||
675 | #if 0 |
#if 0
/*
 * DRM driver unload callback (compiled out in this port).
 * Tears down everything vmw_driver_load() set up, in reverse order:
 * fbdev/KMS, fence manager, IRQ, PCI regions, TTM managers, then the
 * private structure itself.
 */
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	if (dev_priv->ctx.cmd_bounce)
		vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		/* Drop the refcount taken in vmw_driver_load(). */
		vmw_3d_resource_dec(dev_priv, false);
	}
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	/* Clean the optional managers only when they were set up. */
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}
718 | 824 | ||
719 | static void vmw_preclose(struct drm_device *dev, |
825 | static void vmw_preclose(struct drm_device *dev, |
720 | struct drm_file *file_priv) |
826 | struct drm_file *file_priv) |
721 | { |
827 | { |
722 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); |
828 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); |
723 | struct vmw_private *dev_priv = vmw_priv(dev); |
829 | struct vmw_private *dev_priv = vmw_priv(dev); |
724 | 830 | ||
725 | vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events); |
831 | vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events); |
726 | } |
832 | } |
727 | 833 | ||
/*
 * DRM postclose callback (inside the #if 0 region): releases per-file
 * state, first unlocking and dropping a locked master if this client
 * held one.
 */
static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		/* Release the VT lock before dropping the master ref. */
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}
#endif
740 | 853 | ||
/*
 * DRM per-file open callback: allocates and attaches the vmwgfx
 * file-private structure. The TTM object-file setup is commented out in
 * this port, so only the fence-event list is initialized.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
//	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
//	if (unlikely(vmw_fp->tfile == NULL))
//		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;
//	dev_priv->bdev.dev_mapping = dev->dev_mapping;

	return 0;

	/*
	 * NOTE(review): unreachable while the goto above is commented out;
	 * kept so the disabled tfile path can be re-enabled as-is.
	 */
out_no_tfile:
	kfree(vmw_fp);
	return ret;
}
765 | 878 | ||
#if 0
/*
 * ioctl dispatch wrapper (compiled out in this port): validates that a
 * driver-private ioctl's encoded command matches the registered
 * descriptor before handing off to the DRM core.
 */
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		/* Reject calls whose size/direction encoding is wrong. */
		if (unlikely(ioctl->cmd_drv != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
	}

	return drm_ioctl(filp, cmd, arg);
}
792 | 905 | ||
/*
 * DRM lastclose callback (inside the #if 0 region): detaches the
 * framebuffer from every CRTC by applying an empty mode set.
 */
static void vmw_lastclose(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	/* An all-NULL mode set disables the CRTC it is applied to. */
	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = drm_mode_set_config_internal(&set);
		WARN_ON(ret != 0);
	}

}
#endif
813 | 927 | ||
/*
 * Initialize a struct vmw_master: empty fb-surface list plus its mutex.
 * The ttm lock init is disabled in this port.
 */
static void vmw_master_init(struct vmw_master *vmaster)
{
//	ttm_lock_init(&vmaster->lock);
	INIT_LIST_HEAD(&vmaster->fb_surf);
	mutex_init(&vmaster->fb_surf_mutex);
}
820 | 934 | ||
821 | static int vmw_master_create(struct drm_device *dev, |
935 | static int vmw_master_create(struct drm_device *dev, |
822 | struct drm_master *master) |
936 | struct drm_master *master) |
823 | { |
937 | { |
824 | struct vmw_master *vmaster; |
938 | struct vmw_master *vmaster; |
825 | 939 | ||
826 | vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); |
940 | vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); |
827 | if (unlikely(vmaster == NULL)) |
941 | if (unlikely(vmaster == NULL)) |
828 | return -ENOMEM; |
942 | return -ENOMEM; |
829 | 943 | ||
830 | vmw_master_init(vmaster); |
944 | vmw_master_init(vmaster); |
831 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
945 | // ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
832 | master->driver_priv = vmaster; |
946 | master->driver_priv = vmaster; |
833 | 947 | ||
834 | return 0; |
948 | return 0; |
835 | } |
949 | } |
836 | 950 | ||
837 | static void vmw_master_destroy(struct drm_device *dev, |
951 | static void vmw_master_destroy(struct drm_device *dev, |
838 | struct drm_master *master) |
952 | struct drm_master *master) |
839 | { |
953 | { |
840 | struct vmw_master *vmaster = vmw_master(master); |
954 | struct vmw_master *vmaster = vmw_master(master); |
841 | 955 | ||
842 | master->driver_priv = NULL; |
956 | master->driver_priv = NULL; |
843 | kfree(vmaster); |
957 | kfree(vmaster); |
844 | } |
958 | } |
845 | 959 | ||
846 | 960 | #if 0 |
|
847 | static int vmw_master_set(struct drm_device *dev, |
961 | static int vmw_master_set(struct drm_device *dev, |
848 | struct drm_file *file_priv, |
962 | struct drm_file *file_priv, |
849 | bool from_open) |
963 | bool from_open) |
850 | { |
964 | { |
851 | struct vmw_private *dev_priv = vmw_priv(dev); |
965 | struct vmw_private *dev_priv = vmw_priv(dev); |
852 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); |
966 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); |
853 | struct vmw_master *active = dev_priv->active_master; |
967 | struct vmw_master *active = dev_priv->active_master; |
854 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
968 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
855 | int ret = 0; |
969 | int ret = 0; |
856 | 970 | ||
857 | if (!dev_priv->enable_fb) { |
971 | if (!dev_priv->enable_fb) { |
858 | ret = vmw_3d_resource_inc(dev_priv, true); |
972 | ret = vmw_3d_resource_inc(dev_priv, true); |
859 | if (unlikely(ret != 0)) |
973 | if (unlikely(ret != 0)) |
860 | return ret; |
974 | return ret; |
861 | vmw_kms_save_vga(dev_priv); |
975 | vmw_kms_save_vga(dev_priv); |
862 | mutex_lock(&dev_priv->hw_mutex); |
976 | mutex_lock(&dev_priv->hw_mutex); |
863 | vmw_write(dev_priv, SVGA_REG_TRACES, 0); |
977 | vmw_write(dev_priv, SVGA_REG_TRACES, 0); |
864 | mutex_unlock(&dev_priv->hw_mutex); |
978 | mutex_unlock(&dev_priv->hw_mutex); |
865 | } |
979 | } |
866 | 980 | ||
867 | if (active) { |
981 | if (active) { |
868 | BUG_ON(active != &dev_priv->fbdev_master); |
982 | BUG_ON(active != &dev_priv->fbdev_master); |
869 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); |
983 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); |
870 | if (unlikely(ret != 0)) |
984 | if (unlikely(ret != 0)) |
871 | goto out_no_active_lock; |
985 | goto out_no_active_lock; |
872 | 986 | ||
873 | ttm_lock_set_kill(&active->lock, true, SIGTERM); |
987 | ttm_lock_set_kill(&active->lock, true, SIGTERM); |
874 | ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); |
988 | ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); |
875 | if (unlikely(ret != 0)) { |
989 | if (unlikely(ret != 0)) { |
876 | DRM_ERROR("Unable to clean VRAM on " |
990 | DRM_ERROR("Unable to clean VRAM on " |
877 | "master drop.\n"); |
991 | "master drop.\n"); |
878 | } |
992 | } |
879 | 993 | ||
880 | dev_priv->active_master = NULL; |
994 | dev_priv->active_master = NULL; |
881 | } |
995 | } |
882 | 996 | ||
883 | ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); |
997 | ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); |
884 | if (!from_open) { |
998 | if (!from_open) { |
885 | ttm_vt_unlock(&vmaster->lock); |
999 | ttm_vt_unlock(&vmaster->lock); |
886 | BUG_ON(vmw_fp->locked_master != file_priv->master); |
1000 | BUG_ON(vmw_fp->locked_master != file_priv->master); |
887 | drm_master_put(&vmw_fp->locked_master); |
1001 | drm_master_put(&vmw_fp->locked_master); |
888 | } |
1002 | } |
889 | 1003 | ||
890 | dev_priv->active_master = vmaster; |
1004 | dev_priv->active_master = vmaster; |
891 | 1005 | ||
892 | return 0; |
1006 | return 0; |
893 | 1007 | ||
894 | out_no_active_lock: |
1008 | out_no_active_lock: |
895 | if (!dev_priv->enable_fb) { |
1009 | if (!dev_priv->enable_fb) { |
896 | vmw_kms_restore_vga(dev_priv); |
1010 | vmw_kms_restore_vga(dev_priv); |
897 | vmw_3d_resource_dec(dev_priv, true); |
1011 | vmw_3d_resource_dec(dev_priv, true); |
898 | mutex_lock(&dev_priv->hw_mutex); |
1012 | mutex_lock(&dev_priv->hw_mutex); |
899 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); |
1013 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); |
900 | mutex_unlock(&dev_priv->hw_mutex); |
1014 | mutex_unlock(&dev_priv->hw_mutex); |
901 | } |
1015 | } |
902 | return ret; |
1016 | return ret; |
903 | } |
1017 | } |
904 | 1018 | ||
905 | static void vmw_master_drop(struct drm_device *dev, |
1019 | static void vmw_master_drop(struct drm_device *dev, |
906 | struct drm_file *file_priv, |
1020 | struct drm_file *file_priv, |
907 | bool from_release) |
1021 | bool from_release) |
908 | { |
1022 | { |
909 | struct vmw_private *dev_priv = vmw_priv(dev); |
1023 | struct vmw_private *dev_priv = vmw_priv(dev); |
910 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); |
1024 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); |
911 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
1025 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
912 | int ret; |
1026 | int ret; |
913 | 1027 | ||
914 | /** |
1028 | /** |
915 | * Make sure the master doesn't disappear while we have |
1029 | * Make sure the master doesn't disappear while we have |
916 | * it locked. |
1030 | * it locked. |
917 | */ |
1031 | */ |
918 | 1032 | ||
919 | vmw_fp->locked_master = drm_master_get(file_priv->master); |
1033 | vmw_fp->locked_master = drm_master_get(file_priv->master); |
920 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); |
1034 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); |
921 | vmw_execbuf_release_pinned_bo(dev_priv); |
- | |
922 | - | ||
923 | if (unlikely((ret != 0))) { |
1035 | if (unlikely((ret != 0))) { |
924 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); |
1036 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); |
925 | drm_master_put(&vmw_fp->locked_master); |
1037 | drm_master_put(&vmw_fp->locked_master); |
926 | } |
1038 | } |
927 | 1039 | ||
928 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
1040 | vmw_execbuf_release_pinned_bo(dev_priv); |
929 | 1041 | ||
930 | if (!dev_priv->enable_fb) { |
1042 | if (!dev_priv->enable_fb) { |
931 | ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); |
1043 | ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); |
932 | if (unlikely(ret != 0)) |
1044 | if (unlikely(ret != 0)) |
933 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); |
1045 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); |
934 | vmw_kms_restore_vga(dev_priv); |
1046 | vmw_kms_restore_vga(dev_priv); |
935 | vmw_3d_resource_dec(dev_priv, true); |
1047 | vmw_3d_resource_dec(dev_priv, true); |
936 | mutex_lock(&dev_priv->hw_mutex); |
1048 | mutex_lock(&dev_priv->hw_mutex); |
937 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); |
1049 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); |
938 | mutex_unlock(&dev_priv->hw_mutex); |
1050 | mutex_unlock(&dev_priv->hw_mutex); |
939 | } |
1051 | } |
940 | 1052 | ||
941 | dev_priv->active_master = &dev_priv->fbdev_master; |
1053 | dev_priv->active_master = &dev_priv->fbdev_master; |
942 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); |
1054 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); |
943 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); |
1055 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); |
944 | 1056 | ||
945 | if (dev_priv->enable_fb) |
1057 | if (dev_priv->enable_fb) |
946 | vmw_fb_on(dev_priv); |
1058 | vmw_fb_on(dev_priv); |
947 | } |
1059 | } |
948 | 1060 | ||
949 | 1061 | ||
950 | static void vmw_remove(struct pci_dev *pdev) |
1062 | static void vmw_remove(struct pci_dev *pdev) |
951 | { |
1063 | { |
952 | struct drm_device *dev = pci_get_drvdata(pdev); |
1064 | struct drm_device *dev = pci_get_drvdata(pdev); |
953 | 1065 | ||
954 | drm_put_dev(dev); |
1066 | drm_put_dev(dev); |
955 | } |
1067 | } |
956 | 1068 | ||
957 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, |
1069 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, |
958 | void *ptr) |
1070 | void *ptr) |
959 | { |
1071 | { |
960 | struct vmw_private *dev_priv = |
1072 | struct vmw_private *dev_priv = |
961 | container_of(nb, struct vmw_private, pm_nb); |
1073 | container_of(nb, struct vmw_private, pm_nb); |
962 | struct vmw_master *vmaster = dev_priv->active_master; |
1074 | struct vmw_master *vmaster = dev_priv->active_master; |
963 | 1075 | ||
964 | switch (val) { |
1076 | switch (val) { |
965 | case PM_HIBERNATION_PREPARE: |
1077 | case PM_HIBERNATION_PREPARE: |
966 | case PM_SUSPEND_PREPARE: |
1078 | case PM_SUSPEND_PREPARE: |
967 | ttm_suspend_lock(&vmaster->lock); |
1079 | ttm_suspend_lock(&vmaster->lock); |
968 | 1080 | ||
969 | /** |
1081 | /** |
970 | * This empties VRAM and unbinds all GMR bindings. |
1082 | * This empties VRAM and unbinds all GMR bindings. |
971 | * Buffer contents is moved to swappable memory. |
1083 | * Buffer contents is moved to swappable memory. |
972 | */ |
1084 | */ |
973 | vmw_execbuf_release_pinned_bo(dev_priv); |
1085 | vmw_execbuf_release_pinned_bo(dev_priv); |
974 | vmw_resource_evict_all(dev_priv); |
1086 | vmw_resource_evict_all(dev_priv); |
975 | ttm_bo_swapout_all(&dev_priv->bdev); |
1087 | ttm_bo_swapout_all(&dev_priv->bdev); |
976 | 1088 | ||
977 | break; |
1089 | break; |
978 | case PM_POST_HIBERNATION: |
1090 | case PM_POST_HIBERNATION: |
979 | case PM_POST_SUSPEND: |
1091 | case PM_POST_SUSPEND: |
980 | case PM_POST_RESTORE: |
1092 | case PM_POST_RESTORE: |
981 | ttm_suspend_unlock(&vmaster->lock); |
1093 | ttm_suspend_unlock(&vmaster->lock); |
982 | 1094 | ||
983 | break; |
1095 | break; |
984 | case PM_RESTORE_PREPARE: |
1096 | case PM_RESTORE_PREPARE: |
985 | break; |
1097 | break; |
986 | default: |
1098 | default: |
987 | break; |
1099 | break; |
988 | } |
1100 | } |
989 | return 0; |
1101 | return 0; |
990 | } |
1102 | } |
991 | 1103 | ||
992 | /** |
1104 | /** |
993 | * These might not be needed with the virtual SVGA device. |
1105 | * These might not be needed with the virtual SVGA device. |
994 | */ |
1106 | */ |
995 | 1107 | ||
996 | static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) |
1108 | static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) |
997 | { |
1109 | { |
998 | struct drm_device *dev = pci_get_drvdata(pdev); |
1110 | struct drm_device *dev = pci_get_drvdata(pdev); |
999 | struct vmw_private *dev_priv = vmw_priv(dev); |
1111 | struct vmw_private *dev_priv = vmw_priv(dev); |
1000 | 1112 | ||
1001 | if (dev_priv->num_3d_resources != 0) { |
1113 | if (dev_priv->num_3d_resources != 0) { |
1002 | DRM_INFO("Can't suspend or hibernate " |
1114 | DRM_INFO("Can't suspend or hibernate " |
1003 | "while 3D resources are active.\n"); |
1115 | "while 3D resources are active.\n"); |
1004 | return -EBUSY; |
1116 | return -EBUSY; |
1005 | } |
1117 | } |
1006 | 1118 | ||
1007 | pci_save_state(pdev); |
1119 | pci_save_state(pdev); |
1008 | pci_disable_device(pdev); |
1120 | pci_disable_device(pdev); |
1009 | pci_set_power_state(pdev, PCI_D3hot); |
1121 | pci_set_power_state(pdev, PCI_D3hot); |
1010 | return 0; |
1122 | return 0; |
1011 | } |
1123 | } |
1012 | 1124 | ||
1013 | static int vmw_pci_resume(struct pci_dev *pdev) |
1125 | static int vmw_pci_resume(struct pci_dev *pdev) |
1014 | { |
1126 | { |
1015 | pci_set_power_state(pdev, PCI_D0); |
1127 | pci_set_power_state(pdev, PCI_D0); |
1016 | pci_restore_state(pdev); |
1128 | pci_restore_state(pdev); |
1017 | return pci_enable_device(pdev); |
1129 | return pci_enable_device(pdev); |
1018 | } |
1130 | } |
1019 | 1131 | ||
1020 | static int vmw_pm_suspend(struct device *kdev) |
1132 | static int vmw_pm_suspend(struct device *kdev) |
1021 | { |
1133 | { |
1022 | struct pci_dev *pdev = to_pci_dev(kdev); |
1134 | struct pci_dev *pdev = to_pci_dev(kdev); |
1023 | struct pm_message dummy; |
1135 | struct pm_message dummy; |
1024 | 1136 | ||
1025 | dummy.event = 0; |
1137 | dummy.event = 0; |
1026 | 1138 | ||
1027 | return vmw_pci_suspend(pdev, dummy); |
1139 | return vmw_pci_suspend(pdev, dummy); |
1028 | } |
1140 | } |
1029 | 1141 | ||
1030 | static int vmw_pm_resume(struct device *kdev) |
1142 | static int vmw_pm_resume(struct device *kdev) |
1031 | { |
1143 | { |
1032 | struct pci_dev *pdev = to_pci_dev(kdev); |
1144 | struct pci_dev *pdev = to_pci_dev(kdev); |
1033 | 1145 | ||
1034 | return vmw_pci_resume(pdev); |
1146 | return vmw_pci_resume(pdev); |
1035 | } |
1147 | } |
1036 | 1148 | ||
1037 | static int vmw_pm_prepare(struct device *kdev) |
1149 | static int vmw_pm_prepare(struct device *kdev) |
1038 | { |
1150 | { |
1039 | struct pci_dev *pdev = to_pci_dev(kdev); |
1151 | struct pci_dev *pdev = to_pci_dev(kdev); |
1040 | struct drm_device *dev = pci_get_drvdata(pdev); |
1152 | struct drm_device *dev = pci_get_drvdata(pdev); |
1041 | struct vmw_private *dev_priv = vmw_priv(dev); |
1153 | struct vmw_private *dev_priv = vmw_priv(dev); |
1042 | 1154 | ||
1043 | /** |
1155 | /** |
1044 | * Release 3d reference held by fbdev and potentially |
1156 | * Release 3d reference held by fbdev and potentially |
1045 | * stop fifo. |
1157 | * stop fifo. |
1046 | */ |
1158 | */ |
1047 | dev_priv->suspended = true; |
1159 | dev_priv->suspended = true; |
1048 | if (dev_priv->enable_fb) |
1160 | if (dev_priv->enable_fb) |
1049 | vmw_3d_resource_dec(dev_priv, true); |
1161 | vmw_3d_resource_dec(dev_priv, true); |
1050 | 1162 | ||
1051 | if (dev_priv->num_3d_resources != 0) { |
1163 | if (dev_priv->num_3d_resources != 0) { |
1052 | 1164 | ||
1053 | DRM_INFO("Can't suspend or hibernate " |
1165 | DRM_INFO("Can't suspend or hibernate " |
1054 | "while 3D resources are active.\n"); |
1166 | "while 3D resources are active.\n"); |
1055 | 1167 | ||
1056 | if (dev_priv->enable_fb) |
1168 | if (dev_priv->enable_fb) |
1057 | vmw_3d_resource_inc(dev_priv, true); |
1169 | vmw_3d_resource_inc(dev_priv, true); |
1058 | dev_priv->suspended = false; |
1170 | dev_priv->suspended = false; |
1059 | return -EBUSY; |
1171 | return -EBUSY; |
1060 | } |
1172 | } |
1061 | 1173 | ||
1062 | return 0; |
1174 | return 0; |
1063 | } |
1175 | } |
1064 | 1176 | ||
1065 | #endif |
1177 | #endif |
1066 | 1178 | ||
1067 | 1179 | ||
1068 | static struct drm_driver driver = { |
1180 | static struct drm_driver driver = { |
1069 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | |
1181 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | |
1070 | DRIVER_MODESET, |
1182 | DRIVER_MODESET, |
1071 | .load = vmw_driver_load, |
1183 | .load = vmw_driver_load, |
1072 | // .unload = vmw_driver_unload, |
1184 | // .unload = vmw_driver_unload, |
1073 | // .firstopen = vmw_firstopen, |
1185 | // .firstopen = vmw_firstopen, |
1074 | // .lastclose = vmw_lastclose, |
1186 | // .lastclose = vmw_lastclose, |
1075 | .irq_preinstall = vmw_irq_preinstall, |
1187 | .irq_preinstall = vmw_irq_preinstall, |
1076 | .irq_postinstall = vmw_irq_postinstall, |
1188 | .irq_postinstall = vmw_irq_postinstall, |
1077 | // .irq_uninstall = vmw_irq_uninstall, |
1189 | // .irq_uninstall = vmw_irq_uninstall, |
1078 | .irq_handler = vmw_irq_handler, |
1190 | .irq_handler = vmw_irq_handler, |
1079 | // .get_vblank_counter = vmw_get_vblank_counter, |
1191 | // .get_vblank_counter = vmw_get_vblank_counter, |
1080 | // .enable_vblank = vmw_enable_vblank, |
1192 | // .enable_vblank = vmw_enable_vblank, |
1081 | // .disable_vblank = vmw_disable_vblank, |
1193 | // .disable_vblank = vmw_disable_vblank, |
1082 | // .ioctls = vmw_ioctls, |
1194 | // .ioctls = vmw_ioctls, |
1083 | // .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), |
1195 | // .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), |
1084 | // .dma_quiescent = NULL, /*vmw_dma_quiescent, */ |
1196 | // .dma_quiescent = NULL, /*vmw_dma_quiescent, */ |
1085 | // .master_create = vmw_master_create, |
1197 | // .master_create = vmw_master_create, |
1086 | // .master_destroy = vmw_master_destroy, |
1198 | // .master_destroy = vmw_master_destroy, |
1087 | // .master_set = vmw_master_set, |
1199 | // .master_set = vmw_master_set, |
1088 | // .master_drop = vmw_master_drop, |
1200 | // .master_drop = vmw_master_drop, |
1089 | .open = vmw_driver_open, |
1201 | .open = vmw_driver_open, |
1090 | // .preclose = vmw_preclose, |
1202 | // .preclose = vmw_preclose, |
1091 | // .postclose = vmw_postclose, |
1203 | // .postclose = vmw_postclose, |
1092 | 1204 | ||
1093 | // .dumb_create = vmw_dumb_create, |
1205 | // .dumb_create = vmw_dumb_create, |
1094 | // .dumb_map_offset = vmw_dumb_map_offset, |
1206 | // .dumb_map_offset = vmw_dumb_map_offset, |
1095 | // .dumb_destroy = vmw_dumb_destroy, |
1207 | // .dumb_destroy = vmw_dumb_destroy, |
1096 | 1208 | ||
1097 | // .fops = &vmwgfx_driver_fops, |
1209 | // .fops = &vmwgfx_driver_fops, |
1098 | // .name = VMWGFX_DRIVER_NAME, |
1210 | // .name = VMWGFX_DRIVER_NAME, |
1099 | // .desc = VMWGFX_DRIVER_DESC, |
1211 | // .desc = VMWGFX_DRIVER_DESC, |
1100 | // .date = VMWGFX_DRIVER_DATE, |
1212 | // .date = VMWGFX_DRIVER_DATE, |
1101 | // .major = VMWGFX_DRIVER_MAJOR, |
1213 | // .major = VMWGFX_DRIVER_MAJOR, |
1102 | // .minor = VMWGFX_DRIVER_MINOR, |
1214 | // .minor = VMWGFX_DRIVER_MINOR, |
1103 | // .patchlevel = VMWGFX_DRIVER_PATCHLEVEL |
1215 | // .patchlevel = VMWGFX_DRIVER_PATCHLEVEL |
1104 | }; |
1216 | }; |
1105 | 1217 | ||
1106 | #if 0 |
1218 | #if 0 |
1107 | static struct pci_driver vmw_pci_driver = { |
1219 | static struct pci_driver vmw_pci_driver = { |
1108 | .name = VMWGFX_DRIVER_NAME, |
1220 | .name = VMWGFX_DRIVER_NAME, |
1109 | .id_table = vmw_pci_id_list, |
1221 | .id_table = vmw_pci_id_list, |
1110 | .probe = vmw_probe, |
1222 | .probe = vmw_probe, |
1111 | .remove = vmw_remove, |
1223 | .remove = vmw_remove, |
1112 | .driver = { |
1224 | .driver = { |
1113 | .pm = &vmw_pm_ops |
1225 | .pm = &vmw_pm_ops |
1114 | } |
1226 | } |
1115 | }; |
1227 | }; |
1116 | 1228 | ||
1117 | static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
1229 | static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
1118 | { |
1230 | { |
1119 | return drm_get_pci_dev(pdev, ent, &driver); |
1231 | return drm_get_pci_dev(pdev, ent, &driver); |
1120 | } |
1232 | } |
1121 | #endif |
1233 | #endif |
1122 | 1234 | ||
1123 | int vmw_init(void) |
1235 | int vmw_init(void) |
1124 | { |
1236 | { |
1125 | static pci_dev_t device; |
1237 | static pci_dev_t device; |
1126 | const struct pci_device_id *ent; |
1238 | const struct pci_device_id *ent; |
1127 | int err; |
1239 | int err; |
1128 | 1240 | ||
1129 | ENTER(); |
1241 | ENTER(); |
1130 | 1242 | ||
1131 | ent = find_pci_device(&device, vmw_pci_id_list); |
1243 | ent = find_pci_device(&device, vmw_pci_id_list); |
1132 | if( unlikely(ent == NULL) ) |
1244 | if( unlikely(ent == NULL) ) |
1133 | { |
1245 | { |
1134 | dbgprintf("device not found\n"); |
1246 | dbgprintf("device not found\n"); |
1135 | return -ENODEV; |
1247 | return -ENODEV; |
1136 | }; |
1248 | }; |
1137 | 1249 | ||
1138 | drm_core_init(); |
1250 | drm_core_init(); |
1139 | 1251 | ||
1140 | DRM_INFO("device %x:%x\n", device.pci_dev.vendor, |
1252 | DRM_INFO("device %x:%x\n", device.pci_dev.vendor, |
1141 | device.pci_dev.device); |
1253 | device.pci_dev.device); |
1142 | 1254 | ||
1143 | err = drm_get_pci_dev(&device.pci_dev, ent, &driver); |
1255 | err = drm_get_pci_dev(&device.pci_dev, ent, &driver); |
1144 | LEAVE(); |
1256 | LEAVE(); |
1145 | 1257 | ||
1146 | return err; |
1258 | return err; |
1147 | } |
1259 | } |
1148 | 1260 | ||
1149 | 1261 | ||
1150 | MODULE_AUTHOR("VMware Inc. and others"); |
1262 | MODULE_AUTHOR("VMware Inc. and others"); |
1151 | MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); |
1263 | MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); |
1152 | MODULE_LICENSE("GPL and additional rights");>>>>>> |
1264 | MODULE_LICENSE("GPL and additional rights"); |
- | 1265 | ||
- | 1266 | ||
- | 1267 | void *kmemdup(const void *src, size_t len, gfp_t gfp) |
|
- | 1268 | { |
|
- | 1269 | void *p; |
|
- | 1270 | ||
- | 1271 | p = kmalloc(len, gfp); |
|
- | 1272 | if (p) |
|
- | 1273 | memcpy(p, src, len); |
|
- | 1274 | return p; |
|
- | 1275 | }>>>>>>> |
|
- | 1276 |