Subversion Repositories Kolibri OS


Rev 5078 → Rev 6296 (unchanged lines are shown once; lines only in Rev 5078 are prefixed with "-", lines only in Rev 6296 with "+")
/**************************************************************************
 *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include 

#include 
#include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
#include 
#include 
#include 
-//#include 
+#include 
#include 

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

45
#if 0
46
#if 0
46
/**
47
/**
47
 * Fully encoded drm commands. Might move to vmw_drm.h
48
 * Fully encoded drm commands. Might move to vmw_drm.h
48
 */
49
 */
49
 
50
 
50
#define DRM_IOCTL_VMW_GET_PARAM					\
51
#define DRM_IOCTL_VMW_GET_PARAM					\
51
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
52
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
52
		 struct drm_vmw_getparam_arg)
53
		 struct drm_vmw_getparam_arg)
53
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
54
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
54
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
55
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
55
		union drm_vmw_alloc_dmabuf_arg)
56
		union drm_vmw_alloc_dmabuf_arg)
56
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
57
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
57
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
58
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
58
		struct drm_vmw_unref_dmabuf_arg)
59
		struct drm_vmw_unref_dmabuf_arg)
59
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
60
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
60
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
61
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
61
		 struct drm_vmw_cursor_bypass_arg)
62
		 struct drm_vmw_cursor_bypass_arg)
62
 
63
 
63
#define DRM_IOCTL_VMW_CONTROL_STREAM				\
64
#define DRM_IOCTL_VMW_CONTROL_STREAM				\
64
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
65
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
65
		 struct drm_vmw_control_stream_arg)
66
		 struct drm_vmw_control_stream_arg)
66
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
67
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
67
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
68
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
68
		 struct drm_vmw_stream_arg)
69
		 struct drm_vmw_stream_arg)
69
#define DRM_IOCTL_VMW_UNREF_STREAM				\
70
#define DRM_IOCTL_VMW_UNREF_STREAM				\
70
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
71
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
71
		 struct drm_vmw_stream_arg)
72
		 struct drm_vmw_stream_arg)
72
 
73
 
73
#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
74
#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
74
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
75
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
75
		struct drm_vmw_context_arg)
76
		struct drm_vmw_context_arg)
76
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
77
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
77
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
78
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
78
		struct drm_vmw_context_arg)
79
		struct drm_vmw_context_arg)
79
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
80
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
80
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
81
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
81
		 union drm_vmw_surface_create_arg)
82
		 union drm_vmw_surface_create_arg)
82
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
83
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
83
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
84
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
84
		 struct drm_vmw_surface_arg)
85
		 struct drm_vmw_surface_arg)
85
#define DRM_IOCTL_VMW_REF_SURFACE				\
86
#define DRM_IOCTL_VMW_REF_SURFACE				\
86
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
87
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
87
		 union drm_vmw_surface_reference_arg)
88
		 union drm_vmw_surface_reference_arg)
88
#define DRM_IOCTL_VMW_EXECBUF					\
89
#define DRM_IOCTL_VMW_EXECBUF					\
89
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
90
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
90
		struct drm_vmw_execbuf_arg)
91
		struct drm_vmw_execbuf_arg)
91
#define DRM_IOCTL_VMW_GET_3D_CAP				\
92
#define DRM_IOCTL_VMW_GET_3D_CAP				\
92
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
93
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
93
		 struct drm_vmw_get_3d_cap_arg)
94
		 struct drm_vmw_get_3d_cap_arg)
94
#define DRM_IOCTL_VMW_FENCE_WAIT				\
95
#define DRM_IOCTL_VMW_FENCE_WAIT				\
95
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
96
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
96
		 struct drm_vmw_fence_wait_arg)
97
		 struct drm_vmw_fence_wait_arg)
97
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
98
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
98
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
99
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
99
		 struct drm_vmw_fence_signaled_arg)
100
		 struct drm_vmw_fence_signaled_arg)
100
#define DRM_IOCTL_VMW_FENCE_UNREF				\
101
#define DRM_IOCTL_VMW_FENCE_UNREF				\
101
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
102
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
102
		 struct drm_vmw_fence_arg)
103
		 struct drm_vmw_fence_arg)
103
#define DRM_IOCTL_VMW_FENCE_EVENT				\
104
#define DRM_IOCTL_VMW_FENCE_EVENT				\
104
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
105
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
105
		 struct drm_vmw_fence_event_arg)
106
		 struct drm_vmw_fence_event_arg)
106
#define DRM_IOCTL_VMW_PRESENT					\
107
#define DRM_IOCTL_VMW_PRESENT					\
107
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
108
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
108
		 struct drm_vmw_present_arg)
109
		 struct drm_vmw_present_arg)
109
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
110
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
110
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
111
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
111
		 struct drm_vmw_present_readback_arg)
112
		 struct drm_vmw_present_readback_arg)
112
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
113
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
113
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
114
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
114
		 struct drm_vmw_update_layout_arg)
115
		 struct drm_vmw_update_layout_arg)
115
#define DRM_IOCTL_VMW_CREATE_SHADER				\
116
#define DRM_IOCTL_VMW_CREATE_SHADER				\
116
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
117
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
117
		 struct drm_vmw_shader_create_arg)
118
		 struct drm_vmw_shader_create_arg)
118
#define DRM_IOCTL_VMW_UNREF_SHADER				\
119
#define DRM_IOCTL_VMW_UNREF_SHADER				\
119
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
120
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
120
		 struct drm_vmw_shader_arg)
121
		 struct drm_vmw_shader_arg)
121
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
122
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
122
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
123
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
123
		 union drm_vmw_gb_surface_create_arg)
124
		 union drm_vmw_gb_surface_create_arg)
124
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
125
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
125
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
126
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
126
		 union drm_vmw_gb_surface_reference_arg)
127
		 union drm_vmw_gb_surface_reference_arg)
127
#define DRM_IOCTL_VMW_SYNCCPU					\
128
#define DRM_IOCTL_VMW_SYNCCPU					\
128
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
129
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
129
		 struct drm_vmw_synccpu_arg)
130
		 struct drm_vmw_synccpu_arg)
-
 
131
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
-
 
132
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
-
 
133
		struct drm_vmw_context_arg)
130
 
134
 
131
/**
135
/**
132
 * The core DRM version of this macro doesn't account for
136
 * The core DRM version of this macro doesn't account for
133
 * DRM_COMMAND_BASE.
137
 * DRM_COMMAND_BASE.
134
 */
138
 */
135
 
139
 
136
#define VMW_IOCTL_DEF(ioctl, func, flags) \
140
#define VMW_IOCTL_DEF(ioctl, func, flags) \
137
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
141
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
138
 
142
 
139
/**
143
/**
140
 * Ioctl definitions.
144
 * Ioctl definitions.
141
 */
145
 */
142
 
146
 
143
static const struct drm_ioctl_desc vmw_ioctls[] = {
147
static const struct drm_ioctl_desc vmw_ioctls[] = {
144
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
148
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
145
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
149
		      DRM_AUTH | DRM_RENDER_ALLOW),
146
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
150
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
147
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
151
		      DRM_AUTH | DRM_RENDER_ALLOW),
148
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
152
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
149
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
153
		      DRM_RENDER_ALLOW),
150
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
154
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
151
		      vmw_kms_cursor_bypass_ioctl,
155
		      vmw_kms_cursor_bypass_ioctl,
152
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
156
		      DRM_MASTER | DRM_CONTROL_ALLOW),
153
 
157
 
154
	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
158
	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
155
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
159
		      DRM_MASTER | DRM_CONTROL_ALLOW),
156
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
160
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
157
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
161
		      DRM_MASTER | DRM_CONTROL_ALLOW),
158
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
162
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
159
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
163
		      DRM_MASTER | DRM_CONTROL_ALLOW),
160
 
164
 
161
	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
165
	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
162
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
166
		      DRM_AUTH | DRM_RENDER_ALLOW),
163
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
167
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
164
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
168
		      DRM_RENDER_ALLOW),
165
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
169
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
166
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
170
		      DRM_AUTH | DRM_RENDER_ALLOW),
167
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
171
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
168
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
172
		      DRM_RENDER_ALLOW),
169
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
173
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
170
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
174
		      DRM_AUTH | DRM_RENDER_ALLOW),
171
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
175
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
172
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
176
		      DRM_RENDER_ALLOW),
173
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
177
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
174
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
178
		      DRM_RENDER_ALLOW),
175
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
179
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
176
		      vmw_fence_obj_signaled_ioctl,
180
		      vmw_fence_obj_signaled_ioctl,
177
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
181
		      DRM_RENDER_ALLOW),
178
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
182
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
179
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
183
		      DRM_RENDER_ALLOW),
180
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
184
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
181
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
185
		      DRM_AUTH | DRM_RENDER_ALLOW),
182
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
186
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
183
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
187
		      DRM_AUTH | DRM_RENDER_ALLOW),
184
 
188
 
185
	/* these allow direct access to the framebuffers mark as master only */
189
	/* these allow direct access to the framebuffers mark as master only */
186
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
190
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
187
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
191
		      DRM_MASTER | DRM_AUTH),
188
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
192
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
189
		      vmw_present_readback_ioctl,
193
		      vmw_present_readback_ioctl,
190
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
194
		      DRM_MASTER | DRM_AUTH),
191
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
195
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
192
		      vmw_kms_update_layout_ioctl,
196
		      vmw_kms_update_layout_ioctl,
193
		      DRM_MASTER | DRM_UNLOCKED),
197
		      DRM_MASTER),
194
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
198
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
195
		      vmw_shader_define_ioctl,
199
		      vmw_shader_define_ioctl,
196
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
200
		      DRM_AUTH | DRM_RENDER_ALLOW),
197
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
201
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
198
		      vmw_shader_destroy_ioctl,
202
		      vmw_shader_destroy_ioctl,
199
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
203
		      DRM_RENDER_ALLOW),
200
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
204
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
201
		      vmw_gb_surface_define_ioctl,
205
		      vmw_gb_surface_define_ioctl,
202
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
206
		      DRM_AUTH | DRM_RENDER_ALLOW),
203
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
207
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
204
		      vmw_gb_surface_reference_ioctl,
208
		      vmw_gb_surface_reference_ioctl,
205
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
209
		      DRM_AUTH | DRM_RENDER_ALLOW),
206
	VMW_IOCTL_DEF(VMW_SYNCCPU,
210
	VMW_IOCTL_DEF(VMW_SYNCCPU,
207
		      vmw_user_dmabuf_synccpu_ioctl,
211
		      vmw_user_dmabuf_synccpu_ioctl,
-
 
212
		      DRM_RENDER_ALLOW),
-
 
213
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
-
 
214
		      vmw_extended_context_define_ioctl,
208
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
215
		      DRM_AUTH | DRM_RENDER_ALLOW),
209
};
216
};
210
#endif
217
#endif

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};

static int enable_fbdev = 1;
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);

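The knobs above are plain integer module parameters. A minimal hedged sketch of the same pattern in isolation (the parameter name below is made up for illustration and is not one of the driver's):

static int example_knob = 1;	/* hypothetical knob, illustration only */
module_param_named(example_knob, example_knob, int, 0600);
MODULE_PARM_DESC(example_knob, "Example vmwgfx tuning knob");
/*
 * On a Linux host such a parameter would be settable at load time
 * ("modprobe vmwgfx example_knob=0") or later through
 * /sys/module/vmwgfx/parameters/example_knob; how the KolibriOS port
 * stubs these macros out is not shown in this file.
 */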
static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
+	if (capabilities & SVGA_CAP_DX)
+		DRM_INFO("  DX Features.\n");
}
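The capability word printed above is an ordinary bitmask, so individual features are tested the same way throughout the driver; a minimal hedged sketch (hypothetical helper, not part of this file):

static inline bool vmw_example_has_gb_objects(const struct vmw_private *dev_priv)
{
	/* True when the host advertises guest-backed objects (SVGA_CAP_GBOBJECTS). */
	return (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) != 0;
}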

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure, Finally it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
-	struct ttm_buffer_object *bo;
+	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
-	 * Create the bo as pinned, so that a tryreserve will
+	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
-	ret = ttm_bo_create(&dev_priv->bdev,
-			     PAGE_SIZE,
-			     ttm_bo_type_device,
-			    &vmw_sys_ne_placement,
-			     0, false, NULL,
-			    &bo);
+	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
+	if (!vbo)
+		return -ENOMEM;
+
+	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
+			      &vmw_sys_ne_placement, false,
+			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

-	ret = ttm_bo_reserve(bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
	BUG_ON(ret != 0);
+	vmw_bo_pin_reserved(vbo, true);

-	ret = ttm_bo_kmap(bo, 0, 1, &map);
+	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
-	vmw_bo_pin(bo, false);
-	ttm_bo_unreserve(bo);
+	vmw_bo_pin_reserved(vbo, false);
+	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
-		ttm_bo_unref(&bo);
+		vmw_dmabuf_unreference(&vbo);
	} else
-		dev_priv->dummy_query_bo = bo;
+		dev_priv->dummy_query_bo = vbo;

	return ret;
}
+
+/**
+ * vmw_request_device_late - Perform late device setup
+ *
+ * @dev_priv: Pointer to device private.
+ *
+ * This function performs setup of otables and enables large command
+ * buffer submission. These tasks are split out to a separate function
+ * because it reverts vmw_release_device_early and is intended to be used
+ * by an error path in the hibernation code.
+ */
+static int vmw_request_device_late(struct vmw_private *dev_priv)
+{
+	int ret;
+
+	if (dev_priv->has_mob) {
+		ret = vmw_otables_setup(dev_priv);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Unable to initialize "
+				  "guest Memory OBjects.\n");
+			return ret;
+		}
+	}
+
+	if (dev_priv->cman) {
+		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
+					       256*4096, 2*4096);
+		if (ret) {
+			struct vmw_cmdbuf_man *man = dev_priv->cman;
+
+			dev_priv->cman = NULL;
+			vmw_cmdbuf_man_destroy(man);
+		}
+	}
+
+	return 0;
+}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
-//   vmw_fence_fifo_up(dev_priv->fman);
-//   ret = vmw_dummy_query_bo_create(dev_priv);
-//   if (unlikely(ret != 0))
-//       goto out_no_query_bo;
-//   vmw_dummy_query_bo_prepare(dev_priv);
+	vmw_fence_fifo_up(dev_priv->fman);
+	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
+	if (IS_ERR(dev_priv->cman)) {
+		dev_priv->cman = NULL;
+		dev_priv->has_dx = false;
+	}
+
+	ret = vmw_request_device_late(dev_priv);
+	if (ret)
+		goto out_no_mob;
+
+	ret = vmw_dummy_query_bo_create(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_no_query_bo;

	return 0;

out_no_query_bo:
+	if (dev_priv->cman)
+		vmw_cmdbuf_remove_pool(dev_priv->cman);
+	if (dev_priv->has_mob) {
+//		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+		vmw_otables_takedown(dev_priv);
+	}
+	if (dev_priv->cman)
+		vmw_cmdbuf_man_destroy(dev_priv->cman);
+out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

+/**
+ * vmw_release_device_early - Early part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the first part of command submission takedown, to be called before
+ * buffer management is taken down.
+ */
-static void vmw_release_device(struct vmw_private *dev_priv)
+static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

-	ttm_bo_unref(&dev_priv->dummy_query_bo);
-	vmw_fence_fifo_down(dev_priv->fman);
-	vmw_fifo_release(dev_priv, &dev_priv->fifo);
+	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+	if (dev_priv->cman)
+		vmw_cmdbuf_remove_pool(dev_priv->cman);
+
+	if (dev_priv->has_mob) {
+		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+		vmw_otables_takedown(dev_priv);
+	}
}

-/**
- * Increase the 3d resource refcount.
- * If the count was prevously zero, initialize the fifo, switching to svga
- * mode. Note that the master holds a ref as well, and may request an
- * explicit switch to svga mode if fb is not running, using @unhide_svga.
- */
-int vmw_3d_resource_inc(struct vmw_private *dev_priv,
-			bool unhide_svga)
-{
-	int ret = 0;
-
-	mutex_lock(&dev_priv->release_mutex);
-	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
-        ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			--dev_priv->num_3d_resources;
-	} else if (unhide_svga) {
-		mutex_lock(&dev_priv->hw_mutex);
-		vmw_write(dev_priv, SVGA_REG_ENABLE,
-			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
-			  ~SVGA_REG_ENABLE_HIDE);
-		mutex_unlock(&dev_priv->hw_mutex);
-	}
-
-	mutex_unlock(&dev_priv->release_mutex);
-	return ret;
-}
-
-/**
- * Decrease the 3d resource refcount.
- * If the count reaches zero, disable the fifo, switching to vga mode.
- * Note that the master holds a refcount as well, and may request an
- * explicit switch to vga mode when it releases its refcount to account
- * for the situation of an X server vt switch to VGA with 3d resources
- * active.
- */
-void vmw_3d_resource_dec(struct vmw_private *dev_priv,
-			 bool hide_svga)
-{
-	int32_t n3d;
-
-	mutex_lock(&dev_priv->release_mutex);
-	if (unlikely(--dev_priv->num_3d_resources == 0))
-		vmw_release_device(dev_priv);
-	else if (hide_svga) {
-		mutex_lock(&dev_priv->hw_mutex);
-		vmw_write(dev_priv, SVGA_REG_ENABLE,
-			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
-			  SVGA_REG_ENABLE_HIDE);
-		mutex_unlock(&dev_priv->hw_mutex);
-	}
-
-	n3d = (int32_t) dev_priv->num_3d_resources;
-	mutex_unlock(&dev_priv->release_mutex);
-
-	BUG_ON(n3d < 0);
-}
+/**
+ * vmw_release_device_late - Late part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the last part of the command submission takedown, to be called when
+ * command submission is no longer needed. It may wait on pending fences.
+ */
+static void vmw_release_device_late(struct vmw_private *dev_priv)
+{
+	vmw_fence_fifo_down(dev_priv->fman);
+	if (dev_priv->cman)
+		vmw_cmdbuf_man_destroy(dev_priv->cman);
+
+	vmw_fifo_release(dev_priv, &dev_priv->fifo);
+}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}
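Restated outside the driver for clarity, the clamping rule above is: raise each dimension to the minimum, then fall back to the minimums entirely if either dimension still exceeds what the host reports as its framebuffer maximum. A hedged, self-contained sketch (hypothetical helper, not part of the driver):

static void example_clamp_initial_size(uint32_t *width, uint32_t *height,
				       uint32_t fb_max_width, uint32_t fb_max_height)
{
	*width = max_t(uint32_t, *width, VMW_MIN_INITIAL_WIDTH);
	*height = max_t(uint32_t, *height, VMW_MIN_INITIAL_HEIGHT);

	if (*width > fb_max_width || *height > fb_max_height) {
		/* Host error: fall back to the known-safe minimums. */
		*width = VMW_MIN_INITIAL_WIDTH;
		*height = VMW_MIN_INITIAL_HEIGHT;
	}
}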

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This functions tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+#ifdef CONFIG_X86
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
+
+#ifdef CONFIG_INTEL_IOMMU
+	if (intel_iommu_enabled) {
+		dev_priv->map_mode = vmw_dma_map_populate;
+		goto out_fixup;
+	}
+#endif
+
+	if (!(vmw_force_iommu || vmw_force_coherent)) {
-    dev_priv->map_mode = vmw_dma_phys;
+		dev_priv->map_mode = vmw_dma_phys;
+		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
+		return 0;
+	}
+
+	dev_priv->map_mode = vmw_dma_map_populate;
+
+	if (dma_ops->sync_single_for_cpu)
+		dev_priv->map_mode = vmw_dma_alloc_coherent;
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl() == 0)
+		dev_priv->map_mode = vmw_dma_map_populate;
+#endif
+
+#ifdef CONFIG_INTEL_IOMMU
+out_fixup:
+#endif
+	if (dev_priv->map_mode == vmw_dma_map_populate &&
+	    vmw_restrict_iommu)
+		dev_priv->map_mode = vmw_dma_map_bind;
+
+	if (vmw_force_coherent)
+		dev_priv->map_mode = vmw_dma_alloc_coherent;
+
+#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
+	/*
+	 * No coherent page pool
+	 */
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+		return -EINVAL;
+#endif
+
+#else /* CONFIG_X86 */
+	dev_priv->map_mode = vmw_dma_map_populate;
+#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev: Pointer to struct drm-device
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif
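For reference, DMA_BIT_MASK(44) used above is simply a mask with the low 44 bits set, so the dma_set_mask() call tells the DMA layer the device can only address the first 16 TiB of bus address space. A hedged restatement of the arithmetic (illustration only, mirroring the kernel macro for n < 64):

#define EXAMPLE_DMA_BIT_MASK(n)	(((uint64_t)1 << (n)) - 1)
/* EXAMPLE_DMA_BIT_MASK(44) == 0xfffffffffff, i.e. 2^44 - 1 (16 TiB - 1). */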
526
 
605
 
527
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
606
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
528
{
607
{
529
	struct vmw_private *dev_priv;
608
	struct vmw_private *dev_priv;
530
	int ret;
609
	int ret;
531
	uint32_t svga_id;
610
	uint32_t svga_id;
532
	enum vmw_res_type i;
611
	enum vmw_res_type i;
533
	bool refuse_dma = false;
612
	bool refuse_dma = false;
534
 
613
 
535
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
614
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
536
	if (unlikely(dev_priv == NULL)) {
615
	if (unlikely(dev_priv == NULL)) {
537
		DRM_ERROR("Failed allocating a device private struct.\n");
616
		DRM_ERROR("Failed allocating a device private struct.\n");
538
		return -ENOMEM;
617
		return -ENOMEM;
539
	}
618
	}
540
 
619
 
541
	pci_set_master(dev->pdev);
620
	pci_set_master(dev->pdev);
542
 
621
 
543
	dev_priv->dev = dev;
622
	dev_priv->dev = dev;
544
	dev_priv->vmw_chipset = chipset;
623
	dev_priv->vmw_chipset = chipset;
545
	dev_priv->last_read_seqno = (uint32_t) -100;
624
	dev_priv->last_read_seqno = (uint32_t) -100;
546
	mutex_init(&dev_priv->hw_mutex);
-
 
547
	mutex_init(&dev_priv->cmdbuf_mutex);
625
	mutex_init(&dev_priv->cmdbuf_mutex);
548
	mutex_init(&dev_priv->release_mutex);
626
	mutex_init(&dev_priv->release_mutex);
549
	mutex_init(&dev_priv->binding_mutex);
627
	mutex_init(&dev_priv->binding_mutex);
550
	rwlock_init(&dev_priv->resource_lock);
628
	rwlock_init(&dev_priv->resource_lock);
551
	ttm_lock_init(&dev_priv->reservation_sem);
629
	ttm_lock_init(&dev_priv->reservation_sem);
-
 
630
	spin_lock_init(&dev_priv->hw_lock);
-
 
631
	spin_lock_init(&dev_priv->waiter_lock);
-
 
632
	spin_lock_init(&dev_priv->cap_lock);
-
 
633
	spin_lock_init(&dev_priv->svga_lock);
552
 
634
 
553
	for (i = vmw_res_context; i < vmw_res_max; ++i) {
635
	for (i = vmw_res_context; i < vmw_res_max; ++i) {
554
		idr_init(&dev_priv->res_idr[i]);
636
		idr_init(&dev_priv->res_idr[i]);
555
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
637
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
556
	}
638
	}
557
 
639
 
558
	mutex_init(&dev_priv->init_mutex);
640
	mutex_init(&dev_priv->init_mutex);
559
	init_waitqueue_head(&dev_priv->fence_queue);
641
	init_waitqueue_head(&dev_priv->fence_queue);
560
	init_waitqueue_head(&dev_priv->fifo_queue);
642
	init_waitqueue_head(&dev_priv->fifo_queue);
561
	dev_priv->fence_queue_waiters = 0;
643
	dev_priv->fence_queue_waiters = 0;
562
	atomic_set(&dev_priv->fifo_queue_waiters, 0);
644
	dev_priv->fifo_queue_waiters = 0;
563
 
645
 
564
	dev_priv->used_memory_size = 0;
646
	dev_priv->used_memory_size = 0;
565
 
647
 
566
	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
648
	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
567
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
649
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
568
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
650
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
569
 
651
 
570
	dev_priv->enable_fb = enable_fbdev;
652
	dev_priv->enable_fb = enable_fbdev;
571
 
-
 
572
	mutex_lock(&dev_priv->hw_mutex);
-
 
573
 
653
 
574
    vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
654
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
575
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
655
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
576
	if (svga_id != SVGA_ID_2) {
656
	if (svga_id != SVGA_ID_2) {
577
		ret = -ENOSYS;
657
		ret = -ENOSYS;
578
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
658
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
579
		mutex_unlock(&dev_priv->hw_mutex);
-
 
580
		goto out_err0;
659
		goto out_err0;
581
	}
660
	}
582
 
661
 
583
	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
662
	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
584
	ret = vmw_dma_select_mode(dev_priv);
663
	ret = vmw_dma_select_mode(dev_priv);
585
	if (unlikely(ret != 0)) {
664
	if (unlikely(ret != 0)) {
586
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
665
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
587
		refuse_dma = true;
666
		refuse_dma = true;
588
	}
667
	}
589
 
668
 
590
	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
669
	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
591
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
670
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
592
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
671
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
593
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
672
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
594
 
673
 
595
	vmw_get_initial_size(dev_priv);
674
	vmw_get_initial_size(dev_priv);
596
 
675
 
597
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
676
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
598
		dev_priv->max_gmr_ids =
677
		dev_priv->max_gmr_ids =
599
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
678
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
600
		dev_priv->max_gmr_pages =
679
		dev_priv->max_gmr_pages =
601
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
680
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
602
		dev_priv->memory_size =
681
		dev_priv->memory_size =
603
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
682
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
604
		dev_priv->memory_size -= dev_priv->vram_size;
683
		dev_priv->memory_size -= dev_priv->vram_size;
605
	} else {
684
	} else {
606
		/*
685
		/*
607
		 * An arbitrary limit of 512MiB on surface
686
		 * An arbitrary limit of 512MiB on surface
608
		 * memory. But all HWV8 hardware supports GMR2.
687
		 * memory. But all HWV8 hardware supports GMR2.
609
		 */
688
		 */
610
		dev_priv->memory_size = 512*1024*1024;
689
		dev_priv->memory_size = 512*1024*1024;
611
	}
690
	}
612
	dev_priv->max_mob_pages = 0;
691
	dev_priv->max_mob_pages = 0;
613
	dev_priv->max_mob_size = 0;
692
	dev_priv->max_mob_size = 0;
614
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
693
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
615
		uint64_t mem_size =
694
		uint64_t mem_size =
616
			vmw_read(dev_priv,
695
			vmw_read(dev_priv,
617
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
696
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
618
 
697
 
619
		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
698
		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
620
		dev_priv->prim_bb_mem =
699
		dev_priv->prim_bb_mem =
621
			vmw_read(dev_priv,
700
			vmw_read(dev_priv,
622
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
701
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
623
		dev_priv->max_mob_size =
702
		dev_priv->max_mob_size =
624
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
703
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
-
 
704
		dev_priv->stdu_max_width =
-
 
705
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
-
 
706
		dev_priv->stdu_max_height =
-
 
707
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
-
 
708
 
-
 
709
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
-
 
710
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
-
 
711
		dev_priv->texture_max_width = vmw_read(dev_priv,
-
 
712
						       SVGA_REG_DEV_CAP);
-
 
713
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
-
 
714
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
-
 
715
		dev_priv->texture_max_height = vmw_read(dev_priv,
-
 
716
							SVGA_REG_DEV_CAP);
625
	} else
717
	} else {
-
 
718
		dev_priv->texture_max_width = 8192;
-
 
719
		dev_priv->texture_max_height = 8192;
626
		dev_priv->prim_bb_mem = dev_priv->vram_size;
720
		dev_priv->prim_bb_mem = dev_priv->vram_size;
627
 
-
 
628
	ret = vmw_dma_masks(dev_priv);
-
 
629
	if (unlikely(ret != 0)) {
-
 
630
		mutex_unlock(&dev_priv->hw_mutex);
-
 
631
		goto out_err0;
-
 
632
	}
721
	}
633
 
-
 
634
	if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
-
 
635
		dev_priv->prim_bb_mem = dev_priv->vram_size;
-
 
636
 
-
 
637
	mutex_unlock(&dev_priv->hw_mutex);
-
 
638
 
722
 
-
 
723
	vmw_print_capabilities(dev_priv->capabilities);
-
 
724
 
-
 
725
	ret = vmw_dma_masks(dev_priv);
-
 
726
	if (unlikely(ret != 0))
639
	vmw_print_capabilities(dev_priv->capabilities);
727
		goto out_err0;
640
 
728
 
641
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
729
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
642
		DRM_INFO("Max GMR ids is %u\n",
730
		DRM_INFO("Max GMR ids is %u\n",
643
			 (unsigned)dev_priv->max_gmr_ids);
731
			 (unsigned)dev_priv->max_gmr_ids);
644
		DRM_INFO("Max number of GMR pages is %u\n",
732
		DRM_INFO("Max number of GMR pages is %u\n",
645
			 (unsigned)dev_priv->max_gmr_pages);
733
			 (unsigned)dev_priv->max_gmr_pages);
646
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
734
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
647
			 (unsigned)dev_priv->memory_size / 1024);
735
			 (unsigned)dev_priv->memory_size / 1024);
648
	}
736
	}
649
	DRM_INFO("Maximum display memory size is %u kiB\n",
737
	DRM_INFO("Maximum display memory size is %u kiB\n",
650
		 dev_priv->prim_bb_mem / 1024);
738
		 dev_priv->prim_bb_mem / 1024);
651
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
739
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
652
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
740
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
653
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
741
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
654
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
742
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
655
 
743
 
656
	ret = vmw_ttm_global_init(dev_priv);
744
	ret = vmw_ttm_global_init(dev_priv);
657
	if (unlikely(ret != 0))
745
	if (unlikely(ret != 0))
658
		goto out_err0;
746
		goto out_err0;
659
 
747
 
660
 
748
 
661
	vmw_master_init(&dev_priv->fbdev_master);
749
	vmw_master_init(&dev_priv->fbdev_master);
662
	dev_priv->active_master = &dev_priv->fbdev_master;
750
	dev_priv->active_master = &dev_priv->fbdev_master;
663
 
-
 
664
 
-
 
665
	ret = ttm_bo_device_init(&dev_priv->bdev,
-
 
666
				 dev_priv->bo_global_ref.ref.object,
-
 
667
				 &vmw_bo_driver,
-
 
668
				 NULL,
-
 
669
				 VMWGFX_FILE_PAGE_OFFSET,
-
 
670
				 false);
-
 
671
	if (unlikely(ret != 0)) {
-
 
672
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
-
 
673
		goto out_err1;
-
 
674
	}
-
 
675
 
-
 
676
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
-
 
677
			     (dev_priv->vram_size >> PAGE_SHIFT));
-
 
678
	if (unlikely(ret != 0)) {
-
 
679
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
-
 
680
		goto out_err2;
-
 
681
	}
-
 
682
 
-
 
683
	dev_priv->has_gmr = true;
-
 
684
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
-
 
685
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
-
 
686
					 VMW_PL_GMR) != 0) {
-
 
687
		DRM_INFO("No GMR memory available. "
-
 
688
			 "Graphics memory resources are very limited.\n");
-
 
689
		dev_priv->has_gmr = false;
-
 
690
	}
-
 
691
 
-
 
692
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
-
 
693
		dev_priv->has_mob = true;
-
 
694
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
-
 
695
				   VMW_PL_MOB) != 0) {
-
 
696
			DRM_INFO("No MOB memory available. "
-
 
697
				 "3D will be disabled.\n");
-
 
698
			dev_priv->has_mob = false;
-
 
699
		}
-
 
700
	}
751
 
701
	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
752
	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

#if 0
	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */
		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}
#endif

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 NULL,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	system_wq = alloc_ordered_workqueue("vmwgfx", 0);
	main_device = dev;

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}
	LINE();

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
//	if (dev_priv->has_mob)
//		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
//	if (dev_priv->has_gmr)
//		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
//	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
//	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
//	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
//	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
//		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
//	if (dev_priv->stealth)
//		pci_release_region(dev->pdev, 2);
//	else
//		pci_release_regions(dev->pdev);
out_no_device:
//	ttm_object_device_release(&dev_priv->tdev);
out_err4:
//	memunmap(dev_priv->mmio_virt);
out_err3:
//	vmw_ttm_global_release(dev_priv);
out_err0:
//	for (i = vmw_res_context; i < vmw_res_max; ++i)
//		idr_destroy(&dev_priv->res_idr[i]);

//	if (dev_priv->ctx.staged_bindings)
//		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}
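/*
 * Driver teardown is compiled out in this port; the vmw_driver_unload()
 * below documents the order in which KMS, the TTM memory pools, the fence
 * manager and the MMIO mapping would normally be released.
 */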

#if 0
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}

static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}
#endif
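/*
 * vmw_driver_open() stays active, but the per-file TTM object setup is
 * stubbed out (the ttm_object_file_init() call is commented), so the
 * out_no_tfile error path below is currently unreachable.
 */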

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
//	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
//	if (unlikely(vmw_fp->tfile == NULL))
//		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}
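/*
 * Ioctl routing (compiled out here): vmw_generic_ioctl() validates the
 * encoding of driver-private ioctls, dispatches DRM_VMW_EXECBUF straight
 * to vmw_execbuf_ioctl(), and holds the master's ttm read lock around the
 * handler passed in as ioctl_func (drm_ioctl or drm_compat_ioctl).
 */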

#if 0
static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
		    &vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}
#endif
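/*
 * Master bookkeeping: each drm_master gets a vmw_master wrapper whose only
 * state in this version is a ttm_lock; create and destroy simply allocate
 * and free that wrapper.
 */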

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
//	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}
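/*
 * Master switching and the fbdev hand-over are compiled out below, but
 * __vmw_svga_enable()/vmw_svga_enable() remain live: they set
 * SVGA_REG_ENABLE and mark the TTM VRAM manager usable under svga_lock.
 */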

#if 0
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}
#endif
/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}
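/*
 * PCI removal and power-management hooks are compiled out in this port;
 * the hibernate path would evict resources, swap out TTM buffers and take
 * the fence FIFO down before releasing the device.
 */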

#if 0
static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		if (dev_priv->enable_fb)
			vmw_fb_off(dev_priv);
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents is moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		vmw_release_device_early(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);
		vmw_fence_fifo_down(dev_priv->fman);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		vmw_fence_fifo_up(dev_priv->fman);
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);

	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspended = false;
		return -EBUSY;
	}

	if (dev_priv->enable_fb)
		__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);

	return 0;
}

#endif
1397
 
1198
static struct drm_driver driver = {
1398
static struct drm_driver driver = {
1199
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
1399
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
1200
	DRIVER_MODESET | DRIVER_RENDER,
1400
	DRIVER_MODESET | DRIVER_RENDER,
1201
   .load = vmw_driver_load,
1401
   .load = vmw_driver_load,
1202
//	.unload = vmw_driver_unload,
1402
//	.unload = vmw_driver_unload,
1203
//	.lastclose = vmw_lastclose,
1403
//	.lastclose = vmw_lastclose,
1204
   .irq_preinstall = vmw_irq_preinstall,
1404
   .irq_preinstall = vmw_irq_preinstall,
1205
   .irq_postinstall = vmw_irq_postinstall,
1405
   .irq_postinstall = vmw_irq_postinstall,
1206
//   .irq_uninstall = vmw_irq_uninstall,
1406
//   .irq_uninstall = vmw_irq_uninstall,
1207
   .irq_handler = vmw_irq_handler,
1407
   .irq_handler = vmw_irq_handler,
1208
//   .get_vblank_counter = vmw_get_vblank_counter,
1408
   .get_vblank_counter = vmw_get_vblank_counter,
1209
//   .enable_vblank = vmw_enable_vblank,
1409
   .enable_vblank = vmw_enable_vblank,
1210
//   .disable_vblank = vmw_disable_vblank,
1410
   .disable_vblank = vmw_disable_vblank,
1211
//   .ioctls = vmw_ioctls,
1411
//   .ioctls = vmw_ioctls,
1212
//   .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
1412
//   .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
1213
     .open = vmw_driver_open,
1413
     .open = vmw_driver_open,
1214
//   .preclose = vmw_preclose,
1414
//   .preclose = vmw_preclose,
1215
//   .postclose = vmw_postclose,
1415
//   .postclose = vmw_postclose,
1216
 
1416
 
1217
//   .dumb_create = vmw_dumb_create,
1417
//   .dumb_create = vmw_dumb_create,
1218
//   .dumb_map_offset = vmw_dumb_map_offset,
1418
//   .dumb_map_offset = vmw_dumb_map_offset,
1219
//   .dumb_destroy = vmw_dumb_destroy,
1419
//   .dumb_destroy = vmw_dumb_destroy,
1220
 
1420
 
1221
 
1421
 
1222
};
1422
};
1223
 
1423
 
#if 0
static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}
#endif
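/*
 * vmw_init() stands in for the usual PCI driver registration: the device
 * is located with find_pci_device(), the DRM core is brought up manually,
 * and drm_get_pci_dev() is then called directly with the matching entry.
 */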

int vmw_init(void)
{
	static pci_dev_t device;
	const struct pci_device_id *ent;
	int err;

	ent = find_pci_device(&device, vmw_pci_id_list);
	if (unlikely(ent == NULL)) {
		dbgprintf("device not found\n");
		return -ENODEV;
	}

	drm_core_init();

	DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
				   device.pci_dev.device);

	err = drm_get_pci_dev(&device.pci_dev, ent, &driver);

	return err;
}


MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
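/*
 * Local kmemdup(): a kmalloc-plus-memcpy helper, apparently provided here
 * because the host environment does not supply the Linux implementation.
 */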

void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}