Subversion Repositories Kolibri OS

Rev 4560 → Rev 5060

/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_drv.h"

/*
 * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
 * VLV_VLV2_PUNIT_HAS_0.8.docx
 */

/* Standard MMIO read, non-posted */
#define SB_MRD_NP	0x00
/* Standard MMIO write, non-posted */
#define SB_MWR_NP	0x01
/* Private register read, double-word addressing, non-posted */
#define SB_CRRDDA_NP	0x06
/* Private register write, double-word addressing, non-posted */
#define SB_CRWRDA_NP	0x07

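/*
 * vlv_sideband_rw() below implements the IOSF doorbell handshake: build the
 * command word from devfn, opcode, port, byte enables and BAR, wait for
 * IOSF_SB_BUSY to clear, program VLV_IOSF_ADDR (and VLV_IOSF_DATA for
 * writes), ring VLV_IOSF_DOORBELL_REQ, then wait for completion and, for
 * reads, fetch the result from VLV_IOSF_DATA.
 */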
static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
			   u32 port, u32 opcode, u32 addr, u32 *val)
{
	u32 cmd, be = 0xf, bar = 0;
	bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);

	cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
		(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
		(bar << IOSF_BAR_SHIFT);

	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));

	if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
		DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
				 is_read ? "read" : "write");
		return -EAGAIN;
	}

	I915_WRITE(VLV_IOSF_ADDR, addr);
	if (!is_read)
		I915_WRITE(VLV_IOSF_DATA, *val);
	I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);

	if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
		DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
				 is_read ? "read" : "write");
		return -ETIMEDOUT;
	}

	if (is_read)
		*val = I915_READ(VLV_IOSF_DATA);
	I915_WRITE(VLV_IOSF_DATA, 0);

	return 0;
}
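
/*
 * The per-unit accessors below wrap vlv_sideband_rw() with the IOSF port
 * and opcode for each unit.  The punit and NC helpers take dpio_lock
 * themselves and expect the caller to hold rps.hw_lock; the remaining
 * helpers rely on the caller already holding dpio_lock, which
 * vlv_sideband_rw() asserts.
 */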
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
{
	u32 val = 0;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	mutex_lock(&dev_priv->dpio_lock);
	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
			SB_CRRDDA_NP, addr, &val);
	mutex_unlock(&dev_priv->dpio_lock);

	return val;
}

void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	mutex_lock(&dev_priv->dpio_lock);
	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
			SB_CRWRDA_NP, addr, &val);
	mutex_unlock(&dev_priv->dpio_lock);
}
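
/*
 * Illustrative usage sketch (not taken from this file; "addr" and "SOME_BIT"
 * are placeholders): callers hold rps.hw_lock around the punit helpers,
 * which take dpio_lock internally:
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	val = vlv_punit_read(dev_priv, addr);
 *	vlv_punit_write(dev_priv, addr, val | SOME_BIT);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 */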

u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
			SB_CRRDDA_NP, reg, &val);

	return val;
}

void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
{
	u32 val = 0;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	mutex_lock(&dev_priv->dpio_lock);
	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
			SB_CRRDDA_NP, addr, &val);
	mutex_unlock(&dev_priv->dpio_lock);

	return val;
}

u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;
	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
			SB_CRRDDA_NP, reg, &val);
	return val;
}

void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;
	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
			SB_CRRDDA_NP, reg, &val);
	return val;
}

void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;
	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
			SB_CRRDDA_NP, reg, &val);
	return val;
}

void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;
	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
			SB_CRRDDA_NP, reg, &val);
	return val;
}

void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
			SB_CRWRDA_NP, reg, &val);
}
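
/*
 * DPIO (display PHY) accesses go through DPIO_DEVFN and the per-PHY IOSF
 * port, using the MMIO opcodes SB_MRD_NP/SB_MWR_NP rather than the private
 * register opcodes used by the other units.
 */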
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
{
	u32 val = 0;

	vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
			SB_MRD_NP, reg, &val);

	/*
	 * FIXME: There might be some registers where all 1's is a valid value,
	 * so ideally we should check the register offset instead...
	 */
	WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
	     pipe_name(pipe), reg, val);

	return val;
}

void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
{
	vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
			SB_MWR_NP, reg, &val);
}

/* SBI access */
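/*
 * intel_sbi_read()/intel_sbi_write() wait for SBI_BUSY to clear, program
 * SBI_ADDR (and SBI_DATA for writes), start the transaction through
 * SBI_CTL_STAT with the destination (ICLK or MPHY) and opcode, then wait
 * for SBI_BUSY and SBI_RESPONSE_FAIL to clear (reads return SBI_DATA at
 * that point).
 */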
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination)
{
	u32 value = 0;
	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));

	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
				100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		return 0;
	}

	I915_WRITE(SBI_ADDR, (reg << 16));

	if (destination == SBI_ICLK)
		value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
	else
		value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
	I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);

	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
				100)) {
		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
		return 0;
	}

	return I915_READ(SBI_DATA);
}

void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination)
{
	u32 tmp;

	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));

	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
				100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		return;
	}

	I915_WRITE(SBI_ADDR, (reg << 16));
	I915_WRITE(SBI_DATA, value);

	if (destination == SBI_ICLK)
		tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
	else
		tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
	I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);

	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
				100)) {
		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
		return;
	}
}

u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;
	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
			reg, &val);
	return val;
}

void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
			reg, &val);
}