Rev 7144
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Vinit Azad <vinit.azad@intel.com>
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Dave Gordon <david.s.gordon@intel.com>
 *    Alex Dai <yu.dai@intel.com>
 */
#include <linux/firmware.h>
#include "i915_drv.h"
#include "intel_guc.h"

/**
 * DOC: GuC-specific firmware loader
 *
 * intel_guc:
 * Top level structure of GuC. It handles firmware loading and manages the
 * client pool and doorbells. intel_guc owns an i915_guc_client to replace
 * the legacy ExecList submission.
 *
 * Firmware versioning:
 * The firmware build process generates a version header file with the major
 * and minor version defined. The versions are built into the CSS header of
 * the firmware. The i915 kernel driver sets the minimal firmware version
 * required per platform. The firmware installation package will install
 * (symbolically link) the proper version of the firmware.
 *
 * GuC address space:
 * The GuC does not allow any gfx GGTT address that falls into the range
 * [0, WOPCM_TOP), which is reserved for Boot ROM, SRAM and WOPCM. Currently
 * this top address is 512K. In order to exclude the 0-512K address space from
 * the GGTT, all gfx objects used by the GuC are pinned with PIN_OFFSET_BIAS
 * set to the size of the WOPCM.
 *
 * Firmware log:
 * The firmware log is enabled by setting i915.guc_log_level to a non-negative
 * level. Log data is printed out by reading the debugfs file
 * i915_guc_log_dump. Reading from i915_guc_load_status will print out the
 * firmware loading status and scratch register values.
 */
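
/*
 * A minimal sketch of the pinning scheme described above, modelled on
 * gem_allocate_guc_obj() in i915_guc_submission.c (names assumed from
 * that file); biasing the pin keeps the object's GGTT address at or
 * above WOPCM_TOP:
 *
 *	obj = i915_gem_alloc_object(dev, size);
 *	if (!obj)
 *		return NULL;
 *	if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
 *				  PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
 *		drm_gem_object_unreference(&obj->base);
 *		return NULL;
 *	}
 */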

#define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin"
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);

/* User-friendly representation of an enum */
const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
{
	switch (status) {
	case GUC_FIRMWARE_FAIL:
		return "FAIL";
	case GUC_FIRMWARE_NONE:
		return "NONE";
	case GUC_FIRMWARE_PENDING:
		return "PENDING";
	case GUC_FIRMWARE_SUCCESS:
		return "SUCCESS";
	default:
		return "UNKNOWN!";
	}
}

static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i, irqs;

	/* tell all command streamers NOT to forward interrupts and vblank to GuC */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MODE_GEN7(ring), irqs);

	/* route all GT interrupts to the host */
	I915_WRITE(GUC_BCS_RCS_IER, 0);
	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
	I915_WRITE(GUC_WD_VECS_IER, 0);
}

static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i, irqs;

	/* tell all command streamers to forward interrupts and vblank to GuC */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
	irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MODE_GEN7(ring), irqs);

	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	/* These three registers have the same bit definitions */
	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
	I915_WRITE(GUC_WD_VECS_IER, ~irqs);
}

static u32 get_gttype(struct drm_i915_private *dev_priv)
{
	/* XXX: GT type based on PCI device ID? field seems unused by fw */
	return 0;
}

static u32 get_core_family(struct drm_i915_private *dev_priv)
{
	switch (INTEL_INFO(dev_priv)->gen) {
	case 9:
		return GFXCORE_FAMILY_GEN9;

	default:
		DRM_ERROR("GUC: unsupported core family\n");
		return GFXCORE_FAMILY_UNKNOWN;
	}
}

static void set_guc_init_params(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	u32 params[GUC_CTL_MAX_DWORDS];
	int i;

	memset(&params, 0, sizeof(params));

	params[GUC_CTL_DEVICE_INFO] |=
		(get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
		(get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);

	/*
	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
	 * second. This ARAT value is calculated as:
	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
	 */
	params[GUC_CTL_ARAT_HIGH] = 0;
	params[GUC_CTL_ARAT_LOW] = 100000000;
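	/* i.e. 100,000,000 ticks at 10 ns per tick = 1,000,000,000 ns = 1 s */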

	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;

	params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
			GUC_CTL_VCS2_ENABLED;

	if (i915.guc_log_level >= 0) {
		params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
		params[GUC_CTL_DEBUG] =
			i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
	}

	if (guc->ads_obj) {
		u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj)
				>> PAGE_SHIFT;
		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
	}

	/* If GuC submission is enabled, set up additional parameters here */
	if (i915.enable_guc_submission) {
		u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
		u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
173
		pgs >>= PAGE_SHIFT;
180
		pgs >>= PAGE_SHIFT;
174
		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
181
		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
175
			(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
182
			(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
176
 
183
 
177
		params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
184
		params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
178
 
185
 
179
		/* Unmask this bit to enable the GuC's internal scheduler */
186
		/* Unmask this bit to enable the GuC's internal scheduler */
180
		params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
187
		params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
181
	}
188
	}
182
 
189
 
183
	I915_WRITE(SOFT_SCRATCH(0), 0);
190
	I915_WRITE(SOFT_SCRATCH(0), 0);
184
 
191
 
185
	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
192
	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
186
		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
193
		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
187
}
194
}
188
 
195
 
189
/*
196
/*
190
 * Read the GuC status register (GUC_STATUS) and store it in the
197
 * Read the GuC status register (GUC_STATUS) and store it in the
191
 * specified location; then return a boolean indicating whether
198
 * specified location; then return a boolean indicating whether
192
 * the value matches either of two values representing completion
199
 * the value matches either of two values representing completion
193
 * of the GuC boot process.
200
 * of the GuC boot process.
194
 *
201
 *
195
 * This is used for polling the GuC status in a wait_for_atomic()
202
 * This is used for polling the GuC status in a wait_for()
196
 * loop below.
203
 * loop below.
197
 */
204
 */
198
static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
205
static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
199
				      u32 *status)
206
				      u32 *status)
200
{
207
{
201
	u32 val = I915_READ(GUC_STATUS);
208
	u32 val = I915_READ(GUC_STATUS);
202
	u32 uk_val = val & GS_UKERNEL_MASK;
209
	u32 uk_val = val & GS_UKERNEL_MASK;
203
	*status = val;
210
	*status = val;
204
	return (uk_val == GS_UKERNEL_READY ||
211
	return (uk_val == GS_UKERNEL_READY ||
205
		((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
212
		((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
206
}
213
}
207
 
214
 
208
/*
215
/*
209
 * Transfer the firmware image to RAM for execution by the microcontroller.
216
 * Transfer the firmware image to RAM for execution by the microcontroller.
210
 *
217
 *
211
 * Architecturally, the DMA engine is bidirectional, and can potentially even
218
 * Architecturally, the DMA engine is bidirectional, and can potentially even
212
 * transfer between GTT locations. This functionality is left out of the API
219
 * transfer between GTT locations. This functionality is left out of the API
213
 * for now as there is no need for it.
220
 * for now as there is no need for it.
214
 *
221
 *
215
 * Note that GuC needs the CSS header plus uKernel code to be copied by the
222
 * Note that GuC needs the CSS header plus uKernel code to be copied by the
216
 * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
223
 * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
217
 */
224
 */
218
static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
225
static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
219
{
226
{
220
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
227
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
221
	struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
228
	struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
222
	unsigned long offset;
229
	unsigned long offset;
223
	struct sg_table *sg = fw_obj->pages;
230
	struct sg_table *sg = fw_obj->pages;
224
	u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
231
	u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
225
	int i, ret = 0;
232
	int i, ret = 0;
226
 
233
 
227
	/* where RSA signature starts */
234
	/* where RSA signature starts */
228
	offset = guc_fw->rsa_offset;
235
	offset = guc_fw->rsa_offset;
229
 
236
 
230
	/* Copy RSA signature from the fw image to HW for verification */
237
	/* Copy RSA signature from the fw image to HW for verification */
231
	sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
238
	sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
232
	for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
239
	for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
233
		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
240
		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
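	/*
	 * The loop above writes UOS_RSA_SCRATCH_MAX_COUNT dwords; assuming
	 * that constant is 64 (i915_guc_reg.h), this is 256 bytes, the size
	 * of an RSA-2048 signature.
	 */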

	/* The header plus uCode will be copied to WOPCM via DMA, excluding any
	 * other components */
	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);

	/* Set the source address for the new blob */
	offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset;
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/*
	 * Set the DMA destination. Current uCode expects the code to be
	 * loaded at 8k; locations below this are used for the stack.
	 */
	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/* Finally start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));

	/*
	 * Wait for the DMA to complete & the GuC to start up.
	 * NB: Docs recommend not using the interrupt for completion.
	 * Measurements indicate this should take no more than 20ms, so a
	 * timeout here indicates that the GuC has failed and is unusable.
	 * (Higher levels of the driver will attempt to fall back to
	 * execlist mode if this happens.)
	 */
	ret = wait_for(guc_ucode_response(dev_priv, &status), 100);

	DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
			I915_READ(DMA_CTRL), status);

	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
		DRM_ERROR("GuC firmware signature verification failed\n");
		ret = -ENOEXEC;
	}

	DRM_DEBUG_DRIVER("returning %d\n", ret);

	return ret;
}

/*
 * Load the GuC firmware blob into the MinuteIA.
 */
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	struct drm_device *dev = dev_priv->dev;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
	if (ret) {
		DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("pin failed %d\n", ret);
		return ret;
	}

	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* init WOPCM */
	I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
	I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);

	/* Enable MIA caching. GuC clock gating is disabled. */
	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);

	/* WaDisableMinuteIaClockGating:skl,bxt */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
					      ~GUC_ENABLE_MIA_CLOCK_GATING));
	}

	/* WaC6DisallowByGfxPause */
	I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);

	if (IS_BROXTON(dev))
		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
	else
		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	if (IS_GEN9(dev)) {
		/* DOP Clock Gating Enable for GuC clocks */
		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
					    I915_READ(GEN7_MISCCPCTL)));

		/* allows for 5us before GT can go to RC6 */
		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
	}

	set_guc_init_params(dev_priv);

	ret = guc_ucode_xfer_dma(dev_priv);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	/*
	 * We keep the object pages for reuse during resume. But we can unpin it
	 * now that DMA has completed, so it doesn't continue to take up space.
	 */
	i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);

	return ret;
}

/**
 * intel_guc_ucode_load() - load GuC uCode into the device
 * @dev:	drm device
 *
 * Called from gem_init_hw() during driver loading and also after a GPU reset.
 *
 * The firmware image should have already been fetched into memory by the
 * earlier call to intel_guc_ucode_init(), so here we need only check that
 * it succeeded, and then transfer the image to the h/w.
 *
 * Return:	non-zero code on error
 */
int intel_guc_ucode_load(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	int err = 0;

	if (!i915.enable_guc_submission)
		return 0;

	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));

	direct_interrupts_to_host(dev_priv);

	if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
		return 0;

	if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
	    guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
		return -ENOEXEC;

	guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;

	DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));

	switch (guc_fw->guc_fw_fetch_status) {
	case GUC_FIRMWARE_FAIL:
		/* something went wrong :( */
		err = -EIO;
		goto fail;

	case GUC_FIRMWARE_NONE:
	case GUC_FIRMWARE_PENDING:
	default:
		/* "can't happen" */
		WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
			guc_fw->guc_fw_path,
			intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
			guc_fw->guc_fw_fetch_status);
		err = -ENXIO;
		goto fail;

	case GUC_FIRMWARE_SUCCESS:
		break;
	}

	err = i915_guc_submission_init(dev);
	if (err)
		goto fail;

	err = guc_ucode_xfer(dev_priv);
	if (err)
		goto fail;

	guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;

	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));

	if (i915.enable_guc_submission) {
		/* The execbuf_client will be recreated. Release it first. */
		i915_guc_submission_disable(dev);

		err = i915_guc_submission_enable(dev);
		if (err)
			goto fail;
		direct_interrupts_to_guc(dev_priv);
	}

	return 0;

fail:
	if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
		guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;

	direct_interrupts_to_host(dev_priv);
	i915_guc_submission_disable(dev);
	i915_guc_submission_fini(dev);

	return err;
}

static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
{
	struct drm_i915_gem_object *obj;
	const struct firmware *fw;
	struct guc_css_header *css;
	size_t size;
	int err;

	DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));

	err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev);
	if (err)
		goto fail;
	if (!fw)
		goto fail;

	DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
		guc_fw->guc_fw_path, fw);

	/* Check the size of the blob before examining buffer contents */
	if (fw->size < sizeof(struct guc_css_header)) {
		DRM_ERROR("Firmware header is missing\n");
		goto fail;
	}

	css = (struct guc_css_header *)fw->data;

	/* Firmware bits always start from header */
	guc_fw->header_offset = 0;
	guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
		css->key_size_dw - css->exponent_size_dw) * sizeof(u32);

	if (guc_fw->header_size != sizeof(struct guc_css_header)) {
		DRM_ERROR("CSS header definition mismatch\n");
		goto fail;
	}
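	/*
	 * Worked example, with sizes assumed from the GuC CSS layout (64-dword
	 * RSA-2048 key and modulus, 1-dword exponent, header_size_dw = 0xA1):
	 * (0xA1 - 0x40 - 0x40 - 0x01) * 4 = 0x20 * 4 = 128 bytes,
	 * which must equal sizeof(struct guc_css_header).
	 */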

	/* then, uCode */
	guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
	guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

	/* now RSA */
	if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
		DRM_ERROR("RSA key size is bad\n");
		goto fail;
	}
	guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
	guc_fw->rsa_size = css->key_size_dw * sizeof(u32);

	/* At least, it should have header, uCode and RSA. Size of all three. */
	size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
	if (fw->size < size) {
		DRM_ERROR("Missing firmware components\n");
		goto fail;
	}

	/* Header and uCode will be loaded to WOPCM. Size of the two. */
	size = guc_fw->header_size + guc_fw->ucode_size;

	/* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
	if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
		DRM_ERROR("Firmware is too large to fit in WOPCM\n");
		goto fail;
	}

	/*
	 * The GuC firmware image has the version number embedded at a well-known
	 * offset within the firmware blob; note that major / minor version are
	 * TWO bytes each (i.e. u16), although all pointers and offsets are defined
	 * in terms of bytes (u8).
	 */
	guc_fw->guc_fw_major_found = css->guc_sw_version >> 16;
	guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF;
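	/*
	 * e.g. the 4.3 firmware this driver wants on Skylake would report
	 * css->guc_sw_version == 0x00040003: >> 16 gives major 4,
	 * & 0xFFFF gives minor 3.
	 */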

	if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
	    guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
		DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
		err = -ENOEXEC;
		goto fail;
	}
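	/*
	 * Note the asymmetry above: the major version must match exactly,
	 * while the minor version need only be at least the wanted one.
	 * With a wanted version of 4.3, a 4.5 blob is accepted but a 5.0
	 * blob is rejected.
	 */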

	DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);

	mutex_lock(&dev->struct_mutex);
	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR_OR_NULL(obj)) {
		err = obj ? PTR_ERR(obj) : -ENOMEM;
		goto fail;
	}

	guc_fw->guc_fw_obj = obj;
	guc_fw->guc_fw_size = fw->size;

	DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
			guc_fw->guc_fw_obj);

	release_firmware(fw);
	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS;
	return;

fail:
	DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
		err, fw, guc_fw->guc_fw_obj);
	DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
		  guc_fw->guc_fw_path, err);

	mutex_lock(&dev->struct_mutex);
	obj = guc_fw->guc_fw_obj;
	if (obj)
		drm_gem_object_unreference(&obj->base);
	guc_fw->guc_fw_obj = NULL;
	mutex_unlock(&dev->struct_mutex);

	release_firmware(fw);		/* OK even if fw is NULL */
	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
}

/**
 * intel_guc_ucode_init() - define parameters and fetch firmware
 * @dev:	drm device
 *
 * Called early during driver load, but after GEM is initialised.
 *
 * The firmware will be transferred to the GuC's memory later,
 * when intel_guc_ucode_load() is called.
 */
void intel_guc_ucode_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	const char *fw_path;

	if (!HAS_GUC_SCHED(dev))
		i915.enable_guc_submission = false;

	if (!HAS_GUC_UCODE(dev)) {
		fw_path = NULL;
	} else if (IS_SKYLAKE(dev)) {
		fw_path = I915_SKL_GUC_UCODE;
		guc_fw->guc_fw_major_wanted = 4;
		guc_fw->guc_fw_minor_wanted = 3;
	} else {
		i915.enable_guc_submission = false;
		fw_path = "";	/* unknown device */
	}

	if (!i915.enable_guc_submission)
		return;

	guc_fw->guc_dev = dev;
	guc_fw->guc_fw_path = fw_path;
	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
	guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;

	if (fw_path == NULL)
		return;

	if (*fw_path == '\0') {
		DRM_ERROR("No GuC firmware known for this platform\n");
		guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
		return;
	}

	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
	DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
	guc_fw_fetch(dev, guc_fw);
	/* status must now be FAIL or SUCCESS */
}

/**
 * intel_guc_ucode_fini() - clean up all allocated resources
 * @dev:	drm device
 */
void intel_guc_ucode_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;

	mutex_lock(&dev->struct_mutex);
	direct_interrupts_to_host(dev_priv);
	i915_guc_submission_disable(dev);
	i915_guc_submission_fini(dev);

	if (guc_fw->guc_fw_obj)
		drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
	guc_fw->guc_fw_obj = NULL;
	mutex_unlock(&dev->struct_mutex);

	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
}