Diff: Rev 6937 → Rev 7144
@@ Line 221 (rev 6937) | Line 221 (rev 7144) @@
     FAULT_AND_HALT, /* Debug only */
     FAULT_AND_STREAM,
     FAULT_AND_CONTINUE /* Unsupported */
 };
 #define GEN8_CTX_ID_SHIFT 32
-#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
+#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
+#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26

-static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
+static int intel_lr_context_pin(struct intel_context *ctx,
+                                struct intel_engine_cs *engine);
 static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
         struct drm_i915_gem_object *default_ctx_obj);
@@ Line 261 (rev 6937) | Line 263 (rev 7144) @@
         return 1;

     return 0;
 }

-/**
- * intel_execlists_ctx_id() - get the Execlists Context ID
- * @ctx_obj: Logical Ring Context backing object.
- *
- * Do not confuse with ctx->id! Unfortunately we have a name overload
- * here: the old context ID we pass to userspace as a handler so that
- * they can refer to a context, and the new context ID we pass to the
- * ELSP so that the GPU can inform us of the context status via
- * interrupts.
- *
- * Return: 20-bits globally unique context ID.
- */
-u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
-{
-    u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
-            LRC_PPHWSP_PN * PAGE_SIZE;
-
-    /* LRCA is required to be 4K aligned so the more significant 20 bits
-     * are globally unique */
-    return lrca >> 12;
-}
-
-static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
+static void
+logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
 {
     struct drm_device *dev = ring->dev;

-    return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-        IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
-           (ring->id == VCS || ring->id == VCS2);
-}
-
-uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
-                     struct intel_engine_cs *ring)
-{
-    struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-    uint64_t desc;
-    uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
-            LRC_PPHWSP_PN * PAGE_SIZE;
-
-    WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
-
-    desc = GEN8_CTX_VALID;
-    desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
-    if (IS_GEN8(ctx_obj->base.dev))
-        desc |= GEN8_CTX_L3LLC_COHERENT;
-    desc |= GEN8_CTX_PRIVILEGE;
-    desc |= lrca;
-    desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
-
-    /* TODO: WaDisableLiteRestore when we start using semaphore
-     * signalling between Command Streamers */
-    /* desc |= GEN8_CTX_FORCE_RESTORE; */
-
-    /* WaEnableForceRestoreInCtxtDescForVCS:skl */
+    ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+                    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
+                    (ring->id == VCS || ring->id == VCS2);
+
+    ring->ctx_desc_template = GEN8_CTX_VALID;
+    ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
+                   GEN8_CTX_ADDRESSING_MODE_SHIFT;
+    if (IS_GEN8(dev))
+        ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
+    ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
+
+    /* TODO: WaDisableLiteRestore when we start using semaphore
+     * signalling between Command Streamers */
+    /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
+
+    /* WaEnableForceRestoreInCtxtDescForVCS:skl */
+    /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
+    if (ring->disable_lite_restore_wa)
+        ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
+}
+
+/**
+ * intel_lr_context_descriptor_update() - calculate & cache the descriptor
+ *                                        descriptor for a pinned context
+ * @ctx: Context to work on
+ * @ring: Engine the descriptor will be used with
+ *
+ * The context descriptor encodes various attributes of a context,
+ * including its GTT address and some flags. Because it's fairly
+ * expensive to calculate, we'll just do it once and cache the result,
+ * which remains valid until the context is unpinned.
+ *
+ * This is what a descriptor looks like, from LSB to MSB:
+ *    bits 0-11:  flags, GEN8_CTX_* (cached in ctx_desc_template)
+ *    bits 12-31: LRCA, GTT address of (the HWSP of) this context
+ *    bits 32-51: ctx ID, a globally unique tag (the LRCA again!)
+ *    bits 52-63: reserved, may encode the engine ID (for GuC)
+ */
+static void
+intel_lr_context_descriptor_update(struct intel_context *ctx,
+                   struct intel_engine_cs *ring)
+{
+    uint64_t lrca, desc;
+
+    lrca = ctx->engine[ring->id].lrc_vma->node.start +
+           LRC_PPHWSP_PN * PAGE_SIZE;
+
+    desc = ring->ctx_desc_template;                    /* bits  0-11 */
+    desc |= lrca;                                      /* bits 12-31 */
+    desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
+
+    ctx->engine[ring->id].lrc_desc = desc;
+}
+
+uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+                     struct intel_engine_cs *ring)
+{
+    return ctx->engine[ring->id].lrc_desc;
+}
+
+/**
+ * intel_execlists_ctx_id() - get the Execlists Context ID
+ * @ctx: Context to get the ID for
+ * @ring: Engine to get the ID for
+ *
+ * Do not confuse with ctx->id! Unfortunately we have a name overload
+ * here: the old context ID we pass to userspace as a handler so that
+ * they can refer to a context, and the new context ID we pass to the
+ * ELSP so that the GPU can inform us of the context status via
+ * interrupts.
+ *
+ * The context ID is a portion of the context descriptor, so we can
+ * just extract the required part from the cached descriptor.
+ *
+ * Return: 20-bits globally unique context ID.
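Note: on the new side the descriptor is computed once at pin time and cached in ctx->engine[ring->id].lrc_desc, so intel_execlists_ctx_id() only has to extract bits 32-51. The hunk is clipped at the new doc-comment, but given intel_lr_context_descriptor() above and the documented bit layout, the new function body is presumably just:

    u32 intel_execlists_ctx_id(struct intel_context *ctx,
                               struct intel_engine_cs *ring)
    {
        /* the ctx ID lives in bits 32-51 of the cached descriptor */
        return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT;
    }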
@@ Line 361 (rev 6937) | Line 390 (rev 7144) @@

 static int execlists_update_context(struct drm_i915_gem_request *rq)
 {
     struct intel_engine_cs *ring = rq->ring;
     struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-    struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-    struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
-    struct page *page;
-    uint32_t *reg_state;
-
-    BUG_ON(!ctx_obj);
-    WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
-    WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
-
-    page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-    reg_state = kmap_atomic(page);
+    uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;

     reg_state[CTX_RING_TAIL+1] = rq->tail;
-    reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);

     if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
@@ Line 388 (rev 6937) | Line 406 (rev 7144) @@
         ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
         ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
         ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
     }
-
-    kunmap_atomic(reg_state);

     return 0;
 }

@@ Line 429 (rev 6937) | Line 445 (rev 7144) @@
             req0 = cursor;
         } else if (req0->ctx == cursor->ctx) {
             /* Same ctx: ignore first request, as second request
              * will update tail past first request's workload */
             cursor->elsp_submitted = req0->elsp_submitted;
-            list_del(&req0->execlist_link);
-            list_add_tail(&req0->execlist_link,
+            list_move_tail(&req0->execlist_link,
                       &ring->execlist_retired_req_list);
             req0 = cursor;
         } else {
             req1 = cursor;
             break;
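Note: list_move_tail() from <linux/list.h> is the canonical replacement for an open-coded list_del() + list_add_tail() pair on the same node; for reference, its long-standing definition is essentially:

    static inline void list_move_tail(struct list_head *list,
                                      struct list_head *head)
    {
        __list_del_entry(list);     /* unlink from the current list */
        list_add_tail(list, head);  /* append at the new tail */
    }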
@@ Line 476 (rev 6937) | Line 491 (rev 7144) @@
     head_req = list_first_entry_or_null(&ring->execlist_queue,
                         struct drm_i915_gem_request,
                         execlist_link);

     if (head_req != NULL) {
-        struct drm_i915_gem_object *ctx_obj =
-                head_req->ctx->engine[ring->id].state;
-        if (intel_execlists_ctx_id(ctx_obj) == request_id) {
+        if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) {
             WARN(head_req->elsp_submitted == 0,
                  "Never submitted head request\n");

             if (--head_req->elsp_submitted <= 0) {
-                list_del(&head_req->execlist_link);
-                list_add_tail(&head_req->execlist_link,
+                list_move_tail(&head_req->execlist_link,
                            &ring->execlist_retired_req_list);
                 return true;
             }
         }
     }

     return false;
 }

+static void get_context_status(struct intel_engine_cs *ring,
+                   u8 read_pointer,
+                   u32 *status, u32 *context_id)
+{
+    struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+    if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES))
+        return;
+
+    *status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer));
+    *context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer));
+}
+
 /**
@@ Line 514 (rev 6937) | Line 539 (rev 7144) @@
     u32 submit_contexts = 0;

     status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));

     read_pointer = ring->next_context_status_buffer;
-    write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
+    write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
     if (read_pointer > write_pointer)
         write_pointer += GEN8_CSB_ENTRIES;

     spin_lock(&ring->execlist_lock);

     while (read_pointer < write_pointer) {
-        read_pointer++;
-        status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES));
+
+        get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES,
@@ Line 543 (rev 6937) | Line 568 (rev 7144) @@
         if (execlists_check_remove_request(ring, status_id))
             submit_contexts++;
         }
     }

-    if (disable_lite_restore_wa(ring)) {
+    if (ring->disable_lite_restore_wa) {
         /* Prevent a ctx to preempt itself */
         if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
             (submit_contexts != 0))
             execlists_context_unqueue(ring);
     } else if (submit_contexts != 0) {
         execlists_context_unqueue(ring);
     }

     spin_unlock(&ring->execlist_lock);

-    WARN(submit_contexts > 2, "More than two context complete events?\n");
+    if (unlikely(submit_contexts > 2))
+        DRM_ERROR("More than two context complete events?\n");
+
     ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;

+    /* Update the read pointer to the old write pointer. Manual ringbuffer
+     * management ftw */
     I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-           _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
-                 ((u32)ring->next_context_status_buffer &
-                  GEN8_CSB_PTR_MASK) << 8));
+           _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
+                 ring->next_context_status_buffer << 8));
 }

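Note: a worked example of the modular pointer walk above, assuming the usual six-slot context status buffer (GEN8_CSB_ENTRIES == 6; the "use '5'" comment in gen8_init_common_ring further down is entries - 1). process_csb_slot() is a hypothetical stand-in for the get_context_status() / execlists_check_remove_request() sequence:

    u8 read_pointer = 4;   /* where the driver stopped last time */
    u8 write_pointer = 1;  /* what the hardware status pointer reports */

    if (read_pointer > write_pointer)        /* wrapped around: 4 > 1 */
        write_pointer += GEN8_CSB_ENTRIES;   /* 1 + 6 = 7 */

    while (read_pointer < write_pointer)     /* three new events */
        process_csb_slot(++read_pointer % GEN8_CSB_ENTRIES); /* slots 5, 0, 1 */

    /* afterwards: next_context_status_buffer = 7 % 6 = 1 */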
@@ Line 568 (rev 6937) | Line 596 (rev 7144) @@
 static int execlists_context_queue(struct drm_i915_gem_request *request)
 {
     struct intel_engine_cs *ring = request->ring;
     struct drm_i915_gem_request *cursor;
@@ Line 590 (rev 6937) | Line 618 (rev 7144) @@
                      execlist_link);

         if (request->ctx == tail_req->ctx) {
             WARN(tail_req->elsp_submitted != 0,
                 "More than 2 already-submitted reqs queued\n");
-            list_del(&tail_req->execlist_link);
-            list_add_tail(&tail_req->execlist_link,
+            list_move_tail(&tail_req->execlist_link,
                 &ring->execlist_retired_req_list);
         }
     }
@@ Line 658 (rev 6937) | Line 685 (rev 7144) @@
     return logical_ring_invalidate_all_caches(req);
 }

 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-    int ret;
+    int ret = 0;

     request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;

-    if (request->ctx != request->ring->default_context) {
-        ret = intel_lr_context_pin(request);
+    if (i915.enable_guc_submission) {
+        /*
+         * Check that the GuC has space for the request before
+         * going any further, as the i915_add_request() call
+         * later on mustn't fail ...
+         */
+        struct intel_guc *guc = &request->i915->guc;
+
+        ret = i915_guc_wq_check_space(guc->execbuf_client);
         if (ret)
             return ret;
     }

-    return 0;
+    if (request->ctx != request->i915->kernel_context)
+        ret = intel_lr_context_pin(request->ctx, request->ring);
+
+    return ret;
@@ Line 722 (rev 6937) | Line 759 (rev 7144) @@
  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
  * really happens during submission is that the context and current tail will be placed
  * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
-static void
+static int
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-    struct intel_engine_cs *ring = request->ring;
+    struct intel_ringbuffer *ringbuf = request->ringbuf;
     struct drm_i915_private *dev_priv = request->i915;
+    struct intel_engine_cs *engine = request->ring;

-    intel_logical_ring_advance(request->ringbuf);
-
-    request->tail = request->ringbuf->tail;
+    intel_logical_ring_advance(ringbuf);
+    request->tail = ringbuf->tail;

-    if (intel_ring_stopped(ring))
-        return;
+    /*
+     * Here we add two extra NOOPs as padding to avoid
+     * lite restore of a context with HEAD==TAIL.
+     *
+     * Caller must reserve WA_TAIL_DWORDS for us!
+     */
+    intel_logical_ring_emit(ringbuf, MI_NOOP);
+    intel_logical_ring_emit(ringbuf, MI_NOOP);
+    intel_logical_ring_advance(ringbuf);
+
+    if (intel_ring_stopped(engine))
+        return 0;
+
+    if (engine->last_context != request->ctx) {
+        if (engine->last_context)
+            intel_lr_context_unpin(engine->last_context, engine);
+        if (request->ctx != request->i915->kernel_context) {
+            intel_lr_context_pin(request->ctx, engine);
+            engine->last_context = request->ctx;
+        } else {
+            engine->last_context = NULL;
+        }
+    }

     if (dev_priv->guc.execbuf_client)
         i915_guc_submit(dev_priv->guc.execbuf_client, request);
     else
         execlists_context_queue(request);
+
+    return 0;
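Note: the two padding NOOPs implement WaIdleLiteRestore (per the comment, a lite restore of a context with HEAD == TAIL must be avoided). Since they moved from gen8_emit_request() into advance_and_submit(), every emit_request implementation now has to over-reserve by WA_TAIL_DWORDS; a sketch of the resulting contract, with names taken from the hunks later in this diff:

    /* emit_request side: its own commands plus the tail padding */
    ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
    ...
    /* advance_and_submit() side: sample the tail first, then burn
     * the reserved dwords as padding NOOPs */
    request->tail = ringbuf->tail;
    intel_logical_ring_emit(ringbuf, MI_NOOP);
    intel_logical_ring_emit(ringbuf, MI_NOOP);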
@@ Line 965 (rev 6937) | Line 1025 (rev 7144) @@
     list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
         struct intel_context *ctx = req->ctx;
         struct drm_i915_gem_object *ctx_obj =
                 ctx->engine[ring->id].state;

-        if (ctx_obj && (ctx != ring->default_context))
-            intel_lr_context_unpin(req);
+        if (ctx_obj && (ctx != req->i915->kernel_context))
+            intel_lr_context_unpin(ctx, ring);
+
         list_del(&req->execlist_link);
         i915_gem_request_unreference(req);
     }
@@ Line 1010 (rev 6937) | Line 1071 (rev 7144) @@

     ring->gpu_caches_dirty = false;
     return 0;
 }

-static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
-        struct drm_i915_gem_object *ctx_obj,
-        struct intel_ringbuffer *ringbuf)
+static int intel_lr_context_do_pin(struct intel_context *ctx,
+        struct intel_engine_cs *ring)
 {
     struct drm_device *dev = ring->dev;
     struct drm_i915_private *dev_priv = dev->dev_private;
-    int ret = 0;
+    struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+    struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+    struct page *lrc_state_page;
+    uint32_t *lrc_reg_state;
+    int ret;

     WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
     ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
             PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
     if (ret)
         return ret;

+    lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
+    if (WARN_ON(!lrc_state_page)) {
+        ret = -ENODEV;
+        goto unpin_ctx_obj;
+    }
+
     ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
     if (ret)
         goto unpin_ctx_obj;
+
+    ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+    intel_lr_context_descriptor_update(ctx, ring);
+    lrc_reg_state = kmap(lrc_state_page);
+    lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
@@ Line 1042 (rev 6937) | Line 1118 (rev 7144) @@
     i915_gem_object_ggtt_unpin(ctx_obj);

     return ret;
 }

-static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
+static int intel_lr_context_pin(struct intel_context *ctx,
+                struct intel_engine_cs *engine)
 {
     int ret = 0;
-    struct intel_engine_cs *ring = rq->ring;
-    struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-    struct intel_ringbuffer *ringbuf = rq->ringbuf;

-    if (rq->ctx->engine[ring->id].pin_count++ == 0) {
-        ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
+    if (ctx->engine[engine->id].pin_count++ == 0) {
+        ret = intel_lr_context_do_pin(ctx, engine);
         if (ret)
             goto reset_pin_count;
+
+        i915_gem_context_reference(ctx);
     }
     return ret;

 reset_pin_count:
-    rq->ctx->engine[ring->id].pin_count = 0;
+    ctx->engine[engine->id].pin_count = 0;
     return ret;
 }

-void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
+void intel_lr_context_unpin(struct intel_context *ctx,
+                struct intel_engine_cs *engine)
 {
-    struct intel_engine_cs *ring = rq->ring;
-    struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-    struct intel_ringbuffer *ringbuf = rq->ringbuf;
+    struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;

-    if (ctx_obj) {
-        WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-        if (--rq->ctx->engine[ring->id].pin_count == 0) {
-            intel_unpin_ringbuffer_obj(ringbuf);
+    WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
+    if (--ctx->engine[engine->id].pin_count == 0) {
+        // kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
+        intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
+        i915_gem_object_ggtt_unpin(ctx_obj);
+        ctx->engine[engine->id].lrc_vma = NULL;
+        ctx->engine[engine->id].lrc_desc = 0;
+        ctx->engine[engine->id].lrc_reg_state = NULL;
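Note: pin_count behaves like a per-engine refcount on the context mappings; only the 0 -> 1 and 1 -> 0 transitions do real work. Illustrative only:

    intel_lr_context_pin(ctx, engine);    /* 0 -> 1: do_pin, cache lrc_vma,
                                           * lrc_desc and lrc_reg_state, and
                                           * take a context reference */
    intel_lr_context_pin(ctx, engine);    /* 1 -> 2: just the counter */
    intel_lr_context_unpin(ctx, engine);  /* 2 -> 1: still mapped */
    intel_lr_context_unpin(ctx, engine);  /* 1 -> 0: unpin ring and ctx_obj,
                                           * clear the cached lrc_* fields */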
@@ Line 1085 (rev 6937) | Line 1164 (rev 7144) @@
     struct intel_ringbuffer *ringbuf = req->ringbuf;
     struct drm_device *dev = ring->dev;
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct i915_workarounds *w = &dev_priv->workarounds;

-    if (WARN_ON_ONCE(w->count == 0))
+    if (w->count == 0)
         return 0;

     ring->gpu_caches_dirty = true;
@@ Line 1472 (rev 6937) | Line 1551 (rev 7144) @@
     struct drm_device *dev = ring->dev;
     struct drm_i915_private *dev_priv = dev->dev_private;
     u8 next_context_status_buffer_hw;

     lrc_setup_hardware_status_page(ring,
-            ring->default_context->engine[ring->id].state);
+            dev_priv->kernel_context->engine[ring->id].state);

     I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
@@ Line 1491 (rev 6937) | Line 1570 (rev 7144) @@
      * Effectively, these states have been observed:
      *
      *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
      * BDW  | CSB regs not reset       | CSB regs reset       |
      * CHT  | CSB regs not reset       | CSB regs not reset   |
+     * SKL  |         ?                |         ?            |
+     * BXT  |         ?                |         ?            |
      */
-    next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
-                                     & GEN8_CSB_PTR_MASK);
+    next_context_status_buffer_hw =
+        GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring)));

     /*
      * When the CSB registers are reset (also after power-up / gpu reset),
      * CSB write pointer is set to all 1's, which is not valid, use '5' in
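Note: GEN8_CSB_WRITE_PTR() and GEN8_CSB_READ_PTR_MASK are new helpers that live in the header rather than in any hunk shown here. Judging from the old open-coded forms ("status_pointer & GEN8_CSB_PTR_MASK" for the write pointer, and the "<< 8" shift when writing back the read pointer), they are presumably along these lines:

    #define GEN8_CSB_ENTRIES        6
    #define GEN8_CSB_PTR_MASK       0x7
    #define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)  /* bits 2:0 */
    #define GEN8_CSB_READ_PTR_MASK  (GEN8_CSB_PTR_MASK << 8)  /* bits 10:8 */
    #define GEN8_CSB_WRITE_PTR(csb_status) \
        ((csb_status) & GEN8_CSB_WRITE_PTR_MASK)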
@@ Line 1696 (rev 6937) | Line 1777 (rev 7144) @@
                   u32 flush_domains)
 {
     struct intel_ringbuffer *ringbuf = request->ringbuf;
     struct intel_engine_cs *ring = ringbuf->ring;
     u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
-    bool vf_flush_wa;
+    bool vf_flush_wa = false;
     u32 flags = 0;
     int ret;

@@ Line 1718 (rev 6937) | Line 1799 (rev 7144) @@
         flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
         flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
         flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
         flags |= PIPE_CONTROL_QW_WRITE;
         flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
-    }

-    /*
-     * On GEN9+ Before VF_CACHE_INVALIDATE we need to emit a NULL pipe
-     * control.
-     */
-    vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
-              flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
+
+        /*
+         * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
+         * pipe control.
+         */
+        if (IS_GEN9(ring->dev))
+            vf_flush_wa = true;
+    }

     ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
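Note: the 12-vs-6 dword budget follows from a gen8+ PIPE_CONTROL being six dwords long (GFX_OP_PIPE_CONTROL(6), as used by gen8_emit_request_render() later in this diff); when the gen9 workaround applies, a NULL PIPE_CONTROL precedes the real one, doubling the cost. The emission itself falls outside the hunk shown, but is presumably:

    if (vf_flush_wa) {
        /* NULL pipe control: header plus five zero dwords, no flags */
        intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, 0);
    }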
@@ Line 1789 (rev 6937) | Line 1870 (rev 7144) @@

     /* See bxt_a_get_seqno() explaining the reason for the clflush. */
     intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
 }

-static int gen8_emit_request(struct drm_i915_gem_request *request)
-{
-    struct intel_ringbuffer *ringbuf = request->ringbuf;
-    struct intel_engine_cs *ring = ringbuf->ring;
-    u32 cmd;
-    int ret;
-
-    /*
-     * Reserve space for 2 NOOPs at the end of each request to be
-     * used as a workaround for not being allowed to do lite
-     * restore with HEAD==TAIL (WaIdleLiteRestore).
-     */
-    ret = intel_logical_ring_begin(request, 8);
-    if (ret)
-        return ret;
-
-    cmd = MI_STORE_DWORD_IMM_GEN4;
-    cmd |= MI_GLOBAL_GTT;
-
-    intel_logical_ring_emit(ringbuf, cmd);
-    intel_logical_ring_emit(ringbuf,
-                (ring->status_page.gfx_addr +
-                (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
-    intel_logical_ring_emit(ringbuf, 0);
-    intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
-    intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
-    intel_logical_ring_emit(ringbuf, MI_NOOP);
-    intel_logical_ring_advance_and_submit(request);
-
-    /*
-     * Here we add two extra NOOPs as padding to avoid
-     * lite restore of a context with HEAD==TAIL.
-     */
-    intel_logical_ring_emit(ringbuf, MI_NOOP);
-    intel_logical_ring_emit(ringbuf, MI_NOOP);
-    intel_logical_ring_advance(ringbuf);
+/*
+ * Reserve space for 2 NOOPs at the end of each request to be
+ * used as a workaround for not being allowed to do lite
+ * restore with HEAD==TAIL (WaIdleLiteRestore).
+ */
+#define WA_TAIL_DWORDS 2
+
+static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
+{
+    return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
+}
+
+static int gen8_emit_request(struct drm_i915_gem_request *request)
+{
+    struct intel_ringbuffer *ringbuf = request->ringbuf;
+    int ret;
+
+    ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+    if (ret)
+        return ret;
+
+    /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+    BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
+
+    intel_logical_ring_emit(ringbuf,
+                (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
+    intel_logical_ring_emit(ringbuf,
+                hws_seqno_address(request->ring) |
+                MI_FLUSH_DW_USE_GTT);
+    intel_logical_ring_emit(ringbuf, 0);
+    intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+    intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+    intel_logical_ring_emit(ringbuf, MI_NOOP);
+    return intel_logical_ring_advance_and_submit(request);
+}
+
+static int gen8_emit_request_render(struct drm_i915_gem_request *request)
+{
+    struct intel_ringbuffer *ringbuf = request->ringbuf;
+    int ret;
+
+    ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
+    if (ret)
+        return ret;
+
+    /* We're using qword write, seqno should be aligned to 8 bytes. */
+    BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
+    /* w/a for post sync ops following a GPGPU operation we
+     * need a prior CS_STALL, which is emitted by the flush
+     * following the batch.
+     */
+    intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+    intel_logical_ring_emit(ringbuf,
+                (PIPE_CONTROL_GLOBAL_GTT_IVB |
+                 PIPE_CONTROL_CS_STALL |
+                 PIPE_CONTROL_QW_WRITE));
+    intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
+    intel_logical_ring_emit(ringbuf, 0);
+    intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+    /* We're thrashing one dword of HWS. */
+    intel_logical_ring_emit(ringbuf, 0);
+    intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+    intel_logical_ring_emit(ringbuf, MI_NOOP);
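Note: the seqno write switches from MI_STORE_DWORD_IMM to an MI_FLUSH_DW post-sync store (and, on the render ring, to a PIPE_CONTROL qword write). hws_seqno_address() depends on I915_GEM_HWS_INDEX_ADDR, which is defined elsewhere; mirroring the old open-coded address above, it is presumably:

    /* hypothetical, not part of any hunk shown here */
    #define I915_GEM_HWS_INDEX_ADDR \
        (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

    /* which is what the new compile-time check guards: MI_FLUSH_DW
     * requires bit 5 of the target address to be zero */
    BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));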
@@ Line 1909 (rev 6937) | Line 2017 (rev 7144) @@
     if (ring->status_page.obj) {
         kunmap(sg_page(ring->status_page.obj->pages->sgl));
         ring->status_page.obj = NULL;
     }

+    ring->disable_lite_restore_wa = false;
+    ring->ctx_desc_template = 0;
+
     lrc_destroy_wa_ctx_obj(ring);
     ring->dev = NULL;
 }

-static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+static void
+logical_ring_default_vfuncs(struct drm_device *dev,
+                struct intel_engine_cs *ring)
+{
+    /* Default vfuncs which can be overriden by each engine. */
+    ring->init_hw = gen8_init_common_ring;
+    ring->emit_request = gen8_emit_request;
+    ring->emit_flush = gen8_emit_flush;
+    ring->irq_get = gen8_logical_ring_get_irq;
+    ring->irq_put = gen8_logical_ring_put_irq;
+    ring->emit_bb_start = gen8_emit_bb_start;
+    if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+        ring->get_seqno = bxt_a_get_seqno;
+        ring->set_seqno = bxt_a_set_seqno;
+    } else {
+        ring->get_seqno = gen8_get_seqno;
+        ring->set_seqno = gen8_set_seqno;
+    }
+}
+
+static inline void
+logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
+{
+    ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+    ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+}
+
+static int
+logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 {
+    struct intel_context *dctx = to_i915(dev)->kernel_context;
     int ret;
@@ Line 1931 (rev 6937) | Line 2071 (rev 7144) @@
     INIT_LIST_HEAD(&ring->buffers);
     INIT_LIST_HEAD(&ring->execlist_queue);
     INIT_LIST_HEAD(&ring->execlist_retired_req_list);
     spin_lock_init(&ring->execlist_lock);

+    logical_ring_init_platform_invariants(ring);
+
     ret = i915_cmd_parser_init_ring(ring);
     if (ret)
         goto error;

-    ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
+    ret = intel_lr_context_deferred_alloc(dctx, ring);
     if (ret)
         goto error;

     /* As this is the default context, always pin it */
-    ret = intel_lr_context_do_pin(
-            ring,
-            ring->default_context->engine[ring->id].state,
-            ring->default_context->engine[ring->id].ringbuf);
+    ret = intel_lr_context_do_pin(dctx, ring);
     if (ret) {
         DRM_ERROR(
@@ Line 1966 (rev 6937) | Line 2105 (rev 7144) @@
     struct intel_engine_cs *ring = &dev_priv->ring[RCS];
     int ret;

     ring->name = "render ring";
     ring->id = RCS;
+    ring->exec_id = I915_EXEC_RENDER;
+    ring->guc_id = GUC_RENDER_ENGINE;
     ring->mmio_base = RENDER_RING_BASE;
-    ring->irq_enable_mask =
-        GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
-    ring->irq_keep_mask =
-        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
+
+    logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
     if (HAS_L3_DPF(dev))
         ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

+    logical_ring_default_vfuncs(dev, ring);
+
+    /* Override some for render ring. */
     if (INTEL_INFO(dev)->gen >= 9)
         ring->init_hw = gen9_init_render_ring;
     else
         ring->init_hw = gen8_init_render_ring;
     ring->init_context = gen8_init_rcs_context;
     ring->cleanup = intel_fini_pipe_control;
-    if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-        ring->get_seqno = bxt_a_get_seqno;
-        ring->set_seqno = bxt_a_set_seqno;
-    } else {
-        ring->get_seqno = gen8_get_seqno;
-        ring->set_seqno = gen8_set_seqno;
-    }
-    ring->emit_request = gen8_emit_request;
-    ring->emit_flush = gen8_emit_flush_render;
-    ring->irq_get = gen8_logical_ring_get_irq;
-    ring->irq_put = gen8_logical_ring_put_irq;
-    ring->emit_bb_start = gen8_emit_bb_start;
+    ring->emit_flush = gen8_emit_flush_render;
+    ring->emit_request = gen8_emit_request_render;

@@ Line 2025 (rev 6937) | Line 2157 (rev 7144) @@
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct intel_engine_cs *ring = &dev_priv->ring[VCS];

     ring->name = "bsd ring";
     ring->id = VCS;
+    ring->exec_id = I915_EXEC_BSD;
+    ring->guc_id = GUC_VIDEO_ENGINE;
     ring->mmio_base = GEN6_BSD_RING_BASE;
-    ring->irq_enable_mask =
-        GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-    ring->irq_keep_mask =
-        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-
-    ring->init_hw = gen8_init_common_ring;
-    if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-        ring->get_seqno = bxt_a_get_seqno;
-        ring->set_seqno = bxt_a_set_seqno;
-    } else {
-        ring->get_seqno = gen8_get_seqno;
-        ring->set_seqno = gen8_set_seqno;
-    }
-    ring->emit_request = gen8_emit_request;
-    ring->emit_flush = gen8_emit_flush;
-    ring->irq_get = gen8_logical_ring_get_irq;
-    ring->irq_put = gen8_logical_ring_put_irq;
-    ring->emit_bb_start = gen8_emit_bb_start;
+
+    logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
+    logical_ring_default_vfuncs(dev, ring);

     return logical_ring_init(dev, ring);
 }

 static int logical_bsd2_ring_init(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

-    ring->name = "bds2 ring";
+    ring->name = "bsd2 ring";
     ring->id = VCS2;
+    ring->exec_id = I915_EXEC_BSD;
+    ring->guc_id = GUC_VIDEO_ENGINE2;
     ring->mmio_base = GEN8_BSD2_RING_BASE;
-    ring->irq_enable_mask =
-        GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
-    ring->irq_keep_mask =
-        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
-
-    ring->init_hw = gen8_init_common_ring;
-    ring->get_seqno = gen8_get_seqno;
-    ring->set_seqno = gen8_set_seqno;
-    ring->emit_request = gen8_emit_request;
-    ring->emit_flush = gen8_emit_flush;
@@ Line 2080 (rev 6937) | Line 2191 (rev 7144) @@
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct intel_engine_cs *ring = &dev_priv->ring[BCS];

     ring->name = "blitter ring";
     ring->id = BCS;
+    ring->exec_id = I915_EXEC_BLT;
+    ring->guc_id = GUC_BLITTER_ENGINE;
     ring->mmio_base = BLT_RING_BASE;
-    ring->irq_enable_mask =
-        GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-    ring->irq_keep_mask =
-        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-
-    ring->init_hw = gen8_init_common_ring;
-    if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-        ring->get_seqno = bxt_a_get_seqno;
-        ring->set_seqno = bxt_a_set_seqno;
-    } else {
-        ring->get_seqno = gen8_get_seqno;
-        ring->set_seqno = gen8_set_seqno;
-    }
-    ring->emit_request = gen8_emit_request;
-    ring->emit_flush = gen8_emit_flush;
-    ring->irq_get = gen8_logical_ring_get_irq;
-    ring->irq_put = gen8_logical_ring_put_irq;
-    ring->emit_bb_start = gen8_emit_bb_start;
+
+    logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
+    logical_ring_default_vfuncs(dev, ring);

@@ Line 2110 (rev 6937) | Line 2208 (rev 7144) @@
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct intel_engine_cs *ring = &dev_priv->ring[VECS];

     ring->name = "video enhancement ring";
     ring->id = VECS;
+    ring->exec_id = I915_EXEC_VEBOX;
+    ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
     ring->mmio_base = VEBOX_RING_BASE;
-    ring->irq_enable_mask =
-        GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-    ring->irq_keep_mask =
-        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-
-    ring->init_hw = gen8_init_common_ring;
-    if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-        ring->get_seqno = bxt_a_get_seqno;
-        ring->set_seqno = bxt_a_set_seqno;
-    } else {
-        ring->get_seqno = gen8_get_seqno;
-        ring->set_seqno = gen8_set_seqno;
-    }
-    ring->emit_request = gen8_emit_request;
-    ring->emit_flush = gen8_emit_flush;
-    ring->irq_get = gen8_logical_ring_get_irq;
-    ring->irq_put = gen8_logical_ring_put_irq;
-    ring->emit_bb_start = gen8_emit_bb_start;
+
+    logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
+    logical_ring_default_vfuncs(dev, ring);

@@ Line 2233 (rev 6937) | Line 2318 (rev 7144) @@
     }

     return rpcs;
 }

+static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
+{
+    u32 indirect_ctx_offset;
+
+    switch (INTEL_INFO(ring->dev)->gen) {
+    default:
+        MISSING_CASE(INTEL_INFO(ring->dev)->gen);
+        /* fall through */
+    case 9:
+        indirect_ctx_offset =
+            GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+        break;
+    case 8:
+        indirect_ctx_offset =
+            GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+        break;
+    }
+
+    return indirect_ctx_offset;
+}
+
 static int
 populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
             struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
@@ Line 2276 (rev 6937) | Line 2382 (rev 7144) @@
     reg_state[CTX_LRI_HEADER_0] =
         MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
     ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
                _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
                       CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
-                      CTX_CTRL_RS_CTX_ENABLE));
+                      (HAS_RESOURCE_STREAMER(dev) ?
+                        CTX_CTRL_RS_CTX_ENABLE : 0)));
     ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
     ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
     /* Ring buffer start address is not known until the buffer is pinned.
      * It is written to the context image in execlists_update_context()
      */
@@ Line 2305 (rev 6937) | Line 2412 (rev 7144) @@
         reg_state[CTX_RCS_INDIRECT_CTX+1] =
             (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
             (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);

         reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
-            CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6;
+            intel_lr_indirect_ctx_offset(ring) << 6;

         reg_state[CTX_BB_PER_CTX_PTR+1] =
             (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
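Note: a worked value for the write above: on gen9, intel_lr_indirect_ctx_offset() returns GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT (0x26, per the renamed defines at the top of this diff), so the register field becomes

    0x26 << 6 == 0x980    /* gen8: 0x17 << 6 == 0x5c0 */

with an unknown (future) gen falling through to the newest known value after a MISSING_CASE() warning.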
@@ Line 2366 (rev 6937) | Line 2473 (rev 7144) @@
  */
 void intel_lr_context_free(struct intel_context *ctx)
 {
     int i;

-    for (i = 0; i < I915_NUM_RINGS; i++) {
+    for (i = I915_NUM_RINGS; --i >= 0; ) {
+        struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
         struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;

-        if (ctx_obj) {
-            struct intel_ringbuffer *ringbuf =
-                    ctx->engine[i].ringbuf;
-            struct intel_engine_cs *ring = ringbuf->ring;
-
-            if (ctx == ring->default_context) {
-                intel_unpin_ringbuffer_obj(ringbuf);
-                i915_gem_object_ggtt_unpin(ctx_obj);
-            }
-            WARN_ON(ctx->engine[ring->id].pin_count);
-            intel_ringbuffer_free(ringbuf);
-            drm_gem_object_unreference(&ctx_obj->base);
+        if (!ctx_obj)
+            continue;
+
+        if (ctx == ctx->i915->kernel_context) {
+            intel_unpin_ringbuffer_obj(ringbuf);
+            i915_gem_object_ggtt_unpin(ctx_obj);
         }
+
+        WARN_ON(ctx->engine[i].pin_count);
+        intel_ringbuffer_free(ringbuf);
+        drm_gem_object_unreference(&ctx_obj->base);
     }
 }
+
+/**
+ * intel_lr_context_size() - return the size of the context for an engine
+ * @ring: which engine to find the context size for
+ *
+ * Each engine may require a different amount of space for a context image,
+ * so when allocating (or copying) an image, this function can be used to
+ * find the right size for the specific engine.
+ *
+ * Return: size (in bytes) of an engine-specific context image
+ *
+ * Note: this size includes the HWSP, which is part of the context image
+ * in LRC mode, but does not include the "shared data page" used with
+ * GuC submission. The caller should account for this if using the GuC.
+ */
@@ Line 2453 (rev 6937) | Line 2573 (rev 7144) @@
     int ret;

     WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
     WARN_ON(ctx->engine[ring->id].state);

-    context_size = round_up(get_lr_context_size(ring), 4096);
+    context_size = round_up(intel_lr_context_size(ring), 4096);

@@ Line 2479 (rev 6937) | Line 2599 (rev 7144) @@
     }

     ctx->engine[ring->id].ringbuf = ringbuf;
     ctx->engine[ring->id].state = ctx_obj;

-    if (ctx != ring->default_context && ring->init_context) {
+    if (ctx != ctx->i915->kernel_context && ring->init_context) {
         struct drm_i915_gem_request *req;

-        ret = i915_gem_request_alloc(ring,
-            ctx, &req);
-        if (ret) {
-            DRM_ERROR("ring create req: %d\n",
-                ret);
+        req = i915_gem_request_alloc(ring, ctx);
+        if (IS_ERR(req)) {
+            ret = PTR_ERR(req);
+            DRM_ERROR("ring create req: %d\n", ret);
             goto error_ringbuf;
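Note: i915_gem_request_alloc() changed signature here, from an int return plus out-parameter to returning the request itself with an encoded errno on failure, the standard kernel ERR_PTR idiom:

    struct drm_i915_gem_request *req;

    req = i915_gem_request_alloc(ring, ctx);
    if (IS_ERR(req))              /* failure: pointer encodes a -errno */
        return PTR_ERR(req);      /* decode back to an int */
    /* success: req is an ordinary pointer */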