Rev 5354 → Rev 6084
@@ -131 +131 @@
     }
 
     return ret;
+}
+
+static void i915_gem_context_clean(struct intel_context *ctx)
+{
+    struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+    struct i915_vma *vma, *next;
+
+    if (!ppgtt)
+        return;
+
+    list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
+                             mm_list) {
+        if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
+            break;
+    }
 }
 
 void i915_gem_context_free(struct kref *ctx_ref)
 {
-    struct intel_context *ctx = container_of(ctx_ref,
-                                             typeof(*ctx), ref);
+    struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
 
     trace_i915_context_free(ctx);
 
     if (i915.enable_execlists)
         intel_lr_context_free(ctx);
+
+    /*
+     * This context is going away and we need to remove all VMAs still
+     * around. This is to handle imported shared objects for which
+     * destructor did not run when their handles were closed.
+     */
+    i915_gem_context_clean(ctx);
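The new i915_gem_context_clean() walks the ppgtt's inactive list with the _safe iterator because each successful unbind unlinks the vma currently being visited. A minimal sketch of that iteration idiom, using a hypothetical item type rather than the driver's structures:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
    int val;
    struct list_head link;
};

/* Frees every node on @head. The _safe variant caches the next node up
 * front, so the current node may be unlinked inside the loop body. */
static void drop_all(struct list_head *head)
{
    struct item *it, *next;

    list_for_each_entry_safe(it, next, head, link) {
        list_del(&it->link);
        kfree(it);
    }
}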
@@ -193 +214 @@
     if (ctx == NULL)
         return ERR_PTR(-ENOMEM);
 
     kref_init(&ctx->ref);
     list_add_tail(&ctx->link, &dev_priv->context_list);
+    ctx->i915 = dev_priv;
 
     if (dev_priv->hw_context_size) {
         struct drm_i915_gem_object *obj =
@@ -220 +242 @@
     /* NB: Mark all slices as needing a remap so that when the context first
      * loads it will restore whatever remap state already exists. If there
      * is no remap info, it will be a NOP. */
     ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
+
+    ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
 
     return ctx;
 
 err_out:
@@ -283 +307 @@
 
 err_unpin:
     if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
         i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
 err_destroy:
+    idr_remove(&file_priv->context_idr, ctx->user_handle);
     i915_gem_context_unreference(ctx);
     return ERR_PTR(ret);
 }
 
 void i915_gem_context_reset(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
     int i;
 
-    /* In execlists mode we will unreference the context when the execlist
-     * queue is cleared and the requests destroyed.
-     */
-    if (i915.enable_execlists)
+    if (i915.enable_execlists) {
+        struct intel_context *ctx;
+
+        list_for_each_entry(ctx, &dev_priv->context_list, link) {
+            intel_lr_context_reset(dev, ctx);
+        }
+
         return;
+    }
@@ -323 +352 @@
     /* Init should only be called once per module load. Eventually the
      * restriction on the context_disabled check can be loosened. */
     if (WARN_ON(dev_priv->ring[RCS].default_context))
         return 0;
+
+    if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+        if (!i915.enable_execlists) {
+            DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
+            return -EINVAL;
+        }
+    }
 
     if (i915.enable_execlists) {
         /* NB: intentionally left blank. We will allocate our own
          * backing objects as we need them, thank you very much */
         dev_priv->hw_context_size = 0;
@@ -399 +435 @@
     }
 
     i915_gem_context_unreference(dctx);
 }
 
-int i915_gem_context_enable(struct drm_i915_private *dev_priv)
+int i915_gem_context_enable(struct drm_i915_gem_request *req)
 {
-    struct intel_engine_cs *ring;
-    int ret, i;
-
-    BUG_ON(!dev_priv->ring[RCS].default_context);
-
-    if (i915.enable_execlists)
-        return 0;
-
-    for_each_ring(ring, dev_priv, i) {
-        ret = i915_switch_context(ring, ring->default_context);
-        if (ret)
+    struct intel_engine_cs *ring = req->ring;
+    int ret;
+
+    if (i915.enable_execlists) {
+        if (ring->init_context == NULL)
+            return 0;
+
+        ret = ring->init_context(req);
+    } else
+        ret = i915_switch_context(req);
+
+    if (ret) {
+        DRM_ERROR("ring init context: %d\n", ret);
@@ -466 +504 @@
 
     return ctx;
 }
 
 static inline int
-mi_set_context(struct intel_engine_cs *ring,
-               struct intel_context *new_context,
-               u32 hw_flags)
+mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
+    struct intel_engine_cs *ring = req->ring;
     u32 flags = hw_flags | MI_MM_SPACE_GTT;
     const int num_rings =
         /* Use an extended w/a on ivb+ if signalling from other rings */
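Starting with this hunk, helpers on the legacy submission path stop taking an explicit (ring, context) pair and instead take the request, deriving both from it. A minimal illustration of that calling convention; the helper name here is hypothetical, not part of the driver:

/* Skeleton only: shows how the request carries everything a helper needs. */
static int do_something_on_request(struct drm_i915_gem_request *req)
{
    struct intel_engine_cs *ring = req->ring;   /* engine the request targets */
    struct intel_context *ctx = req->ctx;       /* context the request runs in */

    (void)ring;
    (void)ctx;
    return 0;
}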
@@ -484 +521 @@
      * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
      * explicitly, so we rely on the value at ring init, stored in
      * itlb_before_ctx_switch.
      */
     if (IS_GEN6(ring->dev)) {
-        ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
+        ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
         if (ret)
             return ret;
     }
 
     /* These flags are for resource streamer on HSW+ */
-    if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
+    if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+        flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
+    else if (INTEL_INFO(ring->dev)->gen < 8)
         flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
     len = 4;
     if (INTEL_INFO(ring->dev)->gen >= 7)
         len += 2 + (num_rings ? 4*num_rings + 2 : 0);
 
-    ret = intel_ring_begin(ring, len);
+    ret = intel_ring_begin(req, len);
@@ -521 +560 @@
         }
     }
 
     intel_ring_emit(ring, MI_NOOP);
     intel_ring_emit(ring, MI_SET_CONTEXT);
-    intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
+    intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
                     flags);
     /*
      * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
      * WaMiSetContext_Hang:snb,ivb,vlv
@@ -550 +589 @@
     intel_ring_advance(ring);
 
     return ret;
 }
 
-static int do_switch(struct intel_engine_cs *ring,
-                     struct intel_context *to)
+static inline bool should_skip_switch(struct intel_engine_cs *ring,
+                                      struct intel_context *from,
+                                      struct intel_context *to)
+{
+    if (to->remap_slice)
+        return false;
+
+    if (to->ppgtt && from == to &&
+        !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
+        return true;
+
+    return false;
+}
+
+static bool
+needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+{
+    struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+    if (!to->ppgtt)
+        return false;
+
+    if (INTEL_INFO(ring->dev)->gen < 8)
+        return true;
+
+    if (ring != &dev_priv->ring[RCS])
+        return true;
+
+    return false;
+}
+
+static bool
+needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
+                   u32 hw_flags)
+{
+    struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+    if (!to->ppgtt)
+        return false;
+
+    if (!IS_GEN8(ring->dev))
+        return false;
+
+    if (ring != &dev_priv->ring[RCS])
+        return false;
+
+    if (hw_flags & MI_RESTORE_INHIBIT)
+        return true;
+
+    return false;
+}
+
+static int do_switch(struct drm_i915_gem_request *req)
 {
+    struct intel_context *to = req->ctx;
+    struct intel_engine_cs *ring = req->ring;
     struct drm_i915_private *dev_priv = ring->dev->dev_private;
     struct intel_context *from = ring->last_context;
     u32 hw_flags = 0;
     bool uninitialized = false;
-    struct i915_vma *vma;
     int ret, i;
 
     if (from != NULL && ring == &dev_priv->ring[RCS]) {
         BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
         BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
     }
 
-    if (from == to && !to->remap_slice)
+    if (should_skip_switch(ring, from, to))
@@ -583 +674 @@
      * evict_everything - as a last ditch gtt defrag effort that also
      * switches to the default context. Hence we need to reload from here.
      */
     from = ring->last_context;
 
-    if (to->ppgtt) {
+    if (needs_pd_load_pre(ring, to)) {
+        /* Older GENs and non render rings still want the load first,
+         * "PP_DCLV followed by PP_DIR_BASE register through Load
+         * Register Immediate commands in Ring Buffer before submitting
+         * a context."*/
         trace_switch_mm(ring, to);
-        ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+        ret = to->ppgtt->switch_mm(to->ppgtt, req);
         if (ret)
             goto unpin_out;
+
+        /* Doing a PD load always reloads the page dirs */
+        to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
     }
 
     if (ring != &dev_priv->ring[RCS]) {
@@ -608 +706 @@
      */
     ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
     if (ret)
         goto unpin_out;
-
-    vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
-    if (!(vma->bound & GLOBAL_BIND))
-        vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
-                      GLOBAL_BIND);
 
-    if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+    if (!to->legacy_hw_ctx.initialized) {
         hw_flags |= MI_RESTORE_INHIBIT;
+        /* NB: If we inhibit the restore, the context is not allowed to
+         * die because future work may end up depending on valid address
+         * space. This means we must enforce that a page table load
+         * occur when this occurs. */
+    } else if (to->ppgtt &&
+               (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
+        hw_flags |= MI_FORCE_RESTORE;
+        to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+    }
+
+    /* We should never emit switch_mm more than once */
+    WARN_ON(needs_pd_load_pre(ring, to) &&
+            needs_pd_load_post(ring, to, hw_flags));
 
-    ret = mi_set_context(ring, to, hw_flags);
+    ret = mi_set_context(req, hw_flags);
     if (ret)
         goto unpin_out;
 
+    /* GEN8 does *not* require an explicit reload if the PDPs have been
+     * setup, and we do not wish to move them.
+     */
+    if (needs_pd_load_post(ring, to, hw_flags)) {
+        trace_switch_mm(ring, to);
+        ret = to->ppgtt->switch_mm(to->ppgtt, req);
+        /* The hardware context switch is emitted, but we haven't
+         * actually changed the state - so it's probably safe to bail
+         * here. Still, let the user know something dangerous has
+         * happened.
+         */
+        if (ret) {
+            DRM_ERROR("Failed to change address space on context switch\n");
+            goto unpin_out;
+        }
+    }
+
     for (i = 0; i < MAX_L3_SLICES; i++) {
         if (!(to->remap_slice & (1<<i)))
             continue;
 
-        ret = i915_gem_l3_remap(ring, i);
+        ret = i915_gem_l3_remap(req, i);
         /* If it failed, try again next round */
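The MI_FORCE_RESTORE / pd_dirty_rings handling above treats pd_dirty_rings as a per-engine bitmask: a set bit means that engine still has to reload its page directories, and emitting the reload clears the bit. A standalone sketch of that bookkeeping, with made-up values rather than driver state:

#include <stdio.h>

int main(void)
{
    unsigned int pd_dirty_rings = 0xf;  /* pretend: four engines, all dirty */
    unsigned int ring_flag = 1u << 0;   /* pretend: the render ring's bit */

    if (pd_dirty_rings & ring_flag) {
        /* ...emit the page-directory load for this engine here... */
        pd_dirty_rings &= ~ring_flag;   /* this engine is now clean */
    }

    printf("dirty mask after the switch: 0x%x\n", pd_dirty_rings);
    return 0;
}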
@@ -640 +763 @@
      * is a bit suboptimal because the retiring can occur simply after the
      * MI_SET_CONTEXT instead of when the next seqno has completed.
      */
     if (from != NULL) {
         from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-        i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
+        i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
         /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
          * whole damn pipeline, we don't need to explicitly mark the
          * object dirty. The only exception is that the context must be
          * correct in case the object gets swapped out. Ideally we'd be
          * able to defer doing this until we know the object would be
          * swapped, but there is no way to do that yet.
          */
         from->legacy_hw_ctx.rcs_state->dirty = 1;
-        BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
 
         /* obj is kept alive until the next request by its active ref */
         i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
         i915_gem_context_unreference(from);
     }
 
-    uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+    uninitialized = !to->legacy_hw_ctx.initialized;
     to->legacy_hw_ctx.initialized = true;
 
 done:
     i915_gem_context_reference(to);
     ring->last_context = to;
 
     if (uninitialized) {
         if (ring->init_context) {
-            ret = ring->init_context(ring, to);
-            if (ret)
-                DRM_ERROR("ring init context: %d\n", ret);
-        }
-
-        ret = i915_gem_render_state_init(ring);
-        if (ret)
+            ret = ring->init_context(req);
+            if (ret)
+                DRM_ERROR("ring init context: %d\n", ret);
794 | DRM_ERROR("ring init context: %d\n", ret); |
Line 685... | Line 803... | ||
685 | return ret; |
803 | return ret; |
686 | } |
804 | } |
Line 687... | Line 805... | ||
687 | 805 | ||
688 | /** |
806 | /** |
689 | * i915_switch_context() - perform a GPU context switch. |
807 | * i915_switch_context() - perform a GPU context switch. |
690 | * @ring: ring for which we'll execute the context switch |
- | |
691 | * @to: the context to switch to |
808 | * @req: request for which we'll execute the context switch |
692 | * |
809 | * |
693 | * The context life cycle is simple. The context refcount is incremented and |
810 | * The context life cycle is simple. The context refcount is incremented and |
694 | * decremented by 1 and create and destroy. If the context is in use by the GPU, |
811 | * decremented by 1 and create and destroy. If the context is in use by the GPU, |
695 | * it will have a refcount > 1. This allows us to destroy the context abstract |
812 | * it will have a refcount > 1. This allows us to destroy the context abstract |
696 | * object while letting the normal object tracking destroy the backing BO. |
813 | * object while letting the normal object tracking destroy the backing BO. |
697 | * |
814 | * |
698 | * This function should not be used in execlists mode. Instead the context is |
815 | * This function should not be used in execlists mode. Instead the context is |
699 | * switched by writing to the ELSP and requests keep a reference to their |
816 | * switched by writing to the ELSP and requests keep a reference to their |
700 | * context. |
817 | * context. |
701 | */ |
818 | */ |
702 | int i915_switch_context(struct intel_engine_cs *ring, |
- | |
703 | struct intel_context *to) |
819 | int i915_switch_context(struct drm_i915_gem_request *req) |
- | 820 | { |
|
704 | { |
821 | struct intel_engine_cs *ring = req->ring; |
Line 705... | Line 822... | ||
705 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
822 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
706 | 823 | ||
Line 707... | Line 824... | ||
707 | WARN_ON(i915.enable_execlists); |
824 | WARN_ON(i915.enable_execlists); |
708 | WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); |
825 | WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); |
709 | 826 | ||
710 | if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */ |
827 | if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */ |
711 | if (to != ring->last_context) { |
828 | if (req->ctx != ring->last_context) { |
712 | i915_gem_context_reference(to); |
829 | i915_gem_context_reference(req->ctx); |
713 | if (ring->last_context) |
830 | if (ring->last_context) |
714 | i915_gem_context_unreference(ring->last_context); |
831 | i915_gem_context_unreference(ring->last_context); |
715 | ring->last_context = to; |
832 | ring->last_context = req->ctx; |
Line 716... | Line 833... | ||
716 | } |
833 | } |
717 | return 0; |
834 | return 0; |
Line 718... | Line 835... | ||
718 | } |
835 | } |
719 | 836 | ||
720 | return do_switch(ring, to); |
837 | return do_switch(req); |
@@ -777 +894 @@
     mutex_unlock(&dev->struct_mutex);
 
     DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
     return 0;
+}
+
+int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
+                                    struct drm_file *file)
+{
+    struct drm_i915_file_private *file_priv = file->driver_priv;
+    struct drm_i915_gem_context_param *args = data;
+    struct intel_context *ctx;
+    int ret;
+
+    ret = i915_mutex_lock_interruptible(dev);
+    if (ret)
+        return ret;
+
+    ctx = i915_gem_context_get(file_priv, args->ctx_id);
+    if (IS_ERR(ctx)) {
+        mutex_unlock(&dev->struct_mutex);
+        return PTR_ERR(ctx);
+    }
+
+    args->size = 0;
+    switch (args->param) {
+    case I915_CONTEXT_PARAM_BAN_PERIOD:
+        args->value = ctx->hang_stats.ban_period_seconds;
+        break;
+    case I915_CONTEXT_PARAM_NO_ZEROMAP:
+        args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
+        break;
+    default:
+        ret = -EINVAL;
+        break;
+    }
+    mutex_unlock(&dev->struct_mutex);
+
+    return ret;
+}
+
+int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
+                                    struct drm_file *file)
+{
+    struct drm_i915_file_private *file_priv = file->driver_priv;
+    struct drm_i915_gem_context_param *args = data;
+    struct intel_context *ctx;
+    int ret;
+
+    ret = i915_mutex_lock_interruptible(dev);
+    if (ret)
+        return ret;
+
+    ctx = i915_gem_context_get(file_priv, args->ctx_id);
+    if (IS_ERR(ctx)) {
+        mutex_unlock(&dev->struct_mutex);
+        return PTR_ERR(ctx);
+    }
+
+    switch (args->param) {
+    case I915_CONTEXT_PARAM_BAN_PERIOD:
+        if (args->size)
+            ret = -EINVAL;
+        else if (args->value < ctx->hang_stats.ban_period_seconds)
+            ret = -EPERM;
+        else
+            ctx->hang_stats.ban_period_seconds = args->value;
+        break;
+    case I915_CONTEXT_PARAM_NO_ZEROMAP:
+        if (args->size) {
+            ret = -EINVAL;
+        } else {
+            ctx->flags &= ~CONTEXT_NO_ZEROMAP;
+            ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
+        }
+        break;
+    default:
+        ret = -EINVAL;
+        break;
+    }
+    mutex_unlock(&dev->struct_mutex);
+
+    return ret;
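The two new ioctls exchange a struct drm_i915_gem_context_param with userspace. A hedged sketch of how a client might query and raise a context's ban period through libdrm, assuming an open DRM fd and a context id obtained from context creation (exact include paths vary by install):

#include <xf86drm.h>        /* drmIoctl(), from libdrm */
#include <i915_drm.h>       /* context param ioctls and flags */

static int get_ban_period(int fd, unsigned int ctx_id, unsigned long long *seconds)
{
    struct drm_i915_gem_context_param p = {
        .ctx_id = ctx_id,
        .param = I915_CONTEXT_PARAM_BAN_PERIOD,
    };
    int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);

    if (ret == 0)
        *seconds = p.value;
    return ret;
}

static int set_ban_period(int fd, unsigned int ctx_id, unsigned long long seconds)
{
    struct drm_i915_gem_context_param p = {
        .ctx_id = ctx_id,
        .param = I915_CONTEXT_PARAM_BAN_PERIOD,
        /* per the hunk above, a value below the current period is rejected
         * with -EPERM and a non-zero size with -EINVAL */
        .value = seconds,
    };

    return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}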