Subversion Repositories Kolibri OS


Rev 6937 → Rev 7144
Line 156... Line 156...
 	struct drm_device *dev = dev_priv->dev;
 	u32 data[2];
 
 	data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
 	/* WaRsDisableCoarsePowerGating:skl,bxt */
-	if (!intel_enable_rc6(dev_priv->dev) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
-	    (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
-	    (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
+	if (!intel_enable_rc6(dev) ||
+	    NEEDS_WaRsDisableCoarsePowerGating(dev))
 		data[1] = 0;
 	else
 		/* bit 0 and 1 are for Render and Media domain separately */
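The new side folds the open-coded platform and revision tests into a single NEEDS_WaRsDisableCoarsePowerGating() predicate. A minimal sketch of what such a macro can look like, assembled only from the conditions deleted above; the kernel's actual definition (in i915_drv.h) may cover different revisions:

/* Sketch only: one predicate for the WaRsDisableCoarsePowerGating
 * workaround, built from the exact conditions removed on the old side
 * of the hunk. The real macro may differ. */
#define NEEDS_WaRsDisableCoarsePowerGating(dev)				\
	(IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||				\
	 (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||	\
	 (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))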
Line 244... Line 242...
 		db_exc.cookie = db_ret.cookie + 1;
 		if (db_exc.cookie == 0)
 			db_exc.cookie = 1;
 	}
+
+	/* Finally, update the cached copy of the GuC's WQ head */
+	gc->wq_head = desc->head;
 
 	kunmap_atomic(base);
 	return ret;
 }
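The doorbell exchange above advances a cookie on every ring and skips the value 0, so 0 stays reserved as a "never rung" marker. The same pattern in isolation, as a hypothetical helper:

/* Sketch: advance a doorbell cookie while reserving 0, mirroring the
 * db_exc.cookie update above. next_cookie() is illustrative only. */
static inline u32 next_cookie(u32 cookie)
{
	if (++cookie == 0)	/* wrapped: 0 stays reserved */
		cookie = 1;
	return cookie;
}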
Line 373... Line 374...
  */
 
 static void guc_init_ctx_desc(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct intel_engine_cs *ring;
 	struct intel_context *ctx = client->owner;
 	struct guc_context_desc desc;
 	struct sg_table *sg;
Line 385... Line 388...
 	desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
 	desc.context_id = client->ctx_index;
 	desc.priority = client->priority;
 	desc.db_id = client->doorbell_id;
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct guc_execlist_context *lrc = &desc.lrc[i];
-		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
-		struct intel_engine_cs *ring;
+	for_each_ring(ring, dev_priv, i) {
+		struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
 		struct drm_i915_gem_object *obj;
 		uint64_t ctx_desc;
 
Line 403... Line 404...
 		 */
 		obj = ctx->engine[i].state;
 		if (!obj)
 			break;	/* XXX: continue? */
 
-		ring = ringbuf->ring;
 		ctx_desc = intel_lr_context_descriptor(ctx, ring);
 		lrc->context_desc = (u32)ctx_desc;
 
 		/* The state page is after PPHWSP */
 		lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
 				LRC_STATE_PN * PAGE_SIZE;
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
-				(ring->id << GUC_ELC_ENGINE_OFFSET);
+				(ring->guc_id << GUC_ELC_ENGINE_OFFSET);
 
-		obj = ringbuf->obj;
+		obj = ctx->engine[i].ringbuf->obj;
 
 		lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
 		lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
 		lrc->ring_next_free_location = lrc->ring_begin;
 		lrc->ring_current_tail_pointer_value = 0;
Line 469... Line 469...
 	sg = guc->ctx_pool_obj->pages;
 	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
 			     sizeof(desc) * client->ctx_index);
 }
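guc_init_ctx_desc() ends by copying the descriptor into the shared pool at a byte offset of sizeof(desc) * ctx_index, i.e. the pool is treated as a flat array of guc_context_desc indexed by the client's context id. A hypothetical plain-memory equivalent of that final sg_pcopy_from_buffer() call:

/* Sketch: what the sg_pcopy_from_buffer() above achieves, if the pool
 * were a plain linear mapping. pool_write_desc() is illustrative only,
 * not a kernel API. */
static void pool_write_desc(void *pool, u32 ctx_index,
			    const struct guc_context_desc *desc)
{
	memcpy((char *)pool + (size_t)ctx_index * sizeof(*desc),
	       desc, sizeof(*desc));
}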
Line 473... Line -...
 
-/* Get valid workqueue item and return it back to offset */
-static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
+int i915_guc_wq_check_space(struct i915_guc_client *gc)
 {
 	struct guc_process_desc *desc;
 	void *base;
 	u32 size = sizeof(struct guc_wq_item);
 	int ret = -ETIMEDOUT, timeout_counter = 200;
 
+	if (!gc)
+		return 0;
+
+	/* Quickly return if wq space is available since last time we cache the
+	 * head position. */
+	if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
+		return 0;
+
 	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
 	desc = base + gc->proc_desc_offset;
 
 	while (timeout_counter-- > 0) {
-		if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
-			*offset = gc->wq_tail;
-
-			/* advance the tail for next workqueue item */
-			gc->wq_tail += size;
-			gc->wq_tail &= gc->wq_size - 1;
+		gc->wq_head = desc->head;
 
-			/* this will break the loop */
-			timeout_counter = 0;
+		if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
 			ret = 0;
+			break;
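Both revisions size the work queue with CIRC_SPACE(); the new code additionally caches the GuC's consumer index in gc->wq_head, so the common case returns without remapping the page. CIRC_SPACE() relies on the queue size being a power of two; the definitions below follow linux/circ_buf.h:

/* From linux/circ_buf.h: occupancy and free space of a power-of-two ring.
 * One slot is kept free so that head == tail unambiguously means "empty".
 * In this file the producer index is gc->wq_tail and the consumer index
 * is the GuC's desc->head (cached in gc->wq_head). */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))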
Line 505... Line 507...
 }
Line 506... Line 508...
 
 static int guc_add_workqueue_item(struct i915_guc_client *gc,
 				  struct drm_i915_gem_request *rq)
 {
-	enum intel_ring_id ring_id = rq->ring->id;
 	struct guc_wq_item *wqi;
 	void *base;
-	u32 tail, wq_len, wq_off = 0;
-	int ret;
+	u32 tail, wq_len, wq_off, space;
+
+	space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+	if (WARN_ON(space < sizeof(struct guc_wq_item)))
+		return -ENOSPC; /* shouldn't happen */
 
-	ret = guc_get_workqueue_space(gc, &wq_off);
-	if (ret)
-		return ret;
+	/* postincrement WQ tail for next time */
+	wq_off = gc->wq_tail;
+	gc->wq_tail += sizeof(struct guc_wq_item);
+	gc->wq_tail &= gc->wq_size - 1;
 
 	/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
Line 535... Line 540...
 
 	/* len does not include the header */
 	wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
 	wqi->header = WQ_TYPE_INORDER |
 			(wq_len << WQ_LEN_SHIFT) |
-			(ring_id << WQ_TARGET_SHIFT) |
+			(rq->ring->guc_id << WQ_TARGET_SHIFT) |
 			WQ_NO_WCFLUSH_WAIT;
 
Line 551... Line 556...
 	kunmap_atomic(base);
 
 	return 0;
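The work queue item header packs the item length, the target engine and the flush behaviour into a single u32. A sketch of that packing as a standalone helper, using the shift and flag names from the hunk above (their numeric values live in the GuC firmware headers):

/* Sketch: build a WQ item header as guc_add_workqueue_item() does above.
 * len_dw is the item length in dwords excluding the header itself;
 * wq_item_header() is illustrative, not a kernel function. */
static u32 wq_item_header(u32 len_dw, u32 engine)
{
	return WQ_TYPE_INORDER |
	       (len_dw << WQ_LEN_SHIFT) |
	       (engine << WQ_TARGET_SHIFT) |
	       WQ_NO_WCFLUSH_WAIT;
}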
Line 554... Line -...
-}
-
-#define CTX_RING_BUFFER_START		0x08
-
-/* Update the ringbuffer pointer in a saved context image */
-static void lr_context_update(struct drm_i915_gem_request *rq)
-{
-	enum intel_ring_id ring_id = rq->ring->id;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state;
-	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
-	struct page *page;
-	uint32_t *reg_state;
-
-	BUG_ON(!ctx_obj);
-	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
-	WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
-
-	page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-	reg_state = kmap_atomic(page);
-
-	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
-
-	kunmap_atomic(reg_state);
 }
 
 /**
  * i915_guc_submit() - Submit commands through GuC
  * @client:	the guc client where commands will go through
Line 585... Line 567...
  */
 int i915_guc_submit(struct i915_guc_client *client,
 		    struct drm_i915_gem_request *rq)
 {
 	struct intel_guc *guc = client->guc;
-	enum intel_ring_id ring_id = rq->ring->id;
+	unsigned int engine_id = rq->ring->guc_id;
 	int q_ret, b_ret;
 
-	/* Need this because of the deferred pin ctx and ring */
-	/* Shall we move this right after ring is pinned? */
-	lr_context_update(rq);
-
 	q_ret = guc_add_workqueue_item(client, rq);
 	if (q_ret == 0)
 		b_ret = guc_ring_doorbell(client);
 
-	client->submissions[ring_id] += 1;
+	client->submissions[engine_id] += 1;
 	if (q_ret) {
 		client->q_fail += 1;
 		client->retcode = q_ret;
 	} else if (b_ret) {
 		client->b_fail += 1;
 		client->retcode = q_ret = b_ret;
 	} else {
 		client->retcode = 0;
 	}
-	guc->submissions[ring_id] += 1;
-	guc->last_seqno[ring_id] = rq->seqno;
+	guc->submissions[engine_id] += 1;
+	guc->last_seqno[engine_id] = rq->seqno;
 
Line 830... Line 808...
 
 	offset = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; /* in pages */
 	guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 }
 
+static void init_guc_policies(struct guc_policies *policies)
+{
+	struct guc_policy *policy;
+	u32 p, i;
+
+	policies->dpc_promote_time = 500000;
+	policies->max_num_work_items = POLICY_MAX_NUM_WI;
+
+	for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
+		for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
+			policy = &policies->policy[p][i];
+
+			policy->execution_quantum = 1000000;
+			policy->preemption_time = 500000;
+			policy->fault_time = 250000;
+			policy->policy_flags = 0;
+		}
+	}
+
+	policies->is_valid = 1;
+}
+
+static void guc_create_ads(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_gem_object *obj;
+	struct guc_ads *ads;
+	struct guc_policies *policies;
+	struct guc_mmio_reg_state *reg_state;
+	struct intel_engine_cs *ring;
+	struct page *page;
+	u32 size, i;
+
+	/* The ads obj includes the struct itself and buffers passed to GuC */
+	size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
+			sizeof(struct guc_mmio_reg_state) +
+			GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
+
+	obj = guc->ads_obj;
+	if (!obj) {
+		obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
+		if (!obj)
+			return;
+
+		guc->ads_obj = obj;
+	}
+
+	page = i915_gem_object_get_page(obj, 0);
+	ads = kmap(page);
+
+	/*
+	 * The GuC requires a "Golden Context" when it reinitialises
+	 * engines after a reset. Here we use the Render ring default
+	 * context, which must already exist and be pinned in the GGTT,
+	 * so its address won't change after we've told the GuC where
+	 * to find it.
+	 */
+	ring = &dev_priv->ring[RCS];
+	ads->golden_context_lrca = ring->status_page.gfx_addr;
+
+	for_each_ring(ring, dev_priv, i)
+		ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
+
+	/* GuC scheduling policies */
+	policies = (void *)ads + sizeof(struct guc_ads);
+	init_guc_policies(policies);
+
+	ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
+			sizeof(struct guc_ads);
+
+	/* MMIO reg state */
+	reg_state = (void *)policies + sizeof(struct guc_policies);
+
+	for_each_ring(ring, dev_priv, i) {
+		reg_state->mmio_white_list[ring->guc_id].mmio_start =
+			ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
+
+		/* Nothing to be saved or restored for now. */
+		reg_state->mmio_white_list[ring->guc_id].count = 0;
+	}
+
+	ads->reg_state_addr = ads->scheduler_policies +
+			sizeof(struct guc_policies);
+
+	ads->reg_state_buffer = ads->reg_state_addr +
+			sizeof(struct guc_mmio_reg_state);
+
+	kunmap(page);
+}
 
 /*
  * Set up the memory resources to be shared with the GuC.  At this point,
  * we require just one object that can be mapped through the GGTT.
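guc_create_ads() above lays four regions out back to back in a single GEM object and then publishes their GGTT addresses to the firmware. A sketch of the running-offset arithmetic it uses; ads_offsets() is a hypothetical helper, not part of the driver:

/* Sketch: the ADS object layout built above. Each region starts where the
 * previous one ends; adding the object's GGTT base to an offset yields the
 * address written into reg_state_addr / reg_state_buffer. */
static void ads_offsets(u32 *policies, u32 *reg_state, u32 *save_buf)
{
	*policies  = sizeof(struct guc_ads);
	*reg_state = *policies + sizeof(struct guc_policies);
	*save_buf  = *reg_state + sizeof(struct guc_mmio_reg_state);
	/* total size is then *save_buf + GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE */
}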
Line 856... Line 924...
 
 	ida_init(&guc->ctx_ids);
 
 	guc_create_log(guc);
 
+	guc_create_ads(guc);
+
 	return 0;
 }
 
 int i915_guc_submission_enable(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;
-	struct intel_context *ctx = dev_priv->ring[RCS].default_context;
+	struct intel_context *ctx = dev_priv->kernel_context;
Line 894... Line 964...
 void i915_guc_submission_fini(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;
 
+	gem_release_guc_obj(dev_priv->guc.ads_obj);
+	guc->ads_obj = NULL;
+
 	gem_release_guc_obj(dev_priv->guc.log_obj);
 	guc->log_obj = NULL;
 
Line 917... Line 990...
 	u32 data[3];
 
 	if (!i915.enable_guc_submission)
 		return 0;
 
-	ctx = dev_priv->ring[RCS].default_context;
+	ctx = dev_priv->kernel_context;
 
 	data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
Line 943... Line 1016...
 	u32 data[3];
 
 	if (!i915.enable_guc_submission)
 		return 0;
 
-	ctx = dev_priv->ring[RCS].default_context;
+	ctx = dev_priv->kernel_context;
 
 	data[0] = HOST2GUC_ACTION_EXIT_S_STATE;