Subversion Repositories Kolibri OS


Rev 5078 → Rev 6296 (lines only in Rev 5078 are prefixed with "-", lines only in Rev 6296 with "+")
Line 1... Line 1...
 /**************************************************************************
  *
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
  * "Software"), to deal in the Software without restriction, including
Line 25... Line 25...
  *
  **************************************************************************/
Line 27... Line 27...
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
 #include <drm/ttm/ttm_placement.h>
+#include "device_include/svga3d_surfacedefs.h"
Line 31... Line 34...
-#include "svga3d_surfacedefs.h"
+
 
 /**
  * struct vmw_user_surface - User-space visible surface resource
Line 40... Line 43...
  */
 struct vmw_user_surface {
 	struct ttm_prime_object prime;
 	struct vmw_surface srf;
 	uint32_t size;
+	struct drm_master *master;
+	struct ttm_base_object *backup_base;
 };
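For orientation, this is how the structure reads in Rev 6296, assembled from the context and added lines above. The comments on the two new members are editorial notes based on how the fields are used later in this same diff (the authenticating-master check in vmw_surface_handle_reference and the backup handle release in vmw_user_surface_base_release), not part of the source:

struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
	struct drm_master *master;            /* creator's master, compared on legacy references */
	struct ttm_base_object *backup_base;  /* handle of the backup buffer, unreferenced on release */
};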
Line 46... Line 51...
 
 /**
  * struct vmw_surface_offset - Backing store mip level offset info
Line 217... Line 222...
 
 	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
 	cmd->header.size = cmd_len;
 	cmd->body.sid = srf->res.id;
 	cmd->body.surfaceFlags = srf->flags;
-	cmd->body.format = cpu_to_le32(srf->format);
+	cmd->body.format = srf->format;
 	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
Line 224... Line 229...
 		cmd->body.face[i].numMipLevels = srf->mip_levels[i];
 
Line 337... Line 342...
 		mutex_lock(&dev_priv->cmdbuf_mutex);
 		srf = vmw_res_to_srf(res);
 		dev_priv->used_memory_size -= res->backup_size;
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
 	}
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 }
Line 344... Line 349...
 
 /**
  * vmw_legacy_srf_create - Create a device surface as part of the
Line 573... Line 578...
 	int ret;
 	struct vmw_resource *res = &srf->res;
Line 575... Line 580...
 
 	BUG_ON(res_free == NULL);
 	if (!dev_priv->has_mob)
-	(void) vmw_3d_resource_inc(dev_priv, false);
+		vmw_fifo_resource_inc(dev_priv);
 	ret = vmw_resource_init(dev_priv, res, true, res_free,
 				(dev_priv->has_mob) ? &vmw_gb_surface_func :
Line 581... Line 586...
 				&vmw_legacy_surface_func);
 
 	if (unlikely(ret != 0)) {
 		if (!dev_priv->has_mob)
-		vmw_3d_resource_dec(dev_priv, false);
+			vmw_fifo_resource_dec(dev_priv);
 		res_free(res);
Line 587... Line 592...
 		return ret;
 	}
 
 	/*
Line -... Line 596...
 	 * The surface won't be visible to hardware until a
 	 * surface validate.
 	 */
 
Line 594... Line 600...
+	INIT_LIST_HEAD(&srf->view_list);
Line 623... Line 629...
 	struct vmw_user_surface *user_srf =
 	    container_of(srf, struct vmw_user_surface, srf);
 	struct vmw_private *dev_priv = srf->res.dev_priv;
 	uint32_t size = user_srf->size;
Line -... Line 633...
 
+	if (user_srf->master)
+		drm_master_put(&user_srf->master);
 	kfree(srf->offsets);
 	kfree(srf->sizes);
 	kfree(srf->snooper.image);
-//   ttm_base_object_kfree(user_srf, base);
+	ttm_prime_object_kfree(user_srf, prime);
 	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
Line 633... Line 641...
 }
 
Line 647... Line 655...
 	struct vmw_user_surface *user_srf =
 	    container_of(base, struct vmw_user_surface, prime.base);
 	struct vmw_resource *res = &user_srf->srf.res;
Line 650... Line 658...
 
 	*p_base = NULL;
+	if (user_srf->backup_base)
+		ttm_base_object_unref(&user_srf->backup_base);
 	vmw_resource_unreference(&res);
Line 653... Line 663...
 }
 
 #if 0
+ * vmw_user_surface_destroy_ioctl - Ioctl function implementing
+ *                                  the user surface destroy functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+}
+
 /**
  * vmw_user_surface_define_ioctl - Ioctl function implementing
  *                                  the user surface define functionality.
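The destroy path added under #if 0 above is the kernel half of the surface-unreference ioctl; it is compiled out in this port. Purely as an illustration (the libdrm helper and include paths are assumptions based on the upstream Linux UAPI, not taken from this revision), user space on Linux would reach it roughly like this:

/* Hypothetical sketch: drop a surface handle via the vmwgfx unref ioctl.
 * Assumes libdrm (xf86drm.h) and the vmwgfx UAPI header are available;
 * the header path depends on how the UAPI headers are installed. */
#include <stdint.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int example_unref_surface(int fd, uint32_t sid)
{
	struct drm_vmw_surface_arg arg = { .sid = (int32_t)sid };

	/* DRM_VMW_UNREF_SURFACE is dispatched to vmw_surface_destroy_ioctl(). */
	return drmCommandWrite(fd, DRM_VMW_UNREF_SURFACE, &arg, sizeof(arg));
}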
Line 702... Line 728...
 
 
 	desc = svga3dsurface_get_desc(req->format);
 	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
 		DRM_ERROR("Invalid surface format for surface creation.\n");
+		DRM_ERROR("Format requested is: %d\n", req->format);
Line 707... Line 734...
 		return -EINVAL;
 	}
Line 812... Line 839...
 
 	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
 	if (unlikely(ret != 0))
Line -... Line 842...
 		goto out_unlock;
+
+	/*
+	 * A gb-aware client referencing a shared surface will
+	 * expect a backup buffer to be present.
+	 */
+	if (dev_priv->has_mob && req->shareable) {
+		uint32_t backup_handle;
+
+		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+					    res->backup_size,
+					    true,
+					    &backup_handle,
+					    &res->backup,
+					    &user_srf->backup_base);
+		if (unlikely(ret != 0)) {
+			vmw_resource_unreference(&res);
+			goto out_unlock;
+		}
+	}
 
 	tmp = vmw_resource_reference(&srf->res);
 	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
Line 838... Line 884...
 	ttm_prime_object_kfree(user_srf, prime);
 out_no_user_srf:
 	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 out_unlock:
 	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
+
+
+static int
+vmw_surface_handle_reference(struct vmw_private *dev_priv,
+			     struct drm_file *file_priv,
+			     uint32_t u_handle,
+			     enum drm_vmw_handle_type handle_type,
+			     struct ttm_base_object **base_p)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_user_surface *user_srf;
+	uint32_t handle;
+	struct ttm_base_object *base;
+	int ret;
+
+	if (handle_type == DRM_VMW_HANDLE_PRIME) {
+		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
+		if (unlikely(ret != 0))
+			return ret;
+	} else {
+		if (unlikely(drm_is_render_client(file_priv))) {
+			DRM_ERROR("Render client refused legacy "
+				  "surface reference.\n");
+			return -EACCES;
+		}
+		if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+			DRM_ERROR("Locked master refused legacy "
+				  "surface reference.\n");
+			return -EACCES;
+		}
+
+		handle = u_handle;
+	}
+
+	ret = -EINVAL;
+	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
+	if (unlikely(base == NULL)) {
+		DRM_ERROR("Could not find surface to reference.\n");
+		goto out_no_lookup;
+	}
+
+	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
+		DRM_ERROR("Referenced object is not a surface.\n");
+		goto out_bad_resource;
+	}
+
+	if (handle_type != DRM_VMW_HANDLE_PRIME) {
+		user_srf = container_of(base, struct vmw_user_surface,
+					prime.base);
+
+		/*
+		 * Make sure the surface creator has the same
+		 * authenticating master.
+		 */
+		if (drm_is_primary_client(file_priv) &&
+		    user_srf->master != file_priv->master) {
+			DRM_ERROR("Trying to reference surface outside of"
+				  " master domain.\n");
+			ret = -EACCES;
+			goto out_bad_resource;
+		}
+
+		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Could not add a reference to a surface.\n");
+			goto out_bad_resource;
+		}
+	}
+
+	*base_p = base;
+	return 0;
+
+out_bad_resource:
+	ttm_base_object_unref(&base);
+out_no_lookup:
+	if (handle_type == DRM_VMW_HANDLE_PRIME)
+		(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
Line 843... Line 968...
 
 	return ret;
Line 845... Line 970...
 }
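A minimal sketch of how a caller inside this file would be expected to use the helper added above. The wrapper function and its name are illustrative; only the vmw_surface_handle_reference() signature and the DRM_VMW_HANDLE_LEGACY enumerator come from the driver and its UAPI:

/* Illustrative only: look up a legacy surface handle, use it, drop it. */
static int example_lookup_surface(struct vmw_private *dev_priv,
				  struct drm_file *file_priv,
				  uint32_t handle)
{
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, handle,
					   DRM_VMW_HANDLE_LEGACY, &base);
	if (unlikely(ret != 0))
		return ret;

	/* ... access the surface via container_of(base, ...) here ... */

	ttm_base_object_unref(&base);
	return 0;
}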
Line 888... Line 1013...
 		DRM_ERROR("copy_to_user failed %p %u\n",
 			  user_sizes, srf->num_sizes);
 		ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
 		ret = -EFAULT;
 	}
-out_bad_resource:
-out_no_reference:
+
 	ttm_base_object_unref(&base);
Line 896... Line 1020...
 
 	return ret;
Line 898... Line 1022...
 }
+
+#endif
+/**
+ * vmw_surface_define_encode - Encode a surface_define command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static int vmw_gb_surface_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf = vmw_res_to_srf(res);
+	uint32_t cmd_len, cmd_id, submit_len;
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBSurface body;
+	} *cmd;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBSurface_v2 body;
+	} *cmd2;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	vmw_fifo_resource_inc(dev_priv);
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a surface id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	if (srf->array_size > 0) {
+		/* has_dx checked on creation time. */
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
+		cmd_len = sizeof(cmd2->body);
+		submit_len = sizeof(*cmd2);
+	} else {
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+		cmd_len = sizeof(cmd->body);
+		submit_len = sizeof(*cmd);
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, submit_len);
+	cmd2 = (typeof(cmd2))cmd;
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "creation.\n");
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	if (srf->array_size > 0) {
+		cmd2->header.id = cmd_id;
+		cmd2->header.size = cmd_len;
+		cmd2->body.sid = srf->res.id;
+		cmd2->body.surfaceFlags = srf->flags;
+		cmd2->body.format = cpu_to_le32(srf->format);
+		cmd2->body.numMipLevels = srf->mip_levels[0];
+		cmd2->body.multisampleCount = srf->multisample_count;
+		cmd2->body.autogenFilter = srf->autogen_filter;
+		cmd2->body.size.width = srf->base_size.width;
+		cmd2->body.size.height = srf->base_size.height;
+		cmd2->body.size.depth = srf->base_size.depth;
+		cmd2->body.arraySize = srf->array_size;
+	} else {
+		cmd->header.id = cmd_id;
+		cmd->header.size = cmd_len;
+		cmd->body.sid = srf->res.id;
+		cmd->body.surfaceFlags = srf->flags;
+		cmd->body.format = cpu_to_le32(srf->format);
+		cmd->body.numMipLevels = srf->mip_levels[0];
+		cmd->body.multisampleCount = srf->multisample_count;
+		cmd->body.autogenFilter = srf->autogen_filter;
+		cmd->body.size.width = srf->base_size.width;
+		cmd->body.size.height = srf->base_size.height;
+		cmd->body.size.depth = srf->base_size.depth;
+	}
+
+	vmw_fifo_commit(dev_priv, submit_len);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	vmw_fifo_resource_dec(dev_priv);
+	return ret;
+}
+
+
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBSurface body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdUpdateGBSurface body;
+	} *cmd2;
+	uint32_t submit_size;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
+
+	cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd1 == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+	cmd1->header.size = sizeof(cmd1->body);
+	cmd1->body.sid = res->id;
+	cmd1->body.mobid = bo->mem.start;
+	if (res->backup_dirty) {
+		cmd2 = (void *) &cmd1[1];
+		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
+		cmd2->header.size = sizeof(cmd2->body);
+		cmd2->body.sid = res->id;
+		res->backup_dirty = false;
+	}
+	vmw_fifo_commit(dev_priv, submit_size);
+
+	return 0;
+}
+
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdReadbackGBSurface body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdInvalidateGBSurface body;
+	} *cmd2;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBSurface body;
+	} *cmd3;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	if (readback) {
+		cmd1 = (void *) cmd;
+		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
+		cmd1->header.size = sizeof(cmd1->body);
+		cmd1->body.sid = res->id;
+		cmd3 = (void *) &cmd1[1];
+	} else {
+		cmd2 = (void *) cmd;
+		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
+		cmd2->header.size = sizeof(cmd2->body);
+		cmd2->body.sid = res->id;
+		cmd3 = (void *) &cmd2[1];
+	}
+
+	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+	cmd3->header.size = sizeof(cmd3->body);
+	cmd3->body.sid = res->id;
+	cmd3->body.mobid = SVGA3D_INVALID_ID;
+
+	vmw_fifo_commit(dev_priv, submit_size);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_fence_single_bo(val_buf->bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+static int vmw_gb_surface_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf = vmw_res_to_srf(res);
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBSurface body;
+	} *cmd;
+
+	if (likely(res->id == -1))
+		return 0;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
+	vmw_binding_res_list_scrub(&res->binding_head);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "destruction.\n");
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.sid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	mutex_unlock(&dev_priv->binding_mutex);
+	vmw_resource_release_id(res);
+	vmw_fifo_resource_dec(dev_priv);
+
+	return 0;
+}
+/**
+ * vmw_surface_gb_priv_define - Define a private GB surface
+ *
+ * @dev:  Pointer to a struct drm_device
+ * @user_accounting_size:  Used to track user-space memory usage, set
+ *                         to 0 for kernel mode only memory
+ * @svga3d_flags: SVGA3d surface flags for the device
+ * @format: requested surface format
+ * @for_scanout: true if inteded to be used for scanout buffer
+ * @num_mip_levels:  number of MIP levels
+ * @multisample_count:
+ * @array_size: Surface array size.
+ * @size: width, heigh, depth of the surface requested
+ * @user_srf_out: allocated user_srf.  Set to NULL on failure.
+ *
+ * GB surfaces allocated by this function will not have a user mode handle, and
+ * thus will only be visible to vmwgfx.  For optimization reasons the
+ * surface may later be given a user mode handle by another function to make
+ * it available to user mode drivers.
+ */
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+			       uint32_t user_accounting_size,
+			       uint32_t svga3d_flags,
+			       SVGA3dSurfaceFormat format,
+			       bool for_scanout,
+			       uint32_t num_mip_levels,
+			       uint32_t multisample_count,
+			       uint32_t array_size,
+			       struct drm_vmw_size size,
+			       struct vmw_surface **srf_out)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_surface *user_srf;
+	struct vmw_surface *srf;
+	int ret;
+	u32 num_layers;
+
+	*srf_out = NULL;
+
+	if (for_scanout) {
+		if (!svga3dsurface_is_screen_target_format(format)) {
+			DRM_ERROR("Invalid Screen Target surface format.");
+			return -EINVAL;
+		}
+	} else {
+		const struct svga3d_surface_desc *desc;
+
+		desc = svga3dsurface_get_desc(format);
+		if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+			DRM_ERROR("Invalid surface format.\n");
+			return -EINVAL;
+		}
+	}
+
+	/* array_size must be null for non-GL3 host. */
+	if (array_size > 0 && !dev_priv->has_dx) {
+		DRM_ERROR("Tried to create DX surface on non-DX host.\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   user_accounting_size, false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for surface"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+	if (unlikely(user_srf == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_user_srf;
+	}
+
+	*srf_out  = &user_srf->srf;
+	user_srf->size = user_accounting_size;
+	user_srf->prime.base.shareable = false;
+	user_srf->prime.base.tfile     = NULL;
+
+	srf = &user_srf->srf;
+	srf->flags             = svga3d_flags;
+	srf->format            = format;
+	srf->scanout           = for_scanout;
+	srf->mip_levels[0]     = num_mip_levels;
+	srf->num_sizes         = 1;
+	srf->sizes             = NULL;
+	srf->offsets           = NULL;
+	srf->base_size         = size;
+	srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
+	srf->array_size        = array_size;
+	srf->multisample_count = multisample_count;
+
+	if (array_size)
+		num_layers = array_size;
+	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
+		num_layers = SVGA3D_MAX_SURFACE_FACES;
+	else
+		num_layers = 1;
+
+	srf->res.backup_size   =
+		svga3dsurface_get_serialized_size(srf->format,
+						  srf->base_size,
+						  srf->mip_levels[0],
+						  num_layers);
+
+	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
+		srf->res.backup_size += sizeof(SVGA3dDXSOState);
+
+	if (dev_priv->active_display_unit == vmw_du_screen_target &&
+	    for_scanout)
+		srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
+
+	/*
+	 * From this point, the generic resource management functions
+	 * destroy the object on failure.
+	 */
+	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+
+out_no_user_srf:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
+
+out_unlock:
+	ttm_read_unlock(&dev_priv->reservation_sem);
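The diff is cut off here, inside the error paths of vmw_surface_gb_priv_define(). To illustrate the API described by the kernel-doc above, here is a hedged sketch of how display code might define a kernel-private screen-target surface; the wrapper function, the chosen format and the size values are examples, not taken from this revision:

/* Illustrative only: define a private GB surface intended for scanout. */
static int example_define_scanout_surface(struct drm_device *dev,
					  struct vmw_surface **srf_out)
{
	struct drm_vmw_size size = {
		.width  = 1024,
		.height = 768,
		.depth  = 1,
	};

	return vmw_surface_gb_priv_define(dev,
					  0,               /* user_accounting_size: kernel-only */
					  0,               /* svga3d_flags */
					  SVGA3D_X8R8G8B8, /* example format */
					  true,            /* for_scanout */
					  1,               /* num_mip_levels */
					  0,               /* multisample_count */
					  0,               /* array_size */
					  size,
					  srf_out);
}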