Subversion Repositories: Kolibri OS

Compare Revisions

Rev 5077 → Rev 5078

/drivers/video/drm/vmwgfx/Makefile
1,4 → 1,5
 
 
CC = gcc
LD = ld
AS = as
13,7 → 14,7
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/uapi
 
CFLAGS = -c -O2 $(INCLUDES) -march=i686 -fomit-frame-pointer -fno-builtin-printf
CFLAGS = -c -Os $(INCLUDES) -fomit-frame-pointer -fno-builtin-printf
CFLAGS+= -mno-ms-bitfields
 
LIBPATH:= $(DRV_TOPDIR)/ddk
93,7 → 94,6
 
$(NAME).dll: $(NAME_OBJS) $(LIBPATH)/libcore.a $(LIBPATH)/libddk.a vmw.lds Makefile
$(LD) -L$(LIBPATH) $(LDFLAGS) -T vmw.lds -o $@ $(NAME_OBJS) $(LIBS)
kpack $@
 
 
%.o : %.c $(HFILES) Makefile
/drivers/video/drm/vmwgfx/Makefile.lto
6,19 → 6,18
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
 
DRV_TOPDIR = $(CURDIR)/../../..
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRM_TOPDIR = $(CURDIR)/..
 
DRV_INCLUDES = $(DRV_TOPDIR)/include
 
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-I$(DRV_INCLUDES)/linux
-I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/uapi
 
CFLAGS_OPT = -Os -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf -mno-stack-arg-probe
CFLAGS_OPT = -Os -fomit-frame-pointer -fno-builtin-printf -mno-stack-arg-probe
CFLAGS_OPT+= -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields -flto
CFLAGS = -c $(INCLUDES) $(DEFINES) $(CFLAGS_OPT)
 
LIBPATH:= $(DRV_TOPDIR)/ddk
LIBPATH:= ../../../ddk
 
LIBS:= -lddk -lcore -lgcc
 
52,8 → 51,10
vmwgfx_irq.c \
vmwgfx_kms.c \
vmwgfx_marker.c \
vmwgfx_mob.c \
vmwgfx_resource.c \
vmwgfx_scrn.c \
vmwgfx_shader.c \
vmwgfx_surface.c \
vmwgfx_ttm_glue.c \
../hdmi.c \
61,6 → 62,7
../ttm/ttm_bo.c \
../ttm/ttm_bo_manager.c \
../ttm/ttm_execbuf_util.c \
../ttm/ttm_lock.c \
../ttm/ttm_memory.c \
../ttm/ttm_object.c \
../ttm/ttm_page_alloc.c \
90,7 → 92,7
all: $(NAME).dll
 
$(NAME).dll: $(NAME_OBJS) $(SRC_DEP) $(HFILES) vmw.lds Makefile
$(CC) $(CFLAGS_OPT) -fwhole-program -nostdlib -Wl,-L$(LIBPATH),$(LDFLAGS),-T,vmw.lds -o $@ $(NAME_OBJS) libddk.a libcore.a libgcc.a
$(CC) $(CFLAGS_OPT) -fwhole-program -nostdlib -Wl,-L$(LIBPATH),$(LDFLAGS),-T,vmw.lds -o $@ $(NAME_OBJS) $(LIBS)
kpack $@
 
 
101,6 → 103,5
as -o $@ $<
 
 
clean:
-rm -f */*.o
 
 
/drivers/video/drm/vmwgfx/main.c
31,22 → 31,14
 
int vmw_init(void);
int kms_init(struct drm_device *dev);
void vmw_driver_thread();
void kms_update();
 
void cpu_detect();
 
void parse_cmdline(char *cmdline, char *log);
int _stdcall display_handler(ioctl_t *io);
 
int srv_blit_bitmap(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h);
 
int blit_textured(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h);
 
int blit_tex(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h);
 
void get_pci_info(struct pci_device *dev);
int gem_getparam(struct drm_device *dev, void *data);
 
64,23 → 56,6
 
int kms_modeset = 1;
 
 
void vmw_driver_thread()
{
dbgprintf("%s\n",__FUNCTION__);
 
// run_workqueue(dev_priv->wq);
 
while(driver_wq_state)
{
kms_update();
delay(1);
};
__asm__ __volatile__ (
"int $0x40"
::"a"(-1));
}
 
u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
{
 
203,9 → 178,9
break;
 
case SRV_ENUM_MODES:
dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
inp, io->inp_size, io->out_size );
check_output(4);
// dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
// inp, io->inp_size, io->out_size );
// check_output(4);
// check_input(*outp * sizeof(videomode_t));
if( kms_modeset)
retval = get_videomodes((videomode_t*)inp, outp);
212,9 → 187,9
break;
 
case SRV_SET_MODE:
dbgprintf("SRV_SET_MODE inp %x inp_size %x\n",
inp, io->inp_size);
check_input(sizeof(videomode_t));
// dbgprintf("SRV_SET_MODE inp %x inp_size %x\n",
// inp, io->inp_size);
// check_input(sizeof(videomode_t));
if( kms_modeset )
retval = set_user_mode((videomode_t*)inp);
break;
830,6 → 805,7
uint32_t hot_y;
 
struct list_head list;
void *priv;
}cursor_t;
 
#define CURSOR_WIDTH 64
865,9 → 841,8
u32 check_m_pixel;
};
 
display_t *os_display;
 
static display_t *os_display;
 
static int count_connector_modes(struct drm_connector* connector)
{
struct drm_display_mode *mode;
889,6 → 864,8
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 
du->cursor_x = x;
du->cursor_y = y;
vmw_cursor_update_position(dev_priv, true, x,y);
};
 
895,6 → 872,7
static cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
{
struct vmw_private *dev_priv = vmw_priv(os_display->ddev);
struct vmw_display_unit *du = vmw_crtc_to_du(os_display->crtc);
cursor_t *old;
 
old = os_display->cursor;
902,30 → 880,38
 
vmw_cursor_update_image(dev_priv, cursor->data,
64, 64, cursor->hot_x, cursor->hot_y);
vmw_cursor_update_position(dev_priv, true,
du->cursor_x, du->cursor_y);
return old;
};
 
// vmw_cursor_update_position(dev_priv, true,
// du->cursor_x + du->hotspot_x,
// du->cursor_y + du->hotspot_y);
void vmw_driver_thread()
{
DRM_DEBUG_KMS("%s\n",__FUNCTION__);
 
return old;
select_cursor_kms(os_display->cursor);
 
while(driver_wq_state)
{
kms_update();
delay(2);
};
__asm__ __volatile__ (
"int $0x40" /* KolibriOS sysfn -1: terminate the current thread */
::"a"(-1));
}
 
 
int kms_init(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
struct drm_crtc *crtc = NULL;
struct drm_framebuffer *fb;
 
struct vmw_display_unit *du;
cursor_t *cursor;
int mode_count;
u32_t ifl;
int err;
 
ENTER();
 
crtc = list_entry(dev->mode_config.crtc_list.next, typeof(*crtc), head);
encoder = list_entry(dev->mode_config.encoder_list.next, typeof(*encoder), head);
connector = list_entry(dev->mode_config.connector_list.next, typeof(*connector), head);
944,24 → 930,20
mode_count++;
};
 
printf("%s %d\n",__FUNCTION__, mode_count);
 
DRM_DEBUG_KMS("CONNECTOR %x ID:%d status:%d ENCODER %x CRTC %x ID:%d\n",
connector, connector->base.id,
connector->status, connector->encoder,
crtc, crtc->base.id );
 
DRM_DEBUG_KMS("[Select CRTC:%d]\n", crtc->base.id);
 
os_display = GetDisplay();
 
ifl = safe_cli();
{
os_display->ddev = dev;
os_display->connector = connector;
os_display->crtc = crtc;
os_display->supported_modes = mode_count;
 
ifl = safe_cli();
{
os_display->restore_cursor(0,0);
os_display->select_cursor = select_cursor_kms;
os_display->show_cursor = NULL;
968,16 → 950,14
os_display->move_cursor = move_cursor_kms;
os_display->restore_cursor = restore_cursor;
os_display->disable_mouse = disable_mouse;
select_cursor_kms(os_display->cursor);
};
safe_sti(ifl);
 
#ifdef __HWA__
err = init_bitmaps();
#endif
du = vmw_crtc_to_du(os_display->crtc);
du->cursor_x = os_display->width/2;
du->cursor_y = os_display->height/2;
select_cursor_kms(os_display->cursor);
 
LEAVE();
 
return 0;
};
 
986,6 → 966,7
{
struct vmw_private *dev_priv = vmw_priv(main_device);
size_t fifo_size;
u32_t ifl;
int i;
 
struct {
1004,8 → 985,8
cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
cmd->body.x = 0;
cmd->body.y = 0;
cmd->body.width = os_display->width; //cpu_to_le32(clips->x2 - clips->x1);
cmd->body.height = os_display->height; //cpu_to_le32(clips->y2 - clips->y1);
cmd->body.width = os_display->width;
cmd->body.height = os_display->height;
 
vmw_fifo_commit(dev_priv, fifo_size);
}
1012,10 → 993,9
 
int get_videomodes(videomode_t *mode, int *count)
{
struct drm_display_mode *drmmode;
int err = -1;
 
dbgprintf("mode %x count %d\n", mode, *count);
 
if( *count == 0 )
{
*count = os_display->supported_modes;
1023,7 → 1003,6
}
else if( mode != NULL )
{
struct drm_display_mode *drmmode;
int i = 0;
 
if( *count > os_display->supported_modes)
1036,15 → 1015,17
mode->width = drm_mode_width(drmmode);
mode->height = drm_mode_height(drmmode);
mode->bpp = 32;
mode->freq = drm_mode_vrefresh(drmmode);
mode->freq = drmmode->vrefresh;
i++;
mode++;
}
else break;
};
 
*count = i;
err = 0;
};
 
return err;
};
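
A minimal caller-side sketch of the two-call protocol get_videomodes() implements (the caller and allocation calls here are hypothetical; the real entry point is the SRV_ENUM_MODES handler shown earlier):

int count = 0;
/* First call: *count == 0, so only the number of modes is reported. */
if (get_videomodes(NULL, &count) == 0 && count > 0) {
    videomode_t *modes = kzalloc(count * sizeof(*modes), GFP_KERNEL);
    /* Second call: fills up to count entries with width/height/bpp/freq. */
    if (modes != NULL && get_videomodes(modes, &count) == 0) {
        /* modes[0..count-1] are now valid. */
    }
    kfree(modes);
}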
 
/drivers/video/drm/vmwgfx/svga3d_reg.h
261,12 → 261,7
/* Planar video formats. */
SVGA3D_YV12 = 121,
 
/* Shader constant formats. */
SVGA3D_SURFACE_SHADERCONST_FLOAT = 122,
SVGA3D_SURFACE_SHADERCONST_INT = 123,
SVGA3D_SURFACE_SHADERCONST_BOOL = 124,
 
SVGA3D_FORMAT_MAX = 125,
SVGA3D_FORMAT_MAX = 122,
} SVGA3dSurfaceFormat;
 
typedef uint32 SVGA3dColor; /* a, r, g, b */
1223,10 → 1218,20
#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
 
#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130
 
#define SVGA_3D_CMD_GB_SCREEN_DMA 1131
#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132
#define SVGA_3D_CMD_GB_MOB_FENCE 1133
#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134
#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135
#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136
#define SVGA_3D_CMD_NOP_ERROR 1137
 
#define SVGA_3D_CMD_RESERVED1 1138
#define SVGA_3D_CMD_RESERVED2 1139
#define SVGA_3D_CMD_RESERVED3 1140
#define SVGA_3D_CMD_RESERVED4 1141
#define SVGA_3D_CMD_RESERVED5 1142
 
#define SVGA_3D_CMD_MAX 1142
#define SVGA_3D_CMD_FUTURE_MAX 3000
 
1973,8 → 1978,7
uint32 sizeInBytes;
uint32 validSizeInBytes;
SVGAMobFormat ptDepth;
}
__attribute__((__packed__))
} __packed
SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
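
The `} __packed` shorthand that replaces the explicit attribute from here on is presumably the usual kernel macro, roughly:

/* Assumed definition, as in the Linux compiler headers: */
#define __packed __attribute__((__packed__))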
 
typedef
1984,15 → 1988,13
uint32 sizeInBytes;
uint32 validSizeInBytes;
SVGAMobFormat ptDepth;
}
__attribute__((__packed__))
} __packed
SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
 
typedef
struct {
SVGAOTableType type;
}
__attribute__((__packed__))
} __packed
SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
 
/*
2005,8 → 2007,7
SVGAMobFormat ptDepth;
PPN base;
uint32 sizeInBytes;
}
__attribute__((__packed__))
} __packed
SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
 
 
2017,8 → 2018,7
typedef
struct SVGA3dCmdDestroyGBMob {
SVGAMobId mobid;
}
__attribute__((__packed__))
} __packed
SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
 
/*
2031,8 → 2031,7
SVGAMobFormat ptDepth;
PPN base;
uint32 sizeInBytes;
}
__attribute__((__packed__))
} __packed
SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */
 
/*
2045,8 → 2044,7
SVGAMobFormat ptDepth;
PPN64 base;
uint32 sizeInBytes;
}
__attribute__((__packed__))
} __packed
SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
 
/*
2059,8 → 2057,7
SVGAMobFormat ptDepth;
PPN64 base;
uint32 sizeInBytes;
}
__attribute__((__packed__))
} __packed
SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
 
/*
2070,8 → 2067,7
typedef
struct SVGA3dCmdUpdateGBMobMapping {
SVGAMobId mobid;
}
__attribute__((__packed__))
} __packed
SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
 
/*
2087,7 → 2083,8
uint32 multisampleCount;
SVGA3dTextureFilter autogenFilter;
SVGA3dSize size;
} SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
} __packed
SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
 
/*
* Destroy a guest-backed surface.
2096,7 → 2093,8
typedef
struct SVGA3dCmdDestroyGBSurface {
uint32 sid;
} SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
} __packed
SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
 
/*
* Bind a guest-backed surface to an object.
2106,7 → 2104,8
struct SVGA3dCmdBindGBSurface {
uint32 sid;
SVGAMobId mobid;
} SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
} __packed
SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
 
/*
* Conditionally bind a mob to a guest backed surface if testMobid
2123,7 → 2122,7
SVGAMobId testMobid;
SVGAMobId mobid;
uint32 flags;
}
} __packed
SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
 
/*
2135,7 → 2134,8
struct SVGA3dCmdUpdateGBImage {
SVGA3dSurfaceImageId image;
SVGA3dBox box;
} SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
} __packed
SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
 
/*
* Update an entire guest-backed surface.
2145,7 → 2145,8
typedef
struct SVGA3dCmdUpdateGBSurface {
uint32 sid;
} SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
} __packed
SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
 
/*
* Readback an image in a guest-backed surface.
2155,7 → 2156,8
typedef
struct SVGA3dCmdReadbackGBImage {
SVGA3dSurfaceImageId image;
} SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
} __packed
SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
 
/*
* Readback an entire guest-backed surface.
2165,7 → 2167,8
typedef
struct SVGA3dCmdReadbackGBSurface {
uint32 sid;
} SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
} __packed
SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
 
/*
* Readback a sub rect of an image in a guest-backed surface. After
2179,7 → 2182,7
SVGA3dSurfaceImageId image;
SVGA3dBox box;
uint32 invertBox;
}
} __packed
SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
 
/*
2190,7 → 2193,8
typedef
struct SVGA3dCmdInvalidateGBImage {
SVGA3dSurfaceImageId image;
} SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
} __packed
SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
 
/*
* Invalidate an entire guest-backed surface.
2200,7 → 2204,8
typedef
struct SVGA3dCmdInvalidateGBSurface {
uint32 sid;
} SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
} __packed
SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
 
/*
* Invalidate a sub rect of an image in a guest-backed surface. After
2214,7 → 2219,7
SVGA3dSurfaceImageId image;
SVGA3dBox box;
uint32 invertBox;
}
} __packed
SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
 
/*
2224,7 → 2229,8
typedef
struct SVGA3dCmdDefineGBContext {
uint32 cid;
} SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
} __packed
SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
 
/*
* Destroy a guest-backed context.
2233,7 → 2239,8
typedef
struct SVGA3dCmdDestroyGBContext {
uint32 cid;
} SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
} __packed
SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
 
/*
* Bind a guest-backed context.
2252,7 → 2259,8
uint32 cid;
SVGAMobId mobid;
uint32 validContents;
} SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
} __packed
SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
 
/*
* Readback a guest-backed context.
2262,7 → 2270,8
typedef
struct SVGA3dCmdReadbackGBContext {
uint32 cid;
} SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
} __packed
SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
 
/*
* Invalidate a guest-backed context.
2270,7 → 2279,8
typedef
struct SVGA3dCmdInvalidateGBContext {
uint32 cid;
} SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
} __packed
SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
 
/*
* Define a guest-backed shader.
2281,7 → 2291,8
uint32 shid;
SVGA3dShaderType type;
uint32 sizeInBytes;
} SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
} __packed
SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
 
/*
* Bind a guest-backed shader.
2291,7 → 2302,8
uint32 shid;
SVGAMobId mobid;
uint32 offsetInBytes;
} SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
} __packed
SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
 
/*
* Destroy a guest-backed shader.
2299,7 → 2311,8
 
typedef struct SVGA3dCmdDestroyGBShader {
uint32 shid;
} SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
} __packed
SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
 
typedef
struct {
2314,7 → 2327,8
* Note that FLOAT and INT constants are 4-dwords in length, while
* BOOL constants are 1-dword in length.
*/
} SVGA3dCmdSetGBShaderConstInline;
} __packed
SVGA3dCmdSetGBShaderConstInline;
/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
 
typedef
2321,7 → 2335,8
struct {
uint32 cid;
SVGA3dQueryType type;
} SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
} __packed
SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
 
typedef
struct {
2329,7 → 2344,8
SVGA3dQueryType type;
SVGAMobId mobid;
uint32 offset;
} SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
} __packed
SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
 
 
/*
2346,7 → 2362,8
SVGA3dQueryType type;
SVGAMobId mobid;
uint32 offset;
} SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
} __packed
SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
 
typedef
struct {
2353,7 → 2370,7
SVGAMobId mobid;
uint32 fbOffset;
uint32 initalized;
}
} __packed
SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
 
typedef
2360,7 → 2377,7
struct {
SVGAMobId mobid;
uint32 gartOffset;
}
} __packed
SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
 
 
2368,7 → 2385,7
struct {
uint32 gartOffset;
uint32 numPages;
}
} __packed
SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
 
 
2385,13 → 2402,13
int32 xRoot;
int32 yRoot;
uint32 flags;
}
} __packed
SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
 
typedef
struct {
uint32 stid;
}
} __packed
SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
 
typedef
2398,7 → 2415,7
struct {
uint32 stid;
SVGA3dSurfaceImageId image;
}
} __packed
SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
 
typedef
2405,7 → 2422,7
struct {
uint32 stid;
SVGA3dBox box;
}
} __packed
SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
 
/*
2583,4 → 2600,28
float f;
} SVGA3dDevCapResult;
 
typedef enum {
SVGA3DCAPS_RECORD_UNKNOWN = 0,
SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
} SVGA3dCapsRecordType;
 
typedef
struct SVGA3dCapsRecordHeader {
uint32 length;
SVGA3dCapsRecordType type;
}
SVGA3dCapsRecordHeader;
 
typedef
struct SVGA3dCapsRecord {
SVGA3dCapsRecordHeader header;
uint32 data[1];
}
SVGA3dCapsRecord;
 
 
typedef uint32 SVGA3dCapPair[2];
 
#endif /* _SVGA3D_REG_H_ */
/drivers/video/drm/vmwgfx/svga3d_surfacedefs.h
38,8 → 38,11
 
#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
#define min_t(type, x, y) ((x) < (y) ? (x) : (y))
#define surf_size_struct SVGA3dSize
#define u32 uint32
#define u64 uint64_t
#define U32_MAX ((u32)~0U)
 
#endif /* __KERNEL__ */
 
704,8 → 707,8
 
static inline u32 clamped_umul32(u32 a, u32 b)
{
uint64_t tmp = (uint64_t) a*b;
return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
u64 tmp = (u64) a*b;
return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
}
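
For illustration, the clamp saturates instead of silently wrapping (a sketch, not part of the header):

/* 0x10000 * 0x10000 == 2^32, which a plain u32 multiply would wrap to 0. */
u32 r = clamped_umul32(0x10000, 0x10000); /* r == U32_MAX, not 0 */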
 
static inline const struct svga3d_surface_desc *
834,7 → 837,7
bool cubemap)
{
const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
u32 total_size = 0;
u64 total_size = 0;
u32 mip;
 
for (mip = 0; mip < num_mip_levels; mip++) {
847,7 → 850,7
if (cubemap)
total_size *= SVGA3D_MAX_SURFACE_FACES;
 
return total_size;
return (u32) min_t(u64, total_size, (u64) U32_MAX);
}
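
A quick illustration of why the accumulator was widened to u64 (the sizes are made up):

/* Six 4 GiB cubemap faces would wrap a u32 total several times over. */
u64 total = (u64)4096 * 1024 * 1024 * 6;            /* 24 GiB */
u32 safe  = (u32) min_t(u64, total, (u64) U32_MAX); /* clamps to U32_MAX */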
 
 
/drivers/video/drm/vmwgfx/svga_reg.h
169,10 → 169,17
SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */
SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits of command buffer PA; writing submits commands */
SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
SVGA_REG_TOP = 53, /* Must be 1 more than the last register */
SVGA_REG_CMD_PREPEND_LOW = 53,
SVGA_REG_CMD_PREPEND_HIGH = 54,
SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
SVGA_REG_MOB_MAX_SIZE = 57,
SVGA_REG_TOP = 58, /* Must be 1 more than the last register */
 
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
/drivers/video/drm/vmwgfx/vmwgfx_context.c
33,11 → 33,12
struct ttm_base_object base;
struct vmw_resource res;
struct vmw_ctx_binding_state cbs;
struct vmw_cmdbuf_res_manager *man;
};
 
 
 
typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
 
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
50,9 → 51,11
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
bool rebind);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;
 
101,7 → 104,8
 
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
 
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
111,7 → 115,11
 
if (res->func->destroy == vmw_gb_context_destroy) {
mutex_lock(&dev_priv->cmdbuf_mutex);
vmw_cmdbuf_res_man_destroy(uctx->man);
mutex_lock(&dev_priv->binding_mutex);
(void) vmw_context_binding_state_kill(&uctx->cbs);
(void) vmw_gb_context_destroy(res);
mutex_unlock(&dev_priv->binding_mutex);
if (dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
146,14 → 154,17
ret = vmw_resource_init(dev_priv, res, true,
res_free, &vmw_gb_context_func);
res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
if (unlikely(ret != 0))
goto out_err;
 
if (unlikely(ret != 0)) {
if (res_free)
res_free(res);
else
kfree(res);
return ret;
if (dev_priv->has_mob) {
uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
if (unlikely(IS_ERR(uctx->man))) {
ret = PTR_ERR(uctx->man);
uctx->man = NULL;
goto out_err;
}
}
 
memset(&uctx->cbs, 0, sizeof(uctx->cbs));
INIT_LIST_HEAD(&uctx->cbs.list);
160,6 → 171,13
 
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
 
out_err:
if (res_free)
res_free(res);
else
kfree(res);
return ret;
}
 
static int vmw_context_init(struct vmw_private *dev_priv,
328,7 → 346,7
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
mutex_lock(&dev_priv->binding_mutex);
vmw_context_binding_state_kill(&uctx->cbs);
vmw_context_binding_state_scrub(&uctx->cbs);
 
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
378,11 → 396,7
SVGA3dCmdHeader header;
SVGA3dCmdDestroyGBContext body;
} *cmd;
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
 
BUG_ON(!list_empty(&uctx->cbs.list));
 
if (likely(res->id == -1))
return 0;
 
461,7 → 475,6
struct vmw_resource *tmp;
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
 
 
471,9 → 484,10
*/
 
if (unlikely(vmw_user_context_size == 0))
vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
 
ret = ttm_read_lock(&vmaster->lock, true);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
 
520,7 → 534,7
out_err:
vmw_resource_unreference(&res);
out_unlock:
ttm_read_unlock(&vmaster->lock);
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
 
}
530,8 → 544,9
* vmw_context_scrub_shader - scrub a shader binding from a context.
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
550,7 → 565,7
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = bi->i1.shader_type;
cmd->body.shid = SVGA3D_INVALID_ID;
cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
561,8 → 576,10
* from a context.
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
bool rebind)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
581,7 → 598,7
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = bi->i1.rt_type;
cmd->body.target.sid = SVGA3D_INVALID_ID;
cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
cmd->body.target.face = 0;
cmd->body.target.mipmap = 0;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
593,11 → 610,13
* vmw_context_scrub_texture - scrub a texture binding from a context.
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*
* TODO: Possibly complement this function with a function that takes
* a list of texture bindings and combines them to a single command.
*/
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
bool rebind)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
621,7 → 640,7
cmd->body.c.cid = bi->ctx->id;
cmd->body.s1.stage = bi->i1.texture_stage;
cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
694,6 → 713,7
vmw_context_binding_drop(loc);
 
loc->bi = *bi;
loc->bi.scrubbed = false;
list_add_tail(&loc->ctx_list, &cbs->list);
INIT_LIST_HEAD(&loc->res_list);
 
729,13 → 749,12
if (loc->bi.ctx != NULL)
vmw_context_binding_drop(loc);
 
if (bi->res != NULL) {
loc->bi = *bi;
list_add_tail(&loc->ctx_list, &cbs->list);
if (bi->res != NULL)
list_add_tail(&loc->res_list, &bi->res->binding_head);
else
INIT_LIST_HEAD(&loc->res_list);
}
}
 
/**
* vmw_context_binding_kill - Kill a binding on the device
748,7 → 767,10
*/
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
if (!cb->bi.scrubbed) {
(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
cb->bi.scrubbed = true;
}
vmw_context_binding_drop(cb);
}
 
770,6 → 792,27
}
 
/**
* vmw_context_binding_state_scrub - Scrub all bindings associated with a
* struct vmw_ctx_binding state structure.
*
* @cbs: Pointer to the context binding state tracker.
*
* Emits commands to scrub all bindings associated with the
* context binding state tracker.
*/
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
struct vmw_ctx_binding *entry;
 
list_for_each_entry(entry, &cbs->list, ctx_list) {
if (!entry->bi.scrubbed) {
(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
entry->bi.scrubbed = true;
}
}
}
 
/**
* vmw_context_binding_res_list_kill - Kill all bindings on a
* resource binding list
*
787,6 → 830,27
}
 
/**
* vmw_context_binding_res_list_scrub - Scrub all bindings on a
* resource binding list
*
* @head: list head of resource binding list
*
* Scrub all bindings associated with a specific resource. Typically
* called before the resource is evicted.
*/
void vmw_context_binding_res_list_scrub(struct list_head *head)
{
struct vmw_ctx_binding *entry;
 
list_for_each_entry(entry, head, res_list) {
if (!entry->bi.scrubbed) {
(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
entry->bi.scrubbed = true;
}
}
}
 
/**
* vmw_context_binding_state_transfer - Commit staged binding info
*
* @ctx: Pointer to context to commit the staged binding info to.
805,3 → 869,55
list_for_each_entry_safe(entry, next, &from->list, ctx_list)
vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}
 
/**
* vmw_context_rebind_all - Rebind all scrubbed bindings of a context
*
* @ctx: The context resource
*
* Walks through the context binding list and rebinds all scrubbed
* resources.
*/
int vmw_context_rebind_all(struct vmw_resource *ctx)
{
struct vmw_ctx_binding *entry;
struct vmw_user_context *uctx =
container_of(ctx, struct vmw_user_context, res);
struct vmw_ctx_binding_state *cbs = &uctx->cbs;
int ret;
 
list_for_each_entry(entry, &cbs->list, ctx_list) {
if (likely(!entry->bi.scrubbed))
continue;
 
if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
SVGA3D_INVALID_ID))
continue;
 
ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
if (unlikely(ret != 0))
return ret;
 
entry->bi.scrubbed = false;
}
 
return 0;
}
 
/**
* vmw_context_binding_list - Return a list of context bindings
*
* @ctx: The context resource
*
* Returns the current list of bindings of the given context. Note that
* this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
*/
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}
 
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
return container_of(ctx, struct vmw_user_context, res)->man;
}
/drivers/video/drm/vmwgfx/vmwgfx_dmabuf.c
52,7 → 52,6
struct ttm_placement *placement,
bool interruptible)
{
// struct vmw_master *vmaster = dev_priv->active_master;
struct ttm_buffer_object *bo = &buf->base;
int ret;
 
62,7 → 61,7
 
vmw_execbuf_release_pinned_bo(dev_priv);
 
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err;
 
95,7 → 94,6
struct vmw_dma_buffer *buf,
bool pin, bool interruptible)
{
// struct vmw_master *vmaster = dev_priv->active_master;
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement *placement;
int ret;
107,7 → 105,7
if (pin)
vmw_execbuf_release_pinned_bo(dev_priv);
 
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err;
 
198,7 → 196,6
struct vmw_dma_buffer *buf,
bool pin, bool interruptible)
{
// struct vmw_master *vmaster = dev_priv->active_master;
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement placement;
int ret = 0;
215,7 → 212,7
 
if (pin)
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err_unlock;
 
/drivers/video/drm/vmwgfx/vmwgfx_drv.c
142,11 → 142,11
 
static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
vmw_kms_cursor_bypass_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
159,29 → 159,28
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
 
VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
vmw_fence_obj_signaled_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_FENCE_EVENT,
vmw_fence_event_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 
/* these allow direct access to the framebuffers; mark as master only */
VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
194,19 → 193,19
DRM_MASTER | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_CREATE_SHADER,
vmw_shader_define_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SHADER,
vmw_shader_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
vmw_gb_surface_define_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
vmw_gb_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_SYNCCPU,
vmw_user_dmabuf_synccpu_ioctl,
DRM_AUTH | DRM_UNLOCKED),
DRM_UNLOCKED | DRM_RENDER_ALLOW),
};
#endif
 
315,7 → 314,7
if (unlikely(ret != 0))
return ret;
 
ret = ttm_bo_reserve(bo, false, true, false, 0);
ret = ttm_bo_reserve(bo, false, true, false, NULL);
BUG_ON(ret != 0);
 
ret = ttm_bo_kmap(bo, 0, 1, &map);
341,7 → 340,6
static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
ENTER();
 
ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
if (unlikely(ret != 0)) {
354,8 → 352,8
// goto out_no_query_bo;
// vmw_dummy_query_bo_prepare(dev_priv);
 
LEAVE();
 
 
return 0;
 
out_no_query_bo:
534,8 → 532,6
enum vmw_res_type i;
bool refuse_dma = false;
 
ENTER();
 
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (unlikely(dev_priv == NULL)) {
DRM_ERROR("Failed allocating a device private struct.\n");
552,6 → 548,7
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
 
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
570,9 → 567,6
dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
 
printk("io: %x vram: %x mmio: %x\n",dev_priv->io_start,
dev_priv->vram_start,dev_priv->mmio_start);
 
dev_priv->enable_fb = enable_fbdev;
 
mutex_lock(&dev_priv->hw_mutex);
616,6 → 610,7
dev_priv->memory_size = 512*1024*1024;
}
dev_priv->max_mob_pages = 0;
dev_priv->max_mob_size = 0;
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
uint64_t mem_size =
vmw_read(dev_priv,
625,6 → 620,8
dev_priv->prim_bb_mem =
vmw_read(dev_priv,
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
dev_priv->max_mob_size =
vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
} else
dev_priv->prim_bb_mem = dev_priv->vram_size;
 
667,7 → 664,9
 
ret = ttm_bo_device_init(&dev_priv->bdev,
dev_priv->bo_global_ref.ref.object,
&vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
&vmw_bo_driver,
NULL,
VMWGFX_FILE_PAGE_OFFSET,
false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
717,14 → 716,14
goto out_err4;
}
 
// dev_priv->tdev = ttm_object_device_init
// (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
dev_priv->tdev = ttm_object_device_init
(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
 
// if (unlikely(dev_priv->tdev == NULL)) {
// DRM_ERROR("Unable to initialize TTM object management.\n");
// ret = -ENOMEM;
// goto out_err4;
// }
if (unlikely(dev_priv->tdev == NULL)) {
DRM_ERROR("Unable to initialize TTM object management.\n");
ret = -ENOMEM;
goto out_err4;
}
 
dev->dev_private = dev_priv;
 
731,7 → 730,7
#if 0
 
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
ret = drm_irq_install(dev);
ret = drm_irq_install(dev, dev->pdev->irq);
if (ret != 0) {
DRM_ERROR("Failed installing irq: %d\n", ret);
goto out_no_irq;
761,7 → 760,6
 
main_device = dev;
 
LEAVE();
return 0;
 
out_no_fifo:
890,7 → 888,6
// goto out_no_tfile;
 
file_priv->driver_priv = vmw_fp;
// dev_priv->bdev.dev_mapping = dev->dev_mapping;
 
return 0;
 
1094,12 → 1091,11
{
struct vmw_private *dev_priv =
container_of(nb, struct vmw_private, pm_nb);
struct vmw_master *vmaster = dev_priv->active_master;
 
switch (val) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
ttm_suspend_lock(&vmaster->lock);
ttm_suspend_lock(&dev_priv->reservation_sem);
 
/**
* This empties VRAM and unbinds all GMR bindings.
1113,7 → 1109,7
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
case PM_POST_RESTORE:
ttm_suspend_unlock(&vmaster->lock);
ttm_suspend_unlock(&dev_priv->reservation_sem);
 
break;
case PM_RESTORE_PREPARE:
1201,7 → 1197,7
 
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_MODESET,
DRIVER_MODESET | DRIVER_RENDER,
.load = vmw_driver_load,
// .unload = vmw_driver_unload,
// .lastclose = vmw_lastclose,
1248,7 → 1244,6
const struct pci_device_id *ent;
int err;
 
ENTER();
 
ent = find_pci_device(&device, vmw_pci_id_list);
if( unlikely(ent == NULL) )
1263,7 → 1258,6
device.pci_dev.device);
 
err = drm_get_pci_dev(&device.pci_dev, ent, &driver);
LEAVE();
 
return err;
}
/drivers/video/drm/vmwgfx/vmwgfx_drv.h
36,15 → 36,15
//#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
//#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
//#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
 
#define VMWGFX_DRIVER_DATE "20121114"
#define VMWGFX_DRIVER_DATE "20140704"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 5
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_DRIVER_MINOR 6
#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
89,13 → 89,12
return v;
}
 
struct ttm_lock{};
struct ww_acquire_ctx{};
 
struct vmw_fpriv {
// struct drm_master *locked_master;
struct ttm_object_file *tfile;
struct list_head fence_events;
bool gb_aware;
};
 
struct vmw_dma_buffer {
137,6 → 136,10
void (*hw_destroy) (struct vmw_resource *res);
};
 
 
/*
* Resources that are managed using ioctls.
*/
enum vmw_res_type {
vmw_res_context,
vmw_res_surface,
145,6 → 148,15
vmw_res_max
};
 
/*
* Resources that are managed using command streams.
*/
enum vmw_cmdbuf_res_type {
vmw_cmdbuf_res_compat_shader
};
 
struct vmw_cmdbuf_res_manager;
 
struct vmw_cursor_snooper {
struct drm_crtc *crtc;
size_t age;
172,8 → 184,8
 
struct vmw_marker_queue {
struct list_head head;
struct timespec lag;
struct timespec lag_time;
u64 lag;
u64 lag_time;
spinlock_t lock;
};
 
289,6 → 301,7
struct vmw_resource *ctx;
struct vmw_resource *res;
enum vmw_ctx_binding_type bt;
bool scrubbed;
union {
SVGA3dShaderType shader_type;
SVGA3dRenderTargetType rt_type;
335,7 → 348,7
struct drm_open_hash res_ht;
bool res_ht_initialized;
bool kernel; /**< is the call made from the kernel */
struct ttm_object_file *tfile;
struct vmw_fpriv *fp;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
353,6 → 366,7
bool needs_post_query_barrier;
struct vmw_resource *error_resource;
struct vmw_ctx_binding_state staged_bindings;
struct list_head staged_cmd_res;
};
 
struct vmw_legacy_display;
397,6 → 411,7
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
uint32_t max_mob_pages;
uint32_t max_mob_size;
uint32_t memory_size;
bool has_gmr;
bool has_mob;
497,6 → 512,11
uint32_t num_3d_resources;
 
/*
* Replace this with an rwsem as soon as we have down_xx_interruptible()
*/
struct ttm_lock reservation_sem;
 
/*
* Query processing. These members
* are protected by the cmdbuf mutex.
*/
586,6 → 606,8
 
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
960,6 → 982,9
vmw_context_binding_state_transfer(struct vmw_resource *res,
struct vmw_ctx_binding_state *cbs);
extern void vmw_context_binding_res_list_kill(struct list_head *head);
extern void vmw_context_binding_res_list_scrub(struct list_head *head);
extern int vmw_context_rebind_all(struct vmw_resource *ctx);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 
/*
* Surface management - vmwgfx_surface.c
979,6 → 1004,27
*/
 
extern const struct vmw_user_resource_conv *user_shader_converter;
extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key,
struct vmw_resource *res,
struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key,
struct list_head *list);
 
 
/**
* Inline helper functions
*/
/drivers/video/drm/vmwgfx/vmwgfx_execbuf.c
114,8 → 114,10
* persistent context binding tracker.
*/
if (unlikely(val->staged_bindings)) {
if (!backoff) {
vmw_context_binding_state_transfer
(val->res, val->staged_bindings);
}
kfree(val->staged_bindings);
val->staged_bindings = NULL;
}
178,6 → 180,44
}
 
/**
* vmw_resource_context_res_add - Put resources previously bound to a context on
* the validation list
*
* @dev_priv: Pointer to a device private structure
* @sw_context: Pointer to a software context used for this command submission
* @ctx: Pointer to the context resource
*
* This function puts all resources that were previously bound to @ctx on
* the resource validation list. This is part of the context state reemission.
*/
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
struct vmw_resource *ctx)
{
struct list_head *binding_list;
struct vmw_ctx_binding *entry;
int ret = 0;
struct vmw_resource *res;
 
mutex_lock(&dev_priv->binding_mutex);
binding_list = vmw_context_binding_list(ctx);
 
list_for_each_entry(entry, binding_list, ctx_list) {
res = vmw_resource_reference_unless_doomed(entry->bi.res);
if (unlikely(res == NULL))
continue;
 
ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
break;
}
 
mutex_unlock(&dev_priv->binding_mutex);
return ret;
}
 
/**
* vmw_resource_relocation_add - Add a relocation to the relocation list
*
* @list: Pointer to head of relocation list.
233,9 → 273,13
{
struct vmw_resource_relocation *rel;
 
list_for_each_entry(rel, list, head)
list_for_each_entry(rel, list, head) {
if (likely(rel->res != NULL))
cb[rel->offset] = rel->res->id;
else
cb[rel->offset] = SVGA_3D_CMD_NOP;
}
}
 
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
378,7 → 422,72
return 0;
}
 
 
/**
* vmw_cmd_res_reloc_add - Add a resource to a software context's
* relocation- and validation lists.
*
* @dev_priv: Pointer to a struct vmw_private identifying the device.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @id_loc: Pointer to where the id that needs translation is located.
* @res: Valid pointer to a struct vmw_resource.
* @p_val: If non null, a pointer to the struct vmw_resource_validate_node
* used for this resource is returned here.
*/
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
uint32_t *id_loc,
struct vmw_resource *res,
struct vmw_resource_val_node **p_val)
{
int ret;
struct vmw_resource_val_node *node;
 
*p_val = NULL;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
id_loc - sw_context->buf_start);
if (unlikely(ret != 0))
goto out_err;
 
ret = vmw_resource_val_add(sw_context, res, &node);
if (unlikely(ret != 0))
goto out_err;
 
if (res_type == vmw_res_context && dev_priv->has_mob &&
node->first_usage) {
 
/*
* Put contexts first on the list to be able to exit
* list traversal for contexts early.
*/
list_del(&node->head);
list_add(&node->head, &sw_context->resource_list);
 
ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
if (unlikely(ret != 0))
goto out_err;
node->staged_bindings =
kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
if (node->staged_bindings == NULL) {
DRM_ERROR("Failed to allocate context binding "
"information.\n");
goto out_err;
}
INIT_LIST_HEAD(&node->staged_bindings->list);
}
 
if (p_val)
*p_val = node;
 
out_err:
return ret;
}
 
 
/**
* vmw_cmd_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
386,14 → 495,17
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @converter: User-space visible type specific information.
* @id: Pointer to the location in the command buffer currently being
* @id_loc: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located.
* @p_val: Pointer to pointer to resource validation node. Populated
* on exit.
*/
static int vmw_cmd_res_check(struct vmw_private *dev_priv,
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
const struct vmw_user_resource_conv *converter,
uint32_t *id,
uint32_t *id_loc,
struct vmw_resource_val_node **p_val)
{
struct vmw_res_cache_entry *rcache =
402,7 → 514,7
struct vmw_resource_val_node *node;
int ret;
 
if (*id == SVGA3D_INVALID_ID) {
if (*id_loc == SVGA3D_INVALID_ID) {
if (p_val)
*p_val = NULL;
if (res_type == vmw_res_context) {
417,7 → 529,7
* resource
*/
 
if (likely(rcache->valid && *id == rcache->handle)) {
if (likely(rcache->valid && *id_loc == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
 
rcache->node->first_usage = false;
426,50 → 538,33
 
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
id - sw_context->buf_start);
id_loc - sw_context->buf_start);
}
 
ret = vmw_user_resource_lookup_handle(dev_priv,
sw_context->tfile,
*id,
sw_context->fp->tfile,
*id_loc,
converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
(unsigned) *id);
// dump_stack();
(unsigned) *id_loc);
// dump_stack();
return ret;
}
 
rcache->valid = true;
rcache->res = res;
rcache->handle = *id;
rcache->handle = *id_loc;
 
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
id - sw_context->buf_start);
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
res, &node);
if (unlikely(ret != 0))
goto out_no_reloc;
 
ret = vmw_resource_val_add(sw_context, res, &node);
if (unlikely(ret != 0))
goto out_no_reloc;
 
rcache->node = node;
if (p_val)
*p_val = node;
 
if (node->first_usage && res_type == vmw_res_context) {
node->staged_bindings =
kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
if (node->staged_bindings == NULL) {
DRM_ERROR("Failed to allocate context binding "
"information.\n");
goto out_no_reloc;
}
INIT_LIST_HEAD(&node->staged_bindings->list);
}
 
vmw_resource_unreference(&res);
return 0;
 
481,6 → 576,34
}
 
/**
* vmw_rebind_contexts - Rebind all resources previously bound to
* referenced contexts.
*
* @sw_context: Pointer to the software context.
*
* Rebind context binding points that have been scrubbed because of eviction.
*/
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
int ret;
 
list_for_each_entry(val, &sw_context->resource_list, head) {
if (unlikely(!val->staged_bindings))
break;
 
ret = vmw_context_rebind_all(val->res);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to rebind context.\n");
return ret;
}
}
 
return 0;
}
 
/**
* vmw_cmd_cid_check - Check a command header for valid context information.
*
* @dev_priv: Pointer to a device private structure.
496,7 → 619,7
{
struct vmw_cid_cmd {
SVGA3dCmdHeader header;
__le32 cid;
uint32_t cid;
} *cmd;
 
cmd = container_of(header, struct vmw_cid_cmd, header);
767,7 → 890,7
struct vmw_relocation *reloc;
int ret;
 
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
return -EINVAL;
828,7 → 951,7
struct vmw_relocation *reloc;
int ret;
 
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
return -EINVAL;
1108,8 → 1231,19
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
SVGA3dCmdSurfaceDMASuffix *suffix;
uint32_t bo_size;
 
cmd = container_of(header, struct vmw_dma_cmd, header);
suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
header->size - sizeof(*suffix));
 
/* Make sure device and verifier stay in sync. */
if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
DRM_ERROR("Invalid DMA suffix size.\n");
return -EINVAL;
}
 
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->dma.guest.ptr,
&vmw_bo);
1116,6 → 1250,17
if (unlikely(ret != 0))
return ret;
 
/* Make sure DMA doesn't cross BO boundaries. */
bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
DRM_ERROR("Invalid DMA offset.\n");
return -EINVAL;
}
 
bo_size -= cmd->dma.guest.ptr.offset;
if (unlikely(suffix->maximumOffset > bo_size))
suffix->maximumOffset = bo_size;
 
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->dma.host.sid,
NULL);
1478,6 → 1623,7
&cmd->body.sid, NULL);
}
 
#if 0
/**
* vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
* command
1494,7 → 1640,9
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
struct vmw_resource_val_node *ctx_node;
struct vmw_resource_val_node *ctx_node, *res_node = NULL;
struct vmw_ctx_bindinfo bi;
struct vmw_resource *res = NULL;
int ret;
 
cmd = container_of(header, struct vmw_set_shader_cmd,
1506,15 → 1654,34
if (unlikely(ret != 0))
return ret;
 
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo bi;
struct vmw_resource_val_node *res_node;
if (!dev_priv->has_mob)
return 0;
 
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
if (cmd->body.shid != SVGA3D_INVALID_ID) {
res = vmw_compat_shader_lookup
(vmw_context_res_man(ctx_node->res),
cmd->body.shid,
cmd->body.type);
 
if (!IS_ERR(res)) {
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
vmw_res_shader,
&cmd->body.shid, res,
&res_node);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
return ret;
}
}
 
if (!res_node) {
ret = vmw_cmd_res_check(dev_priv, sw_context,
vmw_res_shader,
user_shader_converter,
&cmd->body.shid, &res_node);
if (unlikely(ret != 0))
return ret;
}
 
bi.ctx = ctx_node->res;
bi.res = res_node ? res_node->res : NULL;
1522,10 → 1689,42
bi.i1.shader_type = cmd->body.type;
return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
}
#endif
 
/**
* vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
* command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_set_shader_const_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSetShaderConst body;
} *cmd;
int ret;
 
cmd = container_of(header, struct vmw_set_shader_const_cmd,
header);
 
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
NULL);
if (unlikely(ret != 0))
return ret;
 
if (dev_priv->has_mob)
header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
 
return 0;
}
 
#if 0
/**
* vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
* command
1551,6 → 1750,7
&cmd->body.shid, &cmd->body.mobid,
cmd->body.offsetInBytes);
}
#endif
 
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
1595,7 → 1795,7
return 0;
}
 
static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1634,14 → 1834,14
true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
true, true, false),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
true, true, false),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
true, true, false),
// VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
// true, false, false),
// VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
// true, false, false),
// VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
// true, false, false),
// VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
// true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1726,8 → 1926,8
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
true, false, true),
// VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
// true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
1792,6 → 1992,9
goto out_invalid;
 
entry = &vmw_cmd_entries[cmd_id];
if (unlikely(!entry->func))
goto out_invalid;
 
if (unlikely(!entry->user_allow && !sw_context->kernel))
goto out_privileged;
 
2129,6 → 2332,8
}
}
 
 
 
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands,
2172,7 → 2377,7
} else */
sw_context->kernel = true;
 
sw_context->tfile = vmw_fpriv(file_priv)->tfile;
sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
2189,16 → 2394,17
goto out_unlock;
sw_context->res_ht_initialized = true;
}
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
 
INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
if (unlikely(ret != 0))
goto out_err;
goto out_err_nores;
 
ret = vmw_resources_reserve(sw_context);
if (unlikely(ret != 0))
goto out_err;
goto out_err_nores;
 
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
if (unlikely(ret != 0))
2226,6 → 2432,12
goto out_err;
}
 
if (dev_priv->has_mob) {
ret = vmw_rebind_contexts(sw_context);
if (unlikely(ret != 0))
goto out_unlock_binding;
}
 
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
2277,6 → 2489,7
}
 
list_splice_init(&sw_context->resource_list, &resource_list);
vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
mutex_unlock(&dev_priv->cmdbuf_mutex);
 
/*
2290,10 → 2503,11
out_unlock_binding:
mutex_unlock(&dev_priv->binding_mutex);
out_err:
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
vmw_resource_list_unreserve(&sw_context->resource_list, true);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
vmw_resource_list_unreserve(&sw_context->resource_list, true);
vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
2302,6 → 2516,7
list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
mutex_unlock(&dev_priv->cmdbuf_mutex);
 
/*
2458,7 → 2673,6
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
// struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
 
/*
2475,7 → 2689,7
return -EINVAL;
}
 
// ret = ttm_read_lock(&vmaster->lock, true);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
 
2491,6 → 2705,6
// vmw_kms_cursor_post_execbuf(dev_priv);
 
out_unlock:
// ttm_read_unlock(&vmaster->lock);
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
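Throughout this revision the per-master lock (vmaster->lock) gives way to a single device-global rw lock, dev_priv->reservation_sem, taken for read around each ioctl body. A hedged sketch of the idiom as it appears above (the function name is illustrative):

	static int vmw_some_ioctl(struct vmw_private *dev_priv)
	{
		int ret;

		ret = ttm_read_lock(&dev_priv->reservation_sem, true /* interruptible */);
		if (unlikely(ret != 0))
			return ret;

		/* ... ioctl work ... */

		ttm_read_unlock(&dev_priv->reservation_sem);
		return 0;
	}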
/drivers/video/drm/vmwgfx/vmwgfx_fence.c
701,7 → 701,7
 
if (!arg->cookie_valid) {
arg->cookie_valid = 1;
arg->kernel_cookie = GetTimerTicks() + wait_timeout;
arg->kernel_cookie = jiffies + wait_timeout;
}
 
base = ttm_base_object_lookup(tfile, arg->handle);
714,7 → 714,7
 
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
 
timeout = GetTimerTicks();
timeout = jiffies;
if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
0 : -EBUSY);
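The fence, fifo and irq wait paths all make the same substitution: the KolibriOS GetTimerTicks() reads become the Linux jiffies counter, so the stock wraparound-safe time_after_eq() comparison applies unchanged. A minimal sketch of the timeout idiom, with a hypothetical condition_met() predicate standing in for the driver's wait condition:

	unsigned long deadline = jiffies + wait_timeout;

	for (;;) {
		if (condition_met())			/* hypothetical predicate */
			break;
		if (time_after_eq(jiffies, deadline))
			return -EBUSY;			/* device lockup / timeout */
		/* poll or sleep briefly */
	}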
/drivers/video/drm/vmwgfx/vmwgfx_fifo.c
106,8 → 106,6
uint32_t min;
uint32_t dummy;
 
ENTER();
 
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = KernelAlloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL))
167,7 → 165,6
vmw_marker_queue_init(&fifo->marker_queue);
 
int ret = 0; //vmw_fifo_send_fence(dev_priv, &dummy);
LEAVE();
return ret;
}
 
233,7 → 230,7
unsigned long timeout)
{
int ret = 0;
unsigned long end_jiffies = GetTimerTicks() + timeout;
unsigned long end_jiffies = jiffies + timeout;
// DEFINE_WAIT(__wait);
 
DRM_INFO("Fifo wait noirq.\n");
244,7 → 241,7
// TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (!vmw_fifo_is_full(dev_priv, bytes))
break;
if (time_after_eq(GetTimerTicks(), end_jiffies)) {
if (time_after_eq(jiffies, end_jiffies)) {
ret = -EBUSY;
DRM_ERROR("SVGA device lockup.\n");
break;
411,8 → 408,6
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
fifo_state->dynamic_buffer : fifo_state->static_buffer;
 
ENTER();
 
if (bytes < chunk_size)
chunk_size = bytes;
 
423,8 → 418,6
if (rest)
memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
rest);
LEAVE();
 
}
 
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
434,7 → 427,6
{
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
fifo_state->dynamic_buffer : fifo_state->static_buffer;
ENTER();
 
while (bytes > 0) {
iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
446,7 → 438,6
mb();
bytes -= sizeof(uint32_t);
}
LEAVE();
}
 
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
458,8 → 449,6
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 
// ENTER();
 
BUG_ON((bytes & 3) != 0);
BUG_ON(bytes > fifo_state->reserved_size);
 
495,8 → 484,6
// up_write(&fifo_state->rwsem);
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
mutex_unlock(&fifo_state->fifo_mutex);
 
// LEAVE();
}
 
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
/drivers/video/drm/vmwgfx/vmwgfx_gmrid_manager.c
47,6 → 47,7
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
uint32_t flags,
struct ttm_mem_reg *mem)
{
struct vmwgfx_gmrid_man *gman =
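vmw_gmrid_man_get_node() gains a flags argument because the TTM memory-manager callback signature grew a placement-flags word in this version; the vtable wiring itself is unchanged. A sketch, assuming the usual ttm_mem_type_manager_func layout:

	const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
		.init     = vmw_gmrid_man_init,
		.takedown = vmw_gmrid_man_takedown,
		.get_node = vmw_gmrid_man_get_node,	/* now (man, bo, placement, flags, mem) */
		.put_node = vmw_gmrid_man_put_node,
		.debug    = vmw_gmrid_man_debug,
	};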
/drivers/video/drm/vmwgfx/vmwgfx_irq.c
128,7 → 128,7
uint32_t count = 0;
uint32_t signal_seq;
int ret;
unsigned long end_jiffies = GetTimerTicks() + timeout;
unsigned long end_jiffies = jiffies + timeout;
bool (*wait_condition)(struct vmw_private *, uint32_t);
DEFINE_WAIT(__wait);
 
150,7 → 150,7
// TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (wait_condition(dev_priv, seqno))
break;
if (time_after_eq(GetTimerTicks(), end_jiffies)) {
if (time_after_eq(jiffies, end_jiffies)) {
DRM_ERROR("SVGA device lockup.\n");
break;
}
/drivers/video/drm/vmwgfx/vmwgfx_kms.c
118,7 → 118,7
*dst++ = 0;
}
for(i = 0; i < 64*(64-32); i++)
*image++ = 0;
*dst++ = 0;
 
cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
cmd->cursor.id = cpu_to_le32(0);
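The one-word fix above is a real bug fix: when zero-filling the bottom 64x32 rows of the 64x64 ARGB cursor, the old loop advanced the source pointer (image) instead of the destination (dst), leaving the tail of the cursor buffer uninitialised. The corrected loop:

	for (i = 0; i < 64 * (64 - 32); i++)
		*dst++ = 0;	/* was: *image++ = 0; */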
148,7 → 148,7
kmap_offset = 0;
kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return -EINVAL;
200,7 → 200,7
* can do this since the caller in the drm core doesn't check anything
* which is protected by any locks.
*/
mutex_unlock(&crtc->mutex);
drm_modeset_unlock(&crtc->mutex);
drm_modeset_lock_all(dev_priv->dev);
 
/* A lot of the code assumes this */
265,7 → 265,7
ret = 0;
out:
drm_modeset_unlock_all(dev_priv->dev);
mutex_lock(&crtc->mutex);
drm_modeset_lock(&crtc->mutex, NULL);
 
return ret;
}
286,7 → 286,7
* can do this since the caller in the drm core doesn't check anything
* which is protected by any locks.
*/
mutex_unlock(&crtc->mutex);
drm_modeset_unlock(&crtc->mutex);
drm_modeset_lock_all(dev_priv->dev);
 
vmw_cursor_update_position(dev_priv, shown,
294,7 → 294,7
du->cursor_y + du->hotspot_y);
 
drm_modeset_unlock_all(dev_priv->dev);
mutex_lock(&crtc->mutex);
drm_modeset_lock(&crtc->mutex, NULL);
 
return 0;
}
356,7 → 356,7
kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
kmap_num = (64*64*4) >> PAGE_SHIFT;
 
ret = ttm_bo_reserve(bo, true, false, false, 0);
ret = ttm_bo_reserve(bo, true, false, false, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return;
478,7 → 478,7
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
if (crtc->fb != &framebuffer->base)
if (crtc->primary->fb != &framebuffer->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
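With universal planes, the legacy crtc->fb pointer lives on the CRTC's primary plane, hence the mechanical crtc->fb → crtc->primary->fb substitutions here and in vmwgfx_scrn.c below. An illustrative accessor (not part of the driver):

	static inline struct drm_framebuffer *vmw_crtc_fb(struct drm_crtc *crtc)
	{
		return crtc->primary->fb;	/* was: crtc->fb */
	}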
606,7 → 606,6
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
struct drm_clip_rect norect;
621,7 → 620,7
 
drm_modeset_lock_all(dev_priv->dev);
 
ret = ttm_read_lock(&vmaster->lock, true);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) {
drm_modeset_unlock_all(dev_priv->dev);
return ret;
642,7 → 641,7
flags, color,
clips, num_clips, inc, NULL);
 
ttm_read_unlock(&vmaster->lock);
ttm_read_unlock(&dev_priv->reservation_sem);
 
drm_modeset_unlock_all(dev_priv->dev);
 
893,7 → 892,7
 
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
if (crtc->fb != &framebuffer->base)
if (crtc->primary->fb != &framebuffer->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
964,7 → 963,6
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
struct drm_clip_rect norect;
972,7 → 970,7
 
drm_modeset_lock_all(dev_priv->dev);
 
ret = ttm_read_lock(&vmaster->lock, true);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) {
drm_modeset_unlock_all(dev_priv->dev);
return ret;
999,7 → 997,7
clips, num_clips, increment, NULL);
}
 
ttm_read_unlock(&vmaster->lock);
ttm_read_unlock(&dev_priv->reservation_sem);
 
drm_modeset_unlock_all(dev_priv->dev);
 
1257,7 → 1255,7
 
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
if (crtc->fb != &vfb->base)
if (crtc->primary->fb != &vfb->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
1394,7 → 1392,7
 
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
if (crtc->fb != &vfb->base)
if (crtc->primary->fb != &vfb->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
1480,8 → 1478,6
struct drm_device *dev = dev_priv->dev;
int ret;
 
ENTER();
 
drm_mode_config_init(dev);
dev->mode_config.funcs = &vmw_kms_funcs;
dev->mode_config.min_width = 1;
1491,11 → 1487,7
dev->mode_config.max_height = 8192;
 
ret = vmw_kms_init_screen_object_display(dev_priv);
// if (ret) /* Fallback */
// (void)vmw_kms_init_legacy_display_system(dev_priv);
 
LEAVE();
 
return 0;
}
 
1520,7 → 1512,6
{
struct drm_vmw_cursor_bypass_arg *arg = data;
struct vmw_display_unit *du;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
int ret = 0;
 
1538,13 → 1529,12
return 0;
}
 
obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
crtc = drm_crtc_find(dev, arg->crtc_id);
if (!crtc) {
ret = -ENOENT;
goto out;
}
 
crtc = obj_to_crtc(obj);
du = vmw_crtc_to_du(crtc);
 
du->hotspot_x = arg->xhot;
1744,7 → 1734,7
uint32_t page_flip_flags)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct drm_framebuffer *old_fb = crtc->fb;
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
struct drm_file *file_priv;
struct vmw_fence_obj *fence = NULL;
1762,7 → 1752,7
if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
return -EINVAL;
 
crtc->fb = fb;
crtc->primary->fb = fb;
 
/* do a full screen dirty update */
clips.x1 = clips.y1 = 0;
1802,7 → 1792,7
return ret;
 
out_no_fence:
crtc->fb = old_fb;
crtc->primary->fb = old_fb;
return ret;
}
#endif
2026,7 → 2016,7
if (du->pref_mode)
list_move(&du->pref_mode->head, &connector->probed_modes);
 
drm_mode_connector_list_update(connector);
drm_mode_connector_list_update(connector, true);
 
return 1;
}
2045,7 → 2035,6
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_update_layout_arg *arg =
(struct drm_vmw_update_layout_arg *)data;
struct vmw_master *vmaster = vmw_master(file_priv->master);
void __user *user_rects;
struct drm_vmw_rect *rects;
unsigned rects_size;
2053,7 → 2042,7
int i;
struct drm_mode_config *mode_config = &dev->mode_config;
 
ret = ttm_read_lock(&vmaster->lock, true);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
 
2095,7 → 2084,7
out_free:
kfree(rects);
out_unlock:
ttm_read_unlock(&vmaster->lock);
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
#endif
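crtc->mutex is a struct drm_modeset_lock in this DRM version, so the raw mutex_lock()/mutex_unlock() calls in the cursor paths become drm_modeset_lock()/drm_modeset_unlock(); a NULL acquire context keeps the old blocking behaviour. The drop-and-retake pattern used above, in sketch form:

	drm_modeset_unlock(&crtc->mutex);	/* was: mutex_unlock(&crtc->mutex) */
	drm_modeset_lock_all(dev_priv->dev);

	/* ... work requiring all modeset locks ... */

	drm_modeset_unlock_all(dev_priv->dev);
	drm_modeset_lock(&crtc->mutex, NULL);	/* NULL ctx: plain blocking lock */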
/drivers/video/drm/vmwgfx/vmwgfx_marker.c
27,19 → 27,18
 
 
#include "vmwgfx_drv.h"
#include <linux/time.h>
 
struct vmw_marker {
struct list_head head;
uint32_t seqno;
struct timespec submitted;
u64 submitted;
};
 
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
INIT_LIST_HEAD(&queue->head);
queue->lag = ns_to_timespec(0);
// getrawmonotonic(&queue->lag_time);
queue->lag = 0;
queue->lag_time = ktime_get_raw_ns();
spin_lock_init(&queue->lock);
}
 
63,7 → 62,7
return -ENOMEM;
 
marker->seqno = seqno;
// getrawmonotonic(&marker->submitted);
marker->submitted = ktime_get_raw_ns();
spin_lock(&queue->lock);
list_add_tail(&marker->head, &queue->head);
spin_unlock(&queue->lock);
75,14 → 74,14
uint32_t signaled_seqno)
{
struct vmw_marker *marker, *next;
struct timespec now;
bool updated = false;
u64 now;
 
spin_lock(&queue->lock);
// getrawmonotonic(&now);
now = ktime_get_raw_ns();
 
if (list_empty(&queue->head)) {
// queue->lag = ns_to_timespec(0);
queue->lag = 0;
queue->lag_time = now;
updated = true;
goto out_unlock;
92,7 → 91,7
if (signaled_seqno - marker->seqno > (1 << 30))
continue;
 
// queue->lag = timespec_sub(now, marker->submitted);
queue->lag = now - marker->submitted;
queue->lag_time = now;
updated = true;
list_del(&marker->head);
105,27 → 104,13
return (updated) ? 0 : -EBUSY;
}
 
static struct timespec vmw_timespec_add(struct timespec t1,
struct timespec t2)
static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
{
t1.tv_sec += t2.tv_sec;
t1.tv_nsec += t2.tv_nsec;
if (t1.tv_nsec >= 1000000000L) {
t1.tv_sec += 1;
t1.tv_nsec -= 1000000000L;
}
u64 now;
 
return t1;
}
 
static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
{
struct timespec now;
 
spin_lock(&queue->lock);
// getrawmonotonic(&now);
// queue->lag = vmw_timespec_add(queue->lag,
// timespec_sub(now, queue->lag_time));
now = ktime_get_raw_ns();
queue->lag += now - queue->lag_time;
queue->lag_time = now;
spin_unlock(&queue->lock);
return queue->lag;
135,11 → 120,9
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
uint32_t us)
{
struct timespec lag, cond;
u64 cond = (u64) us * NSEC_PER_USEC;
 
cond = ns_to_timespec((s64) us * 1000);
lag = vmw_fifo_lag(queue);
return (timespec_compare(&lag, &cond) < 1);
return vmw_fifo_lag(queue) <= cond;
}
 
int vmw_wait_lag(struct vmw_private *dev_priv,
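The marker queue now measures fifo lag in raw monotonic nanoseconds (u64 from ktime_get_raw_ns()) instead of struct timespec, which is why the hand-rolled timespec add/compare helpers above disappear: the accounting reduces to integer arithmetic. A condensed sketch of the three steps:

	/* on submit: */
	marker->submitted = ktime_get_raw_ns();

	/* on retire, once signaled_seqno has passed marker->seqno: */
	u64 now = ktime_get_raw_ns();
	queue->lag      = now - marker->submitted;
	queue->lag_time = now;

	/* vmw_lag_lt(): is the fifo at most `us` microseconds behind? */
	bool within = vmw_fifo_lag(queue) <= (u64)us * NSEC_PER_USEC;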
/drivers/video/drm/vmwgfx/vmwgfx_mob.c
134,6 → 134,7
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
 
187,9 → 188,10
 
bo = otable->page_table->pt_bo;
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
 
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for OTable "
"takedown.\n");
} else {
memset(cmd, 0, sizeof(*cmd));
cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
cmd->header.size = sizeof(cmd->body);
199,6 → 201,7
cmd->body.validSizeInBytes = 0;
cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
 
if (bo) {
int ret;
562,11 → 565,12
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for Memory "
"Object unbinding.\n");
}
} else {
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
cmd->header.size = sizeof(cmd->body);
cmd->body.mobid = mob->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
if (bo) {
vmw_fence_single_bo(bo, NULL);
ttm_bo_unreserve(bo);
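Both vmwgfx_mob.c hunks close the same hole: vmw_fifo_reserve() can return NULL, and the OTable-takedown and MOB-unbind commands were previously emitted (or half-skipped by a stray brace) regardless, while the setup path returned without an error code. The resulting shape of all three sites:

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space.\n");
		/* the setup path additionally sets ret = -ENOMEM and bails out */
	} else {
		/* ... fill *cmd ... */
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}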
/drivers/video/drm/vmwgfx/vmwgfx_resource.c
122,7 → 122,7
if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base;
 
ttm_bo_reserve(bo, false, false, false, 0);
ttm_bo_reserve(bo, false, false, false, NULL);
if (!list_empty(&res->mob_head) &&
res->func->unbind != NULL) {
struct ttm_validate_buffer val_buf;
136,8 → 136,12
vmw_dmabuf_unreference(&res->backup);
}
 
if (likely(res->hw_destroy != NULL))
if (likely(res->hw_destroy != NULL)) {
res->hw_destroy(res);
mutex_lock(&dev_priv->binding_mutex);
vmw_context_binding_res_list_kill(&res->binding_head);
mutex_unlock(&dev_priv->binding_mutex);
}
 
id = res->id;
if (res->res_free != NULL)
418,8 → 422,7
INIT_LIST_HEAD(&vmw_bo->res_list);
 
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
(user) ? ttm_bo_type_device :
ttm_bo_type_kernel, placement,
ttm_bo_type_device, placement,
0, interruptible,
NULL, acc_size, NULL, bo_free);
return ret;
532,8 → 535,13
return -EPERM;
 
vmw_user_bo = vmw_user_dma_buffer(bo);
return (vmw_user_bo->prime.base.tfile == tfile ||
vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
 
/* Check that the caller has opened the object. */
if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
return 0;
 
DRM_ERROR("Could not grant buffer access.\n");
return -EPERM;
}
 
/**
553,7 → 561,7
{
struct ttm_buffer_object *bo = &user_bo->dma.base;
bool existed;
int ret=0;
int ret;
 
if (flags & drm_vmw_synccpu_allow_cs) {
struct ttm_bo_device *bdev = bo->bdev;
671,10 → 679,9
struct drm_vmw_dmabuf_rep *rep = &arg->rep;
struct vmw_dma_buffer *dma_buf;
uint32_t handle;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
 
ret = ttm_read_lock(&vmaster->lock, true);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
 
691,7 → 698,7
vmw_dmabuf_unreference(&dma_buf);
 
out_no_dmabuf:
ttm_read_unlock(&vmaster->lock);
ttm_read_unlock(&dev_priv->reservation_sem);
 
return ret;
}
806,7 → 813,7
container_of(res, struct vmw_user_stream, stream.res);
struct vmw_private *dev_priv = res->dev_priv;
 
// ttm_base_object_kfree(stream, base);
ttm_base_object_kfree(stream, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_stream_size);
}
870,7 → 877,6
struct vmw_resource *tmp;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
 
/*
881,7 → 887,7
if (unlikely(vmw_user_stream_size == 0))
vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
 
ret = ttm_read_lock(&vmaster->lock, true);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
 
929,7 → 935,7
out_err:
vmw_resource_unreference(&res);
out_unlock:
ttm_read_unlock(&vmaster->lock);
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
#endif
979,7 → 985,7
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
 
ret = ttm_read_lock(&vmaster->lock, true);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
 
991,7 → 997,7
 
vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
ttm_read_unlock(&vmaster->lock);
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
#endif
/drivers/video/drm/vmwgfx/vmwgfx_scrn.c
100,7 → 100,7
/**
* Send the fifo command to create a screen.
*/
int vmw_sou_fifo_create(struct vmw_private *dev_priv,
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
struct vmw_screen_object_unit *sou,
uint32_t x, uint32_t y,
struct drm_display_mode *mode)
114,10 → 114,8
SVGAScreenObject obj;
} *cmd;
 
// BUG_ON(!sou->buffer);
BUG_ON(!sou->buffer);
 
ENTER();
 
fifo_size = sizeof(*cmd);
cmd = vmw_fifo_reserve(dev_priv, fifo_size);
/* The hardware has hung, nothing we can do about it here. */
143,10 → 141,7
}
 
/* Ok to assume that buffer is pinned in vram */
// vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
 
cmd->obj.backingStore.ptr.gmrId = SVGA_GMR_FRAMEBUFFER;
cmd->obj.backingStore.ptr.offset = 0;
vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
cmd->obj.backingStore.pitch = mode->hdisplay * 4;
 
vmw_fifo_commit(dev_priv, fifo_size);
153,8 → 148,6
 
sou->defined = true;
 
LEAVE();
 
return 0;
}
 
314,7 → 307,7
 
connector->encoder = NULL;
encoder->crtc = NULL;
crtc->fb = NULL;
crtc->primary->fb = NULL;
crtc->x = 0;
crtc->y = 0;
crtc->enabled = false;
375,7 → 368,7
 
connector->encoder = NULL;
encoder->crtc = NULL;
crtc->fb = NULL;
crtc->primary->fb = NULL;
crtc->x = 0;
crtc->y = 0;
crtc->enabled = false;
388,7 → 381,7
connector->encoder = encoder;
encoder->crtc = crtc;
crtc->mode = *mode;
crtc->fb = fb;
crtc->primary->fb = fb;
crtc->x = set->x;
crtc->y = set->y;
crtc->enabled = true;
447,8 → 440,6
struct drm_encoder *encoder;
struct drm_crtc *crtc;
 
ENTER();
 
sou = kzalloc(sizeof(*sou), GFP_KERNEL);
if (!sou)
return -ENOMEM;
476,6 → 467,8
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
 
(void) drm_connector_register(connector);
 
drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
 
drm_mode_crtc_set_gamma_size(crtc, 256);
483,7 → 476,7
drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
LEAVE();
 
return 0;
}
 
492,8 → 485,6
struct drm_device *dev = dev_priv->dev;
int i, ret;
 
ENTER();
 
if (dev_priv->sou_priv) {
DRM_INFO("sou system already on\n");
return -EINVAL;
523,7 → 514,6
 
DRM_INFO("Screen objects system initialized\n");
 
LEAVE();
return 0;
 
err_vblank_cleanup:
579,7 → 569,7
BUG_ON(!sou->base.is_implicit);
 
dev_priv->sou_priv->implicit_fb =
vmw_framebuffer_to_vfb(sou->base.crtc.fb);
vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
}
 
#include "bitmap.h"
639,8 → 629,6
 
bool ret = false;
 
ENTER();
 
// dbgprintf("width %d height %d vrefresh %d\n",
// reqmode->width, reqmode->height, reqmode->freq);
 
718,6 → 706,7
vmw_write(dev_priv,SVGA_REG_WIDTH, mode->hdisplay);
vmw_write(dev_priv,SVGA_REG_HEIGHT, mode->vdisplay);
vmw_write(dev_priv,SVGA_REG_BITS_PER_PIXEL, 32);
os_display->select_cursor(os_display->cursor);
ret = 0;
#endif
if (ret == 0)
737,6 → 726,5
os_display->width, os_display->height, crtc);
}
 
LEAVE();
return ret;
};
/drivers/video/drm/vmwgfx/vmwgfx_shader.c
29,6 → 29,9
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"
 
#define VMW_COMPAT_SHADER_HT_ORDER 12
 
#if 0
struct vmw_shader {
struct vmw_resource res;
SVGA3dShaderType type;
40,6 → 43,50
struct vmw_shader shader;
};
 
/**
* enum vmw_compat_shader_state - Staging state for compat shaders
*/
enum vmw_compat_shader_state {
VMW_COMPAT_COMMITED,
VMW_COMPAT_ADD,
VMW_COMPAT_DEL
};
 
/**
* struct vmw_compat_shader - Metadata for compat shaders.
*
* @handle: The TTM handle of the guest backed shader.
* @tfile: The struct ttm_object_file the guest backed shader is registered
* with.
* @hash: Hash item for lookup.
* @head: List head for staging lists or the compat shader manager list.
* @state: Staging state.
*
* The structure is protected by the cmdbuf lock.
*/
struct vmw_compat_shader {
u32 handle;
struct ttm_object_file *tfile;
struct drm_hash_item hash;
struct list_head head;
enum vmw_compat_shader_state state;
};
 
/**
* struct vmw_compat_shader_manager - Compat shader manager.
*
* @shaders: Hash table containing staged and committed compat shaders
* @list: List of committed shaders.
* @dev_priv: Pointer to a device private structure.
*
* @shaders and @list are protected by the cmdbuf mutex for now.
*/
struct vmw_compat_shader_manager {
struct drm_open_hash shaders;
struct list_head list;
struct vmw_private *dev_priv;
};
 
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);
52,8 → 99,6
struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);
 
static uint64_t vmw_user_shader_size;
 
static const struct vmw_user_resource_conv user_shader_conv = {
.object_type = VMW_RES_SHADER,
.base_obj_to_res = vmw_user_shader_base_to_res,
258,7 → 303,7
return 0;
 
mutex_lock(&dev_priv->binding_mutex);
vmw_context_binding_res_list_kill(&res->binding_head);
vmw_context_binding_res_list_scrub(&res->binding_head);
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
296,9 → 341,9
container_of(res, struct vmw_user_shader, shader.res);
struct vmw_private *dev_priv = res->dev_priv;
 
// ttm_base_object_kfree(ushader, base);
// ttm_mem_global_free(vmw_mem_glob(dev_priv),
// vmw_user_shader_size);
ttm_base_object_kfree(ushader, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_shader_size);
}
 
/**
325,18 → 370,84
TTM_REF_USAGE);
}
 
#if 0
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type,
struct ttm_object_file *tfile,
u32 *handle)
{
struct vmw_user_shader *ushader;
struct vmw_resource *res, *tmp;
int ret;
 
/*
* Approximate idr memory usage with 128 bytes. It will be limited
* by maximum number of shaders anyway.
*/
if (unlikely(vmw_user_shader_size == 0))
vmw_user_shader_size =
ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
 
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_shader_size,
false, true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader "
"creation.\n");
goto out;
}
 
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
if (unlikely(ushader == NULL)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_shader_size);
ret = -ENOMEM;
goto out;
}
 
res = &ushader->shader.res;
ushader->base.shareable = false;
ushader->base.tfile = NULL;
 
/*
* From here on, the destructor takes over resource freeing.
*/
 
ret = vmw_gb_shader_init(dev_priv, res, shader_size,
offset, shader_type, buffer,
vmw_user_shader_free);
if (unlikely(ret != 0))
goto out;
 
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &ushader->base, false,
VMW_RES_SHADER,
&vmw_user_shader_base_release, NULL);
 
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
goto out_err;
}
 
if (handle)
*handle = ushader->base.hash.key;
out_err:
vmw_resource_unreference(&res);
out:
return ret;
}
 
 
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_shader *ushader;
struct vmw_resource *res;
struct vmw_resource *tmp;
struct drm_vmw_shader_create_arg *arg =
(struct drm_vmw_shader_create_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_dma_buffer *buffer = NULL;
SVGA3dShaderType shader_type;
int ret;
374,70 → 485,164
goto out_bad_arg;
}
 
/*
* Approximate idr memory usage with 128 bytes. It will be limited
* by maximum number of shaders anyway.
*/
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
goto out_bad_arg;
 
if (unlikely(vmw_user_shader_size == 0))
vmw_user_shader_size = ttm_round_pot(sizeof(*ushader))
+ 128;
ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
shader_type, tfile, &arg->shader_handle);
 
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
vmw_dmabuf_unreference(&buffer);
return ret;
}
 
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_shader_size,
false, true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader"
" creation.\n");
goto out_unlock;
/**
* vmw_compat_shader_id_ok - Check whether a compat shader user key and
* shader type are within valid bounds.
*
* @user_key: User space id of the shader.
* @shader_type: Shader type.
*
* Returns true if valid, false if not.
*/
static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
}
 
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
if (unlikely(ushader == NULL)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_shader_size);
ret = -ENOMEM;
goto out_unlock;
/**
* vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
*
* @user_key: User space id of the shader.
* @shader_type: Shader type.
*
* Returns a hash key suitable for a command buffer managed resource
* manager hash table.
*/
static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
return user_key | (shader_type << 20);
}
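The two helpers above define the compat-shader namespace: the low 20 bits of the hash key carry the user-space id and the next 4 bits the shader type, so one table serves both lookups. A worked example:

	/* user_key = 0x12345 (<= 0xFFFFF), shader_type = 3 (< 16):
	 *
	 *   vmw_compat_shader_key(0x12345, 3) == 0x12345 | (3 << 20) == 0x312345
	 *
	 * vmw_compat_shader_id_ok() rejects any key above 20 bits or any
	 * type >= 16 before the packing is attempted. */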
 
res = &ushader->shader.res;
ushader->base.shareable = false;
ushader->base.tfile = NULL;
/**
* vmw_compat_shader_remove - Stage a compat shader for removal.
*
* @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
* unique to the shader type.
* @shader_type: Shader type.
* @list: Caller's list of staged command buffer resource actions.
*/
int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
u32 user_key, SVGA3dShaderType shader_type,
struct list_head *list)
{
if (!vmw_compat_shader_id_ok(user_key, shader_type))
return -EINVAL;
 
/*
* From here on, the destructor takes over resource freeing.
return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
vmw_compat_shader_key(user_key,
shader_type),
list);
}
 
/**
* vmw_compat_shader_add - Create a compat shader and stage it for addition
* as a command buffer managed resource.
*
* @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
* unique to the shader type.
* @bytecode: Pointer to the bytecode of the shader.
* @shader_type: Shader type.
* @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
* to be created with.
* @list: Caller's list of staged command buffer resource actions.
*
*/
int vmw_compat_shader_add(struct vmw_private *dev_priv,
struct vmw_cmdbuf_res_manager *man,
u32 user_key, const void *bytecode,
SVGA3dShaderType shader_type,
size_t size,
struct list_head *list)
{
struct vmw_dma_buffer *buf;
struct ttm_bo_kmap_obj map;
bool is_iomem;
int ret;
struct vmw_resource *res;
 
ret = vmw_gb_shader_init(dev_priv, res, arg->size,
arg->offset, shader_type, buffer,
vmw_user_shader_free);
if (!vmw_compat_shader_id_ok(user_key, shader_type))
return -EINVAL;
 
/* Allocate and pin a DMA buffer */
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (unlikely(buf == NULL))
return -ENOMEM;
 
ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
true, vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
goto out_unlock;
goto out;
 
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &ushader->base, false,
VMW_RES_SHADER,
&vmw_user_shader_base_release, NULL);
ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
if (unlikely(ret != 0))
goto no_reserve;
 
/* Map and copy shader bytecode. */
ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
&map);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
goto out_err;
ttm_bo_unreserve(&buf->base);
goto no_reserve;
}
 
arg->shader_handle = ushader->base.hash.key;
out_err:
memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
WARN_ON(is_iomem);
 
ttm_bo_kunmap(&map);
ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
WARN_ON(ret != 0);
ttm_bo_unreserve(&buf->base);
 
res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
if (IS_ERR(res)) {
ret = PTR_ERR(res);
goto no_reserve;
}
 
ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
vmw_compat_shader_key(user_key, shader_type),
res, list);
vmw_resource_unreference(&res);
out_unlock:
ttm_read_unlock(&vmaster->lock);
out_bad_arg:
vmw_dmabuf_unreference(&buffer);
 
no_reserve:
vmw_dmabuf_unreference(&buf);
out:
return ret;
}
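Note that vmw_compat_shader_add()/remove() publish nothing directly: changes are staged on the caller's list and only become visible through vmw_cmdbuf_res_commit(), or are undone by vmw_cmdbuf_res_revert(), exactly as vmw_execbuf_process() does with sw_context->staged_cmd_res earlier in this revision. A hedged sketch of the lifecycle:

	struct list_head staged;

	INIT_LIST_HEAD(&staged);
	ret = vmw_compat_shader_add(dev_priv, man, user_key, bytecode,
				    shader_type, size, &staged);
	/* ... stage further adds/removes for the same submission ... */
	if (ret == 0)
		vmw_cmdbuf_res_commit(&staged);	/* publish atomically */
	else
		vmw_cmdbuf_res_revert(&staged);	/* roll the staging back */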
 
/**
* vmw_compat_shader_lookup - Look up a compat shader
*
* @man: Pointer to the command buffer managed resource manager identifying
* the shader namespace.
* @user_key: The user space id of the shader.
* @shader_type: The shader type.
*
* Returns a refcounted pointer to a struct vmw_resource if the shader was
* found. An error pointer otherwise.
*/
struct vmw_resource *
vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
u32 user_key,
SVGA3dShaderType shader_type)
{
if (!vmw_compat_shader_id_ok(user_key, shader_type))
return ERR_PTR(-EINVAL);
 
return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
vmw_compat_shader_key(user_key,
shader_type));
}
#endif
/drivers/video/drm/vmwgfx/vmwgfx_surface.c
36,6 → 36,7
* @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata.
* @size: TTM accounting size for the surface.
* @master: master of the creating client. Used for security check.
*/
struct vmw_user_surface {
struct ttm_prime_object prime;
681,7 → 682,6
struct vmw_surface_offset *cur_offset;
uint32_t num_sizes;
uint32_t size;
struct vmw_master *vmaster = vmw_master(file_priv->master);
const struct svga3d_surface_desc *desc;
 
if (unlikely(vmw_user_surface_size == 0))
707,7 → 707,7
return -EINVAL;
}
 
ret = ttm_read_lock(&vmaster->lock, true);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
 
828,7 → 828,7
rep->sid = user_srf->prime.base.hash.key;
vmw_resource_unreference(&res);
 
ttm_read_unlock(&vmaster->lock);
ttm_read_unlock(&dev_priv->reservation_sem);
return 0;
out_no_copy:
kfree(srf->offsets);
839,7 → 839,8
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
ttm_read_unlock(&vmaster->lock);
ttm_read_unlock(&dev_priv->reservation_sem);
 
return ret;
}
 
864,27 → 865,16
struct vmw_user_surface *user_srf;
struct drm_vmw_size __user *user_sizes;
struct ttm_base_object *base;
int ret = -EINVAL;
int ret;
 
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
if (unlikely(base == NULL)) {
DRM_ERROR("Could not find surface to reference.\n");
return -EINVAL;
}
ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
req->handle_type, &base);
if (unlikely(ret != 0))
return ret;
 
if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
goto out_bad_resource;
 
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
 
ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
TTM_REF_USAGE, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_no_reference;
}
 
rep->flags = srf->flags;
rep->format = srf->format;
memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
892,11 → 882,12
rep->size_addr;
 
if (user_sizes)
ret = copy_to_user(user_sizes, srf->sizes,
srf->num_sizes * sizeof(*srf->sizes));
ret = copy_to_user(user_sizes, &srf->base_size,
sizeof(srf->base_size));
if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
ret = -EFAULT;
}
out_bad_resource:
/drivers/video/drm/vmwgfx/vmwgfx_ttm_glue.c
53,7 → 53,6
 
static void vmw_ttm_mem_global_release(struct drm_global_reference *ref)
{
// ttm_mem_global_release(ref->object);
}
 
int vmw_ttm_global_init(struct vmw_private *dev_priv)