Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig                 |    9
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile                |    3
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_reg.h            |  259
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_escape.h           |    2
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_overlay.h          |   22
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_reg.h              |  304
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c         |   73
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c         |  322
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c            |  279
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h            |  229
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c        |  870
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c             |   76
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c          | 1135
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h          |  113
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c           |  156
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c            |   81
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c  |   46
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c          |  273
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c            |  187
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c            | 1174
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h            |   50
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c            |  310
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_marker.c         |  171
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c        |  188
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c       | 1035
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c           |  537
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c       |    5
27 files changed, 6488 insertions, 1421 deletions
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 30ad13344f7..794ff67c570 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -7,7 +7,8 @@ config DRM_VMWGFX
select FB_CFB_IMAGEBLIT
select DRM_TTM
help
- KMS enabled DRM driver for SVGA2 virtual hardware.
-
- If unsure say n. The compiled module will be
- called vmwgfx.ko
+ Choose this option if you would like to run 3D acceleration
+ in a VMware virtual machine.
+ This is a KMS enabled DRM driver for the VMware SVGA2
+ virtual hardware.
+ The compiled module will be called "vmwgfx.ko".
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index c9281a1b1d3..586869c8c11 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,7 @@ ccflags-y := -Iinclude/drm
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
- vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o
+ vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
+ vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index 77cb4533100..d0e085ee824 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -57,7 +57,8 @@ typedef enum {
SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
- SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS65_B1,
+ SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1),
+ SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1,
} SVGA3dHardwareVersion;
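
For reference, the new WS8_B1 entry above resolves to a single 32-bit value. A minimal sketch, assuming SVGA3D_MAKE_HWVERSION packs the major revision into the high 16 bits and the minor revision into the low bits (the macro itself is defined earlier in this header and not shown in this hunk):

/* Illustrative only: assumed packing used by SVGA3D_MAKE_HWVERSION. */
#define EXAMPLE_MAKE_HWVERSION(major, minor)  (((major) << 16) | ((minor) & 0xFF))

/* SVGA3D_HWVERSION_WS8_B1 == EXAMPLE_MAKE_HWVERSION(2, 1) == 0x00020001 */
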
/*
@@ -67,7 +68,8 @@ typedef enum {
typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
#define SVGA3D_NUM_CLIPPLANES 6
#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8
-
+#define SVGA3D_MAX_CONTEXT_IDS 256
+#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
/*
* Surface formats.
@@ -79,76 +81,91 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
*/
typedef enum SVGA3dSurfaceFormat {
- SVGA3D_FORMAT_INVALID = 0,
+ SVGA3D_FORMAT_INVALID = 0,
- SVGA3D_X8R8G8B8 = 1,
- SVGA3D_A8R8G8B8 = 2,
+ SVGA3D_X8R8G8B8 = 1,
+ SVGA3D_A8R8G8B8 = 2,
- SVGA3D_R5G6B5 = 3,
- SVGA3D_X1R5G5B5 = 4,
- SVGA3D_A1R5G5B5 = 5,
- SVGA3D_A4R4G4B4 = 6,
+ SVGA3D_R5G6B5 = 3,
+ SVGA3D_X1R5G5B5 = 4,
+ SVGA3D_A1R5G5B5 = 5,
+ SVGA3D_A4R4G4B4 = 6,
- SVGA3D_Z_D32 = 7,
- SVGA3D_Z_D16 = 8,
- SVGA3D_Z_D24S8 = 9,
- SVGA3D_Z_D15S1 = 10,
+ SVGA3D_Z_D32 = 7,
+ SVGA3D_Z_D16 = 8,
+ SVGA3D_Z_D24S8 = 9,
+ SVGA3D_Z_D15S1 = 10,
- SVGA3D_LUMINANCE8 = 11,
- SVGA3D_LUMINANCE4_ALPHA4 = 12,
- SVGA3D_LUMINANCE16 = 13,
- SVGA3D_LUMINANCE8_ALPHA8 = 14,
+ SVGA3D_LUMINANCE8 = 11,
+ SVGA3D_LUMINANCE4_ALPHA4 = 12,
+ SVGA3D_LUMINANCE16 = 13,
+ SVGA3D_LUMINANCE8_ALPHA8 = 14,
- SVGA3D_DXT1 = 15,
- SVGA3D_DXT2 = 16,
- SVGA3D_DXT3 = 17,
- SVGA3D_DXT4 = 18,
- SVGA3D_DXT5 = 19,
+ SVGA3D_DXT1 = 15,
+ SVGA3D_DXT2 = 16,
+ SVGA3D_DXT3 = 17,
+ SVGA3D_DXT4 = 18,
+ SVGA3D_DXT5 = 19,
- SVGA3D_BUMPU8V8 = 20,
- SVGA3D_BUMPL6V5U5 = 21,
- SVGA3D_BUMPX8L8V8U8 = 22,
- SVGA3D_BUMPL8V8U8 = 23,
+ SVGA3D_BUMPU8V8 = 20,
+ SVGA3D_BUMPL6V5U5 = 21,
+ SVGA3D_BUMPX8L8V8U8 = 22,
+ SVGA3D_BUMPL8V8U8 = 23,
- SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
- SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
+ SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
+ SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
- SVGA3D_A2R10G10B10 = 26,
+ SVGA3D_A2R10G10B10 = 26,
/* signed formats */
- SVGA3D_V8U8 = 27,
- SVGA3D_Q8W8V8U8 = 28,
- SVGA3D_CxV8U8 = 29,
+ SVGA3D_V8U8 = 27,
+ SVGA3D_Q8W8V8U8 = 28,
+ SVGA3D_CxV8U8 = 29,
/* mixed formats */
- SVGA3D_X8L8V8U8 = 30,
- SVGA3D_A2W10V10U10 = 31,
+ SVGA3D_X8L8V8U8 = 30,
+ SVGA3D_A2W10V10U10 = 31,
- SVGA3D_ALPHA8 = 32,
+ SVGA3D_ALPHA8 = 32,
/* Single- and dual-component floating point formats */
- SVGA3D_R_S10E5 = 33,
- SVGA3D_R_S23E8 = 34,
- SVGA3D_RG_S10E5 = 35,
- SVGA3D_RG_S23E8 = 36,
+ SVGA3D_R_S10E5 = 33,
+ SVGA3D_R_S23E8 = 34,
+ SVGA3D_RG_S10E5 = 35,
+ SVGA3D_RG_S23E8 = 36,
/*
* Any surface can be used as a buffer object, but SVGA3D_BUFFER is
* the most efficient format to use when creating new surfaces
* expressly for index or vertex data.
*/
- SVGA3D_BUFFER = 37,
- SVGA3D_Z_D24X8 = 38,
+ SVGA3D_BUFFER = 37,
+
+ SVGA3D_Z_D24X8 = 38,
- SVGA3D_V16U16 = 39,
+ SVGA3D_V16U16 = 39,
- SVGA3D_G16R16 = 40,
- SVGA3D_A16B16G16R16 = 41,
+ SVGA3D_G16R16 = 40,
+ SVGA3D_A16B16G16R16 = 41,
/* Packed Video formats */
- SVGA3D_UYVY = 42,
- SVGA3D_YUY2 = 43,
+ SVGA3D_UYVY = 42,
+ SVGA3D_YUY2 = 43,
+
+ /* Planar video formats */
+ SVGA3D_NV12 = 44,
+
+ /* Video format with alpha */
+ SVGA3D_AYUV = 45,
+
+ SVGA3D_BC4_UNORM = 108,
+ SVGA3D_BC5_UNORM = 111,
+
+ /* Advanced D3D9 depth formats. */
+ SVGA3D_Z_DF16 = 118,
+ SVGA3D_Z_DF24 = 119,
+ SVGA3D_Z_D24S8_INT = 120,
SVGA3D_FORMAT_MAX
} SVGA3dSurfaceFormat;
@@ -414,10 +431,20 @@ typedef enum {
SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
+ SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */
+ SVGA3D_RS_LINEAA = 98, /* SVGA3dBool */
+ SVGA3D_RS_LINEWIDTH = 99, /* float */
SVGA3D_RS_MAX
} SVGA3dRenderStateName;
typedef enum {
+ SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0,
+ SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1,
+ SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2,
+ SVGA3D_TRANSPARENCYANTIALIAS_MAX
+} SVGA3dTransparencyAntialiasType;
+
+typedef enum {
SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
@@ -728,10 +755,10 @@ typedef enum {
SVGA3D_TEX_FILTER_NEAREST = 1,
SVGA3D_TEX_FILTER_LINEAR = 2,
SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
- SVGA3D_TEX_FILTER_FLATCUBIC = 4, // Deprecated, not implemented
- SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, // Deprecated, not implemented
- SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, // Not currently implemented
- SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, // Not currently implemented
+ SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */
+ SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */
+ SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */
+ SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */
SVGA3D_TEX_FILTER_MAX
} SVGA3dTextureFilter;
@@ -799,19 +826,19 @@ typedef enum {
typedef enum {
SVGA3D_DECLUSAGE_POSITION = 0,
- SVGA3D_DECLUSAGE_BLENDWEIGHT, // 1
- SVGA3D_DECLUSAGE_BLENDINDICES, // 2
- SVGA3D_DECLUSAGE_NORMAL, // 3
- SVGA3D_DECLUSAGE_PSIZE, // 4
- SVGA3D_DECLUSAGE_TEXCOORD, // 5
- SVGA3D_DECLUSAGE_TANGENT, // 6
- SVGA3D_DECLUSAGE_BINORMAL, // 7
- SVGA3D_DECLUSAGE_TESSFACTOR, // 8
- SVGA3D_DECLUSAGE_POSITIONT, // 9
- SVGA3D_DECLUSAGE_COLOR, // 10
- SVGA3D_DECLUSAGE_FOG, // 11
- SVGA3D_DECLUSAGE_DEPTH, // 12
- SVGA3D_DECLUSAGE_SAMPLE, // 13
+ SVGA3D_DECLUSAGE_BLENDWEIGHT, /* 1 */
+ SVGA3D_DECLUSAGE_BLENDINDICES, /* 2 */
+ SVGA3D_DECLUSAGE_NORMAL, /* 3 */
+ SVGA3D_DECLUSAGE_PSIZE, /* 4 */
+ SVGA3D_DECLUSAGE_TEXCOORD, /* 5 */
+ SVGA3D_DECLUSAGE_TANGENT, /* 6 */
+ SVGA3D_DECLUSAGE_BINORMAL, /* 7 */
+ SVGA3D_DECLUSAGE_TESSFACTOR, /* 8 */
+ SVGA3D_DECLUSAGE_POSITIONT, /* 9 */
+ SVGA3D_DECLUSAGE_COLOR, /* 10 */
+ SVGA3D_DECLUSAGE_FOG, /* 11 */
+ SVGA3D_DECLUSAGE_DEPTH, /* 12 */
+ SVGA3D_DECLUSAGE_SAMPLE, /* 13 */
SVGA3D_DECLUSAGE_MAX
} SVGA3dDeclUsage;
@@ -819,10 +846,10 @@ typedef enum {
SVGA3D_DECLMETHOD_DEFAULT = 0,
SVGA3D_DECLMETHOD_PARTIALU,
SVGA3D_DECLMETHOD_PARTIALV,
- SVGA3D_DECLMETHOD_CROSSUV, // Normal
+ SVGA3D_DECLMETHOD_CROSSUV, /* Normal */
SVGA3D_DECLMETHOD_UV,
- SVGA3D_DECLMETHOD_LOOKUP, // Lookup a displacement map
- SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, // Lookup a pre-sampled displacement map
+ SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */
+ SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */
} SVGA3dDeclMethod;
typedef enum {
@@ -930,7 +957,6 @@ typedef enum {
} SVGA3dCubeFace;
typedef enum {
- SVGA3D_SHADERTYPE_COMPILED_DX8 = 0,
SVGA3D_SHADERTYPE_VS = 1,
SVGA3D_SHADERTYPE_PS = 2,
SVGA3D_SHADERTYPE_MAX
@@ -968,12 +994,18 @@ typedef enum {
} SVGA3dTransferType;
/*
- * The maximum number vertex arrays we're guaranteed to support in
+ * The maximum number of vertex arrays we're guaranteed to support in
* SVGA_3D_CMD_DRAWPRIMITIVES.
*/
#define SVGA3D_MAX_VERTEX_ARRAYS 32
/*
+ * The maximum number of primitive ranges we're guaranteed to support
+ * in SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
+
+/*
* Identifiers for commands in the command FIFO.
*
* IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
@@ -990,7 +1022,7 @@ typedef enum {
#define SVGA_3D_CMD_LEGACY_BASE 1000
#define SVGA_3D_CMD_BASE 1040
-#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0
+#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0 /* Deprecated */
#define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1
#define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2
#define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3
@@ -1008,7 +1040,7 @@ typedef enum {
#define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15
#define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16
#define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17
-#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 // Deprecated
+#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 /* Deprecated */
#define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19
#define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20
#define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21
@@ -1018,9 +1050,13 @@ typedef enum {
#define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25
#define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26
#define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27
-#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 // Deprecated
+#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 /* Deprecated */
#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
-#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 30
+#define SVGA_3D_CMD_SURFACE_DEFINE_V2 SVGA_3D_CMD_BASE + 30
+#define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31
+#define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40
+#define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41
+#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 42
#define SVGA_3D_CMD_FUTURE_MAX 2000
@@ -1031,9 +1067,9 @@ typedef enum {
typedef struct {
union {
struct {
- uint16 function; // SVGA3dFogFunction
- uint8 type; // SVGA3dFogType
- uint8 base; // SVGA3dFogBase
+ uint16 function; /* SVGA3dFogFunction */
+ uint8 type; /* SVGA3dFogType */
+ uint8 base; /* SVGA3dFogBase */
};
uint32 uintValue;
};
@@ -1109,6 +1145,8 @@ typedef enum {
SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
+ SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
+ SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
} SVGA3dSurfaceFlags;
typedef
@@ -1121,6 +1159,12 @@ struct {
uint32 sid;
SVGA3dSurfaceFlags surfaceFlags;
SVGA3dSurfaceFormat format;
+ /*
+ * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
+ * structures must have the same value of numMipLevels field.
+ * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
+ * numMipLevels set to 0.
+ */
SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
/*
* Followed by an SVGA3dSize structure for each mip level in each face.
@@ -1135,6 +1179,31 @@ struct {
typedef
struct {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ /*
+ * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
+ * structures must have the same value of numMipLevels field.
+ * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
+ * numMipLevels set to 0.
+ */
+ SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
+ uint32 multisampleCount;
+ SVGA3dTextureFilter autogenFilter;
+ /*
+ * Followed by an SVGA3dSize structure for each mip level in each face.
+ *
+ * A note on surface sizes: Sizes are always specified in pixels,
+ * even if the true surface size is not a multiple of the minimum
+ * block size of the surface's format. For example, a 3x3x1 DXT1
+ * compressed texture would actually be stored as a 4x4x1 image in
+ * memory.
+ */
+} SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
+
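
The note on surface sizes above can be made concrete with a small worked example: DXT1 stores 4x4 texel blocks, so mip dimensions are rounded up to the block size before computing storage. A hedged sketch; the helper name and the 8-bytes-per-DXT1-block figure are illustrative and not part of this header:

/* Hypothetical helper: bytes needed to store one DXT1 mip level. */
static inline uint32 example_dxt1_mip_bytes(uint32 width, uint32 height)
{
        uint32 blocks_x = (width  + 3) / 4;    /* round up to 4x4 blocks */
        uint32 blocks_y = (height + 3) / 4;

        return blocks_x * blocks_y * 8;        /* 8 bytes per DXT1 block */
}

/* A 3x3x1 DXT1 mip therefore occupies a single 4x4 block: 8 bytes. */
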
+typedef
+struct {
uint32 sid;
} SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
@@ -1474,10 +1543,12 @@ struct {
* SVGA3dCmdDrawPrimitives structure. In order,
* they are:
*
- * 1. SVGA3dVertexDecl, quantity 'numVertexDecls'
- * 2. SVGA3dPrimitiveRange, quantity 'numRanges'
+ * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
+ * SVGA3D_MAX_VERTEX_ARRAYS;
+ * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
+ * SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
* 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
- * the frequency divisor for this the corresponding vertex decl)
+ * the frequency divisor for the corresponding vertex decl).
*/
} SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */
@@ -1671,6 +1742,12 @@ struct {
/* Clipping: zero or more SVGASignedRects follow */
} SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
+typedef
+struct {
+ uint32 sid;
+ SVGA3dTextureFilter filter;
+} SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
+
/*
* Capability query index.
@@ -1774,6 +1851,32 @@ typedef enum {
SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
+ SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
+ SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
+ SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
+ SVGA3D_DEVCAP_SUPERSAMPLE = 73,
+ SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
+ SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
+ SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
+
+ /*
+ * This is the maximum number of SVGA context IDs that the guest
+ * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
+ */
+ SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
+
+ /*
+ * This is the maximum number of SVGA surface IDs that the guest
+ * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
+ */
+ SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
+
+ SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
+
+ SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM = 82,
+ SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83,
/*
* Don't add new caps into the previous section; the values in this
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/svga_escape.h
index 7b85e9b8c85..8e8d9682e01 100644
--- a/drivers/gpu/drm/vmwgfx/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/svga_escape.h
@@ -75,7 +75,7 @@
*/
#define SVGA_ESCAPE_VMWARE_HINT 0x00030000
-#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 // Deprecated
+#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 /* Deprecated */
typedef
struct {
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/svga_overlay.h
index f753d73c14b..f38416fcb04 100644
--- a/drivers/gpu/drm/vmwgfx/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/svga_overlay.h
@@ -38,9 +38,9 @@
* Video formats we support
*/
-#define VMWARE_FOURCC_YV12 0x32315659 // 'Y' 'V' '1' '2'
-#define VMWARE_FOURCC_YUY2 0x32595559 // 'Y' 'U' 'Y' '2'
-#define VMWARE_FOURCC_UYVY 0x59565955 // 'U' 'Y' 'V' 'Y'
+#define VMWARE_FOURCC_YV12 0x32315659 /* 'Y' 'V' '1' '2' */
+#define VMWARE_FOURCC_YUY2 0x32595559 /* 'Y' 'U' 'Y' '2' */
+#define VMWARE_FOURCC_UYVY 0x59565955 /* 'U' 'Y' 'V' 'Y' */
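
These FOURCC values are simply the four ASCII characters packed little-endian ('Y' = 0x59 ends up in the lowest byte). A minimal sketch of an equivalent constructor; the macro name is illustrative and not part of this header:

/* Illustrative only: pack four characters into a little-endian FOURCC. */
#define EXAMPLE_FOURCC(a, b, c, d) \
        ((uint32)(a) | ((uint32)(b) << 8) | ((uint32)(c) << 16) | ((uint32)(d) << 24))

/* EXAMPLE_FOURCC('Y', 'V', '1', '2') == 0x32315659 == VMWARE_FOURCC_YV12 */
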
typedef enum {
SVGA_OVERLAY_FORMAT_INVALID = 0,
@@ -68,7 +68,7 @@ struct SVGAEscapeVideoSetRegs {
uint32 streamId;
} header;
- // May include zero or more items.
+ /* May include zero or more items. */
struct {
uint32 registerId;
uint32 value;
@@ -134,12 +134,12 @@ struct {
*/
static inline bool
-VMwareVideoGetAttributes(const SVGAOverlayFormat format, // IN
- uint32 *width, // IN / OUT
- uint32 *height, // IN / OUT
- uint32 *size, // OUT
- uint32 *pitches, // OUT (optional)
- uint32 *offsets) // OUT (optional)
+VMwareVideoGetAttributes(const SVGAOverlayFormat format, /* IN */
+ uint32 *width, /* IN / OUT */
+ uint32 *height, /* IN / OUT */
+ uint32 *size, /* OUT */
+ uint32 *pitches, /* OUT (optional) */
+ uint32 *offsets) /* OUT (optional) */
{
int tmp;
@@ -198,4 +198,4 @@ VMwareVideoGetAttributes(const SVGAOverlayFormat format, // IN
return true;
}
-#endif // _SVGA_OVERLAY_H_
+#endif /* _SVGA_OVERLAY_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 1b96c2ec07d..01f63cb4967 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -39,6 +39,15 @@
#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
/*
+ * SVGA_REG_ENABLE bit definitions.
+ */
+#define SVGA_REG_ENABLE_DISABLE 0
+#define SVGA_REG_ENABLE_ENABLE 1
+#define SVGA_REG_ENABLE_HIDE 2
+#define SVGA_REG_ENABLE_ENABLE_HIDE (SVGA_REG_ENABLE_ENABLE |\
+ SVGA_REG_ENABLE_HIDE)
+
+/*
* Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
* cursor bypass mode. This is still supported, but no new guest
* drivers should use it.
@@ -158,7 +167,9 @@ enum {
SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
- SVGA_REG_TOP = 46, /* Must be 1 more than the last register */
+ SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */
+ SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
+ SVGA_REG_TOP = 48, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
@@ -265,7 +276,7 @@ enum {
* possible.
*/
#define SVGA_GMR_NULL ((uint32) -1)
-#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) // Guest Framebuffer (GFB)
+#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) /* Guest Framebuffer (GFB) */
typedef
struct SVGAGuestMemDescriptor {
@@ -306,13 +317,35 @@ struct SVGAGMRImageFormat {
struct {
uint32 bitsPerPixel : 8;
uint32 colorDepth : 8;
- uint32 reserved : 16; // Must be zero
+ uint32 reserved : 16; /* Must be zero */
};
uint32 value;
};
} SVGAGMRImageFormat;
+typedef
+struct SVGAGuestImage {
+ SVGAGuestPtr ptr;
+
+ /*
+ * A note on interpretation of pitch: This value of pitch is the
+ * number of bytes between vertically adjacent image
+ * blocks. Normally this is the number of bytes between the first
+ * pixel of two adjacent scanlines. With compressed textures,
+ * however, this may represent the number of bytes between
+ * compression blocks rather than between rows of pixels.
+ *
+ * XXX: Compressed textures currently must be tightly packed in guest memory.
+ *
+ * If the image is 1-dimensional, pitch is ignored.
+ *
+ * If 'pitch' is zero, the SVGA3D device calculates a pitch value
+ * assuming each row of blocks is tightly packed.
+ */
+ uint32 pitch;
+} SVGAGuestImage;
+
/*
* SVGAColorBGRX --
*
@@ -328,7 +361,7 @@ struct SVGAColorBGRX {
uint32 b : 8;
uint32 g : 8;
uint32 r : 8;
- uint32 x : 8; // Unused
+ uint32 x : 8; /* Unused */
};
uint32 value;
@@ -370,23 +403,34 @@ struct SVGASignedPoint {
* Note the holes in the bitfield. Missing bits have been deprecated,
* and must not be reused. Those capabilities will never be reported
* by new versions of the SVGA device.
+ *
+ * SVGA_CAP_GMR2 --
+ * Provides asynchronous commands to define and remap guest memory
+ * regions. Adds device registers SVGA_REG_GMRS_MAX_PAGES and
+ * SVGA_REG_MEMORY_SIZE.
+ *
+ * SVGA_CAP_SCREEN_OBJECT_2 --
+ * Allow screen object support, and require backing stores from the
+ * guest for each screen object.
*/
#define SVGA_CAP_NONE 0x00000000
#define SVGA_CAP_RECT_COPY 0x00000002
#define SVGA_CAP_CURSOR 0x00000020
-#define SVGA_CAP_CURSOR_BYPASS 0x00000040 // Legacy (Use Cursor Bypass 3 instead)
-#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 // Legacy (Use Cursor Bypass 3 instead)
+#define SVGA_CAP_CURSOR_BYPASS 0x00000040 /* Legacy (Use Cursor Bypass 3 instead) */
+#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 /* Legacy (Use Cursor Bypass 3 instead) */
#define SVGA_CAP_8BIT_EMULATION 0x00000100
#define SVGA_CAP_ALPHA_CURSOR 0x00000200
#define SVGA_CAP_3D 0x00004000
#define SVGA_CAP_EXTENDED_FIFO 0x00008000
-#define SVGA_CAP_MULTIMON 0x00010000 // Legacy multi-monitor support
+#define SVGA_CAP_MULTIMON 0x00010000 /* Legacy multi-monitor support */
#define SVGA_CAP_PITCHLOCK 0x00020000
#define SVGA_CAP_IRQMASK 0x00040000
-#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 // Legacy multi-monitor support
+#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 /* Legacy multi-monitor support */
#define SVGA_CAP_GMR 0x00100000
#define SVGA_CAP_TRACES 0x00200000
+#define SVGA_CAP_GMR2 0x00400000
+#define SVGA_CAP_SCREEN_OBJECT_2 0x00800000
/*
@@ -431,7 +475,7 @@ enum {
SVGA_FIFO_CAPABILITIES = 4,
SVGA_FIFO_FLAGS,
- // Valid with SVGA_FIFO_CAP_FENCE:
+ /* Valid with SVGA_FIFO_CAP_FENCE: */
SVGA_FIFO_FENCE,
/*
@@ -444,33 +488,47 @@ enum {
* extended FIFO.
*/
- // Valid if exists (i.e. if extended FIFO enabled):
+ /* Valid if exists (i.e. if extended FIFO enabled): */
SVGA_FIFO_3D_HWVERSION, /* See SVGA3dHardwareVersion in svga3d_reg.h */
- // Valid with SVGA_FIFO_CAP_PITCHLOCK:
+ /* Valid with SVGA_FIFO_CAP_PITCHLOCK: */
SVGA_FIFO_PITCHLOCK,
- // Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3:
+ /* Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: */
SVGA_FIFO_CURSOR_ON, /* Cursor bypass 3 show/hide register */
SVGA_FIFO_CURSOR_X, /* Cursor bypass 3 x register */
SVGA_FIFO_CURSOR_Y, /* Cursor bypass 3 y register */
SVGA_FIFO_CURSOR_COUNT, /* Incremented when any of the other 3 change */
SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
- // Valid with SVGA_FIFO_CAP_RESERVE:
+ /* Valid with SVGA_FIFO_CAP_RESERVE: */
SVGA_FIFO_RESERVED, /* Bytes past NEXT_CMD with real contents */
/*
- * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT:
+ * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2:
*
* By default this is SVGA_ID_INVALID, to indicate that the cursor
* coordinates are specified relative to the virtual root. If this
* is set to a specific screen ID, cursor position is reinterpreted
- * as a signed offset relative to that screen's origin. This is the
- * only way to place the cursor on a non-rooted screen.
+ * as a signed offset relative to that screen's origin.
*/
SVGA_FIFO_CURSOR_SCREEN_ID,
/*
+ * Valid with SVGA_FIFO_CAP_DEAD
+ *
+ * An arbitrary value written by the host; drivers should not use it.
+ */
+ SVGA_FIFO_DEAD,
+
+ /*
+ * Valid with SVGA_FIFO_CAP_3D_HWVERSION_REVISED:
+ *
+ * Contains 3D HWVERSION (see SVGA3dHardwareVersion in svga3d_reg.h)
+ * on platforms that can enforce graphics resource limits.
+ */
+ SVGA_FIFO_3D_HWVERSION_REVISED,
+
+ /*
* XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
* registers, but this must be done carefully and with judicious use of
* capability bits, since comparisons based on SVGA_FIFO_MIN aren't
@@ -508,7 +566,7 @@ enum {
* sets SVGA_FIFO_MIN high enough to leave room for them.
*/
- // Valid if register exists:
+ /* Valid if register exists: */
SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
SVGA_FIFO_FENCE_GOAL, /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
SVGA_FIFO_BUSY, /* See "FIFO Synchronization Registers" */
@@ -709,6 +767,37 @@ enum {
*
* - When a screen is resized, either using Screen Object commands or
* legacy multimon registers, its contents are preserved.
+ *
+ * SVGA_FIFO_CAP_GMR2 --
+ *
+ * Provides new commands to define and remap guest memory regions (GMR).
+ *
+ * New 2D commands:
+ * DEFINE_GMR2, REMAP_GMR2.
+ *
+ * SVGA_FIFO_CAP_3D_HWVERSION_REVISED --
+ *
+ * Indicates new register SVGA_FIFO_3D_HWVERSION_REVISED exists.
+ * This register may replace SVGA_FIFO_3D_HWVERSION on platforms
+ * that enforce graphics resource limits. This allows the platform
+ * to clear SVGA_FIFO_3D_HWVERSION and disable 3D in legacy guest
+ * drivers that do not limit their resources.
+ *
+ * Note this is an alias to SVGA_FIFO_CAP_GMR2 because these indicators
+ * are codependent (and thus we use a single capability bit).
+ *
+ * SVGA_FIFO_CAP_SCREEN_OBJECT_2 --
+ *
+ * Modifies the DEFINE_SCREEN command to include a guest provided
+ * backing store in GMR memory and the bytesPerLine for the backing
+ * store. This capability requires the use of a backing store when
+ * creating screen objects. However if SVGA_FIFO_CAP_SCREEN_OBJECT
+ * is present then backing stores are optional.
+ *
+ * SVGA_FIFO_CAP_DEAD --
+ *
+ * Drivers should not use this cap bit. This cap bit can not be
+ * reused since some hosts already expose it.
*/
#define SVGA_FIFO_CAP_NONE 0
@@ -720,6 +809,10 @@ enum {
#define SVGA_FIFO_CAP_ESCAPE (1<<5)
#define SVGA_FIFO_CAP_RESERVE (1<<6)
#define SVGA_FIFO_CAP_SCREEN_OBJECT (1<<7)
+#define SVGA_FIFO_CAP_GMR2 (1<<8)
+#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED SVGA_FIFO_CAP_GMR2
+#define SVGA_FIFO_CAP_SCREEN_OBJECT_2 (1<<9)
+#define SVGA_FIFO_CAP_DEAD (1<<10)
/*
@@ -730,7 +823,7 @@ enum {
#define SVGA_FIFO_FLAG_NONE 0
#define SVGA_FIFO_FLAG_ACCELFRONT (1<<0)
-#define SVGA_FIFO_FLAG_RESERVED (1<<31) // Internal use only
+#define SVGA_FIFO_FLAG_RESERVED (1<<31) /* Internal use only */
/*
* FIFO reservation sentinel value
@@ -763,22 +856,22 @@ enum {
SVGA_VIDEO_DATA_OFFSET,
SVGA_VIDEO_FORMAT,
SVGA_VIDEO_COLORKEY,
- SVGA_VIDEO_SIZE, // Deprecated
+ SVGA_VIDEO_SIZE, /* Deprecated */
SVGA_VIDEO_WIDTH,
SVGA_VIDEO_HEIGHT,
SVGA_VIDEO_SRC_X,
SVGA_VIDEO_SRC_Y,
SVGA_VIDEO_SRC_WIDTH,
SVGA_VIDEO_SRC_HEIGHT,
- SVGA_VIDEO_DST_X, // Signed int32
- SVGA_VIDEO_DST_Y, // Signed int32
+ SVGA_VIDEO_DST_X, /* Signed int32 */
+ SVGA_VIDEO_DST_Y, /* Signed int32 */
SVGA_VIDEO_DST_WIDTH,
SVGA_VIDEO_DST_HEIGHT,
SVGA_VIDEO_PITCH_1,
SVGA_VIDEO_PITCH_2,
SVGA_VIDEO_PITCH_3,
- SVGA_VIDEO_DATA_GMRID, // Optional, defaults to SVGA_GMR_FRAMEBUFFER
- SVGA_VIDEO_DST_SCREEN_ID, // Optional, defaults to virtual coords (SVGA_ID_INVALID)
+ SVGA_VIDEO_DATA_GMRID, /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
+ SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */
SVGA_VIDEO_NUM_REGS
};
@@ -829,15 +922,51 @@ typedef struct SVGAOverlayUnit {
* compatibility. New flags can be added, and the struct may grow,
* but existing fields must retain their meaning.
*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2 are required fields of
+ * a SVGAGuestPtr that is used to back the screen contents. This
+ * memory must come from the GFB. The guest is not allowed to
+ * access the memory and doing so will have undefined results. The
+ * backing store is required to be page aligned and the size is
+ * padded to the next page boundary. The number of pages is:
+ * (bytesPerLine * size.width * 4 + PAGE_SIZE - 1) / PAGE_SIZE
+ *
+ * The pitch in the backingStore is required to be at least large
+ * enough to hold a 32bbp scanline. It is recommended that the
+ * driver pad bytesPerLine for a potential performance win.
+ *
+ * The cloneCount field is treated as a hint from the guest that
+ * the user wants this display to be cloned, cloneCount times. A
+ * value of zero means no cloning should happen.
+ */
+
+#define SVGA_SCREEN_MUST_BE_SET (1 << 0) /* Must be set or results undefined */
+#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
+#define SVGA_SCREEN_IS_PRIMARY (1 << 1) /* Guest considers this screen to be 'primary' */
+#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */
+
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When the screen is
+ * deactivated the base layer is defined to lose all contents and
+ * become black. When a screen is deactivated, the backing store is
+ * optional; when set, backingPtr and bytesPerLine will be ignored.
*/
+#define SVGA_SCREEN_DEACTIVATE (1 << 3)
-#define SVGA_SCREEN_HAS_ROOT (1 << 0) // Screen is present in the virtual coord space
-#define SVGA_SCREEN_IS_PRIMARY (1 << 1) // Guest considers this screen to be 'primary'
-#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) // Guest is running a fullscreen app here
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When this flag is set
+ * the screen contents will be output as all black to the user,
+ * though the base layer contents are preserved. The screen base layer
+ * can still be read and written to as normal, though no visible
+ * effect will be seen by the user. When the flag is changed the
+ * screen will be blanked or redrawn to the current contents as needed
+ * without any extra commands from the driver. This flag only has an
+ * effect when the screen is not deactivated.
+ */
+#define SVGA_SCREEN_BLANKING (1 << 4)
typedef
struct SVGAScreenObject {
- uint32 structSize; // sizeof(SVGAScreenObject)
+ uint32 structSize; /* sizeof(SVGAScreenObject) */
uint32 id;
uint32 flags;
struct {
@@ -847,7 +976,14 @@ struct SVGAScreenObject {
struct {
int32 x;
int32 y;
- } root; // Only used if SVGA_SCREEN_HAS_ROOT is set.
+ } root;
+
+ /*
+ * Added and required by SVGA_FIFO_CAP_SCREEN_OBJECT_2, optional
+ * with SVGA_FIFO_CAP_SCREEN_OBJECT.
+ */
+ SVGAGuestImage backingStore;
+ uint32 cloneCount;
} SVGAScreenObject;
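
To show how the new backingStore and cloneCount fields fit together with the flags above, here is a hedged sketch of how a guest driver might emit a Screen Object 2 definition. It uses the driver's vmw_fifo_reserve()/vmw_fifo_commit() helpers and the vmw_bo_get_guest_ptr() helper added by this patch; the anonymous size struct members and the field values are illustrative assumptions, not a verbatim excerpt of the driver:

/* Hedged sketch: define screen 0 backed by a pinned buffer object. */
static void example_define_screen(struct vmw_private *dev_priv,
                                  struct ttm_buffer_object *backing_bo,
                                  uint32 width, uint32 height)
{
        struct {
                uint32 cmd;
                SVGAFifoCmdDefineScreen obj;
        } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL))
                return;

        memset(cmd, 0, sizeof(*cmd));
        cmd->cmd = SVGA_CMD_DEFINE_SCREEN;
        cmd->obj.screen.structSize = sizeof(SVGAScreenObject);
        cmd->obj.screen.id = 0;
        cmd->obj.screen.flags = SVGA_SCREEN_MUST_BE_SET | SVGA_SCREEN_IS_PRIMARY;
        cmd->obj.screen.size.width = width;
        cmd->obj.screen.size.height = height;
        cmd->obj.screen.root.x = 0;
        cmd->obj.screen.root.y = 0;
        cmd->obj.screen.backingStore.pitch = width * 4;  /* 32bpp scanlines */
        vmw_bo_get_guest_ptr(backing_bo, &cmd->obj.screen.backingStore.ptr);
        cmd->obj.screen.cloneCount = 0;                  /* no cloning hint */

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
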
@@ -885,6 +1021,8 @@ typedef enum {
SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38,
SVGA_CMD_ANNOTATION_FILL = 39,
SVGA_CMD_ANNOTATION_COPY = 40,
+ SVGA_CMD_DEFINE_GMR2 = 41,
+ SVGA_CMD_REMAP_GMR2 = 42,
SVGA_CMD_MAX
} SVGAFifoCmdId;
@@ -920,7 +1058,7 @@ typedef enum {
*/
typedef
-struct {
+struct SVGAFifoCmdUpdate {
uint32 x;
uint32 y;
uint32 width;
@@ -939,7 +1077,7 @@ struct {
*/
typedef
-struct {
+struct SVGAFifoCmdRectCopy {
uint32 srcX;
uint32 srcY;
uint32 destX;
@@ -963,14 +1101,14 @@ struct {
*/
typedef
-struct {
- uint32 id; // Reserved, must be zero.
+struct SVGAFifoCmdDefineCursor {
+ uint32 id; /* Reserved, must be zero. */
uint32 hotspotX;
uint32 hotspotY;
uint32 width;
uint32 height;
- uint32 andMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
- uint32 xorMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
+ uint32 andMaskDepth; /* Value must be 1 or equal to BITS_PER_PIXEL */
+ uint32 xorMaskDepth; /* Value must be 1 or equal to BITS_PER_PIXEL */
/*
* Followed by scanline data for AND mask, then XOR mask.
* Each scanline is padded to a 32-bit boundary.
@@ -992,8 +1130,8 @@ struct {
*/
typedef
-struct {
- uint32 id; // Reserved, must be zero.
+struct SVGAFifoCmdDefineAlphaCursor {
+ uint32 id; /* Reserved, must be zero. */
uint32 hotspotX;
uint32 hotspotY;
uint32 width;
@@ -1015,7 +1153,7 @@ struct {
*/
typedef
-struct {
+struct SVGAFifoCmdUpdateVerbose {
uint32 x;
uint32 y;
uint32 width;
@@ -1040,13 +1178,13 @@ struct {
#define SVGA_ROP_COPY 0x03
typedef
-struct {
- uint32 color; // In the same format as the GFB
+struct SVGAFifoCmdFrontRopFill {
+ uint32 color; /* In the same format as the GFB */
uint32 x;
uint32 y;
uint32 width;
uint32 height;
- uint32 rop; // Must be SVGA_ROP_COPY
+ uint32 rop; /* Must be SVGA_ROP_COPY */
} SVGAFifoCmdFrontRopFill;
@@ -1083,7 +1221,7 @@ struct {
*/
typedef
-struct {
+struct SVGAFifoCmdEscape {
uint32 nsid;
uint32 size;
/* followed by 'size' bytes of data */
@@ -1113,12 +1251,12 @@ struct {
* registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
+ * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
struct {
- SVGAScreenObject screen; // Variable-length according to version
+ SVGAScreenObject screen; /* Variable-length according to version */
} SVGAFifoCmdDefineScreen;
@@ -1129,7 +1267,7 @@ struct {
* re-use.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
+ * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
@@ -1182,7 +1320,7 @@ struct {
* GMRFB.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
+ * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
@@ -1219,7 +1357,7 @@ struct {
* SVGA_CMD_ANNOTATION_* commands for details.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
+ * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
@@ -1267,7 +1405,7 @@ struct {
* the time any subsequent FENCE commands are reached.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
+ * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
@@ -1302,7 +1440,7 @@ struct {
* user's display is being remoted over a network connection.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
+ * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
@@ -1334,7 +1472,7 @@ struct {
* undefined.
*
* Availability:
- * SVGA_FIFO_CAP_SCREEN_OBJECT
+ * SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
typedef
@@ -1343,4 +1481,72 @@ struct {
uint32 srcScreenId;
} SVGAFifoCmdAnnotationCopy;
+
+/*
+ * SVGA_CMD_DEFINE_GMR2 --
+ *
+ * Define guest memory region v2. See the description of GMRs above.
+ *
+ * Availability:
+ * SVGA_CAP_GMR2
+ */
+
+typedef
+struct {
+ uint32 gmrId;
+ uint32 numPages;
+} SVGAFifoCmdDefineGMR2;
+
+
+/*
+ * SVGA_CMD_REMAP_GMR2 --
+ *
+ * Remap guest memory region v2. See the description of GMRs above.
+ *
+ * This command allows guest to modify a portion of an existing GMR by
+ * invalidating it or reassigning it to different guest physical pages.
+ * The pages are identified by physical page number (PPN). The pages
+ * are assumed to be pinned and valid for DMA operations.
+ *
+ * Description of command flags:
+ *
+ * SVGA_REMAP_GMR2_VIA_GMR: If enabled, references a PPN list in a GMR.
+ * The PPN list must not overlap with the remap region (this can be
+ * handled trivially by referencing a separate GMR). If flag is
+ * disabled, PPN list is appended to SVGARemapGMR command.
+ *
+ * SVGA_REMAP_GMR2_PPN64: If set, PPN list is in PPN64 format, otherwise
+ * it is in PPN32 format.
+ *
+ * SVGA_REMAP_GMR2_SINGLE_PPN: If set, PPN list contains a single entry.
+ * A single PPN can be used to invalidate a portion of a GMR or
+ * map it to a single guest scratch page.
+ *
+ * Availability:
+ * SVGA_CAP_GMR2
+ */
+
+typedef enum {
+ SVGA_REMAP_GMR2_PPN32 = 0,
+ SVGA_REMAP_GMR2_VIA_GMR = (1 << 0),
+ SVGA_REMAP_GMR2_PPN64 = (1 << 1),
+ SVGA_REMAP_GMR2_SINGLE_PPN = (1 << 2),
+} SVGARemapGMR2Flags;
+
+typedef
+struct {
+ uint32 gmrId;
+ SVGARemapGMR2Flags flags;
+ uint32 offsetPages; /* offset in pages to begin remap */
+ uint32 numPages; /* number of pages to remap */
+ /*
+ * Followed by additional data depending on SVGARemapGMR2Flags.
+ *
+ * If flag SVGA_REMAP_GMR2_VIA_GMR is set, single SVGAGuestPtr follows.
+ * Otherwise an array of page descriptors in PPN32 or PPN64 format
+ * (according to flag SVGA_REMAP_GMR2_PPN64) follows. If flag
+ * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
+ */
+} SVGAFifoCmdRemapGMR2;
+
#endif
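
As a rough illustration of the DEFINE_GMR2/REMAP_GMR2 flow documented above, a guest driver can reserve FIFO space for both commands and append an inline PPN32 page list (SVGA_REMAP_GMR2_VIA_GMR not set). This is a hedged sketch built on the driver's vmw_fifo_reserve()/vmw_fifo_commit() helpers; the function name and the source of the page list are illustrative:

/* Hedged sketch: bind num_pages guest pages to gmr_id using GMR2 commands. */
static int example_gmr2_bind(struct vmw_private *dev_priv, uint32 gmr_id,
                             const uint32 *ppn32_list, uint32 num_pages)
{
        uint32 define_size = sizeof(uint32) + sizeof(SVGAFifoCmdDefineGMR2);
        uint32 remap_size = sizeof(uint32) + sizeof(SVGAFifoCmdRemapGMR2) +
                num_pages * sizeof(uint32);
        SVGAFifoCmdDefineGMR2 *define_cmd;
        SVGAFifoCmdRemapGMR2 *remap_cmd;
        uint32 *cmd;

        cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        *cmd++ = SVGA_CMD_DEFINE_GMR2;
        define_cmd = (SVGAFifoCmdDefineGMR2 *) cmd;
        define_cmd->gmrId = gmr_id;
        define_cmd->numPages = num_pages;
        cmd += sizeof(*define_cmd) / sizeof(uint32);

        *cmd++ = SVGA_CMD_REMAP_GMR2;
        remap_cmd = (SVGAFifoCmdRemapGMR2 *) cmd;
        remap_cmd->gmrId = gmr_id;
        remap_cmd->flags = SVGA_REMAP_GMR2_PPN32;       /* inline PPN32 list */
        remap_cmd->offsetPages = 0;
        remap_cmd->numPages = num_pages;
        cmd += sizeof(*remap_cmd) / sizeof(uint32);

        memcpy(cmd, ppn32_list, num_pages * sizeof(uint32));

        vmw_fifo_commit(dev_priv, define_size + remap_size);
        return 0;
}
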
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 87e43e0733b..5a72ed90823 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -42,6 +42,10 @@ static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
TTM_PL_FLAG_CACHED;
+static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
+ TTM_PL_FLAG_CACHED |
+ TTM_PL_FLAG_NO_EVICT;
+
struct ttm_placement vmw_vram_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -56,6 +60,11 @@ static uint32_t vram_gmr_placement_flags[] = {
VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};
+static uint32_t gmr_vram_placement_flags[] = {
+ VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
+ TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+};
+
struct ttm_placement vmw_vram_gmr_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -65,6 +74,20 @@ struct ttm_placement vmw_vram_gmr_placement = {
.busy_placement = &gmr_placement_flags
};
+static uint32_t vram_gmr_ne_placement_flags[] = {
+ TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
+ VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
+struct ttm_placement vmw_vram_gmr_ne_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 2,
+ .placement = vram_gmr_ne_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &gmr_ne_placement_flags
+};
+
struct ttm_placement vmw_vram_sys_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -92,6 +115,30 @@ struct ttm_placement vmw_sys_placement = {
.busy_placement = &sys_placement_flags
};
+static uint32_t evictable_placement_flags[] = {
+ TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
+ TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
+ VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
+
+struct ttm_placement vmw_evictable_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 3,
+ .placement = evictable_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &sys_placement_flags
+};
+
+struct ttm_placement vmw_srf_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .num_busy_placement = 2,
+ .placement = &gmr_placement_flags,
+ .busy_placement = gmr_vram_placement_flags
+};
+
struct vmw_ttm_backend {
struct ttm_backend backend;
struct page **pages;
@@ -274,39 +321,39 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
static void *vmw_sync_obj_ref(void *sync_obj)
{
- return sync_obj;
+
+ return (void *)
+ vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}
static void vmw_sync_obj_unref(void **sync_obj)
{
- *sync_obj = NULL;
+ vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}
static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
{
- struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
-
- mutex_lock(&dev_priv->hw_mutex);
- vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
- mutex_unlock(&dev_priv->hw_mutex);
+ vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
return 0;
}
static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
- struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
- uint32_t sequence = (unsigned long) sync_obj;
+ unsigned long flags = (unsigned long) sync_arg;
+ return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
+ (uint32_t) flags);
- return vmw_fence_signaled(dev_priv, sequence);
}
static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
bool lazy, bool interruptible)
{
- struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
- uint32_t sequence = (unsigned long) sync_obj;
+ unsigned long flags = (unsigned long) sync_arg;
- return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
+ return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
+ (uint32_t) flags,
+ lazy, interruptible,
+ VMW_FENCE_WAIT_TIMEOUT);
}
struct ttm_bo_driver vmw_bo_driver = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
new file mode 100644
index 00000000000..3fa884db08a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -0,0 +1,322 @@
+/**************************************************************************
+ *
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "ttm/ttm_placement.h"
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+
+/**
+ * vmw_dmabuf_to_placement - Validate a buffer to placement.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @placement: The placement to validate the buffer against.
+ * @interruptible: Use interruptible wait.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ struct ttm_placement *placement,
+ bool interruptible)
+{
+ struct vmw_master *vmaster = dev_priv->active_master;
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+
+ ret = ttm_write_lock(&vmaster->lock, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+
+ ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ if (unlikely(ret != 0))
+ goto err;
+
+ ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ ttm_write_unlock(&vmaster->lock);
+ return ret;
+}
+
+/**
+ * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ * Flushes and unpins the query bo if @pin == true to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @pin: Pin buffer if true.
+ * @interruptible: Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool interruptible)
+{
+ struct vmw_master *vmaster = dev_priv->active_master;
+ struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_placement *placement;
+ int ret;
+
+ ret = ttm_write_lock(&vmaster->lock, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (pin)
+ vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+
+ ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ if (unlikely(ret != 0))
+ goto err;
+
+ /**
+ * Put BO in VRAM if there is space, otherwise as a GMR.
+ * If there is no space in VRAM and GMR ids are all used up,
+ * start evicting GMRs to make room. If the DMA buffer can't be
+ * used as a GMR, this will return -ENOMEM.
+ */
+
+ if (pin)
+ placement = &vmw_vram_gmr_ne_placement;
+ else
+ placement = &vmw_vram_gmr_placement;
+
+ ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ if (likely(ret == 0) || ret == -ERESTARTSYS)
+ goto err_unreserve;
+
+
+ /**
+ * If that failed, try VRAM again, this time evicting
+ * previous contents.
+ */
+
+ if (pin)
+ placement = &vmw_vram_ne_placement;
+ else
+ placement = &vmw_vram_placement;
+
+ ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+
+err_unreserve:
+ ttm_bo_unreserve(bo);
+err:
+ ttm_write_unlock(&vmaster->lock);
+ return ret;
+}
+
+/**
+ * vmw_dmabuf_to_vram - Move a buffer to vram.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @pin: Pin buffer in vram if true.
+ * @interruptible: Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool interruptible)
+{
+ struct ttm_placement *placement;
+
+ if (pin)
+ placement = &vmw_vram_ne_placement;
+ else
+ placement = &vmw_vram_placement;
+
+ return vmw_dmabuf_to_placement(dev_priv, buf,
+ placement,
+ interruptible);
+}
+
+/**
+ * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ * Flushes and unpins the query bo if @pin == true to avoid failures.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to move.
+ * @pin: Pin buffer in vram if true.
+ * @interruptible: Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool interruptible)
+{
+ struct vmw_master *vmaster = dev_priv->active_master;
+ struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_placement placement;
+ int ret = 0;
+
+ if (pin)
+ placement = vmw_vram_ne_placement;
+ else
+ placement = vmw_vram_placement;
+ placement.lpfn = bo->num_pages;
+
+ ret = ttm_write_lock(&vmaster->lock, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (pin)
+ vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+
+ ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ if (unlikely(ret != 0))
+ goto err_unlock;
+
+ /* Is this buffer already in vram but not at the start of it? */
+ if (bo->mem.mem_type == TTM_PL_VRAM &&
+ bo->mem.start < bo->num_pages &&
+ bo->mem.start > 0)
+ (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
+ false, false);
+
+ ret = ttm_bo_validate(bo, &placement, interruptible, false, false);
+
+ /* For some reason we didn't end up at the start of vram */
+ WARN_ON(ret == 0 && bo->offset != 0);
+
+ ttm_bo_unreserve(bo);
+err_unlock:
+ ttm_write_unlock(&vmaster->lock);
+
+ return ret;
+}
+
+
+/**
+ * vmw_dmabuf_unpin - Unpin the given buffer without moving it.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to unpin.
+ * @interruptible: Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible)
+{
+ /*
+ * We could in theory early out if the buffer is
+ * unpinned but we need to lock and reserve the buffer
+ * anyway, so we don't gain much by that.
+ */
+ return vmw_dmabuf_to_placement(dev_priv, buf,
+ &vmw_evictable_placement,
+ interruptible);
+}
+
+
+/**
+ * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
+ * of a buffer.
+ *
+ * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
+ * @ptr: SVGAGuestPtr returning the result.
+ */
+void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
+ SVGAGuestPtr *ptr)
+{
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
+ ptr->offset = bo->offset;
+ } else {
+ ptr->gmrId = bo->mem.start;
+ ptr->offset = 0;
+ }
+}
+
+
+/**
+ * vmw_bo_pin - Pin or unpin a buffer object without moving it.
+ *
+ * @bo: The buffer object. Must be reserved, and present either in VRAM
+ * or GMR memory.
+ * @pin: Whether to pin or unpin.
+ *
+ */
+void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
+{
+ uint32_t pl_flags;
+ struct ttm_placement placement;
+ uint32_t old_mem_type = bo->mem.mem_type;
+ int ret;
+
+ BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(old_mem_type != TTM_PL_VRAM &&
+ old_mem_type != VMW_PL_FLAG_GMR);
+
+ pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
+ if (pin)
+ pl_flags |= TTM_PL_FLAG_NO_EVICT;
+
+ memset(&placement, 0, sizeof(placement));
+ placement.num_placement = 1;
+ placement.placement = &pl_flags;
+
+ ret = ttm_bo_validate(bo, &placement, false, true, true);
+
+ BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+}
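
Taken together, the helpers in this new file support a typical pin-for-display flow: validate the buffer into VRAM or GMR memory, reserve it, pin it in place, and translate its placement into an SVGAGuestPtr for the device. A hedged usage sketch; the function name is illustrative and error handling is minimal:

/* Hedged usage sketch: pin a buffer and fetch its guest pointer. */
static int example_pin_for_display(struct vmw_private *dev_priv,
                                   struct vmw_dma_buffer *buf,
                                   SVGAGuestPtr *ptr)
{
        struct ttm_buffer_object *bo = &buf->base;
        int ret;

        /* Make sure the buffer sits in VRAM or GMR memory first. */
        ret = vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, false, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, true, false, false, 0);
        if (unlikely(ret != 0))
                return ret;

        vmw_bo_pin(bo, true);           /* sets NO_EVICT, does not move the bo */
        vmw_bo_get_guest_ptr(bo, ptr);  /* VRAM offset or GMR id */
        ttm_bo_unreserve(bo);

        return 0;
}
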
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 96949b93d92..dff8fc76715 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -24,6 +24,7 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
+#include <linux/module.h>
#include "drmP.h"
#include "vmwgfx_drv.h"
@@ -82,17 +83,31 @@
#define DRM_IOCTL_VMW_EXECBUF \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
struct drm_vmw_execbuf_arg)
-#define DRM_IOCTL_VMW_FIFO_DEBUG \
- DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, \
- struct drm_vmw_fifo_debug_arg)
+#define DRM_IOCTL_VMW_GET_3D_CAP \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
+ struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
struct drm_vmw_fence_wait_arg)
+#define DRM_IOCTL_VMW_FENCE_SIGNALED \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
+ struct drm_vmw_fence_signaled_arg)
+#define DRM_IOCTL_VMW_FENCE_UNREF \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
+ struct drm_vmw_fence_arg)
+#define DRM_IOCTL_VMW_FENCE_EVENT \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
+ struct drm_vmw_fence_event_arg)
+#define DRM_IOCTL_VMW_PRESENT \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
+ struct drm_vmw_present_arg)
+#define DRM_IOCTL_VMW_PRESENT_READBACK \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
+ struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
- DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
struct drm_vmw_update_layout_arg)
-
/**
* The core DRM version of this macro doesn't account for
* DRM_COMMAND_BASE.
@@ -135,12 +150,28 @@ static struct drm_ioctl_desc vmw_ioctls[] = {
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
- DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
+ VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
+ vmw_fence_obj_signaled_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_FENCE_EVENT,
+ vmw_fence_event_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
- DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
+
+ /* These allow direct access to the framebuffers, so mark as master only. */
+ VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
+ DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
+ vmw_present_readback_ioctl,
+ DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
+ vmw_kms_update_layout_ioctl,
+ DRM_MASTER | DRM_UNLOCKED),
};
static struct pci_device_id vmw_pci_id_list[] = {
@@ -189,8 +220,78 @@ static void vmw_print_capabilities(uint32_t capabilities)
DRM_INFO(" GMR.\n");
if (capabilities & SVGA_CAP_TRACES)
DRM_INFO(" Traces.\n");
+ if (capabilities & SVGA_CAP_GMR2)
+ DRM_INFO(" GMR2.\n");
+ if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
+ DRM_INFO(" Screen Object 2.\n");
+}
+
+
+/**
+ * vmw_dummy_query_bo_prepare - Initialize a query result structure at
+ * the start of a buffer object.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function will idle the buffer using an uninterruptible wait, then
+ * map the first page and initialize a pending occlusion query result
+ * structure. Finally it will unmap the buffer.
+ *
+ * TODO: Since we're only mapping a single page, we should optimize the map
+ * to use kmap_atomic / iomap_atomic.
+ */
+static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
+{
+ struct ttm_bo_kmap_obj map;
+ volatile SVGA3dQueryResult *result;
+ bool dummy;
+ int ret;
+ struct ttm_bo_device *bdev = &dev_priv->bdev;
+ struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+
+ ttm_bo_reserve(bo, false, false, false, 0);
+ spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, false);
+ spin_unlock(&bdev->fence_lock);
+ if (unlikely(ret != 0))
+ (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
+ 10*HZ);
+
+ ret = ttm_bo_kmap(bo, 0, 1, &map);
+ if (likely(ret == 0)) {
+ result = ttm_kmap_obj_virtual(&map, &dummy);
+ result->totalSize = sizeof(*result);
+ result->state = SVGA3D_QUERYSTATE_PENDING;
+ result->result32 = 0xff;
+ ttm_bo_kunmap(&map);
+ } else
+ DRM_ERROR("Dummy query buffer map failed.\n");
+ ttm_bo_unreserve(bo);
+}
+
+
+/**
+ * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
+ *
+ * @dev_priv: A device private structure.
+ *
+ * This function creates a small buffer object that holds the query
+ * result for dummy queries emitted as query barriers.
+ * No interruptible waits are done within this function.
+ *
+ * Returns an error if bo creation fails.
+ */
+static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
+{
+ return ttm_bo_create(&dev_priv->bdev,
+ PAGE_SIZE,
+ ttm_bo_type_device,
+ &vmw_vram_sys_placement,
+ 0, 0, false, NULL,
+ &dev_priv->dummy_query_bo);
}
+
static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
@@ -200,16 +301,42 @@ static int vmw_request_device(struct vmw_private *dev_priv)
DRM_ERROR("Unable to initialize FIFO.\n");
return ret;
}
+ vmw_fence_fifo_up(dev_priv->fman);
+ ret = vmw_dummy_query_bo_create(dev_priv);
+ if (unlikely(ret != 0))
+ goto out_no_query_bo;
+ vmw_dummy_query_bo_prepare(dev_priv);
return 0;
+
+out_no_query_bo:
+ vmw_fence_fifo_down(dev_priv->fman);
+ vmw_fifo_release(dev_priv, &dev_priv->fifo);
+ return ret;
}
static void vmw_release_device(struct vmw_private *dev_priv)
{
+ /*
+ * Previous destructions should've released
+ * the pinned bo.
+ */
+
+ BUG_ON(dev_priv->pinned_bo != NULL);
+
+ ttm_bo_unref(&dev_priv->dummy_query_bo);
+ vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
-int vmw_3d_resource_inc(struct vmw_private *dev_priv)
+/**
+ * Increase the 3d resource refcount.
+ * If the count was previously zero, initialize the fifo, switching to svga
+ * mode. Note that the master holds a ref as well, and may request an
+ * explicit switch to svga mode if fb is not running, using @unhide_svga.
+ */
+int vmw_3d_resource_inc(struct vmw_private *dev_priv,
+ bool unhide_svga)
{
int ret = 0;
@@ -218,19 +345,42 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv)
ret = vmw_request_device(dev_priv);
if (unlikely(ret != 0))
--dev_priv->num_3d_resources;
+ } else if (unhide_svga) {
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_ENABLE,
+ vmw_read(dev_priv, SVGA_REG_ENABLE) &
+ ~SVGA_REG_ENABLE_HIDE);
+ mutex_unlock(&dev_priv->hw_mutex);
}
+
mutex_unlock(&dev_priv->release_mutex);
return ret;
}
-
-void vmw_3d_resource_dec(struct vmw_private *dev_priv)
+/**
+ * Decrease the 3d resource refcount.
+ * If the count reaches zero, disable the fifo, switching to vga mode.
+ * Note that the master holds a refcount as well, and may request an
+ * explicit switch to vga mode when it releases its refcount to account
+ * for the situation of an X server vt switch to VGA with 3d resources
+ * active.
+ */
+void vmw_3d_resource_dec(struct vmw_private *dev_priv,
+ bool hide_svga)
{
int32_t n3d;
mutex_lock(&dev_priv->release_mutex);
if (unlikely(--dev_priv->num_3d_resources == 0))
vmw_release_device(dev_priv);
+ else if (hide_svga) {
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_ENABLE,
+ vmw_read(dev_priv, SVGA_REG_ENABLE) |
+ SVGA_REG_ENABLE_HIDE);
+ mutex_unlock(&dev_priv->hw_mutex);
+ }
+
n3d = (int32_t) dev_priv->num_3d_resources;
mutex_unlock(&dev_priv->release_mutex);
@@ -252,7 +402,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset;
- dev_priv->last_read_sequence = (uint32_t) -100;
+ dev_priv->last_read_seqno = (uint32_t) -100;
mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
@@ -263,8 +413,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->init_mutex);
init_waitqueue_head(&dev_priv->fence_queue);
init_waitqueue_head(&dev_priv->fifo_queue);
- atomic_set(&dev_priv->fence_queue_waiters, 0);
+ dev_priv->fence_queue_waiters = 0;
atomic_set(&dev_priv->fifo_queue_waiters, 0);
+ INIT_LIST_HEAD(&dev_priv->surface_lru);
+ dev_priv->used_memory_size = 0;
dev_priv->io_start = pci_resource_start(dev->pdev, 0);
dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
@@ -285,6 +437,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+ dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
+ dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
+ dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
+ dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
if (dev_priv->capabilities & SVGA_CAP_GMR) {
dev_priv->max_gmr_descriptors =
vmw_read(dev_priv,
@@ -292,11 +448,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->max_gmr_ids =
vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
}
-
- dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
- dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
- dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
- dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
+ if (dev_priv->capabilities & SVGA_CAP_GMR2) {
+ dev_priv->max_gmr_pages =
+ vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
+ dev_priv->memory_size =
+ vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
+ dev_priv->memory_size -= dev_priv->vram_size;
+ } else {
+ /*
+ * An arbitrary limit of 512MiB on surface
+ * memory. But all HWV8 hardware supports GMR2.
+ */
+ dev_priv->memory_size = 512*1024*1024;
+ }
mutex_unlock(&dev_priv->hw_mutex);
@@ -308,6 +472,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_INFO("Max GMR descriptors is %u\n",
(unsigned)dev_priv->max_gmr_descriptors);
}
+ if (dev_priv->capabilities & SVGA_CAP_GMR2) {
+ DRM_INFO("Max number of GMR pages is %u\n",
+ (unsigned)dev_priv->max_gmr_pages);
+ DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
+ (unsigned)dev_priv->memory_size / 1024);
+ }
DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
dev_priv->vram_start, dev_priv->vram_size / 1024);
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
@@ -394,22 +564,34 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_no_device;
}
}
+
+ dev_priv->fman = vmw_fence_manager_init(dev_priv);
+ if (unlikely(dev_priv->fman == NULL))
+ goto out_no_fman;
+
+ /* Need to start the fifo to check if we can do screen objects */
+ ret = vmw_3d_resource_inc(dev_priv, true);
+ if (unlikely(ret != 0))
+ goto out_no_fifo;
+ vmw_kms_save_vga(dev_priv);
+
+ /* Start kms and overlay systems, needs fifo. */
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
vmw_overlay_init(dev_priv);
+
+ /* 3D Depends on Screen Objects being used. */
+ DRM_INFO("Detected %sdevice 3D availability.\n",
+ vmw_fifo_have_3d(dev_priv) ?
+ "" : "no ");
+
+ /* We might be done with the fifo now */
if (dev_priv->enable_fb) {
- ret = vmw_3d_resource_inc(dev_priv);
- if (unlikely(ret != 0))
- goto out_no_fifo;
- vmw_kms_save_vga(dev_priv);
vmw_fb_init(dev_priv);
- DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
- "Detected device 3D availability.\n" :
- "Detected no device 3D availability.\n");
} else {
- DRM_INFO("Delayed 3D detection since we're not "
- "running the device in SVGA mode yet.\n");
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv, true);
}
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
@@ -426,15 +608,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
return 0;
out_no_irq:
- if (dev_priv->enable_fb) {
+ if (dev_priv->enable_fb)
vmw_fb_close(dev_priv);
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv);
- }
-out_no_fifo:
vmw_overlay_close(dev_priv);
vmw_kms_close(dev_priv);
out_no_kms:
+ /* We still have a 3D resource reference held */
+ if (dev_priv->enable_fb) {
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv, false);
+ }
+out_no_fifo:
+ vmw_fence_manager_takedown(dev_priv->fman);
+out_no_fman:
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
@@ -467,15 +653,18 @@ static int vmw_driver_unload(struct drm_device *dev)
unregister_pm_notifier(&dev_priv->pm_nb);
+ if (dev_priv->ctx.cmd_bounce)
+ vfree(dev_priv->ctx.cmd_bounce);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
drm_irq_uninstall(dev_priv->dev);
if (dev_priv->enable_fb) {
vmw_fb_close(dev_priv);
vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv);
+ vmw_3d_resource_dec(dev_priv, false);
}
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
+ vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
@@ -646,7 +835,7 @@ static int vmw_master_set(struct drm_device *dev,
int ret = 0;
if (!dev_priv->enable_fb) {
- ret = vmw_3d_resource_inc(dev_priv);
+ ret = vmw_3d_resource_inc(dev_priv, true);
if (unlikely(ret != 0))
return ret;
vmw_kms_save_vga(dev_priv);
@@ -688,7 +877,7 @@ out_no_active_lock:
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv);
+ vmw_3d_resource_dec(dev_priv, true);
}
return ret;
}
@@ -709,7 +898,7 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
- vmw_kms_idle_workqueues(vmaster);
+ vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -726,7 +915,7 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv);
+ vmw_3d_resource_dec(dev_priv, true);
}
dev_priv->active_master = &dev_priv->fbdev_master;
@@ -761,6 +950,7 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* This empties VRAM and unbinds all GMR bindings.
* Buffer contents is moved to swappable memory.
*/
+ vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
ttm_bo_swapout_all(&dev_priv->bdev);
break;
@@ -835,7 +1025,7 @@ static int vmw_pm_prepare(struct device *kdev)
*/
dev_priv->suspended = true;
if (dev_priv->enable_fb)
- vmw_3d_resource_dec(dev_priv);
+ vmw_3d_resource_dec(dev_priv, true);
if (dev_priv->num_3d_resources != 0) {
@@ -843,7 +1033,7 @@ static int vmw_pm_prepare(struct device *kdev)
"while 3D resources are active.\n");
if (dev_priv->enable_fb)
- vmw_3d_resource_inc(dev_priv);
+ vmw_3d_resource_inc(dev_priv, true);
dev_priv->suspended = false;
return -EBUSY;
}
@@ -862,7 +1052,7 @@ static void vmw_pm_complete(struct device *kdev)
* start fifo.
*/
if (dev_priv->enable_fb)
- vmw_3d_resource_inc(dev_priv);
+ vmw_3d_resource_inc(dev_priv, false);
dev_priv->suspended = false;
}
@@ -886,6 +1076,8 @@ static struct drm_driver driver = {
.irq_uninstall = vmw_irq_uninstall,
.irq_handler = vmw_irq_handler,
.get_vblank_counter = vmw_get_vblank_counter,
+ .enable_vblank = vmw_enable_vblank,
+ .disable_vblank = vmw_disable_vblank,
.reclaim_buffers_locked = NULL,
.ioctls = vmw_ioctls,
.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
@@ -902,7 +1094,8 @@ static struct drm_driver driver = {
.release = drm_release,
.unlocked_ioctl = vmw_unlocked_ioctl,
.mmap = vmw_mmap,
- .poll = drm_poll,
+ .poll = vmw_fops_poll,
+ .read = vmw_fops_read,
.fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
.compat_ioctl = drm_compat_ioctl,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 10fc01f69c4..8cca91a93bd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -38,20 +38,27 @@
#include "ttm/ttm_lock.h"
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_module.h"
+#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20100927"
-#define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 4
+#define VMWGFX_DRIVER_DATE "20111025"
+#define VMWGFX_DRIVER_MAJOR 2
+#define VMWGFX_DRIVER_MINOR 3
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
-#define VMWGFX_MAX_GMRS 2048
+#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
+#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
+#define VMW_RES_CONTEXT ttm_driver_type0
+#define VMW_RES_SURFACE ttm_driver_type1
+#define VMW_RES_STREAM ttm_driver_type2
+#define VMW_RES_FENCE ttm_driver_type3
+
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
@@ -72,9 +79,11 @@ struct vmw_resource {
int id;
enum ttm_object_type res_type;
bool avail;
+ void (*remove_from_lists) (struct vmw_resource *res);
void (*hw_destroy) (struct vmw_resource *res);
void (*res_free) (struct vmw_resource *res);
-
+ struct list_head validate_head;
+ struct list_head query_head; /* Protected by the cmdbuf mutex */
/* TODO is a generic snooper needed? */
#if 0
void (*snoop)(struct vmw_resource *res,
@@ -90,8 +99,12 @@ struct vmw_cursor_snooper {
uint32_t *image;
};
+struct vmw_framebuffer;
+struct vmw_surface_offset;
+
struct vmw_surface {
struct vmw_resource res;
+ struct list_head lru_head; /* Protected by the resource lock */
uint32_t flags;
uint32_t format;
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
@@ -102,9 +115,12 @@ struct vmw_surface {
/* TODO so far just a extra pointer */
struct vmw_cursor_snooper snooper;
+ struct ttm_buffer_object *backup;
+ struct vmw_surface_offset *offsets;
+ uint32_t backup_size;
};
-struct vmw_fence_queue {
+struct vmw_marker_queue {
struct list_head head;
struct timespec lag;
struct timespec lag_time;
@@ -115,16 +131,12 @@ struct vmw_fifo_state {
unsigned long reserved_size;
__le32 *dynamic_buffer;
__le32 *static_buffer;
- __le32 *last_buffer;
- uint32_t last_data_size;
- uint32_t last_buffer_size;
- bool last_buffer_add;
unsigned long static_buffer_size;
bool using_bounce_buffer;
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
- struct vmw_fence_queue fence_queue;
+ struct vmw_marker_queue marker_queue;
};
struct vmw_relocation {
@@ -136,6 +148,8 @@ struct vmw_sw_context{
struct ida bo_list;
uint32_t last_cid;
bool cid_valid;
+	bool kernel; /**< is the call made from the kernel */
+ struct vmw_resource *cur_ctx;
uint32_t last_sid;
uint32_t sid_translation;
bool sid_valid;
@@ -143,8 +157,16 @@ struct vmw_sw_context{
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
- struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
+ struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
uint32_t cur_val_buf;
+ uint32_t *cmd_bounce;
+ uint32_t cmd_bounce_size;
+ struct list_head resource_list;
+ uint32_t fence_flags;
+ struct list_head query_list;
+ struct ttm_buffer_object *cur_query_bo;
+ uint32_t cur_query_cid;
+ bool query_cid_valid;
};
struct vmw_legacy_display;
@@ -185,6 +207,8 @@ struct vmw_private {
uint32_t capabilities;
uint32_t max_gmr_descriptors;
uint32_t max_gmr_ids;
+ uint32_t max_gmr_pages;
+ uint32_t memory_size;
bool has_gmr;
struct mutex hw_mutex;
@@ -195,12 +219,7 @@ struct vmw_private {
struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
uint32_t vga_width;
uint32_t vga_height;
- uint32_t vga_depth;
uint32_t vga_bpp;
- uint32_t vga_pseudo;
- uint32_t vga_red_mask;
- uint32_t vga_green_mask;
- uint32_t vga_blue_mask;
uint32_t vga_bpl;
uint32_t vga_pitchlock;
@@ -212,6 +231,7 @@ struct vmw_private {
void *fb_info;
struct vmw_legacy_display *ldu_priv;
+ struct vmw_screen_object_display *sou_priv;
struct vmw_overlay *overlay_priv;
/*
@@ -240,13 +260,16 @@ struct vmw_private {
* Fencing and IRQs.
*/
- atomic_t fence_seq;
+ atomic_t marker_seq;
wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue;
- atomic_t fence_queue_waiters;
+ int fence_queue_waiters; /* Protected by hw_mutex */
+ int goal_queue_waiters; /* Protected by hw_mutex */
atomic_t fifo_queue_waiters;
- uint32_t last_read_sequence;
+ uint32_t last_read_seqno;
spinlock_t irq_lock;
+ struct vmw_fence_manager *fman;
+ uint32_t irq_mask;
/*
* Device state
@@ -285,6 +308,26 @@ struct vmw_private {
struct mutex release_mutex;
uint32_t num_3d_resources;
+
+ /*
+ * Query processing. These members
+ * are protected by the cmdbuf mutex.
+ */
+
+ struct ttm_buffer_object *dummy_query_bo;
+ struct ttm_buffer_object *pinned_bo;
+ uint32_t query_cid;
+ bool dummy_query_bo_pinned;
+
+ /*
+ * Surface swapping. The "surface_lru" list is protected by the
+ * resource lock in order to be able to destroy a surface and take
+ * it off the lru atomically. "used_memory_size" is currently
+ * protected by the cmdbuf mutex for simplicity.
+ */
+
+ struct list_head surface_lru;
+ uint32_t used_memory_size;
};
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -319,8 +362,8 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
return val;
}
-int vmw_3d_resource_inc(struct vmw_private *dev_priv);
-void vmw_3d_resource_dec(struct vmw_private *dev_priv);
+int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
+void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
/**
* GMR utilities - vmwgfx_gmr.c
@@ -345,7 +388,8 @@ extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
- int id);
+ int id,
+ struct vmw_resource **p_res);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_surface *srf,
@@ -363,6 +407,8 @@ extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
extern int vmw_surface_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle, int *id);
+extern int vmw_surface_validate(struct vmw_private *dev_priv,
+ struct vmw_surface *srf);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct vmw_dma_buffer *vmw_bo,
@@ -378,10 +424,6 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t id, struct vmw_dma_buffer **out);
-extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo);
-extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *bo);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -390,7 +432,30 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t *inout_id,
struct vmw_resource **out);
+extern void vmw_resource_unreserve(struct list_head *list);
+/**
+ * DMA buffer helper routines - vmwgfx_dmabuf.c
+ */
+extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo,
+ struct ttm_placement *placement,
+ bool interruptible);
+extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool interruptible);
+extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool interruptible);
+extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo,
+ bool pin, bool interruptible);
+extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo,
+ bool interruptible);
+extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
+ SVGAGuestPtr *ptr);
+extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -398,8 +463,16 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
+extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int vmw_present_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern unsigned int vmw_fops_poll(struct file *filp,
+ struct poll_table_struct *wait);
+extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset);
/**
* Fifo utilities - vmwgfx_fifo.c
@@ -412,11 +485,12 @@ extern void vmw_fifo_release(struct vmw_private *dev_priv,
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
- uint32_t *sequence);
+ uint32_t *seqno);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
-extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
+extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+ uint32_t cid);
/**
* TTM glue - vmwgfx_ttm_glue.c
@@ -434,7 +508,10 @@ extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
+extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
+extern struct ttm_placement vmw_evictable_placement;
+extern struct ttm_placement vmw_srf_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
@@ -444,45 +521,70 @@ extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int vmw_execbuf_process(struct drm_file *file_priv,
+ struct vmw_private *dev_priv,
+ void __user *user_commands,
+ void *kernel_commands,
+ uint32_t command_size,
+ uint64_t throttle_us,
+ struct drm_vmw_fence_rep __user
+ *user_fence_rep);
+
+extern void
+vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+ bool only_on_cid_match, uint32_t cid);
+
+extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+ struct vmw_private *dev_priv,
+ struct vmw_fence_obj **p_fence,
+ uint32_t *p_handle);
+extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ struct vmw_fpriv *vmw_fp,
+ int ret,
+ struct drm_vmw_fence_rep __user
+ *user_fence_rep,
+ struct vmw_fence_obj *fence,
+ uint32_t fence_handle);
/**
* IRQs and wating - vmwgfx_irq.c
*/
extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
-extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
- uint32_t sequence, bool interruptible,
- unsigned long timeout);
+extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
+ uint32_t seqno, bool interruptible,
+ unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
-extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
- uint32_t sequence);
-extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
+ uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
bool lazy,
bool fifo_idle,
- uint32_t sequence,
+ uint32_t seqno,
bool interruptible,
unsigned long timeout);
-extern void vmw_update_sequence(struct vmw_private *dev_priv,
+extern void vmw_update_seqno(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state);
-
+extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
+extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
+extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
+extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
/**
- * Rudimentary fence objects currently used only for throttling -
- * vmwgfx_fence.c
+ * Rudimentary fence-like objects currently used only for throttling -
+ * vmwgfx_marker.c
*/
-extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
-extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
-extern int vmw_fence_push(struct vmw_fence_queue *queue,
- uint32_t sequence);
-extern int vmw_fence_pull(struct vmw_fence_queue *queue,
- uint32_t signaled_sequence);
+extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
+extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
+extern int vmw_marker_push(struct vmw_marker_queue *queue,
+ uint32_t seqno);
+extern int vmw_marker_pull(struct vmw_marker_queue *queue,
+ uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
- struct vmw_fence_queue *queue, uint32_t us);
+ struct vmw_marker_queue *queue, uint32_t us);
/**
* Kernel framebuffer - vmwgfx_fb.c
@@ -508,16 +610,31 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
SVGA3dCmdHeader *header);
-void vmw_kms_write_svga(struct vmw_private *vmw_priv,
- unsigned width, unsigned height, unsigned pitch,
- unsigned bbp, unsigned depth);
-int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ unsigned width, unsigned height, unsigned pitch,
+ unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
+int vmw_enable_vblank(struct drm_device *dev, int crtc);
+void vmw_disable_vblank(struct drm_device *dev, int crtc);
+int vmw_kms_present(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct vmw_surface *surface,
+ uint32_t sid, int32_t destX, int32_t destY,
+ struct drm_vmw_rect *clips,
+ uint32_t num_clips);
+int vmw_kms_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_vmw_rect *clips,
+ uint32_t num_clips);
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/**
* Overlay control - vmwgfx_overlay.c
@@ -576,4 +693,8 @@ static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer
return NULL;
}
+static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
+{
+ return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
+}
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 41b95ed6dbc..40932fbdac0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -44,10 +44,71 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
return 0;
}
+static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
+ struct vmw_resource **p_res)
+{
+ struct vmw_resource *res = *p_res;
+
+ if (list_empty(&res->validate_head)) {
+ list_add_tail(&res->validate_head, &sw_context->resource_list);
+ *p_res = NULL;
+ } else
+ vmw_resource_unreference(p_res);
+}
+
+/**
+ * vmw_bo_to_validate_list - add a bo to a validate list
+ *
+ * @sw_context: The software context used for this command submission batch.
+ * @bo: The buffer object to add.
+ * @fence_flags: Fence flags to be or'ed with any other fence flags for
+ * this buffer on this submission batch.
+ * @p_val_node: If non-NULL, will be updated with the validate node number
+ * on return.
+ *
+ * Returns -EINVAL if the limit of number of buffer objects per command
+ * submission is reached.
+ */
+static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
+ struct ttm_buffer_object *bo,
+ uint32_t fence_flags,
+ uint32_t *p_val_node)
+{
+ uint32_t val_node;
+ struct ttm_validate_buffer *val_buf;
+
+ val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
+
+ if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
+ DRM_ERROR("Max number of DMA buffers per submission"
+ " exceeded.\n");
+ return -EINVAL;
+ }
+
+ val_buf = &sw_context->val_bufs[val_node];
+ if (unlikely(val_node == sw_context->cur_val_buf)) {
+ val_buf->new_sync_obj_arg = NULL;
+ val_buf->bo = ttm_bo_reference(bo);
+ list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+ ++sw_context->cur_val_buf;
+ }
+
+ val_buf->new_sync_obj_arg = (void *)
+ ((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
+ sw_context->fence_flags |= fence_flags;
+
+ if (p_val_node)
+ *p_val_node = val_node;
+
+ return 0;
+}
+
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
+ struct vmw_resource *ctx;
+
struct vmw_cid_cmd {
SVGA3dCmdHeader header;
__le32 cid;
@@ -58,7 +119,8 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
return 0;
- ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
+ ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
+ &ctx);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use context %u\n",
(unsigned) cmd->cid);
@@ -67,6 +129,8 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
sw_context->last_cid = cmd->cid;
sw_context->cid_valid = true;
+ sw_context->cur_ctx = ctx;
+ vmw_resource_to_validate_list(sw_context, &ctx);
return 0;
}
@@ -75,29 +139,45 @@ static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
uint32_t *sid)
{
+ struct vmw_surface *srf;
+ int ret;
+ struct vmw_resource *res;
+
if (*sid == SVGA3D_INVALID_ID)
return 0;
- if (unlikely((!sw_context->sid_valid ||
- *sid != sw_context->last_sid))) {
- int real_id;
- int ret = vmw_surface_check(dev_priv, sw_context->tfile,
- *sid, &real_id);
+ if (likely((sw_context->sid_valid &&
+ *sid == sw_context->last_sid))) {
+ *sid = sw_context->sid_translation;
+ return 0;
+ }
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could ot find or use surface 0x%08x "
- "address 0x%08lx\n",
- (unsigned int) *sid,
- (unsigned long) sid);
- return ret;
- }
+ ret = vmw_user_surface_lookup_handle(dev_priv,
+ sw_context->tfile,
+ *sid, &srf);
+ if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not find or use surface 0x%08x "
+ "address 0x%08lx\n",
+ (unsigned int) *sid,
+ (unsigned long) sid);
+ return ret;
+ }
- sw_context->last_sid = *sid;
- sw_context->sid_valid = true;
- *sid = real_id;
- sw_context->sid_translation = real_id;
- } else
- *sid = sw_context->sid_translation;
+ ret = vmw_surface_validate(dev_priv, srf);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Could not validate surface.\n");
+ vmw_surface_unreference(&srf);
+ return ret;
+ }
+
+ sw_context->last_sid = *sid;
+ sw_context->sid_valid = true;
+ sw_context->sid_translation = srf->res.id;
+ *sid = sw_context->sid_translation;
+
+ res = &srf->res;
+ vmw_resource_to_validate_list(sw_context, &res);
return 0;
}
@@ -166,6 +246,12 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
} *cmd;
cmd = container_of(header, struct vmw_sid_cmd, header);
+
+ if (unlikely(!sw_context->kernel)) {
+ DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
+ return -EPERM;
+ }
+
return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}
@@ -178,10 +264,179 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
SVGA3dCmdPresent body;
} *cmd;
+
cmd = container_of(header, struct vmw_sid_cmd, header);
+
+ if (unlikely(!sw_context->kernel)) {
+ DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
+ return -EPERM;
+ }
+
return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}
+/**
+ * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context for the next query.
+ * @new_query_bo: The new buffer holding query results.
+ * @sw_context: The software context used for this command submission.
+ *
+ * This function checks whether @new_query_bo is suitable for holding
+ * query results, and if another buffer currently is pinned for query
+ * results. If so, the function prepares the state of @sw_context for
+ * switching pinned buffers after successful submission of the current
+ * command batch. It also checks whether we're using a new query context.
+ * In that case, it makes sure we emit a query barrier for the old
+ * context before the current query buffer is fenced.
+ */
+static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
+ uint32_t cid,
+ struct ttm_buffer_object *new_query_bo,
+ struct vmw_sw_context *sw_context)
+{
+ int ret;
+ bool add_cid = false;
+ uint32_t cid_to_add;
+
+ if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
+
+ if (unlikely(new_query_bo->num_pages > 4)) {
+ DRM_ERROR("Query buffer too large.\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(sw_context->cur_query_bo != NULL)) {
+ BUG_ON(!sw_context->query_cid_valid);
+ add_cid = true;
+ cid_to_add = sw_context->cur_query_cid;
+ ret = vmw_bo_to_validate_list(sw_context,
+ sw_context->cur_query_bo,
+ DRM_VMW_FENCE_FLAG_EXEC,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ sw_context->cur_query_bo = new_query_bo;
+
+ ret = vmw_bo_to_validate_list(sw_context,
+ dev_priv->dummy_query_bo,
+ DRM_VMW_FENCE_FLAG_EXEC,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ }
+
+ if (unlikely(cid != sw_context->cur_query_cid &&
+ sw_context->query_cid_valid)) {
+ add_cid = true;
+ cid_to_add = sw_context->cur_query_cid;
+ }
+
+ sw_context->cur_query_cid = cid;
+ sw_context->query_cid_valid = true;
+
+ if (add_cid) {
+ struct vmw_resource *ctx = sw_context->cur_ctx;
+
+ if (list_empty(&ctx->query_head))
+ list_add_tail(&ctx->query_head,
+ &sw_context->query_list);
+ ret = vmw_bo_to_validate_list(sw_context,
+ dev_priv->dummy_query_bo,
+ DRM_VMW_FENCE_FLAG_EXEC,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ return 0;
+}
+
+
+/**
+ * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
+ *
+ * @dev_priv: The device private structure.
+ * @sw_context: The software context used for this command submission batch.
+ *
+ * This function will check if we're switching query buffers, and will then,
+ * if no other query waits are issued in this command submission batch,
+ * issue a dummy occlusion query wait used as a query barrier. When the fence
+ * object following that query wait has signaled, we are sure that all
+ * preceding queries have finished, and the old query buffer can be unpinned.
+ * However, since both the new query buffer and the old one are fenced with
+ * that fence, we can do an asynchronous unpin now, and be sure that the
+ * old query buffer won't be moved until the fence has signaled.
+ *
+ * As mentioned above, both the new and the old query buffers need to be
+ * fenced using a sequence emitted *after* calling this function.
+ */
+static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context)
+{
+
+ struct vmw_resource *ctx, *next_ctx;
+ int ret;
+
+ /*
+ * The validate list should still hold references to all
+ * contexts here.
+ */
+
+ list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
+ query_head) {
+ list_del_init(&ctx->query_head);
+
+ BUG_ON(list_empty(&ctx->validate_head));
+
+ ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
+
+ if (unlikely(ret != 0))
+ DRM_ERROR("Out of fifo space for dummy query.\n");
+ }
+
+ if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
+ if (dev_priv->pinned_bo) {
+ vmw_bo_pin(dev_priv->pinned_bo, false);
+ ttm_bo_unref(&dev_priv->pinned_bo);
+ }
+
+ vmw_bo_pin(sw_context->cur_query_bo, true);
+
+ /*
+	 * We also pin the dummy_query_bo buffer so that we
+ * don't need to validate it when emitting
+ * dummy queries in context destroy paths.
+ */
+
+ vmw_bo_pin(dev_priv->dummy_query_bo, true);
+ dev_priv->dummy_query_bo_pinned = true;
+
+ dev_priv->query_cid = sw_context->cur_query_cid;
+ dev_priv->pinned_bo =
+ ttm_bo_reference(sw_context->cur_query_bo);
+ }
+}
+
+/**
+ * vmw_query_switch_backoff - clear query barrier list
+ * @sw_context: The sw context used for this submission batch.
+ *
+ * This function is used as part of an error path, where a previously
+ * set up list of query barriers needs to be cleared.
+ *
+ */
+static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
+{
+ struct list_head *list, *next;
+
+ list_for_each_safe(list, next, &sw_context->query_list) {
+ list_del_init(list);
+ }
+}
+
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
@@ -191,8 +446,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
- uint32_t cur_validate_node;
- struct ttm_validate_buffer *val_buf;
int ret;
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
@@ -212,22 +465,11 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
- cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
- if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
- DRM_ERROR("Max number of DMA buffers per submission"
- " exceeded.\n");
- ret = -EINVAL;
+ ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
+ &reloc->index);
+ if (unlikely(ret != 0))
goto out_no_reloc;
- }
- reloc->index = cur_validate_node;
- if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
- val_buf = &sw_context->val_bufs[cur_validate_node];
- val_buf->bo = ttm_bo_reference(bo);
- val_buf->new_sync_obj_arg = (void *) dev_priv;
- list_add_tail(&val_buf->head, &sw_context->validate_nodes);
- ++sw_context->cur_val_buf;
- }
*vmw_bo_p = vmw_bo;
return 0;
@@ -259,8 +501,11 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
+ ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
+ &vmw_bo->base, sw_context);
+
vmw_dmabuf_unreference(&vmw_bo);
- return 0;
+ return ret;
}
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
@@ -273,6 +518,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForQuery q;
} *cmd;
int ret;
+ struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_query_cmd, header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -286,10 +532,19 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
return ret;
vmw_dmabuf_unreference(&vmw_bo);
+
+ /*
+ * This wait will act as a barrier for previous waits for this
+ * context.
+ */
+
+ ctx = sw_context->cur_ctx;
+ if (!list_empty(&ctx->query_head))
+ list_del_init(&ctx->query_head);
+
return 0;
}
-
static int vmw_cmd_dma(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -302,6 +557,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
+ struct vmw_resource *res;
cmd = container_of(header, struct vmw_dma_cmd, header);
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -318,18 +574,28 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
goto out_no_reloc;
}
- /**
+ ret = vmw_surface_validate(dev_priv, srf);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+			DRM_ERROR("Could not validate surface.\n");
+ goto out_no_validate;
+ }
+
+ /*
* Patch command stream with device SID.
*/
-
cmd->dma.host.sid = srf->res.id;
vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
- /**
- * FIXME: May deadlock here when called from the
- * command parsing code.
- */
- vmw_surface_unreference(&srf);
+ vmw_dmabuf_unreference(&vmw_bo);
+
+ res = &srf->res;
+ vmw_resource_to_validate_list(sw_context, &res);
+
+ return 0;
+
+out_no_validate:
+ vmw_surface_unreference(&srf);
out_no_reloc:
vmw_dmabuf_unreference(&vmw_bo);
return ret;
@@ -419,6 +685,71 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
return 0;
}
+static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ void *buf)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ int ret;
+
+ struct {
+ uint32_t header;
+ SVGAFifoCmdDefineGMRFB body;
+ } *cmd = buf;
+
+ ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->body.ptr,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_dmabuf_unreference(&vmw_bo);
+
+ return ret;
+}
+
+static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ void *buf, uint32_t *size)
+{
+ uint32_t size_remaining = *size;
+ uint32_t cmd_id;
+
+ cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+ switch (cmd_id) {
+ case SVGA_CMD_UPDATE:
+ *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
+ break;
+ case SVGA_CMD_DEFINE_GMRFB:
+ *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
+ break;
+ case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
+ *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+ break;
+ case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
+		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
+ break;
+ default:
+ DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
+ return -EINVAL;
+ }
+
+ if (*size > size_remaining) {
+ DRM_ERROR("Invalid SVGA command (size mismatch):"
+ " %u.\n", cmd_id);
+ return -EINVAL;
+ }
+
+ if (unlikely(!sw_context->kernel)) {
+ DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
+ return -EPERM;
+ }
+
+ if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
+ return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
+
+ return 0;
+}
typedef int (*vmw_cmd_func) (struct vmw_private *,
struct vmw_sw_context *,
@@ -471,11 +802,11 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
int ret;
- cmd_id = ((uint32_t *)buf)[0];
- if (cmd_id == SVGA_CMD_UPDATE) {
- *size = 5 << 2;
- return 0;
- }
+ cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+	/* Handle any non-3D commands */
+ if (unlikely(cmd_id < SVGA_CMD_MAX))
+ return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
+
cmd_id = le32_to_cpu(header->id);
*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
@@ -500,7 +831,8 @@ out_err:
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- void *buf, uint32_t size)
+ void *buf,
+ uint32_t size)
{
int32_t cur_size = size;
int ret;
@@ -550,7 +882,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
struct ttm_validate_buffer *entry, *next;
+ struct vmw_resource *res, *res_next;
+ /*
+ * Drop references to DMA buffers held during command submission.
+ */
list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
head) {
list_del(&entry->head);
@@ -559,6 +895,16 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
sw_context->cur_val_buf--;
}
BUG_ON(sw_context->cur_val_buf != 0);
+
+ /*
+ * Drop references to resources held during command submission.
+ */
+ vmw_resource_unreserve(&sw_context->resource_list);
+ list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
+ validate_head) {
+ list_del_init(&res->validate_head);
+ vmw_resource_unreference(&res);
+ }
}
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -566,6 +912,16 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
{
int ret;
+
+ /*
+ * Don't validate pinned buffers.
+ */
+
+ if (bo == dev_priv->pinned_bo ||
+ (bo == dev_priv->dummy_query_bo &&
+ dev_priv->dummy_query_bo_pinned))
+ return 0;
+
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
* If there is no space in VRAM and GMR ids are all used up,
@@ -602,57 +958,208 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
return 0;
}
-int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
+ uint32_t size)
+{
+ if (likely(sw_context->cmd_bounce_size >= size))
+ return 0;
+
+ if (sw_context->cmd_bounce_size == 0)
+ sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
+
+ while (sw_context->cmd_bounce_size < size) {
+ sw_context->cmd_bounce_size =
+ PAGE_ALIGN(sw_context->cmd_bounce_size +
+ (sw_context->cmd_bounce_size >> 1));
+ }
+
+ if (sw_context->cmd_bounce != NULL)
+ vfree(sw_context->cmd_bounce);
+
+ sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
+
+ if (sw_context->cmd_bounce == NULL) {
+ DRM_ERROR("Failed to allocate command bounce buffer.\n");
+ sw_context->cmd_bounce_size = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
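A rough worked illustration of the growth rule above, assuming 4 KiB pages: starting from the 32768-byte VMWGFX_CMD_BOUNCE_INIT_SIZE, successive resizes yield 49152, 73728 and then 110592 bytes, i.e. roughly a 1.5x increase rounded up to a page boundary per step, so the bounce buffer grows geometrically instead of being reallocated to fit every larger submission exactly.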
+/**
+ * vmw_execbuf_fence_commands - create and submit a command stream fence
+ *
+ * Creates a fence object and submits a command stream marker.
+ * If this fails for some reason, we sync the fifo and return a NULL fence.
+ * It is then safe to fence buffers with a NULL pointer.
+ *
+ * If @p_handle is not NULL @file_priv must also not be NULL. Creates
+ * a userspace handle if @p_handle is not NULL, otherwise not.
+ */
+
+int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+ struct vmw_private *dev_priv,
+ struct vmw_fence_obj **p_fence,
+ uint32_t *p_handle)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
- struct drm_vmw_fence_rep fence_rep;
- struct drm_vmw_fence_rep __user *user_fence_rep;
- int ret;
- void *user_cmd;
- void *cmd;
uint32_t sequence;
- struct vmw_sw_context *sw_context = &dev_priv->ctx;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret;
+ bool synced = false;
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
+ /* p_handle implies file_priv. */
+ BUG_ON(p_handle != NULL && file_priv == NULL);
- ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
+ ret = vmw_fifo_send_fence(dev_priv, &sequence);
if (unlikely(ret != 0)) {
- ret = -ERESTARTSYS;
- goto out_no_cmd_mutex;
+ DRM_ERROR("Fence submission error. Syncing.\n");
+ synced = true;
}
- cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving fifo space for commands.\n");
- ret = -ENOMEM;
- goto out_unlock;
+ if (p_handle != NULL)
+ ret = vmw_user_fence_create(file_priv, dev_priv->fman,
+ sequence,
+ DRM_VMW_FENCE_FLAG_EXEC,
+ p_fence, p_handle);
+ else
+ ret = vmw_fence_create(dev_priv->fman, sequence,
+ DRM_VMW_FENCE_FLAG_EXEC,
+ p_fence);
+
+ if (unlikely(ret != 0 && !synced)) {
+ (void) vmw_fallback_wait(dev_priv, false, false,
+ sequence, false,
+ VMW_FENCE_WAIT_TIMEOUT);
+ *p_fence = NULL;
}
- user_cmd = (void __user *)(unsigned long)arg->commands;
- ret = copy_from_user(cmd, user_cmd, arg->command_size);
+ return 0;
+}
- if (unlikely(ret != 0)) {
- ret = -EFAULT;
- DRM_ERROR("Failed copying commands.\n");
- goto out_commit;
+/**
+ * vmw_execbuf_copy_fence_user - copy fence object information to
+ * user-space.
+ *
+ * @dev_priv: Pointer to a vmw_private struct.
+ * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
+ * @ret: Return value from fence object creation.
+ * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
+ * which the information should be copied.
+ * @fence: Pointer to the fence object.
+ * @fence_handle: User-space fence handle.
+ *
+ * This function copies fence information to user-space. If copying fails,
+ * the user-space struct drm_vmw_fence_rep::error member is hopefully
+ * left untouched, and if user-space has preloaded it with -EFAULT, the
+ * error will be detected.
+ * Also if copying fails, user-space will be unable to signal the fence
+ * object so we wait for it immediately, and then unreference the
+ * user-space reference.
+ */
+void
+vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ struct vmw_fpriv *vmw_fp,
+ int ret,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct vmw_fence_obj *fence,
+ uint32_t fence_handle)
+{
+ struct drm_vmw_fence_rep fence_rep;
+
+ if (user_fence_rep == NULL)
+ return;
+
+ memset(&fence_rep, 0, sizeof(fence_rep));
+
+ fence_rep.error = ret;
+ if (ret == 0) {
+ BUG_ON(fence == NULL);
+
+ fence_rep.handle = fence_handle;
+ fence_rep.seqno = fence->seqno;
+ vmw_update_seqno(dev_priv, &dev_priv->fifo);
+ fence_rep.passed_seqno = dev_priv->last_read_seqno;
}
+ /*
+ * copy_to_user errors will be detected by user space not
+ * seeing fence_rep::error filled in. Typically
+ * user-space would have pre-set that member to -EFAULT.
+ */
+ ret = copy_to_user(user_fence_rep, &fence_rep,
+ sizeof(fence_rep));
+
+ /*
+ * User-space lost the fence object. We need to sync
+ * and unreference the handle.
+ */
+ if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+ ttm_ref_object_base_unref(vmw_fp->tfile,
+ fence_handle, TTM_REF_USAGE);
+ DRM_ERROR("Fence copy error. Syncing.\n");
+ (void) vmw_fence_obj_wait(fence, fence->signal_mask,
+ false, false,
+ VMW_FENCE_WAIT_TIMEOUT);
+ }
+}
+
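The -EFAULT pre-seeding convention that vmw_execbuf_copy_fence_user() relies on is easiest to see from the user-space side. A minimal sketch, assuming libdrm's drmCommandWrite(), the DRM_VMW_EXECBUF request index and the execbuf/fence structs from the vmwgfx UAPI header; the helper name, cmd and cmd_bytes are illustrative only:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>          /* drmCommandWrite() from libdrm */
#include <drm/vmwgfx_drm.h>   /* DRM_VMW_EXECBUF, drm_vmw_execbuf_arg, ... */

/* Submit a prepared command buffer and pick up the resulting fence info. */
static int example_submit(int fd, void *cmd, uint32_t cmd_bytes,
			  struct drm_vmw_fence_rep *fence_rep)
{
	struct drm_vmw_execbuf_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	memset(fence_rep, 0, sizeof(*fence_rep));

	/*
	 * Pre-seed the error member. If the kernel's copy_to_user() of the
	 * fence info fails, this value survives and the failure is still
	 * noticed below even though the ioctl itself may return 0.
	 */
	fence_rep->error = -EFAULT;

	arg.version = DRM_VMW_EXECBUF_VERSION;
	arg.commands = (uintptr_t) cmd;
	arg.command_size = cmd_bytes;
	arg.throttle_us = 0;
	arg.fence_rep = (uintptr_t) fence_rep;

	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
	if (ret)
		return ret;

	/* On success fence_rep->handle names the created fence object. */
	return fence_rep->error;
}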
+int vmw_execbuf_process(struct drm_file *file_priv,
+ struct vmw_private *dev_priv,
+ void __user *user_commands,
+ void *kernel_commands,
+ uint32_t command_size,
+ uint64_t throttle_us,
+ struct drm_vmw_fence_rep __user *user_fence_rep)
+{
+ struct vmw_sw_context *sw_context = &dev_priv->ctx;
+ struct vmw_fence_obj *fence;
+ uint32_t handle;
+ void *cmd;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
+ if (unlikely(ret != 0))
+ return -ERESTARTSYS;
+
+ if (kernel_commands == NULL) {
+ sw_context->kernel = false;
+
+ ret = vmw_resize_cmd_bounce(sw_context, command_size);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+
+
+ ret = copy_from_user(sw_context->cmd_bounce,
+ user_commands, command_size);
+
+ if (unlikely(ret != 0)) {
+ ret = -EFAULT;
+ DRM_ERROR("Failed copying commands.\n");
+ goto out_unlock;
+ }
+ kernel_commands = sw_context->cmd_bounce;
+ } else
+ sw_context->kernel = true;
+
sw_context->tfile = vmw_fpriv(file_priv)->tfile;
sw_context->cid_valid = false;
sw_context->sid_valid = false;
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
+ sw_context->fence_flags = 0;
+ INIT_LIST_HEAD(&sw_context->query_list);
+ INIT_LIST_HEAD(&sw_context->resource_list);
+ sw_context->cur_query_bo = dev_priv->pinned_bo;
+ sw_context->cur_query_cid = dev_priv->query_cid;
+ sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
INIT_LIST_HEAD(&sw_context->validate_nodes);
- ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
+ ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
+ command_size);
if (unlikely(ret != 0))
goto out_err;
+
ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
if (unlikely(ret != 0))
goto out_err;
@@ -663,57 +1170,206 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
vmw_apply_relocations(sw_context);
- if (arg->throttle_us) {
- ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
- arg->throttle_us);
+ if (throttle_us) {
+ ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
+ throttle_us);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_throttle;
}
- vmw_fifo_commit(dev_priv, arg->command_size);
-
- ret = vmw_fifo_send_fence(dev_priv, &sequence);
+ cmd = vmw_fifo_reserve(dev_priv, command_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving fifo space for commands.\n");
+ ret = -ENOMEM;
+ goto out_throttle;
+ }
- ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
- (void *)(unsigned long) sequence);
- vmw_clear_validations(sw_context);
- mutex_unlock(&dev_priv->cmdbuf_mutex);
+ memcpy(cmd, kernel_commands, command_size);
+ vmw_fifo_commit(dev_priv, command_size);
+ vmw_query_bo_switch_commit(dev_priv, sw_context);
+ ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+ &fence,
+ (user_fence_rep) ? &handle : NULL);
/*
* This error is harmless, because if fence submission fails,
- * vmw_fifo_send_fence will sync.
+ * vmw_fifo_send_fence will sync. The error will be propagated to
+ * user-space in @fence_rep
*/
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
- fence_rep.error = ret;
- fence_rep.fence_seq = (uint64_t) sequence;
- fence_rep.pad64 = 0;
-
- user_fence_rep = (struct drm_vmw_fence_rep __user *)
- (unsigned long)arg->fence_rep;
+ ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
+ (void *) fence);
- /*
- * copy_to_user errors will be detected by user space not
- * seeing fence_rep::error filled in.
- */
+ vmw_clear_validations(sw_context);
+ vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+ user_fence_rep, fence, handle);
- ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
- vmw_kms_cursor_post_execbuf(dev_priv);
- ttm_read_unlock(&vmaster->lock);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
return 0;
+
out_err:
vmw_free_relocations(sw_context);
+out_throttle:
+ vmw_query_switch_backoff(sw_context);
ttm_eu_backoff_reservation(&sw_context->validate_nodes);
vmw_clear_validations(sw_context);
-out_commit:
- vmw_fifo_commit(dev_priv, 0);
out_unlock:
mutex_unlock(&dev_priv->cmdbuf_mutex);
-out_no_cmd_mutex:
+ return ret;
+}
+
+/**
+ * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function is called to idle the fifo and unpin the query buffer
+ * if the normal way to do this hits an error, which should typically be
+ * extremely rare.
+ */
+static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
+{
+ DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
+
+ (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
+ vmw_bo_pin(dev_priv->pinned_bo, false);
+ vmw_bo_pin(dev_priv->dummy_query_bo, false);
+ dev_priv->dummy_query_bo_pinned = false;
+}
+
+
+/**
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * query bo.
+ *
+ * @dev_priv: The device private structure.
+ * @only_on_cid_match: Only flush and unpin if the current active query cid
+ * matches @cid.
+ * @cid: Optional context id to match.
+ *
+ * This function should be used to unpin the pinned query bo, or
+ * as a query barrier when we need to make sure that all queries have
+ * finished before the next fifo command. (For example on hardware
+ * context destructions where the hardware may otherwise leak unfinished
+ * queries).
+ *
+ * This function does not return any failure codes, but makes attempts
+ * to do safe unpinning in case of errors.
+ *
+ * The function will synchronize on the previous query barrier, and will
+ * thus not finish until that barrier has executed.
+ */
+void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+ bool only_on_cid_match, uint32_t cid)
+{
+ int ret = 0;
+ struct list_head validate_list;
+ struct ttm_validate_buffer pinned_val, query_val;
+ struct vmw_fence_obj *fence;
+
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+
+ if (dev_priv->pinned_bo == NULL)
+ goto out_unlock;
+
+ if (only_on_cid_match && cid != dev_priv->query_cid)
+ goto out_unlock;
+
+ INIT_LIST_HEAD(&validate_list);
+
+ pinned_val.new_sync_obj_arg = (void *)(unsigned long)
+ DRM_VMW_FENCE_FLAG_EXEC;
+ pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+ list_add_tail(&pinned_val.head, &validate_list);
+
+ query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
+ query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+ list_add_tail(&query_val.head, &validate_list);
+
+ do {
+ ret = ttm_eu_reserve_buffers(&validate_list);
+ } while (ret == -ERESTARTSYS);
+
+ if (unlikely(ret != 0)) {
+ vmw_execbuf_unpin_panic(dev_priv);
+ goto out_no_reserve;
+ }
+
+ ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+ if (unlikely(ret != 0)) {
+ vmw_execbuf_unpin_panic(dev_priv);
+ goto out_no_emit;
+ }
+
+ vmw_bo_pin(dev_priv->pinned_bo, false);
+ vmw_bo_pin(dev_priv->dummy_query_bo, false);
+ dev_priv->dummy_query_bo_pinned = false;
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
+
+ ttm_bo_unref(&query_val.bo);
+ ttm_bo_unref(&pinned_val.bo);
+ ttm_bo_unref(&dev_priv->pinned_bo);
+
+out_unlock:
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+ return;
+
+out_no_emit:
+ ttm_eu_backoff_reservation(&validate_list);
+out_no_reserve:
+ ttm_bo_unref(&query_val.bo);
+ ttm_bo_unref(&pinned_val.bo);
+ ttm_bo_unref(&dev_priv->pinned_bo);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
+
+
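For illustration, a hardware-context destructor is the kind of caller the only_on_cid_match path above is aimed at. A sketch of such use, assuming the resource carries a dev_priv back-pointer as in vmwgfx_drv.h; the function name is illustrative and the surrounding resource code is not part of this hunk:

/*
 * Sketch of a query-barrier user: before destroying a hardware context,
 * flush queries and unpin the pinned query bo if (and only if) it belongs
 * to the context being torn down, so the device cannot keep writing query
 * results on behalf of a context that is about to disappear.
 */
static void example_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;

	vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);

	/* ... then emit SVGA_3D_CMD_CONTEXT_DESTROY for res->id ... */
}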
+int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret;
+
+ /*
+ * This will allow us to extend the ioctl argument while
+ * maintaining backwards compatibility:
+ * We take different code paths depending on the value of
+ * arg->version.
+ */
+
+ if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
+ DRM_ERROR("Incorrect execbuf version.\n");
+ DRM_ERROR("You're running outdated experimental "
+ "vmwgfx user-space drivers.");
+ return -EINVAL;
+ }
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_execbuf_process(file_priv, dev_priv,
+ (void __user *)(unsigned long)arg->commands,
+ NULL, arg->command_size, arg->throttle_us,
+ (void __user *)(unsigned long)arg->fence_rep);
+
+ if (unlikely(ret != 0))
+ goto out_unlock;
+
+ vmw_kms_cursor_post_execbuf(dev_priv);
+
+out_unlock:
ttm_read_unlock(&vmaster->lock);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index bfab60c938a..34e51a1695b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -26,6 +26,8 @@
*
**************************************************************************/
+#include <linux/export.h>
+
#include "drmP.h"
#include "vmwgfx_drv.h"
@@ -158,10 +160,14 @@ static int vmw_fb_set_par(struct fb_info *info)
{
struct vmw_fb_par *par = info->par;
struct vmw_private *vmw_priv = par->vmw_priv;
+ int ret;
+
+ ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+ info->fix.line_length,
+ par->bpp, par->depth);
+ if (ret)
+ return ret;
- vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
- info->fix.line_length,
- par->bpp, par->depth);
if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
/* TODO check if pitch and offset changes */
vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
@@ -405,14 +411,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
struct fb_info *info;
unsigned initial_width, initial_height;
unsigned fb_width, fb_height;
- unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
+ unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
int ret;
/* XXX These shouldn't be hardcoded. */
initial_width = 800;
initial_height = 600;
- fb_bbp = 32;
+ fb_bpp = 32;
fb_depth = 24;
/* XXX As shouldn't these be as well. */
@@ -422,7 +428,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
initial_width = min(fb_width, initial_width);
initial_height = min(fb_height, initial_height);
- fb_pitch = fb_width * fb_bbp / 8;
+ fb_pitch = fb_width * fb_bpp / 8;
fb_size = fb_pitch * fb_height;
fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
@@ -437,7 +443,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
par = info->par;
par->vmw_priv = vmw_priv;
par->depth = fb_depth;
- par->bpp = fb_bbp;
+ par->bpp = fb_bpp;
par->vmalloc = NULL;
par->max_width = fb_width;
par->max_height = fb_height;
@@ -588,58 +594,6 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
return 0;
}
-int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *vmw_bo)
-{
- struct ttm_buffer_object *bo = &vmw_bo->base;
- int ret = 0;
-
- ret = ttm_bo_reserve(bo, false, false, false, 0);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
- ttm_bo_unreserve(bo);
-
- return ret;
-}
-
-int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
- struct vmw_dma_buffer *vmw_bo)
-{
- struct ttm_buffer_object *bo = &vmw_bo->base;
- struct ttm_placement ne_placement = vmw_vram_ne_placement;
- int ret = 0;
-
- ne_placement.lpfn = bo->num_pages;
-
- /* interuptable? */
- ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_reserve(bo, false, false, false, 0);
- if (unlikely(ret != 0))
- goto err_unlock;
-
- if (bo->mem.mem_type == TTM_PL_VRAM &&
- bo->mem.start < bo->num_pages &&
- bo->mem.start > 0)
- (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
- false, false);
-
- ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
-
- /* Could probably bug on */
- WARN_ON(bo->offset != 0);
-
- ttm_bo_unreserve(bo);
-err_unlock:
- ttm_write_unlock(&vmw_priv->active_master->lock);
-
- return ret;
-}
-
int vmw_fb_off(struct vmw_private *vmw_priv)
{
struct fb_info *info;
@@ -661,7 +615,7 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
par->bo_ptr = NULL;
ttm_bo_kunmap(&par->map);
- vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);
+ vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);
return 0;
}
@@ -687,7 +641,7 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
 	/* Make sure that all overlays are stopped when we take over */
vmw_overlay_stop_all(vmw_priv);
- ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
+ ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
if (unlikely(ret != 0)) {
DRM_ERROR("could not move buffer to start of VRAM\n");
goto err_no_buffer;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 61eacc1b5ca..15fb26088d6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,149 +25,1100 @@
*
**************************************************************************/
-
+#include "drmP.h"
#include "vmwgfx_drv.h"
-struct vmw_fence {
- struct list_head head;
- uint32_t sequence;
- struct timespec submitted;
+#define VMW_FENCE_WRAP (1 << 31)
+
+struct vmw_fence_manager {
+ int num_fence_objects;
+ struct vmw_private *dev_priv;
+ spinlock_t lock;
+ struct list_head fence_list;
+ struct work_struct work;
+ u32 user_fence_size;
+ u32 fence_size;
+ u32 event_fence_action_size;
+ bool fifo_down;
+ struct list_head cleanup_list;
+ uint32_t pending_actions[VMW_ACTION_MAX];
+ struct mutex goal_irq_mutex;
+ bool goal_irq_on; /* Protected by @goal_irq_mutex */
+ bool seqno_valid; /* Protected by @lock, and may not be set to true
+ without the @goal_irq_mutex held. */
};
-void vmw_fence_queue_init(struct vmw_fence_queue *queue)
+struct vmw_user_fence {
+ struct ttm_base_object base;
+ struct vmw_fence_obj fence;
+};
+
+/**
+ * struct vmw_event_fence_action - fence action that delivers a drm event.
+ *
+ * @e: A struct drm_pending_event that controls the event delivery.
+ * @action: A struct vmw_fence_action to hook up to a fence.
+ * @fence: A referenced pointer to the fence to keep it alive while @action
+ * hangs on it.
+ * @dev: Pointer to a struct drm_device so we can access the event stuff.
+ * @kref: Both @e and @action have destructors, so we need to refcount.
+ * @size: Size accounted for this object.
+ * @tv_sec: If non-null, the variable pointed to will be assigned the
+ * current time's tv_sec value when the fence signals.
+ * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
+ * be assigned the current time's tv_usec value when the fence signals.
+ */
+struct vmw_event_fence_action {
+ struct drm_pending_event e;
+ struct vmw_fence_action action;
+ struct vmw_fence_obj *fence;
+ struct drm_device *dev;
+ struct kref kref;
+ uint32_t size;
+ uint32_t *tv_sec;
+ uint32_t *tv_usec;
+};
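The @kref note above describes a two-owner lifetime: the drm event destructor and the fence action cleanup may run in either order, and whichever runs last must free the object. Below is a minimal stand-alone C sketch of that pattern, illustrative only and not part of the patch; it uses C11 atomics in place of the kernel's struct kref, and all names in it are invented.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct two_owner_obj {
	atomic_int refs;	/* one reference per pending destructor */
	char *payload;		/* stands in for the copied drm event */
};

static void two_owner_put(struct two_owner_obj *obj)
{
	/* Whoever drops the last reference frees both allocations. */
	if (atomic_fetch_sub(&obj->refs, 1) == 1) {
		free(obj->payload);
		free(obj);
	}
}

int main(void)
{
	struct two_owner_obj *obj = malloc(sizeof(*obj));

	atomic_init(&obj->refs, 2);	/* event delivery + action cleanup */
	obj->payload = malloc(64);

	two_owner_put(obj);		/* e.g. event delivered to user-space */
	two_owner_put(obj);		/* e.g. fence action cleanup ran */
	puts("freed exactly once");
	return 0;
}

In the driver, vmw_event_fence_action_delivered() and vmw_event_fence_action_cleanup() play the role of the two put calls.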
+
+/**
+ * Note on fencing subsystem usage of irqs:
+ * Typically the vmw_fences_update function is called
+ *
+ * a) When a new fence seqno has been submitted by the fifo code.
+ * b) On-demand when we have waiters. Sleeping waiters will switch on the
+ * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
+ * irq is received. When the last fence waiter is gone, that IRQ is masked
+ * away.
+ *
+ * In situations where there are no waiters and we don't submit any new fences,
+ * fence objects may not be signaled. This is perfectly OK, since there are
+ * no consumers of the signaled data, but that is NOT ok when there are fence
+ * actions attached to a fence. The fencing subsystem then makes use of the
+ * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
+ * which has an action attached, and each time vmw_fences_update is called,
+ * the subsystem makes sure the fence goal seqno is updated.
+ *
+ * The fence goal seqno irq is on as long as there are unsignaled fence
+ * objects with actions attached to them.
+ */
+
+static void vmw_fence_obj_destroy_locked(struct kref *kref)
+{
+ struct vmw_fence_obj *fence =
+ container_of(kref, struct vmw_fence_obj, kref);
+
+ struct vmw_fence_manager *fman = fence->fman;
+ unsigned int num_fences;
+
+ list_del_init(&fence->head);
+ num_fences = --fman->num_fence_objects;
+ spin_unlock_irq(&fman->lock);
+ if (fence->destroy)
+ fence->destroy(fence);
+ else
+ kfree(fence);
+
+ spin_lock_irq(&fman->lock);
+}
+
+
+/**
+ * Execute signal actions on fences recently signaled.
+ * This is done from a workqueue so we don't have to execute
+ * signal actions from atomic context.
+ */
+
+static void vmw_fence_work_func(struct work_struct *work)
{
- INIT_LIST_HEAD(&queue->head);
- queue->lag = ns_to_timespec(0);
- getrawmonotonic(&queue->lag_time);
- spin_lock_init(&queue->lock);
+ struct vmw_fence_manager *fman =
+ container_of(work, struct vmw_fence_manager, work);
+ struct list_head list;
+ struct vmw_fence_action *action, *next_action;
+ bool seqno_valid;
+
+ do {
+ INIT_LIST_HEAD(&list);
+ mutex_lock(&fman->goal_irq_mutex);
+
+ spin_lock_irq(&fman->lock);
+ list_splice_init(&fman->cleanup_list, &list);
+ seqno_valid = fman->seqno_valid;
+ spin_unlock_irq(&fman->lock);
+
+ if (!seqno_valid && fman->goal_irq_on) {
+ fman->goal_irq_on = false;
+ vmw_goal_waiter_remove(fman->dev_priv);
+ }
+ mutex_unlock(&fman->goal_irq_mutex);
+
+ if (list_empty(&list))
+ return;
+
+ /*
+ * At this point, only we should be able to manipulate the
+		 * list heads of the actions on our private list, hence
+		 * fman::lock need not be held.
+ */
+
+ list_for_each_entry_safe(action, next_action, &list, head) {
+ list_del_init(&action->head);
+ if (action->cleanup)
+ action->cleanup(action);
+ }
+ } while (1);
}
-void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
+struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
- struct vmw_fence *fence, *next;
+ struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
- spin_lock(&queue->lock);
- list_for_each_entry_safe(fence, next, &queue->head, head) {
- kfree(fence);
- }
- spin_unlock(&queue->lock);
+ if (unlikely(fman == NULL))
+ return NULL;
+
+ fman->dev_priv = dev_priv;
+ spin_lock_init(&fman->lock);
+ INIT_LIST_HEAD(&fman->fence_list);
+ INIT_LIST_HEAD(&fman->cleanup_list);
+ INIT_WORK(&fman->work, &vmw_fence_work_func);
+ fman->fifo_down = true;
+ fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
+ fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
+ fman->event_fence_action_size =
+ ttm_round_pot(sizeof(struct vmw_event_fence_action));
+ mutex_init(&fman->goal_irq_mutex);
+
+ return fman;
}
-int vmw_fence_push(struct vmw_fence_queue *queue,
- uint32_t sequence)
+void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
- struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+ unsigned long irq_flags;
+ bool lists_empty;
- if (unlikely(!fence))
- return -ENOMEM;
+ (void) cancel_work_sync(&fman->work);
- fence->sequence = sequence;
- getrawmonotonic(&fence->submitted);
- spin_lock(&queue->lock);
- list_add_tail(&fence->head, &queue->head);
- spin_unlock(&queue->lock);
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ lists_empty = list_empty(&fman->fence_list) &&
+ list_empty(&fman->cleanup_list);
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
- return 0;
+ BUG_ON(!lists_empty);
+ kfree(fman);
}
-int vmw_fence_pull(struct vmw_fence_queue *queue,
- uint32_t signaled_sequence)
+static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
+ struct vmw_fence_obj *fence,
+ u32 seqno,
+ uint32_t mask,
+ void (*destroy) (struct vmw_fence_obj *fence))
{
- struct vmw_fence *fence, *next;
- struct timespec now;
- bool updated = false;
+ unsigned long irq_flags;
+ unsigned int num_fences;
+ int ret = 0;
- spin_lock(&queue->lock);
- getrawmonotonic(&now);
+ fence->seqno = seqno;
+ INIT_LIST_HEAD(&fence->seq_passed_actions);
+ fence->fman = fman;
+ fence->signaled = 0;
+ fence->signal_mask = mask;
+ kref_init(&fence->kref);
+ fence->destroy = destroy;
+ init_waitqueue_head(&fence->queue);
- if (list_empty(&queue->head)) {
- queue->lag = ns_to_timespec(0);
- queue->lag_time = now;
- updated = true;
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ if (unlikely(fman->fifo_down)) {
+ ret = -EBUSY;
goto out_unlock;
}
+ list_add_tail(&fence->head, &fman->fence_list);
+ num_fences = ++fman->num_fence_objects;
- list_for_each_entry_safe(fence, next, &queue->head, head) {
- if (signaled_sequence - fence->sequence > (1 << 30))
- continue;
+out_unlock:
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
+ return ret;
- queue->lag = timespec_sub(now, fence->submitted);
- queue->lag_time = now;
- updated = true;
- list_del(&fence->head);
- kfree(fence);
+}
+
+struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
+{
+ if (unlikely(fence == NULL))
+ return NULL;
+
+ kref_get(&fence->kref);
+ return fence;
+}
+
+/**
+ * vmw_fence_obj_unreference
+ *
+ * Note that this function may not be entered with disabled irqs since
+ * it may re-enable them in the destroy function.
+ *
+ */
+void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
+{
+ struct vmw_fence_obj *fence = *fence_p;
+ struct vmw_fence_manager *fman;
+
+ if (unlikely(fence == NULL))
+ return;
+
+ fman = fence->fman;
+ *fence_p = NULL;
+ spin_lock_irq(&fman->lock);
+ BUG_ON(atomic_read(&fence->kref.refcount) == 0);
+ kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
+ spin_unlock_irq(&fman->lock);
+}
+
+void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
+ struct list_head *list)
+{
+ struct vmw_fence_action *action, *next_action;
+
+ list_for_each_entry_safe(action, next_action, list, head) {
+ list_del_init(&action->head);
+ fman->pending_actions[action->type]--;
+ if (action->seq_passed != NULL)
+ action->seq_passed(action);
+
+ /*
+ * Add the cleanup action to the cleanup list so that
+ * it will be performed by a worker task.
+ */
+
+ list_add_tail(&action->head, &fman->cleanup_list);
}
+}
+
+/**
+ * vmw_fence_goal_new_locked - Figure out a new device fence goal
+ * seqno if needed.
+ *
+ * @fman: Pointer to a fence manager.
+ * @passed_seqno: The seqno the device currently signals as passed.
+ *
+ * This function should be called with the fence manager lock held.
+ * It is typically called when we have a new passed_seqno, and
+ * we might need to update the fence goal. It checks to see whether
+ * the current fence goal has already passed, and, in that case,
+ * scans through all unsignaled fences to get the next fence object with an
+ * action attached, and sets the seqno of that fence as a new fence goal.
+ *
+ * returns true if the device goal seqno was updated. False otherwise.
+ */
+static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
+ u32 passed_seqno)
+{
+ u32 goal_seqno;
+ __le32 __iomem *fifo_mem;
+ struct vmw_fence_obj *fence;
+
+ if (likely(!fman->seqno_valid))
+ return false;
+
+ fifo_mem = fman->dev_priv->mmio_virt;
+ goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
+ return false;
+
+ fman->seqno_valid = false;
+ list_for_each_entry(fence, &fman->fence_list, head) {
+ if (!list_empty(&fence->seq_passed_actions)) {
+ fman->seqno_valid = true;
+ iowrite32(fence->seqno,
+ fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ break;
+ }
+ }
+
+ return true;
+}
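In other words, once the current goal has passed, the new goal becomes the seqno of the oldest still-unsignaled fence that has actions attached. Below is a stand-alone sketch of that selection over a plain array, illustrative only; the driver walks fman->fence_list under fman->lock instead, and pick_goal_seqno is an invented name.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pending_fence {
	uint32_t seqno;
	bool has_actions;
};

/*
 * After 'passed' has been reached, pick the next goal: the seqno of the
 * first fence in submission order that is still unsignaled and has
 * actions attached.  Returns true and stores it in *goal if one exists.
 */
static bool pick_goal_seqno(const struct pending_fence *fences, int n,
			    uint32_t passed, uint32_t *goal)
{
	for (int i = 0; i < n; ++i) {
		bool signaled = (passed - fences[i].seqno) < (1u << 31);

		if (!signaled && fences[i].has_actions) {
			*goal = fences[i].seqno;
			return true;
		}
	}
	return false;
}

int main(void)
{
	const struct pending_fence fences[] = {
		{ 10, false }, { 11, true }, { 12, false }, { 13, true },
	};
	uint32_t goal;

	if (pick_goal_seqno(fences, 4, 10, &goal))
		printf("new goal seqno: %u\n", goal);	/* prints 11 */
	return 0;
}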
-out_unlock:
- spin_unlock(&queue->lock);
- return (updated) ? 0 : -EBUSY;
+/**
+ * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
+ * needed.
+ *
+ * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
+ * considered as a device fence goal.
+ *
+ * This function should be called with the fence manager lock held.
+ * It is typically called when an action has been attached to a fence to
+ * check whether the seqno of that fence should be used for a fence
+ * goal interrupt. This is typically needed if the current fence goal is
+ * invalid, or has a higher seqno than that of the current fence object.
+ *
+ * returns true if the device goal seqno was updated. False otherwise.
+ */
+static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
+{
+ u32 goal_seqno;
+ __le32 __iomem *fifo_mem;
+
+ if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC)
+ return false;
+
+ fifo_mem = fence->fman->dev_priv->mmio_virt;
+ goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ if (likely(fence->fman->seqno_valid &&
+ goal_seqno - fence->seqno < VMW_FENCE_WRAP))
+ return false;
+
+ iowrite32(fence->seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ fence->fman->seqno_valid = true;
+
+ return true;
}
-static struct timespec vmw_timespec_add(struct timespec t1,
- struct timespec t2)
+void vmw_fences_update(struct vmw_fence_manager *fman)
{
- t1.tv_sec += t2.tv_sec;
- t1.tv_nsec += t2.tv_nsec;
- if (t1.tv_nsec >= 1000000000L) {
- t1.tv_sec += 1;
- t1.tv_nsec -= 1000000000L;
+ unsigned long flags;
+ struct vmw_fence_obj *fence, *next_fence;
+ struct list_head action_list;
+ bool needs_rerun;
+ uint32_t seqno, new_seqno;
+ __le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
+
+ seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+rerun:
+ spin_lock_irqsave(&fman->lock, flags);
+ list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
+ if (seqno - fence->seqno < VMW_FENCE_WRAP) {
+ list_del_init(&fence->head);
+ fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
+ INIT_LIST_HEAD(&action_list);
+ list_splice_init(&fence->seq_passed_actions,
+ &action_list);
+ vmw_fences_perform_actions(fman, &action_list);
+ wake_up_all(&fence->queue);
+ } else
+ break;
}
- return t1;
+ needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
+
+ if (!list_empty(&fman->cleanup_list))
+ (void) schedule_work(&fman->work);
+ spin_unlock_irqrestore(&fman->lock, flags);
+
+ /*
+ * Rerun if the fence goal seqno was updated, and the
+ * hardware might have raced with that update, so that
+ * we missed a fence_goal irq.
+ */
+
+ if (unlikely(needs_rerun)) {
+ new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ if (new_seqno != seqno) {
+ seqno = new_seqno;
+ goto rerun;
+ }
+ }
+}
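The signaled test above, (seqno - fence->seqno) < VMW_FENCE_WRAP, relies on unsigned 32-bit arithmetic so it keeps working when the seqno counter wraps. A tiny stand-alone demonstration follows, illustrative only; seqno_passed is an invented name.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WRAP (1u << 31)

static bool seqno_passed(uint32_t current, uint32_t fence_seqno)
{
	/* true iff fence_seqno is at most 2^31 - 1 steps behind current */
	return (current - fence_seqno) < WRAP;
}

int main(void)
{
	/* counter wrapped: 0xfffffff0 was submitted before 0x00000010 */
	printf("%d\n", seqno_passed(0x00000010u, 0xfffffff0u)); /* 1: passed */
	printf("%d\n", seqno_passed(0xfffffff0u, 0x00000010u)); /* 0: not yet */
	return 0;
}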
+
+bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
+ uint32_t flags)
+{
+ struct vmw_fence_manager *fman = fence->fman;
+ unsigned long irq_flags;
+ uint32_t signaled;
+
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ signaled = fence->signaled;
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+ flags &= fence->signal_mask;
+ if ((signaled & flags) == flags)
+ return 1;
+
+ if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0)
+ vmw_fences_update(fman);
+
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ signaled = fence->signaled;
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+ return ((signaled & flags) == flags);
}
-static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
+int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
+ uint32_t flags, bool lazy,
+ bool interruptible, unsigned long timeout)
{
- struct timespec now;
+ struct vmw_private *dev_priv = fence->fman->dev_priv;
+ long ret;
+
+ if (likely(vmw_fence_obj_signaled(fence, flags)))
+ return 0;
+
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+ vmw_seqno_waiter_add(dev_priv);
+
+ if (interruptible)
+ ret = wait_event_interruptible_timeout
+ (fence->queue,
+ vmw_fence_obj_signaled(fence, flags),
+ timeout);
+ else
+ ret = wait_event_timeout
+ (fence->queue,
+ vmw_fence_obj_signaled(fence, flags),
+ timeout);
+
+ vmw_seqno_waiter_remove(dev_priv);
+
+ if (unlikely(ret == 0))
+ ret = -EBUSY;
+ else if (likely(ret > 0))
+ ret = 0;
+
+ return ret;
+}
+
+void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
+{
+ struct vmw_private *dev_priv = fence->fman->dev_priv;
+
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+}
+
+static void vmw_fence_destroy(struct vmw_fence_obj *fence)
+{
+ struct vmw_fence_manager *fman = fence->fman;
+
+ kfree(fence);
+ /*
+ * Free kernel space accounting.
+ */
+ ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
+ fman->fence_size);
+}
+
+int vmw_fence_create(struct vmw_fence_manager *fman,
+ uint32_t seqno,
+ uint32_t mask,
+ struct vmw_fence_obj **p_fence)
+{
+ struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
+ struct vmw_fence_obj *fence;
+ int ret;
+
+ ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
+ false, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (unlikely(fence == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_object;
+ }
+
+ ret = vmw_fence_obj_init(fman, fence, seqno, mask,
+ vmw_fence_destroy);
+ if (unlikely(ret != 0))
+ goto out_err_init;
+
+ *p_fence = fence;
+ return 0;
+
+out_err_init:
+ kfree(fence);
+out_no_object:
+ ttm_mem_global_free(mem_glob, fman->fence_size);
+ return ret;
+}
+
+
+static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
+{
+ struct vmw_user_fence *ufence =
+ container_of(fence, struct vmw_user_fence, fence);
+ struct vmw_fence_manager *fman = fence->fman;
+
+ kfree(ufence);
+ /*
+ * Free kernel space accounting.
+ */
+ ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
+ fman->user_fence_size);
+}
+
+static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_fence *ufence =
+ container_of(base, struct vmw_user_fence, base);
+ struct vmw_fence_obj *fence = &ufence->fence;
+
+ *p_base = NULL;
+ vmw_fence_obj_unreference(&fence);
+}
+
+int vmw_user_fence_create(struct drm_file *file_priv,
+ struct vmw_fence_manager *fman,
+ uint32_t seqno,
+ uint32_t mask,
+ struct vmw_fence_obj **p_fence,
+ uint32_t *p_handle)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_user_fence *ufence;
+ struct vmw_fence_obj *tmp;
+ struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
+ int ret;
+
+ /*
+ * Kernel memory space accounting, since this object may
+ * be created by a user-space request.
+ */
+
+ ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
+ false, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
+ if (unlikely(ufence == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_object;
+ }
+
+ ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
+ mask, vmw_user_fence_destroy);
+ if (unlikely(ret != 0)) {
+ kfree(ufence);
+ goto out_no_object;
+ }
+
+ /*
+ * The base object holds a reference which is freed in
+ * vmw_user_fence_base_release.
+ */
+ tmp = vmw_fence_obj_reference(&ufence->fence);
+ ret = ttm_base_object_init(tfile, &ufence->base, false,
+ VMW_RES_FENCE,
+ &vmw_user_fence_base_release, NULL);
- spin_lock(&queue->lock);
- getrawmonotonic(&now);
- queue->lag = vmw_timespec_add(queue->lag,
- timespec_sub(now, queue->lag_time));
- queue->lag_time = now;
- spin_unlock(&queue->lock);
- return queue->lag;
+
+ if (unlikely(ret != 0)) {
+ /*
+ * Free the base object's reference
+ */
+ vmw_fence_obj_unreference(&tmp);
+ goto out_err;
+ }
+
+ *p_fence = &ufence->fence;
+ *p_handle = ufence->base.hash.key;
+
+ return 0;
+out_err:
+ tmp = &ufence->fence;
+ vmw_fence_obj_unreference(&tmp);
+out_no_object:
+ ttm_mem_global_free(mem_glob, fman->user_fence_size);
+ return ret;
}
-static bool vmw_lag_lt(struct vmw_fence_queue *queue,
- uint32_t us)
+/**
+ * vmw_fence_fifo_down - signal all unsignaled fence objects.
+ */
+
+void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
+{
+ unsigned long irq_flags;
+ struct list_head action_list;
+ int ret;
+
+ /*
+ * The list may be altered while we traverse it, so always
+ * restart when we've released the fman->lock.
+ */
+
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ fman->fifo_down = true;
+ while (!list_empty(&fman->fence_list)) {
+ struct vmw_fence_obj *fence =
+ list_entry(fman->fence_list.prev, struct vmw_fence_obj,
+ head);
+ kref_get(&fence->kref);
+ spin_unlock_irq(&fman->lock);
+
+ ret = vmw_fence_obj_wait(fence, fence->signal_mask,
+ false, false,
+ VMW_FENCE_WAIT_TIMEOUT);
+
+ if (unlikely(ret != 0)) {
+ list_del_init(&fence->head);
+ fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
+ INIT_LIST_HEAD(&action_list);
+ list_splice_init(&fence->seq_passed_actions,
+ &action_list);
+ vmw_fences_perform_actions(fman, &action_list);
+ wake_up_all(&fence->queue);
+ }
+
+ spin_lock_irq(&fman->lock);
+
+ BUG_ON(!list_empty(&fence->head));
+ kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
+ }
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
+}
+
+void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
- struct timespec lag, cond;
+ unsigned long irq_flags;
- cond = ns_to_timespec((s64) us * 1000);
- lag = vmw_fifo_lag(queue);
- return (timespec_compare(&lag, &cond) < 1);
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ fman->fifo_down = false;
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
}
-int vmw_wait_lag(struct vmw_private *dev_priv,
- struct vmw_fence_queue *queue, uint32_t us)
+
+int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- struct vmw_fence *fence;
- uint32_t sequence;
+ struct drm_vmw_fence_wait_arg *arg =
+ (struct drm_vmw_fence_wait_arg *)data;
+ unsigned long timeout;
+ struct ttm_base_object *base;
+ struct vmw_fence_obj *fence;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
+ uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
+
+ /*
+ * 64-bit division not present on 32-bit systems, so do an
+ * approximation. (Divide by 1000000).
+ */
+
+ wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
+ (wait_timeout >> 26);
+
+ if (!arg->cookie_valid) {
+ arg->cookie_valid = 1;
+ arg->kernel_cookie = jiffies + wait_timeout;
+ }
+
+ base = ttm_base_object_lookup(tfile, arg->handle);
+ if (unlikely(base == NULL)) {
+ printk(KERN_ERR "Wait invalid fence object handle "
+ "0x%08lx.\n",
+ (unsigned long)arg->handle);
+ return -EINVAL;
+ }
+
+ fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+
+ timeout = jiffies;
+ if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
+ ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
+ 0 : -EBUSY);
+ goto out;
+ }
+
+ timeout = (unsigned long)arg->kernel_cookie - timeout;
+
+ ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);
+
+out:
+ ttm_base_object_unref(&base);
+
+ /*
+ * Optionally unref the fence object.
+ */
+
+ if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
+ return ttm_ref_object_base_unref(tfile, arg->handle,
+ TTM_REF_USAGE);
+ return ret;
+}
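The shift expression used above to scale timeout_us * HZ into jiffies approximates a divide by 1000000, since 2^-20 + 2^-24 - 2^-26 is about 0.998e-6; the result comes out roughly 0.2% short, which is harmless for a wait timeout. A stand-alone check of the approximation follows, illustrative only; approx_div_1e6 is an invented name and HZ = 250 is just an example value.

#include <stdint.h>
#include <stdio.h>

/* (x >> 20) + (x >> 24) - (x >> 26)  ~=  x / 1000000, slightly low */
static uint64_t approx_div_1e6(uint64_t x)
{
	return (x >> 20) + (x >> 24) - (x >> 26);
}

int main(void)
{
	uint64_t us = 2500000;		/* 2.5 s expressed in microseconds */
	uint64_t hz = 250;		/* example CONFIG_HZ value */
	uint64_t product = us * hz;

	printf("exact:  %llu jiffies\n",
	       (unsigned long long)(product / 1000000));
	printf("approx: %llu jiffies\n",
	       (unsigned long long)approx_div_1e6(product));
	return 0;
}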
+
+int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_fence_signaled_arg *arg =
+ (struct drm_vmw_fence_signaled_arg *) data;
+ struct ttm_base_object *base;
+ struct vmw_fence_obj *fence;
+ struct vmw_fence_manager *fman;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ base = ttm_base_object_lookup(tfile, arg->handle);
+ if (unlikely(base == NULL)) {
+ printk(KERN_ERR "Fence signaled invalid fence object handle "
+ "0x%08lx.\n",
+ (unsigned long)arg->handle);
+ return -EINVAL;
+ }
+
+ fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+ fman = fence->fman;
+
+ arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
+ spin_lock_irq(&fman->lock);
+
+ arg->signaled_flags = fence->signaled;
+ arg->passed_seqno = dev_priv->last_read_seqno;
+ spin_unlock_irq(&fman->lock);
+
+ ttm_base_object_unref(&base);
+
+ return 0;
+}
+
+
+int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_fence_arg *arg =
+ (struct drm_vmw_fence_arg *) data;
+
+ return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ arg->handle,
+ TTM_REF_USAGE);
+}
+
+/**
+ * vmw_event_fence_action_destroy
+ *
+ * @kref: The struct kref embedded in a struct vmw_event_fence_action.
+ *
+ * The vmw_event_fence_action destructor that may be called either after
+ * the fence action cleanup, or when the event is delivered.
+ * It frees both the vmw_event_fence_action struct and the actual
+ * event structure copied to user-space.
+ */
+static void vmw_event_fence_action_destroy(struct kref *kref)
+{
+ struct vmw_event_fence_action *eaction =
+ container_of(kref, struct vmw_event_fence_action, kref);
+ struct ttm_mem_global *mem_glob =
+ vmw_mem_glob(vmw_priv(eaction->dev));
+ uint32_t size = eaction->size;
+
+ kfree(eaction->e.event);
+ kfree(eaction);
+ ttm_mem_global_free(mem_glob, size);
+}
+
+
+/**
+ * vmw_event_fence_action_delivered
+ *
+ * @e: The struct drm_pending_event embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * The struct drm_pending_event destructor that is called by drm
+ * once the event is delivered. Since we don't know whether this function
+ * will be called before or after the fence action destructor, we
+ * drop a refcount and destroy the object when it reaches zero.
+ */
+static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
+{
+ struct vmw_event_fence_action *eaction =
+ container_of(e, struct vmw_event_fence_action, e);
+
+ kref_put(&eaction->kref, vmw_event_fence_action_destroy);
+}
+
+
+/**
+ * vmw_event_fence_action_seq_passed
+ *
+ * @action: The struct vmw_fence_action embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * This function is called when the seqno of the fence where @action is
+ * attached has passed. It queues the event on the submitter's event list.
+ * This function is always called from atomic context, and may be called
+ * from irq context. It ups a refcount reflecting that we now have two
+ * destructors.
+ */
+static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
+{
+ struct vmw_event_fence_action *eaction =
+ container_of(action, struct vmw_event_fence_action, action);
+ struct drm_device *dev = eaction->dev;
+ struct drm_file *file_priv = eaction->e.file_priv;
+ unsigned long irq_flags;
+
+ kref_get(&eaction->kref);
+ spin_lock_irqsave(&dev->event_lock, irq_flags);
+
+ if (likely(eaction->tv_sec != NULL)) {
+ struct timeval tv;
+
+ do_gettimeofday(&tv);
+ *eaction->tv_sec = tv.tv_sec;
+ *eaction->tv_usec = tv.tv_usec;
+ }
+
+ list_add_tail(&eaction->e.link, &file_priv->event_list);
+ wake_up_all(&file_priv->event_wait);
+ spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+}
+
+/**
+ * vmw_event_fence_action_cleanup
+ *
+ * @action: The struct vmw_fence_action embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * This function is the struct vmw_fence_action destructor. It's typically
+ * called from a workqueue.
+ */
+static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
+{
+ struct vmw_event_fence_action *eaction =
+ container_of(action, struct vmw_event_fence_action, action);
+
+ vmw_fence_obj_unreference(&eaction->fence);
+ kref_put(&eaction->kref, vmw_event_fence_action_destroy);
+}
+
+
+/**
+ * vmw_fence_obj_add_action - Add an action to a fence object.
+ *
+ * @fence - The fence object.
+ * @action - The action to add.
+ *
+ * Note that the action callbacks may be executed before this function
+ * returns.
+ */
+void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
+ struct vmw_fence_action *action)
+{
+ struct vmw_fence_manager *fman = fence->fman;
+ unsigned long irq_flags;
+ bool run_update = false;
+
+ mutex_lock(&fman->goal_irq_mutex);
+ spin_lock_irqsave(&fman->lock, irq_flags);
+
+ fman->pending_actions[action->type]++;
+ if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) {
+ struct list_head action_list;
- while (!vmw_lag_lt(queue, us)) {
- spin_lock(&queue->lock);
- if (list_empty(&queue->head))
- sequence = atomic_read(&dev_priv->fence_seq);
- else {
- fence = list_first_entry(&queue->head,
- struct vmw_fence, head);
- sequence = fence->sequence;
+ INIT_LIST_HEAD(&action_list);
+ list_add_tail(&action->head, &action_list);
+ vmw_fences_perform_actions(fman, &action_list);
+ } else {
+ list_add_tail(&action->head, &fence->seq_passed_actions);
+
+ /*
+ * This function may set fman::seqno_valid, so it must
+ * be run with the goal_irq_mutex held.
+ */
+ run_update = vmw_fence_goal_check_locked(fence);
+ }
+
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+ if (run_update) {
+ if (!fman->goal_irq_on) {
+ fman->goal_irq_on = true;
+ vmw_goal_waiter_add(fman->dev_priv);
}
- spin_unlock(&queue->lock);
+ vmw_fences_update(fman);
+ }
+ mutex_unlock(&fman->goal_irq_mutex);
- ret = vmw_wait_fence(dev_priv, false, sequence, true,
- 3*HZ);
+}
- if (unlikely(ret != 0))
- return ret;
+/**
+ * vmw_event_fence_action_create - Post an event for sending when a fence
+ * object seqno has passed.
+ *
+ * @file_priv: The file connection on which the event should be posted.
+ * @fence: The fence object on which to post the event.
+ * @event: Event to be posted. This event should have been allocated
+ * using k[mz]alloc, and should be completely initialized.
+ * @interruptible: Interruptible waits if possible.
+ *
+ * As a side effect, the object pointed to by @event may have been
+ * freed when this function returns. If this function returns with
+ * an error code, the caller needs to free that object.
+ */
+
+int vmw_event_fence_action_create(struct drm_file *file_priv,
+ struct vmw_fence_obj *fence,
+ struct drm_event *event,
+ uint32_t *tv_sec,
+ uint32_t *tv_usec,
+ bool interruptible)
+{
+ struct vmw_event_fence_action *eaction;
+ struct ttm_mem_global *mem_glob =
+ vmw_mem_glob(fence->fman->dev_priv);
+ struct vmw_fence_manager *fman = fence->fman;
+ uint32_t size = fman->event_fence_action_size +
+ ttm_round_pot(event->length);
+ int ret;
+
+ /*
+ * Account for internal structure size as well as the
+ * event size itself.
+ */
+
+ ret = ttm_mem_global_alloc(mem_glob, size, false, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
- (void) vmw_fence_pull(queue, sequence);
+ eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
+ if (unlikely(eaction == NULL)) {
+ ttm_mem_global_free(mem_glob, size);
+ return -ENOMEM;
}
+
+ eaction->e.event = event;
+ eaction->e.file_priv = file_priv;
+ eaction->e.destroy = vmw_event_fence_action_delivered;
+
+ eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
+ eaction->action.cleanup = vmw_event_fence_action_cleanup;
+ eaction->action.type = VMW_ACTION_EVENT;
+
+ eaction->fence = vmw_fence_obj_reference(fence);
+ eaction->dev = fman->dev_priv->dev;
+ eaction->size = size;
+ eaction->tv_sec = tv_sec;
+ eaction->tv_usec = tv_usec;
+
+ kref_init(&eaction->kref);
+ vmw_fence_obj_add_action(fence, &eaction->action);
+
return 0;
}
+int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_fence_event_arg *arg =
+ (struct drm_vmw_fence_event_arg *) data;
+ struct vmw_fence_obj *fence = NULL;
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct drm_vmw_fence_rep __user *user_fence_rep =
+ (struct drm_vmw_fence_rep __user *)(unsigned long)
+ arg->fence_rep;
+ uint32_t handle;
+ unsigned long irq_flags;
+ struct drm_vmw_event_fence *event;
+ int ret;
+
+ /*
+ * Look up an existing fence object,
+ * and if user-space wants a new reference,
+ * add one.
+ */
+ if (arg->handle) {
+ struct ttm_base_object *base =
+ ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Fence event invalid fence object handle "
+ "0x%08lx.\n",
+ (unsigned long)arg->handle);
+ return -EINVAL;
+ }
+ fence = &(container_of(base, struct vmw_user_fence,
+ base)->fence);
+ (void) vmw_fence_obj_reference(fence);
+
+ if (user_fence_rep != NULL) {
+ bool existed;
+
+ ret = ttm_ref_object_add(vmw_fp->tfile, base,
+ TTM_REF_USAGE, &existed);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to reference a fence "
+ "object.\n");
+ goto out_no_ref_obj;
+ }
+ handle = base->hash.key;
+ }
+ ttm_base_object_unref(&base);
+ }
+
+ /*
+ * Create a new fence object.
+ */
+ if (!fence) {
+ ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+ &fence,
+ (user_fence_rep) ?
+ &handle : NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Fence event failed to create fence.\n");
+ return ret;
+ }
+ }
+
+ BUG_ON(fence == NULL);
+
+ spin_lock_irqsave(&dev->event_lock, irq_flags);
+
+ ret = (file_priv->event_space < sizeof(*event)) ? -EBUSY : 0;
+ if (likely(ret == 0))
+ file_priv->event_space -= sizeof(*event);
+
+ spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate event space for this file.\n");
+ goto out_no_event_space;
+ }
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (unlikely(event == NULL)) {
+ DRM_ERROR("Failed to allocate an event.\n");
+ goto out_no_event;
+ }
+
+ event->base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
+ event->base.length = sizeof(*event);
+ event->user_data = arg->user_data;
+
+ if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
+ ret = vmw_event_fence_action_create(file_priv, fence,
+ &event->base,
+ &event->tv_sec,
+ &event->tv_usec,
+ true);
+ else
+ ret = vmw_event_fence_action_create(file_priv, fence,
+ &event->base,
+ NULL,
+ NULL,
+ true);
+
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to attach event to fence.\n");
+ goto out_no_attach;
+ }
+
+ vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
+ handle);
+ vmw_fence_obj_unreference(&fence);
+ return 0;
+out_no_attach:
+ kfree(event);
+out_no_event:
+ spin_lock_irqsave(&dev->event_lock, irq_flags);
+ file_priv->event_space += sizeof(*event);
+ spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+out_no_event_space:
+ if (user_fence_rep != NULL)
+ ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ handle, TTM_REF_USAGE);
+out_no_ref_obj:
+ vmw_fence_obj_unreference(&fence);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
new file mode 100644
index 00000000000..0854a2096b5
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -0,0 +1,113 @@
+/**************************************************************************
+ *
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_FENCE_H_
+#define _VMWGFX_FENCE_H_
+
+#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
+
+struct vmw_private;
+
+struct vmw_fence_manager;
+
+/**
+ * enum vmw_action_type - Type of a fence action.
+ *
+ * @VMW_ACTION_EVENT: The action delivers a drm event when the fence
+ * seqno has passed.
+ * @VMW_ACTION_MAX: Number of action types, used to size the per-type
+ * pending-action counters.
+ */
+enum vmw_action_type {
+ VMW_ACTION_EVENT = 0,
+ VMW_ACTION_MAX
+};
+
+struct vmw_fence_action {
+ struct list_head head;
+ enum vmw_action_type type;
+ void (*seq_passed) (struct vmw_fence_action *action);
+ void (*cleanup) (struct vmw_fence_action *action);
+};
+
+struct vmw_fence_obj {
+ struct kref kref;
+ u32 seqno;
+
+ struct vmw_fence_manager *fman;
+ struct list_head head;
+ uint32_t signaled;
+ uint32_t signal_mask;
+ struct list_head seq_passed_actions;
+ void (*destroy)(struct vmw_fence_obj *fence);
+ wait_queue_head_t queue;
+};
+
+extern struct vmw_fence_manager *
+vmw_fence_manager_init(struct vmw_private *dev_priv);
+
+extern void vmw_fence_manager_takedown(struct vmw_fence_manager *fman);
+
+extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p);
+
+extern struct vmw_fence_obj *
+vmw_fence_obj_reference(struct vmw_fence_obj *fence);
+
+extern void vmw_fences_update(struct vmw_fence_manager *fman);
+
+extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
+ uint32_t flags);
+
+extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence, uint32_t flags,
+ bool lazy,
+ bool interruptible, unsigned long timeout);
+
+extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence);
+
+extern int vmw_fence_create(struct vmw_fence_manager *fman,
+ uint32_t seqno,
+ uint32_t mask,
+ struct vmw_fence_obj **p_fence);
+
+extern int vmw_user_fence_create(struct drm_file *file_priv,
+ struct vmw_fence_manager *fman,
+ uint32_t sequence,
+ uint32_t mask,
+ struct vmw_fence_obj **p_fence,
+ uint32_t *p_handle);
+
+extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
+
+extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);
+
+extern int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+extern int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#endif /* _VMWGFX_FENCE_H_ */
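As a rough orientation to the interface declared above, in-driver usage follows an emit / create / wait / unreference pattern along the lines of the sketch below. This is an illustrative sketch only, not code from the patch: example_emit_and_wait is an invented name, the includes are approximate, and real callers (such as vmw_execbuf_fence_commands()) add further error handling and user-space fence handles.

/* Illustrative sketch only. */
#include "vmwgfx_drv.h"
#include "vmwgfx_fence.h"

static int example_emit_and_wait(struct vmw_private *dev_priv,
				 struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence = NULL;
	uint32_t seqno;
	int ret;

	/* Emit a fence marker into the fifo and get its seqno back. */
	ret = vmw_fifo_send_fence(dev_priv, &seqno);
	if (ret != 0)
		return ret;

	/* Wrap the seqno in a refcounted fence object. */
	ret = vmw_fence_create(fman, seqno, DRM_VMW_FENCE_FLAG_EXEC, &fence);
	if (ret != 0)
		return ret;

	/* Interruptibly wait for the device to pass the seqno. */
	ret = vmw_fence_obj_wait(fence, DRM_VMW_FENCE_FLAG_EXEC,
				 false, true, VMW_FENCE_WAIT_TIMEOUT);

	/* Drop our reference; the object is freed with the last ref. */
	vmw_fence_obj_unreference(&fence);
	return ret;
}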
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 635c0ffee7f..03bbc2a6f9a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -45,7 +45,11 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (hwversion == 0)
return false;
- if (hwversion < SVGA3D_HWVERSION_WS65_B1)
+ if (hwversion < SVGA3D_HWVERSION_WS8_B1)
+ return false;
+
+ /* Non-Screen Object path does not support surfaces */
+ if (!dev_priv->sou_priv)
return false;
return true;
@@ -72,22 +76,12 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
uint32_t max;
uint32_t min;
uint32_t dummy;
- int ret;
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = vmalloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL))
return -ENOMEM;
- fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
- fifo->last_data_size = 0;
- fifo->last_buffer_add = false;
- fifo->last_buffer = vmalloc(fifo->last_buffer_size);
- if (unlikely(fifo->last_buffer == NULL)) {
- ret = -ENOMEM;
- goto out_err;
- }
-
fifo->dynamic_buffer = NULL;
fifo->reserved_size = 0;
fifo->using_bounce_buffer = false;
@@ -137,14 +131,10 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
(unsigned int) min,
(unsigned int) fifo->capabilities);
- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
- iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
- vmw_fence_queue_init(&fifo->fence_queue);
+ atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+ iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
+ vmw_marker_queue_init(&fifo->marker_queue);
return vmw_fifo_send_fence(dev_priv, &dummy);
-out_err:
- vfree(fifo->static_buffer);
- fifo->static_buffer = NULL;
- return ret;
}
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
@@ -170,7 +160,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
- dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
dev_priv->config_done_state);
@@ -180,12 +170,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
dev_priv->traces_state);
mutex_unlock(&dev_priv->hw_mutex);
- vmw_fence_queue_takedown(&fifo->fence_queue);
-
- if (likely(fifo->last_buffer != NULL)) {
- vfree(fifo->last_buffer);
- fifo->last_buffer = NULL;
- }
+ vmw_marker_queue_takedown(&fifo->marker_queue);
if (likely(fifo->static_buffer != NULL)) {
vfree(fifo->static_buffer);
@@ -262,9 +247,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FIFO_PROGRESS,
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
- vmw_write(dev_priv, SVGA_REG_IRQMASK,
- vmw_read(dev_priv, SVGA_REG_IRQMASK) |
- SVGA_IRQFLAG_FIFO_PROGRESS);
+ dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
@@ -286,9 +270,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
mutex_lock(&dev_priv->hw_mutex);
if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
- vmw_write(dev_priv, SVGA_REG_IRQMASK,
- vmw_read(dev_priv, SVGA_REG_IRQMASK) &
- ~SVGA_IRQFLAG_FIFO_PROGRESS);
+ dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
@@ -296,6 +279,16 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
return ret;
}
+/**
+ * Reserve @bytes number of bytes in the fifo.
+ *
+ * This function will return NULL (error) on two conditions:
+ * If it times out waiting for fifo space, or if @bytes is larger than the
+ * available fifo space.
+ *
+ * Returns:
+ * Pointer to the fifo, or null on error (possible hardware hang).
+ */
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
@@ -466,7 +459,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
mutex_unlock(&fifo_state->fifo_mutex);
}
-int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
+int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
@@ -476,16 +469,16 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
fm = vmw_fifo_reserve(dev_priv, bytes);
if (unlikely(fm == NULL)) {
- *sequence = atomic_read(&dev_priv->fence_seq);
+ *seqno = atomic_read(&dev_priv->marker_seq);
ret = -ENOMEM;
- (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
+ (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
false, 3*HZ);
goto out_err;
}
do {
- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
- } while (*sequence == 0);
+ *seqno = atomic_add_return(1, &dev_priv->marker_seq);
+ } while (*seqno == 0);
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
@@ -502,61 +495,68 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
cmd_fence = (struct svga_fifo_cmd_fence *)
((unsigned long)fm + sizeof(__le32));
- iowrite32(*sequence, &cmd_fence->fence);
- fifo_state->last_buffer_add = true;
+ iowrite32(*seqno, &cmd_fence->fence);
vmw_fifo_commit(dev_priv, bytes);
- fifo_state->last_buffer_add = false;
- (void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
- vmw_update_sequence(dev_priv, fifo_state);
+ (void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
+ vmw_update_seqno(dev_priv, fifo_state);
out_err:
return ret;
}
/**
- * Map the first page of the FIFO read-only to user-space.
+ * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * This function is used to emit a dummy occlusion query with
+ * no primitives rendered between query begin and query end.
+ * It's used to provide a query barrier, in order to know that when
+ * this query is finished, all preceding queries are also finished.
+ *
+ * A query result structure should have been initialized at the start
+ * of the dev_priv->dummy_query_bo buffer object, and that buffer object
+ * must be either reserved or pinned when this function is called.
+ *
+ * Returns -ENOMEM on failure to reserve fifo space.
*/
-
-static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+ uint32_t cid)
{
- int ret;
- unsigned long address = (unsigned long)vmf->virtual_address;
+ /*
+ * A query wait without a preceding query end will
+ * actually finish all queries for this cid
+ * without writing to the query result structure.
+ */
- if (address != vma->vm_start)
- return VM_FAULT_SIGBUS;
+ struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdWaitForQuery body;
+ } *cmd;
- ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
- if (likely(ret == -EBUSY || ret == 0))
- return VM_FAULT_NOPAGE;
- else if (ret == -ENOMEM)
- return VM_FAULT_OOM;
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- return VM_FAULT_SIGBUS;
-}
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Out of fifo space for dummy query.\n");
+ return -ENOMEM;
+ }
-static struct vm_operations_struct vmw_fifo_vm_ops = {
- .fault = vmw_fifo_vm_fault,
- .open = NULL,
- .close = NULL
-};
+ cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = cid;
+ cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
+
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
+ cmd->body.guestResult.offset = bo->offset;
+ } else {
+ cmd->body.guestResult.gmrId = bo->mem.start;
+ cmd->body.guestResult.offset = 0;
+ }
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
-int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv;
- struct vmw_private *dev_priv;
-
- file_priv = filp->private_data;
- dev_priv = vmw_priv(file_priv->minor->dev);
-
- if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
- (vma->vm_end - vma->vm_start) != PAGE_SIZE)
- return -EINVAL;
-
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
- vma->vm_page_prot);
- vma->vm_ops = &vmw_fifo_vm_ops;
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index de0c5948521..f4e7763a769 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,6 +29,77 @@
#include "drmP.h"
#include "ttm/ttm_bo_driver.h"
+#define VMW_PPN_SIZE sizeof(unsigned long)
+
+static int vmw_gmr2_bind(struct vmw_private *dev_priv,
+ struct page *pages[],
+ unsigned long num_pages,
+ int gmr_id)
+{
+ SVGAFifoCmdDefineGMR2 define_cmd;
+ SVGAFifoCmdRemapGMR2 remap_cmd;
+ uint32_t define_size = sizeof(define_cmd) + 4;
+ uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
+ uint32_t *cmd;
+ uint32_t *cmd_orig;
+ uint32_t i;
+
+ cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
+ if (unlikely(cmd == NULL))
+ return -ENOMEM;
+
+ define_cmd.gmrId = gmr_id;
+ define_cmd.numPages = num_pages;
+
+ remap_cmd.gmrId = gmr_id;
+ remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
+ SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
+ remap_cmd.offsetPages = 0;
+ remap_cmd.numPages = num_pages;
+
+ *cmd++ = SVGA_CMD_DEFINE_GMR2;
+ memcpy(cmd, &define_cmd, sizeof(define_cmd));
+ cmd += sizeof(define_cmd) / sizeof(uint32);
+
+ *cmd++ = SVGA_CMD_REMAP_GMR2;
+ memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+ cmd += sizeof(remap_cmd) / sizeof(uint32);
+
+ for (i = 0; i < num_pages; ++i) {
+		if (VMW_PPN_SIZE <= 4)
+ *cmd = page_to_pfn(*pages++);
+ else
+ *((uint64_t *)cmd) = page_to_pfn(*pages++);
+
+ cmd += VMW_PPN_SIZE / sizeof(*cmd);
+ }
+
+ vmw_fifo_commit(dev_priv, define_size + remap_size);
+
+ return 0;
+}
+
+static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
+ int gmr_id)
+{
+ SVGAFifoCmdDefineGMR2 define_cmd;
+ uint32_t define_size = sizeof(define_cmd) + 4;
+ uint32_t *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, define_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("GMR2 unbind failed.\n");
+ return;
+ }
+ define_cmd.gmrId = gmr_id;
+ define_cmd.numPages = 0;
+
+ *cmd++ = SVGA_CMD_DEFINE_GMR2;
+ memcpy(cmd, &define_cmd, sizeof(define_cmd));
+
+ vmw_fifo_commit(dev_priv, define_size);
+}
+
/**
* FIXME: Adjust to the ttm lowmem / highmem storage to minimize
* the number of used descriptors.
@@ -170,6 +241,9 @@ int vmw_gmr_bind(struct vmw_private *dev_priv,
struct list_head desc_pages;
int ret;
+ if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
+ return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);
+
if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
return -EINVAL;
@@ -192,6 +266,11 @@ int vmw_gmr_bind(struct vmw_private *dev_priv,
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
+ if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
+ vmw_gmr2_unbind(dev_priv, gmr_id);
+ return;
+ }
+
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
wmb();
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index ac6e0d1bd62..5f717152cff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -40,6 +40,8 @@ struct vmwgfx_gmrid_man {
spinlock_t lock;
struct ida gmr_ida;
uint32_t max_gmr_ids;
+ uint32_t max_gmr_pages;
+ uint32_t used_gmr_pages;
};
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
@@ -49,33 +51,50 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
{
struct vmwgfx_gmrid_man *gman =
(struct vmwgfx_gmrid_man *)man->priv;
- int ret;
+ int ret = 0;
int id;
mem->mm_node = NULL;
- do {
- if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0))
- return -ENOMEM;
+ spin_lock(&gman->lock);
+
+ if (gman->max_gmr_pages > 0) {
+ gman->used_gmr_pages += bo->num_pages;
+ if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
+ goto out_err_locked;
+ }
+ do {
+ spin_unlock(&gman->lock);
+ if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
spin_lock(&gman->lock);
- ret = ida_get_new(&gman->gmr_ida, &id);
+ ret = ida_get_new(&gman->gmr_ida, &id);
if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
ida_remove(&gman->gmr_ida, id);
- spin_unlock(&gman->lock);
- return 0;
+ ret = 0;
+ goto out_err_locked;
}
-
- spin_unlock(&gman->lock);
-
} while (ret == -EAGAIN);
if (likely(ret == 0)) {
mem->mm_node = gman;
mem->start = id;
- }
+ mem->num_pages = bo->num_pages;
+ } else
+ goto out_err_locked;
+
+ spin_unlock(&gman->lock);
+ return 0;
+out_err:
+ spin_lock(&gman->lock);
+out_err_locked:
+ gman->used_gmr_pages -= bo->num_pages;
+ spin_unlock(&gman->lock);
return ret;
}
@@ -88,6 +107,7 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
if (mem->mm_node) {
spin_lock(&gman->lock);
ida_remove(&gman->gmr_ida, mem->start);
+ gman->used_gmr_pages -= mem->num_pages;
spin_unlock(&gman->lock);
mem->mm_node = NULL;
}
@@ -96,6 +116,8 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
+ struct vmw_private *dev_priv =
+ container_of(man->bdev, struct vmw_private, bdev);
struct vmwgfx_gmrid_man *gman =
kzalloc(sizeof(*gman), GFP_KERNEL);
@@ -103,6 +125,8 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
return -ENOMEM;
spin_lock_init(&gman->lock);
+ gman->max_gmr_pages = dev_priv->max_gmr_pages;
+ gman->used_gmr_pages = 0;
ida_init(&gman->gmr_ida);
gman->max_gmr_ids = p_size;
man->priv = (void *) gman;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 570d57775a5..3f6343502d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -27,6 +27,7 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
+#include "vmwgfx_kms.h"
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -45,9 +46,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_3D:
param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
break;
- case DRM_VMW_PARAM_FIFO_OFFSET:
- param->value = dev_priv->mmio_start;
- break;
case DRM_VMW_PARAM_HW_CAPS:
param->value = dev_priv->capabilities;
break;
@@ -57,6 +55,13 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_MAX_FB_SIZE:
param->value = dev_priv->vram_size;
break;
+ case DRM_VMW_PARAM_FIFO_HW_VERSION:
+ {
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+ param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+ break;
+ }
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
@@ -66,25 +71,259 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
return 0;
}
-int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
+
+int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_vmw_get_3d_cap_arg *arg =
+ (struct drm_vmw_get_3d_cap_arg *) data;
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
- struct drm_vmw_fifo_debug_arg *arg =
- (struct drm_vmw_fifo_debug_arg *)data;
- __le32 __user *buffer = (__le32 __user *)
- (unsigned long)arg->debug_buffer;
+ uint32_t size;
+ __le32 __iomem *fifo_mem;
+ void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
+ void *bounce;
+ int ret;
- if (unlikely(fifo_state->last_buffer == NULL))
+ if (unlikely(arg->pad64 != 0)) {
+ DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
+ }
+
+ size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2;
+
+ if (arg->max_size < size)
+ size = arg->max_size;
+
+ bounce = vmalloc(size);
+ if (unlikely(bounce == NULL)) {
+ DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
+ return -ENOMEM;
+ }
+
+ fifo_mem = dev_priv->mmio_virt;
+ memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+
+ ret = copy_to_user(buffer, bounce, size);
+ vfree(bounce);
+
+ if (unlikely(ret != 0))
+ DRM_ERROR("Failed to report 3D caps info.\n");
+
+ return ret;
+}
+
+int vmw_present_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_present_arg *arg =
+ (struct drm_vmw_present_arg *)data;
+ struct vmw_surface *surface;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ struct drm_vmw_rect __user *clips_ptr;
+ struct drm_vmw_rect *clips = NULL;
+ struct drm_mode_object *obj;
+ struct vmw_framebuffer *vfb;
+ uint32_t num_clips;
+ int ret;
+
+ num_clips = arg->num_clips;
+ clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+
+ if (unlikely(num_clips == 0))
+ return 0;
+
+ if (clips_ptr == NULL) {
+ DRM_ERROR("Variable clips_ptr must be specified.\n");
+ ret = -EINVAL;
+ goto out_clips;
+ }
+
+ clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ if (clips == NULL) {
+ DRM_ERROR("Failed to allocate clip rect list.\n");
+ ret = -ENOMEM;
+ goto out_clips;
+ }
+
+ ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
+ if (ret) {
+ DRM_ERROR("Failed to copy clip rects from userspace.\n");
+ ret = -EFAULT;
+ goto out_no_copy;
+ }
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (unlikely(ret != 0)) {
+ ret = -ERESTARTSYS;
+ goto out_no_mode_mutex;
+ }
+
+ obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ DRM_ERROR("Invalid framebuffer id.\n");
+ ret = -EINVAL;
+ goto out_no_fb;
+ }
+
+ vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
+ if (!vfb->dmabuf) {
+ DRM_ERROR("Framebuffer not dmabuf backed.\n");
+ ret = -EINVAL;
+ goto out_no_fb;
+ }
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ goto out_no_ttm_lock;
+
+ ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid,
+ &surface);
+ if (ret)
+ goto out_no_surface;
+
+ ret = vmw_kms_present(dev_priv, file_priv,
+ vfb, surface, arg->sid,
+ arg->dest_x, arg->dest_y,
+ clips, num_clips);
- if (arg->debug_buffer_size < fifo_state->last_data_size) {
- arg->used_size = arg->debug_buffer_size;
- arg->did_not_fit = 1;
- } else {
- arg->used_size = fifo_state->last_data_size;
- arg->did_not_fit = 0;
+	/* vmw_user_surface_lookup takes one ref, so does new_fb */
+ vmw_surface_unreference(&surface);
+
+out_no_surface:
+ ttm_read_unlock(&vmaster->lock);
+out_no_ttm_lock:
+out_no_fb:
+ mutex_unlock(&dev->mode_config.mutex);
+out_no_mode_mutex:
+out_no_copy:
+ kfree(clips);
+out_clips:
+ return ret;
+}
+
+int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_present_readback_arg *arg =
+ (struct drm_vmw_present_readback_arg *)data;
+ struct drm_vmw_fence_rep __user *user_fence_rep =
+ (struct drm_vmw_fence_rep __user *)
+ (unsigned long)arg->fence_rep;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ struct drm_vmw_rect __user *clips_ptr;
+ struct drm_vmw_rect *clips = NULL;
+ struct drm_mode_object *obj;
+ struct vmw_framebuffer *vfb;
+ uint32_t num_clips;
+ int ret;
+
+ num_clips = arg->num_clips;
+ clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+
+ if (unlikely(num_clips == 0))
+ return 0;
+
+ if (clips_ptr == NULL) {
+ DRM_ERROR("Argument clips_ptr must be specified.\n");
+ ret = -EINVAL;
+ goto out_clips;
+ }
+
+ clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ if (clips == NULL) {
+ DRM_ERROR("Failed to allocate clip rect list.\n");
+ ret = -ENOMEM;
+ goto out_clips;
+ }
+
+ ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
+ if (ret) {
+ DRM_ERROR("Failed to copy clip rects from userspace.\n");
+ ret = -EFAULT;
+ goto out_no_copy;
+ }
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (unlikely(ret != 0)) {
+ ret = -ERESTARTSYS;
+ goto out_no_mode_mutex;
+ }
+
+ obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ DRM_ERROR("Invalid framebuffer id.\n");
+ ret = -EINVAL;
+ goto out_no_fb;
+ }
+
+ vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
+ if (!vfb->dmabuf) {
+ DRM_ERROR("Framebuffer not dmabuf backed.\n");
+ ret = -EINVAL;
+ goto out_no_fb;
}
- return copy_to_user(buffer, fifo_state->last_buffer, arg->used_size);
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ goto out_no_ttm_lock;
+
+ ret = vmw_kms_readback(dev_priv, file_priv,
+ vfb, user_fence_rep,
+ clips, num_clips);
+
+ ttm_read_unlock(&vmaster->lock);
+out_no_ttm_lock:
+out_no_fb:
+ mutex_unlock(&dev->mode_config.mutex);
+out_no_mode_mutex:
+out_no_copy:
+ kfree(clips);
+out_clips:
+ return ret;
+}
+
+
+/**
+ * vmw_fops_poll - wrapper around the drm_poll function
+ *
+ * @filp: See the linux fops poll documentation.
+ * @wait: See the linux fops poll documentation.
+ *
+ * Wrapper around the drm_poll function that makes sure the device is
+ * processing the fifo if drm_poll decides to wait.
+ */
+unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct vmw_private *dev_priv =
+ vmw_priv(file_priv->minor->dev);
+
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+ return drm_poll(filp, wait);
+}
+
+
+/**
+ * vmw_fops_read - wrapper around the drm_read function
+ *
+ * @filp: See the linux fops read documentation.
+ * @buffer: See the linux fops read documentation.
+ * @count: See the linux fops read documentation.
+ * @offset: See the linux fops read documentation.
+ *
+ * Wrapper around the drm_read function that makes sure the device is
+ * processing the fifo if drm_read decides to wait.
+ */
+ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct vmw_private *dev_priv =
+ vmw_priv(file_priv->minor->dev);
+
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+ return drm_read(filp, buffer, count, offset);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index e92298a6a38..cabc95f7517 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -34,26 +34,33 @@ irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *)arg;
struct vmw_private *dev_priv = vmw_priv(dev);
- uint32_t status;
+ uint32_t status, masked_status;
spin_lock(&dev_priv->irq_lock);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ masked_status = status & dev_priv->irq_mask;
spin_unlock(&dev_priv->irq_lock);
- if (status & SVGA_IRQFLAG_ANY_FENCE)
+ if (likely(status))
+ outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+
+ if (!masked_status)
+ return IRQ_NONE;
+
+ if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
+ SVGA_IRQFLAG_FENCE_GOAL)) {
+ vmw_fences_update(dev_priv->fman);
wake_up_all(&dev_priv->fence_queue);
- if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
+ }
+
+ if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
wake_up_all(&dev_priv->fifo_queue);
- if (likely(status)) {
- outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
+ return IRQ_HANDLED;
}
-static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
+static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
uint32_t busy;
@@ -64,43 +71,43 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
return (busy == 0);
}
-void vmw_update_sequence(struct vmw_private *dev_priv,
+void vmw_update_seqno(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
- uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
-
- if (dev_priv->last_read_sequence != sequence) {
- dev_priv->last_read_sequence = sequence;
- vmw_fence_pull(&fifo_state->fence_queue, sequence);
+ if (dev_priv->last_read_seqno != seqno) {
+ dev_priv->last_read_seqno = seqno;
+ vmw_marker_pull(&fifo_state->marker_queue, seqno);
+ vmw_fences_update(dev_priv->fman);
}
}
-bool vmw_fence_signaled(struct vmw_private *dev_priv,
- uint32_t sequence)
+bool vmw_seqno_passed(struct vmw_private *dev_priv,
+ uint32_t seqno)
{
struct vmw_fifo_state *fifo_state;
bool ret;
- if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+ if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return true;
fifo_state = &dev_priv->fifo;
- vmw_update_sequence(dev_priv, fifo_state);
- if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+ vmw_update_seqno(dev_priv, fifo_state);
+ if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return true;
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
- vmw_fifo_idle(dev_priv, sequence))
+ vmw_fifo_idle(dev_priv, seqno))
return true;
/**
- * Then check if the sequence is higher than what we've actually
+ * Then check if the seqno is higher than what we've actually
* emitted. Then the fence is stale and signaled.
*/
- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+ ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
> VMW_FENCE_WRAP);
return ret;
@@ -109,7 +116,7 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
int vmw_fallback_wait(struct vmw_private *dev_priv,
bool lazy,
bool fifo_idle,
- uint32_t sequence,
+ uint32_t seqno,
bool interruptible,
unsigned long timeout)
{
@@ -123,7 +130,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
DEFINE_WAIT(__wait);
wait_condition = (fifo_idle) ? &vmw_fifo_idle :
- &vmw_fence_signaled;
+ &vmw_seqno_passed;
/**
* Block command submission while waiting for idle.
@@ -131,14 +138,14 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
if (fifo_idle)
down_read(&fifo_state->rwsem);
- signal_seq = atomic_read(&dev_priv->fence_seq);
+ signal_seq = atomic_read(&dev_priv->marker_seq);
ret = 0;
for (;;) {
prepare_to_wait(&dev_priv->fence_queue, &__wait,
(interruptible) ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
- if (wait_condition(dev_priv, sequence))
+ if (wait_condition(dev_priv, seqno))
break;
if (time_after_eq(jiffies, end_jiffies)) {
DRM_ERROR("SVGA device lockup.\n");
@@ -175,68 +182,110 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
return ret;
}
-int vmw_wait_fence(struct vmw_private *dev_priv,
- bool lazy, uint32_t sequence,
- bool interruptible, unsigned long timeout)
+void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+{
+ mutex_lock(&dev_priv->hw_mutex);
+ if (dev_priv->fence_queue_waiters++ == 0) {
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ outl(SVGA_IRQFLAG_ANY_FENCE,
+ dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+}
+
+void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+{
+ mutex_lock(&dev_priv->hw_mutex);
+ if (--dev_priv->fence_queue_waiters == 0) {
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+}
+
+
+void vmw_goal_waiter_add(struct vmw_private *dev_priv)
+{
+ mutex_lock(&dev_priv->hw_mutex);
+ if (dev_priv->goal_queue_waiters++ == 0) {
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ outl(SVGA_IRQFLAG_FENCE_GOAL,
+ dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+}
+
+void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
+{
+ mutex_lock(&dev_priv->hw_mutex);
+ if (--dev_priv->goal_queue_waiters == 0) {
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+}
+
+int vmw_wait_seqno(struct vmw_private *dev_priv,
+ bool lazy, uint32_t seqno,
+ bool interruptible, unsigned long timeout)
{
long ret;
- unsigned long irq_flags;
struct vmw_fifo_state *fifo = &dev_priv->fifo;
- if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+ if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return 0;
- if (likely(vmw_fence_signaled(dev_priv, sequence)))
+ if (likely(vmw_seqno_passed(dev_priv, seqno)))
return 0;
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
- return vmw_fallback_wait(dev_priv, lazy, true, sequence,
+ return vmw_fallback_wait(dev_priv, lazy, true, seqno,
interruptible, timeout);
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
- return vmw_fallback_wait(dev_priv, lazy, false, sequence,
+ return vmw_fallback_wait(dev_priv, lazy, false, seqno,
interruptible, timeout);
- mutex_lock(&dev_priv->hw_mutex);
- if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
- spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
- outl(SVGA_IRQFLAG_ANY_FENCE,
- dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
- vmw_write(dev_priv, SVGA_REG_IRQMASK,
- vmw_read(dev_priv, SVGA_REG_IRQMASK) |
- SVGA_IRQFLAG_ANY_FENCE);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
- }
- mutex_unlock(&dev_priv->hw_mutex);
+ vmw_seqno_waiter_add(dev_priv);
if (interruptible)
ret = wait_event_interruptible_timeout
(dev_priv->fence_queue,
- vmw_fence_signaled(dev_priv, sequence),
+ vmw_seqno_passed(dev_priv, seqno),
timeout);
else
ret = wait_event_timeout
(dev_priv->fence_queue,
- vmw_fence_signaled(dev_priv, sequence),
+ vmw_seqno_passed(dev_priv, seqno),
timeout);
+ vmw_seqno_waiter_remove(dev_priv);
+
if (unlikely(ret == 0))
ret = -EBUSY;
else if (likely(ret > 0))
ret = 0;
- mutex_lock(&dev_priv->hw_mutex);
- if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
- spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
- vmw_write(dev_priv, SVGA_REG_IRQMASK,
- vmw_read(dev_priv, SVGA_REG_IRQMASK) &
- ~SVGA_IRQFLAG_ANY_FENCE);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
- }
- mutex_unlock(&dev_priv->hw_mutex);
-
return ret;
}
@@ -273,25 +322,3 @@ void vmw_irq_uninstall(struct drm_device *dev)
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
-
-#define VMW_FENCE_WAIT_TIMEOUT 3*HZ;
-
-int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_fence_wait_arg *arg =
- (struct drm_vmw_fence_wait_arg *)data;
- unsigned long timeout;
-
- if (!arg->cookie_valid) {
- arg->cookie_valid = 1;
- arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
- }
-
- timeout = jiffies;
- if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
- return -EBUSY;
-
- timeout = (unsigned long)arg->kernel_cookie - timeout;
- return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index dfe32e62bd9..880e285d757 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -27,12 +27,10 @@
#include "vmwgfx_kms.h"
+
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
-static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
-static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
-
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
if (du->cursor_surface)
@@ -107,12 +105,17 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
struct vmw_dma_buffer *dmabuf = NULL;
int ret;
+ /* A lot of the code assumes this */
+ if (handle && (width != 64 || height != 64))
+ return -EINVAL;
+
if (handle) {
ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
handle, &surface);
if (!ret) {
if (!surface->snooper.image) {
DRM_ERROR("surface not suitable for cursor\n");
+ vmw_surface_unreference(&surface);
return -EINVAL;
}
} else {
@@ -178,7 +181,9 @@ err_unreserve:
return 0;
}
- vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);
+ vmw_cursor_update_position(dev_priv, true,
+ du->cursor_x + du->hotspot_x,
+ du->cursor_y + du->hotspot_y);
return 0;
}
@@ -193,7 +198,8 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
du->cursor_y = y + crtc->y;
vmw_cursor_update_position(dev_priv, shown,
- du->cursor_x, du->cursor_y);
+ du->cursor_x + du->hotspot_x,
+ du->cursor_y + du->hotspot_y);
return 0;
}
@@ -214,7 +220,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
- int ret;
+ int i, ret;
cmd = container_of(header, struct vmw_dma_cmd, header);
@@ -236,16 +242,19 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
sizeof(SVGA3dCopyBox);
- if (cmd->dma.guest.pitch != (64 * 4) ||
- cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
box->x != 0 || box->y != 0 || box->z != 0 ||
box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
- box->w != 64 || box->h != 64 || box->d != 1 ||
- box_count != 1) {
+ box->d != 1 || box_count != 1) {
/* TODO handle none page aligned offsets */
- /* TODO handle partial uploads and pitch != 256 */
- /* TODO handle more then one copy (size != 64) */
- DRM_ERROR("lazy programmer, can't handle weird stuff\n");
+ /* TODO handle more dst & src != 0 */
+		/* TODO handle more than one copy */
+		DRM_ERROR("Can't snoop dma request for cursor!\n");
+ DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
+ box->srcx, box->srcy, box->srcz,
+ box->x, box->y, box->z,
+ box->w, box->h, box->d, box_count,
+ cmd->dma.guest.ptr.offset);
return;
}
@@ -264,7 +273,16 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
virtual = ttm_kmap_obj_virtual(&map, &dummy);
- memcpy(srf->snooper.image, virtual, 64*64*4);
+ if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
+ memcpy(srf->snooper.image, virtual, 64*64*4);
+ } else {
+ /* Image is unsigned pointer. */
+ for (i = 0; i < box->h; i++)
+ memcpy(srf->snooper.image + i * 64,
+ virtual + i * cmd->dma.guest.pitch,
+ box->w * 4);
+ }
+
srf->snooper.age++;
/* we can't call this function from this function since execbuf has
@@ -329,41 +347,10 @@ struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
struct vmw_dma_buffer *buffer;
- struct delayed_work d_work;
- struct mutex work_lock;
- bool present_fs;
struct list_head head;
struct drm_master *master;
};
-/**
- * vmw_kms_idle_workqueues - Flush workqueues on this master
- *
- * @vmaster - Pointer identifying the master, for the surfaces of which
- * we idle the dirty work queues.
- *
- * This function should be called with the ttm lock held in exclusive mode
- * to idle all dirty work queues before the fifo is taken down.
- *
- * The work task may actually requeue itself, but after the flush returns we're
- * sure that there's nothing to present, since the ttm lock is held in
- * exclusive mode, so the fifo will never get used.
- */
-
-void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
-{
- struct vmw_framebuffer_surface *entry;
-
- mutex_lock(&vmaster->fb_surf_mutex);
- list_for_each_entry(entry, &vmaster->fb_surf, head) {
- if (cancel_delayed_work_sync(&entry->d_work))
- (void) entry->d_work.work.func(&entry->d_work.work);
-
- (void) cancel_delayed_work_sync(&entry->d_work);
- }
- mutex_unlock(&vmaster->fb_surf_mutex);
-}
-
void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
@@ -375,65 +362,129 @@ void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
list_del(&vfbs->head);
mutex_unlock(&vmaster->fb_surf_mutex);
- cancel_delayed_work_sync(&vfbs->d_work);
drm_master_put(&vfbs->master);
drm_framebuffer_cleanup(framebuffer);
vmw_surface_unreference(&vfbs->surface);
+ ttm_base_object_unref(&vfbs->base.user_obj);
kfree(vfbs);
}
-static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
+static int do_surface_dirty_sou(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int inc)
{
- struct delayed_work *d_work =
- container_of(work, struct delayed_work, work);
- struct vmw_framebuffer_surface *vfbs =
- container_of(d_work, struct vmw_framebuffer_surface, d_work);
- struct vmw_surface *surf = vfbs->surface;
- struct drm_framebuffer *framebuffer = &vfbs->base.base;
- struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+ struct drm_clip_rect *clips_ptr;
+ struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_crtc *crtc;
+ size_t fifo_size;
+ int i, num_units;
+ int ret = 0; /* silence warning */
+ int left, right, top, bottom;
struct {
SVGA3dCmdHeader header;
- SVGA3dCmdPresent body;
- SVGA3dCopyRect cr;
+ SVGA3dCmdBlitSurfaceToScreen body;
} *cmd;
+ SVGASignedRect *blits;
- /**
- * Strictly we should take the ttm_lock in read mode before accessing
- * the fifo, to make sure the fifo is present and up. However,
- * instead we flush all workqueues under the ttm lock in exclusive mode
- * before taking down the fifo.
- */
- mutex_lock(&vfbs->work_lock);
- if (!vfbs->present_fs)
- goto out_unlock;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL))
- goto out_resched;
-
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
- cmd->body.sid = cpu_to_le32(surf->res.id);
- cmd->cr.x = cpu_to_le32(0);
- cmd->cr.y = cpu_to_le32(0);
- cmd->cr.srcx = cmd->cr.x;
- cmd->cr.srcy = cmd->cr.y;
- cmd->cr.w = cpu_to_le32(framebuffer->width);
- cmd->cr.h = cpu_to_le32(framebuffer->height);
- vfbs->present_fs = false;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
-out_resched:
- /**
- * Will not re-add if already pending.
- */
- schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
-out_unlock:
- mutex_unlock(&vfbs->work_lock);
-}
+ num_units = 0;
+ list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
+ head) {
+ if (crtc->fb != &framebuffer->base)
+ continue;
+ units[num_units++] = vmw_crtc_to_du(crtc);
+ }
+
+ BUG_ON(!clips || !num_clips);
+
+ fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
+ cmd = kzalloc(fifo_size, GFP_KERNEL);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Temporary fifo memory alloc failed.\n");
+ return -ENOMEM;
+ }
+
+ left = clips->x1;
+ right = clips->x2;
+ top = clips->y1;
+ bottom = clips->y2;
+
+ /* skip the first clip rect */
+ for (i = 1, clips_ptr = clips + inc;
+ i < num_clips; i++, clips_ptr += inc) {
+ left = min_t(int, left, (int)clips_ptr->x1);
+ right = max_t(int, right, (int)clips_ptr->x2);
+ top = min_t(int, top, (int)clips_ptr->y1);
+ bottom = max_t(int, bottom, (int)clips_ptr->y2);
+ }
+
+ /* only need to do this once */
+ memset(cmd, 0, fifo_size);
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
+ cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
+
+ cmd->body.srcRect.left = left;
+ cmd->body.srcRect.right = right;
+ cmd->body.srcRect.top = top;
+ cmd->body.srcRect.bottom = bottom;
+
+ clips_ptr = clips;
+ blits = (SVGASignedRect *)&cmd[1];
+ for (i = 0; i < num_clips; i++, clips_ptr += inc) {
+ blits[i].left = clips_ptr->x1 - left;
+ blits[i].right = clips_ptr->x2 - left;
+ blits[i].top = clips_ptr->y1 - top;
+ blits[i].bottom = clips_ptr->y2 - top;
+ }
+
+ /* do per unit writing, reuse fifo for each */
+ for (i = 0; i < num_units; i++) {
+ struct vmw_display_unit *unit = units[i];
+ int clip_x1 = left - unit->crtc.x;
+ int clip_y1 = top - unit->crtc.y;
+ int clip_x2 = right - unit->crtc.x;
+ int clip_y2 = bottom - unit->crtc.y;
+
+		/* skip any crtcs that miss the clip region */
+ if (clip_x1 >= unit->crtc.mode.hdisplay ||
+ clip_y1 >= unit->crtc.mode.vdisplay ||
+ clip_x2 <= 0 || clip_y2 <= 0)
+ continue;
+
+ /* need to reset sid as it is changed by execbuf */
+ cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
+
+ cmd->body.destScreenId = unit->unit;
+
+ /*
+		 * The blit command is a lot more resilient than the
+		 * readback command when it comes to clip rects. So it's
+		 * okay to go out of bounds.
+ */
+
+ cmd->body.destRect.left = clip_x1;
+ cmd->body.destRect.right = clip_x2;
+ cmd->body.destRect.top = clip_y1;
+ cmd->body.destRect.bottom = clip_y2;
+ ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
+ fifo_size, 0, NULL);
+
+ if (unlikely(ret != 0))
+ break;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
@@ -444,44 +495,20 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
- struct vmw_surface *surf = vfbs->surface;
struct drm_clip_rect norect;
- SVGA3dCopyRect *cr;
- int i, inc = 1;
- int ret;
-
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdPresent body;
- SVGA3dCopyRect cr;
- } *cmd;
+ int ret, inc = 1;
if (unlikely(vfbs->master != file_priv->master))
return -EINVAL;
+ /* Require ScreenObject support for 3D */
+ if (!dev_priv->sou_priv)
+ return -EINVAL;
+
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
- if (!num_clips ||
- !(dev_priv->fifo.capabilities &
- SVGA_FIFO_CAP_SCREEN_OBJECT)) {
- int ret;
-
- mutex_lock(&vfbs->work_lock);
- vfbs->present_fs = true;
- ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
- mutex_unlock(&vfbs->work_lock);
- if (ret) {
- /**
- * No work pending, Force immediate present.
- */
- vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
- }
- ttm_read_unlock(&vmaster->lock);
- return 0;
- }
-
if (!num_clips) {
num_clips = 1;
clips = &norect;
@@ -493,29 +520,10 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
inc = 2; /* skip source rects */
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
- ttm_read_unlock(&vmaster->lock);
- return -ENOMEM;
- }
+ ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
+ flags, color,
+ clips, num_clips, inc);
- memset(cmd, 0, sizeof(*cmd));
-
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
- cmd->body.sid = cpu_to_le32(surf->res.id);
-
- for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
- cr->x = cpu_to_le16(clips->x1);
- cr->y = cpu_to_le16(clips->y1);
- cr->srcx = cr->x;
- cr->srcy = cr->y;
- cr->w = cpu_to_le16(clips->x2 - clips->x1);
- cr->h = cpu_to_le16(clips->y2 - clips->y1);
- }
-
- vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
ttm_read_unlock(&vmaster->lock);
return 0;
}
@@ -540,6 +548,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
+	/* 3D is only supported on HWv8 hosts, which support screen objects */
+ if (!dev_priv->sou_priv)
+ return -ENOSYS;
+
/*
* Sanity checks.
*/
@@ -567,6 +579,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
case 15:
format = SVGA3D_A1R5G5B5;
break;
+ case 8:
+ format = SVGA3D_LUMINANCE8;
+ break;
default:
DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
return -EINVAL;
@@ -599,14 +614,11 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
vfbs->base.base.depth = mode_cmd->depth;
vfbs->base.base.width = mode_cmd->width;
vfbs->base.base.height = mode_cmd->height;
- vfbs->base.pin = &vmw_surface_dmabuf_pin;
- vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
vfbs->surface = surface;
+ vfbs->base.user_handle = mode_cmd->handle;
vfbs->master = drm_master_get(file_priv->master);
- mutex_init(&vfbs->work_lock);
mutex_lock(&vmaster->fb_surf_mutex);
- INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
list_add_tail(&vfbs->head, &vmaster->fb_surf);
mutex_unlock(&vmaster->fb_surf_mutex);
@@ -641,48 +653,33 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
drm_framebuffer_cleanup(framebuffer);
vmw_dmabuf_unreference(&vfbd->buffer);
+ ttm_base_object_unref(&vfbd->base.user_obj);
kfree(vfbd);
}
-int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
- struct drm_file *file_priv,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips)
+static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int increment)
{
- struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- struct drm_clip_rect norect;
- int ret;
+ size_t fifo_size;
+ int i;
+
struct {
uint32_t header;
SVGAFifoCmdUpdate body;
} *cmd;
- int i, increment = 1;
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
-
- if (!num_clips) {
- num_clips = 1;
- clips = &norect;
- norect.x1 = norect.y1 = 0;
- norect.x2 = framebuffer->width;
- norect.y2 = framebuffer->height;
- } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
- num_clips /= 2;
- increment = 2;
- }
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
+ fifo_size = sizeof(*cmd) * num_clips;
+ cmd = vmw_fifo_reserve(dev_priv, fifo_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
- ttm_read_unlock(&vmaster->lock);
return -ENOMEM;
}
+ memset(cmd, 0, fifo_size);
for (i = 0; i < num_clips; i++, clips += increment) {
cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
cmd[i].body.x = cpu_to_le32(clips->x1);
@@ -691,57 +688,186 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
}
- vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
- ttm_read_unlock(&vmaster->lock);
-
+ vmw_fifo_commit(dev_priv, fifo_size);
return 0;
}
-static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
- .destroy = vmw_framebuffer_dmabuf_destroy,
- .dirty = vmw_framebuffer_dmabuf_dirty,
- .create_handle = vmw_framebuffer_create_handle,
-};
-
-static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
+static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
+ struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer)
{
- struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_framebuffer_surface *vfbs =
- vmw_framebuffer_to_vfbs(&vfb->base);
- unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
+ int depth = framebuffer->base.depth;
+ size_t fifo_size;
int ret;
- vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
- if (unlikely(vfbs->buffer == NULL))
+ struct {
+ uint32_t header;
+ SVGAFifoCmdDefineGMRFB body;
+ } *cmd;
+
+	/* Emulate RGBA support; contrary to svga_reg.h, this is not
+ * supported by hosts. This is only a problem if we are reading
+ * this value later and expecting what we uploaded back.
+ */
+ if (depth == 32)
+ depth = 24;
+
+ fifo_size = sizeof(*cmd);
+ cmd = kmalloc(fifo_size, GFP_KERNEL);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
return -ENOMEM;
+ }
- vmw_overlay_pause_all(dev_priv);
- ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
- &vmw_vram_ne_placement,
- false, &vmw_dmabuf_bo_free);
- vmw_overlay_resume_all(dev_priv);
+ memset(cmd, 0, fifo_size);
+ cmd->header = SVGA_CMD_DEFINE_GMRFB;
+ cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
+ cmd->body.format.colorDepth = depth;
+ cmd->body.format.reserved = 0;
+ cmd->body.bytesPerLine = framebuffer->base.pitch;
+ cmd->body.ptr.gmrId = framebuffer->user_handle;
+ cmd->body.ptr.offset = 0;
+
+ ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
+ fifo_size, 0, NULL);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
+ struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int increment)
+{
+ struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_clip_rect *clips_ptr;
+ int i, k, num_units, ret;
+ struct drm_crtc *crtc;
+ size_t fifo_size;
+
+ struct {
+ uint32_t header;
+ SVGAFifoCmdBlitGMRFBToScreen body;
+ } *blits;
+
+ ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
if (unlikely(ret != 0))
- vfbs->buffer = NULL;
+ return ret; /* define_gmrfb prints warnings */
+
+ fifo_size = sizeof(*blits) * num_clips;
+ blits = kmalloc(fifo_size, GFP_KERNEL);
+ if (unlikely(blits == NULL)) {
+ DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
+ return -ENOMEM;
+ }
+
+ num_units = 0;
+ list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+ if (crtc->fb != &framebuffer->base)
+ continue;
+ units[num_units++] = vmw_crtc_to_du(crtc);
+ }
+
+ for (k = 0; k < num_units; k++) {
+ struct vmw_display_unit *unit = units[k];
+ int hit_num = 0;
+
+ clips_ptr = clips;
+ for (i = 0; i < num_clips; i++, clips_ptr += increment) {
+ int clip_x1 = clips_ptr->x1 - unit->crtc.x;
+ int clip_y1 = clips_ptr->y1 - unit->crtc.y;
+ int clip_x2 = clips_ptr->x2 - unit->crtc.x;
+ int clip_y2 = clips_ptr->y2 - unit->crtc.y;
+
+			/* skip any crtcs that miss the clip region */
+ if (clip_x1 >= unit->crtc.mode.hdisplay ||
+ clip_y1 >= unit->crtc.mode.vdisplay ||
+ clip_x2 <= 0 || clip_y2 <= 0)
+ continue;
+
+ blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+ blits[hit_num].body.destScreenId = unit->unit;
+ blits[hit_num].body.srcOrigin.x = clips_ptr->x1;
+ blits[hit_num].body.srcOrigin.y = clips_ptr->y1;
+ blits[hit_num].body.destRect.left = clip_x1;
+ blits[hit_num].body.destRect.top = clip_y1;
+ blits[hit_num].body.destRect.right = clip_x2;
+ blits[hit_num].body.destRect.bottom = clip_y2;
+ hit_num++;
+ }
+
+ /* no clips hit the crtc */
+ if (hit_num == 0)
+ continue;
+
+ fifo_size = sizeof(*blits) * hit_num;
+ ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
+ fifo_size, 0, NULL);
+
+ if (unlikely(ret != 0))
+ break;
+ }
+
+ kfree(blits);
return ret;
}
-static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
+int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
{
- struct ttm_buffer_object *bo;
- struct vmw_framebuffer_surface *vfbs =
- vmw_framebuffer_to_vfbs(&vfb->base);
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ struct vmw_framebuffer_dmabuf *vfbd =
+ vmw_framebuffer_to_vfbd(framebuffer);
+ struct drm_clip_rect norect;
+ int ret, increment = 1;
- if (unlikely(vfbs->buffer == NULL))
- return 0;
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
- bo = &vfbs->buffer->base;
- ttm_bo_unref(&bo);
- vfbs->buffer = NULL;
+ if (!num_clips) {
+ num_clips = 1;
+ clips = &norect;
+ norect.x1 = norect.y1 = 0;
+ norect.x2 = framebuffer->width;
+ norect.y2 = framebuffer->height;
+ } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+ num_clips /= 2;
+ increment = 2;
+ }
- return 0;
+ if (dev_priv->ldu_priv) {
+ ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
+ flags, color,
+ clips, num_clips, increment);
+ } else {
+ ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
+ flags, color,
+ clips, num_clips, increment);
+ }
+
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
}
+static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
+ .destroy = vmw_framebuffer_dmabuf_destroy,
+ .dirty = vmw_framebuffer_dmabuf_dirty,
+ .create_handle = vmw_framebuffer_create_handle,
+};
+
+/**
+ * Pin the dmabuffer to the start of vram.
+ */
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
@@ -749,10 +875,12 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
vmw_framebuffer_to_vfbd(&vfb->base);
int ret;
+ /* This code should not be used with screen objects */
+ BUG_ON(dev_priv->sou_priv);
vmw_overlay_pause_all(dev_priv);
- ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
+ ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);
vmw_overlay_resume_all(dev_priv);
@@ -772,7 +900,7 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
return 0;
}
- return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
+ return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
}
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
@@ -794,6 +922,33 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
return -EINVAL;
}
+ /* Limited framebuffer color depth support for screen objects */
+ if (dev_priv->sou_priv) {
+ switch (mode_cmd->depth) {
+ case 32:
+ case 24:
+ /* Only support 32 bpp for 32 and 24 depth fbs */
+ if (mode_cmd->bpp == 32)
+ break;
+
+			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
+ mode_cmd->depth, mode_cmd->bpp);
+ return -EINVAL;
+ case 16:
+ case 15:
+ /* Only support 16 bpp for 16 and 15 depth fbs */
+ if (mode_cmd->bpp == 16)
+ break;
+
+			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
+ mode_cmd->depth, mode_cmd->bpp);
+ return -EINVAL;
+ default:
+ DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
+ return -EINVAL;
+ }
+ }
+
vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
if (!vfbd) {
ret = -ENOMEM;
@@ -815,9 +970,13 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
vfbd->base.base.depth = mode_cmd->depth;
vfbd->base.base.width = mode_cmd->width;
vfbd->base.base.height = mode_cmd->height;
- vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
- vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
+ if (!dev_priv->sou_priv) {
+ vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
+ vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
+ }
+ vfbd->base.dmabuf = true;
vfbd->buffer = dmabuf;
+ vfbd->base.user_handle = mode_cmd->handle;
*out = &vfbd->base;
return 0;
@@ -843,6 +1002,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
+ struct ttm_base_object *user_obj;
u64 required_size;
int ret;
@@ -855,7 +1015,22 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
required_size = mode_cmd->pitch * mode_cmd->height;
if (unlikely(required_size > (u64) dev_priv->vram_size)) {
DRM_ERROR("VRAM size is too small for requested mode.\n");
- return NULL;
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * Take a reference on the user object of the resource
+ * backing the kms fb. This ensures that user-space handle
+ * lookups on that resource will always work as long as
+ * it's registered with a kms framebuffer. This is important,
+ * since vmw_execbuf_process identifies resources in the
+ * command stream using user-space handles.
+ */
+
+ user_obj = ttm_base_object_lookup(tfile, mode_cmd->handle);
+ if (unlikely(user_obj == NULL)) {
+ DRM_ERROR("Could not locate requested kms frame buffer.\n");
+ return ERR_PTR(-ENOENT);
}
/**
@@ -878,8 +1053,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+ ttm_base_object_unref(&user_obj);
return ERR_PTR(ret);
- }
+ } else
+ vfb->user_obj = user_obj;
return &vfb->base;
try_dmabuf:
@@ -899,8 +1076,10 @@ try_dmabuf:
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+ ttm_base_object_unref(&user_obj);
return ERR_PTR(ret);
- }
+ } else
+ vfb->user_obj = user_obj;
return &vfb->base;
@@ -908,6 +1087,7 @@ err_not_scanout:
DRM_ERROR("surface not marked as scanout\n");
/* vmw_user_surface_lookup takes one ref */
vmw_surface_unreference(&surface);
+ ttm_base_object_unref(&user_obj);
return ERR_PTR(-EINVAL);
}
@@ -916,6 +1096,210 @@ static struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
};
+int vmw_kms_present(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct vmw_surface *surface,
+ uint32_t sid,
+ int32_t destX, int32_t destY,
+ struct drm_vmw_rect *clips,
+ uint32_t num_clips)
+{
+ struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_crtc *crtc;
+ size_t fifo_size;
+ int i, k, num_units;
+ int ret = 0; /* silence warning */
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBlitSurfaceToScreen body;
+ } *cmd;
+ SVGASignedRect *blits;
+
+ num_units = 0;
+ list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+ if (crtc->fb != &vfb->base)
+ continue;
+ units[num_units++] = vmw_crtc_to_du(crtc);
+ }
+
+ BUG_ON(surface == NULL);
+ BUG_ON(!clips || !num_clips);
+
+ fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
+ cmd = kmalloc(fifo_size, GFP_KERNEL);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed to allocate temporary fifo memory.\n");
+ return -ENOMEM;
+ }
+
+ /* only need to do this once */
+ memset(cmd, 0, fifo_size);
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
+ cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
+
+ cmd->body.srcRect.left = 0;
+ cmd->body.srcRect.right = surface->sizes[0].width;
+ cmd->body.srcRect.top = 0;
+ cmd->body.srcRect.bottom = surface->sizes[0].height;
+
+ blits = (SVGASignedRect *)&cmd[1];
+ for (i = 0; i < num_clips; i++) {
+ blits[i].left = clips[i].x;
+ blits[i].right = clips[i].x + clips[i].w;
+ blits[i].top = clips[i].y;
+ blits[i].bottom = clips[i].y + clips[i].h;
+ }
+
+ for (k = 0; k < num_units; k++) {
+ struct vmw_display_unit *unit = units[k];
+ int clip_x1 = destX - unit->crtc.x;
+ int clip_y1 = destY - unit->crtc.y;
+ int clip_x2 = clip_x1 + surface->sizes[0].width;
+ int clip_y2 = clip_y1 + surface->sizes[0].height;
+
+		/* skip any crtcs that miss the clip region */
+ if (clip_x1 >= unit->crtc.mode.hdisplay ||
+ clip_y1 >= unit->crtc.mode.vdisplay ||
+ clip_x2 <= 0 || clip_y2 <= 0)
+ continue;
+
+ /* need to reset sid as it is changed by execbuf */
+ cmd->body.srcImage.sid = sid;
+
+ cmd->body.destScreenId = unit->unit;
+
+ /*
+		 * The blit command is a lot more resilient than the
+		 * readback command when it comes to clip rects. So it's
+		 * okay to go out of bounds.
+ */
+
+ cmd->body.destRect.left = clip_x1;
+ cmd->body.destRect.right = clip_x2;
+ cmd->body.destRect.top = clip_y1;
+ cmd->body.destRect.bottom = clip_y2;
+
+ ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
+ fifo_size, 0, NULL);
+
+ if (unlikely(ret != 0))
+ break;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+int vmw_kms_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_vmw_rect *clips,
+ uint32_t num_clips)
+{
+ struct vmw_framebuffer_dmabuf *vfbd =
+ vmw_framebuffer_to_vfbd(&vfb->base);
+ struct vmw_dma_buffer *dmabuf = vfbd->buffer;
+ struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_crtc *crtc;
+ size_t fifo_size;
+ int i, k, ret, num_units, blits_pos;
+
+ struct {
+ uint32_t header;
+ SVGAFifoCmdDefineGMRFB body;
+ } *cmd;
+ struct {
+ uint32_t header;
+ SVGAFifoCmdBlitScreenToGMRFB body;
+ } *blits;
+
+ num_units = 0;
+ list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+ if (crtc->fb != &vfb->base)
+ continue;
+ units[num_units++] = vmw_crtc_to_du(crtc);
+ }
+
+ BUG_ON(dmabuf == NULL);
+ BUG_ON(!clips || !num_clips);
+
+ /* take a safe guess at fifo size */
+ fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
+ cmd = kmalloc(fifo_size, GFP_KERNEL);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed to allocate temporary fifo memory.\n");
+ return -ENOMEM;
+ }
+
+ memset(cmd, 0, fifo_size);
+ cmd->header = SVGA_CMD_DEFINE_GMRFB;
+ cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
+ cmd->body.format.colorDepth = vfb->base.depth;
+ cmd->body.format.reserved = 0;
+ cmd->body.bytesPerLine = vfb->base.pitch;
+ cmd->body.ptr.gmrId = vfb->user_handle;
+ cmd->body.ptr.offset = 0;
+
+ blits = (void *)&cmd[1];
+ blits_pos = 0;
+ for (i = 0; i < num_units; i++) {
+ struct drm_vmw_rect *c = clips;
+ for (k = 0; k < num_clips; k++, c++) {
+ /* transform clip coords to crtc origin based coords */
+ int clip_x1 = c->x - units[i]->crtc.x;
+ int clip_x2 = c->x - units[i]->crtc.x + c->w;
+ int clip_y1 = c->y - units[i]->crtc.y;
+ int clip_y2 = c->y - units[i]->crtc.y + c->h;
+ int dest_x = c->x;
+ int dest_y = c->y;
+
+			/* compensate for clipping: a negative clip origin
+			 * shifts the destination by the clipped amount.
+ */
+ if (clip_x1 < 0)
+ dest_x += -clip_x1;
+ if (clip_y1 < 0)
+ dest_y += -clip_y1;
+
+ /* clip */
+ clip_x1 = max(clip_x1, 0);
+ clip_y1 = max(clip_y1, 0);
+ clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
+ clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);
+
+			/* and cull any rects that miss the crtc */
+ if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
+ clip_y1 >= units[i]->crtc.mode.vdisplay ||
+ clip_x2 <= 0 || clip_y2 <= 0)
+ continue;
+
+ blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
+ blits[blits_pos].body.srcScreenId = units[i]->unit;
+ blits[blits_pos].body.destOrigin.x = dest_x;
+ blits[blits_pos].body.destOrigin.y = dest_y;
+
+ blits[blits_pos].body.srcRect.left = clip_x1;
+ blits[blits_pos].body.srcRect.top = clip_y1;
+ blits[blits_pos].body.srcRect.right = clip_x2;
+ blits[blits_pos].body.srcRect.bottom = clip_y2;
+ blits_pos++;
+ }
+ }
+ /* reset size here and use calculated exact size from loops */
+ fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
+
+ ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
+ 0, user_fence_rep);
+
+ kfree(cmd);
+
+ return ret;
+}
+
int vmw_kms_init(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -929,7 +1313,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
- ret = vmw_kms_init_legacy_display_system(dev_priv);
+ ret = vmw_kms_init_screen_object_display(dev_priv);
+ if (ret) /* Fallback */
+ (void)vmw_kms_init_legacy_display_system(dev_priv);
return 0;
}
@@ -942,7 +1328,10 @@ int vmw_kms_close(struct vmw_private *dev_priv)
* drm_encoder_cleanup which takes the lock we deadlock.
*/
drm_mode_config_cleanup(dev_priv->dev);
- vmw_kms_close_legacy_display_system(dev_priv);
+ if (dev_priv->sou_priv)
+ vmw_kms_close_screen_object_display(dev_priv);
+ else
+ vmw_kms_close_legacy_display_system(dev_priv);
return 0;
}
@@ -987,9 +1376,9 @@ out:
return ret;
}
-void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
- unsigned bbp, unsigned depth)
+ unsigned bpp, unsigned depth)
{
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
@@ -997,11 +1386,15 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp);
- vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
- vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
- vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
- vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
+
+ if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
+ DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
+ depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
+ return -EINVAL;
+ }
+
+ return 0;
}
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
@@ -1011,12 +1404,7 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
- vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
- vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
- vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
- vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
- vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_priv->vga_pitchlock =
vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
@@ -1065,12 +1453,7 @@ int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
- vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
- vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
- vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
- vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
- vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
vmw_priv->vga_pitchlock);
@@ -1095,6 +1478,312 @@ int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
return 0;
}
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+ uint32_t pitch,
+ uint32_t height)
+{
+ return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
+}
+
+
+/**
+ * Function called by DRM code with vbl_lock held.
+ */
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ return 0;
+}
+
+/**
+ * Function called by DRM code with vbl_lock held.
+ */
+int vmw_enable_vblank(struct drm_device *dev, int crtc)
+{
+ return -ENOSYS;
+}
+
+/**
+ * Function called by DRM code with vbl_lock held.
+ */
+void vmw_disable_vblank(struct drm_device *dev, int crtc)
+{
+}
+
+
+/*
+ * Small shared kms functions.
+ */
+
+int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
+ struct drm_vmw_rect *rects)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_display_unit *du;
+ struct drm_connector *con;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+#if 0
+ {
+ unsigned int i;
+
+ DRM_INFO("%s: new layout ", __func__);
+ for (i = 0; i < num; i++)
+ DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
+ rects[i].w, rects[i].h);
+ DRM_INFO("\n");
+ }
+#endif
+
+ list_for_each_entry(con, &dev->mode_config.connector_list, head) {
+ du = vmw_connector_to_du(con);
+ if (num > du->unit) {
+ du->pref_width = rects[du->unit].w;
+ du->pref_height = rects[du->unit].h;
+ du->pref_active = true;
+ du->gui_x = rects[du->unit].x;
+ du->gui_y = rects[du->unit].y;
+ } else {
+ du->pref_width = 800;
+ du->pref_height = 600;
+ du->pref_active = false;
+ }
+ con->status = vmw_du_connector_detect(con, true);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+
+void vmw_du_crtc_save(struct drm_crtc *crtc)
+{
+}
+
+void vmw_du_crtc_restore(struct drm_crtc *crtc)
+{
+}
+
+void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size)
+{
+ struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+ int i;
+
+ for (i = 0; i < size; i++) {
+ DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
+ r[i], g[i], b[i]);
+ vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
+ vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
+ vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
+ }
+}
+
+void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
+{
+}
+
+void vmw_du_connector_save(struct drm_connector *connector)
+{
+}
+
+void vmw_du_connector_restore(struct drm_connector *connector)
+{
+}
+
+enum drm_connector_status
+vmw_du_connector_detect(struct drm_connector *connector, bool force)
+{
+ uint32_t num_displays;
+ struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_display_unit *du = vmw_connector_to_du(connector);
+
+ mutex_lock(&dev_priv->hw_mutex);
+ num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return ((vmw_connector_to_du(connector)->unit < num_displays &&
+ du->pref_active) ?
+ connector_status_connected : connector_status_disconnected);
+}
+
+static struct drm_display_mode vmw_kms_connector_builtin[] = {
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1152x864@75Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+ 1472, 1664, 0, 768, 771, 778, 798, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@60Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+ 1480, 1680, 0, 800, 803, 809, 831, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x960@60Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@60Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1360x768@60Hz */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+ 1536, 1792, 0, 768, 771, 777, 795, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1400x1050@60Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@60Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+ 1672, 1904, 0, 900, 903, 909, 934, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@60Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@60Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@60Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1856x1392@60Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@60Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@60Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@60Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* Terminate */
+ { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
+};
+
+/**
+ * vmw_guess_mode_timing - Provide fake timings for a
+ * 60Hz vrefresh mode.
+ *
+ * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
+ * members filled in.
+ */
+static void vmw_guess_mode_timing(struct drm_display_mode *mode)
+{
+ mode->hsync_start = mode->hdisplay + 50;
+ mode->hsync_end = mode->hsync_start + 50;
+ mode->htotal = mode->hsync_end + 50;
+
+ mode->vsync_start = mode->vdisplay + 50;
+ mode->vsync_end = mode->vsync_start + 50;
+ mode->vtotal = mode->vsync_end + 50;
+
+ mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+}
+
+
+int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ uint32_t max_width, uint32_t max_height)
+{
+ struct vmw_display_unit *du = vmw_connector_to_du(connector);
+ struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *bmode;
+ struct drm_display_mode prefmode = { DRM_MODE("preferred",
+ DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+ };
+ int i;
+
+ /* Add preferred mode */
+ {
+ mode = drm_mode_duplicate(dev, &prefmode);
+ if (!mode)
+ return 0;
+ mode->hdisplay = du->pref_width;
+ mode->vdisplay = du->pref_height;
+ vmw_guess_mode_timing(mode);
+
+ if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
+ mode->vdisplay)) {
+ drm_mode_probed_add(connector, mode);
+ } else {
+ drm_mode_destroy(dev, mode);
+ mode = NULL;
+ }
+
+ if (du->pref_mode) {
+ list_del_init(&du->pref_mode->head);
+ drm_mode_destroy(dev, du->pref_mode);
+ }
+
+ /* mode might be null here, this is intended */
+ du->pref_mode = mode;
+ }
+
+ for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
+ bmode = &vmw_kms_connector_builtin[i];
+ if (bmode->hdisplay > max_width ||
+ bmode->vdisplay > max_height)
+ continue;
+
+ if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
+ bmode->vdisplay))
+ continue;
+
+ mode = drm_mode_duplicate(dev, bmode);
+ if (!mode)
+ return 0;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ /* Move the preferred mode first, to help apps pick the right mode. */
+ if (du->pref_mode)
+ list_move(&du->pref_mode->head, &connector->probed_modes);
+
+ drm_mode_connector_list_update(connector);
+
+ return 1;
+}
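A minimal stand-alone sketch (not part of the patch) of the VRAM check that filters the mode list above: vmw_kms_validate_mode_vram(), visible further down in this diff, tests pitch * height against vram_size, and the callers here pass hdisplay * 2 as the pitch argument. The VRAM size below is hypothetical.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Same check as the vmw_kms_validate_mode_vram() helper. */
static bool mode_fits_vram(uint64_t pitch, uint64_t height, uint64_t vram_size)
{
	return pitch * height < vram_size;
}

int main(void)
{
	uint64_t vram_size = 8 * 1024 * 1024;	/* hypothetical 8 MiB of VRAM */
	uint32_t hdisplay = 1920, vdisplay = 1440;

	/* The connector code above passes hdisplay * 2 as the pitch. */
	printf("1920x1440 fits: %d\n",
	       mode_fits_vram((uint64_t)hdisplay * 2, vdisplay, vram_size));
	return 0;
}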
+
+int vmw_du_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1106,6 +1795,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_rect *rects;
unsigned rects_size;
int ret;
+ int i;
+ struct drm_mode_config *mode_config = &dev->mode_config;
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
@@ -1113,7 +1804,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
if (!arg->num_outputs) {
struct drm_vmw_rect def_rect = {0, 0, 800, 600};
- vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect);
+ vmw_du_update_layout(dev_priv, 1, &def_rect);
goto out_unlock;
}
@@ -1132,7 +1823,18 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
goto out_free;
}
- vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects);
+ for (i = 0; i < arg->num_outputs; ++i) {
+ if (rects->x < 0 ||
+ rects->y < 0 ||
+ rects->x + rects->w > mode_config->max_width ||
+ rects->y + rects->h > mode_config->max_height) {
+ DRM_ERROR("Invalid GUI layout.\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
out_free:
kfree(rects);
@@ -1140,15 +1842,3 @@ out_unlock:
ttm_read_unlock(&vmaster->lock);
return ret;
}
-
-bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
- uint32_t pitch,
- uint32_t height)
-{
- return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
-}
-
-u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
-{
- return 0;
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 8a398a0339b..af8e6e5bd96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -31,6 +31,8 @@
#include "drmP.h"
#include "vmwgfx_drv.h"
+#define VMWGFX_NUM_DISPLAY_UNITS 8
+
#define vmw_framebuffer_to_vfb(x) \
container_of(x, struct vmw_framebuffer, base)
@@ -45,6 +47,9 @@ struct vmw_framebuffer {
struct drm_framebuffer base;
int (*pin)(struct vmw_framebuffer *fb);
int (*unpin)(struct vmw_framebuffer *fb);
+ bool dmabuf;
+ struct ttm_base_object *user_obj;
+ uint32_t user_handle;
};
@@ -83,22 +88,65 @@ struct vmw_display_unit {
int hotspot_y;
unsigned unit;
+
+ /*
+ * Preferred mode tracking.
+ */
+ unsigned pref_width;
+ unsigned pref_height;
+ bool pref_active;
+ struct drm_display_mode *pref_mode;
+
+ /*
+ * GUI positioning
+ */
+ int gui_x;
+ int gui_y;
+ bool is_implicit;
};
+#define vmw_crtc_to_du(x) \
+ container_of(x, struct vmw_display_unit, crtc)
+#define vmw_connector_to_du(x) \
+ container_of(x, struct vmw_display_unit, connector)
+
+
/*
* Shared display unit functions - vmwgfx_kms.c
*/
void vmw_display_unit_cleanup(struct vmw_display_unit *du);
+void vmw_du_crtc_save(struct drm_crtc *crtc);
+void vmw_du_crtc_restore(struct drm_crtc *crtc);
+void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size);
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height);
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
+void vmw_du_connector_save(struct drm_connector *connector);
+void vmw_du_connector_restore(struct drm_connector *connector);
+enum drm_connector_status
+vmw_du_connector_detect(struct drm_connector *connector, bool force);
+int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ uint32_t max_width, uint32_t max_height);
+int vmw_du_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val);
+
/*
* Legacy display unit functions - vmwgfx_ldu.c
*/
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
-int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
+
+/*
+ * Screen Objects display functions - vmwgfx_scrn.c
+ */
+int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
+int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
+int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
struct drm_vmw_rect *rects);
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b3a2cd5118d..90c5e392849 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -27,7 +27,6 @@
#include "vmwgfx_kms.h"
-#define VMWGFX_LDU_NUM_DU 8
#define vmw_crtc_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.crtc)
@@ -51,11 +50,6 @@ struct vmw_legacy_display {
struct vmw_legacy_display_unit {
struct vmw_display_unit base;
- unsigned pref_width;
- unsigned pref_height;
- bool pref_active;
- struct drm_display_mode *pref_mode;
-
struct list_head active;
};
@@ -71,20 +65,6 @@ static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
* Legacy Display Unit CRTC functions
*/
-static void vmw_ldu_crtc_save(struct drm_crtc *crtc)
-{
-}
-
-static void vmw_ldu_crtc_restore(struct drm_crtc *crtc)
-{
-}
-
-static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc,
- u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
-{
-}
-
static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
{
vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
@@ -114,10 +94,8 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
return 0;
fb = entry->base.crtc.fb;
- vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
- fb->bits_per_pixel, fb->depth);
-
- return 0;
+ return vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
+ fb->bits_per_pixel, fb->depth);
}
if (!list_empty(&lds->active)) {
@@ -265,9 +243,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
vmw_ldu_del_active(dev_priv, ldu);
- vmw_ldu_commit_list(dev_priv);
-
- return 0;
+ return vmw_ldu_commit_list(dev_priv);
}
@@ -292,21 +268,20 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
vmw_ldu_add_active(dev_priv, ldu, vfb);
- vmw_ldu_commit_list(dev_priv);
-
- return 0;
+ return vmw_ldu_commit_list(dev_priv);
}
static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
- .save = vmw_ldu_crtc_save,
- .restore = vmw_ldu_crtc_restore,
+ .save = vmw_du_crtc_save,
+ .restore = vmw_du_crtc_restore,
.cursor_set = vmw_du_crtc_cursor_set,
.cursor_move = vmw_du_crtc_cursor_move,
- .gamma_set = vmw_ldu_crtc_gamma_set,
+ .gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_ldu_crtc_destroy,
.set_config = vmw_ldu_crtc_set_config,
};
+
/*
* Legacy Display Unit encoder functions
*/
@@ -324,183 +299,18 @@ static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
* Legacy Display Unit connector functions
*/
-static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode)
-{
-}
-
-static void vmw_ldu_connector_save(struct drm_connector *connector)
-{
-}
-
-static void vmw_ldu_connector_restore(struct drm_connector *connector)
-{
-}
-
-static enum drm_connector_status
- vmw_ldu_connector_detect(struct drm_connector *connector,
- bool force)
-{
- if (vmw_connector_to_ldu(connector)->pref_active)
- return connector_status_connected;
- return connector_status_disconnected;
-}
-
-static const struct drm_display_mode vmw_ldu_connector_builtin[] = {
- /* 640x480@60Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1152x864@75Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@60Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
- 1472, 1664, 0, 768, 771, 778, 798, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@60Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
- 1480, 1680, 0, 800, 803, 809, 831, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x960@60Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
- 1488, 1800, 0, 960, 961, 964, 1000, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@60Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1360x768@60Hz */
- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
- 1536, 1792, 0, 768, 771, 777, 795, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@60Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
- 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@60Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
- 1672, 1904, 0, 900, 903, 909, 934, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@60Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@60Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
- 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1792x1344@60Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
- 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1853x1392@60Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
- 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@60Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
- 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@60Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
- 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@60Hz */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
- 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* Terminate */
- { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
-};
-
-static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
- uint32_t max_width, uint32_t max_height)
-{
- struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
- struct drm_device *dev = connector->dev;
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_display_mode *mode = NULL;
- struct drm_display_mode prefmode = { DRM_MODE("preferred",
- DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
- };
- int i;
-
- /* Add preferred mode */
- {
- mode = drm_mode_duplicate(dev, &prefmode);
- if (!mode)
- return 0;
- mode->hdisplay = ldu->pref_width;
- mode->vdisplay = ldu->pref_height;
- mode->vrefresh = drm_mode_vrefresh(mode);
- if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
- mode->vdisplay)) {
- drm_mode_probed_add(connector, mode);
-
- if (ldu->pref_mode) {
- list_del_init(&ldu->pref_mode->head);
- drm_mode_destroy(dev, ldu->pref_mode);
- }
-
- ldu->pref_mode = mode;
- }
- }
-
- for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
- const struct drm_display_mode *bmode;
-
- bmode = &vmw_ldu_connector_builtin[i];
- if (bmode->hdisplay > max_width ||
- bmode->vdisplay > max_height)
- continue;
-
- if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
- bmode->vdisplay))
- continue;
-
- mode = drm_mode_duplicate(dev, bmode);
- if (!mode)
- return 0;
- mode->vrefresh = drm_mode_vrefresh(mode);
-
- drm_mode_probed_add(connector, mode);
- }
-
- drm_mode_connector_list_update(connector);
-
- return 1;
-}
-
-static int vmw_ldu_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- return 0;
-}
-
static void vmw_ldu_connector_destroy(struct drm_connector *connector)
{
vmw_ldu_destroy(vmw_connector_to_ldu(connector));
}
static struct drm_connector_funcs vmw_legacy_connector_funcs = {
- .dpms = vmw_ldu_connector_dpms,
- .save = vmw_ldu_connector_save,
- .restore = vmw_ldu_connector_restore,
- .detect = vmw_ldu_connector_detect,
- .fill_modes = vmw_ldu_connector_fill_modes,
- .set_property = vmw_ldu_connector_set_property,
+ .dpms = vmw_du_connector_dpms,
+ .save = vmw_du_connector_save,
+ .restore = vmw_du_connector_restore,
+ .detect = vmw_du_connector_detect,
+ .fill_modes = vmw_du_connector_fill_modes,
+ .set_property = vmw_du_connector_set_property,
.destroy = vmw_ldu_connector_destroy,
};
@@ -523,23 +333,26 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
INIT_LIST_HEAD(&ldu->active);
- ldu->pref_active = (unit == 0);
- ldu->pref_width = 800;
- ldu->pref_height = 600;
- ldu->pref_mode = NULL;
+ ldu->base.pref_active = (unit == 0);
+ ldu->base.pref_width = 800;
+ ldu->base.pref_height = 600;
+ ldu->base.pref_mode = NULL;
+ ldu->base.is_implicit = true;
drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- connector->status = vmw_ldu_connector_detect(connector, true);
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ connector->status = vmw_du_connector_detect(connector, true);
drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_VIRTUAL);
drm_mode_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+
drm_connector_attach_property(connector,
dev->mode_config.dirty_info_property,
1);
@@ -550,8 +363,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- int i;
- int ret;
+ int i, ret;
if (dev_priv->ldu_priv) {
DRM_INFO("ldu system already on\n");
@@ -559,7 +371,6 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
}
dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
-
if (!dev_priv->ldu_priv)
return -ENOMEM;
@@ -568,18 +379,31 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
dev_priv->ldu_priv->last_num_active = 0;
dev_priv->ldu_priv->fb = NULL;
- drm_mode_create_dirty_info_property(dev_priv->dev);
+ /* for old hardware without multimon only enable one display */
+ if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
+ ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+ else
+ ret = drm_vblank_init(dev, 1);
+ if (ret != 0)
+ goto err_free;
+
+ ret = drm_mode_create_dirty_info_property(dev);
+ if (ret != 0)
+ goto err_vblank_cleanup;
- if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
- for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i)
+ if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
+ for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_ldu_init(dev_priv, i);
- ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU);
- } else {
- /* for old hardware without multimon only enable one display */
+ else
vmw_ldu_init(dev_priv, 0);
- ret = drm_vblank_init(dev, 1);
- }
+ return 0;
+
+err_vblank_cleanup:
+ drm_vblank_cleanup(dev);
+err_free:
+ kfree(dev_priv->ldu_priv);
+ dev_priv->ldu_priv = NULL;
return ret;
}
@@ -587,52 +411,14 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- drm_vblank_cleanup(dev);
if (!dev_priv->ldu_priv)
return -ENOSYS;
+ drm_vblank_cleanup(dev);
+
BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
kfree(dev_priv->ldu_priv);
return 0;
}
-
-int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
- struct drm_vmw_rect *rects)
-{
- struct drm_device *dev = dev_priv->dev;
- struct vmw_legacy_display_unit *ldu;
- struct drm_connector *con;
- int i;
-
- mutex_lock(&dev->mode_config.mutex);
-
-#if 0
- DRM_INFO("%s: new layout ", __func__);
- for (i = 0; i < (int)num; i++)
- DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
- rects[i].w, rects[i].h);
- DRM_INFO("\n");
-#else
- (void)i;
-#endif
-
- list_for_each_entry(con, &dev->mode_config.connector_list, head) {
- ldu = vmw_connector_to_ldu(con);
- if (num > ldu->base.unit) {
- ldu->pref_width = rects[ldu->base.unit].w;
- ldu->pref_height = rects[ldu->base.unit].h;
- ldu->pref_active = true;
- } else {
- ldu->pref_width = 800;
- ldu->pref_height = 600;
- ldu->pref_active = false;
- }
- con->status = vmw_ldu_connector_detect(con, true);
- }
-
- mutex_unlock(&dev->mode_config.mutex);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
new file mode 100644
index 00000000000..8a8725c2716
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
@@ -0,0 +1,171 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "vmwgfx_drv.h"
+
+struct vmw_marker {
+ struct list_head head;
+ uint32_t seqno;
+ struct timespec submitted;
+};
+
+void vmw_marker_queue_init(struct vmw_marker_queue *queue)
+{
+ INIT_LIST_HEAD(&queue->head);
+ queue->lag = ns_to_timespec(0);
+ getrawmonotonic(&queue->lag_time);
+ spin_lock_init(&queue->lock);
+}
+
+void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
+{
+ struct vmw_marker *marker, *next;
+
+ spin_lock(&queue->lock);
+ list_for_each_entry_safe(marker, next, &queue->head, head) {
+ kfree(marker);
+ }
+ spin_unlock(&queue->lock);
+}
+
+int vmw_marker_push(struct vmw_marker_queue *queue,
+ uint32_t seqno)
+{
+ struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);
+
+ if (unlikely(!marker))
+ return -ENOMEM;
+
+ marker->seqno = seqno;
+ getrawmonotonic(&marker->submitted);
+ spin_lock(&queue->lock);
+ list_add_tail(&marker->head, &queue->head);
+ spin_unlock(&queue->lock);
+
+ return 0;
+}
+
+int vmw_marker_pull(struct vmw_marker_queue *queue,
+ uint32_t signaled_seqno)
+{
+ struct vmw_marker *marker, *next;
+ struct timespec now;
+ bool updated = false;
+
+ spin_lock(&queue->lock);
+ getrawmonotonic(&now);
+
+ if (list_empty(&queue->head)) {
+ queue->lag = ns_to_timespec(0);
+ queue->lag_time = now;
+ updated = true;
+ goto out_unlock;
+ }
+
+ list_for_each_entry_safe(marker, next, &queue->head, head) {
+ if (signaled_seqno - marker->seqno > (1 << 30))
+ continue;
+
+ queue->lag = timespec_sub(now, marker->submitted);
+ queue->lag_time = now;
+ updated = true;
+ list_del(&marker->head);
+ kfree(marker);
+ }
+
+out_unlock:
+ spin_unlock(&queue->lock);
+
+ return (updated) ? 0 : -EBUSY;
+}
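A minimal stand-alone sketch (not part of the patch) of the unsigned wrap-around test used in vmw_marker_pull() above to decide whether a marker's seqno has already been signaled; all seqno values below are hypothetical.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* True if the marker seqno has NOT been signaled yet (so it must be kept). */
static bool marker_still_pending(uint32_t signaled_seqno, uint32_t marker_seqno)
{
	/*
	 * If marker_seqno is "ahead of" signaled_seqno, the unsigned
	 * subtraction wraps around to a huge value (> 2^30), which is
	 * exactly the condition used to skip the marker in vmw_marker_pull().
	 */
	return (uint32_t)(signaled_seqno - marker_seqno) > (1u << 30);
}

int main(void)
{
	printf("%d\n", marker_still_pending(10, 5));          /* 0: already signaled */
	printf("%d\n", marker_still_pending(5, 10));          /* 1: still pending */
	printf("%d\n", marker_still_pending(3, 0xfffffff0u)); /* 0: signaled across wrap */
	return 0;
}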
+
+static struct timespec vmw_timespec_add(struct timespec t1,
+ struct timespec t2)
+{
+ t1.tv_sec += t2.tv_sec;
+ t1.tv_nsec += t2.tv_nsec;
+ if (t1.tv_nsec >= 1000000000L) {
+ t1.tv_sec += 1;
+ t1.tv_nsec -= 1000000000L;
+ }
+
+ return t1;
+}
+
+static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
+{
+ struct timespec now;
+
+ spin_lock(&queue->lock);
+ getrawmonotonic(&now);
+ queue->lag = vmw_timespec_add(queue->lag,
+ timespec_sub(now, queue->lag_time));
+ queue->lag_time = now;
+ spin_unlock(&queue->lock);
+ return queue->lag;
+}
+
+
+static bool vmw_lag_lt(struct vmw_marker_queue *queue,
+ uint32_t us)
+{
+ struct timespec lag, cond;
+
+ cond = ns_to_timespec((s64) us * 1000);
+ lag = vmw_fifo_lag(queue);
+ return (timespec_compare(&lag, &cond) < 1);
+}
+
+int vmw_wait_lag(struct vmw_private *dev_priv,
+ struct vmw_marker_queue *queue, uint32_t us)
+{
+ struct vmw_marker *marker;
+ uint32_t seqno;
+ int ret;
+
+ while (!vmw_lag_lt(queue, us)) {
+ spin_lock(&queue->lock);
+ if (list_empty(&queue->head))
+ seqno = atomic_read(&dev_priv->marker_seq);
+ else {
+ marker = list_first_entry(&queue->head,
+ struct vmw_marker, head);
+ seqno = marker->seqno;
+ }
+ spin_unlock(&queue->lock);
+
+ ret = vmw_wait_seqno(dev_priv, false, seqno, true,
+ 3*HZ);
+
+ if (unlikely(ret != 0))
+ return ret;
+
+ (void) vmw_marker_pull(queue, seqno);
+ }
+ return 0;
+}
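A minimal stand-alone model (not part of the patch) of the lag threshold check that vmw_lag_lt() and vmw_wait_lag() implement above: the accumulated lag, kept as a timespec, is compared against a caller-supplied budget in microseconds. The struct and numbers below are simplified stand-ins, not the kernel types.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct ts { int64_t sec; long nsec; };	/* simplified stand-in for struct timespec */

/* Mirrors ns_to_timespec() for non-negative values. */
static struct ts us_to_ts(uint32_t us)
{
	int64_t ns = (int64_t)us * 1000;
	struct ts t = { ns / 1000000000LL, ns % 1000000000LL };
	return t;
}

/* lag <= threshold counts as "caught up", same sense as vmw_lag_lt(). */
static bool lag_lt(struct ts lag, uint32_t us)
{
	struct ts cond = us_to_ts(us);

	if (lag.sec != cond.sec)
		return lag.sec <= cond.sec;
	return lag.nsec <= cond.nsec;
}

int main(void)
{
	struct ts lag = { 0, 30 * 1000 * 1000 };	/* 30 ms of accumulated lag */

	printf("%d\n", lag_lt(lag, 100000));	/* 1: under a 100 ms budget */
	printf("%d\n", lag_lt(lag, 10000));	/* 0: over a 10 ms budget  */
	return 0;
}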
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 07ce02da78a..14399eec9c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -87,48 +87,6 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
}
/**
- * Pin or unpin a buffer in vram.
- *
- * @dev_priv: Driver private.
- * @buf: DMA buffer to pin or unpin.
- * @pin: Pin buffer in vram if true.
- * @interruptible: Use interruptible wait.
- *
- * Takes the current masters ttm lock in read.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buf,
- bool pin, bool interruptible)
-{
- struct ttm_buffer_object *bo = &buf->base;
- struct ttm_placement *overlay_placement = &vmw_vram_placement;
- int ret;
-
- ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
- if (unlikely(ret != 0))
- goto err;
-
- if (pin)
- overlay_placement = &vmw_vram_ne_placement;
-
- ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
-
- ttm_bo_unreserve(bo);
-
-err:
- ttm_read_unlock(&dev_priv->active_master->lock);
-
- return ret;
-}
-
-/**
* Send put command to hw.
*
* Returns
@@ -139,68 +97,80 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
+ struct vmw_escape_video_flush *flush;
+ size_t fifo_size;
+ bool have_so = dev_priv->sou_priv ? true : false;
+ int i, num_items;
+ SVGAGuestPtr ptr;
+
struct {
struct vmw_escape_header escape;
struct {
- struct {
- uint32_t cmdType;
- uint32_t streamId;
- } header;
- struct {
- uint32_t registerId;
- uint32_t value;
- } items[SVGA_VIDEO_PITCH_3 + 1];
- } body;
- struct vmw_escape_video_flush flush;
+ uint32_t cmdType;
+ uint32_t streamId;
+ } header;
} *cmds;
- uint32_t offset;
- int i, ret;
+ struct {
+ uint32_t registerId;
+ uint32_t value;
+ } *items;
- for (;;) {
- cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
- if (cmds)
- break;
+ /* the register defines are indices, so the item count is the highest index + 1 */
+ if (have_so)
+ num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
+ else
+ num_items = SVGA_VIDEO_PITCH_3 + 1;
- ret = vmw_fallback_wait(dev_priv, false, true, 0,
- interruptible, 3*HZ);
- if (interruptible && ret == -ERESTARTSYS)
- return ret;
- else
- BUG_ON(ret != 0);
+ fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
+
+ cmds = vmw_fifo_reserve(dev_priv, fifo_size);
+ /* hardware has hung, can't do anything here */
+ if (!cmds)
+ return -ENOMEM;
+
+ items = (typeof(items))&cmds[1];
+ flush = (struct vmw_escape_video_flush *)&items[num_items];
+
+ /* the escape body size is the header (one item-sized entry) plus all items */
+ fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));
+
+ cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+ cmds->header.streamId = arg->stream_id;
+
+ /* the register IDs are consecutive, starting at 0 */
+ for (i = 0; i < num_items; i++)
+ items[i].registerId = i;
+
+ vmw_bo_get_guest_ptr(&buf->base, &ptr);
+ ptr.offset += arg->offset;
+
+ items[SVGA_VIDEO_ENABLED].value = true;
+ items[SVGA_VIDEO_FLAGS].value = arg->flags;
+ items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
+ items[SVGA_VIDEO_FORMAT].value = arg->format;
+ items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
+ items[SVGA_VIDEO_SIZE].value = arg->size;
+ items[SVGA_VIDEO_WIDTH].value = arg->width;
+ items[SVGA_VIDEO_HEIGHT].value = arg->height;
+ items[SVGA_VIDEO_SRC_X].value = arg->src.x;
+ items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
+ items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
+ items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
+ items[SVGA_VIDEO_DST_X].value = arg->dst.x;
+ items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
+ items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
+ items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
+ items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
+ items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
+ items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
+ if (have_so) {
+ items[SVGA_VIDEO_DATA_GMRID].value = ptr.gmrId;
+ items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
}
- fill_escape(&cmds->escape, sizeof(cmds->body));
- cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
- cmds->body.header.streamId = arg->stream_id;
-
- for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
- cmds->body.items[i].registerId = i;
-
- offset = buf->base.offset + arg->offset;
-
- cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
- cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
- cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
- cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
- cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
- cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
- cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
- cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
- cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
- cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
- cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
- cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
- cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
- cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
- cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
- cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
- cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
- cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
- cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
-
- fill_flush(&cmds->flush, arg->stream_id);
+ fill_flush(flush, arg->stream_id);
- vmw_fifo_commit(dev_priv, sizeof(*cmds));
+ vmw_fifo_commit(dev_priv, fifo_size);
return 0;
}
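A small stand-alone sketch (not part of the patch) of the size arithmetic used by the reworked put command above: the item count is the highest SVGA_VIDEO_* register index plus one, and fill_escape() is handed (num_items + 1) item-sized entries because the cmdType/streamId header is exactly the size of one item. The index value below is a hypothetical placeholder.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/*
	 * The SVGA_VIDEO_* register defines are indices, so the number of
	 * register/value items is the highest index used plus one.  With
	 * screen objects the highest index is SVGA_VIDEO_DST_SCREEN_ID,
	 * without them it is SVGA_VIDEO_PITCH_3; 19 below is a hypothetical
	 * placeholder for that index.
	 */
	unsigned highest_index = 19;
	unsigned num_items = highest_index + 1;

	unsigned item_size   = 2 * sizeof(uint32_t);	/* registerId + value */
	unsigned header_size = 2 * sizeof(uint32_t);	/* cmdType + streamId  */

	/*
	 * fill_escape() is handed (num_items + 1) item-sized entries: the
	 * 8-byte cmdType/streamId header happens to be the size of one item.
	 */
	unsigned escape_body = item_size * (num_items + 1);

	printf("items %u, header %u bytes, escape body %u bytes\n",
	       num_items, header_size, escape_body);
	return 0;
}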
@@ -248,6 +218,25 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
}
/**
+ * Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
+ *
+ * With the introduction of screen objects, buffers can now be
+ * placed in GMRs instead of being locked to vram.
+ */
+static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool inter)
+{
+ if (!pin)
+ return vmw_dmabuf_unpin(dev_priv, buf, inter);
+
+ if (!dev_priv->sou_priv)
+ return vmw_dmabuf_to_vram(dev_priv, buf, true, inter);
+
+ return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter);
+}
+
+/**
* Stop or pause a stream.
*
+ * If the stream is paused, the no evict flag is removed from the buffer
@@ -279,8 +268,8 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
return ret;
/* We just remove the NO_EVICT flag so no -ENOMEM */
- ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
- interruptible);
+ ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
+ interruptible);
if (interruptible && ret == -ERESTARTSYS)
return ret;
else
@@ -342,7 +331,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
/* We don't start the old stream if we are interrupted.
* Might return -ENOMEM if it can't fit the buffer in vram.
*/
- ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
+ ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
if (ret)
return ret;
@@ -351,7 +340,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
/* This one needs to happen no matter what. We only remove
* the NO_EVICT flag so this is safe from -ENOMEM.
*/
- BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
+ BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
+ != 0);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index bfe1bcce7f8..86c5e4cceb3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -31,10 +31,6 @@
#include "ttm/ttm_placement.h"
#include "drmP.h"
-#define VMW_RES_CONTEXT ttm_driver_type0
-#define VMW_RES_SURFACE ttm_driver_type1
-#define VMW_RES_STREAM ttm_driver_type2
-
struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
@@ -43,6 +39,7 @@ struct vmw_user_context {
struct vmw_user_surface {
struct ttm_base_object base;
struct vmw_surface srf;
+ uint32_t size;
};
struct vmw_user_dma_buffer {
@@ -65,6 +62,17 @@ struct vmw_user_stream {
struct vmw_stream stream;
};
+struct vmw_surface_offset {
+ uint32_t face;
+ uint32_t mip;
+ uint32_t bo_offset;
+};
+
+
+static uint64_t vmw_user_context_size;
+static uint64_t vmw_user_surface_size;
+static uint64_t vmw_user_stream_size;
+
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
@@ -84,13 +92,36 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
return res;
}
+
+/**
+ * vmw_resource_release_id - release a resource id to the id manager.
+ *
+ * @res: Pointer to the resource.
+ *
+ * Release the resource id to the resource id manager and set it to -1
+ */
+static void vmw_resource_release_id(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ write_lock(&dev_priv->resource_lock);
+ if (res->id != -1)
+ idr_remove(res->idr, res->id);
+ res->id = -1;
+ write_unlock(&dev_priv->resource_lock);
+}
+
static void vmw_resource_release(struct kref *kref)
{
struct vmw_resource *res =
container_of(kref, struct vmw_resource, kref);
struct vmw_private *dev_priv = res->dev_priv;
+ int id = res->id;
+ struct idr *idr = res->idr;
- idr_remove(res->idr, res->id);
+ res->avail = false;
+ if (res->remove_from_lists != NULL)
+ res->remove_from_lists(res);
write_unlock(&dev_priv->resource_lock);
if (likely(res->hw_destroy != NULL))
@@ -102,6 +133,9 @@ static void vmw_resource_release(struct kref *kref)
kfree(res);
write_lock(&dev_priv->resource_lock);
+
+ if (id != -1)
+ idr_remove(idr, id);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
@@ -115,28 +149,29 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
write_unlock(&dev_priv->resource_lock);
}
-static int vmw_resource_init(struct vmw_private *dev_priv,
- struct vmw_resource *res,
- struct idr *idr,
- enum ttm_object_type obj_type,
- void (*res_free) (struct vmw_resource *res))
+
+/**
+ * vmw_resource_alloc_id - allocate a resource id from the id manager.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @res: Pointer to the resource.
+ *
+ * Allocate the lowest free resource id from the resource id manager, and set
+ * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
+ */
+static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
+ struct vmw_resource *res)
{
int ret;
- kref_init(&res->kref);
- res->hw_destroy = NULL;
- res->res_free = res_free;
- res->res_type = obj_type;
- res->idr = idr;
- res->avail = false;
- res->dev_priv = dev_priv;
+ BUG_ON(res->id != -1);
do {
- if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
+ if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
return -ENOMEM;
write_lock(&dev_priv->resource_lock);
- ret = idr_get_new_above(idr, res, 1, &res->id);
+ ret = idr_get_new_above(res->idr, res, 1, &res->id);
write_unlock(&dev_priv->resource_lock);
} while (ret == -EAGAIN);
@@ -144,6 +179,33 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
return ret;
}
+
+static int vmw_resource_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ struct idr *idr,
+ enum ttm_object_type obj_type,
+ bool delay_id,
+ void (*res_free) (struct vmw_resource *res),
+ void (*remove_from_lists)
+ (struct vmw_resource *res))
+{
+ kref_init(&res->kref);
+ res->hw_destroy = NULL;
+ res->res_free = res_free;
+ res->remove_from_lists = remove_from_lists;
+ res->res_type = obj_type;
+ res->idr = idr;
+ res->avail = false;
+ res->dev_priv = dev_priv;
+ INIT_LIST_HEAD(&res->query_head);
+ INIT_LIST_HEAD(&res->validate_head);
+ res->id = -1;
+ if (delay_id)
+ return 0;
+ else
+ return vmw_resource_alloc_id(dev_priv, res);
+}
+
/**
* vmw_resource_activate
*
@@ -198,8 +260,12 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDestroyContext body;
- } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ } *cmd;
+
+ vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
@@ -211,7 +277,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
cmd->body.cid = cpu_to_le32(res->id);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- vmw_3d_resource_dec(dev_priv);
+ vmw_3d_resource_dec(dev_priv, false);
}
static int vmw_context_init(struct vmw_private *dev_priv,
@@ -226,14 +292,17 @@ static int vmw_context_init(struct vmw_private *dev_priv,
} *cmd;
ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
- VMW_RES_CONTEXT, res_free);
+ VMW_RES_CONTEXT, false, res_free, NULL);
if (unlikely(ret != 0)) {
- if (res_free == NULL)
- kfree(res);
- else
- res_free(res);
- return ret;
+ DRM_ERROR("Failed to allocate a resource id.\n");
+ goto out_early;
+ }
+
+ if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
+ DRM_ERROR("Out of hw context ids.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
}
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
@@ -248,9 +317,16 @@ static int vmw_context_init(struct vmw_private *dev_priv,
cmd->body.cid = cpu_to_le32(res->id);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv);
+ (void) vmw_3d_resource_inc(dev_priv, false);
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
+
+out_early:
+ if (res_free == NULL)
+ kfree(res);
+ else
+ res_free(res);
+ return ret;
}
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
@@ -273,8 +349,11 @@ static void vmw_user_context_free(struct vmw_resource *res)
{
struct vmw_user_context *ctx =
container_of(res, struct vmw_user_context, res);
+ struct vmw_private *dev_priv = res->dev_priv;
kfree(ctx);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_context_size);
}
/**
@@ -328,23 +407,56 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ struct vmw_user_context *ctx;
struct vmw_resource *res;
struct vmw_resource *tmp;
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- if (unlikely(ctx == NULL))
- return -ENOMEM;
+
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+ * by the maximum number of contexts anyway.
+ */
+
+ if (unlikely(vmw_user_context_size == 0))
+ vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_user_context_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for context"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (unlikely(ctx == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_context_size);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
res = &ctx->res;
ctx->base.shareable = false;
ctx->base.tfile = NULL;
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+
ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
if (unlikely(ret != 0))
- return ret;
+ goto out_unlock;
tmp = vmw_resource_reference(&ctx->res);
ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
@@ -358,13 +470,16 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
arg->cid = res->id;
out_err:
vmw_resource_unreference(&res);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
return ret;
}
int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
- int id)
+ int id,
+ struct vmw_resource **p_res)
{
struct vmw_resource *res;
int ret = 0;
@@ -376,6 +491,8 @@ int vmw_context_check(struct vmw_private *dev_priv,
container_of(res, struct vmw_user_context, res);
if (ctx->base.tfile != tfile && !ctx->base.shareable)
ret = -EPERM;
+ if (p_res)
+ *p_res = vmw_resource_reference(res);
} else
ret = -EINVAL;
read_unlock(&dev_priv->resource_lock);
@@ -383,102 +500,638 @@ int vmw_context_check(struct vmw_private *dev_priv,
return ret;
}
+struct vmw_bpp {
+ uint8_t bpp;
+ uint8_t s_bpp;
+};
+
+/*
+ * Size table for the supported SVGA3D surface formats. Each entry
+ * consists of two values: the bpp value and the s_bpp value, which is
+ * short for "stride bits per pixel". The values are given in such a way
+ * that the minimum stride for the image is calculated using
+ *
+ * min_stride = w*s_bpp
+ *
+ * and the total memory requirement for the image is
+ *
+ * h*min_stride*bpp/s_bpp
+ *
+ */
+static const struct vmw_bpp vmw_sf_bpp[] = {
+ [SVGA3D_FORMAT_INVALID] = {0, 0},
+ [SVGA3D_X8R8G8B8] = {32, 32},
+ [SVGA3D_A8R8G8B8] = {32, 32},
+ [SVGA3D_R5G6B5] = {16, 16},
+ [SVGA3D_X1R5G5B5] = {16, 16},
+ [SVGA3D_A1R5G5B5] = {16, 16},
+ [SVGA3D_A4R4G4B4] = {16, 16},
+ [SVGA3D_Z_D32] = {32, 32},
+ [SVGA3D_Z_D16] = {16, 16},
+ [SVGA3D_Z_D24S8] = {32, 32},
+ [SVGA3D_Z_D15S1] = {16, 16},
+ [SVGA3D_LUMINANCE8] = {8, 8},
+ [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
+ [SVGA3D_LUMINANCE16] = {16, 16},
+ [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
+ [SVGA3D_DXT1] = {4, 16},
+ [SVGA3D_DXT2] = {8, 32},
+ [SVGA3D_DXT3] = {8, 32},
+ [SVGA3D_DXT4] = {8, 32},
+ [SVGA3D_DXT5] = {8, 32},
+ [SVGA3D_BUMPU8V8] = {16, 16},
+ [SVGA3D_BUMPL6V5U5] = {16, 16},
+ [SVGA3D_BUMPX8L8V8U8] = {32, 32},
+ [SVGA3D_ARGB_S10E5] = {16, 16},
+ [SVGA3D_ARGB_S23E8] = {32, 32},
+ [SVGA3D_A2R10G10B10] = {32, 32},
+ [SVGA3D_V8U8] = {16, 16},
+ [SVGA3D_Q8W8V8U8] = {32, 32},
+ [SVGA3D_CxV8U8] = {16, 16},
+ [SVGA3D_X8L8V8U8] = {32, 32},
+ [SVGA3D_A2W10V10U10] = {32, 32},
+ [SVGA3D_ALPHA8] = {8, 8},
+ [SVGA3D_R_S10E5] = {16, 16},
+ [SVGA3D_R_S23E8] = {32, 32},
+ [SVGA3D_RG_S10E5] = {16, 16},
+ [SVGA3D_RG_S23E8] = {32, 32},
+ [SVGA3D_BUFFER] = {8, 8},
+ [SVGA3D_Z_D24X8] = {32, 32},
+ [SVGA3D_V16U16] = {32, 32},
+ [SVGA3D_G16R16] = {32, 32},
+ [SVGA3D_A16B16G16R16] = {64, 64},
+ [SVGA3D_UYVY] = {12, 12},
+ [SVGA3D_YUY2] = {12, 12},
+ [SVGA3D_NV12] = {12, 8},
+ [SVGA3D_AYUV] = {32, 32},
+ [SVGA3D_BC4_UNORM] = {4, 16},
+ [SVGA3D_BC5_UNORM] = {8, 32},
+ [SVGA3D_Z_DF16] = {16, 16},
+ [SVGA3D_Z_DF24] = {24, 24},
+ [SVGA3D_Z_D24S8_INT] = {32, 32}
+};
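A stand-alone sketch (not part of the patch) of the stride and size formulas described in the table comment above, matching the arithmetic later used in vmw_surface_dma_encode() (pitch rounded up to whole bytes; total size = pitch * height * depth * bpp / s_bpp). The DXT1 and NV12 entries are copied from the table.

#include <stdint.h>
#include <stdio.h>

struct bpp_entry { uint8_t bpp; uint8_t s_bpp; };

static uint32_t min_stride(uint32_t width, struct bpp_entry e)
{
	return (width * e.s_bpp + 7) >> 3;			/* bytes, rounded up */
}

static uint32_t image_size(uint32_t w, uint32_t h, uint32_t d, struct bpp_entry e)
{
	return min_stride(w, e) * h * d * e.bpp / e.s_bpp;	/* bytes */
}

int main(void)
{
	struct bpp_entry dxt1 = { 4, 16 };	/* SVGA3D_DXT1 */
	struct bpp_entry nv12 = { 12, 8 };	/* SVGA3D_NV12 */

	/* 64x64 DXT1: stride 128 bytes, 2048 bytes total (0.5 byte/pixel). */
	printf("DXT1 64x64: stride %u, size %u\n",
	       min_stride(64, dxt1), image_size(64, 64, 1, dxt1));

	/* 64x64 NV12: stride 64 bytes, 6144 bytes total (1.5 bytes/pixel). */
	printf("NV12 64x64: stride %u, size %u\n",
	       min_stride(64, nv12), image_size(64, 64, 1, nv12));
	return 0;
}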
+
/**
* Surface management.
*/
+struct vmw_surface_dma {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA body;
+ SVGA3dCopyBox cb;
+ SVGA3dCmdSurfaceDMASuffix suffix;
+};
+
+struct vmw_surface_define {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineSurface body;
+};
+
+struct vmw_surface_destroy {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroySurface body;
+};
+
+
+/**
+ * vmw_surface_dma_size - Compute fifo size for a dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface dma command for backup or
+ * restoration of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
+{
+ return srf->num_sizes * sizeof(struct vmw_surface_dma);
+}
+
+
+/**
+ * vmw_surface_define_size - Compute fifo size for a surface define command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface define command for the definition
+ * of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
+{
+ return sizeof(struct vmw_surface_define) + srf->num_sizes *
+ sizeof(SVGA3dSize);
+}
+
+
+/**
+ * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ *
+ * Computes the required size for a surface destroy command for the destruction
+ * of a hw surface.
+ */
+static inline uint32_t vmw_surface_destroy_size(void)
+{
+ return sizeof(struct vmw_surface_destroy);
+}
+
+/**
+ * vmw_surface_destroy_encode - Encode a surface_destroy command.
+ *
+ * @id: The surface id
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_destroy_encode(uint32_t id,
+ void *cmd_space)
+{
+ struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
+ cmd_space;
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.sid = id;
+}
+
+/**
+ * vmw_surface_define_encode - Encode a surface_define command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_define_encode(const struct vmw_surface *srf,
+ void *cmd_space)
+{
+ struct vmw_surface_define *cmd = (struct vmw_surface_define *)
+ cmd_space;
+ struct drm_vmw_size *src_size;
+ SVGA3dSize *cmd_size;
+ uint32_t cmd_len;
+ int i;
+
+ cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
+ cmd->header.size = cmd_len;
+ cmd->body.sid = srf->res.id;
+ cmd->body.surfaceFlags = srf->flags;
+ cmd->body.format = cpu_to_le32(srf->format);
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ cmd->body.face[i].numMipLevels = srf->mip_levels[i];
+
+ cmd += 1;
+ cmd_size = (SVGA3dSize *) cmd;
+ src_size = srf->sizes;
+
+ for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+ cmd_size->width = src_size->width;
+ cmd_size->height = src_size->height;
+ cmd_size->depth = src_size->depth;
+ }
+}
+
+
+/**
+ * vmw_surface_dma_encode - Encode a surface_dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
+ * should be placed or read from.
+ * @to_surface: Boolean whether to DMA to the surface or from the surface.
+ */
+static void vmw_surface_dma_encode(struct vmw_surface *srf,
+ void *cmd_space,
+ const SVGAGuestPtr *ptr,
+ bool to_surface)
+{
+ uint32_t i;
+ uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
+ uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
+ struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
+
+ for (i = 0; i < srf->num_sizes; ++i) {
+ SVGA3dCmdHeader *header = &cmd->header;
+ SVGA3dCmdSurfaceDMA *body = &cmd->body;
+ SVGA3dCopyBox *cb = &cmd->cb;
+ SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
+ const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
+ const struct drm_vmw_size *cur_size = &srf->sizes[i];
+
+ header->id = SVGA_3D_CMD_SURFACE_DMA;
+ header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
+
+ body->guest.ptr = *ptr;
+ body->guest.ptr.offset += cur_offset->bo_offset;
+ body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
+ body->host.sid = srf->res.id;
+ body->host.face = cur_offset->face;
+ body->host.mipmap = cur_offset->mip;
+ body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
+ SVGA3D_READ_HOST_VRAM);
+ cb->x = 0;
+ cb->y = 0;
+ cb->z = 0;
+ cb->srcx = 0;
+ cb->srcy = 0;
+ cb->srcz = 0;
+ cb->w = cur_size->width;
+ cb->h = cur_size->height;
+ cb->d = cur_size->depth;
+
+ suffix->suffixSize = sizeof(*suffix);
+ suffix->maximumOffset = body->guest.pitch*cur_size->height*
+ cur_size->depth*bpp / stride_bpp;
+ suffix->flags.discard = 0;
+ suffix->flags.unsynchronized = 0;
+ suffix->flags.reserved = 0;
+ ++cmd;
+ }
+}
+
+
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroySurface body;
- } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ struct vmw_surface *srf;
+ void *cmd;
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
- return;
- }
+ if (res->id != -1) {
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.sid = cpu_to_le32(res->id);
+ cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
- vmw_3d_resource_dec(dev_priv);
+ vmw_surface_destroy_encode(res->id, cmd);
+ vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+
+ /*
+ * TODO: Use an atomic used_memory_size, or a separate lock, to
+ * avoid taking dev_priv::cmdbuf_mutex in the destroy path.
+ */
+
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ srf = container_of(res, struct vmw_surface, res);
+ dev_priv->used_memory_size -= srf->backup_size;
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ }
+ vmw_3d_resource_dec(dev_priv, false);
}
void vmw_surface_res_free(struct vmw_resource *res)
{
struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
+ if (srf->backup)
+ ttm_bo_unref(&srf->backup);
+ kfree(srf->offsets);
kfree(srf->sizes);
kfree(srf->snooper.image);
kfree(srf);
}
-int vmw_surface_init(struct vmw_private *dev_priv,
- struct vmw_surface *srf,
- void (*res_free) (struct vmw_resource *res))
+
+/**
+ * vmw_surface_do_validate - make a surface available to the device.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @srf: Pointer to a struct vmw_surface.
+ *
+ * If the surface doesn't have a hw id, allocate one, and optionally
+ * DMA the backed up surface contents to the device.
+ *
+ * Returns -EBUSY if there weren't sufficient device resources to
+ * complete the validation. Retry after freeing up resources.
+ *
+ * May return other errors if the kernel is out of guest resources.
+ */
+int vmw_surface_do_validate(struct vmw_private *dev_priv,
+ struct vmw_surface *srf)
{
- int ret;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineSurface body;
- } *cmd;
- SVGA3dSize *cmd_size;
struct vmw_resource *res = &srf->res;
- struct drm_vmw_size *src_size;
- size_t submit_size;
- uint32_t cmd_len;
- int i;
+ struct list_head val_list;
+ struct ttm_validate_buffer val_buf;
+ uint32_t submit_size;
+ uint8_t *cmd;
+ int ret;
- BUG_ON(res_free == NULL);
- ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
- VMW_RES_SURFACE, res_free);
+ if (likely(res->id != -1))
+ return 0;
+
+ if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
+ dev_priv->memory_size))
+ return -EBUSY;
+
+ /*
+ * Reserve and validate the backup DMA bo.
+ */
+
+ if (srf->backup) {
+ INIT_LIST_HEAD(&val_list);
+ val_buf.bo = ttm_bo_reference(srf->backup);
+ val_buf.new_sync_obj_arg = (void *)((unsigned long)
+ DRM_VMW_FENCE_FLAG_EXEC);
+ list_add_tail(&val_buf.head, &val_list);
+ ret = ttm_eu_reserve_buffers(&val_list);
+ if (unlikely(ret != 0))
+ goto out_no_reserve;
+
+ ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
+ true, false, false);
+ if (unlikely(ret != 0))
+ goto out_no_validate;
+ }
+
+ /*
+ * Alloc id for the resource.
+ */
+ ret = vmw_resource_alloc_id(dev_priv, res);
if (unlikely(ret != 0)) {
- res_free(res);
- return ret;
+ DRM_ERROR("Failed to allocate a surface id.\n");
+ goto out_no_id;
+ }
+ if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
}
- submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
- cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+ /*
+ * Encode surface define- and dma commands.
+ */
+
+ submit_size = vmw_surface_define_size(srf);
+ if (srf->backup)
+ submit_size += vmw_surface_dma_size(srf);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed for create surface.\n");
- vmw_resource_unreference(&res);
- return -ENOMEM;
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "validation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
}
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
- cmd->header.size = cpu_to_le32(cmd_len);
- cmd->body.sid = cpu_to_le32(res->id);
- cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
- cmd->body.format = cpu_to_le32(srf->format);
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
- cmd->body.face[i].numMipLevels =
- cpu_to_le32(srf->mip_levels[i]);
+ vmw_surface_define_encode(srf, cmd);
+ if (srf->backup) {
+ SVGAGuestPtr ptr;
+
+ cmd += vmw_surface_define_size(srf);
+ vmw_bo_get_guest_ptr(srf->backup, &ptr);
+ vmw_surface_dma_encode(srf, cmd, &ptr, true);
}
- cmd += 1;
- cmd_size = (SVGA3dSize *) cmd;
- src_size = srf->sizes;
+ vmw_fifo_commit(dev_priv, submit_size);
- for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
- cmd_size->width = cpu_to_le32(src_size->width);
- cmd_size->height = cpu_to_le32(src_size->height);
- cmd_size->depth = cpu_to_le32(src_size->depth);
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ if (srf->backup) {
+ struct vmw_fence_obj *fence;
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+ ttm_eu_fence_buffer_objects(&val_list, fence);
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+ ttm_bo_unref(&val_buf.bo);
+ ttm_bo_unref(&srf->backup);
+ }
+
+ /*
+ * Surface memory usage accounting.
+ */
+
+ dev_priv->used_memory_size += srf->backup_size;
+
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+out_no_validate:
+ if (srf->backup)
+ ttm_eu_backoff_reservation(&val_list);
+out_no_reserve:
+ if (srf->backup)
+ ttm_bo_unref(&val_buf.bo);
+ return ret;
+}
+
+/**
+ * vmw_surface_evict - Evict a hw surface.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * DMA the contents of a hw surface to a backup guest buffer object,
+ * and destroy the hw surface, releasing its id.
+ */
+int vmw_surface_evict(struct vmw_private *dev_priv,
+ struct vmw_surface *srf)
+{
+ struct vmw_resource *res = &srf->res;
+ struct list_head val_list;
+ struct ttm_validate_buffer val_buf;
+ uint32_t submit_size;
+ uint8_t *cmd;
+ int ret;
+ struct vmw_fence_obj *fence;
+ SVGAGuestPtr ptr;
+
+ BUG_ON(res->id == -1);
+
+ /*
+ * Create a surface backup buffer object.
+ */
+
+ if (!srf->backup) {
+ ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
+ ttm_bo_type_device,
+ &vmw_srf_placement, 0, 0, true,
+ NULL, &srf->backup);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ /*
+ * Reserve and validate the backup DMA bo.
+ */
+
+ INIT_LIST_HEAD(&val_list);
+ val_buf.bo = ttm_bo_reference(srf->backup);
+ val_buf.new_sync_obj_arg = (void *)(unsigned long)
+ DRM_VMW_FENCE_FLAG_EXEC;
+ list_add_tail(&val_buf.head, &val_list);
+ ret = ttm_eu_reserve_buffers(&val_list);
+ if (unlikely(ret != 0))
+ goto out_no_reserve;
+
+ ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
+ true, false, false);
+ if (unlikely(ret != 0))
+ goto out_no_validate;
+
+
+ /*
+ * Encode the dma- and surface destroy commands.
+ */
+
+ submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "eviction.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
}
+ vmw_bo_get_guest_ptr(srf->backup, &ptr);
+ vmw_surface_dma_encode(srf, cmd, &ptr, false);
+ cmd += vmw_surface_dma_size(srf);
+ vmw_surface_destroy_encode(res->id, cmd);
vmw_fifo_commit(dev_priv, submit_size);
- (void) vmw_3d_resource_inc(dev_priv);
- vmw_resource_activate(res, vmw_hw_surface_destroy);
+
+ /*
+ * Surface memory usage accounting.
+ */
+
+ dev_priv->used_memory_size -= srf->backup_size;
+
+ /*
+ * Create a fence object and fence the DMA buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+ ttm_eu_fence_buffer_objects(&val_list, fence);
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+ ttm_bo_unref(&val_buf.bo);
+
+ /*
+ * Release the surface ID.
+ */
+
+ vmw_resource_release_id(res);
+
return 0;
+
+out_no_fifo:
+out_no_validate:
+ if (srf->backup)
+ ttm_eu_backoff_reservation(&val_list);
+out_no_reserve:
+ ttm_bo_unref(&val_buf.bo);
+ ttm_bo_unref(&srf->backup);
+ return ret;
+}
+
+
+/**
+ * vmw_surface_validate - make a surface available to the device, evicting
+ * other surfaces if needed.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @srf: Pointer to a struct vmw_surface.
+ *
+ * Try to validate a surface and if it fails due to limited device resources,
+ * repeatedly try to evict other surfaces until the request can be
+ * accommodated.
+ *
+ * May return errors if out of resources.
+ */
+int vmw_surface_validate(struct vmw_private *dev_priv,
+ struct vmw_surface *srf)
+{
+ int ret;
+ struct vmw_surface *evict_srf;
+
+ do {
+ write_lock(&dev_priv->resource_lock);
+ list_del_init(&srf->lru_head);
+ write_unlock(&dev_priv->resource_lock);
+
+ ret = vmw_surface_do_validate(dev_priv, srf);
+ if (likely(ret != -EBUSY))
+ break;
+
+ write_lock(&dev_priv->resource_lock);
+ if (list_empty(&dev_priv->surface_lru)) {
+ DRM_ERROR("Out of device memory for surfaces.\n");
+ ret = -EBUSY;
+ write_unlock(&dev_priv->resource_lock);
+ break;
+ }
+
+ evict_srf = vmw_surface_reference
+ (list_first_entry(&dev_priv->surface_lru,
+ struct vmw_surface,
+ lru_head));
+ list_del_init(&evict_srf->lru_head);
+
+ write_unlock(&dev_priv->resource_lock);
+ (void) vmw_surface_evict(dev_priv, evict_srf);
+
+ vmw_surface_unreference(&evict_srf);
+
+ } while (1);
+
+ if (unlikely(ret != 0 && srf->res.id != -1)) {
+ write_lock(&dev_priv->resource_lock);
+ list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
+ write_unlock(&dev_priv->resource_lock);
+ }
+
+ return ret;
+}
+
+
+/**
+ * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
+ *
+ * As part of the resource destruction, remove the surface from any
+ * lookup lists.
+ */
+static void vmw_surface_remove_from_lists(struct vmw_resource *res)
+{
+ struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
+
+ list_del_init(&srf->lru_head);
+}
+
+int vmw_surface_init(struct vmw_private *dev_priv,
+ struct vmw_surface *srf,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+ struct vmw_resource *res = &srf->res;
+
+ BUG_ON(res_free == NULL);
+ INIT_LIST_HEAD(&srf->lru_head);
+ ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
+ VMW_RES_SURFACE, true, res_free,
+ vmw_surface_remove_from_lists);
+
+ if (unlikely(ret != 0))
+ res_free(res);
+
+ /*
+ * The surface won't be visible to hardware until a
+ * surface validate.
+ */
+
+ (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_resource_activate(res, vmw_hw_surface_destroy);
+ return ret;
}
static void vmw_user_surface_free(struct vmw_resource *res)
@@ -486,12 +1139,58 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
+ struct vmw_private *dev_priv = srf->res.dev_priv;
+ uint32_t size = user_srf->size;
+ if (srf->backup)
+ ttm_bo_unref(&srf->backup);
+ kfree(srf->offsets);
kfree(srf->sizes);
kfree(srf->snooper.image);
kfree(user_srf);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_resource_unreserve - unreserve resources previously reserved for
+ * command submission.
+ *
+ * @list: list of resources to unreserve.
+ *
+ * Currently only surfaces are considered: unreserving a surface means
+ * putting it back on the device's surface LRU list, so that it can be
+ * evicted if necessary. This function traverses the resource list and
+ * moves any surfaces it finds back onto that LRU list.
+ */
+void vmw_resource_unreserve(struct list_head *list)
+{
+ struct vmw_resource *res;
+ struct vmw_surface *srf;
+ rwlock_t *lock = NULL;
+
+ list_for_each_entry(res, list, validate_head) {
+
+ if (res->res_free != &vmw_surface_res_free &&
+ res->res_free != &vmw_user_surface_free)
+ continue;
+
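+		/* Take the resource lock lazily, when the first surface is found. */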
+ if (unlikely(lock == NULL)) {
+ lock = &res->dev_priv->resource_lock;
+ write_lock(lock);
+ }
+
+ srf = container_of(res, struct vmw_surface, res);
+ list_del_init(&srf->lru_head);
+ list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
+ }
+
+ if (lock != NULL)
+ write_unlock(lock);
}
+
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle, struct vmw_surface **out)
@@ -556,8 +1255,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_surface *user_srf =
- kmalloc(sizeof(*user_srf), GFP_KERNEL);
+ struct vmw_user_surface *user_srf;
struct vmw_surface *srf;
struct vmw_resource *res;
struct vmw_resource *tmp;
@@ -568,10 +1266,51 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct drm_vmw_size __user *user_sizes;
int ret;
- int i;
+ int i, j;
+ uint32_t cur_bo_offset;
+ struct drm_vmw_size *cur_size;
+ struct vmw_surface_offset *cur_offset;
+ uint32_t stride_bpp;
+ uint32_t bpp;
+ uint32_t num_sizes;
+ uint32_t size;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
- if (unlikely(user_srf == NULL))
- return -ENOMEM;
+ if (unlikely(vmw_user_surface_size == 0))
+ vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+ 128;
+
+ num_sizes = 0;
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ num_sizes += req->mip_levels[i];
+
+ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+ DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
+
+ size = vmw_user_surface_size + 128 +
+ ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
+ ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
+
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ size, false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for surface"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
+ if (unlikely(user_srf == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_user_srf;
+ }
srf = &user_srf->srf;
res = &srf->res;
@@ -579,21 +1318,22 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->flags = req->flags;
srf->format = req->format;
srf->scanout = req->scanout;
- memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
- srf->num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
- srf->num_sizes += srf->mip_levels[i];
+ srf->backup = NULL;
- if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS) {
- ret = -EINVAL;
- goto out_err0;
- }
+ memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
+ srf->num_sizes = num_sizes;
+ user_srf->size = size;
srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
if (unlikely(srf->sizes == NULL)) {
ret = -ENOMEM;
- goto out_err0;
+ goto out_no_sizes;
+ }
+ srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
+ GFP_KERNEL);
+	if (unlikely(srf->offsets == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_offsets;
}
user_sizes = (struct drm_vmw_size __user *)(unsigned long)
@@ -603,9 +1343,32 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->num_sizes * sizeof(*srf->sizes));
if (unlikely(ret != 0)) {
ret = -EFAULT;
- goto out_err1;
+ goto out_no_copy;
}
+ cur_bo_offset = 0;
+ cur_offset = srf->offsets;
+ cur_size = srf->sizes;
+
+ bpp = vmw_sf_bpp[srf->format].bpp;
+ stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
+
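+	/*
+	 * Compute the offset of each face and mip level into the backup
+	 * buffer, and accumulate the total backup buffer size.
+	 */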
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ for (j = 0; j < srf->mip_levels[i]; ++j) {
+ uint32_t stride =
+ (cur_size->width * stride_bpp + 7) >> 3;
+
+ cur_offset->face = i;
+ cur_offset->mip = j;
+ cur_offset->bo_offset = cur_bo_offset;
+ cur_bo_offset += stride * cur_size->height *
+ cur_size->depth * bpp / stride_bpp;
+ ++cur_offset;
+ ++cur_size;
+ }
+ }
+ srf->backup_size = cur_bo_offset;
+
if (srf->scanout &&
srf->num_sizes == 1 &&
srf->sizes[0].width == 64 &&
@@ -617,7 +1380,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (!srf->snooper.image) {
DRM_ERROR("Failed to allocate cursor_image\n");
ret = -ENOMEM;
- goto out_err1;
+ goto out_no_copy;
}
} else {
srf->snooper.image = NULL;
@@ -634,7 +1397,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
if (unlikely(ret != 0))
- return ret;
+ goto out_unlock;
tmp = vmw_resource_reference(&srf->res);
ret = ttm_base_object_init(tfile, &user_srf->base,
@@ -644,7 +1407,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
vmw_resource_unreference(&res);
- return ret;
+ goto out_unlock;
}
rep->sid = user_srf->base.hash.key;
@@ -652,11 +1415,19 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
DRM_ERROR("Created bad Surface ID.\n");
vmw_resource_unreference(&res);
+
+ ttm_read_unlock(&vmaster->lock);
return 0;
-out_err1:
+out_no_copy:
+ kfree(srf->offsets);
+out_no_offsets:
kfree(srf->sizes);
-out_err0:
+out_no_sizes:
kfree(user_srf);
+out_no_user_srf:
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
return ret;
}
@@ -970,7 +1741,7 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
int ret;
ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
- VMW_RES_STREAM, res_free);
+ VMW_RES_STREAM, false, res_free, NULL);
if (unlikely(ret != 0)) {
if (res_free == NULL)
@@ -1000,8 +1771,11 @@ static void vmw_user_stream_free(struct vmw_resource *res)
{
struct vmw_user_stream *stream =
container_of(res, struct vmw_user_stream, stream.res);
+ struct vmw_private *dev_priv = res->dev_priv;
kfree(stream);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_stream_size);
}
/**
@@ -1055,23 +1829,56 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+ struct vmw_user_stream *stream;
struct vmw_resource *res;
struct vmw_resource *tmp;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- if (unlikely(stream == NULL))
- return -ENOMEM;
+ /*
+	 * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by the maximum number of streams anyway.
+ */
+
+ if (unlikely(vmw_user_stream_size == 0))
+ vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_user_stream_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for stream"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+
+ stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+ if (unlikely(stream == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_stream_size);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
res = &stream->stream.res;
stream->base.shareable = false;
stream->base.tfile = NULL;
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+
ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
if (unlikely(ret != 0))
- return ret;
+ goto out_unlock;
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
@@ -1085,6 +1892,8 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
arg->stream_id = res->id;
out_err:
vmw_resource_unreference(&res);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
new file mode 100644
index 00000000000..4defdcf1c72
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -0,0 +1,537 @@
+/**************************************************************************
+ *
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_kms.h"
+
+
+#define vmw_crtc_to_sou(x) \
+ container_of(x, struct vmw_screen_object_unit, base.crtc)
+#define vmw_encoder_to_sou(x) \
+ container_of(x, struct vmw_screen_object_unit, base.encoder)
+#define vmw_connector_to_sou(x) \
+ container_of(x, struct vmw_screen_object_unit, base.connector)
+
+struct vmw_screen_object_display {
+ unsigned num_implicit;
+
+ struct vmw_framebuffer *implicit_fb;
+};
+
+/**
+ * Display unit using screen objects.
+ */
+struct vmw_screen_object_unit {
+ struct vmw_display_unit base;
+
+ unsigned long buffer_size; /**< Size of allocated buffer */
+ struct vmw_dma_buffer *buffer; /**< Backing store buffer */
+
+ bool defined;
+ bool active_implicit;
+};
+
+static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
+{
+ vmw_display_unit_cleanup(&sou->base);
+ kfree(sou);
+}
+
+
+/*
+ * Screen Object Display Unit CRTC functions
+ */
+
+static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
+{
+ vmw_sou_destroy(vmw_crtc_to_sou(crtc));
+}
+
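+/**
+ * Drop this unit's claim on the shared implicit framebuffer, clearing the
+ * framebuffer pointer once the last implicit user is gone.
+ */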
+static void vmw_sou_del_active(struct vmw_private *vmw_priv,
+ struct vmw_screen_object_unit *sou)
+{
+ struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
+
+ if (sou->active_implicit) {
+ if (--(ld->num_implicit) == 0)
+ ld->implicit_fb = NULL;
+ sou->active_implicit = false;
+ }
+}
+
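+/**
+ * Register this unit as a user of the shared implicit framebuffer.
+ */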
+static void vmw_sou_add_active(struct vmw_private *vmw_priv,
+ struct vmw_screen_object_unit *sou,
+ struct vmw_framebuffer *vfb)
+{
+ struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
+
+ BUG_ON(!ld->num_implicit && ld->implicit_fb);
+
+ if (!sou->active_implicit && sou->base.is_implicit) {
+ ld->implicit_fb = vfb;
+ sou->active_implicit = true;
+ ld->num_implicit++;
+ }
+}
+
+/**
+ * Send the fifo command to create a screen.
+ */
+static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
+ struct vmw_screen_object_unit *sou,
+ uint32_t x, uint32_t y,
+ struct drm_display_mode *mode)
+{
+ size_t fifo_size;
+
+ struct {
+ struct {
+ uint32_t cmdType;
+ } header;
+ SVGAScreenObject obj;
+ } *cmd;
+
+ BUG_ON(!sou->buffer);
+
+ fifo_size = sizeof(*cmd);
+ cmd = vmw_fifo_reserve(dev_priv, fifo_size);
+ /* The hardware has hung, nothing we can do about it here. */
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return -ENOMEM;
+ }
+
+ memset(cmd, 0, fifo_size);
+ cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
+ cmd->obj.structSize = sizeof(SVGAScreenObject);
+ cmd->obj.id = sou->base.unit;
+ cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
+ (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
+ cmd->obj.size.width = mode->hdisplay;
+ cmd->obj.size.height = mode->vdisplay;
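+	/*
+	 * Implicit screens are positioned at the coordinates requested by
+	 * the modeset; explicit screens keep their fixed GUI position.
+	 */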
+ if (sou->base.is_implicit) {
+ cmd->obj.root.x = x;
+ cmd->obj.root.y = y;
+ } else {
+ cmd->obj.root.x = sou->base.gui_x;
+ cmd->obj.root.y = sou->base.gui_y;
+ }
+
+ /* Ok to assume that buffer is pinned in vram */
+ vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
+ cmd->obj.backingStore.pitch = mode->hdisplay * 4;
+
+ vmw_fifo_commit(dev_priv, fifo_size);
+
+ sou->defined = true;
+
+ return 0;
+}
+
+/**
+ * Send the fifo command to destroy a screen.
+ */
+static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
+ struct vmw_screen_object_unit *sou)
+{
+ size_t fifo_size;
+ int ret;
+
+ struct {
+ struct {
+ uint32_t cmdType;
+ } header;
+ SVGAFifoCmdDestroyScreen body;
+ } *cmd;
+
+ /* no need to do anything */
+ if (unlikely(!sou->defined))
+ return 0;
+
+ fifo_size = sizeof(*cmd);
+ cmd = vmw_fifo_reserve(dev_priv, fifo_size);
+ /* the hardware has hung, nothing we can do about it here */
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return -ENOMEM;
+ }
+
+ memset(cmd, 0, fifo_size);
+ cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
+ cmd->body.screenId = sou->base.unit;
+
+ vmw_fifo_commit(dev_priv, fifo_size);
+
+ /* Force sync */
+ ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
+ if (unlikely(ret != 0))
+		DRM_ERROR("Failed to sync with HW.\n");
+ else
+ sou->defined = false;
+
+ return ret;
+}
+
+/**
+ * Free the backing store.
+ */
+static void vmw_sou_backing_free(struct vmw_private *dev_priv,
+ struct vmw_screen_object_unit *sou)
+{
+ struct ttm_buffer_object *bo;
+
+ if (unlikely(sou->buffer == NULL))
+ return;
+
+ bo = &sou->buffer->base;
+ ttm_bo_unref(&bo);
+ sou->buffer = NULL;
+ sou->buffer_size = 0;
+}
+
+/**
+ * Allocate the backing store for the buffer.
+ */
+static int vmw_sou_backing_alloc(struct vmw_private *dev_priv,
+ struct vmw_screen_object_unit *sou,
+ unsigned long size)
+{
+ int ret;
+
+ if (sou->buffer_size == size)
+ return 0;
+
+ if (sou->buffer)
+ vmw_sou_backing_free(dev_priv, sou);
+
+ sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL);
+ if (unlikely(sou->buffer == NULL))
+ return -ENOMEM;
+
+	/* After the backing store has been allocated we might not be able
+	 * to resume the overlays; this is preferred to failing the allocation.
+ */
+ vmw_overlay_pause_all(dev_priv);
+ ret = vmw_dmabuf_init(dev_priv, sou->buffer, size,
+ &vmw_vram_ne_placement,
+ false, &vmw_dmabuf_bo_free);
+ vmw_overlay_resume_all(dev_priv);
+
+ if (unlikely(ret != 0))
+ sou->buffer = NULL; /* vmw_dmabuf_init frees on error */
+ else
+ sou->buffer_size = size;
+
+ return ret;
+}
+
+static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
+{
+ struct vmw_private *dev_priv;
+ struct vmw_screen_object_unit *sou;
+ struct drm_connector *connector;
+ struct drm_display_mode *mode;
+ struct drm_encoder *encoder;
+ struct vmw_framebuffer *vfb;
+ struct drm_framebuffer *fb;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ if (!set)
+ return -EINVAL;
+
+ if (!set->crtc)
+ return -EINVAL;
+
+ /* get the sou */
+ crtc = set->crtc;
+ sou = vmw_crtc_to_sou(crtc);
+ vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
+ dev_priv = vmw_priv(crtc->dev);
+
+ if (set->num_connectors > 1) {
+		DRM_ERROR("Too many connectors.\n");
+ return -EINVAL;
+ }
+
+ if (set->num_connectors == 1 &&
+ set->connectors[0] != &sou->base.connector) {
+ DRM_ERROR("connector doesn't match %p %p\n",
+ set->connectors[0], &sou->base.connector);
+ return -EINVAL;
+ }
+
+	/* sou only supports one fb active at a time */
+ if (sou->base.is_implicit &&
+ dev_priv->sou_priv->implicit_fb && vfb &&
+ !(dev_priv->sou_priv->num_implicit == 1 &&
+ sou->active_implicit) &&
+ dev_priv->sou_priv->implicit_fb != vfb) {
+ DRM_ERROR("Multiple framebuffers not supported\n");
+ return -EINVAL;
+ }
+
+ /* since they always map one to one these are safe */
+ connector = &sou->base.connector;
+ encoder = &sou->base.encoder;
+
+ /* should we turn the crtc off */
+ if (set->num_connectors == 0 || !set->mode || !set->fb) {
+ ret = vmw_sou_fifo_destroy(dev_priv, sou);
+		/* the hardware has hung, don't do anything more */
+ if (unlikely(ret != 0))
+ return ret;
+
+ connector->encoder = NULL;
+ encoder->crtc = NULL;
+ crtc->fb = NULL;
+ crtc->x = 0;
+ crtc->y = 0;
+
+ vmw_sou_del_active(dev_priv, sou);
+
+ vmw_sou_backing_free(dev_priv, sou);
+
+ return 0;
+ }
+
+
+ /* we now know we want to set a mode */
+ mode = set->mode;
+ fb = set->fb;
+
+ if (set->x + mode->hdisplay > fb->width ||
+ set->y + mode->vdisplay > fb->height) {
+ DRM_ERROR("set outside of framebuffer\n");
+ return -EINVAL;
+ }
+
+ vmw_fb_off(dev_priv);
+
+ if (mode->hdisplay != crtc->mode.hdisplay ||
+ mode->vdisplay != crtc->mode.vdisplay) {
+		/* No need to check whether the depth differs: the backing
+		 * store is always 4 bytes per pixel, as required by the device.
+ */
+
+ ret = vmw_sou_fifo_destroy(dev_priv, sou);
+		/* the hardware has hung, don't do anything more */
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_sou_backing_free(dev_priv, sou);
+ }
+
+ if (!sou->buffer) {
+		/* forced to 4 bytes per pixel by the device */
+ size_t size = mode->hdisplay * mode->vdisplay * 4;
+ ret = vmw_sou_backing_alloc(dev_priv, sou, size);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode);
+ if (unlikely(ret != 0)) {
+ /*
+		 * We are in a bad situation here: the hardware has hung,
+		 * and we may or may not have a buffer hanging off the
+		 * screen object. The best thing to do is nothing if the
+		 * screen was already defined; otherwise just turn the crtc
+		 * off. Not what userspace wants, but it will have to cope.
+ */
+ if (sou->defined)
+ return ret;
+
+ connector->encoder = NULL;
+ encoder->crtc = NULL;
+ crtc->fb = NULL;
+ crtc->x = 0;
+ crtc->y = 0;
+
+ return ret;
+ }
+
+ vmw_sou_add_active(dev_priv, sou, vfb);
+
+ connector->encoder = encoder;
+ encoder->crtc = crtc;
+ crtc->mode = *mode;
+ crtc->fb = fb;
+ crtc->x = set->x;
+ crtc->y = set->y;
+
+ return 0;
+}
+
+static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
+ .save = vmw_du_crtc_save,
+ .restore = vmw_du_crtc_restore,
+ .cursor_set = vmw_du_crtc_cursor_set,
+ .cursor_move = vmw_du_crtc_cursor_move,
+ .gamma_set = vmw_du_crtc_gamma_set,
+ .destroy = vmw_sou_crtc_destroy,
+ .set_config = vmw_sou_crtc_set_config,
+};
+
+/*
+ * Screen Object Display Unit encoder functions
+ */
+
+static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
+{
+ vmw_sou_destroy(vmw_encoder_to_sou(encoder));
+}
+
+static struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
+ .destroy = vmw_sou_encoder_destroy,
+};
+
+/*
+ * Screen Object Display Unit connector functions
+ */
+
+static void vmw_sou_connector_destroy(struct drm_connector *connector)
+{
+ vmw_sou_destroy(vmw_connector_to_sou(connector));
+}
+
+static struct drm_connector_funcs vmw_sou_connector_funcs = {
+ .dpms = vmw_du_connector_dpms,
+ .save = vmw_du_connector_save,
+ .restore = vmw_du_connector_restore,
+ .detect = vmw_du_connector_detect,
+ .fill_modes = vmw_du_connector_fill_modes,
+ .set_property = vmw_du_connector_set_property,
+ .destroy = vmw_sou_connector_destroy,
+};
+
+static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
+{
+ struct vmw_screen_object_unit *sou;
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+
+ sou = kzalloc(sizeof(*sou), GFP_KERNEL);
+ if (!sou)
+ return -ENOMEM;
+
+ sou->base.unit = unit;
+ crtc = &sou->base.crtc;
+ encoder = &sou->base.encoder;
+ connector = &sou->base.connector;
+
+ sou->active_implicit = false;
+
+ sou->base.pref_active = (unit == 0);
+ sou->base.pref_width = 800;
+ sou->base.pref_height = 600;
+ sou->base.pref_mode = NULL;
+ sou->base.is_implicit = true;
+
+	drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ connector->status = vmw_du_connector_detect(connector, true);
+
+ drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL);
+ drm_mode_connector_attach_encoder(connector, encoder);
+ encoder->possible_crtcs = (1 << unit);
+ encoder->possible_clones = 0;
+
+ drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+
+ drm_connector_attach_property(connector,
+ dev->mode_config.dirty_info_property,
+ 1);
+
+ return 0;
+}
+
+int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int i, ret;
+
+ if (dev_priv->sou_priv) {
+ DRM_INFO("sou system already on\n");
+ return -EINVAL;
+ }
+
+ if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)) {
+ DRM_INFO("Not using screen objects,"
+ " missing cap SCREEN_OBJECT_2\n");
+ return -ENOSYS;
+ }
+
+ ret = -ENOMEM;
+ dev_priv->sou_priv = kmalloc(sizeof(*dev_priv->sou_priv), GFP_KERNEL);
+ if (unlikely(!dev_priv->sou_priv))
+ goto err_no_mem;
+
+ dev_priv->sou_priv->num_implicit = 0;
+ dev_priv->sou_priv->implicit_fb = NULL;
+
+ ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+ if (unlikely(ret != 0))
+ goto err_free;
+
+ ret = drm_mode_create_dirty_info_property(dev);
+ if (unlikely(ret != 0))
+ goto err_vblank_cleanup;
+
+ for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
+ vmw_sou_init(dev_priv, i);
+
+ DRM_INFO("Screen objects system initialized\n");
+
+ return 0;
+
+err_vblank_cleanup:
+ drm_vblank_cleanup(dev);
+err_free:
+ kfree(dev_priv->sou_priv);
+ dev_priv->sou_priv = NULL;
+err_no_mem:
+ return ret;
+}
+
+int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!dev_priv->sou_priv)
+ return -ENOSYS;
+
+ drm_vblank_cleanup(dev);
+
+ kfree(dev_priv->sou_priv);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 1e8eedd901e..d3c11f5184f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -34,9 +34,8 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
struct vmw_private *dev_priv;
if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
- if (vmw_fifo_mmap(filp, vma) == 0)
- return 0;
- return drm_mmap(filp, vma);
+ DRM_ERROR("Illegal attempt to mmap old fifo space.\n");
+ return -EINVAL;
}
file_priv = filp->private_data;