author | Dave Airlie <airlied@starflyer.(none)> | 2006-01-02 21:32:48 +1100
---|---|---
committer | Dave Airlie <airlied@linux.ie> | 2006-01-02 21:32:48 +1100
commit | d985c1088146607532093d9eaaaf99758f6a4d21 |
tree | a289ae9a79e694ef1f0cf3b0a153c6d1258b7aa8 /drivers/char/drm/radeon_state.c |
parent | b0cae664ebc85f2431c5a7c9e192a2a2ef72ecc7 |
drm: major update from CVS for radeon and core
This patch pulls in a lot of changes from CVS to the main core DRM,
and updates the radeon driver to 1.21.0, which supports r300 texrect
and the radeon card type ioctl.
Signed-off-by: Dave Airlie <airlied@linux.ie>
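For context, the new card type query added by this update goes through the existing radeon getparam ioctl. A minimal userspace sketch, assuming a libdrm-style environment (xf86drm.h, radeon_drm.h, an already-open DRM fd); the helper name is illustrative and not part of this patch:

    #include <string.h>
    #include <xf86drm.h>	/* drmCommandWriteRead() */
    #include <radeon_drm.h>	/* drm_radeon_getparam_t, RADEON_PARAM_CARD_TYPE */

    /* Returns RADEON_CARD_PCI, RADEON_CARD_AGP or RADEON_CARD_PCIE, or -1 on
     * failure.  Kernels without this update reject the unknown parameter with
     * EINVAL, so -1 doubles as a "feature not present" probe. */
    static int radeon_get_card_type(int fd)
    {
    	drm_radeon_getparam_t gp;
    	int type;

    	memset(&gp, 0, sizeof(gp));
    	gp.param = RADEON_PARAM_CARD_TYPE;
    	gp.value = &type;
    	if (drmCommandWriteRead(fd, DRM_RADEON_GETPARAM, &gp, sizeof(gp)))
    		return -1;
    	return type;
    }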
Diffstat (limited to 'drivers/char/drm/radeon_state.c')
-rw-r--r-- | drivers/char/drm/radeon_state.c | 184
1 file changed, 72 insertions(+), 112 deletions(-)
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 9e816c63a8a..e9d8ec3a099 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -1,5 +1,5 @@
-/* radeon_state.c -- State support for Radeon -*- linux-c -*-
- *
+/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
+/*
  * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
  * All Rights Reserved.
  *
@@ -72,10 +72,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
 	case RADEON_EMIT_PP_MISC:
 		if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
-						  &data[(RADEON_RB3D_DEPTHOFFSET
-							 -
-							 RADEON_PP_MISC) /
-							 4])) {
+						  &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
 			DRM_ERROR("Invalid depth buffer offset\n");
 			return DRM_ERR(EINVAL);
 		}
@@ -83,10 +80,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
 	case RADEON_EMIT_PP_CNTL:
 		if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
-						  &data[(RADEON_RB3D_COLOROFFSET
-							 -
-							 RADEON_PP_CNTL) /
-							 4])) {
+						  &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
 			DRM_ERROR("Invalid colour buffer offset\n");
 			return DRM_ERR(EINVAL);
 		}
@@ -109,10 +103,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
 	case RADEON_EMIT_PP_TXFILTER_1:
 	case RADEON_EMIT_PP_TXFILTER_2:
 		if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
-						  &data[(RADEON_PP_TXOFFSET_0
-							 -
-							 RADEON_PP_TXFILTER_0) /
-							 4])) {
+						  &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
 			DRM_ERROR("Invalid R100 texture offset\n");
 			return DRM_ERR(EINVAL);
 		}
@@ -126,8 +117,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
 	case R200_EMIT_PP_CUBIC_OFFSETS_5:{
 			int i;
 			for (i = 0; i < 5; i++) {
-				if (radeon_check_and_fixup_offset
-				    (dev_priv, filp_priv, &data[i])) {
+				if (radeon_check_and_fixup_offset(dev_priv,
+								  filp_priv,
+								  &data[i])) {
 					DRM_ERROR
 					    ("Invalid R200 cubic texture offset\n");
 					return DRM_ERR(EINVAL);
@@ -239,8 +231,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
 
 static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
 						      dev_priv,
-						     drm_file_t * filp_priv,
-						     drm_radeon_kcmd_buffer_t *cmdbuf,
+						     drm_file_t *filp_priv,
+						     drm_radeon_kcmd_buffer_t *
+						     cmdbuf,
 						     unsigned int *cmdsz)
 {
 	u32 *cmd = (u32 *) cmdbuf->buf;
@@ -555,7 +548,8 @@ static struct {
 	{R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
 	{R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
 	{R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
-	{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
+	{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
+	 "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
 	{R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
 	{R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
 	{R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
@@ -569,7 +563,7 @@ static struct {
 	{R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
 	 "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
 	{R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"},	/* 61 */
-	{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
+	{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"},	/* 62 */
 	{R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
 	{R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
 	{R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
@@ -592,7 +586,7 @@ static struct {
 	{RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
 	{RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
 	{R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
-	{R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */
+	{R200_PP_AFS_0, 32, "R200_PP_AFS_0"},	/* 85 */
 	{R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
 	{R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
 	{R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
@@ -985,8 +979,8 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
 	 * rendering a quad into just those buffers.  Thus, we have to
 	 * make sure the 3D engine is configured correctly.
 	 */
-	if ((dev_priv->microcode_version == UCODE_R200) &&
-	    (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
+	else if ((dev_priv->microcode_version == UCODE_R200) &&
+		 (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
 
 		int tempPP_CNTL;
 		int tempRE_CNTL;
@@ -1637,6 +1631,14 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
 		    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
 		dwords = size / 4;
 
+#define RADEON_COPY_MT(_buf, _data, _width) \
+	do { \
+		if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
+			DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
+			return DRM_ERR(EFAULT); \
+		} \
+	} while(0)
+
 		if (microtile) {
 			/* texture micro tiling in use, minimum texture width is thus 16 bytes.
 			   however, we cannot use blitter directly for texture width < 64 bytes,
@@ -1648,46 +1650,19 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
 			   from user space. */
 			if (tex->height == 1) {
 				if (tex_width >= 64 || tex_width <= 16) {
-					if (DRM_COPY_FROM_USER(buffer, data,
-							       tex_width *
-							       sizeof(u32))) {
-						DRM_ERROR
-						    ("EFAULT on pad, %d bytes\n",
-						     tex_width);
-						return DRM_ERR(EFAULT);
-					}
+					RADEON_COPY_MT(buffer, data,
+						       tex_width * sizeof(u32));
 				} else if (tex_width == 32) {
-					if (DRM_COPY_FROM_USER
-					    (buffer, data, 16)) {
-						DRM_ERROR
-						    ("EFAULT on pad, %d bytes\n",
-						     tex_width);
-						return DRM_ERR(EFAULT);
-					}
-					if (DRM_COPY_FROM_USER
-					    (buffer + 8, data + 16, 16)) {
-						DRM_ERROR
-						    ("EFAULT on pad, %d bytes\n",
-						     tex_width);
-						return DRM_ERR(EFAULT);
-					}
+					RADEON_COPY_MT(buffer, data, 16);
+					RADEON_COPY_MT(buffer + 8,
+						       data + 16, 16);
 				}
 			} else if (tex_width >= 64 || tex_width == 16) {
-				if (DRM_COPY_FROM_USER(buffer, data,
-						       dwords * sizeof(u32))) {
-					DRM_ERROR("EFAULT on data, %d dwords\n",
-						  dwords);
-					return DRM_ERR(EFAULT);
-				}
+				RADEON_COPY_MT(buffer, data,
+					       dwords * sizeof(u32));
 			} else if (tex_width < 16) {
 				for (i = 0; i < tex->height; i++) {
-					if (DRM_COPY_FROM_USER
-					    (buffer, data, tex_width)) {
-						DRM_ERROR
-						    ("EFAULT on pad, %d bytes\n",
-						     tex_width);
-						return DRM_ERR(EFAULT);
-					}
+					RADEON_COPY_MT(buffer, data, tex_width);
 					buffer += 4;
 					data += tex_width;
 				}
@@ -1695,37 +1670,13 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
 				/* TODO: make sure this works when not fitting
 				   in one buffer (i.e. 32bytes x 2048...) */
 				for (i = 0; i < tex->height; i += 2) {
-					if (DRM_COPY_FROM_USER
-					    (buffer, data, 16)) {
-						DRM_ERROR
-						    ("EFAULT on pad, %d bytes\n",
-						     tex_width);
-						return DRM_ERR(EFAULT);
-					}
+					RADEON_COPY_MT(buffer, data, 16);
 					data += 16;
-					if (DRM_COPY_FROM_USER
-					    (buffer + 8, data, 16)) {
-						DRM_ERROR
-						    ("EFAULT on pad, %d bytes\n",
-						     tex_width);
-						return DRM_ERR(EFAULT);
-					}
+					RADEON_COPY_MT(buffer + 8, data, 16);
 					data += 16;
-					if (DRM_COPY_FROM_USER
-					    (buffer + 4, data, 16)) {
-						DRM_ERROR
-						    ("EFAULT on pad, %d bytes\n",
-						     tex_width);
-						return DRM_ERR(EFAULT);
-					}
+					RADEON_COPY_MT(buffer + 4, data, 16);
 					data += 16;
-					if (DRM_COPY_FROM_USER
-					    (buffer + 12, data, 16)) {
-						DRM_ERROR
-						    ("EFAULT on pad, %d bytes\n",
-						     tex_width);
-						return DRM_ERR(EFAULT);
-					}
+					RADEON_COPY_MT(buffer + 12, data, 16);
 					data += 16;
 					buffer += 16;
 				}
@@ -1735,31 +1686,22 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
 			/* Texture image width is larger than the minimum, so we
 			 * can upload it directly.
 			 */
-			if (DRM_COPY_FROM_USER(buffer, data,
-					       dwords * sizeof(u32))) {
-				DRM_ERROR("EFAULT on data, %d dwords\n",
-					  dwords);
-				return DRM_ERR(EFAULT);
-			}
+			RADEON_COPY_MT(buffer, data,
+				       dwords * sizeof(u32));
 		} else {
 			/* Texture image width is less than the minimum, so we
 			 * need to pad out each image scanline to the minimum
 			 * width.
 			 */
			for (i = 0; i < tex->height; i++) {
-				if (DRM_COPY_FROM_USER
-				    (buffer, data, tex_width)) {
-					DRM_ERROR
-					    ("EFAULT on pad, %d bytes\n",
-					     tex_width);
-					return DRM_ERR(EFAULT);
-				}
+				RADEON_COPY_MT(buffer, data, tex_width);
 				buffer += 8;
 				data += tex_width;
 			}
 		}
 	}
+#undef RADEON_COPY_MT
 
 	buf->filp = filp;
 	buf->used = size;
 	offset = dev_priv->gart_buffers_offset + buf->offset;
@@ -1821,7 +1763,7 @@ static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
 }
 
 static void radeon_apply_surface_regs(int surf_index,
-				      drm_radeon_private_t * dev_priv)
+				      drm_radeon_private_t *dev_priv)
 {
 	if (!dev_priv->mmio)
 		return;
@@ -1847,8 +1789,8 @@ static void radeon_apply_surface_regs(int surf_index,
  * freed, we suddenly need two surfaces to store A and C, which might
  * not always be available.
  */
-static int alloc_surface(drm_radeon_surface_alloc_t * new,
-			 drm_radeon_private_t * dev_priv, DRMFILE filp)
+static int alloc_surface(drm_radeon_surface_alloc_t *new,
+			 drm_radeon_private_t *dev_priv, DRMFILE filp)
 {
 	struct radeon_virt_surface *s;
 	int i;
@@ -2158,6 +2100,11 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
 
 	LOCK_TEST_WITH_RETURN(dev, filp);
 
+	if (!dev_priv) {
+		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+		return DRM_ERR(EINVAL);
+	}
+
 	DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
 
 	DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data,
@@ -2596,9 +2543,9 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
 	return 0;
 }
 
-static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv,
+static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
 					  drm_radeon_cmd_header_t header,
-					  drm_radeon_kcmd_buffer_t * cmdbuf)
+					  drm_radeon_kcmd_buffer_t *cmdbuf)
 {
 	int sz = header.scalars.count;
 	int start = header.scalars.offset;
@@ -2618,9 +2565,9 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv,
 
 /* God this is ugly
  */
-static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv,
+static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
 					   drm_radeon_cmd_header_t header,
-					   drm_radeon_kcmd_buffer_t * cmdbuf)
+					   drm_radeon_kcmd_buffer_t *cmdbuf)
 {
 	int sz = header.scalars.count;
 	int start = ((unsigned int)header.scalars.offset) + 0x100;
@@ -2638,9 +2585,9 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv,
 	return 0;
 }
 
-static __inline__ int radeon_emit_vectors(drm_radeon_private_t * dev_priv,
+static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
 					  drm_radeon_cmd_header_t header,
-					  drm_radeon_kcmd_buffer_t * cmdbuf)
+					  drm_radeon_kcmd_buffer_t *cmdbuf)
 {
 	int sz = header.vectors.count;
 	int start = header.vectors.offset;
@@ -2685,8 +2632,8 @@ static int radeon_emit_packet3(drm_device_t * dev,
 	return 0;
 }
 
-static int radeon_emit_packet3_cliprect(drm_device_t * dev,
-					drm_file_t * filp_priv,
+static int radeon_emit_packet3_cliprect(drm_device_t *dev,
+					drm_file_t *filp_priv,
 					drm_radeon_kcmd_buffer_t *cmdbuf,
 					int orig_nbox)
 {
@@ -2818,7 +2765,8 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
 		kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER);
 		if (kbuf == NULL)
 			return DRM_ERR(ENOMEM);
-		if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, cmdbuf.bufsz)) {
+		if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf,
+				       cmdbuf.bufsz)) {
 			drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
 			return DRM_ERR(EFAULT);
 		}
@@ -2981,7 +2929,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
 		value = dev_priv->gart_vm_start;
 		break;
 	case RADEON_PARAM_REGISTER_HANDLE:
-		value = dev_priv->mmio_offset;
+		value = dev_priv->mmio->offset;
 		break;
 	case RADEON_PARAM_STATUS_HANDLE:
 		value = dev_priv->ring_rptr_offset;
@@ -3004,6 +2952,15 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
 	case RADEON_PARAM_GART_TEX_HANDLE:
 		value = dev_priv->gart_textures_offset;
 		break;
+
+	case RADEON_PARAM_CARD_TYPE:
+		if (dev_priv->flags & CHIP_IS_PCIE)
+			value = RADEON_CARD_PCIE;
+		else if (dev_priv->flags & CHIP_IS_AGP)
+			value = RADEON_CARD_AGP;
+		else
+			value = RADEON_CARD_PCI;
+		break;
 	default:
 		return DRM_ERR(EINVAL);
 	}
@@ -3066,6 +3023,7 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
 /* When a client dies:
  *    - Check for and clean up flipped page state
  *    - Free any alloced GART memory.
+ *    - Free any alloced radeon surfaces.
 *
 * DRM infrastructure takes care of reclaiming dma buffers.
 */
@@ -3092,6 +3050,7 @@ int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	struct drm_radeon_driver_file_fields *radeon_priv;
 
+	DRM_DEBUG("\n");
 	radeon_priv =
 	    (struct drm_radeon_driver_file_fields *)
 	    drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
@@ -3100,6 +3059,7 @@ int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
 		return -ENOMEM;
 
 	filp_priv->driver_priv = radeon_priv;
+
 	if (dev_priv)
 		radeon_priv->radeon_fb_delta = dev_priv->fb_location;
 	else
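A note on the RADEON_COPY_MT helper that accounts for most of the deletions above: it folds the driver's repeated copy-from-user/report/bail sequence into a single macro. Before and after at a representative call site (trimmed from the hunks above):

    /* Before: every upload path in radeon_cp_dispatch_texture open-coded
     * the failure handling for the user copy. */
    if (DRM_COPY_FROM_USER(buffer, data, tex_width)) {
    	DRM_ERROR("EFAULT on pad, %d bytes\n", tex_width);
    	return DRM_ERR(EFAULT);
    }

    /* After: one line per copy.  Because the macro body contains a return,
     * it is only safe directly inside a function where bailing out with
     * DRM_ERR(EFAULT) is the right answer, which is why it is #undef'd
     * immediately after the last call site. */
    RADEON_COPY_MT(buffer, data, tex_width);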