Diffstat (limited to 'drivers/gpu/drm')
27 files changed, 650 insertions, 119 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 3a22eb9be37..4cd35d8fd79 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -71,6 +71,7 @@ config DRM_I915 select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT select FB + select FRAMEBUFFER_CONSOLE if !EMBEDDED tristate "i915 driver" help Choose this option if you have a system that has Intel 830M, 845G, @@ -83,6 +84,12 @@ config DRM_I915 config DRM_I915_KMS bool "Enable modesetting on intel by default" depends on DRM_I915 + # i915 KMS depends on ACPI_VIDEO when ACPI is enabled + # but for select to work, need to select ACPI_VIDEO's dependencies, ick + select VIDEO_OUTPUT_CONTROL if ACPI + select BACKLIGHT_CLASS_DEVICE if ACPI + select INPUT if ACPI + select ACPI_VIDEO if ACPI help Choose this option if you want kernel modesetting enabled by default, and you have a new enough userspace to support this. Running old diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index a04639dc633..45890447fee 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -561,7 +561,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, int saved_x, saved_y; struct drm_encoder *encoder; bool ret = true; - bool depth_changed, bpp_changed; adjusted_mode = drm_mode_duplicate(dev, mode); @@ -570,15 +569,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, if (!crtc->enabled) return true; - if (old_fb && crtc->fb) { - depth_changed = (old_fb->depth != crtc->fb->depth); - bpp_changed = (old_fb->bits_per_pixel != - crtc->fb->bits_per_pixel); - } else { - depth_changed = true; - bpp_changed = true; - } - saved_mode = crtc->mode; saved_x = crtc->x; saved_y = crtc->y; @@ -590,15 +580,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, crtc->x = x; crtc->y = y; - if (drm_mode_equal(&saved_mode, &crtc->mode)) { - if (saved_x != crtc->x || saved_y != crtc->y || - depth_changed || bpp_changed) { - ret = !crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, - old_fb); - goto done; - } - } - /* Pass our mode to the connectors and the CRTC to give them a chance to * adjust it according to limitations or connector properties, and also * a chance to reject the mode entirely. 
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index c4ada8b6295..f01def16a66 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -456,7 +456,8 @@ int drm_ioctl(struct inode *inode, struct file *filp, retcode = -EINVAL; } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || - ((ioctl->flags & DRM_MASTER) && !file_priv->is_master)) { + ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) || + (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) { retcode = -EACCES; } else { if (cmd & (IOC_IN | IOC_OUT)) { diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index d009661781b..b9631e3a1ea 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -159,6 +159,9 @@ void drm_master_put(struct drm_master **master) int drm_setmaster_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + if (file_priv->is_master) + return 0; + if (file_priv->minor->master && file_priv->minor->master != file_priv->master) return -EINVAL; @@ -169,6 +172,7 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, file_priv->minor->master != file_priv->master) { mutex_lock(&dev->struct_mutex); file_priv->minor->master = drm_master_get(file_priv->master); + file_priv->is_master = 1; mutex_unlock(&dev->struct_mutex); } @@ -178,10 +182,15 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, int drm_dropmaster_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - if (!file_priv->master) + if (!file_priv->is_master) + return -EINVAL; + + if (!file_priv->minor->master) return -EINVAL; + mutex_lock(&dev->struct_mutex); drm_master_put(&file_priv->minor->master); + file_priv->is_master = 0; mutex_unlock(&dev->struct_mutex); return 0; } @@ -393,14 +402,14 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, if (dev->driver->load) { ret = dev->driver->load(dev, ent->driver_data); if (ret) - goto err_g3; + goto err_g4; } /* setup the grouping for the legacy output */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group); if (ret) - goto err_g3; + goto err_g4; } list_add_tail(&dev->driver_item, &driver->device_list); @@ -411,8 +420,11 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, return 0; -err_g3: +err_g4: drm_put_minor(&dev->primary); +err_g3: + if (drm_core_check_feature(dev, DRIVER_MODESET)) + drm_put_minor(&dev->control); err_g2: pci_disable_device(pdev); err_g1: @@ -493,11 +505,11 @@ void drm_put_dev(struct drm_device *dev) dev->agp = NULL; } - drm_ht_remove(&dev->map_hash); - drm_ctxbitmap_cleanup(dev); - list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) drm_rmmap(dev, r_list->map); + drm_ht_remove(&dev->map_hash); + + drm_ctxbitmap_cleanup(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_put_minor(&dev->control); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index bc0c6849360..8f9372921f8 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -132,6 +132,7 @@ void drm_sysfs_destroy(void) */ static void drm_sysfs_device_release(struct device *dev) { + memset(dev, 0, sizeof(struct device)); return; } @@ -488,9 +489,7 @@ int drm_sysfs_device_add(struct drm_minor *minor) return 0; - device_unregister(&minor->kdev); err_out: - return err; } diff --git a/drivers/gpu/drm/i915/i915_dma.c 
b/drivers/gpu/drm/i915/i915_dma.c index a000cf02882..53d54455262 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -713,18 +713,18 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, mutex_unlock(&dev->struct_mutex); if (ret) { DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); - goto fail_batch_free; + goto fail_clip_free; } if (sarea_priv) sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); -fail_batch_free: - drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER); fail_clip_free: drm_free(cliprects, cmdbuf->num_cliprects * sizeof(struct drm_clip_rect), DRM_MEM_DRIVER); +fail_batch_free: + drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER); return ret; } @@ -1011,8 +1011,16 @@ static int i915_load_modeset_init(struct drm_device *dev) /* Basic memrange allocator for stolen space (aka vram) */ drm_mm_init(&dev_priv->vram, 0, prealloc_size); - /* Let GEM Manage from end of prealloc space to end of aperture */ - i915_gem_do_init(dev, prealloc_size, agp_size); + /* Let GEM Manage from end of prealloc space to end of aperture. + * + * However, leave one page at the end still bound to the scratch page. + * There are a number of places where the hardware apparently + * prefetches past the end of the object, and we've seen multiple + * hangs with the GPU head pointer stuck in a batchbuffer bound + * at the last page of the aperture. One page should be enough to + * keep any prefetching inside of the aperture. + */ + i915_gem_do_init(dev, prealloc_size, agp_size - 4096); ret = i915_gem_init_ringbuffer(dev); if (ret) @@ -1232,7 +1240,7 @@ int i915_driver_unload(struct drm_device *dev) if (dev_priv->regs != NULL) iounmap(dev_priv->regs); - intel_opregion_free(dev); + intel_opregion_free(dev, 0); if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_modeset_cleanup(dev); @@ -1350,6 +1358,7 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0), + DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6503e2210f6..98560e1e899 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -77,7 +77,7 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) drm_irq_uninstall(dev); } - intel_opregion_free(dev); + intel_opregion_free(dev, 1); if (state.event == PM_EVENT_SUSPEND) { /* Shut down the device */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3750d800304..9b149fe824c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -283,6 +283,7 @@ typedef struct drm_i915_private { u8 saveAR[21]; u8 saveDACMASK; u8 saveCR[37]; + uint64_t saveFENCE[16]; struct { struct drm_mm gtt_space; @@ -446,6 +447,9 @@ struct drm_i915_gem_object { uint32_t tiling_mode; uint32_t stride; + /** Record of address bit 17 of each page at last unbind. 
*/ + long *bit_17; + /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ uint32_t agp_type; @@ -635,9 +639,13 @@ int i915_gem_attach_phys_object(struct drm_device *dev, void i915_gem_detach_phys_object(struct drm_device *dev, struct drm_gem_object *obj); void i915_gem_free_all_phys_object(struct drm_device *dev); +int i915_gem_object_get_pages(struct drm_gem_object *obj); +void i915_gem_object_put_pages(struct drm_gem_object *obj); /* i915_gem_tiling.c */ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); +void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); +void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); /* i915_gem_debug.c */ void i915_gem_dump_object(struct drm_gem_object *obj, int len, @@ -667,12 +675,12 @@ extern int i915_restore_state(struct drm_device *dev); #ifdef CONFIG_ACPI /* i915_opregion.c */ extern int intel_opregion_init(struct drm_device *dev, int resume); -extern void intel_opregion_free(struct drm_device *dev); +extern void intel_opregion_free(struct drm_device *dev, int suspend); extern void opregion_asle_intr(struct drm_device *dev); extern void opregion_enable_asle(struct drm_device *dev); #else static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; } -static inline void intel_opregion_free(struct drm_device *dev) { return; } +static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; } static inline void opregion_asle_intr(struct drm_device *dev) { return; } static inline void opregion_enable_asle(struct drm_device *dev) { return; } #endif @@ -698,13 +706,8 @@ extern void intel_modeset_cleanup(struct drm_device *dev); #define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg)) #define I915_READ8(reg) readb(dev_priv->regs + (reg)) #define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg)) -#ifdef writeq #define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg)) -#else -#define I915_WRITE64(reg, val) (writel(val, dev_priv->regs + (reg)), \ - writel(upper_32_bits(val), dev_priv->regs + \ - (reg) + 4)) -#endif +#define I915_READ64(reg) readq(dev_priv->regs + (reg)) #define POSTING_READ(reg) (void)I915_READ(reg) #define I915_VERBOSE 0 @@ -780,15 +783,18 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); (dev)->pci_device == 0x2A42 || \ (dev)->pci_device == 0x2E02 || \ (dev)->pci_device == 0x2E12 || \ - (dev)->pci_device == 0x2E22) + (dev)->pci_device == 0x2E22 || \ + (dev)->pci_device == 0x2E32) -#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) +#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \ + (dev)->pci_device == 0x2A12) #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ (dev)->pci_device == 0x2E12 || \ (dev)->pci_device == 0x2E22 || \ + (dev)->pci_device == 0x2E32 || \ IS_GM45(dev)) #define IS_IGDG(dev) ((dev)->pci_device == 0xa001) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1449b452cc6..b189b49c760 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -43,8 +43,6 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, uint64_t offset, uint64_t size); static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); -static int i915_gem_object_get_pages(struct drm_gem_object *obj); -static void i915_gem_object_put_pages(struct drm_gem_object *obj); static int i915_gem_object_wait_rendering(struct drm_gem_object 
*obj); static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment); @@ -143,15 +141,27 @@ fast_shmem_read(struct page **pages, int length) { char __iomem *vaddr; - int ret; + int unwritten; vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); if (vaddr == NULL) return -ENOMEM; - ret = __copy_to_user_inatomic(data, vaddr + page_offset, length); + unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length); kunmap_atomic(vaddr, KM_USER0); - return ret; + if (unwritten) + return -EFAULT; + + return 0; +} + +static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) +{ + drm_i915_private_t *dev_priv = obj->dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && + obj_priv->tiling_mode != I915_TILING_NONE; } static inline int @@ -181,6 +191,64 @@ slow_shmem_copy(struct page *dst_page, return 0; } +static inline int +slow_shmem_bit17_copy(struct page *gpu_page, + int gpu_offset, + struct page *cpu_page, + int cpu_offset, + int length, + int is_read) +{ + char *gpu_vaddr, *cpu_vaddr; + + /* Use the unswizzled path if this page isn't affected. */ + if ((page_to_phys(gpu_page) & (1 << 17)) == 0) { + if (is_read) + return slow_shmem_copy(cpu_page, cpu_offset, + gpu_page, gpu_offset, length); + else + return slow_shmem_copy(gpu_page, gpu_offset, + cpu_page, cpu_offset, length); + } + + gpu_vaddr = kmap_atomic(gpu_page, KM_USER0); + if (gpu_vaddr == NULL) + return -ENOMEM; + + cpu_vaddr = kmap_atomic(cpu_page, KM_USER1); + if (cpu_vaddr == NULL) { + kunmap_atomic(gpu_vaddr, KM_USER0); + return -ENOMEM; + } + + /* Copy the data, XORing A6 with A17 (1). The user already knows he's + * XORing with the other bits (A9 for Y, A9 and A10 for X) + */ + while (length > 0) { + int cacheline_end = ALIGN(gpu_offset + 1, 64); + int this_length = min(cacheline_end - gpu_offset, length); + int swizzled_gpu_offset = gpu_offset ^ 64; + + if (is_read) { + memcpy(cpu_vaddr + cpu_offset, + gpu_vaddr + swizzled_gpu_offset, + this_length); + } else { + memcpy(gpu_vaddr + swizzled_gpu_offset, + cpu_vaddr + cpu_offset, + this_length); + } + cpu_offset += this_length; + gpu_offset += this_length; + length -= this_length; + } + + kunmap_atomic(cpu_vaddr, KM_USER1); + kunmap_atomic(gpu_vaddr, KM_USER0); + + return 0; +} + /** * This is the fast shmem pread path, which attempts to copy_from_user directly * from the backing pages of the object to the user's address space. 
On a @@ -269,6 +337,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, int page_length; int ret; uint64_t data_ptr = args->data_ptr; + int do_bit17_swizzling; remain = args->size; @@ -286,13 +355,15 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, down_read(&mm->mmap_sem); pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, - num_pages, 0, 0, user_pages, NULL); + num_pages, 1, 0, user_pages, NULL); up_read(&mm->mmap_sem); if (pinned_pages < num_pages) { ret = -EFAULT; goto fail_put_user_pages; } + do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + mutex_lock(&dev->struct_mutex); ret = i915_gem_object_get_pages(obj); @@ -327,11 +398,20 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, if ((data_page_offset + page_length) > PAGE_SIZE) page_length = PAGE_SIZE - data_page_offset; - ret = slow_shmem_copy(user_pages[data_page_index], - data_page_offset, - obj_priv->pages[shmem_page_index], - shmem_page_offset, - page_length); + if (do_bit17_swizzling) { + ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], + shmem_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length, + 1); + } else { + ret = slow_shmem_copy(user_pages[data_page_index], + data_page_offset, + obj_priv->pages[shmem_page_index], + shmem_page_offset, + page_length); + } if (ret) goto fail_put_pages; @@ -383,9 +463,14 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); - if (ret != 0) + if (i915_gem_object_needs_bit17_swizzle(obj)) { ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); + } else { + ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); + if (ret != 0) + ret = i915_gem_shmem_pread_slow(dev, obj, args, + file_priv); + } drm_gem_object_unreference(obj); @@ -727,6 +812,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, int page_length; int ret; uint64_t data_ptr = args->data_ptr; + int do_bit17_swizzling; remain = args->size; @@ -751,6 +837,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, goto fail_put_user_pages; } + do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + mutex_lock(&dev->struct_mutex); ret = i915_gem_object_get_pages(obj); @@ -785,11 +873,20 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, if ((data_page_offset + page_length) > PAGE_SIZE) page_length = PAGE_SIZE - data_page_offset; - ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], - shmem_page_offset, - user_pages[data_page_index], - data_page_offset, - page_length); + if (do_bit17_swizzling) { + ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], + shmem_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length, + 0); + } else { + ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], + shmem_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length); + } if (ret) goto fail_put_pages; @@ -854,6 +951,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file_priv); } + } else if (i915_gem_object_needs_bit17_swizzle(obj)) { + ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv); } else { ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); if (ret == -EFAULT) { @@ -1285,7 +1384,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, return 0; } 
-static void +void i915_gem_object_put_pages(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = obj->driver_private; @@ -1297,6 +1396,9 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) if (--obj_priv->pages_refcount != 0) return; + if (obj_priv->tiling_mode != I915_TILING_NONE) + i915_gem_object_save_bit_17_swizzle(obj); + for (i = 0; i < page_count; i++) if (obj_priv->pages[i] != NULL) { if (obj_priv->dirty) @@ -1494,8 +1596,19 @@ i915_gem_retire_request(struct drm_device *dev, if (obj->write_domain != 0) i915_gem_object_move_to_flushing(obj); - else + else { + /* Take a reference on the object so it won't be + * freed while the spinlock is held. The list + * protection for this spinlock is safe when breaking + * the lock like this since the next thing we do + * is just get the head of the list again. + */ + drm_gem_object_reference(obj); i915_gem_object_move_to_inactive(obj); + spin_unlock(&dev_priv->mm.active_list_lock); + drm_gem_object_unreference(obj); + spin_lock(&dev_priv->mm.active_list_lock); + } } out: spin_unlock(&dev_priv->mm.active_list_lock); @@ -1578,11 +1691,20 @@ static int i915_wait_request(struct drm_device *dev, uint32_t seqno) { drm_i915_private_t *dev_priv = dev->dev_private; + u32 ier; int ret = 0; BUG_ON(seqno == 0); if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { + ier = I915_READ(IER); + if (!ier) { + DRM_ERROR("something (likely vbetool) disabled " + "interrupts, re-enabling\n"); + i915_driver_irq_preinstall(dev); + i915_driver_irq_postinstall(dev); + } + dev_priv->mm.waiting_gem_seqno = seqno; i915_user_irq_get(dev); ret = wait_event_interruptible(dev_priv->irq_queue, @@ -1884,7 +2006,7 @@ i915_gem_evict_everything(struct drm_device *dev) return ret; } -static int +int i915_gem_object_get_pages(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = obj->driver_private; @@ -1922,6 +2044,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) } obj_priv->pages[i] = page; } + + if (obj_priv->tiling_mode != I915_TILING_NONE) + i915_gem_object_do_bit_17_swizzle(obj); + return 0; } @@ -3002,13 +3128,13 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, drm_free(*relocs, reloc_count * sizeof(**relocs), DRM_MEM_DRIVER); *relocs = NULL; - return ret; + return -EFAULT; } reloc_index += exec_list[i].relocation_count; } - return ret; + return 0; } static int @@ -3017,23 +3143,28 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, struct drm_i915_gem_relocation_entry *relocs) { uint32_t reloc_count = 0, i; - int ret; + int ret = 0; for (i = 0; i < buffer_count; i++) { struct drm_i915_gem_relocation_entry __user *user_relocs; + int unwritten; user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; - if (ret == 0) { - ret = copy_to_user(user_relocs, - &relocs[reloc_count], - exec_list[i].relocation_count * - sizeof(*relocs)); + unwritten = copy_to_user(user_relocs, + &relocs[reloc_count], + exec_list[i].relocation_count * + sizeof(*relocs)); + + if (unwritten) { + ret = -EFAULT; + goto err; } reloc_count += exec_list[i].relocation_count; } +err: drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER); return ret; @@ -3243,7 +3374,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, exec_offset = exec_list[args->buffer_count - 1].offset; #if WATCH_EXEC - i915_gem_dump_object(object_list[args->buffer_count - 1], + i915_gem_dump_object(batch_obj, args->batch_len, __func__, ~0); @@ -3308,10 +3439,12 @@ err: (uintptr_t) args->buffers_ptr, exec_list, 
sizeof(*exec_list) * args->buffer_count); - if (ret) + if (ret) { + ret = -EFAULT; DRM_ERROR("failed to copy %d exec entries " "back to user (%d)\n", args->buffer_count, ret); + } } /* Copy the updated relocations out regardless of current error @@ -3593,6 +3726,7 @@ void i915_gem_free_object(struct drm_gem_object *obj) i915_gem_free_mmap_offset(obj); drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); + kfree(obj_priv->bit_17); drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); } @@ -3962,8 +4096,10 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, dev_priv->mm.suspended = 0; ret = i915_gem_init_ringbuffer(dev); - if (ret != 0) + if (ret != 0) { + mutex_unlock(&dev->struct_mutex); return ret; + } spin_lock(&dev_priv->mm.active_list_lock); BUG_ON(!list_empty(&dev_priv->mm.active_list)); diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index a1ac0c5e730..986f1082c59 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c @@ -234,6 +234,96 @@ static int i915_hws_info(struct seq_file *m, void *data) return 0; } +static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count) +{ + int page, i; + uint32_t *mem; + + for (page = 0; page < page_count; page++) { + mem = kmap(pages[page]); + for (i = 0; i < PAGE_SIZE; i += 4) + seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); + kunmap(pages[page]); + } +} + +static int i915_batchbuffer_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + int ret; + + spin_lock(&dev_priv->mm.active_list_lock); + + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { + obj = obj_priv->obj; + if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { + ret = i915_gem_object_get_pages(obj); + if (ret) { + DRM_ERROR("Failed to get pages: %d\n", ret); + spin_unlock(&dev_priv->mm.active_list_lock); + return ret; + } + + seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset); + i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE); + + i915_gem_object_put_pages(obj); + } + } + + spin_unlock(&dev_priv->mm.active_list_lock); + + return 0; +} + +static int i915_ringbuffer_data(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u8 *virt; + uint32_t *ptr, off; + + if (!dev_priv->ring.ring_obj) { + seq_printf(m, "No ringbuffer setup\n"); + return 0; + } + + virt = dev_priv->ring.virtual_start; + + for (off = 0; off < dev_priv->ring.Size; off += 4) { + ptr = (uint32_t *)(virt + off); + seq_printf(m, "%08x : %08x\n", off, *ptr); + } + + return 0; +} + +static int i915_ringbuffer_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned int head, tail, mask; + + head = I915_READ(PRB0_HEAD) & HEAD_ADDR; + tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; + mask = dev_priv->ring.tail_mask; + + seq_printf(m, "RingHead : %08x\n", head); + seq_printf(m, "RingTail : %08x\n", tail); + seq_printf(m, "RingMask : %08x\n", mask); + seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size); + seq_printf(m, "Acthd : %08x\n", 
I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD)); + + return 0; +} + + static struct drm_info_list i915_gem_debugfs_list[] = { {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, @@ -243,6 +333,9 @@ static struct drm_info_list i915_gem_debugfs_list[] = { {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, {"i915_gem_hws", i915_hws_info, 0}, + {"i915_ringbuffer_data", i915_ringbuffer_data, 0}, + {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, + {"i915_batchbuffers", i915_batchbuffer_info, 0}, }; #define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 6be3f927c86..52a059354e8 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -25,6 +25,8 @@ * */ +#include "linux/string.h" +#include "linux/bitops.h" #include "drmP.h" #include "drm.h" #include "i915_drm.h" @@ -127,8 +129,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) swizzle_y = I915_BIT_6_SWIZZLE_9_11; } else { /* Bit 17 swizzling by the CPU in addition. */ - swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; - swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + swizzle_x = I915_BIT_6_SWIZZLE_9_10_17; + swizzle_y = I915_BIT_6_SWIZZLE_9_17; } break; } @@ -281,13 +283,25 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); if (args->tiling_mode == I915_TILING_NONE) { - obj_priv->tiling_mode = I915_TILING_NONE; args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; } else { if (args->tiling_mode == I915_TILING_X) args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; else args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; + + /* Hide bit 17 swizzling from the user. This prevents old Mesa + * from aborting the application on sw fallbacks to bit 17, + * and we use the pread/pwrite bit17 paths to swizzle for it. + * If there was a user that was relying on the swizzle + * information for drm_intel_bo_map()ed reads/writes this would + * break it, but we don't have any of those. + */ + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9; + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; + /* If we can't handle the swizzling, make it untiled. */ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { args->tiling_mode = I915_TILING_NONE; @@ -354,8 +368,100 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, DRM_ERROR("unknown tiling mode\n"); } + /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9; + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; + drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); return 0; } + +/** + * Swap every 64 bytes of this page around, to account for it having a new + * bit 17 of its physical address and therefore being interpreted differently + * by the GPU. 
+ */ +static int +i915_gem_swizzle_page(struct page *page) +{ + char *vaddr; + int i; + char temp[64]; + + vaddr = kmap(page); + if (vaddr == NULL) + return -ENOMEM; + + for (i = 0; i < PAGE_SIZE; i += 128) { + memcpy(temp, &vaddr[i], 64); + memcpy(&vaddr[i], &vaddr[i + 64], 64); + memcpy(&vaddr[i + 64], temp, 64); + } + + kunmap(page); + + return 0; +} + +void +i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int page_count = obj->size >> PAGE_SHIFT; + int i; + + if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) + return; + + if (obj_priv->bit_17 == NULL) + return; + + for (i = 0; i < page_count; i++) { + char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; + if ((new_bit_17 & 0x1) != + (test_bit(i, obj_priv->bit_17) != 0)) { + int ret = i915_gem_swizzle_page(obj_priv->pages[i]); + if (ret != 0) { + DRM_ERROR("Failed to swizzle page\n"); + return; + } + set_page_dirty(obj_priv->pages[i]); + } + } +} + +void +i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int page_count = obj->size >> PAGE_SHIFT; + int i; + + if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) + return; + + if (obj_priv->bit_17 == NULL) { + obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * + sizeof(long), GFP_KERNEL); + if (obj_priv->bit_17 == NULL) { + DRM_ERROR("Failed to allocate memory for bit 17 " + "record\n"); + return; + } + } + + for (i = 0; i < page_count; i++) { + if (page_to_phys(obj_priv->pages[i]) & (1 << 17)) + __set_bit(i, obj_priv->bit_17); + else + __clear_bit(i, obj_priv->bit_17); + } +} diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index ee7ce7b78cf..98bb4c878c4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -406,7 +406,7 @@ int i915_irq_emit(struct drm_device *dev, void *data, drm_i915_irq_emit_t *emit = data; int result; - if (!dev_priv) { + if (!dev_priv || !dev_priv->ring.virtual_start) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c index 69427722d20..dc425e74a26 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/i915_opregion.c @@ -370,11 +370,8 @@ int intel_opregion_init(struct drm_device *dev, int resume) if (mboxes & MBOX_ACPI) { DRM_DEBUG("Public ACPI methods supported\n"); opregion->acpi = base + OPREGION_ACPI_OFFSET; - if (drm_core_check_feature(dev, DRIVER_MODESET)) { + if (drm_core_check_feature(dev, DRIVER_MODESET)) intel_didl_outputs(dev); - if (!resume) - acpi_video_register(); - } } else { DRM_DEBUG("Public ACPI methods not supported\n"); err = -ENOTSUPP; @@ -389,8 +386,13 @@ int intel_opregion_init(struct drm_device *dev, int resume) if (mboxes & MBOX_ASLE) { DRM_DEBUG("ASLE supported\n"); opregion->asle = base + OPREGION_ASLE_OFFSET; + opregion_enable_asle(dev); } + if (!resume) + acpi_video_register(); + + /* Notify BIOS we are ready to handle ACPI video ext notifs. * Right now, all the events are handled by the ACPI video module. * We don't actually need to do anything with them. 
*/ @@ -408,7 +410,7 @@ err_out: return err; } -void intel_opregion_free(struct drm_device *dev) +void intel_opregion_free(struct drm_device *dev, int suspend) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; @@ -416,6 +418,9 @@ void intel_opregion_free(struct drm_device *dev) if (!opregion->enabled) return; + if (!suspend) + acpi_video_exit(); + opregion->acpi->drdy = 0; system_opregion = NULL; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e805b590ae7..15da44cf21b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -526,6 +526,7 @@ #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) #define D_STATE 0x6104 #define CG_2D_DIS 0x6200 +#define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) #define CG_3D_DIS 0x6204 /* @@ -1446,6 +1447,7 @@ #define DISPPLANE_NO_LINE_DOUBLE 0 #define DISPPLANE_STEREO_POLARITY_FIRST 0 #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) +#define DISPPLANE_TILED (1<<10) #define DSPAADDR 0x70184 #define DSPASTRIDE 0x70188 #define DSPAPOS 0x7018C /* reserved */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index d669cc2b42c..ce8a21344a7 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -349,6 +349,18 @@ int i915_save_state(struct drm_device *dev) for (i = 0; i < 3; i++) dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); + /* Fences */ + if (IS_I965G(dev)) { + for (i = 0; i < 16; i++) + dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); + } else { + for (i = 0; i < 8; i++) + dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + } i915_save_vga(dev); return 0; @@ -371,6 +383,18 @@ int i915_restore_state(struct drm_device *dev) /* Display arbitration */ I915_WRITE(DSPARB, dev_priv->saveDSPARB); + /* Fences */ + if (IS_I965G(dev)) { + for (i = 0; i < 16; i++) + I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); + } else { + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); + } + /* Pipe & plane A info */ /* Prime the clock */ if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 9bdd959260a..19148c3df63 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -161,7 +161,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) hotplug_en &= CRT_FORCE_HOTPLUG_MASK; hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; - if (IS_GM45(dev)) + if (IS_G4X(dev)) hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 64773ce5296..3387cf32f38 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -367,6 +367,7 @@ static const intel_limit_t intel_limits[] = { .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, + .find_pll = intel_find_best_PLL, }, { /* INTEL_LIMIT_IGD_LVDS */ .dot = { .min = I9XX_DOT_MIN, .max = 
I9XX_DOT_MAX }, @@ -380,6 +381,7 @@ static const intel_limit_t intel_limits[] = { /* IGD only supports single-channel mode. */ .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, + .find_pll = intel_find_best_PLL, }, }; @@ -655,6 +657,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR); int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; + int dsptileoff = (pipe == 0 ? DSPATILEOFF : DSPBTILEOFF); int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; u32 dspcntr, alignment; int ret; @@ -731,6 +734,13 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, mutex_unlock(&dev->struct_mutex); return -EINVAL; } + if (IS_I965G(dev)) { + if (obj_priv->tiling_mode != I915_TILING_NONE) + dspcntr |= DISPPLANE_TILED; + else + dspcntr &= ~DISPPLANE_TILED; + } + I915_WRITE(dspcntr_reg, dspcntr); Start = obj_priv->gtt_offset; @@ -743,6 +753,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, I915_READ(dspbase); I915_WRITE(dspsurf, Start); I915_READ(dspsurf); + I915_WRITE(dsptileoff, (y << 16) | x); } else { I915_WRITE(dspbase, Start + Offset); I915_READ(dspbase); @@ -1793,6 +1804,37 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) } } +int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; + struct drm_crtc *crtc = NULL; + int pipe = -1; + + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + if (crtc->base.id == pipe_from_crtc_id->crtc_id) { + pipe = intel_crtc->pipe; + break; + } + } + + if (pipe == -1) { + DRM_ERROR("no such CRTC id\n"); + return -EINVAL; + } + + pipe_from_crtc_id->pipe = pipe; + + return 0; +} + struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) { struct drm_crtc *crtc = NULL; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 957daef8edf..cd4b9c5f715 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -109,7 +109,7 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, void intel_i2c_destroy(struct intel_i2c_chan *chan); int intel_ddc_get_modes(struct intel_output *intel_output); extern bool intel_ddc_probe(struct intel_output *intel_output); - +void intel_i2c_quirk_set(struct drm_device *dev, bool enable); extern void intel_crt_init(struct drm_device *dev); extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); extern bool intel_sdvo_init(struct drm_device *dev, int output_device); @@ -125,6 +125,8 @@ extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); +int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern void intel_wait_for_vblank(struct drm_device *dev); extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index b7f0ebe9f81..e4652dcdd9b 100644 --- 
a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -864,8 +864,8 @@ static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3) static struct sysrq_key_op sysrq_intelfb_restore_op = { .handler = intelfb_sysrq, - .help_msg = "force fb", - .action_msg = "force restore of fb console", + .help_msg = "force-fb(V)", + .action_msg = "Restore framebuffer console", }; int intelfb_probe(struct drm_device *dev) @@ -898,7 +898,7 @@ int intelfb_probe(struct drm_device *dev) ret = intelfb_single_fb_probe(dev); } - register_sysrq_key('g', &sysrq_intelfb_restore_op); + register_sysrq_key('v', &sysrq_intelfb_restore_op); return ret; } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index b06a4a3ff08..d0983bb93a1 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -38,7 +38,7 @@ struct intel_hdmi_priv { u32 sdvox_reg; u32 save_SDVOX; - int has_hdmi_sink; + bool has_hdmi_sink; }; static void intel_hdmi_mode_set(struct drm_encoder *encoder, @@ -128,6 +128,22 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, return true; } +static void +intel_hdmi_sink_detect(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; + struct edid *edid = NULL; + + edid = drm_get_edid(&intel_output->base, + &intel_output->ddc_bus->adapter); + if (edid != NULL) { + hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); + kfree(edid); + intel_output->base.display_info.raw_edid = NULL; + } +} + static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector) { @@ -139,11 +155,18 @@ intel_hdmi_detect(struct drm_connector *connector) temp = I915_READ(PORT_HOTPLUG_EN); - I915_WRITE(PORT_HOTPLUG_EN, - temp | - HDMIB_HOTPLUG_INT_EN | - HDMIC_HOTPLUG_INT_EN | - HDMID_HOTPLUG_INT_EN); + switch (hdmi_priv->sdvox_reg) { + case SDVOB: + temp |= HDMIB_HOTPLUG_INT_EN; + break; + case SDVOC: + temp |= HDMIC_HOTPLUG_INT_EN; + break; + default: + return connector_status_unknown; + } + + I915_WRITE(PORT_HOTPLUG_EN, temp); POSTING_READ(PORT_HOTPLUG_EN); @@ -158,9 +181,10 @@ intel_hdmi_detect(struct drm_connector *connector) return connector_status_unknown; } - if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) + if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) { + intel_hdmi_sink_detect(connector); return connector_status_connected; - else + } else return connector_status_disconnected; } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 5ee9d4c2575..f7061f68d05 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -34,6 +34,21 @@ #include "i915_drm.h" #include "i915_drv.h" +void intel_i2c_quirk_set(struct drm_device *dev, bool enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* When using bit bashing for I2C, this bit needs to be set to 1 */ + if (!IS_IGD(dev)) + return; + if (enable) + I915_WRITE(CG_2D_DIS, + I915_READ(CG_2D_DIS) | DPCUNIT_CLOCK_GATE_DISABLE); + else + I915_WRITE(CG_2D_DIS, + I915_READ(CG_2D_DIS) & (~DPCUNIT_CLOCK_GATE_DISABLE)); +} + /* * Intel GPIO access functions */ @@ -153,8 +168,10 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, goto out_free; /* JJJ: raise SCL and SDA? 
*/ + intel_i2c_quirk_set(dev, true); set_data(chan, 1); set_clock(chan, 1); + intel_i2c_quirk_set(dev, false); udelay(20); return chan; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 6619f26e46a..439a8651499 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -384,7 +384,51 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = { .destroy = intel_lvds_enc_destroy, }; +static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) +{ + DRM_DEBUG("Skipping LVDS initialization for %s\n", id->ident); + return 1; +} +/* These systems claim to have LVDS, but really don't */ +static const struct dmi_system_id __initdata intel_no_lvds[] = { + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Apple Mac Mini (Core series)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Apple Mac Mini (Core 2 series)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "MSI IM-945GSE-A", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MSI"), + DMI_MATCH(DMI_PRODUCT_NAME, "A9830IMS"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Dell Studio Hybrid", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"), + }, + }, + + /* FIXME: add a check for the Aopen Mini PC */ + + { } /* terminating entry */ +}; /** * intel_lvds_init - setup LVDS connectors on this device @@ -404,15 +448,9 @@ void intel_lvds_init(struct drm_device *dev) u32 lvds; int pipe; - /* Blacklist machines that we know falsely report LVDS. 
*/ - /* FIXME: add a check for the Aopen Mini PC */ - - /* Apple Mac Mini Core Duo and Mac Mini Core 2 Duo */ - if(dmi_match(DMI_PRODUCT_NAME, "Macmini1,1") || - dmi_match(DMI_PRODUCT_NAME, "Macmini2,1")) { - DRM_DEBUG("Skipping LVDS initialization for Apple Mac Mini\n"); + /* Skip init on machines we know falsely report LVDS */ + if (dmi_check_system(intel_no_lvds)) return; - } intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); if (!intel_output) { diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 07d7ec97616..e0910fefce8 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -27,6 +27,7 @@ #include <linux/fb.h> #include "drmP.h" #include "intel_drv.h" +#include "i915_drv.h" /** * intel_ddc_probe @@ -52,7 +53,10 @@ bool intel_ddc_probe(struct intel_output *intel_output) } }; + intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2); + intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false); + if (ret == 2) return true; @@ -70,8 +74,10 @@ int intel_ddc_get_modes(struct intel_output *intel_output) struct edid *edid; int ret = 0; + intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); edid = drm_get_edid(&intel_output->base, &intel_output->ddc_bus->adapter); + intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false); if (edid) { drm_mode_connector_update_edid_property(&intel_output->base, edid); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 7b31f55f55c..9913651c1e1 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1357,6 +1357,23 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) intel_sdvo_read_response(intel_output, &response, 2); } +static void +intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct edid *edid = NULL; + + intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); + edid = drm_get_edid(&intel_output->base, + &intel_output->ddc_bus->adapter); + if (edid != NULL) { + sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); + kfree(edid); + intel_output->base.display_info.raw_edid = NULL; + } +} + static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) { u8 response[2]; @@ -1371,9 +1388,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect if (status != SDVO_CMD_STATUS_SUCCESS) return connector_status_unknown; - if ((response[0] != 0) || (response[1] != 0)) + if ((response[0] != 0) || (response[1] != 0)) { + intel_sdvo_hdmi_sink_detect(connector); return connector_status_connected; - else + } else return connector_status_disconnected; } diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c index 32de4cedc36..077c0455a6b 100644 --- a/drivers/gpu/drm/r128/r128_cce.c +++ b/drivers/gpu/drm/r128/r128_cce.c @@ -511,9 +511,9 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) #if __OS_HAS_AGP if (!dev_priv->is_pci) { - drm_core_ioremap(dev_priv->cce_ring, dev); - drm_core_ioremap(dev_priv->ring_rptr, dev); - drm_core_ioremap(dev->agp_buffer_map, dev); + drm_core_ioremap_wc(dev_priv->cce_ring, dev); + drm_core_ioremap_wc(dev_priv->ring_rptr, dev); + drm_core_ioremap_wc(dev->agp_buffer_map, dev); if (!dev_priv->cce_ring->handle || !dev_priv->ring_rptr->handle 
|| !dev->agp_buffer_map->handle) { diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index ed4d27e6ee6..8071d965f14 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -99,9 +99,10 @@ * 1.27- Add support for IGP GART * 1.28- Add support for VBL on CRTC2 * 1.29- R500 3D cmd buffer support + * 1.30- Add support for occlusion queries */ #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 29 +#define DRIVER_MINOR 30 #define DRIVER_PATCHLEVEL 0 /* diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c index 7a339dba6a6..bfb92d28326 100644 --- a/drivers/gpu/drm/via/via_dma.c +++ b/drivers/gpu/drm/via/via_dma.c @@ -481,11 +481,13 @@ static int via_wait_idle(drm_via_private_t * dev_priv) { int count = 10000000; - while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && count--); + while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count) + ; - while (count-- && (VIA_READ(VIA_REG_STATUS) & + while (count && (VIA_READ(VIA_REG_STATUS) & (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | - VIA_3D_ENG_BUSY))) ; + VIA_3D_ENG_BUSY))) + --count; return count; } @@ -705,7 +707,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file * switch (d_siz->func) { case VIA_CMDBUF_SPACE: while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size) - && count--) { + && --count) { if (!d_siz->wait) { break; } @@ -717,7 +719,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file * break; case VIA_CMDBUF_LAG: while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size) - && count--) { + && --count) { if (!d_siz->wait) { break; } |
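A few standalone C sketches of patterns this series relies on follow; they are illustrations under stated assumptions, not driver code. First, two of the fixes above — the fail_clip_free/fail_batch_free reordering in i915_cmdbuffer() and the new err_g4 label in drm_get_dev() — are the same idiom: error labels unwind resources in the reverse order they were acquired, and an allocation added between two existing ones needs a label of its own. A minimal sketch, with hypothetical resource names and a do_work() helper that simply pretends to fail:

#include <stdio.h>
#include <stdlib.h>

static int do_work(void) { return -1; /* pretend the dispatch failed */ }
static void report_success(void) { puts("dispatched"); }

static int run(size_t batch_sz, size_t clip_sz)
{
	void *batch, *clips;
	int ret;

	batch = malloc(batch_sz);		/* acquired first */
	if (!batch)
		return -1;

	clips = malloc(clip_sz);		/* acquired second */
	if (!clips) {
		ret = -1;
		goto fail_batch_free;		/* only the batch exists yet */
	}

	ret = do_work();
	if (ret)
		goto fail_clip_free;		/* skip the success-only step */

	report_success();

	/* Success and failure both free in reverse order of acquisition,
	 * newest first, exactly as the reordered labels above do. */
fail_clip_free:
	free(clips);
fail_batch_free:
	free(batch);
	return ret;
}

int main(void)
{
	int ret = run(64, 64);

	printf("run() returned %d\n", ret);
	return 0;
}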
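Several hunks (fast_shmem_read(), i915_gem_get_relocs_from_user()/i915_gem_put_relocs_to_user(), the execbuffer copy-back) correct the same misuse: copy_to_user() and __copy_to_user_inatomic() return the number of bytes left uncopied, not a negative errno, so a nonzero result must be translated into -EFAULT rather than returned directly. A sketch of that convention, using a fake_copy_to_user() stand-in rather than the real kernel helper:

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Stand-in for copy_to_user(): pretends the last 'faulted' bytes of
 * the destination are unwritable and reports how many bytes it could
 * NOT copy (0 means complete success). */
static unsigned long fake_copy_to_user(void *dst, const void *src,
				       unsigned long len,
				       unsigned long faulted)
{
	unsigned long copied = faulted < len ? len - faulted : 0;

	memcpy(dst, src, copied);
	return len - copied;
}

static int copy_out(void *dst, const void *src, unsigned long len,
		    unsigned long faulted)
{
	unsigned long unwritten;

	unwritten = fake_copy_to_user(dst, src, len, faulted);
	if (unwritten)
		return -EFAULT;	/* translate, don't return the raw count */

	return 0;
}

int main(void)
{
	char src[32] = "relocation entries";
	char dst[32];

	printf("full copy  -> %d\n", copy_out(dst, src, sizeof(src), 0));
	printf("short copy -> %d\n", copy_out(dst, src, sizeof(src), 8));
	return 0;
}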
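The bit-17 swizzle support added to i915_gem.c and i915_gem_tiling.c hinges on one piece of arithmetic: when a page's physical address flips bit 17, the GPU's view is equivalent to swapping the two 64-byte halves of every 128-byte block, and the slow pread/pwrite paths compensate per access by XORing the in-page offset with 64 (only on pages whose physical bit 17 is set). The standalone sketch below reproduces just that arithmetic to show the two views agree; the 4096-byte page size and helper names are illustrative.

#include <stdio.h>
#include <string.h>
#include <assert.h>

#define PAGE_SIZE 4096

/* Mirror of the swap done by i915_gem_swizzle_page(): within each
 * 128-byte block the two 64-byte halves trade places, which is what
 * a flipped physical address bit 17 looks like to the GPU. */
static void swizzle_page(char *vaddr)
{
	char temp[64];
	int i;

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}
}

/* The slow pread/pwrite paths get the same effect per access by
 * XORing the in-page offset with 64. */
static size_t swizzled_offset(size_t offset)
{
	return offset ^ 64;
}

int main(void)
{
	static char page[PAGE_SIZE];
	size_t i;

	for (i = 0; i < PAGE_SIZE; i++)
		page[i] = (char)i;

	swizzle_page(page);

	/* Reading through the XORed offset recovers the original bytes. */
	for (i = 0; i < PAGE_SIZE; i++)
		assert(page[swizzled_offset(i)] == (char)i);

	/* Applying the swap twice is a no-op. */
	swizzle_page(page);
	for (i = 0; i < PAGE_SIZE; i++)
		assert(page[i] == (char)i);

	printf("bit-17 swizzle round-trip OK\n");
	return 0;
}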
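To know which pages need that swap after a rebind, i915_gem_object_save_bit_17_swizzle() records one bit per page in a long array sized with BITS_TO_LONGS, and i915_gem_object_do_bit_17_swizzle() compares it against each page's current physical address. A plain-C sketch of the bookkeeping; record_set()/record_test() are stand-ins for the kernel's __set_bit()/test_bit(), and the address in main() is made up:

#include <stdlib.h>
#include <limits.h>
#include <stdbool.h>

#define BITS_PER_LONG	(sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct page_record {
	unsigned long *bit_17;		/* one bit per page */
	int page_count;
};

static int record_alloc(struct page_record *rec, int page_count)
{
	rec->page_count = page_count;
	rec->bit_17 = calloc(BITS_TO_LONGS(page_count), sizeof(long));
	return rec->bit_17 ? 0 : -1;
}

/* Stand-ins for the kernel's __set_bit()/__clear_bit()/test_bit(). */
static void record_set(struct page_record *rec, int page, bool bit17)
{
	unsigned long mask = 1UL << (page % BITS_PER_LONG);

	if (bit17)
		rec->bit_17[page / BITS_PER_LONG] |= mask;
	else
		rec->bit_17[page / BITS_PER_LONG] &= ~mask;
}

static bool record_test(const struct page_record *rec, int page)
{
	return rec->bit_17[page / BITS_PER_LONG] &
	       (1UL << (page % BITS_PER_LONG));
}

/* At rebind time, only pages whose current bit 17 differs from the
 * recorded one need the 64-byte swap shown above. */
static bool page_needs_swizzle(const struct page_record *rec, int page,
			       unsigned long long phys_addr)
{
	bool now = phys_addr & (1ULL << 17);

	return now != record_test(rec, page);
}

int main(void)
{
	struct page_record rec;
	int needs;

	if (record_alloc(&rec, 10) != 0)
		return 1;

	/* Page 3 was unbound while its physical address had bit 17 set;
	 * if it comes back at an address with bit 17 clear, swizzle it. */
	record_set(&rec, 3, true);
	needs = page_needs_swizzle(&rec, 3, 0x10000ULL);

	free(rec.bit_17);
	return needs ? 0 : 1;
}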
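Finally, the via_dma.c hunks replace count-- with --count in the busy-wait loops. What matters is the counter's value after a timeout: post-decrement tests the old value and leaves the counter at -1, which a caller that checks it against zero, or returns it as a success indication the way via_wait_idle() does, misreads as success; pre-decrement stops at exactly zero. A small comparison with a condition_met() stub that never succeeds:

#include <stdio.h>
#include <stdbool.h>

static bool condition_met(void) { return false; /* never becomes true */ }

static int wait_post_decrement(int count)
{
	while (!condition_met() && count--)
		;
	return count;	/* -1 on timeout: nonzero, looks like success */
}

static int wait_pre_decrement(int count)
{
	while (!condition_met() && --count)
		;
	return count;	/* 0 on timeout: callers can test !count */
}

int main(void)
{
	printf("count-- timeout leaves count = %d\n", wait_post_decrement(5));
	printf("--count timeout leaves count = %d\n", wait_pre_decrement(5));
	return 0;
}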