Diffstat (limited to 'drivers/gpu/drm/i915')

 -rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  |   2
 -rw-r--r--  drivers/gpu/drm/i915/i915_dma.c      |   4
 -rw-r--r--  drivers/gpu/drm/i915/i915_drv.c      | 170
 -rw-r--r--  drivers/gpu/drm/i915/i915_drv.h      |  13
 -rw-r--r--  drivers/gpu/drm/i915/i915_gem.c      |  92
 -rw-r--r--  drivers/gpu/drm/i915/i915_irq.c      |  42
 -rw-r--r--  drivers/gpu/drm/i915/i915_reg.h      |   1
 -rw-r--r--  drivers/gpu/drm/i915/intel_crt.c     |   3
 -rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 276
 -rw-r--r--  drivers/gpu/drm/i915/intel_fb.c      |   2
 -rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c    |  11
 -rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c    |   8

 12 files changed, 433 insertions, 191 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9c9998c4dce..a894ade0309 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -290,7 +290,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
 		obj = obj_priv->obj;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-			ret = i915_gem_object_get_pages(obj);
+			ret = i915_gem_object_get_pages(obj, 0);
 			if (ret) {
 				DRM_ERROR("Failed to get pages: %d\n", ret);
 				spin_unlock(&dev_priv->mm.active_list_lock);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e660ac07f3b..2307f98349f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -735,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	if (cmdbuf->num_cliprects) {
 		cliprects = kcalloc(cmdbuf->num_cliprects,
 				    sizeof(struct drm_clip_rect), GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto fail_batch_free;
+		}
 
 		ret = copy_from_user(cliprects, cmdbuf->cliprects,
 				     cmdbuf->num_cliprects *
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 46d88965852..79beffcf593 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = {
 
 const static struct intel_device_info intel_pineview_info = {
 	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
-	.has_pipe_cxsr = 1,
+	.need_gfx_hws = 1,
 	.has_hotplug = 1,
 };
 
@@ -174,78 +174,100 @@ const static struct pci_device_id pciidlist[] = {
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!dev || !dev_priv) {
-		DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
-		DRM_ERROR("DRM not initialized, aborting suspend.\n");
-		return -ENODEV;
-	}
-
-	if (state.event == PM_EVENT_PRETHAW)
-		return 0;
-
 	pci_save_state(dev->pdev);
 
 	/* If KMS is active, we do the leavevt stuff here */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		if (i915_gem_idle(dev))
+		int error = i915_gem_idle(dev);
+		if (error) {
 			dev_err(&dev->pdev->dev,
-				"GEM idle failed, resume may fail\n");
+				"GEM idle failed, resume might fail\n");
+			return error;
+		}
 		drm_irq_uninstall(dev);
 	}
 
 	i915_save_state(dev);
 
+	return 0;
+}
+
+static void i915_drm_suspend(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	intel_opregion_free(dev, 1);
+
+	/* Modeset on resume, not lid events */
+	dev_priv->modeset_on_lid = 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+	int error;
+
+	if (!dev || !dev->dev_private) {
+		DRM_ERROR("dev: %p\n", dev);
+		DRM_ERROR("DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	if (state.event == PM_EVENT_PRETHAW)
+		return 0;
+
+	error = i915_drm_freeze(dev);
+	if (error)
+		return error;
+
+	i915_drm_suspend(dev);
+
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
 
-	/* Modeset on resume, not lid events */
-	dev_priv->modeset_on_lid = 0;
-
 	return 0;
 }
 
-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-
-	if (pci_enable_device(dev->pdev))
-		return -1;
-	pci_set_master(dev->pdev);
-
-	i915_restore_state(dev);
-
-	intel_opregion_init(dev, 1);
+	int error = 0;
 
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		mutex_lock(&dev->struct_mutex);
 		dev_priv->mm.suspended = 0;
 
-		ret = i915_gem_init_ringbuffer(dev);
-		if (ret != 0)
-			ret = -1;
+		error = i915_gem_init_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
 
 		drm_irq_install(dev);
-	}
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
 	}
 
 	dev_priv->modeset_on_lid = 0;
 
-	return ret;
+	return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+	if (pci_enable_device(dev->pdev))
+		return -EIO;
+
+	pci_set_master(dev->pdev);
+
+	i915_restore_state(dev);
+
+	intel_opregion_init(dev, 1);
+
+	return i915_drm_thaw(dev);
 }
 
 /**
@@ -386,57 +408,69 @@ i915_pci_remove(struct pci_dev *pdev)
 	drm_put_dev(dev);
 }
 
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
 
-	return i915_suspend(dev, state);
-}
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
 
-static int
-i915_pci_resume(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	error = i915_drm_freeze(drm_dev);
+	if (error)
+		return error;
 
-	return i915_resume(dev);
-}
+	i915_drm_suspend(drm_dev);
 
-static int
-i915_pm_suspend(struct device *dev)
-{
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
-}
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
 
-static int
-i915_pm_resume(struct device *dev)
-{
-	return i915_pci_resume(to_pci_dev(dev));
+	return 0;
 }
 
-static int
-i915_pm_freeze(struct device *dev)
+static int i915_pm_resume(struct device *dev)
 {
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_resume(drm_dev);
 }
 
-static int
-i915_pm_thaw(struct device *dev)
+static int i915_pm_freeze(struct device *dev)
 {
-	/* thaw during hibernate, do nothing! */
-	return 0;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	return i915_drm_freeze(drm_dev);
 }
 
-static int
-i915_pm_poweroff(struct device *dev)
+static int i915_pm_thaw(struct device *dev)
 {
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_drm_thaw(drm_dev);
 }
 
-static int
-i915_pm_restore(struct device *dev)
+static int i915_pm_poweroff(struct device *dev)
 {
-	return i915_pci_resume(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
+
+	error = i915_drm_freeze(drm_dev);
+	if (!error)
+		i915_drm_suspend(drm_dev);
+
+	return error;
 }
 
 const struct dev_pm_ops i915_pm_ops = {
@@ -445,7 +479,7 @@ const struct dev_pm_ops i915_pm_ops = {
 	.freeze = i915_pm_freeze,
 	.thaw = i915_pm_thaw,
 	.poweroff = i915_pm_poweroff,
-	.restore = i915_pm_restore,
+	.restore = i915_pm_resume,
 };
 
 static struct vm_operations_struct i915_gem_vm_ops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c1669488b5..b99b6a841d9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -493,6 +493,15 @@ typedef struct drm_i915_private {
 		struct list_head flushing_list;
 
 		/**
+		 * List of objects currently pending a GPU write flush.
+		 *
+		 * All elements on this list will belong to either the
+		 * active_list or flushing_list, last_rendering_seqno can
+		 * be used to differentiate between the two elements.
+		 */
+		struct list_head gpu_write_list;
+
+		/**
 		 * LRU list of objects which are not in the ringbuffer and
 		 * are ready to unbind, but are still in the GTT.
 		 *
@@ -592,6 +601,8 @@ struct drm_i915_gem_object {
 
 	/** This object's place on the active/flushing/inactive lists */
 	struct list_head list;
+	/** This object's place on GPU write list */
+	struct list_head gpu_write_list;
 	/** This object's place on the fenced object LRU */
 	struct list_head fence_list;
 
@@ -872,7 +883,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj);
+int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c67924ca80..ec8a0d7ffa3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
@@ -321,40 +321,24 @@ fail_unlock:
 	return ret;
 }
 
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
-	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
 static int
 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
 {
 	int ret;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 
 	/* If we've insufficient memory to map in the pages, attempt
 	 * to make some space by throwing out some old buffers.
 	 */
 	if (ret == -ENOMEM) {
 		struct drm_device *dev = obj->dev;
-		gfp_t gfp;
 
 		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret)
 			return ret;
 
-		gfp = i915_gem_object_get_page_gfp_mask(obj);
-		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
-		ret = i915_gem_object_get_pages(obj);
-		i915_gem_object_set_page_gfp_mask (obj, gfp);
+		ret = i915_gem_object_get_pages(obj, 0);
 	}
 
 	return ret;
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
@@ -1568,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
 	obj_priv->last_rendering_seqno = 0;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
@@ -1638,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 		struct drm_i915_gem_object *obj_priv, *next;
 
 		list_for_each_entry_safe(obj_priv, next,
-					 &dev_priv->mm.flushing_list, list) {
+					 &dev_priv->mm.gpu_write_list,
+					 gpu_write_list) {
 			struct drm_gem_object *obj = obj_priv->obj;
 
 			if ((obj->write_domain & flush_domains) ==
@@ -1646,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 				uint32_t old_write_domain = obj->write_domain;
 
 				obj->write_domain = 0;
+				list_del_init(&obj_priv->gpu_write_list);
 				i915_gem_object_move_to_active(obj, seqno);
 
 				trace_i915_gem_object_change_domain(obj,
@@ -2100,8 +2088,8 @@ static int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t seqno;
 	int ret;
+	uint32_t seqno;
 	bool lists_empty;
 
 	spin_lock(&dev_priv->mm.active_list_lock);
@@ -2123,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
 	if (ret)
 		return ret;
 
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
 	ret = i915_gem_evict_from_inactive_list(dev);
 	if (ret)
 		return ret;
@@ -2230,7 +2220,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 }
 
 int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+			  gfp_t gfpmask)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int page_count, i;
@@ -2256,7 +2247,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
 	inode = obj->filp->f_path.dentry->d_inode;
 	mapping = inode->i_mapping;
 	for (i = 0; i < page_count; i++) {
-		page = read_mapping_page(mapping, i, NULL);
+		page = read_cache_page_gfp(mapping, i,
+					   mapping_gfp_mask (mapping) |
+					   __GFP_COLD |
+					   gfpmask);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			i915_gem_object_put_pages(obj);
@@ -2579,7 +2573,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	struct drm_mm_node *free_space;
-	bool retry_alloc = false;
+	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
 	int ret;
 
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
@@ -2623,15 +2617,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	DRM_INFO("Binding object of size %zd at 0x%08x\n",
 		 obj->size, obj_priv->gtt_offset);
 #endif
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-						   i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
-	}
-	ret = i915_gem_object_get_pages(obj);
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-						   i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
-	}
+	ret = i915_gem_object_get_pages(obj, gfpmask);
 	if (ret) {
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
@@ -2641,9 +2627,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret) {
 			/* now try to shrink everyone else */
-			if (! retry_alloc) {
-				retry_alloc = true;
-				goto search_free;
+			if (gfpmask) {
+				gfpmask = 0;
+				goto search_free;
 			}
 
 			return ret;
@@ -2721,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
 	seqno = i915_add_request(dev, NULL, obj->write_domain);
-	obj->write_domain = 0;
+	BUG_ON(obj->write_domain);
 	i915_gem_object_move_to_active(obj, seqno);
 
 	trace_i915_gem_object_change_domain(obj,
@@ -3584,6 +3570,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
 	uint32_t reloc_count = 0, i;
 	int ret = 0;
 
+	if (relocs == NULL)
+		return 0;
+
 	for (i = 0; i < buffer_count; i++) {
 		struct drm_i915_gem_relocation_entry __user *user_relocs;
 		int unwritten;
@@ -3673,7 +3662,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_gem_object *batch_obj;
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_clip_rect *cliprects = NULL;
-	struct drm_i915_gem_relocation_entry *relocs;
+	struct drm_i915_gem_relocation_entry *relocs = NULL;
 	int ret = 0, ret2, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains, reloc_index;
@@ -3699,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (args->num_cliprects != 0) {
 		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
 				    GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto pre_mutex_err;
+		}
 
 		ret = copy_from_user(cliprects,
 				     (struct drm_clip_rect __user *)
@@ -3742,6 +3733,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		if (object_list[i] == NULL) {
 			DRM_ERROR("Invalid object handle %d at index %d\n",
 				   exec_list[i].handle, i);
+			/* prevent error path from reading uninitialized data */
+			args->buffer_count = i + 1;
 			ret = -EBADF;
 			goto err;
 		}
@@ -3750,6 +3743,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		if (obj_priv->in_execbuffer) {
 			DRM_ERROR("Object %p appears more than once in object list\n",
 				   object_list[i]);
+			/* prevent error path from reading uninitialized data */
+			args->buffer_count = i + 1;
 			ret = -EBADF;
 			goto err;
 		}
@@ -3863,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		i915_gem_flush(dev,
 			       dev->invalidate_domains,
 			       dev->flush_domains);
-		if (dev->flush_domains)
+		if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
 			(void)i915_add_request(dev, file_priv,
 					       dev->flush_domains);
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
 		uint32_t old_write_domain = obj->write_domain;
 
 		obj->write_domain = obj->pending_write_domain;
+		if (obj->write_domain)
+			list_move_tail(&obj_priv->gpu_write_list,
+				       &dev_priv->mm.gpu_write_list);
+		else
+			list_del_init(&obj_priv->gpu_write_list);
+
 		trace_i915_gem_object_change_domain(obj,
 						    obj->read_domains,
 						    old_write_domain);
@@ -3946,6 +3948,7 @@ err:
 
 	mutex_unlock(&dev->struct_mutex);
 
+pre_mutex_err:
 	/* Copy the updated relocations out regardless of current error
 	 * state.  Failure to update the relocs would mean that the next
 	 * time userland calls execbuf, it would do so with presumed offset
@@ -3960,7 +3963,6 @@ err:
 			ret = ret2;
 	}
 
-pre_mutex_err:
 	drm_free_large(object_list);
 	kfree(cliprects);
 
@@ -4383,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	obj_priv->obj = obj;
 	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj_priv->list);
+	INIT_LIST_HEAD(&obj_priv->gpu_write_list);
 	INIT_LIST_HEAD(&obj_priv->fence_list);
 	obj_priv->madv = I915_MADV_WILLNEED;
 
@@ -4834,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
 	spin_lock_init(&dev_priv->mm.active_list_lock);
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4946,7 +4950,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 	if (!obj_priv->phys_obj)
 		return;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret)
 		goto out;
 
@@ -5004,7 +5008,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
 	obj_priv->phys_obj->cur_obj = obj;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret) {
 		DRM_ERROR("failed to get page list\n");
 		goto out;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 89a071a3e6f..a17d6bdfe63 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	if (de_iir & DE_GSE)
 		ironlake_opregion_gse_intr(dev);
 
+	if (de_iir & DE_PLANEA_FLIP_DONE) {
+		intel_prepare_page_flip(dev, 0);
+		intel_finish_page_flip(dev, 0);
+	}
+
+	if (de_iir & DE_PLANEB_FLIP_DONE) {
+		intel_prepare_page_flip(dev, 1);
+		intel_finish_page_flip(dev, 1);
+	}
+
+	if (de_iir & DE_PIPEA_VBLANK)
+		drm_handle_vblank(dev, 0);
+
+	if (de_iir & DE_PIPEB_VBLANK)
+		drm_handle_vblank(dev, 1);
+
 	/* check event from PCH */
 	if ((de_iir & DE_PCH_EVENT) &&
 	    (pch_iir & SDE_HOTPLUG_MASK)) {
@@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 	if (!(pipeconf & PIPEACONF_ENABLE))
 		return -EINVAL;
 
-	if (IS_IRONLAKE(dev))
-		return 0;
-
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	if (IS_I965G(dev))
+	if (IS_IRONLAKE(dev))
+		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+					    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+	else if (IS_I965G(dev))
 		i915_enable_pipestat(dev_priv, pipe,
 				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	else
@@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
-	if (IS_IRONLAKE(dev))
-		return;
-
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, pipe,
-			      PIPE_VBLANK_INTERRUPT_ENABLE |
-			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
+	if (IS_IRONLAKE(dev))
+		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+					     DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+	else
+		i915_disable_pipestat(dev_priv, pipe,
+				      PIPE_VBLANK_INTERRUPT_ENABLE |
+				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
@@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	/* enable kind of interrupts always enabled */
-	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
+	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
 	u32 render_mask = GT_USER_INTERRUPT;
 	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
 	dev_priv->irq_mask_reg = ~display_mask;
-	dev_priv->de_irq_enable_reg = display_mask;
+	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
 
 	/* should always can generate irq */
 	I915_WRITE(DEIIR, I915_READ(DEIIR));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 847006c5218..ab1bd2d3d3b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -338,6 +338,7 @@
 #define   FBC_CTL_PERIODIC	(1<<30)
 #define   FBC_CTL_INTERVAL_SHIFT (16)
 #define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define   FBC_C3_IDLE		(1<<13)
 #define   FBC_CTL_STRIDE_SHIFT	(5)
 #define   FBC_CTL_FENCENO	(1<<0)
 #define FBC_COMMAND		0x0320c
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ddefc871edf..79dd4026586 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 	adpa = I915_READ(PCH_ADPA);
 
 	adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+	/* disable HPD first */
+	I915_WRITE(PCH_ADPA, adpa);
+	(void)I915_READ(PCH_ADPA);
 
 	adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
 			ADPA_CRT_HOTPLUG_WARMUP_10MS |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 45da78ef4a9..b27202d23eb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -240,33 +240,86 @@ struct intel_limit {
 #define IRONLAKE_DOT_MAX	350000
 #define IRONLAKE_VCO_MIN	1760000
 #define IRONLAKE_VCO_MAX	3510000
-#define IRONLAKE_N_MIN		1
-#define IRONLAKE_N_MAX		6
-#define IRONLAKE_M_MIN		79
-#define IRONLAKE_M_MAX		127
 #define IRONLAKE_M1_MIN		12
 #define IRONLAKE_M1_MAX		22
 #define IRONLAKE_M2_MIN		5
 #define IRONLAKE_M2_MAX		9
-#define IRONLAKE_P_SDVO_DAC_MIN	5
-#define IRONLAKE_P_SDVO_DAC_MAX	80
-#define IRONLAKE_P_LVDS_MIN	28
-#define IRONLAKE_P_LVDS_MAX	112
-#define IRONLAKE_P1_MIN		1
-#define IRONLAKE_P1_MAX		8
-#define IRONLAKE_P2_SDVO_DAC_SLOW 10
-#define IRONLAKE_P2_SDVO_DAC_FAST 5
-#define IRONLAKE_P2_LVDS_SLOW	14 /* single channel */
-#define IRONLAKE_P2_LVDS_FAST	7  /* double channel */
 #define IRONLAKE_P2_DOT_LIMIT	225000 /* 225Mhz */
-#define IRONLAKE_P_DISPLAY_PORT_MIN	10
-#define IRONLAKE_P_DISPLAY_PORT_MAX	20
-#define IRONLAKE_P2_DISPLAY_PORT_FAST	10
-#define IRONLAKE_P2_DISPLAY_PORT_SLOW	10
-#define IRONLAKE_P2_DISPLAY_PORT_LIMIT	0
-#define IRONLAKE_P1_DISPLAY_PORT_MIN	1
-#define IRONLAKE_P1_DISPLAY_PORT_MAX	2
+
+/* We have parameter ranges for different type of outputs. */
+
+/* DAC & HDMI Refclk 120Mhz */
+#define IRONLAKE_DAC_N_MIN	1
+#define IRONLAKE_DAC_N_MAX	5
+#define IRONLAKE_DAC_M_MIN	79
+#define IRONLAKE_DAC_M_MAX	127
+#define IRONLAKE_DAC_P_MIN	5
+#define IRONLAKE_DAC_P_MAX	80
+#define IRONLAKE_DAC_P1_MIN	1
+#define IRONLAKE_DAC_P1_MAX	8
+#define IRONLAKE_DAC_P2_SLOW	10
+#define IRONLAKE_DAC_P2_FAST	5
+
+/* LVDS single-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_S_N_MIN	1
+#define IRONLAKE_LVDS_S_N_MAX	3
+#define IRONLAKE_LVDS_S_M_MIN	79
+#define IRONLAKE_LVDS_S_M_MAX	118
+#define IRONLAKE_LVDS_S_P_MIN	28
+#define IRONLAKE_LVDS_S_P_MAX	112
+#define IRONLAKE_LVDS_S_P1_MIN	2
+#define IRONLAKE_LVDS_S_P1_MAX	8
+#define IRONLAKE_LVDS_S_P2_SLOW	14
+#define IRONLAKE_LVDS_S_P2_FAST	14
+
+/* LVDS dual-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_D_N_MIN	1
+#define IRONLAKE_LVDS_D_N_MAX	3
+#define IRONLAKE_LVDS_D_M_MIN	79
+#define IRONLAKE_LVDS_D_M_MAX	127
+#define IRONLAKE_LVDS_D_P_MIN	14
+#define IRONLAKE_LVDS_D_P_MAX	56
+#define IRONLAKE_LVDS_D_P1_MIN	2
+#define IRONLAKE_LVDS_D_P1_MAX	8
+#define IRONLAKE_LVDS_D_P2_SLOW	7
+#define IRONLAKE_LVDS_D_P2_FAST	7
+
+/* LVDS single-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_S_SSC_N_MIN	1
+#define IRONLAKE_LVDS_S_SSC_N_MAX	2
+#define IRONLAKE_LVDS_S_SSC_M_MIN	79
+#define IRONLAKE_LVDS_S_SSC_M_MAX	126
+#define IRONLAKE_LVDS_S_SSC_P_MIN	28
+#define IRONLAKE_LVDS_S_SSC_P_MAX	112
+#define IRONLAKE_LVDS_S_SSC_P1_MIN	2
+#define IRONLAKE_LVDS_S_SSC_P1_MAX	8
+#define IRONLAKE_LVDS_S_SSC_P2_SLOW	14
+#define IRONLAKE_LVDS_S_SSC_P2_FAST	14
+
+/* LVDS dual-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_D_SSC_N_MIN	1
+#define IRONLAKE_LVDS_D_SSC_N_MAX	3
+#define IRONLAKE_LVDS_D_SSC_M_MIN	79
+#define IRONLAKE_LVDS_D_SSC_M_MAX	126
+#define IRONLAKE_LVDS_D_SSC_P_MIN	14
+#define IRONLAKE_LVDS_D_SSC_P_MAX	42
+#define IRONLAKE_LVDS_D_SSC_P1_MIN	2
+#define IRONLAKE_LVDS_D_SSC_P1_MAX	6
+#define IRONLAKE_LVDS_D_SSC_P2_SLOW	7
+#define IRONLAKE_LVDS_D_SSC_P2_FAST	7
+
+/* DisplayPort */
+#define IRONLAKE_DP_N_MIN	1
+#define IRONLAKE_DP_N_MAX	2
+#define IRONLAKE_DP_M_MIN	81
+#define IRONLAKE_DP_M_MAX	90
+#define IRONLAKE_DP_P_MIN	10
+#define IRONLAKE_DP_P_MAX	20
+#define IRONLAKE_DP_P2_FAST	10
+#define IRONLAKE_DP_P2_SLOW	10
+#define IRONLAKE_DP_P2_LIMIT	0
+#define IRONLAKE_DP_P1_MIN	1
+#define IRONLAKE_DP_P1_MAX	2
 
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -474,33 +527,78 @@ static const intel_limit_t intel_limits_pineview_lvds = {
 	.find_pll = intel_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_ironlake_sdvo = {
+static const intel_limit_t intel_limits_ironlake_dac = {
 	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
-	.n   = { .min = IRONLAKE_N_MIN,            .max = IRONLAKE_N_MAX },
-	.m   = { .min = IRONLAKE_M_MIN,            .max = IRONLAKE_M_MAX },
+	.n   = { .min = IRONLAKE_DAC_N_MIN,        .max = IRONLAKE_DAC_N_MAX },
+	.m   = { .min = IRONLAKE_DAC_M_MIN,        .max = IRONLAKE_DAC_M_MAX },
 	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
 	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
-	.p   = { .min = IRONLAKE_P_SDVO_DAC_MIN,   .max = IRONLAKE_P_SDVO_DAC_MAX },
-	.p1  = { .min = IRONLAKE_P1_MIN,           .max = IRONLAKE_P1_MAX },
+	.p   = { .min = IRONLAKE_DAC_P_MIN,        .max = IRONLAKE_DAC_P_MAX },
+	.p1  = { .min = IRONLAKE_DAC_P1_MIN,       .max = IRONLAKE_DAC_P1_MAX },
 	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
-		 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
+		 .p2_slow = IRONLAKE_DAC_P2_SLOW,
+		 .p2_fast = IRONLAKE_DAC_P2_FAST },
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_ironlake_lvds = {
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
 	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
-	.n   = { .min = IRONLAKE_N_MIN,            .max = IRONLAKE_N_MAX },
-	.m   = { .min = IRONLAKE_M_MIN,            .max = IRONLAKE_M_MAX },
+	.n   = { .min = IRONLAKE_LVDS_S_N_MIN,     .max = IRONLAKE_LVDS_S_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_S_M_MIN,     .max = IRONLAKE_LVDS_S_M_MAX },
 	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
 	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
-	.p   = { .min = IRONLAKE_P_LVDS_MIN,       .max = IRONLAKE_P_LVDS_MAX },
-	.p1  = { .min = IRONLAKE_P1_MIN,           .max = IRONLAKE_P1_MAX },
+	.p   = { .min = IRONLAKE_LVDS_S_P_MIN,     .max = IRONLAKE_LVDS_S_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_S_P1_MIN,    .max = IRONLAKE_LVDS_S_P1_MAX },
 	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-		 .p2_slow = IRONLAKE_P2_LVDS_SLOW,
-		 .p2_fast = IRONLAKE_P2_LVDS_FAST },
+		 .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
+	.n   = { .min = IRONLAKE_LVDS_D_N_MIN,     .max = IRONLAKE_LVDS_D_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_D_M_MIN,     .max = IRONLAKE_LVDS_D_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_LVDS_D_P_MIN,     .max = IRONLAKE_LVDS_D_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_D_P1_MIN,    .max = IRONLAKE_LVDS_D_P1_MAX },
+	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		 .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
+	.n   = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
+	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		 .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
+	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
+	.n   = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
+	.m   = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
+	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
+	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
+	.p   = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
+	.p1  = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
+	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+		 .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
+		 .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
+	.find_pll = intel_g4x_find_best_PLL,
 };
 
@@ -509,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
 	.dot = { .min = IRONLAKE_DOT_MIN,
 		 .max = IRONLAKE_DOT_MAX },
 	.vco = { .min = IRONLAKE_VCO_MIN,
 		 .max = IRONLAKE_VCO_MAX},
-	.n   = { .min = IRONLAKE_N_MIN,
-		 .max = IRONLAKE_N_MAX },
-	.m   = { .min = IRONLAKE_M_MIN,
-		 .max = IRONLAKE_M_MAX },
+	.n   = { .min = IRONLAKE_DP_N_MIN,
+		 .max = IRONLAKE_DP_N_MAX },
+	.m   = { .min = IRONLAKE_DP_M_MIN,
+		 .max = IRONLAKE_DP_M_MAX },
 	.m1  = { .min = IRONLAKE_M1_MIN,
 		 .max = IRONLAKE_M1_MAX },
 	.m2  = { .min = IRONLAKE_M2_MIN,
 		 .max = IRONLAKE_M2_MAX },
-	.p   = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
-		 .max = IRONLAKE_P_DISPLAY_PORT_MAX },
-	.p1  = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
-		 .max = IRONLAKE_P1_DISPLAY_PORT_MAX},
-	.p2  = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
-		 .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
-		 .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
+	.p   = { .min = IRONLAKE_DP_P_MIN,
+		 .max = IRONLAKE_DP_P_MAX },
+	.p1  = { .min = IRONLAKE_DP_P1_MIN,
+		 .max = IRONLAKE_DP_P1_MAX},
+	.p2  = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
+		 .p2_slow = IRONLAKE_DP_P2_SLOW,
+		 .p2_fast = IRONLAKE_DP_P2_FAST },
 	.find_pll = intel_find_pll_ironlake_dp,
 };
 
 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	const intel_limit_t *limit;
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-		limit = &intel_limits_ironlake_lvds;
-	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+	int refclk = 120;
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
+			refclk = 100;
+
+		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
+		    LVDS_CLKB_POWER_UP) {
+			/* LVDS dual channel */
+			if (refclk == 100)
+				limit = &intel_limits_ironlake_dual_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_dual_lvds;
+		} else {
+			if (refclk == 100)
+				limit = &intel_limits_ironlake_single_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_single_lvds;
+		}
+	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
 		   HAS_eDP)
 		limit = &intel_limits_ironlake_display_port;
 	else
-		limit = &intel_limits_ironlake_sdvo;
+		limit = &intel_limits_ironlake_dac;
 
 	return limit;
 }
@@ -914,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
 	/* enable it... */
 	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+	if (IS_I945GM(dev))
+		fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
 	if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -1638,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_OFF:
 		DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
 
+		drm_vblank_off(dev, pipe);
 		/* Disable display plane */
 		temp = I915_READ(dspcntr_reg);
 		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2519,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
 		sr_entries = roundup(sr_entries / cacheline_size, 1);
 		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+	} else {
+		/* Turn off self refresh if both pipes are enabled */
+		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+			   & ~FW_BLC_SELF_EN);
 	}
 
 	DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2562,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
 			srwm = 1;
 		srwm &= 0x3f;
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+	} else {
+		/* Turn off self refresh if both pipes are enabled */
+		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+			   & ~FW_BLC_SELF_EN);
 	}
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2630,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 		if (srwm < 0)
 			srwm = 1;
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
+	} else {
+		/* Turn off self refresh if both pipes are enabled */
+		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+			   & ~FW_BLC_SELF_EN);
 	}
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -3949,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
 struct intel_unpin_work {
 	struct work_struct work;
 	struct drm_device *dev;
-	struct drm_gem_object *obj;
+	struct drm_gem_object *old_fb_obj;
+	struct drm_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
 	int pending;
 };
@@ -3960,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 		container_of(__work, struct intel_unpin_work, work);
 
 	mutex_lock(&work->dev->struct_mutex);
-	i915_gem_object_unpin(work->obj);
-	drm_gem_object_unreference(work->obj);
+	i915_gem_object_unpin(work->old_fb_obj);
+	drm_gem_object_unreference(work->pending_flip_obj);
+	drm_gem_object_unreference(work->old_fb_obj);
 	mutex_unlock(&work->dev->struct_mutex);
 	kfree(work);
 }
@@ -3984,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 	spin_lock_irqsave(&dev->event_lock, flags);
 	work = intel_crtc->unpin_work;
 	if (work == NULL || !work->pending) {
+		if (work && !work->pending) {
+			obj_priv = work->pending_flip_obj->driver_private;
+			DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
+					 obj_priv,
+					 atomic_read(&obj_priv->pending_flip));
+		}
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		return;
 	}
@@ -4004,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	obj_priv = work->obj->driver_private;
-	if (atomic_dec_and_test(&obj_priv->pending_flip))
+	obj_priv = work->pending_flip_obj->driver_private;
+
+	/* Initial scanout buffer will have a 0 pending flip count */
+	if ((atomic_read(&obj_priv->pending_flip) == 0) ||
+	    atomic_dec_and_test(&obj_priv->pending_flip))
 		DRM_WAKEUP(&dev_priv->pending_flip_queue);
 	schedule_work(&work->work);
 }
@@ -4018,8 +4161,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
-	if (intel_crtc->unpin_work)
+	if (intel_crtc->unpin_work) {
 		intel_crtc->unpin_work->pending = 1;
+	} else {
+		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
+	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
@@ -4035,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
 	unsigned long flags;
-	int ret;
+	int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
+	int ret, pipesrc;
 	RING_LOCALS;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4047,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	work->event = event;
 	work->dev = crtc->dev;
 	intel_fb = to_intel_framebuffer(crtc->fb);
-	work->obj = intel_fb->obj;
+	work->old_fb_obj = intel_fb->obj;
 	INIT_WORK(&work->work, intel_unpin_work_fn);
 
 	/* We borrow the event spin lock for protecting unpin_work */
 	spin_lock_irqsave(&dev->event_lock, flags);
 	if (intel_crtc->unpin_work) {
+		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		kfree(work);
 		mutex_unlock(&dev->struct_mutex);
@@ -4066,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	ret = intel_pin_and_fence_fb_obj(dev, obj);
 	if (ret != 0) {
+		DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
+				 obj->driver_private);
 		kfree(work);
+		intel_crtc->unpin_work = NULL;
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
 
-	/* Reference the old fb object for the scheduled work. */
-	drm_gem_object_reference(work->obj);
+	/* Reference the objects for the scheduled work. */
+	drm_gem_object_reference(work->old_fb_obj);
+	drm_gem_object_reference(obj);
 
 	crtc->fb = fb;
 	i915_gem_object_flush_write_domain(obj);
 	drm_vblank_get(dev, intel_crtc->pipe);
 	obj_priv = obj->driver_private;
 	atomic_inc(&obj_priv->pending_flip);
+	work->pending_flip_obj = obj;
 
 	BEGIN_LP_RING(4);
 	OUT_RING(MI_DISPLAY_FLIP |
@@ -4086,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	OUT_RING(fb->pitch);
 	if (IS_I965G(dev)) {
 		OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
-		OUT_RING((fb->width << 16) | fb->height);
+		pipesrc = I915_READ(pipesrc_reg);
+		OUT_RING(pipesrc & 0x0fff0fff);
 	} else {
 		OUT_RING(obj_priv->gtt_offset);
 		OUT_RING(MI_NOOP);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 371d753e362..aaabbcbe590 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_pin(fbo, PAGE_SIZE);
+	ret = i915_gem_object_pin(fbo, 64*1024);
 	if (ret) {
 		DRM_ERROR("failed to pin fb: %d\n", ret);
 		goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index aa74e59bec6..b1d0acbae4e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = {
 	{
 		.ident = "Samsung SX20S",
 		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"),
+			DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
 			DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
 		},
 	},
@@ -623,6 +623,13 @@ static const struct dmi_system_id bad_lid_status[] = {
 		},
 	},
 	{
+		.ident = "Aspire 1810T",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
+		},
+	},
+	{
 		.ident = "PC-81005",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
@@ -643,7 +650,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
 {
 	enum drm_connector_status status = connector_status_connected;
 
-	if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
+	if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
 		status = connector_status_disconnected;
 
 	return status;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index eaacfd0920d..82678d30ab0 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
 		connector->connector_type = DRM_MODE_CONNECTOR_VGA;
 		intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
 					   (1 << INTEL_ANALOG_CLONE_BIT);
+	} else if (flags & SDVO_OUTPUT_CVBS0) {
+
+		sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
+		encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+		connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+		sdvo_priv->is_tv = true;
+		intel_output->needs_tv_clock = true;
+		intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
 	} else if (flags & SDVO_OUTPUT_LVDS0) {
 
 		sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;