author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-05 16:02:01 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-05 16:02:01 -0700
commit | fc1caf6eafb30ea185720e29f7f5eccca61ecd60 (patch)
tree | 666dabc25a9b02e5c05f9eba32fa6b0d8027341a /drivers/gpu/drm/i915
parent | 9779714c8af09d57527f18d9aa2207dcc27a8687 (diff)
parent | 96576a9e1a0cdb8a43d3af5846be0948f52b4460 (diff)
Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (204 commits)
agp: intel-agp: do not use PCI resources before pci_enable_device()
agp: efficeon-agp: do not use PCI resources before pci_enable_device()
drm: kill BKL from common code
drm/kms: Simplify setup of the initial I2C encoder config.
drm,io-mapping: Specify slot to use for atomic mappings
drm/radeon/kms: only expose underscan on avivo chips
drm/radeon: add new pci ids
drm: Cleanup after failing to create master->unique and dev->name
drm/radeon: tone down overchatty acpi debug messages.
drm/radeon/kms: enable underscan option for digital connectors
drm/radeon/kms: fix calculation of h/v scaling factors
drm/radeon/kms/igp: sideport is AMD only
drm/radeon/kms: handle the case of no active displays properly in the bandwidth code
drm: move ttm global code to core drm
drm/i915: Clear the Ironlake dithering flags when the pipe doesn't want it.
drm/radeon/kms: make sure HPD is set to NONE on analog-only connectors
drm/radeon/kms: make sure rio_mem is valid before unmapping it
drm/agp/i915: trim stolen space to 32M
drm/i915: Unset cursor if out-of-bounds upon mode change (v4)
drm/i915: Unreference object not handle on creation
...
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r-- | drivers/gpu/drm/i915/i915_dma.c | 24
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.c | 66
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 29
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 172
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_tiling.c | 2
-rw-r--r-- | drivers/gpu/drm/i915/i915_irq.c | 58
-rw-r--r-- | drivers/gpu/drm/i915/i915_reg.h | 56
-rw-r--r-- | drivers/gpu/drm/i915/i915_suspend.c | 9
-rw-r--r-- | drivers/gpu/drm/i915/i915_trace.h | 36
-rw-r--r-- | drivers/gpu/drm/i915/intel_display.c | 586
-rw-r--r-- | drivers/gpu/drm/i915/intel_dp.c | 143
-rw-r--r-- | drivers/gpu/drm/i915/intel_drv.h | 10
-rw-r--r-- | drivers/gpu/drm/i915/intel_hdmi.c | 9
-rw-r--r-- | drivers/gpu/drm/i915/intel_lvds.c | 331
-rw-r--r-- | drivers/gpu/drm/i915/intel_overlay.c | 14
-rw-r--r-- | drivers/gpu/drm/i915/intel_sdvo.c | 8
-rw-r--r-- | drivers/gpu/drm/i915/intel_tv.c | 12
17 files changed, 988 insertions, 577 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2305a1234f1..f19ffe87af3 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -34,12 +34,15 @@ #include "i915_drm.h" #include "i915_drv.h" #include "i915_trace.h" +#include <linux/pci.h> #include <linux/vgaarb.h> #include <linux/acpi.h> #include <linux/pnp.h> #include <linux/vga_switcheroo.h> #include <linux/slab.h> +extern int intel_max_stolen; /* from AGP driver */ + /** * Sets up the hardware status page for devices that need a physical address * in the register. @@ -1256,7 +1259,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) drm_mm_put_block(compressed_fb); } - if (!IS_GM45(dev)) { + if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) { compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096, 4096, 0); if (!compressed_llb) { @@ -1282,8 +1285,9 @@ static void i915_setup_compression(struct drm_device *dev, int size) intel_disable_fbc(dev); dev_priv->compressed_fb = compressed_fb; - - if (IS_GM45(dev)) { + if (IS_IRONLAKE_M(dev)) + I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); + else if (IS_GM45(dev)) { I915_WRITE(DPFC_CB_BASE, compressed_fb->start); } else { I915_WRITE(FBC_CFB_BASE, cfb_base); @@ -1291,7 +1295,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) dev_priv->compressed_llb = compressed_llb; } - DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, + DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, ll_base, size >> 20); } @@ -1354,7 +1358,7 @@ static int i915_load_modeset_init(struct drm_device *dev, int fb_bar = IS_I9XX(dev) ? 2 : 0; int ret = 0; - dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & + dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) & 0xff000000; /* Basic memrange allocator for stolen space (aka vram) */ @@ -2063,8 +2067,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) /* Add register map (needed for suspend/resume) */ mmio_bar = IS_I9XX(dev) ? 
0 : 1; - base = drm_get_resource_start(dev, mmio_bar); - size = drm_get_resource_len(dev, mmio_bar); + base = pci_resource_start(dev->pdev, mmio_bar); + size = pci_resource_len(dev->pdev, mmio_bar); if (i915_get_bridge_dev(dev)) { ret = -EIO; @@ -2104,6 +2108,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (ret) goto out_iomapfree; + if (prealloc_size > intel_max_stolen) { + DRM_INFO("detected %dM stolen memory, trimming to %dM\n", + prealloc_size >> 20, intel_max_stolen >> 20); + prealloc_size = intel_max_stolen; + } + dev_priv->wq = create_singlethread_workqueue("i915"); if (dev_priv->wq == NULL) { DRM_ERROR("Failed to create our workqueue.\n"); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 423dc90c1e2..5044f653e8e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -93,11 +93,11 @@ static const struct intel_device_info intel_i945gm_info = { }; static const struct intel_device_info intel_i965g_info = { - .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, + .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, }; static const struct intel_device_info intel_i965gm_info = { - .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1, + .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, }; @@ -114,7 +114,7 @@ static const struct intel_device_info intel_g45_info = { }; static const struct intel_device_info intel_gm45_info = { - .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1, + .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, @@ -134,7 +134,7 @@ static const struct intel_device_info intel_ironlake_d_info = { static const struct intel_device_info intel_ironlake_m_info = { .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, - .need_gfx_hws = 1, .has_rc6 = 1, + .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, }; @@ -148,33 +148,33 @@ static const struct intel_device_info intel_sandybridge_m_info = { .has_hotplug = 1, .is_gen6 = 1, }; -static const struct pci_device_id pciidlist[] = { - INTEL_VGA_DEVICE(0x3577, &intel_i830_info), - INTEL_VGA_DEVICE(0x2562, &intel_845g_info), - INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), +static const struct pci_device_id pciidlist[] = { /* aka */ + INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ + INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ + INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */ INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), - INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), - INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), - INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), - INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), - INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), - INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), - INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), - INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), - INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), - INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), - INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), - INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), - INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), - INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), - INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), - INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), - INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), - INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), - INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), - 
INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), - INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), - INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), + INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */ + INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */ + INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */ + INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */ + INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */ + INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */ + INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */ + INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */ + INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */ + INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */ + INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */ + INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */ + INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */ + INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */ + INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */ + INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */ + INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */ + INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */ + INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */ + INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ + INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ + INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), @@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags) /* * Clear request list */ - i915_gem_retire_requests(dev, &dev_priv->render_ring); + i915_gem_retire_requests(dev); if (need_display) i915_save_display(dev); @@ -413,7 +413,7 @@ int i965_reset(struct drm_device *dev, u8 flags) static int __devinit i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - return drm_get_dev(pdev, ent, &driver); + return drm_get_pci_dev(pdev, ent, &driver); } static void @@ -482,7 +482,7 @@ static int i915_pm_poweroff(struct device *dev) return i915_drm_freeze(drm_dev); } -const struct dev_pm_ops i915_pm_ops = { +static const struct dev_pm_ops i915_pm_ops = { .suspend = i915_pm_suspend, .resume = i915_pm_resume, .freeze = i915_pm_freeze, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2e1744d37ad..906663b9929 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -176,7 +176,8 @@ struct drm_i915_display_funcs { int (*get_display_clock_speed)(struct drm_device *dev); int (*get_fifo_size)(struct drm_device *dev, int plane); void (*update_wm)(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int pixel_size); + int planeb_clock, int sr_hdisplay, int sr_htotal, + int pixel_size); /* clock updates for mode set */ /* cursor updates */ /* render clock increase/decrease */ @@ -200,6 +201,8 @@ struct intel_device_info { u8 need_gfx_hws : 1; u8 is_g4x : 1; u8 is_pineview : 1; + u8 is_broadwater : 1; + u8 is_crestline : 1; u8 is_ironlake : 1; u8 is_gen6 : 1; u8 has_fbc : 1; @@ -288,6 +291,8 @@ typedef struct drm_i915_private { struct timer_list hangcheck_timer; int hangcheck_count; uint32_t last_acthd; + uint32_t last_instdone; + uint32_t last_instdone1; struct drm_mm vram; @@ -547,6 +552,14 @@ typedef struct drm_i915_private { struct list_head fence_list; /** + * List of objects currently pending being freed. 
+ * + * These objects are no longer in use, but due to a signal + * we were prevented from freeing them at the appointed time. + */ + struct list_head deferred_free_list; + + /** * We leave the user IRQ off as much as possible, * but this means that requests will finish and never * be retired once the system goes idle. Set a timer to @@ -677,7 +690,7 @@ struct drm_i915_gem_object { * * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE) */ - int fence_reg : 5; + signed int fence_reg : 5; /** * Used for checking the object doesn't appear more than once @@ -713,7 +726,7 @@ struct drm_i915_gem_object { * * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 * bits with absolutely no headroom. So use 4 bits. */ - int pin_count : 4; + unsigned int pin_count : 4; #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf /** AGP memory structure for our GTT binding. */ @@ -743,7 +756,7 @@ struct drm_i915_gem_object { uint32_t stride; /** Record of address bit 17 of each page at last unbind. */ - long *bit_17; + unsigned long *bit_17; /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ uint32_t agp_type; @@ -955,8 +968,7 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev, bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); -void i915_gem_retire_requests(struct drm_device *dev, - struct intel_ring_buffer *ring); +void i915_gem_retire_requests(struct drm_device *dev); void i915_gem_retire_work_handler(struct work_struct *work); void i915_gem_clflush_object(struct drm_gem_object *obj); int i915_gem_object_set_domain(struct drm_gem_object *obj, @@ -986,7 +998,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev); int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); void i915_gem_object_put_pages(struct drm_gem_object *obj); void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); -void i915_gem_object_flush_write_domain(struct drm_gem_object *obj); +int i915_gem_object_flush_write_domain(struct drm_gem_object *obj); void i915_gem_shrinker_init(void); void i915_gem_shrinker_exit(void); @@ -1046,6 +1058,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev); extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); extern void i8xx_disable_fbc(struct drm_device *dev); extern void g4x_disable_fbc(struct drm_device *dev); +extern void ironlake_disable_fbc(struct drm_device *dev); extern void intel_disable_fbc(struct drm_device *dev); extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); extern bool intel_fbc_enabled(struct drm_device *dev); @@ -1135,6 +1148,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) #define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g) #define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm) +#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) +#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5aa747fc25a..2a4ed7ca8b4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -35,7 +35,7 @@ #include <linux/swap.h> #include <linux/pci.h> -static void i915_gem_object_flush_gpu_write_domain(struct 
drm_gem_object *obj); +static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, @@ -53,6 +53,7 @@ static int i915_gem_evict_from_inactive_list(struct drm_device *dev); static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file_priv); +static void i915_gem_free_object_tail(struct drm_gem_object *obj); static LIST_HEAD(shrink_list); static DEFINE_SPINLOCK(shrink_list_lock); @@ -127,8 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, return -ENOMEM; ret = drm_gem_handle_create(file_priv, obj, &handle); - drm_gem_object_handle_unreference_unlocked(obj); - + drm_gem_object_unreference_unlocked(obj); if (ret) return ret; @@ -496,10 +496,10 @@ fast_user_write(struct io_mapping *mapping, char *vaddr_atomic; unsigned long unwritten; - vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); + vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0); unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, user_data, length); - io_mapping_unmap_atomic(vaddr_atomic); + io_mapping_unmap_atomic(vaddr_atomic, KM_USER0); if (unwritten) return -EFAULT; return 0; @@ -1709,9 +1709,9 @@ i915_get_gem_seqno(struct drm_device *dev, /** * This function clears the request list as sequence numbers are passed. */ -void -i915_gem_retire_requests(struct drm_device *dev, - struct intel_ring_buffer *ring) +static void +i915_gem_retire_requests_ring(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; uint32_t seqno; @@ -1751,6 +1751,30 @@ i915_gem_retire_requests(struct drm_device *dev, } void +i915_gem_retire_requests(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + if (!list_empty(&dev_priv->mm.deferred_free_list)) { + struct drm_i915_gem_object *obj_priv, *tmp; + + /* We must be careful that during unbind() we do not + * accidentally infinitely recurse into retire requests. + * Currently: + * retire -> free -> unbind -> wait -> retire_ring + */ + list_for_each_entry_safe(obj_priv, tmp, + &dev_priv->mm.deferred_free_list, + list) + i915_gem_free_object_tail(&obj_priv->base); + } + + i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); + if (HAS_BSD(dev)) + i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); +} + +void i915_gem_retire_work_handler(struct work_struct *work) { drm_i915_private_t *dev_priv; @@ -1761,10 +1785,7 @@ i915_gem_retire_work_handler(struct work_struct *work) dev = dev_priv->dev; mutex_lock(&dev->struct_mutex); - i915_gem_retire_requests(dev, &dev_priv->render_ring); - - if (HAS_BSD(dev)) - i915_gem_retire_requests(dev, &dev_priv->bsd_ring); + i915_gem_retire_requests(dev); if (!dev_priv->mm.suspended && (!list_empty(&dev_priv->render_ring.request_list) || @@ -1832,7 +1853,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, * a separate wait queue to handle that. */ if (ret == 0) - i915_gem_retire_requests(dev, ring); + i915_gem_retire_requests_ring(dev, ring); return ret; } @@ -1945,11 +1966,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj) * before we unbind. 
*/ ret = i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret) { - if (ret != -ERESTARTSYS) - DRM_ERROR("set_domain failed: %d\n", ret); + if (ret == -ERESTARTSYS) return ret; - } + /* Continue on if we fail due to EIO, the GPU is hung so we + * should be safe and we need to cleanup or else we might + * cause memory corruption through use-after-free. + */ BUG_ON(obj_priv->active); @@ -1985,7 +2007,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) trace_i915_gem_object_unbind(obj); - return 0; + return ret; } static struct drm_gem_object * @@ -2107,10 +2129,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) struct intel_ring_buffer *render_ring = &dev_priv->render_ring; struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring; for (;;) { - i915_gem_retire_requests(dev, render_ring); - - if (HAS_BSD(dev)) - i915_gem_retire_requests(dev, bsd_ring); + i915_gem_retire_requests(dev); /* If there's an inactive buffer available now, grab it * and be done. @@ -2583,7 +2602,10 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj) if (!IS_I965G(dev)) { int ret; - i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj); + if (ret != 0) + return ret; + ret = i915_gem_object_wait_rendering(obj); if (ret != 0) return ret; @@ -2634,10 +2656,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) if (free_space != NULL) { obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, alignment); - if (obj_priv->gtt_space != NULL) { - obj_priv->gtt_space->private = obj; + if (obj_priv->gtt_space != NULL) obj_priv->gtt_offset = obj_priv->gtt_space->start; - } } if (obj_priv->gtt_space == NULL) { /* If the gtt is empty and we're still having trouble @@ -2733,7 +2753,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj) } /** Flushes any GPU write domain for the object if it's dirty. */ -static void +static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; @@ -2741,17 +2761,18 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) - return; + return 0; /* Queue the GPU write cache flushing we need. */ old_write_domain = obj->write_domain; i915_gem_flush(dev, 0, obj->write_domain); - (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring); - BUG_ON(obj->write_domain); + if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0) + return -ENOMEM; trace_i915_gem_object_change_domain(obj, obj->read_domains, old_write_domain); + return 0; } /** Flushes the GTT write domain for the object if it's dirty. 
*/ @@ -2795,9 +2816,11 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) old_write_domain); } -void +int i915_gem_object_flush_write_domain(struct drm_gem_object *obj) { + int ret = 0; + switch (obj->write_domain) { case I915_GEM_DOMAIN_GTT: i915_gem_object_flush_gtt_write_domain(obj); @@ -2806,9 +2829,11 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj) i915_gem_object_flush_cpu_write_domain(obj); break; default: - i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj); break; } + + return ret; } /** @@ -2828,7 +2853,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) if (obj_priv->gtt_space == NULL) return -EINVAL; - i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj); + if (ret != 0) + return ret; + /* Wait on any GPU rendering and flushing to occur. */ ret = i915_gem_object_wait_rendering(obj); if (ret != 0) @@ -2878,7 +2906,9 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) if (obj_priv->gtt_space == NULL) return -EINVAL; - i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj); + if (ret) + return ret; /* Wait on any GPU rendering and flushing to occur. */ if (obj_priv->active) { @@ -2926,7 +2956,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) uint32_t old_write_domain, old_read_domains; int ret; - i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj); + if (ret) + return ret; + /* Wait on any GPU rendering and flushing to occur. */ ret = i915_gem_object_wait_rendering(obj); if (ret != 0) @@ -3216,7 +3249,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, if (offset == 0 && size == obj->size) return i915_gem_object_set_to_cpu_domain(obj, 0); - i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj); + if (ret) + return ret; + /* Wait on any GPU rendering and flushing to occur. */ ret = i915_gem_object_wait_rendering(obj); if (ret != 0) @@ -3451,7 +3487,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, reloc_offset = obj_priv->gtt_offset + reloc->offset; reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, (reloc_offset & - ~(PAGE_SIZE - 1))); + ~(PAGE_SIZE - 1)), + KM_USER0); reloc_entry = (uint32_t __iomem *)(reloc_page + (reloc_offset & (PAGE_SIZE - 1))); reloc_val = target_obj_priv->gtt_offset + reloc->delta; @@ -3462,7 +3499,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, readl(reloc_entry), reloc_val); #endif writel(reloc_val, reloc_entry); - io_mapping_unmap_atomic(reloc_page); + io_mapping_unmap_atomic(reloc_page, KM_USER0); /* The updated presumed offset for this entry will be * copied back out to the user. @@ -4313,7 +4350,6 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_busy *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; - drm_i915_private_t *dev_priv = dev->dev_private; obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { @@ -4328,10 +4364,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * actually unmasked, and our working set ends up being larger than * required. 
*/ - i915_gem_retire_requests(dev, &dev_priv->render_ring); - - if (HAS_BSD(dev)) - i915_gem_retire_requests(dev, &dev_priv->bsd_ring); + i915_gem_retire_requests(dev); obj_priv = to_intel_bo(obj); /* Don't count being on the flushing list against the object being @@ -4441,20 +4474,19 @@ int i915_gem_init_object(struct drm_gem_object *obj) return 0; } -void i915_gem_free_object(struct drm_gem_object *obj) +static void i915_gem_free_object_tail(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int ret; - trace_i915_gem_object_destroy(obj); - - while (obj_priv->pin_count > 0) - i915_gem_object_unpin(obj); - - if (obj_priv->phys_obj) - i915_gem_detach_phys_object(dev, obj); - - i915_gem_object_unbind(obj); + ret = i915_gem_object_unbind(obj); + if (ret == -ERESTARTSYS) { + list_move(&obj_priv->list, + &dev_priv->mm.deferred_free_list); + return; + } if (obj_priv->mmap_offset) i915_gem_free_mmap_offset(obj); @@ -4466,6 +4498,22 @@ void i915_gem_free_object(struct drm_gem_object *obj) kfree(obj_priv); } +void i915_gem_free_object(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + trace_i915_gem_object_destroy(obj); + + while (obj_priv->pin_count > 0) + i915_gem_object_unpin(obj); + + if (obj_priv->phys_obj) + i915_gem_detach_phys_object(dev, obj); + + i915_gem_free_object_tail(obj); +} + /** Unbinds all inactive objects. */ static int i915_gem_evict_from_inactive_list(struct drm_device *dev) @@ -4689,9 +4737,19 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); mutex_unlock(&dev->struct_mutex); - drm_irq_install(dev); + ret = drm_irq_install(dev); + if (ret) + goto cleanup_ringbuffer; return 0; + +cleanup_ringbuffer: + mutex_lock(&dev->struct_mutex); + i915_gem_cleanup_ringbuffer(dev); + dev_priv->mm.suspended = 1; + mutex_unlock(&dev->struct_mutex); + + return ret; } int @@ -4729,6 +4787,7 @@ i915_gem_load(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); + INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); INIT_LIST_HEAD(&dev_priv->render_ring.active_list); INIT_LIST_HEAD(&dev_priv->render_ring.request_list); if (HAS_BSD(dev)) { @@ -5027,10 +5086,7 @@ rescan: continue; spin_unlock(&shrink_list_lock); - i915_gem_retire_requests(dev, &dev_priv->render_ring); - - if (HAS_BSD(dev)) - i915_gem_retire_requests(dev, &dev_priv->bsd_ring); + i915_gem_retire_requests(dev); list_for_each_entry_safe(obj_priv, next_obj, &dev_priv->mm.inactive_list, diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 4b7c49d4257..155719e4d16 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -333,8 +333,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, i915_gem_release_mmap(obj); if (ret != 0) { - WARN(ret != -ERESTARTSYS, - "failed to reset object for tiling switch"); args->tiling_mode = obj_priv->tiling_mode; args->stride = obj_priv->stride; goto err; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index dba53d4b9fb..85785a8844e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -171,10 +171,10 @@ void intel_enable_asle (struct drm_device *dev) 
ironlake_enable_display_irq(dev_priv, DE_GSE); else { i915_enable_pipestat(dev_priv, 1, - I915_LEGACY_BLC_EVENT_ENABLE); + PIPE_LEGACY_BLC_EVENT_ENABLE); if (IS_I965G(dev)) i915_enable_pipestat(dev_priv, 0, - I915_LEGACY_BLC_EVENT_ENABLE); + PIPE_LEGACY_BLC_EVENT_ENABLE); } } @@ -842,7 +842,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) u32 iir, new_iir; u32 pipea_stats, pipeb_stats; u32 vblank_status; - u32 vblank_enable; int vblank = 0; unsigned long irqflags; int irq_received; @@ -856,13 +855,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) iir = I915_READ(IIR); - if (IS_I965G(dev)) { - vblank_status = I915_START_VBLANK_INTERRUPT_STATUS; - vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE; - } else { - vblank_status = I915_VBLANK_INTERRUPT_STATUS; - vblank_enable = I915_VBLANK_INTERRUPT_ENABLE; - } + if (IS_I965G(dev)) + vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS; + else + vblank_status = PIPE_VBLANK_INTERRUPT_STATUS; for (;;) { irq_received = iir != 0; @@ -966,8 +962,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) intel_finish_page_flip(dev, 1); } - if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || - (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || + if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || + (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || (iir & I915_ASLE_INTERRUPT)) opregion_asle_intr(dev); @@ -1233,16 +1229,21 @@ void i915_hangcheck_elapsed(unsigned long data) { struct drm_device *dev = (struct drm_device *)data; drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t acthd; + uint32_t acthd, instdone, instdone1; /* No reset support on this chip yet. */ if (IS_GEN6(dev)) return; - if (!IS_I965G(dev)) + if (!IS_I965G(dev)) { acthd = I915_READ(ACTHD); - else + instdone = I915_READ(INSTDONE); + instdone1 = 0; + } else { acthd = I915_READ(ACTHD_I965); + instdone = I915_READ(INSTDONE_I965); + instdone1 = I915_READ(INSTDONE1); + } /* If all work is done then ACTHD clearly hasn't advanced. */ if (list_empty(&dev_priv->render_ring.request_list) || @@ -1253,21 +1254,24 @@ void i915_hangcheck_elapsed(unsigned long data) return; } - if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) { - DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); - i915_handle_error(dev, true); - return; - } + if (dev_priv->last_acthd == acthd && + dev_priv->last_instdone == instdone && + dev_priv->last_instdone1 == instdone1) { + if (dev_priv->hangcheck_count++ > 1) { + DRM_ERROR("Hangcheck timer elapsed... 
GPU hung\n"); + i915_handle_error(dev, true); + return; + } + } else { + dev_priv->hangcheck_count = 0; + + dev_priv->last_acthd = acthd; + dev_priv->last_instdone = instdone; + dev_priv->last_instdone1 = instdone1; + } /* Reset timer case chip hangs without another request being added */ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); - - if (acthd != dev_priv->last_acthd) - dev_priv->hangcheck_count = 0; - else - dev_priv->hangcheck_count++; - - dev_priv->last_acthd = acthd; } /* drm_dma.h hooks diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index cf41c672def..281db6e5403 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -442,7 +442,7 @@ #define GEN6_RENDER_IMR 0x20a8 #define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) #define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7) -#define GEN6_RENDER TIMEOUT_COUNTER_EXPIRED (1 << 6) +#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6) #define GEN6_RENDER_L3_PARITY_ERROR (1 << 5) #define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4) #define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3) @@ -530,6 +530,21 @@ #define DPFC_CHICKEN 0x3224 #define DPFC_HT_MODIFY (1<<31) +/* Framebuffer compression for Ironlake */ +#define ILK_DPFC_CB_BASE 0x43200 +#define ILK_DPFC_CONTROL 0x43208 +/* The bit 28-8 is reserved */ +#define DPFC_RESERVED (0x1FFFFF00) +#define ILK_DPFC_RECOMP_CTL 0x4320c +#define ILK_DPFC_STATUS 0x43210 +#define ILK_DPFC_FENCE_YOFF 0x43218 +#define ILK_DPFC_CHICKEN 0x43224 +#define ILK_FBC_RT_BASE 0x2128 +#define ILK_FBC_RT_VALID (1<<0) + +#define ILK_DISPLAY_CHICKEN1 0x42000 +#define ILK_FBCQ_DIS (1<<22) + /* * GPIO regs */ @@ -595,32 +610,6 @@ #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ #define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ -#define I915_FIFO_UNDERRUN_STATUS (1UL<<31) -#define I915_CRC_ERROR_ENABLE (1UL<<29) -#define I915_CRC_DONE_ENABLE (1UL<<28) -#define I915_GMBUS_EVENT_ENABLE (1UL<<27) -#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25) -#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) -#define I915_DPST_EVENT_ENABLE (1UL<<23) -#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22) -#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) -#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) -#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ -#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) -#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16) -#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) -#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12) -#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11) -#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9) -#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) -#define I915_DPST_EVENT_STATUS (1UL<<7) -#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6) -#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) -#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) -#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ -#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1) -#define I915_OVERLAY_UPDATED_STATUS (1UL<<0) - #define SRX_INDEX 0x3c4 #define SRX_DATA 0x3c5 #define SR01 1 @@ -2166,7 +2155,8 @@ #define I830_FIFO_LINE_SIZE 32 #define G4X_FIFO_SIZE 127 -#define I945_FIFO_SIZE 127 /* 945 & 965 */ +#define I965_FIFO_SIZE 512 +#define I945_FIFO_SIZE 127 #define I915_FIFO_SIZE 95 #define I855GM_FIFO_SIZE 127 /* In cachelines */ #define I830_FIFO_SIZE 95 @@ -2185,6 +2175,9 @@ #define PINEVIEW_CURSOR_DFT_WM 0 #define PINEVIEW_CURSOR_GUARD_WM 5 +#define I965_CURSOR_FIFO 64 +#define 
I965_CURSOR_MAX_WM 32 +#define I965_CURSOR_DFT_WM 8 /* define the Watermark register on Ironlake */ #define WM0_PIPEA_ILK 0x45100 @@ -2212,6 +2205,9 @@ #define ILK_DISPLAY_FIFO 128 #define ILK_DISPLAY_MAXWM 64 #define ILK_DISPLAY_DFTWM 8 +#define ILK_CURSOR_FIFO 32 +#define ILK_CURSOR_MAXWM 16 +#define ILK_CURSOR_DFTWM 8 #define ILK_DISPLAY_SR_FIFO 512 #define ILK_DISPLAY_MAX_SRWM 0x1ff @@ -2510,6 +2506,10 @@ #define ILK_VSDPFD_FULL (1<<21) #define ILK_DSPCLK_GATE 0x42020 #define ILK_DPARB_CLK_GATE (1<<5) +/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */ +#define ILK_CLK_FBC (1<<7) +#define ILK_DPFC_DIS1 (1<<8) +#define ILK_DPFC_DIS2 (1<<9) #define DISP_ARB_CTL 0x45000 #define DISP_TILE_SURFACE_SWIZZLING (1<<13) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 60a5800fba6..6e2025274db 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -602,7 +602,9 @@ void i915_save_display(struct drm_device *dev) /* Only save FBC state on the platform that supports FBC */ if (I915_HAS_FBC(dev)) { - if (IS_GM45(dev)) { + if (IS_IRONLAKE_M(dev)) { + dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); + } else if (IS_GM45(dev)) { dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); } else { dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); @@ -706,7 +708,10 @@ void i915_restore_display(struct drm_device *dev) /* only restore FBC info on the platform that supports FBC*/ if (I915_HAS_FBC(dev)) { - if (IS_GM45(dev)) { + if (IS_IRONLAKE_M(dev)) { + ironlake_disable_fbc(dev); + I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); + } else if (IS_GM45(dev)) { g4x_disable_fbc(dev); I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); } else { diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index fab21760dd5..fea97a21cc1 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -262,6 +262,42 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end, TP_ARGS(dev) ); +TRACE_EVENT(i915_flip_request, + TP_PROTO(int plane, struct drm_gem_object *obj), + + TP_ARGS(plane, obj), + + TP_STRUCT__entry( + __field(int, plane) + __field(struct drm_gem_object *, obj) + ), + + TP_fast_assign( + __entry->plane = plane; + __entry->obj = obj; + ), + + TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) +); + +TRACE_EVENT(i915_flip_complete, + TP_PROTO(int plane, struct drm_gem_object *obj), + + TP_ARGS(plane, obj), + + TP_STRUCT__entry( + __field(int, plane) + __field(struct drm_gem_object *, obj) + ), + + TP_fast_assign( + __entry->plane = plane; + __entry->obj = obj; + ), + + TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) +); + #endif /* _I915_TRACE_H_ */ /* This part must be outside protection */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 714bf539918..1e5e0d379fa 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -33,6 +33,7 @@ #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" +#include "i915_trace.h" #include "drm_dp_helper.h" #include "drm_crtc_helper.h" @@ -42,6 +43,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type); static void intel_update_watermarks(struct drm_device *dev); static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule); +static void intel_crtc_update_cursor(struct drm_crtc *crtc); typedef struct { /* given values */ @@ -322,6 +324,9 @@ struct intel_limit { #define 
IRONLAKE_DP_P1_MIN 1 #define IRONLAKE_DP_P1_MAX 2 +/* FDI */ +#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ + static bool intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock); @@ -1125,6 +1130,67 @@ static bool g4x_fbc_enabled(struct drm_device *dev) return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; } +static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA : + DPFC_CTL_PLANEB; + unsigned long stall_watermark = 200; + u32 dpfc_ctl; + + dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; + dev_priv->cfb_fence = obj_priv->fence_reg; + dev_priv->cfb_plane = intel_crtc->plane; + + dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); + dpfc_ctl &= DPFC_RESERVED; + dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); + if (obj_priv->tiling_mode != I915_TILING_NONE) { + dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); + I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); + } else { + I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY); + } + + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); + I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | + (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | + (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); + I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); + I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); + /* enable it... */ + I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) | + DPFC_CTL_EN); + + DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); +} + +void ironlake_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); + dpfc_ctl &= ~DPFC_CTL_EN; + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); + intel_wait_for_vblank(dev); + + DRM_DEBUG_KMS("disabled FBC\n"); +} + +static bool ironlake_fbc_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; +} + bool intel_fbc_enabled(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1286,7 +1352,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) switch (obj_priv->tiling_mode) { case I915_TILING_NONE: - alignment = 64 * 1024; + if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) + alignment = 128 * 1024; + else if (IS_I965G(dev)) + alignment = 4 * 1024; + else + alignment = 64 * 1024; break; case I915_TILING_X: /* pin() will align the object as required by fence */ @@ -1653,6 +1724,15 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) int fdi_rx_imr_reg = (pipe == 0) ? 
FDI_RXA_IMR : FDI_RXB_IMR; u32 temp, tries = 0; + /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit + for train result */ + temp = I915_READ(fdi_rx_imr_reg); + temp &= ~FDI_RX_SYMBOL_LOCK; + temp &= ~FDI_RX_BIT_LOCK; + I915_WRITE(fdi_rx_imr_reg, temp); + I915_READ(fdi_rx_imr_reg); + udelay(150); + /* enable CPU FDI TX and PCH FDI RX */ temp = I915_READ(fdi_tx_reg); temp |= FDI_TX_ENABLE; @@ -1670,16 +1750,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) I915_READ(fdi_rx_reg); udelay(150); - /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit - for train result */ - temp = I915_READ(fdi_rx_imr_reg); - temp &= ~FDI_RX_SYMBOL_LOCK; - temp &= ~FDI_RX_BIT_LOCK; - I915_WRITE(fdi_rx_imr_reg, temp); - I915_READ(fdi_rx_imr_reg); - udelay(150); - - for (;;) { + for (tries = 0; tries < 5; tries++) { temp = I915_READ(fdi_rx_iir_reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); @@ -1689,14 +1760,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) temp | FDI_RX_BIT_LOCK); break; } - - tries++; - - if (tries > 5) { - DRM_DEBUG_KMS("FDI train 1 fail!\n"); - break; - } } + if (tries == 5) + DRM_DEBUG_KMS("FDI train 1 fail!\n"); /* Train 2 */ temp = I915_READ(fdi_tx_reg); @@ -1712,7 +1778,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) tries = 0; - for (;;) { + for (tries = 0; tries < 5; tries++) { temp = I915_READ(fdi_rx_iir_reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); @@ -1722,14 +1788,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) DRM_DEBUG_KMS("FDI train 2 done.\n"); break; } - - tries++; - - if (tries > 5) { - DRM_DEBUG_KMS("FDI train 2 fail!\n"); - break; - } } + if (tries == 5) + DRM_DEBUG_KMS("FDI train 2 fail!\n"); DRM_DEBUG_KMS("FDI train done\n"); } @@ -1754,6 +1815,15 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) int fdi_rx_imr_reg = (pipe == 0) ? 
FDI_RXA_IMR : FDI_RXB_IMR; u32 temp, i; + /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit + for train result */ + temp = I915_READ(fdi_rx_imr_reg); + temp &= ~FDI_RX_SYMBOL_LOCK; + temp &= ~FDI_RX_BIT_LOCK; + I915_WRITE(fdi_rx_imr_reg, temp); + I915_READ(fdi_rx_imr_reg); + udelay(150); + /* enable CPU FDI TX and PCH FDI RX */ temp = I915_READ(fdi_tx_reg); temp |= FDI_TX_ENABLE; @@ -1779,15 +1849,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) I915_READ(fdi_rx_reg); udelay(150); - /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit - for train result */ - temp = I915_READ(fdi_rx_imr_reg); - temp &= ~FDI_RX_SYMBOL_LOCK; - temp &= ~FDI_RX_BIT_LOCK; - I915_WRITE(fdi_rx_imr_reg, temp); - I915_READ(fdi_rx_imr_reg); - udelay(150); - for (i = 0; i < 4; i++ ) { temp = I915_READ(fdi_tx_reg); temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; @@ -1942,7 +2003,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) } /* Enable panel fitting for LVDS */ - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) + || HAS_eDP || intel_pch_has_edp(crtc)) { temp = I915_READ(pf_ctl_reg); I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); @@ -2037,9 +2099,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) reg = I915_READ(trans_dp_ctl); reg &= ~TRANS_DP_PORT_SEL_MASK; reg = TRANS_DP_OUTPUT_ENABLE | - TRANS_DP_ENH_FRAMING | - TRANS_DP_VSYNC_ACTIVE_HIGH | - TRANS_DP_HSYNC_ACTIVE_HIGH; + TRANS_DP_ENH_FRAMING; + + if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) + reg |= TRANS_DP_HSYNC_ACTIVE_HIGH; + if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) + reg |= TRANS_DP_VSYNC_ACTIVE_HIGH; switch (intel_trans_dp_port_sel(crtc)) { case PCH_DP_B: @@ -2079,6 +2144,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) intel_crtc_load_lut(crtc); + intel_update_fbc(crtc, &crtc->mode); + break; case DRM_MODE_DPMS_OFF: DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); @@ -2093,6 +2160,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) I915_READ(dspbase_reg); } + if (dev_priv->cfb_plane == plane && + dev_priv->display.disable_fbc) + dev_priv->display.disable_fbc(dev); + i915_disable_vga(dev); /* disable cpu pipe, disable after all planes disabled */ @@ -2472,8 +2543,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; if (HAS_PCH_SPLIT(dev)) { /* FDI link clock is fixed at 2.7G */ - if (mode->clock * 3 > 27000 * 4) - return MODE_CLOCK_HIGH; + if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) + return false; } return true; } @@ -2655,6 +2726,20 @@ static struct intel_watermark_params g4x_wm_info = { 2, G4X_FIFO_LINE_SIZE, }; +static struct intel_watermark_params g4x_cursor_wm_info = { + I965_CURSOR_FIFO, + I965_CURSOR_MAX_WM, + I965_CURSOR_DFT_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; +static struct intel_watermark_params i965_cursor_wm_info = { + I965_CURSOR_FIFO, + I965_CURSOR_MAX_WM, + I965_CURSOR_DFT_WM, + 2, + I915_FIFO_LINE_SIZE, +}; static struct intel_watermark_params i945_wm_info = { I945_FIFO_SIZE, I915_MAX_WM, @@ -2692,6 +2777,14 @@ static struct intel_watermark_params ironlake_display_wm_info = { ILK_FIFO_LINE_SIZE }; +static struct intel_watermark_params ironlake_cursor_wm_info = { + ILK_CURSOR_FIFO, + ILK_CURSOR_MAXWM, + ILK_CURSOR_DFTWM, + 2, + ILK_FIFO_LINE_SIZE +}; + static struct intel_watermark_params ironlake_display_srwm_info = { ILK_DISPLAY_SR_FIFO, ILK_DISPLAY_MAX_SRWM, @@ -2741,7 +2834,7 @@ static unsigned long intel_calculate_wm(unsigned 
long clock_in_khz, */ entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / 1000; - entries_required /= wm->cacheline_size; + entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); @@ -2752,8 +2845,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, /* Don't promote wm_size to unsigned... */ if (wm_size > (long)wm->max_wm) wm_size = wm->max_wm; - if (wm_size <= 0) + if (wm_size <= 0) { wm_size = wm->default_wm; + DRM_ERROR("Insufficient FIFO for plane, expect flickering:" + " entries required = %ld, available = %lu.\n", + entries_required + wm->guard_size, + wm->fifo_size); + } + return wm_size; } @@ -2862,11 +2961,9 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane) uint32_t dsparb = I915_READ(DSPARB); int size; - if (plane == 0) - size = dsparb & 0x7f; - else - size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - - (dsparb & 0x7f); + size = dsparb & 0x7f; + if (plane) + size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", size); @@ -2880,11 +2977,9 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane) uint32_t dsparb = I915_READ(DSPARB); int size; - if (plane == 0) - size = dsparb & 0x1ff; - else - size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - - (dsparb & 0x1ff); + size = dsparb & 0x1ff; + if (plane) + size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; size >>= 1; /* Convert to cachelines */ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, @@ -2925,7 +3020,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) } static void pineview_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int pixel_size) + int planeb_clock, int sr_hdisplay, int unused, + int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; u32 reg; @@ -2990,7 +3086,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, } static void g4x_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int pixel_size) + int planeb_clock, int sr_hdisplay, int sr_htotal, + int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; int total_size, cacheline_size; @@ -3014,12 +3111,12 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, */ entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) / 1000; - entries_required /= G4X_FIFO_LINE_SIZE; + entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); planea_wm = entries_required + planea_params.guard_size; entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) / 1000; - entries_required /= G4X_FIFO_LINE_SIZE; + entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); planeb_wm = entries_required + planeb_params.guard_size; cursora_wm = cursorb_wm = 16; @@ -3033,13 +3130,24 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, static const int sr_latency_ns = 12000; sr_clock = planea_clock ? 
planea_clock : planeb_clock; - line_time_us = ((sr_hdisplay * 1000) / sr_clock); + line_time_us = ((sr_htotal * 1000) / sr_clock); /* Use ns/us then divide to preserve precision */ - sr_entries = (((sr_latency_ns / line_time_us) + 1) * - pixel_size * sr_hdisplay) / 1000; - sr_entries = roundup(sr_entries / cacheline_size, 1); - DRM_DEBUG("self-refresh entries: %d\n", sr_entries); + sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * sr_hdisplay; + sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); + + entries_required = (((sr_latency_ns / line_time_us) + + 1000) / 1000) * pixel_size * 64; + entries_required = DIV_ROUND_UP(entries_required, + g4x_cursor_wm_info.cacheline_size); + cursor_sr = entries_required + g4x_cursor_wm_info.guard_size; + + if (cursor_sr > g4x_cursor_wm_info.max_wm) + cursor_sr = g4x_cursor_wm_info.max_wm; + DRM_DEBUG_KMS("self-refresh watermark: display plane %d " + "cursor %d\n", sr_entries, cursor_sr); + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); } else { /* Turn off self refresh if both pipes are enabled */ @@ -3064,11 +3172,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, } static void i965_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int pixel_size) + int planeb_clock, int sr_hdisplay, int sr_htotal, + int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; unsigned long line_time_us; int sr_clock, sr_entries, srwm = 1; + int cursor_sr = 16; /* Calc sr entries for one plane configs */ if (sr_hdisplay && (!planea_clock || !planeb_clock)) { @@ -3076,17 +3186,31 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, static const int sr_latency_ns = 12000; sr_clock = planea_clock ? planea_clock : planeb_clock; - line_time_us = ((sr_hdisplay * 1000) / sr_clock); + line_time_us = ((sr_htotal * 1000) / sr_clock); /* Use ns/us then divide to preserve precision */ - sr_entries = (((sr_latency_ns / line_time_us) + 1) * - pixel_size * sr_hdisplay) / 1000; - sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1); + sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * sr_hdisplay; + sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE); DRM_DEBUG("self-refresh entries: %d\n", sr_entries); - srwm = I945_FIFO_SIZE - sr_entries; + srwm = I965_FIFO_SIZE - sr_entries; if (srwm < 0) srwm = 1; - srwm &= 0x3f; + srwm &= 0x1ff; + + sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * 64; + sr_entries = DIV_ROUND_UP(sr_entries, + i965_cursor_wm_info.cacheline_size); + cursor_sr = i965_cursor_wm_info.fifo_size - + (sr_entries + i965_cursor_wm_info.guard_size); + + if (cursor_sr > i965_cursor_wm_info.max_wm) + cursor_sr = i965_cursor_wm_info.max_wm; + + DRM_DEBUG_KMS("self-refresh watermark: display plane %d " + "cursor %d\n", srwm, cursor_sr); + if (IS_I965GM(dev)) I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); } else { @@ -3103,10 +3227,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) | (8 << 0)); I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); + /* update cursor SR watermark */ + I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); } static void i9xx_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int pixel_size) + int planeb_clock, int sr_hdisplay, int sr_htotal, + int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t fwater_lo; @@ -3151,12 
+3278,12 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, static const int sr_latency_ns = 6000; sr_clock = planea_clock ? planea_clock : planeb_clock; - line_time_us = ((sr_hdisplay * 1000) / sr_clock); + line_time_us = ((sr_htotal * 1000) / sr_clock); /* Use ns/us then divide to preserve precision */ - sr_entries = (((sr_latency_ns / line_time_us) + 1) * - pixel_size * sr_hdisplay) / 1000; - sr_entries = roundup(sr_entries / cacheline_size, 1); + sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * sr_hdisplay; + sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); srwm = total_size - sr_entries; if (srwm < 0) @@ -3194,7 +3321,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, } static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, - int unused2, int pixel_size) + int unused2, int unused3, int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; @@ -3212,9 +3339,11 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, } #define ILK_LP0_PLANE_LATENCY 700 +#define ILK_LP0_CURSOR_LATENCY 1300 static void ironlake_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int pixel_size) + int planeb_clock, int sr_hdisplay, int sr_htotal, + int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; int planea_wm, planeb_wm, cursora_wm, cursorb_wm; @@ -3222,20 +3351,48 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, unsigned long line_time_us; int sr_clock, entries_required; u32 reg_value; + int line_count; + int planea_htotal = 0, planeb_htotal = 0; + struct drm_crtc *crtc; + struct intel_crtc *intel_crtc; + + /* Need htotal for all active display plane */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + intel_crtc = to_intel_crtc(crtc); + if (crtc->enabled) { + if (intel_crtc->plane == 0) + planea_htotal = crtc->mode.htotal; + else + planeb_htotal = crtc->mode.htotal; + } + } /* Calculate and update the watermark for plane A */ if (planea_clock) { entries_required = ((planea_clock / 1000) * pixel_size * ILK_LP0_PLANE_LATENCY) / 1000; entries_required = DIV_ROUND_UP(entries_required, - ironlake_display_wm_info.cacheline_size); + ironlake_display_wm_info.cacheline_size); planea_wm = entries_required + ironlake_display_wm_info.guard_size; if (planea_wm > (int)ironlake_display_wm_info.max_wm) planea_wm = ironlake_display_wm_info.max_wm; - cursora_wm = 16; + /* Use the large buffer method to calculate cursor watermark */ + line_time_us = (planea_htotal * 1000) / planea_clock; + + /* Use ns/us then divide to preserve precision */ + line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; + + /* calculate the cursor watermark for cursor A */ + entries_required = line_count * 64 * pixel_size; + entries_required = DIV_ROUND_UP(entries_required, + ironlake_cursor_wm_info.cacheline_size); + cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size; + if (cursora_wm > ironlake_cursor_wm_info.max_wm) + cursora_wm = ironlake_cursor_wm_info.max_wm; + reg_value = I915_READ(WM0_PIPEA_ILK); reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | @@ -3249,14 +3406,27 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, entries_required = ((planeb_clock / 1000) * pixel_size * 
ILK_LP0_PLANE_LATENCY) / 1000; entries_required = DIV_ROUND_UP(entries_required, - ironlake_display_wm_info.cacheline_size); + ironlake_display_wm_info.cacheline_size); planeb_wm = entries_required + ironlake_display_wm_info.guard_size; if (planeb_wm > (int)ironlake_display_wm_info.max_wm) planeb_wm = ironlake_display_wm_info.max_wm; - cursorb_wm = 16; + /* Use the large buffer method to calculate cursor watermark */ + line_time_us = (planeb_htotal * 1000) / planeb_clock; + + /* Use ns/us then divide to preserve precision */ + line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; + + /* calculate the cursor watermark for cursor B */ + entries_required = line_count * 64 * pixel_size; + entries_required = DIV_ROUND_UP(entries_required, + ironlake_cursor_wm_info.cacheline_size); + cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size; + if (cursorb_wm > ironlake_cursor_wm_info.max_wm) + cursorb_wm = ironlake_cursor_wm_info.max_wm; + reg_value = I915_READ(WM0_PIPEB_ILK); reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | @@ -3271,12 +3441,12 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, * display plane is used. */ if (!planea_clock || !planeb_clock) { - int line_count; + /* Read the self-refresh latency. The unit is 0.5us */ int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; sr_clock = planea_clock ? planea_clock : planeb_clock; - line_time_us = ((sr_hdisplay * 1000) / sr_clock); + line_time_us = ((sr_htotal * 1000) / sr_clock); /* Use ns/us then divide to preserve precision */ line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) @@ -3285,14 +3455,14 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, /* calculate the self-refresh watermark for display plane */ entries_required = line_count * sr_hdisplay * pixel_size; entries_required = DIV_ROUND_UP(entries_required, - ironlake_display_srwm_info.cacheline_size); + ironlake_display_srwm_info.cacheline_size); sr_wm = entries_required + ironlake_display_srwm_info.guard_size; /* calculate the self-refresh watermark for display cursor */ entries_required = line_count * pixel_size * 64; entries_required = DIV_ROUND_UP(entries_required, - ironlake_cursor_srwm_info.cacheline_size); + ironlake_cursor_srwm_info.cacheline_size); cursor_wm = entries_required + ironlake_cursor_srwm_info.guard_size; @@ -3336,6 +3506,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, * bytes per pixel * where * line time = htotal / dotclock + * surface width = hdisplay for normal plane and 64 for cursor * and latency is assumed to be high, as above. 
* * The final value programmed to the register should always be rounded up, @@ -3352,6 +3523,7 @@ static void intel_update_watermarks(struct drm_device *dev) int sr_hdisplay = 0; unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; int enabled = 0, pixel_size = 0; + int sr_htotal = 0; if (!dev_priv->display.update_wm) return; @@ -3372,6 +3544,7 @@ static void intel_update_watermarks(struct drm_device *dev) } sr_hdisplay = crtc->mode.hdisplay; sr_clock = crtc->mode.clock; + sr_htotal = crtc->mode.htotal; if (crtc->fb) pixel_size = crtc->fb->bits_per_pixel / 8; else @@ -3383,7 +3556,7 @@ static void intel_update_watermarks(struct drm_device *dev) return; dev_priv->display.update_wm(dev, planea_clock, planeb_clock, - sr_hdisplay, pixel_size); + sr_hdisplay, sr_htotal, pixel_size); } static int intel_crtc_mode_set(struct drm_crtc *crtc, @@ -3502,6 +3675,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, return -EINVAL; } + /* Ensure that the cursor is valid for the new mode before changing... */ + intel_crtc_update_cursor(crtc); + if (is_lvds && dev_priv->lvds_downclock_avail) { has_reduced_clock = limit->find_pll(limit, crtc, dev_priv->lvds_downclock, @@ -3568,7 +3744,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, temp |= PIPE_8BPC; else temp |= PIPE_6BPC; - } else if (is_edp) { + } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) { switch (dev_priv->edp_bpp/3) { case 8: temp |= PIPE_8BPC; @@ -3811,6 +3987,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, udelay(150); } + if (HAS_PCH_SPLIT(dev)) { + pipeconf &= ~PIPE_ENABLE_DITHER; + pipeconf &= ~PIPE_DITHER_TYPE_MASK; + } + /* The LVDS pin pair needs to be on before the DPLLs are enabled. * This is an exception to the general rule that mode_set doesn't turn * things on. @@ -3853,16 +4034,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, if (dev_priv->lvds_dither) { if (HAS_PCH_SPLIT(dev)) { pipeconf |= PIPE_ENABLE_DITHER; - pipeconf &= ~PIPE_DITHER_TYPE_MASK; pipeconf |= PIPE_DITHER_TYPE_ST01; } else lvds |= LVDS_ENABLE_DITHER; } else { - if (HAS_PCH_SPLIT(dev)) { - pipeconf &= ~PIPE_ENABLE_DITHER; - pipeconf &= ~PIPE_DITHER_TYPE_MASK; - } else + if (!HAS_PCH_SPLIT(dev)) { lvds &= ~LVDS_ENABLE_DITHER; + } } } I915_WRITE(lvds_reg, lvds); @@ -4038,6 +4216,85 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) } } +/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ +static void intel_crtc_update_cursor(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int x = intel_crtc->cursor_x; + int y = intel_crtc->cursor_y; + uint32_t base, pos; + bool visible; + + pos = 0; + + if (crtc->fb) { + base = intel_crtc->cursor_addr; + if (x > (int) crtc->fb->width) + base = 0; + + if (y > (int) crtc->fb->height) + base = 0; + } else + base = 0; + + if (x < 0) { + if (x + intel_crtc->cursor_width < 0) + base = 0; + + pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; + x = -x; + } + pos |= x << CURSOR_X_SHIFT; + + if (y < 0) { + if (y + intel_crtc->cursor_height < 0) + base = 0; + + pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; + y = -y; + } + pos |= y << CURSOR_Y_SHIFT; + + visible = base != 0; + if (!visible && !intel_crtc->cursor_visble) + return; + + I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos); + if (intel_crtc->cursor_visble != visible) { + uint32_t cntl = I915_READ(pipe == 0 ? 
CURACNTR : CURBCNTR); + if (base) { + /* Hooray for CUR*CNTR differences */ + if (IS_MOBILE(dev) || IS_I9XX(dev)) { + cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); + cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; + cntl |= pipe << 28; /* Connect to correct pipe */ + } else { + cntl &= ~(CURSOR_FORMAT_MASK); + cntl |= CURSOR_ENABLE; + cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE; + } + } else { + if (IS_MOBILE(dev) || IS_I9XX(dev)) { + cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); + cntl |= CURSOR_MODE_DISABLE; + } else { + cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); + } + } + I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl); + + intel_crtc->cursor_visble = visible; + } + /* and commit changes on next vblank */ + I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base); + + if (visible) + intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); +} + static int intel_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, @@ -4048,11 +4305,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_gem_object *bo; struct drm_i915_gem_object *obj_priv; - int pipe = intel_crtc->pipe; - uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; - uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; - uint32_t temp = I915_READ(control); - size_t addr; + uint32_t addr; int ret; DRM_DEBUG_KMS("\n"); @@ -4060,12 +4313,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, /* if we want to turn off the cursor ignore width and height */ if (!handle) { DRM_DEBUG_KMS("cursor off\n"); - if (IS_MOBILE(dev) || IS_I9XX(dev)) { - temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); - temp |= CURSOR_MODE_DISABLE; - } else { - temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); - } addr = 0; bo = NULL; mutex_lock(&dev->struct_mutex); @@ -4107,7 +4354,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, addr = obj_priv->gtt_offset; } else { - ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); + ret = i915_gem_attach_phys_object(dev, bo, + (intel_crtc->pipe == 0) ? 
I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); if (ret) { DRM_ERROR("failed to attach phys object\n"); goto fail_locked; @@ -4118,21 +4366,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, if (!IS_I9XX(dev)) I915_WRITE(CURSIZE, (height << 12) | width); - /* Hooray for CUR*CNTR differences */ - if (IS_MOBILE(dev) || IS_I9XX(dev)) { - temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); - temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; - temp |= (pipe << 28); /* Connect to correct pipe */ - } else { - temp &= ~(CURSOR_FORMAT_MASK); - temp |= CURSOR_ENABLE; - temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE; - } - finish: - I915_WRITE(control, temp); - I915_WRITE(base, addr); - if (intel_crtc->cursor_bo) { if (dev_priv->info->cursor_needs_physical) { if (intel_crtc->cursor_bo != bo) @@ -4146,6 +4380,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, intel_crtc->cursor_addr = addr; intel_crtc->cursor_bo = bo; + intel_crtc->cursor_width = width; + intel_crtc->cursor_height = height; + + intel_crtc_update_cursor(crtc); return 0; fail_unpin: @@ -4159,34 +4397,12 @@ fail: static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_framebuffer *intel_fb; - int pipe = intel_crtc->pipe; - uint32_t temp = 0; - uint32_t adder; - if (crtc->fb) { - intel_fb = to_intel_framebuffer(crtc->fb); - intel_mark_busy(dev, intel_fb->obj); - } + intel_crtc->cursor_x = x; + intel_crtc->cursor_y = y; - if (x < 0) { - temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; - x = -x; - } - if (y < 0) { - temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; - y = -y; - } - - temp |= x << CURSOR_X_SHIFT; - temp |= y << CURSOR_Y_SHIFT; - - adder = intel_crtc->cursor_addr; - I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); - I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder); + intel_crtc_update_cursor(crtc); return 0; } @@ -4770,6 +4986,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev, atomic_dec_and_test(&obj_priv->pending_flip)) DRM_WAKEUP(&dev_priv->pending_flip_queue); schedule_work(&work->work); + + trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); } void intel_finish_page_flip(struct drm_device *dev, int pipe) @@ -4847,27 +5065,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, mutex_lock(&dev->struct_mutex); ret = intel_pin_and_fence_fb_obj(dev, obj); - if (ret != 0) { - mutex_unlock(&dev->struct_mutex); - - spin_lock_irqsave(&dev->event_lock, flags); - intel_crtc->unpin_work = NULL; - spin_unlock_irqrestore(&dev->event_lock, flags); - - kfree(work); - - DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", - to_intel_bo(obj)); - return ret; - } + if (ret) + goto cleanup_work; /* Reference the objects for the scheduled work. 
*/ drm_gem_object_reference(work->old_fb_obj); drm_gem_object_reference(obj); crtc->fb = fb; - i915_gem_object_flush_write_domain(obj); - drm_vblank_get(dev, intel_crtc->pipe); + ret = i915_gem_object_flush_write_domain(obj); + if (ret) + goto cleanup_objs; + + ret = drm_vblank_get(dev, intel_crtc->pipe); + if (ret) + goto cleanup_objs; + obj_priv = to_intel_bo(obj); atomic_inc(&obj_priv->pending_flip); work->pending_flip_obj = obj; @@ -4905,7 +5118,23 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, mutex_unlock(&dev->struct_mutex); + trace_i915_flip_request(intel_crtc->plane, obj); + return 0; + +cleanup_objs: + drm_gem_object_unreference(work->old_fb_obj); + drm_gem_object_unreference(obj); +cleanup_work: + mutex_unlock(&dev->struct_mutex); + + spin_lock_irqsave(&dev->event_lock, flags); + intel_crtc->unpin_work = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + + kfree(work); + + return ret; } static const struct drm_crtc_helper_funcs intel_helper_funcs = { @@ -5032,19 +5261,26 @@ static void intel_setup_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_encoder *encoder; + bool dpd_is_edp = false; - intel_crt_init(dev); - - /* Set up integrated LVDS */ if (IS_MOBILE(dev) && !IS_I830(dev)) intel_lvds_init(dev); if (HAS_PCH_SPLIT(dev)) { - int found; + dpd_is_edp = intel_dpd_is_edp(dev); if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) intel_dp_init(dev, DP_A); + if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) + intel_dp_init(dev, PCH_DP_D); + } + + intel_crt_init(dev); + + if (HAS_PCH_SPLIT(dev)) { + int found; + if (I915_READ(HDMIB) & PORT_DETECTED) { /* PCH SDVOB multiplex with HDMIB */ found = intel_sdvo_init(dev, PCH_SDVOB); @@ -5063,7 +5299,7 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(PCH_DP_C) & DP_DETECTED) intel_dp_init(dev, PCH_DP_C); - if (I915_READ(PCH_DP_D) & DP_DETECTED) + if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) intel_dp_init(dev, PCH_DP_D); } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { @@ -5472,6 +5708,26 @@ void intel_init_clock_gating(struct drm_device *dev) (I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS)); } + /* + * Based on the document from hardware guys the following bits + * should be set unconditionally in order to enable FBC. + * The bit 22 of 0x42000 + * The bit 22 of 0x42004 + * The bit 7,8,9 of 0x42020. 
+ */ + if (IS_IRONLAKE_M(dev)) { + I915_WRITE(ILK_DISPLAY_CHICKEN1, + I915_READ(ILK_DISPLAY_CHICKEN1) | + ILK_FBCQ_DIS); + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_DPARB_GATE); + I915_WRITE(ILK_DSPCLK_GATE, + I915_READ(ILK_DSPCLK_GATE) | + ILK_DPFC_DIS1 | + ILK_DPFC_DIS2 | + ILK_CLK_FBC); + } return; } else if (IS_G4X(dev)) { uint32_t dspclk_gate; @@ -5550,7 +5806,11 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.dpms = i9xx_crtc_dpms; if (I915_HAS_FBC(dev)) { - if (IS_GM45(dev)) { + if (IS_IRONLAKE_M(dev)) { + dev_priv->display.fbc_enabled = ironlake_fbc_enabled; + dev_priv->display.enable_fbc = ironlake_enable_fbc; + dev_priv->display.disable_fbc = ironlake_disable_fbc; + } else if (IS_GM45(dev)) { dev_priv->display.fbc_enabled = g4x_fbc_enabled; dev_priv->display.enable_fbc = g4x_enable_fbc; dev_priv->display.disable_fbc = g4x_disable_fbc; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 5dde80f9e65..40be1fa65be 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -43,6 +43,7 @@ #define DP_LINK_CONFIGURATION_SIZE 9 #define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) +#define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp) struct intel_dp_priv { uint32_t output_reg; @@ -56,6 +57,7 @@ struct intel_dp_priv { struct intel_encoder *intel_encoder; struct i2c_adapter adapter; struct i2c_algo_dp_aux_data algo; + bool is_pch_edp; }; static void @@ -128,8 +130,9 @@ intel_dp_link_required(struct drm_device *dev, struct intel_encoder *intel_encoder, int pixel_clock) { struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - if (IS_eDP(intel_encoder)) + if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) return (pixel_clock * dev_priv->edp_bpp) / 8; else return pixel_clock * 3; @@ -147,9 +150,21 @@ intel_dp_mode_valid(struct drm_connector *connector, { struct drm_encoder *encoder = intel_attached_encoder(connector); struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); int max_lanes = intel_dp_max_lane_count(intel_encoder); + if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && + dev_priv->panel_fixed_mode) { + if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) + return MODE_PANEL; + + if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay) + return MODE_PANEL; + } + /* only refuse the mode on non eDP since we have seen some wierd eDP panels which are outside spec tolerances but somehow work by magic */ if (!IS_eDP(intel_encoder) && @@ -508,11 +523,37 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, { struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; int lane_count, clock; int max_lane_count = intel_dp_max_lane_count(intel_encoder); int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 
1 : 0; static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; + if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && + dev_priv->panel_fixed_mode) { + struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode; + + adjusted_mode->hdisplay = fixed_mode->hdisplay; + adjusted_mode->hsync_start = fixed_mode->hsync_start; + adjusted_mode->hsync_end = fixed_mode->hsync_end; + adjusted_mode->htotal = fixed_mode->htotal; + + adjusted_mode->vdisplay = fixed_mode->vdisplay; + adjusted_mode->vsync_start = fixed_mode->vsync_start; + adjusted_mode->vsync_end = fixed_mode->vsync_end; + adjusted_mode->vtotal = fixed_mode->vtotal; + + adjusted_mode->clock = fixed_mode->clock; + drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); + + /* + * the mode->clock is used to calculate the Data&Link M/N + * of the pipe. For the eDP the fixed clock should be used. + */ + mode->clock = dev_priv->panel_fixed_mode->clock; + } + for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { for (clock = 0; clock <= max_clock; clock++) { int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); @@ -531,7 +572,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, } } - if (IS_eDP(intel_encoder)) { + if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { /* okay we failed just pick the highest */ dp_priv->lane_count = max_lane_count; dp_priv->link_bw = bws[max_clock]; @@ -563,14 +604,14 @@ intel_reduce_ratio(uint32_t *num, uint32_t *den) } static void -intel_dp_compute_m_n(int bytes_per_pixel, +intel_dp_compute_m_n(int bpp, int nlanes, int pixel_clock, int link_clock, struct intel_dp_m_n *m_n) { m_n->tu = 64; - m_n->gmch_m = pixel_clock * bytes_per_pixel; + m_n->gmch_m = (pixel_clock * bpp) >> 3; m_n->gmch_n = link_clock * nlanes; intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); m_n->link_m = pixel_clock; @@ -578,6 +619,28 @@ intel_dp_compute_m_n(int bytes_per_pixel, intel_reduce_ratio(&m_n->link_m, &m_n->link_n); } +bool intel_pch_has_edp(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_encoder *encoder; + + list_for_each_entry(encoder, &mode_config->encoder_list, head) { + struct intel_encoder *intel_encoder; + struct intel_dp_priv *dp_priv; + + if (!encoder || encoder->crtc != crtc) + continue; + + intel_encoder = enc_to_intel_encoder(encoder); + dp_priv = intel_encoder->dev_priv; + + if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) + return dp_priv->is_pch_edp; + } + return false; +} + void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -587,7 +650,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_encoder *encoder; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int lane_count = 4; + int lane_count = 4, bpp = 24; struct intel_dp_m_n m_n; /* @@ -605,6 +668,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { lane_count = dp_priv->lane_count; + if (IS_PCH_eDP(dp_priv)) + bpp = dev_priv->edp_bpp; break; } } @@ -614,7 +679,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, * the number of bytes_per_pixel post-LUT, which we always * set up for 8-bits of R/G/B, or 3 bytes total. 
*/ - intel_dp_compute_m_n(3, lane_count, + intel_dp_compute_m_n(bpp, lane_count, mode->clock, adjusted_mode->clock, &m_n); if (HAS_PCH_SPLIT(dev)) { @@ -796,7 +861,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) if (mode != DRM_MODE_DPMS_ON) { if (dp_reg & DP_PORT_EN) { intel_dp_link_down(intel_encoder, dp_priv->DP); - if (IS_eDP(intel_encoder)) { + if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { ironlake_edp_backlight_off(dev); ironlake_edp_panel_off(dev); } @@ -804,7 +869,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) } else { if (!(dp_reg & DP_PORT_EN)) { intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); - if (IS_eDP(intel_encoder)) { + if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { ironlake_edp_panel_on(dev); ironlake_edp_backlight_on(dev); } @@ -1340,17 +1405,32 @@ static int intel_dp_get_modes(struct drm_connector *connector) struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); struct drm_device *dev = intel_encoder->enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; int ret; /* We should parse the EDID data and find out if it has an audio sink */ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); - if (ret) + if (ret) { + if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && + !dev_priv->panel_fixed_mode) { + struct drm_display_mode *newmode; + list_for_each_entry(newmode, &connector->probed_modes, + head) { + if (newmode->type & DRM_MODE_TYPE_PREFERRED) { + dev_priv->panel_fixed_mode = + drm_mode_duplicate(dev, newmode); + break; + } + } + } + return ret; + } /* if eDP has no EDID, try to use fixed panel mode from VBT */ - if (IS_eDP(intel_encoder)) { + if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { if (dev_priv->panel_fixed_mode != NULL) { struct drm_display_mode *mode; mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); @@ -1435,6 +1515,26 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc) return -1; } +/* check the VBT to see whether the eDP is on DP-D port */ +bool intel_dpd_is_edp(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct child_device_config *p_child; + int i; + + if (!dev_priv->child_dev_num) + return false; + + for (i = 0; i < dev_priv->child_dev_num; i++) { + p_child = dev_priv->child_dev + i; + + if (p_child->dvo_port == PORT_IDPD && + p_child->device_type == DEVICE_TYPE_eDP) + return true; + } + return false; +} + void intel_dp_init(struct drm_device *dev, int output_reg) { @@ -1444,6 +1544,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) struct intel_connector *intel_connector; struct intel_dp_priv *dp_priv; const char *name = NULL; + int type; intel_encoder = kcalloc(sizeof(struct intel_encoder) + sizeof(struct intel_dp_priv), 1, GFP_KERNEL); @@ -1458,18 +1559,24 @@ intel_dp_init(struct drm_device *dev, int output_reg) dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); + if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D)) + if (intel_dpd_is_edp(dev)) + dp_priv->is_pch_edp = true; + + if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) { + type = DRM_MODE_CONNECTOR_eDP; + intel_encoder->type = INTEL_OUTPUT_EDP; + } else { + type = DRM_MODE_CONNECTOR_DisplayPort; + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + } + connector = &intel_connector->base; - drm_connector_init(dev, connector, &intel_dp_connector_funcs, - DRM_MODE_CONNECTOR_DisplayPort); + drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); connector->polled = DRM_CONNECTOR_POLL_HPD; - if (output_reg == DP_A) - intel_encoder->type = INTEL_OUTPUT_EDP; - else - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; - if (output_reg == DP_B || output_reg == PCH_DP_B) intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); else if (output_reg == DP_C || output_reg == PCH_DP_C) @@ -1528,7 +1635,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) intel_encoder->ddc_bus = &dp_priv->adapter; intel_encoder->hot_plug = intel_dp_hot_plug; - if (output_reg == DP_A) { + if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) { /* initialize panel mode from VBT if available for eDP */ if (dev_priv->lfp_lvds_vbt_mode) { dev_priv->panel_fixed_mode = diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 2f7970be905..b2190148703 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -143,8 +143,6 @@ struct intel_crtc { struct drm_crtc base; enum pipe pipe; enum plane plane; - struct drm_gem_object *cursor_bo; - uint32_t cursor_addr; u8 lut_r[256], lut_g[256], lut_b[256]; int dpms_mode; bool busy; /* is scanout buffer being updated frequently? */ @@ -153,6 +151,12 @@ struct intel_crtc { struct intel_overlay *overlay; struct intel_unpin_work *unpin_work; int fdi_lanes; + + struct drm_gem_object *cursor_bo; + uint32_t cursor_addr; + int16_t cursor_x, cursor_y; + int16_t cursor_width, cursor_height; + bool cursor_visble; }; #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) @@ -179,6 +183,8 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); +extern bool intel_pch_has_edp(struct drm_crtc *crtc); +extern bool intel_dpd_is_edp(struct drm_device *dev); extern void intel_edp_link_config (struct intel_encoder *, int *, int *); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 83bd764b000..197887ed182 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -54,10 +54,11 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; u32 sdvox; - sdvox = SDVO_ENCODING_HDMI | - SDVO_BORDER_ENABLE | - SDVO_VSYNC_ACTIVE_HIGH | - SDVO_HSYNC_ACTIVE_HIGH; + sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + sdvox |= SDVO_VSYNC_ACTIVE_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + sdvox |= SDVO_HSYNC_ACTIVE_HIGH; if (hdmi_priv->has_hdmi_sink) { sdvox |= SDVO_AUDIO_ENABLE; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 0eab8df5bf7..0a2e60059fb 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -156,31 +156,73 @@ static int intel_lvds_mode_valid(struct drm_connector *connector, return MODE_OK; } +static void +centre_horizontally(struct drm_display_mode *mode, + int width) +{ + u32 border, sync_pos, blank_width, sync_width; + + /* keep the hsync and hblank widths constant */ + sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start; + blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start; + sync_pos = (blank_width - sync_width + 1) / 2; + + border = (mode->hdisplay - width + 1) / 2; + border += border & 1; /* make the border even */ + + mode->crtc_hdisplay = width; + mode->crtc_hblank_start = width + border; + 
mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width; + + mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos; + mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width; +} + +static void +centre_vertically(struct drm_display_mode *mode, + int height) +{ + u32 border, sync_pos, blank_width, sync_width; + + /* keep the vsync and vblank widths constant */ + sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start; + blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start; + sync_pos = (blank_width - sync_width + 1) / 2; + + border = (mode->vdisplay - height + 1) / 2; + + mode->crtc_vdisplay = height; + mode->crtc_vblank_start = height + border; + mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width; + + mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos; + mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width; +} + +static inline u32 panel_fitter_scaling(u32 source, u32 target) +{ + /* + * Floating point operation is not supported. So the FACTOR + * is defined, which can avoid the floating point computation + * when calculating the panel ratio. + */ +#define ACCURACY 12 +#define FACTOR (1 << ACCURACY) + u32 ratio = source * FACTOR / target; + return (FACTOR * ratio + FACTOR/2) / FACTOR; +} + static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - /* - * float point operation is not supported . So the PANEL_RATIO_FACTOR - * is defined, which can avoid the float point computation when - * calculating the panel ratio. - */ -#define PANEL_RATIO_FACTOR 8192 struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct drm_encoder *tmp_encoder; struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; - u32 pfit_control = 0, pfit_pgm_ratios = 0; - int left_border = 0, right_border = 0, top_border = 0; - int bottom_border = 0; - bool border = 0; - int panel_ratio, desired_ratio, vert_scale, horiz_scale; - int horiz_ratio, vert_ratio; - u32 hsync_width, vsync_width; - u32 hblank_width, vblank_width; - u32 hsync_pos, vsync_pos; + u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; /* Should never happen!! */ if (!IS_I965G(dev) && intel_crtc->pipe == 0) { @@ -200,27 +242,25 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, if (dev_priv->panel_fixed_mode == NULL) return true; /* - * If we have timings from the BIOS for the panel, put them in + * We have timings from the BIOS for the panel, put them in * to the adjusted mode. The CRTC will be set up for this mode, * with the panel scaling set up to source from the H/VDisplay * of the original mode. 
*/ - if (dev_priv->panel_fixed_mode != NULL) { - adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay; - adjusted_mode->hsync_start = - dev_priv->panel_fixed_mode->hsync_start; - adjusted_mode->hsync_end = - dev_priv->panel_fixed_mode->hsync_end; - adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal; - adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay; - adjusted_mode->vsync_start = - dev_priv->panel_fixed_mode->vsync_start; - adjusted_mode->vsync_end = - dev_priv->panel_fixed_mode->vsync_end; - adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal; - adjusted_mode->clock = dev_priv->panel_fixed_mode->clock; - drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); - } + adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay; + adjusted_mode->hsync_start = + dev_priv->panel_fixed_mode->hsync_start; + adjusted_mode->hsync_end = + dev_priv->panel_fixed_mode->hsync_end; + adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal; + adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay; + adjusted_mode->vsync_start = + dev_priv->panel_fixed_mode->vsync_start; + adjusted_mode->vsync_end = + dev_priv->panel_fixed_mode->vsync_end; + adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal; + adjusted_mode->clock = dev_priv->panel_fixed_mode->clock; + drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); /* Make sure pre-965s set dither correctly */ if (!IS_I965G(dev)) { @@ -230,11 +270,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, /* Native modes don't need fitting */ if (adjusted_mode->hdisplay == mode->hdisplay && - adjusted_mode->vdisplay == mode->vdisplay) { - pfit_pgm_ratios = 0; - border = 0; + adjusted_mode->vdisplay == mode->vdisplay) goto out; - } /* full screen scale for now */ if (HAS_PCH_SPLIT(dev)) @@ -242,25 +279,9 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, /* 965+ wants fuzzy fitting */ if (IS_I965G(dev)) - pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | - PFIT_FILTER_FUZZY; - - hsync_width = adjusted_mode->crtc_hsync_end - - adjusted_mode->crtc_hsync_start; - vsync_width = adjusted_mode->crtc_vsync_end - - adjusted_mode->crtc_vsync_start; - hblank_width = adjusted_mode->crtc_hblank_end - - adjusted_mode->crtc_hblank_start; - vblank_width = adjusted_mode->crtc_vblank_end - - adjusted_mode->crtc_vblank_start; - /* - * Deal with panel fitting options. Figure out how to stretch the - * image based on its aspect ratio & the current panel fitting mode. - */ - panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR / - adjusted_mode->vdisplay; - desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR / - mode->vdisplay; + pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | + PFIT_FILTER_FUZZY); + /* * Enable automatic panel scaling for non-native modes so that they fill * the screen. Should be enabled before the pipe is enabled, according @@ -278,170 +299,63 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, * For centered modes, we have to calculate border widths & * heights and modify the values programmed into the CRTC. 
*/ - left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2; - right_border = left_border; - if (mode->hdisplay & 1) - right_border++; - top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2; - bottom_border = top_border; - if (mode->vdisplay & 1) - bottom_border++; - /* Set active & border values */ - adjusted_mode->crtc_hdisplay = mode->hdisplay; - /* Keep the boder be even */ - if (right_border & 1) - right_border++; - /* use the border directly instead of border minuse one */ - adjusted_mode->crtc_hblank_start = mode->hdisplay + - right_border; - /* keep the blank width constant */ - adjusted_mode->crtc_hblank_end = - adjusted_mode->crtc_hblank_start + hblank_width; - /* get the hsync pos relative to hblank start */ - hsync_pos = (hblank_width - hsync_width) / 2; - /* keep the hsync pos be even */ - if (hsync_pos & 1) - hsync_pos++; - adjusted_mode->crtc_hsync_start = - adjusted_mode->crtc_hblank_start + hsync_pos; - /* keep the hsync width constant */ - adjusted_mode->crtc_hsync_end = - adjusted_mode->crtc_hsync_start + hsync_width; - adjusted_mode->crtc_vdisplay = mode->vdisplay; - /* use the border instead of border minus one */ - adjusted_mode->crtc_vblank_start = mode->vdisplay + - bottom_border; - /* keep the vblank width constant */ - adjusted_mode->crtc_vblank_end = - adjusted_mode->crtc_vblank_start + vblank_width; - /* get the vsync start postion relative to vblank start */ - vsync_pos = (vblank_width - vsync_width) / 2; - adjusted_mode->crtc_vsync_start = - adjusted_mode->crtc_vblank_start + vsync_pos; - /* keep the vsync width constant */ - adjusted_mode->crtc_vsync_end = - adjusted_mode->crtc_vsync_start + vsync_width; - border = 1; + centre_horizontally(adjusted_mode, mode->hdisplay); + centre_vertically(adjusted_mode, mode->vdisplay); + border = LVDS_BORDER_ENABLE; break; + case DRM_MODE_SCALE_ASPECT: - /* Scale but preserve the spect ratio */ - pfit_control |= PFIT_ENABLE; + /* Scale but preserve the aspect ratio */ if (IS_I965G(dev)) { + u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; + u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; + + pfit_control |= PFIT_ENABLE; /* 965+ is easy, it does everything in hw */ - if (panel_ratio > desired_ratio) + if (scaled_width > scaled_height) pfit_control |= PFIT_SCALING_PILLAR; - else if (panel_ratio < desired_ratio) + else if (scaled_width < scaled_height) pfit_control |= PFIT_SCALING_LETTER; else pfit_control |= PFIT_SCALING_AUTO; } else { + u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; + u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; /* * For earlier chips we have to calculate the scaling * ratio by hand and program it into the * PFIT_PGM_RATIO register */ - u32 horiz_bits, vert_bits, bits = 12; - horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/ - adjusted_mode->hdisplay; - vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/ - adjusted_mode->vdisplay; - horiz_scale = adjusted_mode->hdisplay * - PANEL_RATIO_FACTOR / mode->hdisplay; - vert_scale = adjusted_mode->vdisplay * - PANEL_RATIO_FACTOR / mode->vdisplay; - - /* retain aspect ratio */ - if (panel_ratio > desired_ratio) { /* Pillar */ - u32 scaled_width; - scaled_width = mode->hdisplay * vert_scale / - PANEL_RATIO_FACTOR; - horiz_ratio = vert_ratio; - pfit_control |= (VERT_AUTO_SCALE | - VERT_INTERP_BILINEAR | - HORIZ_INTERP_BILINEAR); - /* Pillar will have left/right borders */ - left_border = (adjusted_mode->hdisplay - - scaled_width) / 2; - right_border = left_border; - if (mode->hdisplay & 1) /* odd 
resolutions */ - right_border++; - /* keep the border be even */ - if (right_border & 1) - right_border++; - adjusted_mode->crtc_hdisplay = scaled_width; - /* use border instead of border minus one */ - adjusted_mode->crtc_hblank_start = - scaled_width + right_border; - /* keep the hblank width constant */ - adjusted_mode->crtc_hblank_end = - adjusted_mode->crtc_hblank_start + - hblank_width; - /* - * get the hsync start pos relative to - * hblank start - */ - hsync_pos = (hblank_width - hsync_width) / 2; - /* keep the hsync_pos be even */ - if (hsync_pos & 1) - hsync_pos++; - adjusted_mode->crtc_hsync_start = - adjusted_mode->crtc_hblank_start + - hsync_pos; - /* keept hsync width constant */ - adjusted_mode->crtc_hsync_end = - adjusted_mode->crtc_hsync_start + - hsync_width; - border = 1; - } else if (panel_ratio < desired_ratio) { /* letter */ - u32 scaled_height = mode->vdisplay * - horiz_scale / PANEL_RATIO_FACTOR; - vert_ratio = horiz_ratio; - pfit_control |= (HORIZ_AUTO_SCALE | - VERT_INTERP_BILINEAR | - HORIZ_INTERP_BILINEAR); - /* Letterbox will have top/bottom border */ - top_border = (adjusted_mode->vdisplay - - scaled_height) / 2; - bottom_border = top_border; - if (mode->vdisplay & 1) - bottom_border++; - adjusted_mode->crtc_vdisplay = scaled_height; - /* use border instead of border minus one */ - adjusted_mode->crtc_vblank_start = - scaled_height + bottom_border; - /* keep the vblank width constant */ - adjusted_mode->crtc_vblank_end = - adjusted_mode->crtc_vblank_start + - vblank_width; - /* - * get the vsync start pos relative to - * vblank start - */ - vsync_pos = (vblank_width - vsync_width) / 2; - adjusted_mode->crtc_vsync_start = - adjusted_mode->crtc_vblank_start + - vsync_pos; - /* keep the vsync width constant */ - adjusted_mode->crtc_vsync_end = - adjusted_mode->crtc_vsync_start + - vsync_width; - border = 1; - } else { - /* Aspects match, Let hw scale both directions */ - pfit_control |= (VERT_AUTO_SCALE | - HORIZ_AUTO_SCALE | + if (scaled_width > scaled_height) { /* pillar */ + centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay); + + border = LVDS_BORDER_ENABLE; + if (mode->vdisplay != adjusted_mode->vdisplay) { + u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay); + pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | + bits << PFIT_VERT_SCALE_SHIFT); + pfit_control |= (PFIT_ENABLE | + VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + } + } else if (scaled_width < scaled_height) { /* letter */ + centre_vertically(adjusted_mode, scaled_width / mode->hdisplay); + + border = LVDS_BORDER_ENABLE; + if (mode->hdisplay != adjusted_mode->hdisplay) { + u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay); + pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | + bits << PFIT_VERT_SCALE_SHIFT); + pfit_control |= (PFIT_ENABLE | + VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + } + } else + /* Aspects match, Let hw scale both directions */ + pfit_control |= (PFIT_ENABLE | + VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR | HORIZ_INTERP_BILINEAR); - } - horiz_bits = (1 << bits) * horiz_ratio / - PANEL_RATIO_FACTOR; - vert_bits = (1 << bits) * vert_ratio / - PANEL_RATIO_FACTOR; - pfit_pgm_ratios = - ((vert_bits << PFIT_VERT_SCALE_SHIFT) & - PFIT_VERT_SCALE_MASK) | - ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) & - PFIT_HORIZ_SCALE_MASK); } break; @@ -458,6 +372,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, VERT_INTERP_BILINEAR | HORIZ_INTERP_BILINEAR); break; + default: break; } @@ -465,14 
+380,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, out: lvds_priv->pfit_control = pfit_control; lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; - /* - * When there exists the border, it means that the LVDS_BORDR - * should be enabled. - */ - if (border) - dev_priv->lvds_border_bits |= LVDS_BORDER_ENABLE; - else - dev_priv->lvds_border_bits &= ~(LVDS_BORDER_ENABLE); + dev_priv->lvds_border_bits = border; + /* * XXX: It would be nice to support lower refresh rates on the * panels to reduce power consumption, and perhaps match the diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d7ad5139d17..d39aea24eab 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -65,7 +65,7 @@ #define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */ #define OCMD_TVSYNCFLIP_PARITY (0x1<<9) #define OCMD_TVSYNCFLIP_ENABLE (0x1<<7) -#define OCMD_BUF_TYPE_MASK (Ox1<<5) +#define OCMD_BUF_TYPE_MASK (0x1<<5) #define OCMD_BUF_TYPE_FRAME (0x0<<5) #define OCMD_BUF_TYPE_FIELD (0x1<<5) #define OCMD_TEST_MODE (0x1<<4) @@ -185,7 +185,8 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over if (OVERLAY_NONPHYSICAL(overlay->dev)) { regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, - overlay->reg_bo->gtt_offset); + overlay->reg_bo->gtt_offset, + KM_USER0); if (!regs) { DRM_ERROR("failed to map overlay regs in GTT\n"); @@ -200,7 +201,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) { if (OVERLAY_NONPHYSICAL(overlay->dev)) - io_mapping_unmap_atomic(overlay->virt_addr); + io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0); overlay->virt_addr = NULL; @@ -958,7 +959,7 @@ static int check_overlay_src(struct drm_device *dev, || rec->src_width < N_HORIZ_Y_TAPS*4) return -EINVAL; - /* check alingment constrains */ + /* check alignment constraints */ switch (rec->flags & I915_OVERLAY_TYPE_MASK) { case I915_OVERLAY_RGB: /* not implemented */ @@ -990,7 +991,10 @@ static int check_overlay_src(struct drm_device *dev, return -EINVAL; /* stride checking */ - stride_mask = 63; + if (IS_I830(dev) || IS_845G(dev)) + stride_mask = 255; + else + stride_mask = 63; if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) return -EINVAL; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 03c231be227..d9d4d51aa89 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1237,9 +1237,11 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, /* Set the SDVO control regs. 
*/ if (IS_I965G(dev)) { - sdvox |= SDVO_BORDER_ENABLE | - SDVO_VSYNC_ACTIVE_HIGH | - SDVO_HSYNC_ACTIVE_HIGH; + sdvox |= SDVO_BORDER_ENABLE; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + sdvox |= SDVO_VSYNC_ACTIVE_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + sdvox |= SDVO_HSYNC_ACTIVE_HIGH; } else { sdvox |= I915_READ(sdvo_priv->sdvo_reg); switch (sdvo_priv->sdvo_reg) { diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index d2d4e4045ca..cc3726a4a1c 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -476,7 +476,7 @@ static const struct tv_mode tv_modes[] = { .vi_end_f1 = 20, .vi_end_f2 = 21, .nbr_end = 240, - .burst_ena = 8, + .burst_ena = true, .hburst_start = 72, .hburst_len = 34, .vburst_start_f1 = 9, .vburst_end_f1 = 240, .vburst_start_f2 = 10, .vburst_end_f2 = 240, @@ -896,8 +896,6 @@ static const struct tv_mode tv_modes[] = { }, }; -#define NUM_TV_MODES sizeof(tv_modes) / sizeof (tv_modes[0]) - static void intel_tv_dpms(struct drm_encoder *encoder, int mode) { @@ -1512,7 +1510,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop tv_priv->margin[TV_MARGIN_BOTTOM] = val; changed = true; } else if (property == dev->mode_config.tv_mode_property) { - if (val >= NUM_TV_MODES) { + if (val >= ARRAY_SIZE(tv_modes)) { ret = -EINVAL; goto out; } @@ -1693,13 +1691,13 @@ intel_tv_init(struct drm_device *dev) connector->doublescan_allowed = false; /* Create TV properties then attach current values */ - tv_format_names = kmalloc(sizeof(char *) * NUM_TV_MODES, + tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes), GFP_KERNEL); if (!tv_format_names) goto out; - for (i = 0; i < NUM_TV_MODES; i++) + for (i = 0; i < ARRAY_SIZE(tv_modes); i++) tv_format_names[i] = tv_modes[i].name; - drm_mode_create_tv_properties(dev, NUM_TV_MODES, tv_format_names); + drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names); drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, initial_mode); |
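
Editor's note: the Ironlake watermark hunks above replace the fixed cursora_wm/cursorb_wm = 16 with the "large buffer method", computing a line time from htotal and the dot clock and sizing the FIFO for a 64-pixel-wide cursor. The following standalone C sketch mirrors that arithmetic as it appears in the patch (including the ns/us mixing and DIV_ROUND_UP by the cacheline size). It is illustrative only: ILK_LP0_CURSOR_LATENCY matches the #define added above, but the cacheline, guard and max values passed in main() are placeholder assumptions, not the driver's ironlake_cursor_wm_info fields.

    /* Hypothetical, self-contained sketch of the LP0 cursor watermark math. */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
    #define ILK_LP0_CURSOR_LATENCY  1300    /* value from the patch */

    static int cursor_wm_lp0(int pixel_clock_khz, int htotal, int pixel_size,
                             int cacheline_size, int guard_size, int max_wm)
    {
            /* line time in us, from htotal (not hdisplay, per the fix above) */
            int line_time_us = (htotal * 1000) / pixel_clock_khz;

            /* "Use ns/us then divide to preserve precision", as in the patch */
            int line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;

            /* the cursor surface is always 64 pixels wide */
            int entries = line_count * 64 * pixel_size;

            int wm = DIV_ROUND_UP(entries, cacheline_size) + guard_size;
            return wm > max_wm ? max_wm : wm;
    }

    int main(void)
    {
            /* example mode: ~148.5 MHz dot clock, htotal 2200, 32bpp cursor;
             * cacheline 64, guard 2 and max 64 are made-up example values */
            printf("cursor LP0 wm = %d\n",
                   cursor_wm_lp0(148500, 2200, 4, 64, 2, 64));
            return 0;
    }

The same switch from hdisplay to htotal for the line-time term is what motivates threading sr_htotal through intel_update_watermarks() and into the i9xx and Ironlake self-refresh paths in the hunks above.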