author     Linus Torvalds <torvalds@linux-foundation.org>  2014-04-08 09:52:16 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-04-08 09:52:16 -0700
commit     e9f37d3a8d126e73f5737ef548cdf6f618e295e4 (patch)
tree       831eb4952637828a7bbafa361185e0ca57aa86ed /drivers/gpu/drm/i915
parent     5fb6b953bb7aa86a9c8ea760934982cedc45c52b (diff)
parent     c39b06951f1dc2e384650288676c5b7dcc0ec92c (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"Highlights:
- drm:
Generic display port aux features, primary plane support, drm
master management fixes, logging cleanups, enforced locking checks
(instead of docs), documentation improvements, minor number
handling cleanup, pseudofs for shared inodes.
- ttm:
add ability to allocate from both ends
- i915:
broadwell features, power domain and runtime pm, per-process
address space infrastructure (not enabled)
- msm:
power management, hdmi audio support
- nouveau:
ongoing GPU fault recovery, initial maxwell support, random fixes
- exynos:
refactored driver to clean up a lot of abstraction, DP support
moved into drm, LVDS bridge support added, parallel panel support
- gma500:
SGX MMU support, SGX irq handling, asle irq work fixes
- radeon:
video engine bringup, ring handling fixes, use dp aux helpers
- vmwgfx:
add rendernode support"
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (849 commits)
DRM: armada: fix corruption while loading cursors
drm/dp_helper: don't return EPROTO for defers (v2)
drm/bridge: export ptn3460_init function
drm/exynos: remove MODULE_DEVICE_TABLE definitions
ARM: dts: exynos4412-trats2: enable exynos/fimd node
ARM: dts: exynos4210-trats: enable exynos/fimd node
ARM: dts: exynos4412-trats2: add panel node
ARM: dts: exynos4210-trats: add panel node
ARM: dts: exynos4: add MIPI DSI Master node
drm/panel: add S6E8AA0 driver
ARM: dts: exynos4210-universal_c210: add proper panel node
drm/panel: add ld9040 driver
panel/ld9040: add DT bindings
panel/s6e8aa0: add DT bindings
drm/exynos: add DSIM driver
exynos/dsim: add DT bindings
drm/exynos: disallow fbdev initialization if no device is connected
drm/mipi_dsi: create dsi devices only for nodes with reg property
drm/mipi_dsi: add flags to DSI messages
Skip intel_crt_init for Dell XPS 8700
...
Diffstat (limited to 'drivers/gpu/drm/i915')
48 files changed, 8785 insertions, 4135 deletions
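The largest single addition in the diff below is the new batch buffer command parser (i915_cmd_parser.c). As a rough feel for the table-driven scheme its DOC comment describes — match a command header against a descriptor table, decode the command's length from a per-opcode mask, and reject privileged commands — here is a minimal standalone C sketch. All types, opcodes, masks, and names in it are invented for illustration; they are not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical descriptor, loosely mirroring drm_i915_cmd_descriptor. */
struct cmd_descriptor {
	uint32_t mask;        /* header bits that identify the command */
	uint32_t value;       /* expected value under the mask */
	uint32_t len_mask;    /* header bits that encode the length */
	bool     reject;      /* parser must refuse this command */
	bool     master_only; /* allowed only from the drm master */
};

static const struct cmd_descriptor table[] = {
	/* made-up opcodes for illustration only */
	{ .mask = 0xFF800000, .value = 0x10000000, .len_mask = 0x3F },
	{ .mask = 0xFF800000, .value = 0x22000000, .len_mask = 0x3F, .reject = true },
	{ .mask = 0xFF800000, .value = 0x31000000, .len_mask = 0x3F, .master_only = true },
};

#define LENGTH_BIAS 2            /* as in the parser: field stores dwords - 2 */
#define BATCH_END   0x05000000u  /* stand-in for MI_BATCH_BUFFER_END */

static int parse(const uint32_t *cmd, const uint32_t *end, bool is_master)
{
	while (cmd < end) {
		const struct cmd_descriptor *desc = NULL;
		uint32_t length;
		size_t i;

		if (*cmd == BATCH_END)
			return 0;

		/* linear scan; the real parser also has a length-decode
		 * fallback for commands it does not need to check */
		for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
			if ((*cmd & table[i].mask) == table[i].value)
				desc = &table[i];

		if (!desc) {
			printf("unrecognized command 0x%08X\n", *cmd);
			return -1;
		}
		if (desc->reject || (desc->master_only && !is_master)) {
			printf("rejected command 0x%08X\n", *cmd);
			return -1;
		}

		length = (*cmd & desc->len_mask) + LENGTH_BIAS;
		if ((uint32_t)(end - cmd) < length)
			return -1; /* command runs past the batch */

		cmd += length;
	}
	return -1; /* fell off the end without a batch-buffer-end */
}

int main(void)
{
	/* a two-dword command, then batch-buffer-end */
	const uint32_t batch[] = { 0x10000000, 0x0, BATCH_END };

	return parse(batch, batch + 3, false) ? 1 : 0;
}

The real parser adds register-whitelist and bitmask checks on the operands, sorted tables it can validate at init time, and a per-ring length-decode vfunc; the shape of the scan loop is the same.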
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9fd44f5f3b3..b1445b73465 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,57 +3,69 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm
-i915-y := i915_drv.o i915_dma.o i915_irq.o \
-	  i915_gpu_error.o \
+
+# Please keep these build lists sorted!
+
+# core driver code
+i915-y := i915_drv.o \
+	  i915_params.o \
 	  i915_suspend.o \
-	  i915_gem.o \
+	  i915_sysfs.o \
+	  intel_pm.o
+i915-$(CONFIG_COMPAT)   += i915_ioc32.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+
+# GEM code
+i915-y += i915_cmd_parser.o \
 	  i915_gem_context.o \
 	  i915_gem_debug.o \
+	  i915_gem_dmabuf.o \
 	  i915_gem_evict.o \
 	  i915_gem_execbuffer.o \
 	  i915_gem_gtt.o \
+	  i915_gem.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
-	  i915_sysfs.o \
+	  i915_gpu_error.o \
+	  i915_irq.o \
 	  i915_trace_points.o \
-	  i915_ums.o \
+	  intel_ringbuffer.o \
+	  intel_uncore.o
+
+# modesetting core code
+i915-y += intel_bios.o \
 	  intel_display.o \
-	  intel_crt.o \
-	  intel_lvds.o \
-	  intel_dsi.o \
-	  intel_dsi_cmd.o \
-	  intel_dsi_pll.o \
-	  intel_bios.o \
-	  intel_ddi.o \
-	  intel_dp.o \
-	  intel_hdmi.o \
-	  intel_sdvo.o \
 	  intel_modes.o \
-	  intel_panel.o \
-	  intel_pm.o \
-	  intel_i2c.o \
-	  intel_tv.o \
-	  intel_dvo.o \
-	  intel_ringbuffer.o \
 	  intel_overlay.o \
-	  intel_sprite.o \
 	  intel_sideband.o \
-	  intel_uncore.o \
+	  intel_sprite.o
+i915-$(CONFIG_ACPI)		+= intel_acpi.o intel_opregion.o
+i915-$(CONFIG_DRM_I915_FBDEV)	+= intel_fbdev.o
+
+# modesetting output/encoder code
+i915-y += dvo_ch7017.o \
 	  dvo_ch7xxx.o \
-	  dvo_ch7017.o \
 	  dvo_ivch.o \
-	  dvo_tfp410.o \
-	  dvo_sil164.o \
 	  dvo_ns2501.o \
-	  i915_gem_dmabuf.o
-
-i915-$(CONFIG_COMPAT) += i915_ioc32.o
-
-i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
-
-i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+	  dvo_sil164.o \
+	  dvo_tfp410.o \
+	  intel_crt.o \
+	  intel_ddi.o \
+	  intel_dp.o \
+	  intel_dsi_cmd.o \
+	  intel_dsi.o \
+	  intel_dsi_pll.o \
+	  intel_dvo.o \
+	  intel_hdmi.o \
+	  intel_i2c.o \
+	  intel_lvds.o \
+	  intel_panel.o \
+	  intel_sdvo.o \
+	  intel_tv.o
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+# legacy horrors
+i915-y += i915_dma.o \
+	  i915_ums.o
 
 obj-$(CONFIG_DRM_I915)  += i915.o
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index af42e94f684..a0f5bdd6949 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -340,9 +340,9 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
 	for (i = 0; i < CH7xxx_NUM_REGS; i++) {
 		uint8_t val;
 		if ((i % 8) == 0)
-			DRM_LOG_KMS("\n %02X: ", i);
+			DRM_DEBUG_KMS("\n %02X: ", i);
 		ch7xxx_readb(dvo, i, &val);
-		DRM_LOG_KMS("%02X ", val);
+		DRM_DEBUG_KMS("%02X ", val);
 	}
 }
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index baaf65bf0bd..0f1865d7d4d 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -377,41 +377,41 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
 	uint16_t val;
 
 	ivch_read(dvo, VR00, &val);
-	DRM_LOG_KMS("VR00: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
 	ivch_read(dvo, VR01, &val);
-	DRM_LOG_KMS("VR01: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR01: 0x%04x\n", val);
 	ivch_read(dvo, VR30, &val);
-	DRM_LOG_KMS("VR30: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR30: 0x%04x\n", val);
 	ivch_read(dvo, VR40, &val);
-	DRM_LOG_KMS("VR40: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR40: 0x%04x\n", val);
 
 	/* GPIO registers */
 	ivch_read(dvo, VR80, &val);
-	DRM_LOG_KMS("VR80: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR80: 0x%04x\n", val);
 	ivch_read(dvo, VR81, &val);
-	DRM_LOG_KMS("VR81: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR81: 0x%04x\n", val);
 	ivch_read(dvo, VR82, &val);
-	DRM_LOG_KMS("VR82: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR82: 0x%04x\n", val);
 	ivch_read(dvo, VR83, &val);
-	DRM_LOG_KMS("VR83: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR83: 0x%04x\n", val);
 	ivch_read(dvo, VR84, &val);
-	DRM_LOG_KMS("VR84: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR84: 0x%04x\n", val);
 	ivch_read(dvo, VR85, &val);
-	DRM_LOG_KMS("VR85: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR85: 0x%04x\n", val);
 	ivch_read(dvo, VR86, &val);
-	DRM_LOG_KMS("VR86: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR86: 0x%04x\n", val);
 	ivch_read(dvo, VR87, &val);
-	DRM_LOG_KMS("VR87: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR87: 0x%04x\n", val);
 	ivch_read(dvo, VR88, &val);
-	DRM_LOG_KMS("VR88: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR88: 0x%04x\n", val);
 
 	/* Scratch register 0 - AIM Panel type */
 	ivch_read(dvo, VR8E, &val);
-	DRM_LOG_KMS("VR8E: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR8E: 0x%04x\n", val);
 
 	/* Scratch register 1 - Status register */
 	ivch_read(dvo, VR8F, &val);
-	DRM_LOG_KMS("VR8F: 0x%04x\n", val);
+	DRM_DEBUG_KMS("VR8F: 0x%04x\n", val);
 }
 
 static void ivch_destroy(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index 954acb2c702..8155ded7907 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -490,15 +490,15 @@ static void ns2501_dump_regs(struct intel_dvo_device *dvo)
 	uint8_t val;
 
 	ns2501_readb(dvo, NS2501_FREQ_LO, &val);
-	DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
+	DRM_DEBUG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
 	ns2501_readb(dvo, NS2501_FREQ_HI, &val);
-	DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
+	DRM_DEBUG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
 	ns2501_readb(dvo, NS2501_REG8, &val);
-	DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
+	DRM_DEBUG_KMS("NS2501_REG8: 0x%02x\n", val);
 	ns2501_readb(dvo, NS2501_REG9, &val);
-	DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
+	DRM_DEBUG_KMS("NS2501_REG9: 0x%02x\n", val);
 	ns2501_readb(dvo, NS2501_REGC, &val);
-	DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
+	DRM_DEBUG_KMS("NS2501_REGC: 0x%02x\n", val);
 }
 
 static void ns2501_destroy(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 4debd32e3e4..7b3e9e93620 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -246,15 +246,15 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
 	uint8_t val;
 
 	sil164_readb(dvo, SIL164_FREQ_LO, &val);
-	DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
+	DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_FREQ_HI, &val);
-	DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
+	DRM_DEBUG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REG8, &val);
-	DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
+	DRM_DEBUG_KMS("SIL164_REG8: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REG9, &val);
-	DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
+	DRM_DEBUG_KMS("SIL164_REG9: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REGC, &val);
-	DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
+	DRM_DEBUG_KMS("SIL164_REGC: 0x%02x\n", val);
 }
 
 static void sil164_destroy(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index e17f1b07e91..12ea4b16469 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -267,33 +267,33 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
 	uint8_t val, val2;
 
 	tfp410_readb(dvo, TFP410_REV, &val);
-	DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_1, &val);
-	DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_CTL1: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_2, &val);
-	DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_CTL2: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_3, &val);
-	DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_CTL3: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_USERCFG, &val);
-	DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_USERCFG: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_DLY, &val);
-	DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_CTL, &val);
-	DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_TOP, &val);
-	DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
+	DRM_DEBUG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
 	tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
-	DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+	DRM_DEBUG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
 	tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
-	DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+	DRM_DEBUG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_H_RES_LO, &val);
 	tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
-	DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+	DRM_DEBUG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_V_RES_LO, &val);
 	tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
-	DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+	DRM_DEBUG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
 }
 
 static void tfp410_destroy(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
new file mode 100644
index 00000000000..4cf6d020d51
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Brad Volkin <bradley.d.volkin@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+
+/**
+ * DOC: i915 batch buffer command parser
+ *
+ * Motivation:
+ * Certain OpenGL features (e.g. transform feedback, performance monitoring)
+ * require userspace code to submit batches containing commands such as
+ * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
+ * generations of the hardware will noop these commands in "unsecure" batches
+ * (which includes all userspace batches submitted via i915) even though the
+ * commands may be safe and represent the intended programming model of the
+ * device.
+ *
+ * The software command parser is similar in operation to the command parsing
+ * done in hardware for unsecure batches. However, the software parser allows
+ * some operations that would be noop'd by hardware, if the parser determines
+ * the operation is safe, and submits the batch as "secure" to prevent
+ * hardware parsing.
+ *
+ * Threats:
+ * At a high level, the hardware (and software) checks attempt to prevent
+ * granting userspace undue privileges. There are three categories of
+ * privilege.
+ *
+ * First, commands which are explicitly defined as privileged or which should
+ * only be used by the kernel driver. The parser generally rejects such
+ * commands, though it may allow some from the drm master process.
+ *
+ * Second, commands which access registers. To support correct/enhanced
+ * userspace functionality, particularly certain OpenGL extensions, the parser
+ * provides a whitelist of registers which userspace may safely access (for
+ * both normal and drm master processes).
+ *
+ * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
+ * The parser always rejects such commands.
+ *
+ * The majority of the problematic commands fall in the MI_* range, with only a
+ * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
+ *
+ * Implementation:
+ * Each ring maintains tables of commands and registers which the parser uses
+ * in scanning batch buffers submitted to that ring.
+ *
+ * Since the set of commands that the parser must check for is significantly
+ * smaller than the number of commands supported, the parser tables contain
+ * only those commands required by the parser. This generally works because
+ * command opcode ranges have standard command length encodings. So for
+ * commands that the parser does not need to check, it can easily skip them.
+ * This is implemented via a per-ring length decoding vfunc.
+ *
+ * Unfortunately, there are a number of commands that do not follow the
+ * standard length encoding for their opcode range, primarily amongst the
+ * MI_* commands. To handle this, the parser provides a way to define
+ * explicit "skip" entries in the per-ring command tables.
+ *
+ * Other command table entries map fairly directly to high level categories
+ * mentioned above: rejected, master-only, register whitelist. The parser
+ * implements a number of checks, including the privileged memory checks,
+ * via a general bitmasking mechanism.
+ */
+
+static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
+{
+	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+	u32 subclient =
+		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+
+	if (client == INSTR_MI_CLIENT)
+		return 0x3F;
+	else if (client == INSTR_RC_CLIENT) {
+		if (subclient == INSTR_MEDIA_SUBCLIENT)
+			return 0xFFFF;
+		else
+			return 0xFF;
+	}
+
+	DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
+	return 0;
+}
+
+static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
+{
+	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+	u32 subclient =
+		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+
+	if (client == INSTR_MI_CLIENT)
+		return 0x3F;
+	else if (client == INSTR_RC_CLIENT) {
+		if (subclient == INSTR_MEDIA_SUBCLIENT)
+			return 0xFFF;
+		else
+			return 0xFF;
+	}
+
+	DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
+	return 0;
+}
+
+static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
+{
+	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+
+	if (client == INSTR_MI_CLIENT)
+		return 0x3F;
+	else if (client == INSTR_BC_CLIENT)
+		return 0xFF;
+
+	DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+	return 0;
+}
+
+static void validate_cmds_sorted(struct intel_ring_buffer *ring)
+{
+	int i;
+
+	if (!ring->cmd_tables || ring->cmd_table_count == 0)
+		return;
+
+	for (i = 0; i < ring->cmd_table_count; i++) {
+		const struct drm_i915_cmd_table *table = &ring->cmd_tables[i];
+		u32 previous = 0;
+		int j;
+
+		for (j = 0; j < table->count; j++) {
+			const struct drm_i915_cmd_descriptor *desc =
+				&table->table[j];
+			u32 curr = desc->cmd.value & desc->cmd.mask;
+
+			if (curr < previous)
+				DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
+					  ring->id, i, j, curr, previous);
+
+			previous = curr;
+		}
+	}
+}
+
+static void check_sorted(int ring_id, const u32 *reg_table, int reg_count)
+{
+	int i;
+	u32 previous = 0;
+
+	for (i = 0; i < reg_count; i++) {
+		u32 curr = reg_table[i];
+
+		if (curr < previous)
+			DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
+				  ring_id, i, curr, previous);
+
+		previous = curr;
+	}
+}
+
+static void validate_regs_sorted(struct intel_ring_buffer *ring)
+{
+	check_sorted(ring->id, ring->reg_table, ring->reg_count);
+	check_sorted(ring->id, ring->master_reg_table, ring->master_reg_count);
+}
+
+/**
+ * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
+ * @ring: the ringbuffer to initialize
+ *
+ * Optionally initializes fields related to batch buffer command parsing in the
+ * struct intel_ring_buffer based on whether the platform requires software
+ * command parsing.
+ */
+void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
+{
+	if (!IS_GEN7(ring->dev))
+		return;
+
+	switch (ring->id) {
+	case RCS:
+		ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+		break;
+	case VCS:
+		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+		break;
+	case BCS:
+		ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+		break;
+	case VECS:
+		/* VECS can use the same length_mask function as VCS */
+		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+		break;
+	default:
+		DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
+			  ring->id);
+		BUG();
+	}
+
+	validate_cmds_sorted(ring);
+	validate_regs_sorted(ring);
+}
+
+static const struct drm_i915_cmd_descriptor*
+find_cmd_in_table(const struct drm_i915_cmd_table *table,
+		  u32 cmd_header)
+{
+	int i;
+
+	for (i = 0; i < table->count; i++) {
+		const struct drm_i915_cmd_descriptor *desc = &table->table[i];
+		u32 masked_cmd = desc->cmd.mask & cmd_header;
+		u32 masked_value = desc->cmd.value & desc->cmd.mask;
+
+		if (masked_cmd == masked_value)
+			return desc;
+	}
+
+	return NULL;
+}
+
+/*
+ * Returns a pointer to a descriptor for the command specified by cmd_header.
+ *
+ * The caller must supply space for a default descriptor via the default_desc
+ * parameter. If no descriptor for the specified command exists in the ring's
+ * command parser tables, this function fills in default_desc based on the
+ * ring's default length encoding and returns default_desc.
+ */
+static const struct drm_i915_cmd_descriptor*
+find_cmd(struct intel_ring_buffer *ring,
+	 u32 cmd_header,
+	 struct drm_i915_cmd_descriptor *default_desc)
+{
+	u32 mask;
+	int i;
+
+	for (i = 0; i < ring->cmd_table_count; i++) {
+		const struct drm_i915_cmd_descriptor *desc;
+
+		desc = find_cmd_in_table(&ring->cmd_tables[i], cmd_header);
+		if (desc)
+			return desc;
+	}
+
+	mask = ring->get_cmd_length_mask(cmd_header);
+	if (!mask)
+		return NULL;
+
+	BUG_ON(!default_desc);
+	default_desc->flags = CMD_DESC_SKIP;
+	default_desc->length.mask = mask;
+
+	return default_desc;
+}
+
+static bool valid_reg(const u32 *table, int count, u32 addr)
+{
+	if (table && count != 0) {
+		int i;
+
+		for (i = 0; i < count; i++) {
+			if (table[i] == addr)
+				return true;
+		}
+	}
+
+	return false;
+}
+
+static u32 *vmap_batch(struct drm_i915_gem_object *obj)
+{
+	int i;
+	void *addr = NULL;
+	struct sg_page_iter sg_iter;
+	struct page **pages;
+
+	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+	if (pages == NULL) {
+		DRM_DEBUG_DRIVER("Failed to get space for pages\n");
+		goto finish;
+	}
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		pages[i] = sg_page_iter_page(&sg_iter);
+		i++;
+	}
+
+	addr = vmap(pages, i, 0, PAGE_KERNEL);
+	if (addr == NULL) {
+		DRM_DEBUG_DRIVER("Failed to vmap pages\n");
+		goto finish;
+	}
+
+finish:
+	if (pages)
+		drm_free_large(pages);
+	return (u32*)addr;
+}
+
+/**
+ * i915_needs_cmd_parser() - should a given ring use software command parsing?
+ * @ring: the ring in question
+ *
+ * Only certain platforms require software batch buffer command parsing, and
+ * only when enabled via module parameter.
+ *
+ * Return: true if the ring requires software command parsing
+ */
+bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
+{
+	/* No command tables indicates a platform without parsing */
+	if (!ring->cmd_tables)
+		return false;
+
+	return (i915.enable_cmd_parser == 1);
+}
+
+#define LENGTH_BIAS 2
+
+/**
+ * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * @ring: the ring on which the batch is to execute
+ * @batch_obj: the batch buffer in question
+ * @batch_start_offset: byte offset in the batch at which execution starts
+ * @is_master: is the submitting process the drm master?
+ *
+ * Parses the specified batch buffer looking for privilege violations as
+ * described in the overview.
+ * + * Return: non-zero if the parser finds violations or otherwise fails + */ +int i915_parse_cmds(struct intel_ring_buffer *ring, + struct drm_i915_gem_object *batch_obj, + u32 batch_start_offset, + bool is_master) +{ + int ret = 0; + u32 *cmd, *batch_base, *batch_end; + struct drm_i915_cmd_descriptor default_desc = { 0 }; + int needs_clflush = 0; + + ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush); + if (ret) { + DRM_DEBUG_DRIVER("CMD: failed to prep read\n"); + return ret; + } + + batch_base = vmap_batch(batch_obj); + if (!batch_base) { + DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n"); + i915_gem_object_unpin_pages(batch_obj); + return -ENOMEM; + } + + if (needs_clflush) + drm_clflush_virt_range((char *)batch_base, batch_obj->base.size); + + cmd = batch_base + (batch_start_offset / sizeof(*cmd)); + batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end)); + + while (cmd < batch_end) { + const struct drm_i915_cmd_descriptor *desc; + u32 length; + + if (*cmd == MI_BATCH_BUFFER_END) + break; + + desc = find_cmd(ring, *cmd, &default_desc); + if (!desc) { + DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n", + *cmd); + ret = -EINVAL; + break; + } + + if (desc->flags & CMD_DESC_FIXED) + length = desc->length.fixed; + else + length = ((*cmd & desc->length.mask) + LENGTH_BIAS); + + if ((batch_end - cmd) < length) { + DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%td\n", + *cmd, + length, + (unsigned long)(batch_end - cmd)); + ret = -EINVAL; + break; + } + + if (desc->flags & CMD_DESC_REJECT) { + DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd); + ret = -EINVAL; + break; + } + + if ((desc->flags & CMD_DESC_MASTER) && !is_master) { + DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n", + *cmd); + ret = -EINVAL; + break; + } + + if (desc->flags & CMD_DESC_REGISTER) { + u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask; + + if (!valid_reg(ring->reg_table, + ring->reg_count, reg_addr)) { + if (!is_master || + !valid_reg(ring->master_reg_table, + ring->master_reg_count, + reg_addr)) { + DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n", + reg_addr, + *cmd, + ring->id); + ret = -EINVAL; + break; + } + } + } + + if (desc->flags & CMD_DESC_BITMASK) { + int i; + + for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) { + u32 dword; + + if (desc->bits[i].mask == 0) + break; + + dword = cmd[desc->bits[i].offset] & + desc->bits[i].mask; + + if (dword != desc->bits[i].expected) { + DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n", + *cmd, + desc->bits[i].mask, + desc->bits[i].expected, + dword, ring->id); + ret = -EINVAL; + break; + } + } + + if (ret) + break; + } + + cmd += length; + } + + if (cmd >= batch_end) { + DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n"); + ret = -EINVAL; + } + + vunmap(batch_base); + + i915_gem_object_unpin_pages(batch_obj); + + return ret; +} diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b2b46c52294..195fe5bc0aa 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -98,7 +98,7 @@ static const char *get_pin_flag(struct drm_i915_gem_object *obj) { if (obj->user_pin_count > 0) return "P"; - else if (obj->pin_count > 0) + else if (i915_gem_obj_is_pinned(obj)) return "p"; else return " "; @@ -123,6 +123,8 @@ static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { struct i915_vma *vma; + int 
pin_count = 0; + seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s", &obj->base, get_pin_flag(obj), @@ -139,8 +141,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); if (obj->base.name) seq_printf(m, " (name: %d)", obj->base.name); - if (obj->pin_count) - seq_printf(m, " (pinned x %d)", obj->pin_count); + list_for_each_entry(vma, &obj->vma_list, vma_link) + if (vma->pin_count > 0) + pin_count++; + seq_printf(m, " (pinned x %d)", pin_count); if (obj->pin_display) seq_printf(m, " (display)"); if (obj->fence_reg != I915_FENCE_REG_NONE) @@ -295,28 +299,62 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) } while (0) struct file_stats { + struct drm_i915_file_private *file_priv; int count; - size_t total, active, inactive, unbound; + size_t total, unbound; + size_t global, shared; + size_t active, inactive; }; static int per_file_stats(int id, void *ptr, void *data) { struct drm_i915_gem_object *obj = ptr; struct file_stats *stats = data; + struct i915_vma *vma; stats->count++; stats->total += obj->base.size; - if (i915_gem_obj_ggtt_bound(obj)) { - if (!list_empty(&obj->ring_list)) - stats->active += obj->base.size; - else - stats->inactive += obj->base.size; + if (obj->base.name || obj->base.dma_buf) + stats->shared += obj->base.size; + + if (USES_FULL_PPGTT(obj->base.dev)) { + list_for_each_entry(vma, &obj->vma_list, vma_link) { + struct i915_hw_ppgtt *ppgtt; + + if (!drm_mm_node_allocated(&vma->node)) + continue; + + if (i915_is_ggtt(vma->vm)) { + stats->global += obj->base.size; + continue; + } + + ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base); + if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv) + continue; + + if (obj->ring) /* XXX per-vma statistic */ + stats->active += obj->base.size; + else + stats->inactive += obj->base.size; + + return 0; + } } else { - if (!list_empty(&obj->global_list)) - stats->unbound += obj->base.size; + if (i915_gem_obj_ggtt_bound(obj)) { + stats->global += obj->base.size; + if (obj->ring) + stats->active += obj->base.size; + else + stats->inactive += obj->base.size; + return 0; + } } + if (!list_empty(&obj->global_list)) + stats->unbound += obj->base.size; + return 0; } @@ -407,6 +445,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) struct task_struct *task; memset(&stats, 0, sizeof(stats)); + stats.file_priv = file->driver_priv; idr_for_each(&file->object_idr, per_file_stats, &stats); /* * Although we have a valid reference on file->pid, that does @@ -416,12 +455,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data) */ rcu_read_lock(); task = pid_task(file->pid, PIDTYPE_PID); - seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n", + seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", task ? 
task->comm : "<unknown>", stats.count, stats.total, stats.active, stats.inactive, + stats.global, + stats.shared, stats.unbound); rcu_read_unlock(); } @@ -447,7 +488,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data) total_obj_size = total_gtt_size = count = 0; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if (list == PINNED_LIST && obj->pin_count == 0) + if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj)) continue; seq_puts(m, " "); @@ -520,7 +561,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; struct drm_i915_gem_request *gem_request; int ret, count, i; @@ -565,7 +606,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; int ret, i; @@ -588,7 +629,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; int ret, i, pipe; @@ -598,7 +639,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data) intel_runtime_pm_get(dev_priv); if (INTEL_INFO(dev)->gen >= 8) { - int i; seq_printf(m, "Master Interrupt Control:\t%08x\n", I915_READ(GEN8_MASTER_IRQ)); @@ -611,16 +651,16 @@ static int i915_interrupt_info(struct seq_file *m, void *data) i, I915_READ(GEN8_GT_IER(i))); } - for_each_pipe(i) { + for_each_pipe(pipe) { seq_printf(m, "Pipe %c IMR:\t%08x\n", - pipe_name(i), - I915_READ(GEN8_DE_PIPE_IMR(i))); + pipe_name(pipe), + I915_READ(GEN8_DE_PIPE_IMR(pipe))); seq_printf(m, "Pipe %c IIR:\t%08x\n", - pipe_name(i), - I915_READ(GEN8_DE_PIPE_IIR(i))); + pipe_name(pipe), + I915_READ(GEN8_DE_PIPE_IIR(pipe))); seq_printf(m, "Pipe %c IER:\t%08x\n", - pipe_name(i), - I915_READ(GEN8_DE_PIPE_IER(i))); + pipe_name(pipe), + I915_READ(GEN8_DE_PIPE_IER(pipe))); } seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", @@ -712,8 +752,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data) seq_printf(m, "Graphics Interrupt mask: %08x\n", I915_READ(GTIMR)); } - seq_printf(m, "Interrupts received: %d\n", - atomic_read(&dev_priv->irq_received)); for_each_ring(ring, dev_priv, i) { if (INTEL_INFO(dev)->gen >= 6) { seq_printf(m, @@ -732,7 +770,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int i, ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -761,7 +799,7 @@ static int i915_hws_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; const u32 *hws; int i; @@ -872,7 +910,7 @@ static int 
i915_next_seqno_get(void *data, u64 *val) { struct drm_device *dev = data; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -909,7 +947,7 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u16 crstanddelay; int ret; @@ -932,7 +970,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int ret = 0; intel_runtime_pm_get(dev_priv); @@ -1025,7 +1063,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) max_freq * GT_FREQUENCY_MULTIPLIER); seq_printf(m, "Max overclocked frequency: %dMHz\n", - dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER); + dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER); } else if (IS_VALLEYVIEW(dev)) { u32 freq_sts, val; @@ -1058,7 +1096,7 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 delayfreq; int ret, i; @@ -1089,7 +1127,7 @@ static int i915_inttoext_table(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 inttoext; int ret, i; @@ -1113,7 +1151,7 @@ static int ironlake_drpc_info(struct seq_file *m) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 rgvmodectl, rstdbyctl; u16 crstandvid; int ret; @@ -1339,13 +1377,15 @@ static int i915_fbc_status(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; if (!HAS_FBC(dev)) { seq_puts(m, "FBC unsupported on this chipset\n"); return 0; } + intel_runtime_pm_get(dev_priv); + if (intel_fbc_enabled(dev)) { seq_puts(m, "FBC enabled\n"); } else { @@ -1389,6 +1429,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused) } seq_putc(m, '\n'); } + + intel_runtime_pm_put(dev_priv); + return 0; } @@ -1403,11 +1446,15 @@ static int i915_ips_status(struct seq_file *m, void *unused) return 0; } + intel_runtime_pm_get(dev_priv); + if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE) seq_puts(m, "enabled\n"); else seq_puts(m, "disabled\n"); + intel_runtime_pm_put(dev_priv); + return 0; } @@ -1415,9 +1462,11 @@ static int i915_sr_status(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; bool sr_enabled = false; + 
intel_runtime_pm_get(dev_priv); + if (HAS_PCH_SPLIT(dev)) sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) @@ -1427,6 +1476,8 @@ static int i915_sr_status(struct seq_file *m, void *unused) else if (IS_PINEVIEW(dev)) sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; + intel_runtime_pm_put(dev_priv); + seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" : "disabled"); @@ -1437,7 +1488,7 @@ static int i915_emon_status(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long temp, chipset, gfx; int ret; @@ -1465,8 +1516,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - int ret; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret = 0; int gpu_freq, ia_freq; if (!(IS_GEN6(dev) || IS_GEN7(dev))) { @@ -1474,17 +1525,18 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) return 0; } + intel_runtime_pm_get(dev_priv); + flush_delayed_work(&dev_priv->rps.delayed_resume_work); ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); if (ret) - return ret; - intel_runtime_pm_get(dev_priv); + goto out; seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); - for (gpu_freq = dev_priv->rps.min_delay; - gpu_freq <= dev_priv->rps.max_delay; + for (gpu_freq = dev_priv->rps.min_freq_softlimit; + gpu_freq <= dev_priv->rps.max_freq_softlimit; gpu_freq++) { ia_freq = gpu_freq; sandybridge_pcode_read(dev_priv, @@ -1496,17 +1548,18 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) ((ia_freq >> 8) & 0xff) * 100); } - intel_runtime_pm_put(dev_priv); mutex_unlock(&dev_priv->rps.hw_lock); - return 0; +out: + intel_runtime_pm_put(dev_priv); + return ret; } static int i915_gfxec(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -1526,7 +1579,7 @@ static int i915_opregion(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL); int ret; @@ -1600,7 +1653,7 @@ static int i915_context_status(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; struct i915_hw_context *ctx; int ret, i; @@ -1733,6 +1786,17 @@ static int i915_swizzle_info(struct seq_file *m, void *data) return 0; } +static int per_file_ctx(int id, void *ptr, void *data) +{ + struct i915_hw_context *ctx = ptr; + struct seq_file *m = data; + struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx); + + ppgtt->debug_dump(ppgtt, m); + + return 0; +} 
+ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1744,7 +1808,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) return; seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages); - seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages); + seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries); for_each_ring(ring, dev_priv, unused) { seq_printf(m, "%s\n", ring->name); for (i = 0; i < 4; i++) { @@ -1762,6 +1826,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; + struct drm_file *file; int i; if (INTEL_INFO(dev)->gen == 6) @@ -1780,6 +1845,20 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) seq_puts(m, "aliasing PPGTT:\n"); seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); + + ppgtt->debug_dump(ppgtt, m); + } else + return; + + list_for_each_entry_reverse(file, &dev->filelist, lhead) { + struct drm_i915_file_private *file_priv = file->driver_priv; + struct i915_hw_ppgtt *pvt_ppgtt; + + pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx); + seq_printf(m, "proc: %s\n", + get_pid_task(file->pid, PIDTYPE_PID)->comm); + seq_puts(m, " default context:\n"); + idr_for_each(&file_priv->context_idr, per_file_ctx, m); } seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); } @@ -1892,6 +1971,47 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) return 0; } +static int i915_sink_crc(struct seq_file *m, void *data) +{ + struct drm_info_node *node = m->private; + struct drm_device *dev = node->minor->dev; + struct intel_encoder *encoder; + struct intel_connector *connector; + struct intel_dp *intel_dp = NULL; + int ret; + u8 crc[6]; + + drm_modeset_lock_all(dev); + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + + if (connector->base.dpms != DRM_MODE_DPMS_ON) + continue; + + if (!connector->base.encoder) + continue; + + encoder = to_intel_encoder(connector->base.encoder); + if (encoder->type != INTEL_OUTPUT_EDP) + continue; + + intel_dp = enc_to_intel_dp(&encoder->base); + + ret = intel_dp_sink_crc(intel_dp, crc); + if (ret) + goto out; + + seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", + crc[0], crc[1], crc[2], + crc[3], crc[4], crc[5]); + goto out; + } + ret = -ENODEV; +out: + drm_modeset_unlock_all(dev); + return ret; +} + static int i915_energy_uJ(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; @@ -1903,12 +2023,16 @@ static int i915_energy_uJ(struct seq_file *m, void *data) if (INTEL_INFO(dev)->gen < 6) return -ENODEV; + intel_runtime_pm_get(dev_priv); + rdmsrl(MSR_RAPL_POWER_UNIT, power); power = (power & 0x1f00) >> 8; units = 1000000 / (1 << power); /* convert to uJ */ power = I915_READ(MCH_SECP_NRG_STTS); power *= units; + intel_runtime_pm_put(dev_priv); + seq_printf(m, "%llu", (long long unsigned)power); return 0; @@ -1925,15 +2049,9 @@ static int i915_pc8_status(struct seq_file *m, void *unused) return 0; } - mutex_lock(&dev_priv->pc8.lock); - seq_printf(m, "Requirements met: %s\n", - yesno(dev_priv->pc8.requirements_met)); - seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle)); - seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count); + seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); seq_printf(m, "IRQs disabled: %s\n", - yesno(dev_priv->pc8.irqs_disabled)); - seq_printf(m, "Enabled: %s\n", 
yesno(dev_priv->pc8.enabled)); - mutex_unlock(&dev_priv->pc8.lock); + yesno(dev_priv->pm.irqs_disabled)); return 0; } @@ -1961,6 +2079,28 @@ static const char *power_domain_str(enum intel_display_power_domain domain) return "TRANSCODER_C"; case POWER_DOMAIN_TRANSCODER_EDP: return "TRANSCODER_EDP"; + case POWER_DOMAIN_PORT_DDI_A_2_LANES: + return "PORT_DDI_A_2_LANES"; + case POWER_DOMAIN_PORT_DDI_A_4_LANES: + return "PORT_DDI_A_4_LANES"; + case POWER_DOMAIN_PORT_DDI_B_2_LANES: + return "PORT_DDI_B_2_LANES"; + case POWER_DOMAIN_PORT_DDI_B_4_LANES: + return "PORT_DDI_B_4_LANES"; + case POWER_DOMAIN_PORT_DDI_C_2_LANES: + return "PORT_DDI_C_2_LANES"; + case POWER_DOMAIN_PORT_DDI_C_4_LANES: + return "PORT_DDI_C_4_LANES"; + case POWER_DOMAIN_PORT_DDI_D_2_LANES: + return "PORT_DDI_D_2_LANES"; + case POWER_DOMAIN_PORT_DDI_D_4_LANES: + return "PORT_DDI_D_4_LANES"; + case POWER_DOMAIN_PORT_DSI: + return "PORT_DSI"; + case POWER_DOMAIN_PORT_CRT: + return "PORT_CRT"; + case POWER_DOMAIN_PORT_OTHER: + return "PORT_OTHER"; case POWER_DOMAIN_VGA: return "VGA"; case POWER_DOMAIN_AUDIO: @@ -2008,6 +2148,215 @@ static int i915_power_domain_info(struct seq_file *m, void *unused) return 0; } +static void intel_seq_print_mode(struct seq_file *m, int tabs, + struct drm_display_mode *mode) +{ + int i; + + for (i = 0; i < tabs; i++) + seq_putc(m, '\t'); + + seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", + mode->base.id, mode->name, + mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, + mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, + mode->vsync_end, mode->vtotal, + mode->type, mode->flags); +} + +static void intel_encoder_info(struct seq_file *m, + struct intel_crtc *intel_crtc, + struct intel_encoder *intel_encoder) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_crtc *crtc = &intel_crtc->base; + struct intel_connector *intel_connector; + struct drm_encoder *encoder; + + encoder = &intel_encoder->base; + seq_printf(m, "\tencoder %d: type: %s, connectors:\n", + encoder->base.id, drm_get_encoder_name(encoder)); + for_each_connector_on_encoder(dev, encoder, intel_connector) { + struct drm_connector *connector = &intel_connector->base; + seq_printf(m, "\t\tconnector %d: type: %s, status: %s", + connector->base.id, + drm_get_connector_name(connector), + drm_get_connector_status_name(connector->status)); + if (connector->status == connector_status_connected) { + struct drm_display_mode *mode = &crtc->mode; + seq_printf(m, ", mode:\n"); + intel_seq_print_mode(m, 2, mode); + } else { + seq_putc(m, '\n'); + } + } +} + +static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_crtc *crtc = &intel_crtc->base; + struct intel_encoder *intel_encoder; + + seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", + crtc->primary->fb->base.id, crtc->x, crtc->y, + crtc->primary->fb->width, crtc->primary->fb->height); + for_each_encoder_on_crtc(dev, crtc, intel_encoder) + intel_encoder_info(m, intel_crtc, intel_encoder); +} + +static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) +{ + struct drm_display_mode *mode = panel->fixed_mode; + + seq_printf(m, "\tfixed mode:\n"); + intel_seq_print_mode(m, 2, mode); +} + +static void intel_dp_info(struct seq_file *m, + struct 
intel_connector *intel_connector) +{ + struct intel_encoder *intel_encoder = intel_connector->encoder; + struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + + seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); + seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" : + "no"); + if (intel_encoder->type == INTEL_OUTPUT_EDP) + intel_panel_info(m, &intel_connector->panel); +} + +static void intel_hdmi_info(struct seq_file *m, + struct intel_connector *intel_connector) +{ + struct intel_encoder *intel_encoder = intel_connector->encoder; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); + + seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" : + "no"); +} + +static void intel_lvds_info(struct seq_file *m, + struct intel_connector *intel_connector) +{ + intel_panel_info(m, &intel_connector->panel); +} + +static void intel_connector_info(struct seq_file *m, + struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_encoder *intel_encoder = intel_connector->encoder; + struct drm_display_mode *mode; + + seq_printf(m, "connector %d: type %s, status: %s\n", + connector->base.id, drm_get_connector_name(connector), + drm_get_connector_status_name(connector->status)); + if (connector->status == connector_status_connected) { + seq_printf(m, "\tname: %s\n", connector->display_info.name); + seq_printf(m, "\tphysical dimensions: %dx%dmm\n", + connector->display_info.width_mm, + connector->display_info.height_mm); + seq_printf(m, "\tsubpixel order: %s\n", + drm_get_subpixel_order_name(connector->display_info.subpixel_order)); + seq_printf(m, "\tCEA rev: %d\n", + connector->display_info.cea_rev); + } + if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || + intel_encoder->type == INTEL_OUTPUT_EDP) + intel_dp_info(m, intel_connector); + else if (intel_encoder->type == INTEL_OUTPUT_HDMI) + intel_hdmi_info(m, intel_connector); + else if (intel_encoder->type == INTEL_OUTPUT_LVDS) + intel_lvds_info(m, intel_connector); + + seq_printf(m, "\tmodes:\n"); + list_for_each_entry(mode, &connector->modes, head) + intel_seq_print_mode(m, 2, mode); +} + +static bool cursor_active(struct drm_device *dev, int pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 state; + + if (IS_845G(dev) || IS_I865G(dev)) + state = I915_READ(_CURACNTR) & CURSOR_ENABLE; + else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) + state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; + else + state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE; + + return state; +} + +static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pos; + + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) + pos = I915_READ(CURPOS_IVB(pipe)); + else + pos = I915_READ(CURPOS(pipe)); + + *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK; + if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT)) + *x = -*x; + + *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK; + if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT)) + *y = -*y; + + return cursor_active(dev, pipe); +} + +static int i915_display_info(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *crtc; + struct drm_connector *connector; + + intel_runtime_pm_get(dev_priv); + drm_modeset_lock_all(dev); + 
seq_printf(m, "CRTC info\n"); + seq_printf(m, "---------\n"); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { + bool active; + int x, y; + + seq_printf(m, "CRTC %d: pipe: %c, active: %s\n", + crtc->base.base.id, pipe_name(crtc->pipe), + yesno(crtc->active)); + if (crtc->active) { + intel_crtc_info(m, crtc); + + active = cursor_position(dev, crtc->pipe, &x, &y); + seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n", + yesno(crtc->cursor_visible), + x, y, crtc->cursor_addr, + yesno(active)); + } + } + + seq_printf(m, "\n"); + seq_printf(m, "Connector info\n"); + seq_printf(m, "--------------\n"); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + intel_connector_info(m, connector); + } + drm_modeset_unlock_all(dev); + intel_runtime_pm_put(dev_priv); + + return 0; +} + struct pipe_crc_info { const char *name; struct drm_device *dev; @@ -2332,8 +2681,6 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, if (need_stable_symbols) { uint32_t tmp = I915_READ(PORT_DFT2_G4X); - WARN_ON(!IS_G4X(dev)); - tmp |= DC_BALANCE_RESET_VLV; if (pipe == PIPE_A) tmp |= PIPE_A_SCRAMBLE_RESET; @@ -2756,11 +3103,179 @@ static const struct file_operations i915_display_crc_ctl_fops = { .write = display_crc_ctl_write }; +static void wm_latency_show(struct seq_file *m, const uint16_t wm[5]) +{ + struct drm_device *dev = m->private; + int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4; + int level; + + drm_modeset_lock_all(dev); + + for (level = 0; level < num_levels; level++) { + unsigned int latency = wm[level]; + + /* WM1+ latency values in 0.5us units */ + if (level > 0) + latency *= 5; + + seq_printf(m, "WM%d %u (%u.%u usec)\n", + level, wm[level], + latency / 10, latency % 10); + } + + drm_modeset_unlock_all(dev); +} + +static int pri_wm_latency_show(struct seq_file *m, void *data) +{ + struct drm_device *dev = m->private; + + wm_latency_show(m, to_i915(dev)->wm.pri_latency); + + return 0; +} + +static int spr_wm_latency_show(struct seq_file *m, void *data) +{ + struct drm_device *dev = m->private; + + wm_latency_show(m, to_i915(dev)->wm.spr_latency); + + return 0; +} + +static int cur_wm_latency_show(struct seq_file *m, void *data) +{ + struct drm_device *dev = m->private; + + wm_latency_show(m, to_i915(dev)->wm.cur_latency); + + return 0; +} + +static int pri_wm_latency_open(struct inode *inode, struct file *file) +{ + struct drm_device *dev = inode->i_private; + + if (!HAS_PCH_SPLIT(dev)) + return -ENODEV; + + return single_open(file, pri_wm_latency_show, dev); +} + +static int spr_wm_latency_open(struct inode *inode, struct file *file) +{ + struct drm_device *dev = inode->i_private; + + if (!HAS_PCH_SPLIT(dev)) + return -ENODEV; + + return single_open(file, spr_wm_latency_show, dev); +} + +static int cur_wm_latency_open(struct inode *inode, struct file *file) +{ + struct drm_device *dev = inode->i_private; + + if (!HAS_PCH_SPLIT(dev)) + return -ENODEV; + + return single_open(file, cur_wm_latency_show, dev); +} + +static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp, uint16_t wm[5]) +{ + struct seq_file *m = file->private_data; + struct drm_device *dev = m->private; + uint16_t new[5] = { 0 }; + int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 
5 : 4; + int level; + int ret; + char tmp[32]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + + tmp[len] = '\0'; + + ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]); + if (ret != num_levels) + return -EINVAL; + + drm_modeset_lock_all(dev); + + for (level = 0; level < num_levels; level++) + wm[level] = new[level]; + + drm_modeset_unlock_all(dev); + + return len; +} + + +static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_device *dev = m->private; + + return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency); +} + +static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_device *dev = m->private; + + return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency); +} + +static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_device *dev = m->private; + + return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency); +} + +static const struct file_operations i915_pri_wm_latency_fops = { + .owner = THIS_MODULE, + .open = pri_wm_latency_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = pri_wm_latency_write +}; + +static const struct file_operations i915_spr_wm_latency_fops = { + .owner = THIS_MODULE, + .open = spr_wm_latency_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = spr_wm_latency_write +}; + +static const struct file_operations i915_cur_wm_latency_fops = { + .owner = THIS_MODULE, + .open = cur_wm_latency_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = cur_wm_latency_write +}; + static int i915_wedged_get(void *data, u64 *val) { struct drm_device *dev = data; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; *val = atomic_read(&dev_priv->gpu_error.reset_counter); @@ -2772,9 +3287,8 @@ i915_wedged_set(void *data, u64 val) { struct drm_device *dev = data; - DRM_INFO("Manually setting wedged to %llu\n", val); - i915_handle_error(dev, val); - + i915_handle_error(dev, val, + "Manually setting wedged to %llu", val); return 0; } @@ -2786,7 +3300,7 @@ static int i915_ring_stop_get(void *data, u64 *val) { struct drm_device *dev = data; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; *val = dev_priv->gpu_error.stop_rings; @@ -2929,7 +3443,7 @@ i915_drop_caches_set(void *data, u64 val) list_for_each_entry(vm, &dev_priv->vm_list, global_link) { list_for_each_entry_safe(vma, x, &vm->inactive_list, mm_list) { - if (vma->obj->pin_count) + if (vma->pin_count) continue; ret = i915_vma_unbind(vma); @@ -2963,7 +3477,7 @@ static int i915_max_freq_get(void *data, u64 *val) { struct drm_device *dev = data; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int ret; if (!(IS_GEN6(dev) || IS_GEN7(dev))) @@ -2976,9 +3490,9 @@ i915_max_freq_get(void *data, u64 *val) return ret; if (IS_VALLEYVIEW(dev)) - *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay); + *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); else - *val = 
dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; + *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER; mutex_unlock(&dev_priv->rps.hw_lock); return 0; @@ -2989,6 +3503,7 @@ i915_max_freq_set(void *data, u64 val) { struct drm_device *dev = data; struct drm_i915_private *dev_priv = dev->dev_private; + u32 rp_state_cap, hw_max, hw_min; int ret; if (!(IS_GEN6(dev) || IS_GEN7(dev))) @@ -3007,14 +3522,29 @@ i915_max_freq_set(void *data, u64 val) */ if (IS_VALLEYVIEW(dev)) { val = vlv_freq_opcode(dev_priv, val); - dev_priv->rps.max_delay = val; - valleyview_set_rps(dev, val); + + hw_max = valleyview_rps_max_freq(dev_priv); + hw_min = valleyview_rps_min_freq(dev_priv); } else { do_div(val, GT_FREQUENCY_MULTIPLIER); - dev_priv->rps.max_delay = val; - gen6_set_rps(dev, val); + + rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); + hw_max = dev_priv->rps.max_freq; + hw_min = (rp_state_cap >> 16) & 0xff; + } + + if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { + mutex_unlock(&dev_priv->rps.hw_lock); + return -EINVAL; } + dev_priv->rps.max_freq_softlimit = val; + + if (IS_VALLEYVIEW(dev)) + valleyview_set_rps(dev, val); + else + gen6_set_rps(dev, val); + mutex_unlock(&dev_priv->rps.hw_lock); return 0; @@ -3028,7 +3558,7 @@ static int i915_min_freq_get(void *data, u64 *val) { struct drm_device *dev = data; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int ret; if (!(IS_GEN6(dev) || IS_GEN7(dev))) @@ -3041,9 +3571,9 @@ i915_min_freq_get(void *data, u64 *val) return ret; if (IS_VALLEYVIEW(dev)) - *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay); + *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); else - *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; + *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER; mutex_unlock(&dev_priv->rps.hw_lock); return 0; @@ -3054,6 +3584,7 @@ i915_min_freq_set(void *data, u64 val) { struct drm_device *dev = data; struct drm_i915_private *dev_priv = dev->dev_private; + u32 rp_state_cap, hw_max, hw_min; int ret; if (!(IS_GEN6(dev) || IS_GEN7(dev))) @@ -3072,13 +3603,29 @@ i915_min_freq_set(void *data, u64 val) */ if (IS_VALLEYVIEW(dev)) { val = vlv_freq_opcode(dev_priv, val); - dev_priv->rps.min_delay = val; - valleyview_set_rps(dev, val); + + hw_max = valleyview_rps_max_freq(dev_priv); + hw_min = valleyview_rps_min_freq(dev_priv); } else { do_div(val, GT_FREQUENCY_MULTIPLIER); - dev_priv->rps.min_delay = val; - gen6_set_rps(dev, val); + + rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); + hw_max = dev_priv->rps.max_freq; + hw_min = (rp_state_cap >> 16) & 0xff; + } + + if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) { + mutex_unlock(&dev_priv->rps.hw_lock); + return -EINVAL; } + + dev_priv->rps.min_freq_softlimit = val; + + if (IS_VALLEYVIEW(dev)) + valleyview_set_rps(dev, val); + else + gen6_set_rps(dev, val); + mutex_unlock(&dev_priv->rps.hw_lock); return 0; @@ -3092,7 +3639,7 @@ static int i915_cache_sharing_get(void *data, u64 *val) { struct drm_device *dev = data; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 snpcr; int ret; @@ -3152,7 +3699,6 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) if (INTEL_INFO(dev)->gen < 6) return 0; - intel_runtime_pm_get(dev_priv); gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); return 0; @@ -3167,7 +3713,6 @@ static int i915_forcewake_release(struct inode *inode, struct file *file) 
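Pulled out as a pure predicate, an editorial sketch of the validation the reworked i915_max_freq_set() above now performs (helper name invented; values are in hardware ratio units, i.e. already divided by GT_FREQUENCY_MULTIPLIER or converted by vlv_freq_opcode()):

static bool new_max_is_valid(u8 val, u8 hw_min, u8 hw_max, u8 min_soft)
{
	/* the new ceiling must lie inside the hardware range... */
	if (val < hw_min || val > hw_max)
		return false;

	/* ...and must not drop below the current minimum softlimit */
	return val >= min_soft;
}

i915_min_freq_set() mirrors this with the inequality flipped against max_freq_softlimit.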
return 0; gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv); return 0; } @@ -3248,9 +3793,11 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_dpio", i915_dpio_info, 0}, {"i915_llc", i915_llc, 0}, {"i915_edp_psr_status", i915_edp_psr_status, 0}, + {"i915_sink_crc_eDP1", i915_sink_crc, 0}, {"i915_energy_uJ", i915_energy_uJ, 0}, {"i915_pc8_status", i915_pc8_status, 0}, {"i915_power_domain_info", i915_power_domain_info, 0}, + {"i915_display_info", i915_display_info, 0}, }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) @@ -3269,6 +3816,9 @@ static const struct i915_debugfs_files { {"i915_error_state", &i915_error_state_fops}, {"i915_next_seqno", &i915_next_seqno_fops}, {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, + {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, + {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, + {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, }; void intel_display_crc_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 15a74f979b4..96177eec0a0 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -82,7 +82,7 @@ intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg) void i915_update_dri1_breadcrumb(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; /* @@ -103,7 +103,7 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev) static void i915_write_hws_pga(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 addr; addr = dev_priv->status_page_dmah->busaddr; @@ -118,7 +118,7 @@ static void i915_write_hws_pga(struct drm_device *dev) */ static void i915_free_hws(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = LP_RING(dev_priv); if (dev_priv->status_page_dmah) { @@ -137,7 +137,7 @@ static void i915_free_hws(struct drm_device *dev) void i915_kernel_lost_context(struct drm_device * dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; struct intel_ring_buffer *ring = LP_RING(dev_priv); @@ -164,7 +164,7 @@ void i915_kernel_lost_context(struct drm_device * dev) static int i915_dma_cleanup(struct drm_device * dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int i; /* Make sure interrupts are disabled here because the uninstall ioctl @@ -188,7 +188,7 @@ static int i915_dma_cleanup(struct drm_device * dev) static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; int ret; @@ -233,7 +233,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) static int i915_dma_resume(struct drm_device * dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = LP_RING(dev_priv); DRM_DEBUG_DRIVER("%s\n", __func__); @@ -357,7 +357,7 @@ static int validate_cmd(int cmd) static int 
i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int i, ret; if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8) @@ -431,7 +431,7 @@ i915_emit_box(struct drm_device *dev, static void i915_emit_breadcrumb(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; dev_priv->dri1.counter++; @@ -547,7 +547,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, static int i915_dispatch_flip(struct drm_device * dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; int ret; @@ -625,10 +625,9 @@ static int i915_flush_ioctl(struct drm_device *dev, void *data, static int i915_batchbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) - master_priv->sarea_priv; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_master_private *master_priv; + drm_i915_sarea_t *sarea_priv; drm_i915_batchbuffer_t *batch = data; int ret; struct drm_clip_rect *cliprects = NULL; @@ -636,6 +635,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, if (drm_core_check_feature(dev, DRIVER_MODESET)) return -ENODEV; + master_priv = dev->primary->master->driver_priv; + sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv; + if (!dev_priv->dri1.allow_batchbuffer) { DRM_ERROR("Batchbuffer ioctl disabled\n"); return -EINVAL; @@ -681,10 +683,9 @@ fail_free: static int i915_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) - master_priv->sarea_priv; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_master_private *master_priv; + drm_i915_sarea_t *sarea_priv; drm_i915_cmdbuffer_t *cmdbuf = data; struct drm_clip_rect *cliprects = NULL; void *batch_data; @@ -696,6 +697,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, if (drm_core_check_feature(dev, DRIVER_MODESET)) return -ENODEV; + master_priv = dev->primary->master->driver_priv; + sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv; + RING_LOCK_TEST_WITH_RETURN(dev, file_priv); if (cmdbuf->num_cliprects < 0) @@ -749,7 +753,7 @@ fail_batch_free: static int i915_emit_irq(struct drm_device * dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; i915_kernel_lost_context(dev); @@ -775,7 +779,7 @@ static int i915_emit_irq(struct drm_device * dev) static int i915_wait_irq(struct drm_device * dev, int irq_nr) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; int ret = 0; struct intel_ring_buffer *ring = 
LP_RING(dev_priv); @@ -812,7 +816,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) static int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; drm_i915_irq_emit_t *emit = data; int result; @@ -843,7 +847,7 @@ static int i915_irq_emit(struct drm_device *dev, void *data, static int i915_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; drm_i915_irq_wait_t *irqwait = data; if (drm_core_check_feature(dev, DRIVER_MODESET)) @@ -860,7 +864,7 @@ static int i915_irq_wait(struct drm_device *dev, void *data, static int i915_vblank_pipe_get(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; drm_i915_vblank_pipe_t *pipe = data; if (drm_core_check_feature(dev, DRIVER_MODESET)) @@ -921,7 +925,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data, static int i915_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; drm_i915_getparam_t *param = data; int value; @@ -990,7 +994,7 @@ static int i915_getparam(struct drm_device *dev, void *data, value = HAS_WT(dev); break; case I915_PARAM_HAS_ALIASING_PPGTT: - value = dev_priv->mm.aliasing_ppgtt ? 1 : 0; + value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev); break; case I915_PARAM_HAS_WAIT_TIMEOUT: value = 1; @@ -1029,7 +1033,7 @@ static int i915_getparam(struct drm_device *dev, void *data, static int i915_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; drm_i915_setparam_t *param = data; if (!dev_priv) { @@ -1064,7 +1068,7 @@ static int i915_setparam(struct drm_device *dev, void *data, static int i915_set_status_page(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; drm_i915_hws_addr_t *hws = data; struct intel_ring_buffer *ring; @@ -1132,7 +1136,7 @@ static int i915_get_bridge_dev(struct drm_device *dev) static int intel_alloc_mchbar_resource(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; u32 temp_lo, temp_hi = 0; u64 mchbar_addr; @@ -1178,11 +1182,14 @@ intel_alloc_mchbar_resource(struct drm_device *dev) static void intel_setup_mchbar(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; u32 temp; bool enabled; + if (IS_VALLEYVIEW(dev)) + return; + dev_priv->mchbar_need_disable = false; if (IS_I915G(dev) || IS_I915GM(dev)) { @@ -1215,7 +1222,7 @@ intel_setup_mchbar(struct drm_device *dev) static void intel_teardown_mchbar(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; u32 temp; @@ -1317,12 +1324,12 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) goto cleanup_vga_switcheroo; + intel_power_domains_init_hw(dev_priv); + ret = drm_irq_install(dev); if (ret) goto cleanup_gem_stolen; - intel_power_domains_init_hw(dev); - /* Important: The output setup functions called by modeset_init need * working irqs for e.g. gmbus and dp aux transfers. */ intel_modeset_init(dev); @@ -1339,7 +1346,7 @@ static int i915_load_modeset_init(struct drm_device *dev) /* FIXME: do pre/post-mode set stuff in core KMS code */ dev->vblank_disable_allowed = true; if (INTEL_INFO(dev)->num_pipes == 0) { - intel_display_power_put(dev, POWER_DOMAIN_VGA); + intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); return 0; } @@ -1374,10 +1381,10 @@ cleanup_gem: i915_gem_cleanup_ringbuffer(dev); i915_gem_context_fini(dev); mutex_unlock(&dev->struct_mutex); - i915_gem_cleanup_aliasing_ppgtt(dev); + WARN_ON(dev_priv->mm.aliasing_ppgtt); drm_mm_takedown(&dev_priv->gtt.base.mm); cleanup_power: - intel_display_power_put(dev, POWER_DOMAIN_VGA); + intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); drm_irq_uninstall(dev); cleanup_gem_stolen: i915_gem_cleanup_stolen(dev); @@ -1442,7 +1449,7 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) static void i915_dump_device_info(struct drm_i915_private *dev_priv) { - const struct intel_device_info *info = dev_priv->info; + const struct intel_device_info *info = &dev_priv->info; #define PRINT_S(name) "%s" #define SEP_EMPTY @@ -1459,6 +1466,62 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv) #undef SEP_COMMA } +/* + * Determine various intel_device_info fields at runtime. + * + * Use it when either: + * - it's judged too laborious to fill n static structures with the limit + * when a simple if statement does the job, + * - run-time checks (e.g. read fuse/strap registers) are needed. + * + * This function needs to be called: + * - after the MMIO has been set up, as we are reading registers, + * - after the PCH has been detected, + * - before the first usage of the fields it can tweak. + */ +static void intel_device_info_runtime_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_device_info *info; + enum pipe pipe; + + info = (struct intel_device_info *)&dev_priv->info; + + if (IS_VALLEYVIEW(dev)) + for_each_pipe(pipe) + info->num_sprites[pipe] = 2; + else + for_each_pipe(pipe) + info->num_sprites[pipe] = 1; + + if (i915.disable_display) { + DRM_INFO("Display disabled (module parameter)\n"); + info->num_pipes = 0; + } else if (info->num_pipes > 0 && + (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && + !IS_VALLEYVIEW(dev)) { + u32 fuse_strap = I915_READ(FUSE_STRAP); + u32 sfuse_strap = I915_READ(SFUSE_STRAP); + + /* + * SFUSE_STRAP is supposed to have a bit signalling the display + * is fused off. Unfortunately it seems that, at least in + * certain cases, fused off display means that PCH display + * reads don't land anywhere. In that case, we read 0s. + * + * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK + * should be set when taking over after the firmware. 
+ */ + if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE || + sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED || + (dev_priv->pch_type == PCH_CPT && + !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { + DRM_INFO("Display fused off, disabling\n"); + info->num_pipes = 0; + } + } +} + /** * i915_driver_load - setup chip and create an initial config * @dev: DRM device @@ -1473,7 +1536,7 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv) int i915_driver_load(struct drm_device *dev, unsigned long flags) { struct drm_i915_private *dev_priv; - struct intel_device_info *info; + struct intel_device_info *info, *device_info; int ret = 0, mmio_bar, mmio_size; uint32_t aperture_size; @@ -1496,7 +1559,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev->dev_private = (void *)dev_priv; dev_priv->dev = dev; - dev_priv->info = info; + + /* copy initial configuration to dev_priv->info */ + device_info = (struct intel_device_info *)&dev_priv->info; + *device_info = *info; spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); @@ -1545,8 +1611,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto put_bridge; } - intel_uncore_early_sanitize(dev); - /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(dev); @@ -1635,9 +1699,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (!IS_I945G(dev) && !IS_I945GM(dev)) pci_enable_msi(dev->pdev); - dev_priv->num_plane = 1; - if (IS_VALLEYVIEW(dev)) - dev_priv->num_plane = 2; + intel_device_info_runtime_init(dev); if (INTEL_INFO(dev)->num_pipes) { ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes); @@ -1645,7 +1707,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_gem_unload; } - intel_power_domains_init(dev); + intel_power_domains_init(dev_priv); if (drm_core_check_feature(dev, DRIVER_MODESET)) { ret = i915_load_modeset_init(dev); @@ -1674,7 +1736,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) return 0; out_power_well: - intel_power_domains_remove(dev); + intel_power_domains_remove(dev_priv); drm_vblank_cleanup(dev); out_gem_unload: if (dev_priv->mm.inactive_shrinker.scan_objects) @@ -1724,8 +1786,8 @@ int i915_driver_unload(struct drm_device *dev) /* The i915.ko module is still not prepared to be loaded when * the power well is not enabled, so just enable it in case * we're going to unload/reload. 
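A self-contained editorial sketch (names invented) of the pattern intel_device_info_runtime_init() relies on: the per-chip template stays const, i915_driver_load() embeds a copy in dev_priv, and only the runtime-init step casts the const away, exactly as the driver code above does, to patch fields that depend on fuses and straps readable once MMIO is up.

struct chip_info {
	int num_pipes;
};

struct drv_priv {
	const struct chip_info info;	/* read-only after driver load */
};

static void runtime_tweak(struct drv_priv *priv, bool display_fused_off)
{
	/* the single sanctioned mutation point, mirroring the cast above */
	struct chip_info *info = (struct chip_info *)&priv->info;

	if (display_fused_off)
		info->num_pipes = 0;
}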
*/ - intel_display_set_init_power(dev, true); - intel_power_domains_remove(dev); + intel_display_set_init_power(dev_priv, true); + intel_power_domains_remove(dev_priv); i915_teardown_sysfs(dev); @@ -1761,8 +1823,6 @@ int i915_driver_unload(struct drm_device *dev) cancel_work_sync(&dev_priv->gpu_error.work); i915_destroy_error_state(dev); - cancel_delayed_work_sync(&dev_priv->pc8.enable_work); - if (dev->pdev->msi_enabled) pci_disable_msi(dev->pdev); @@ -1776,8 +1836,8 @@ int i915_driver_unload(struct drm_device *dev) i915_gem_free_all_phys_object(dev); i915_gem_cleanup_ringbuffer(dev); i915_gem_context_fini(dev); + WARN_ON(dev_priv->mm.aliasing_ppgtt); mutex_unlock(&dev->struct_mutex); - i915_gem_cleanup_aliasing_ppgtt(dev); i915_gem_cleanup_stolen(dev); if (!I915_NEED_GFX_HWS(dev)) @@ -1835,7 +1895,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file) */ void i915_driver_lastclose(struct drm_device * dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; /* On gen6+ we refuse to init without kms enabled, but then the drm core * goes right around and calls lastclose. Check for this and don't clean diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ec7bb0fc71b..82f4d1f47d3 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -38,134 +38,30 @@ #include <linux/module.h> #include <drm/drm_crtc_helper.h> -static int i915_modeset __read_mostly = -1; -module_param_named(modeset, i915_modeset, int, 0400); -MODULE_PARM_DESC(modeset, - "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, " - "1=on, -1=force vga console preference [default])"); - -unsigned int i915_fbpercrtc __always_unused = 0; -module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); - -int i915_panel_ignore_lid __read_mostly = 1; -module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); -MODULE_PARM_DESC(panel_ignore_lid, - "Override lid status (0=autodetect, 1=autodetect disabled [default], " - "-1=force lid closed, -2=force lid open)"); - -unsigned int i915_powersave __read_mostly = 1; -module_param_named(powersave, i915_powersave, int, 0600); -MODULE_PARM_DESC(powersave, - "Enable powersavings, fbc, downclocking, etc. (default: true)"); - -int i915_semaphores __read_mostly = -1; -module_param_named(semaphores, i915_semaphores, int, 0400); -MODULE_PARM_DESC(semaphores, - "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))"); - -int i915_enable_rc6 __read_mostly = -1; -module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400); -MODULE_PARM_DESC(i915_enable_rc6, - "Enable power-saving render C-state 6. " - "Different stages can be selected via bitmask values " - "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). " - "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. 
" - "default: -1 (use per-chip default)"); - -int i915_enable_fbc __read_mostly = -1; -module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); -MODULE_PARM_DESC(i915_enable_fbc, - "Enable frame buffer compression for power savings " - "(default: -1 (use per-chip default))"); - -unsigned int i915_lvds_downclock __read_mostly = 0; -module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); -MODULE_PARM_DESC(lvds_downclock, - "Use panel (LVDS/eDP) downclocking for power savings " - "(default: false)"); - -int i915_lvds_channel_mode __read_mostly; -module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600); -MODULE_PARM_DESC(lvds_channel_mode, - "Specify LVDS channel mode " - "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); - -int i915_panel_use_ssc __read_mostly = -1; -module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); -MODULE_PARM_DESC(lvds_use_ssc, - "Use Spread Spectrum Clock with panels [LVDS/eDP] " - "(default: auto from VBT)"); - -int i915_vbt_sdvo_panel_type __read_mostly = -1; -module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); -MODULE_PARM_DESC(vbt_sdvo_panel_type, - "Override/Ignore selection of SDVO panel mode in the VBT " - "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); - -static bool i915_try_reset __read_mostly = true; -module_param_named(reset, i915_try_reset, bool, 0600); -MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); - -bool i915_enable_hangcheck __read_mostly = true; -module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644); -MODULE_PARM_DESC(enable_hangcheck, - "Periodically check GPU activity for detecting hangs. " - "WARNING: Disabling this can cause system wide hangs. " - "(default: true)"); - -int i915_enable_ppgtt __read_mostly = -1; -module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400); -MODULE_PARM_DESC(i915_enable_ppgtt, - "Enable PPGTT (default: true)"); - -int i915_enable_psr __read_mostly = 0; -module_param_named(enable_psr, i915_enable_psr, int, 0600); -MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); - -unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT); -module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600); -MODULE_PARM_DESC(preliminary_hw_support, - "Enable preliminary hardware support."); - -int i915_disable_power_well __read_mostly = 1; -module_param_named(disable_power_well, i915_disable_power_well, int, 0600); -MODULE_PARM_DESC(disable_power_well, - "Disable the power well when possible (default: true)"); - -int i915_enable_ips __read_mostly = 1; -module_param_named(enable_ips, i915_enable_ips, int, 0600); -MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); - -bool i915_fastboot __read_mostly = 0; -module_param_named(fastboot, i915_fastboot, bool, 0600); -MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time " - "(default: false)"); - -int i915_enable_pc8 __read_mostly = 1; -module_param_named(enable_pc8, i915_enable_pc8, int, 0600); -MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)"); - -int i915_pc8_timeout __read_mostly = 5000; -module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600); -MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)"); - -bool i915_prefault_disable __read_mostly; -module_param_named(prefault_disable, i915_prefault_disable, bool, 0600); 
-MODULE_PARM_DESC(prefault_disable, - "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only."); - static struct drm_driver driver; +#define GEN_DEFAULT_PIPEOFFSETS \ + .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ + PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ + .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ + TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \ + .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \ + .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \ + .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } + + static const struct intel_device_info intel_i830_info = { .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, .has_overlay = 1, .overlay_needs_physical = 1, .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_845g_info = { .gen = 2, .num_pipes = 1, .has_overlay = 1, .overlay_needs_physical = 1, .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_i85x_info = { @@ -174,18 +70,21 @@ static const struct intel_device_info intel_i85x_info = { .has_overlay = 1, .overlay_needs_physical = 1, .has_fbc = 1, .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_i865g_info = { .gen = 2, .num_pipes = 1, .has_overlay = 1, .overlay_needs_physical = 1, .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_i915g_info = { .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2, .has_overlay = 1, .overlay_needs_physical = 1, .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_i915gm_info = { .gen = 3, .is_mobile = 1, .num_pipes = 2, @@ -194,11 +93,13 @@ static const struct intel_device_info intel_i915gm_info = { .supports_tv = 1, .has_fbc = 1, .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_i945g_info = { .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2, .has_overlay = 1, .overlay_needs_physical = 1, .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_i945gm_info = { .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2, @@ -207,6 +108,7 @@ static const struct intel_device_info intel_i945gm_info = { .supports_tv = 1, .has_fbc = 1, .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_i965g_info = { @@ -214,6 +116,7 @@ static const struct intel_device_info intel_i965g_info = { .has_hotplug = 1, .has_overlay = 1, .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_i965gm_info = { @@ -222,6 +125,7 @@ static const struct intel_device_info intel_i965gm_info = { .has_overlay = 1, .supports_tv = 1, .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_g33_info = { @@ -229,12 +133,14 @@ static const struct intel_device_info intel_g33_info = { .need_gfx_hws = 1, .has_hotplug = 1, .has_overlay = 1, .ring_mask = RENDER_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_g45_info = { .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, .has_pipe_cxsr = 1, .has_hotplug = 1, .ring_mask = RENDER_RING | BSD_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_gm45_info = { @@ -243,18 +149,21 @@ static const struct intel_device_info intel_gm45_info = { .has_pipe_cxsr = 1, 
.has_hotplug = 1, .supports_tv = 1, .ring_mask = RENDER_RING | BSD_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_pineview_info = { .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2, .need_gfx_hws = 1, .has_hotplug = 1, .has_overlay = 1, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_ironlake_d_info = { .gen = 5, .num_pipes = 2, .need_gfx_hws = 1, .has_hotplug = 1, .ring_mask = RENDER_RING | BSD_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_ironlake_m_info = { @@ -262,6 +171,7 @@ static const struct intel_device_info intel_ironlake_m_info = { .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 1, .ring_mask = RENDER_RING | BSD_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_sandybridge_d_info = { @@ -270,6 +180,7 @@ static const struct intel_device_info intel_sandybridge_d_info = { .has_fbc = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING, .has_llc = 1, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_sandybridge_m_info = { @@ -278,6 +189,7 @@ static const struct intel_device_info intel_sandybridge_m_info = { .has_fbc = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING, .has_llc = 1, + GEN_DEFAULT_PIPEOFFSETS, }; #define GEN7_FEATURES \ @@ -290,18 +202,21 @@ static const struct intel_device_info intel_sandybridge_m_info = { static const struct intel_device_info intel_ivybridge_d_info = { GEN7_FEATURES, .is_ivybridge = 1, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_ivybridge_m_info = { GEN7_FEATURES, .is_ivybridge = 1, .is_mobile = 1, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_ivybridge_q_info = { GEN7_FEATURES, .is_ivybridge = 1, .num_pipes = 0, /* legal, last one wins */ + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_valleyview_m_info = { @@ -312,6 +227,7 @@ static const struct intel_device_info intel_valleyview_m_info = { .display_mmio_offset = VLV_DISPLAY_BASE, .has_fbc = 0, /* legal, last one wins */ .has_llc = 0, /* legal, last one wins */ + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_valleyview_d_info = { @@ -321,6 +237,7 @@ static const struct intel_device_info intel_valleyview_d_info = { .display_mmio_offset = VLV_DISPLAY_BASE, .has_fbc = 0, /* legal, last one wins */ .has_llc = 0, /* legal, last one wins */ + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_haswell_d_info = { @@ -329,6 +246,7 @@ static const struct intel_device_info intel_haswell_d_info = { .has_ddi = 1, .has_fpga_dbg = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_haswell_m_info = { @@ -338,6 +256,7 @@ static const struct intel_device_info intel_haswell_m_info = { .has_ddi = 1, .has_fpga_dbg = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_broadwell_d_info = { @@ -346,6 +265,8 @@ static const struct intel_device_info intel_broadwell_d_info = { .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, .has_llc = 1, .has_ddi = 1, + .has_fbc = 1, + GEN_DEFAULT_PIPEOFFSETS, }; static const struct intel_device_info intel_broadwell_m_info = { @@ -354,6 +275,8 @@ static const struct intel_device_info intel_broadwell_m_info = { .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, .has_llc = 1, .has_ddi = 1, + .has_fbc = 1, + 
GEN_DEFAULT_PIPEOFFSETS, }; /* @@ -475,14 +398,12 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 6) return false; + if (i915.semaphores >= 0) + return i915.semaphores; + /* Until we get further testing... */ - if (IS_GEN8(dev)) { - WARN_ON(!i915_preliminary_hw_support); + if (IS_GEN8(dev)) return false; - } - - if (i915_semaphores >= 0) - return i915_semaphores; #ifdef CONFIG_INTEL_IOMMU /* Enable semaphores on SNB when IO remapping is off */ @@ -507,8 +428,7 @@ static int i915_drm_freeze(struct drm_device *dev) /* We do a lot of poking in a lot of registers, make sure they work * properly. */ - hsw_disable_package_c8(dev_priv); - intel_display_set_init_power(dev, true); + intel_display_set_init_power(dev_priv, true); drm_kms_helper_poll_disable(dev); @@ -546,11 +466,14 @@ static int i915_drm_freeze(struct drm_device *dev) i915_save_state(dev); intel_opregion_fini(dev); + intel_uncore_fini(dev); console_lock(); intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED); console_unlock(); + dev_priv->suspend_count++; + return 0; } @@ -614,14 +537,21 @@ static void intel_resume_hotplug(struct drm_device *dev) drm_helper_hpd_irq_event(dev); } -static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) +static int i915_drm_thaw_early(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int error = 0; intel_uncore_early_sanitize(dev); - intel_uncore_sanitize(dev); + intel_power_domains_init_hw(dev_priv); + + return 0; +} + +static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int error = 0; if (drm_core_check_feature(dev, DRIVER_MODESET) && restore_gtt_mappings) { @@ -630,14 +560,13 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) mutex_unlock(&dev->struct_mutex); } - intel_power_domains_init_hw(dev); - i915_restore_state(dev); intel_opregion_setup(dev); /* KMS EnterVT equivalent */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_init_pch_refclk(dev); + drm_mode_config_reset(dev); mutex_lock(&dev->struct_mutex); @@ -650,7 +579,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) intel_modeset_init_hw(dev); drm_modeset_lock_all(dev); - drm_mode_config_reset(dev); intel_modeset_setup_hw_state(dev, true); drm_modeset_unlock_all(dev); @@ -680,10 +608,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) schedule_work(&dev_priv->console_resume_work); } - /* Undo what we did at i915_drm_freeze so the refcount goes back to the - * expected level. */ - hsw_enable_package_c8(dev_priv); - mutex_lock(&dev_priv->modeset_restore_lock); dev_priv->modeset_restore = MODESET_DONE; mutex_unlock(&dev_priv->modeset_restore_lock); @@ -700,19 +624,33 @@ static int i915_drm_thaw(struct drm_device *dev) return __i915_drm_thaw(dev, true); } -int i915_resume(struct drm_device *dev) +static int i915_resume_early(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; + /* + * We have a resume ordering issue with the snd-hda driver also + * requiring our device to be powered up. Due to the lack of a + * parent/child relationship we currently solve this with an early + * resume hook. + * + * FIXME: This should be solved with a special hdmi sink device or + * similar so that power domains can be employed. 
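+ * + * (Editorial aside, not part of the patch: with the early/late hooks added here, a full cycle runs suspend -> suspend_late -> PCI D3hot -> resume_early -> resume, giving snd-hda a window to order its own callbacks between the two graphics halves, as this FIXME describes.)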
+ */ if (pci_enable_device(dev->pdev)) return -EIO; pci_set_master(dev->pdev); + return i915_drm_thaw_early(dev); +} + +int i915_resume(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + /* * Platforms with opregion should have sane BIOS, older ones (gen3 and * earlier) need to restore the GTT mappings since the BIOS might clear @@ -726,6 +664,14 @@ int i915_resume(struct drm_device *dev) return 0; } +static int i915_resume_legacy(struct drm_device *dev) +{ + i915_resume_early(dev); + i915_resume(dev); + + return 0; +} + /** * i915_reset - reset chip after a hang * @dev: drm device to reset @@ -743,11 +689,11 @@ int i915_resume(struct drm_device *dev) */ int i915_reset(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; bool simulated; int ret; - if (!i915_try_reset) + if (!i915.reset) return 0; mutex_lock(&dev->struct_mutex); @@ -802,6 +748,17 @@ int i915_reset(struct drm_device *dev) drm_irq_uninstall(dev); drm_irq_install(dev); + + /* rps/rc6 re-init is necessary to restore state lost after the + * reset and the re-install of drm irq. Skip for ironlake per + * previous concerns that it doesn't respond well to some forms + * of re-init after reset. */ + if (INTEL_INFO(dev)->gen > 5) { + mutex_lock(&dev->struct_mutex); + intel_enable_gt_powersave(dev); + mutex_unlock(&dev->struct_mutex); + } + intel_hpd_init(dev); } else { mutex_unlock(&dev->struct_mutex); @@ -815,7 +772,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct intel_device_info *intel_info = (struct intel_device_info *) ent->driver_data; - if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) { + if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) { DRM_INFO("This hardware requires preliminary hardware support.\n" "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n"); return -ENODEV; @@ -846,7 +803,6 @@ static int i915_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - int error; if (!drm_dev || !drm_dev->dev_private) { dev_err(dev, "DRM not initialized, aborting suspend.\n"); @@ -856,9 +812,25 @@ static int i915_pm_suspend(struct device *dev) if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - error = i915_drm_freeze(drm_dev); - if (error) - return error; + return i915_drm_freeze(drm_dev); +} + +static int i915_pm_suspend_late(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + /* + * We have a suspend ordering issue with the snd-hda driver also + * requiring our device to be powered up. Due to the lack of a + * parent/child relationship we currently solve this with a late + * suspend hook. + * + * FIXME: This should be solved with a special hdmi sink device or + * similar so that power domains can be employed. 
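An editorial sketch of the usage contract behind the runtime PM paths in this patch: code that is about to touch registers brackets the access with a reference, so i915_runtime_suspend() can only run once the refcount reaches zero. The helper name is invented; intel_runtime_pm_get/put and I915_READ are the driver's own.

static u32 read_reg_awake(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	intel_runtime_pm_get(dev_priv);	/* wakes the device if suspended */
	val = I915_READ(reg);
	intel_runtime_pm_put(dev_priv);	/* last ref may arm autosuspend */

	return val;
}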
+ */ + if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); @@ -866,6 +838,14 @@ static int i915_pm_suspend(struct device *dev) return 0; } +static int i915_pm_resume_early(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + return i915_resume_early(drm_dev); +} + static int i915_pm_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -887,6 +867,14 @@ static int i915_pm_freeze(struct device *dev) return i915_drm_freeze(drm_dev); } +static int i915_pm_thaw_early(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + return i915_drm_thaw_early(drm_dev); +} + static int i915_pm_thaw(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -910,9 +898,13 @@ static int i915_runtime_suspend(struct device *device) struct drm_i915_private *dev_priv = dev->dev_private; WARN_ON(!HAS_RUNTIME_PM(dev)); + assert_force_wake_inactive(dev_priv); DRM_DEBUG_KMS("Suspending device\n"); + if (HAS_PC8(dev)) + hsw_enable_pc8(dev_priv); + i915_gem_release_all_mmaps(dev_priv); del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); @@ -927,6 +919,7 @@ static int i915_runtime_suspend(struct device *device) */ intel_opregion_notify_adapter(dev, PCI_D1); + DRM_DEBUG_KMS("Device suspended\n"); return 0; } @@ -943,15 +936,23 @@ static int i915_runtime_resume(struct device *device) intel_opregion_notify_adapter(dev, PCI_D0); dev_priv->pm.suspended = false; + if (HAS_PC8(dev)) + hsw_disable_pc8(dev_priv); + + DRM_DEBUG_KMS("Device resumed\n"); return 0; } static const struct dev_pm_ops i915_pm_ops = { .suspend = i915_pm_suspend, + .suspend_late = i915_pm_suspend_late, + .resume_early = i915_pm_resume_early, .resume = i915_pm_resume, .freeze = i915_pm_freeze, + .thaw_early = i915_pm_thaw_early, .thaw = i915_pm_thaw, .poweroff = i915_pm_poweroff, + .restore_early = i915_pm_resume_early, .restore = i915_pm_resume, .runtime_suspend = i915_runtime_suspend, .runtime_resume = i915_runtime_resume, @@ -994,7 +995,7 @@ static struct drm_driver driver = { /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ .suspend = i915_suspend, - .resume = i915_resume, + .resume = i915_resume_legacy, .device_is_agp = i915_driver_device_is_agp, .master_create = i915_master_create, @@ -1046,14 +1047,14 @@ static int __init i915_init(void) * the default behavior. 
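Editorial sketch of where the removed i915_* module parameters went: a single params struct instance whose fields are bound with module_param_named() and read as i915.<name>, as in the i915.modeset checks just below. The field set and defaults here are an illustrative subset, not the real struct.

struct i915_params_sketch {
	int modeset;
	int semaphores;
	int preliminary_hw_support;
	bool reset;
	bool disable_display;
};

struct i915_params_sketch i915_sketch = {
	.modeset = -1,	/* -1 keeps the old "auto" default */
	.semaphores = -1,
	.reset = true,
};

/* each field is then exposed the usual way, e.g.: */
module_param_named(modeset, i915_sketch.modeset, int, 0400);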
*/ #if defined(CONFIG_DRM_I915_KMS) - if (i915_modeset != 0) + if (i915.modeset != 0) driver.driver_features |= DRIVER_MODESET; #endif - if (i915_modeset == 1) + if (i915.modeset == 1) driver.driver_features |= DRIVER_MODESET; #ifdef CONFIG_VGA_CONSOLE - if (vgacon_text_force() && i915_modeset == -1) + if (vgacon_text_force() && i915.modeset == -1) driver.driver_features &= ~DRIVER_MODESET; #endif diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index df77e20e3c3..0905cd91558 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -58,7 +58,8 @@ enum pipe { PIPE_A = 0, PIPE_B, PIPE_C, - I915_MAX_PIPES + _PIPE_EDP, + I915_MAX_PIPES = _PIPE_EDP }; #define pipe_name(p) ((p) + 'A') @@ -66,7 +67,8 @@ enum transcoder { TRANSCODER_A = 0, TRANSCODER_B, TRANSCODER_C, - TRANSCODER_EDP = 0xF, + TRANSCODER_EDP, + I915_MAX_TRANSCODERS }; #define transcoder_name(t) ((t) + 'A') @@ -77,7 +79,7 @@ enum plane { }; #define plane_name(p) ((p) + 'A') -#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A') +#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A') enum port { PORT_A = 0, @@ -112,6 +114,17 @@ enum intel_display_power_domain { POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_C, POWER_DOMAIN_TRANSCODER_EDP, + POWER_DOMAIN_PORT_DDI_A_2_LANES, + POWER_DOMAIN_PORT_DDI_A_4_LANES, + POWER_DOMAIN_PORT_DDI_B_2_LANES, + POWER_DOMAIN_PORT_DDI_B_4_LANES, + POWER_DOMAIN_PORT_DDI_C_2_LANES, + POWER_DOMAIN_PORT_DDI_C_4_LANES, + POWER_DOMAIN_PORT_DDI_D_2_LANES, + POWER_DOMAIN_PORT_DDI_D_4_LANES, + POWER_DOMAIN_PORT_DSI, + POWER_DOMAIN_PORT_CRT, + POWER_DOMAIN_PORT_OTHER, POWER_DOMAIN_VGA, POWER_DOMAIN_AUDIO, POWER_DOMAIN_INIT, @@ -119,8 +132,6 @@ enum intel_display_power_domain { POWER_DOMAIN_NUM, }; -#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1) - #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) @@ -128,14 +139,6 @@ enum intel_display_power_domain { ((tran) == TRANSCODER_EDP ? 
POWER_DOMAIN_TRANSCODER_EDP : \ (tran) + POWER_DOMAIN_TRANSCODER_A) -#define HSW_ALWAYS_ON_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PIPE_A) | \ - BIT(POWER_DOMAIN_TRANSCODER_EDP)) -#define BDW_ALWAYS_ON_POWER_DOMAINS ( \ - BIT(POWER_DOMAIN_PIPE_A) | \ - BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ - BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER)) - enum hpd_pin { HPD_NONE = 0, HPD_PORT_A = HPD_NONE, /* PORT_A is internal */ @@ -157,11 +160,16 @@ enum hpd_pin { I915_GEM_DOMAIN_VERTEX) #define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++) +#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++) #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ if ((intel_encoder)->base.crtc == (__crtc)) +#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \ + list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \ + if ((intel_connector)->base.encoder == (__encoder)) + struct drm_i915_private; enum intel_dpll_id { @@ -295,53 +303,87 @@ struct intel_display_error_state; struct drm_i915_error_state { struct kref ref; + struct timeval time; + + char error_msg[128]; + u32 reset_count; + u32 suspend_count; + + /* Generic register state */ u32 eir; u32 pgtbl_er; u32 ier; u32 ccid; u32 derrmr; u32 forcewake; - bool waiting[I915_NUM_RINGS]; - u32 pipestat[I915_MAX_PIPES]; - u32 tail[I915_NUM_RINGS]; - u32 head[I915_NUM_RINGS]; - u32 ctl[I915_NUM_RINGS]; - u32 ipeir[I915_NUM_RINGS]; - u32 ipehr[I915_NUM_RINGS]; - u32 instdone[I915_NUM_RINGS]; - u32 acthd[I915_NUM_RINGS]; - u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; - u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1]; - u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */ - /* our own tracking of ring head and tail */ - u32 cpu_ring_head[I915_NUM_RINGS]; - u32 cpu_ring_tail[I915_NUM_RINGS]; u32 error; /* gen6+ */ u32 err_int; /* gen7 */ - u32 bbstate[I915_NUM_RINGS]; - u32 instpm[I915_NUM_RINGS]; - u32 instps[I915_NUM_RINGS]; - u32 extra_instdone[I915_NUM_INSTDONE_REG]; - u32 seqno[I915_NUM_RINGS]; - u64 bbaddr[I915_NUM_RINGS]; - u32 fault_reg[I915_NUM_RINGS]; u32 done_reg; - u32 faddr[I915_NUM_RINGS]; + u32 gac_eco; + u32 gam_ecochk; + u32 gab_ctl; + u32 gfx_mode; + u32 extra_instdone[I915_NUM_INSTDONE_REG]; + u32 pipestat[I915_MAX_PIPES]; u64 fence[I915_MAX_NUM_FENCES]; - struct timeval time; + struct intel_overlay_error_state *overlay; + struct intel_display_error_state *display; + struct drm_i915_error_ring { bool valid; + /* Software tracked state */ + bool waiting; + int hangcheck_score; + enum intel_ring_hangcheck_action hangcheck_action; + int num_requests; + + /* our own tracking of ring head and tail */ + u32 cpu_ring_head; + u32 cpu_ring_tail; + + u32 semaphore_seqno[I915_NUM_RINGS - 1]; + + /* Register state */ + u32 tail; + u32 head; + u32 ctl; + u32 hws; + u32 ipeir; + u32 ipehr; + u32 instdone; + u32 bbstate; + u32 instpm; + u32 instps; + u32 seqno; + u64 bbaddr; + u64 acthd; + u32 fault_reg; + u32 faddr; + u32 rc_psmi; /* sleep state */ + u32 semaphore_mboxes[I915_NUM_RINGS - 1]; + struct drm_i915_error_object { int page_count; u32 gtt_offset; u32 *pages[0]; - } *ringbuffer, *batchbuffer, *ctx; + } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; + struct drm_i915_error_request { long jiffies; u32 seqno; u32 tail; } *requests; - int num_requests; + + struct { + u32 gfx_mode; + union { + u64 pdp[4]; + u32 pp_dir_base; + }; + } vm_info; + + 
pid_t pid; + char comm[TASK_COMM_LEN]; } ring[I915_NUM_RINGS]; struct drm_i915_error_buffer { u32 size; @@ -358,15 +400,13 @@ struct drm_i915_error_state { s32 ring:4; u32 cache_level:3; } **active_bo, **pinned_bo; + u32 *active_bo_count, *pinned_bo_count; - struct intel_overlay_error_state *overlay; - struct intel_display_error_state *display; - int hangcheck_score[I915_NUM_RINGS]; - enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS]; }; struct intel_connector; struct intel_crtc_config; +struct intel_plane_config; struct intel_crtc; struct intel_limit; struct dpll; @@ -405,6 +445,8 @@ struct drm_i915_display_funcs { * fills out the pipe-config with the hw state. */ bool (*get_pipe_config)(struct intel_crtc *, struct intel_crtc_config *); + void (*get_plane_config)(struct intel_crtc *, + struct intel_plane_config *); int (*crtc_mode_set)(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); @@ -420,8 +462,9 @@ struct drm_i915_display_funcs { struct drm_framebuffer *fb, struct drm_i915_gem_object *obj, uint32_t flags); - int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y); + int (*update_primary_plane)(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y); void (*hpd_irq_setup)(struct drm_device *dev); /* clock updates for mode set */ /* cursor updates */ @@ -469,7 +512,7 @@ struct intel_uncore { unsigned fw_rendercount; unsigned fw_mediacount; - struct delayed_work force_wake_work; + struct timer_list force_wake_timer; }; #define DEV_INFO_FOR_EACH_FLAG(func, sep) \ @@ -504,9 +547,16 @@ struct intel_uncore { struct intel_device_info { u32 display_mmio_offset; u8 num_pipes:3; + u8 num_sprites[I915_MAX_PIPES]; u8 gen; u8 ring_mask; /* Rings supported by the HW */ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); + /* Register offsets for the various display pipes and transcoders */ + int pipe_offsets[I915_MAX_TRANSCODERS]; + int trans_offsets[I915_MAX_TRANSCODERS]; + int dpll_offsets[I915_MAX_PIPES]; + int dpll_md_offsets[I915_MAX_PIPES]; + int palette_offsets[I915_MAX_PIPES]; }; #undef DEFINE_FLAG @@ -524,6 +574,57 @@ enum i915_cache_level { typedef uint32_t gen6_gtt_pte_t; +/** + * A VMA represents a GEM BO that is bound into an address space. Therefore, a + * VMA's presence cannot be guaranteed before binding, or after unbinding the + * object into/from the address space. + * + * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime + * will always be <= an object's lifetime. So object refcounting should cover us. + */ +struct i915_vma { + struct drm_mm_node node; + struct drm_i915_gem_object *obj; + struct i915_address_space *vm; + + /** This object's place on the active/inactive lists */ + struct list_head mm_list; + + struct list_head vma_link; /* Link in the object's VMA list */ + + /** This vma's place in the batchbuffer or on the eviction list */ + struct list_head exec_list; + + /** + * Used for performing relocations during execbuffer insertion. + */ + struct hlist_node exec_node; + unsigned long exec_handle; + struct drm_i915_gem_exec_object2 *exec_entry; + + /** + * How many users have pinned this object in GTT space. The following + * users can each hold at most one reference: pwrite/pread, pin_ioctl + * (via user_pin_count), execbuffer (objects are not allowed multiple + * times for the same batchbuffer), and the framebuffer code. When + * switching/pageflipping, the framebuffer code has at most two buffers + * pinned per crtc. 
+ * + * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 + * bits with absolutely no headroom. So use 4 bits. */ + unsigned int pin_count:4; +#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf + + /** Unmap an object from an address space. This usually consists of + * setting the valid PTE entries to a reserved scratch page. */ + void (*unbind_vma)(struct i915_vma *vma); + /* Map an object into an address space with the given cache flags. */ +#define GLOBAL_BIND (1<<0) + void (*bind_vma)(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags); +}; + struct i915_address_space { struct drm_mm mm; struct drm_device *dev; @@ -564,12 +665,12 @@ struct i915_address_space { enum i915_cache_level level, bool valid); /* Create a valid PTE */ void (*clear_range)(struct i915_address_space *vm, - unsigned int first_entry, - unsigned int num_entries, + uint64_t start, + uint64_t length, bool use_scratch); void (*insert_entries)(struct i915_address_space *vm, struct sg_table *st, - unsigned int first_entry, + uint64_t start, enum i915_cache_level cache_level); void (*cleanup)(struct i915_address_space *vm); }; @@ -603,55 +704,34 @@ struct i915_gtt { }; #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) +#define GEN8_LEGACY_PDPS 4 struct i915_hw_ppgtt { struct i915_address_space base; + struct kref ref; + struct drm_mm_node node; unsigned num_pd_entries; + unsigned num_pd_pages; /* gen8+ */ union { struct page **pt_pages; - struct page *gen8_pt_pages; + struct page **gen8_pt_pages[GEN8_LEGACY_PDPS]; }; struct page *pd_pages; - int num_pd_pages; - int num_pt_pages; union { uint32_t pd_offset; - dma_addr_t pd_dma_addr[4]; + dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS]; }; union { dma_addr_t *pt_dma_addr; dma_addr_t *gen8_pt_dma_addr[4]; }; - int (*enable)(struct drm_device *dev); -}; - -/** - * A VMA represents a GEM BO that is bound into an address space. Therefore, a - * VMA's presence cannot be guaranteed before binding, or after unbinding the - * object into/from the address space. - * - * To make things as simple as possible (ie. no refcounting), a VMA's lifetime - * will always be <= an objects lifetime. So object refcounting should cover us. - */ -struct i915_vma { - struct drm_mm_node node; - struct drm_i915_gem_object *obj; - struct i915_address_space *vm; - - /** This object's place on the active/inactive lists */ - struct list_head mm_list; - struct list_head vma_link; /* Link in the object's VMA list */ - - /** This vma's place in the batchbuffer or on the eviction list */ - struct list_head exec_list; - - /** - * Used for performing relocations during execbuffer insertion. 
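A self-contained editorial check of the arithmetic in the pin_count comment above: the worst case of 1 (pread/pwrite) + 1 (pin_ioctl) + 1 (execbuffer) + 2*2 (two framebuffers per crtc while flipping) = 7 needs 3 bits, and the field carries 4 for headroom; a pin helper must still refuse to wrap the bitfield.

#include <assert.h>

struct vma_sketch {
	unsigned int pin_count:4;
};

#define VMA_MAX_PIN_COUNT 0xf

static_assert(7 <= VMA_MAX_PIN_COUNT, "worst case must fit the field");

static int vma_pin_sketch(struct vma_sketch *vma)
{
	if (vma->pin_count == VMA_MAX_PIN_COUNT)
		return -1;	/* refuse rather than wrap the 4-bit field */
	vma->pin_count++;
	return 0;
}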
- */ - struct hlist_node exec_node; - unsigned long exec_handle; - struct drm_i915_gem_exec_object2 *exec_entry; + struct i915_hw_context *ctx; + int (*enable)(struct i915_hw_ppgtt *ppgtt); + int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, + struct intel_ring_buffer *ring, + bool synchronous); + void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); }; struct i915_ctx_hang_stats { @@ -676,9 +756,10 @@ struct i915_hw_context { bool is_initialized; uint8_t remap_slice; struct drm_i915_file_private *file_priv; - struct intel_ring_buffer *ring; + struct intel_ring_buffer *last_ring; struct drm_i915_gem_object *obj; struct i915_ctx_hang_stats hang_stats; + struct i915_address_space *vm; struct list_head link; }; @@ -831,11 +912,7 @@ struct i915_suspend_saved_registers { u32 savePFIT_CONTROL; u32 save_palette_a[256]; u32 save_palette_b[256]; - u32 saveDPFC_CB_BASE; - u32 saveFBC_CFB_BASE; - u32 saveFBC_LL_BASE; u32 saveFBC_CONTROL; - u32 saveFBC_CONTROL2; u32 saveIER; u32 saveIIR; u32 saveIMR; @@ -905,15 +982,24 @@ struct intel_gen6_power_mgmt { struct work_struct work; u32 pm_iir; - /* The below variables an all the rps hw state are protected by - * dev->struct mutext. */ - u8 cur_delay; - u8 min_delay; - u8 max_delay; - u8 rpe_delay; - u8 rp1_delay; - u8 rp0_delay; - u8 hw_max; + /* Frequencies are stored in potentially platform dependent multiples. + * In other words, *_freq needs to be multiplied by X to be interesting. + * Soft limits are those which are used for the dynamic reclocking done + * by the driver (raise frequencies under heavy loads, and lower for + * lighter loads). Hard limits are those imposed by the hardware. + * + * A distinction is made for overclocking, which is never enabled by + * default, and is considered to be above the hard limit if it's + * possible at all. + */ + u8 cur_freq; /* Current frequency (cached, may not == HW) */ + u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */ + u8 max_freq_softlimit; /* Max frequency permitted by the driver */ + u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ + u8 min_freq; /* AKA RPn. Minimum frequency */ + u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ + u8 rp1_freq; /* "less than" RP0 power/freqency */ + u8 rp0_freq; /* Non-overclocked max frequency. */ int last_adj; enum { LOW_POWER, BETWEEN, HIGH_POWER } power; @@ -953,6 +1039,36 @@ struct intel_ilk_power_mgmt { struct drm_i915_gem_object *renderctx; }; +struct drm_i915_private; +struct i915_power_well; + +struct i915_power_well_ops { + /* + * Synchronize the well's hw state to match the current sw state, for + * example enable/disable it based on the current refcount. Called + * during driver init and resume time, possibly after first calling + * the enable/disable handlers. + */ + void (*sync_hw)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); + /* + * Enable the well and resources that depend on it (for example + * interrupts located on the well). Called after the 0->1 refcount + * transition. + */ + void (*enable)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); + /* + * Disable the well and resources that depend on it. Called after + * the 1->0 refcount transition. + */ + void (*disable)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); + /* Returns the hw enabled state. 
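Editorial sketch of how a vtable like i915_power_well_ops is typically driven by the refcounting its comments describe: enable fires on the 0->1 transition, disable on 1->0. The helper names are invented; count and ops are the fields the i915_power_well struct below actually carries.

static void power_well_get_sketch(struct drm_i915_private *dev_priv,
				  struct i915_power_well *well)
{
	if (well->count++ == 0)
		well->ops->enable(dev_priv, well);
}

static void power_well_put_sketch(struct drm_i915_private *dev_priv,
				  struct i915_power_well *well)
{
	WARN_ON(well->count <= 0);
	if (--well->count == 0)
		well->ops->disable(dev_priv, well);
}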
+	bool (*is_enabled)(struct drm_i915_private *dev_priv,
+			   struct i915_power_well *power_well);
+};
+
 /* Power well structure for haswell */
 struct i915_power_well {
 	const char *name;
@@ -960,11 +1076,8 @@ struct i915_power_well {
 	/* power well enable/disable usage count */
 	int count;
 	unsigned long domains;
-	void *data;
-	void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
-		    bool enable);
-	bool (*is_enabled)(struct drm_device *dev,
-			   struct i915_power_well *power_well);
+	unsigned long data;
+	const struct i915_power_well_ops *ops;
 };
 
 struct i915_power_domains {
@@ -1061,6 +1174,14 @@ struct i915_gem_mm {
 	 */
 	bool interruptible;
 
+	/**
+	 * Is the GPU currently considered idle, or busy executing userspace
+	 * requests? Whilst idle, we attempt to power down the hardware and
+	 * display clocks. In order to reduce the effect on performance, there
+	 * is a slight delay before we do so.
+	 */
+	bool busy;
+
 	/** Bit 6 swizzling required for X tiling */
 	uint32_t bit_6_swizzle_x;
 	/** Bit 6 swizzling required for Y tiling */
@@ -1226,44 +1347,19 @@ struct ilk_wm_values {
 };
 
 /*
- * This struct tracks the state needed for the Package C8+ feature.
- *
- * Package states C8 and deeper are really deep PC states that can only be
- * reached when all the devices on the system allow it, so even if the graphics
- * device allows PC8+, it doesn't mean the system will actually get to these
- * states.
- *
- * Our driver only allows PC8+ when all the outputs are disabled, the power well
- * is disabled and the GPU is idle. When these conditions are met, we manually
- * do the other conditions: disable the interrupts, clocks and switch LCPLL
- * refclk to Fclk.
- *
- * When we really reach PC8 or deeper states (not just when we allow it) we lose
- * the state of some registers, so when we come back from PC8+ we need to
- * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
- * need to take care of the registers kept by RC6.
+ * This struct helps track the state needed for runtime PM, which puts the
+ * device in PCI D3 state. Notice that when this happens, nothing on the
+ * graphics device works, even register access, so we don't get interrupts or
+ * anything else.
 *
- * The interrupt disabling is part of the requirements. We can only leave the
- * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
- * can lock the machine.
+ * Every piece of our code that needs to actually touch the hardware needs to
+ * either call intel_runtime_pm_get or call intel_display_power_get with the
+ * appropriate power domain.
 *
- * Ideally every piece of our code that needs PC8+ disabled would call
- * hsw_disable_package_c8, which would increment disable_count and prevent the
- * system from reaching PC8+. But we don't have a symmetric way to do this for
- * everything, so we have the requirements_met and gpu_idle variables. When we
- * switch requirements_met or gpu_idle to true we decrease disable_count, and
- * increase it in the opposite case. The requirements_met variable is true when
- * all the CRTCs, encoders and the power well are disabled. The gpu_idle
- * variable is true when the GPU is idle.
- *
- * In addition to everything, we only actually enable PC8+ if disable_count
- * stays at zero for at least some seconds. This is implemented with the
- * enable_work variable. We do this so we don't enable/disable PC8 dozens of
- * consecutive times when all screens are disabled and some background app
- * queries the state of our connectors, or we have some application constantly
- * waking up to use the GPU. Only after the enable_work function actually
- * enables PC8+ the "enable" variable will become true, which means that it can
- * be false even if disable_count is 0.
+ * Our driver uses the autosuspend delay feature, which means we'll only really
+ * suspend if we stay at zero refcount for a certain amount of time. The
+ * default value is currently very conservative (see intel_init_runtime_pm), but
+ * it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
 * goes back to false exactly before we reenable the IRQs. We use this variable
@@ -1273,17 +1369,11 @@ struct ilk_wm_values {
 * inside struct regsave so when we restore the IRQs they will contain the
 * latest expected values.
 *
- * For more, read "Display Sequences for Package C8" on our documentation.
+ * For more, read Documentation/power/runtime_pm.txt.
 */
-struct i915_package_c8 {
-	bool requirements_met;
-	bool gpu_idle;
+struct i915_runtime_pm {
+	bool suspended;
 	bool irqs_disabled;
-	/* Only true after the delayed work task actually enables it. */
-	bool enabled;
-	int disable_count;
-	struct mutex lock;
-	struct delayed_work enable_work;
 
 	struct {
 		uint32_t deimr;
@@ -1294,10 +1384,6 @@
 	} regsave;
 };
 
-struct i915_runtime_pm {
-	bool suspended;
-};
-
 enum intel_pipe_crc_source {
 	INTEL_PIPE_CRC_SOURCE_NONE,
 	INTEL_PIPE_CRC_SOURCE_PLANE1,
@@ -1332,7 +1418,7 @@ typedef struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
 
-	const struct intel_device_info *info;
+	const struct intel_device_info info;
 
 	int relative_constants_mode;
@@ -1361,11 +1447,11 @@ typedef struct drm_i915_private {
 	drm_dma_handle_t *status_page_dmah;
 	struct resource mch_res;
 
-	atomic_t irq_received;
-
 	/* protects the irq masks */
 	spinlock_t irq_lock;
 
+	bool display_irqs_enabled;
+
 	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
 	struct pm_qos_request pm_qos;
@@ -1379,6 +1465,8 @@ typedef struct drm_i915_private {
 	};
 	u32 gt_irq_mask;
 	u32 pm_irq_mask;
+	u32 pm_rps_events;
+	u32 pipestat_irq_mask[I915_MAX_PIPES];
 
 	struct work_struct hotplug_work;
 	bool enable_hotplug_processing;
@@ -1394,8 +1482,6 @@ typedef struct drm_i915_private {
 	u32 hpd_event_bits;
 	struct timer_list hotplug_reenable_timer;
 
-	int num_plane;
-
 	struct i915_fbc fbc;
 	struct intel_opregion opregion;
 	struct intel_vbt_data vbt;
@@ -1445,8 +1531,8 @@ typedef struct drm_i915_private {
 
 	struct sdvo_device_mapping sdvo_mappings[2];
 
-	struct drm_crtc *plane_to_crtc_mapping[3];
-	struct drm_crtc *pipe_to_crtc_mapping[3];
+	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
+	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
 	wait_queue_head_t pending_flip_queue;
 
 #ifdef CONFIG_DEBUG_FS
@@ -1506,6 +1592,7 @@ typedef struct drm_i915_private {
 
 	u32 fdi_rx_config;
 
+	u32 suspend_count;
 	struct i915_suspend_saved_registers regfile;
 
 	struct {
@@ -1525,8 +1612,6 @@ typedef struct drm_i915_private {
 		struct ilk_wm_values hw;
 	} wm;
 
-	struct i915_package_c8 pc8;
-
 	struct i915_runtime_pm pm;
 
 	/* Old dri1 support infrastructure, beware the dragons ya fools entering
@@ -1627,18 +1712,6 @@ struct drm_i915_gem_object {
 	 */
 	unsigned int fence_dirty:1;
 
-	/** How many users have pinned this object in GTT space.
The following - * users can each hold at most one reference: pwrite/pread, pin_ioctl - * (via user_pin_count), execbuffer (objects are not allowed multiple - * times for the same batchbuffer), and the framebuffer code. When - * switching/pageflipping, the framebuffer code has at most two buffers - * pinned per crtc. - * - * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 - * bits with absolutely no headroom. So use 4 bits. */ - unsigned int pin_count:4; -#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf - /** * Is the object at the current location in the gtt mappable and * fenceable? Used to avoid costly recalculations. @@ -1697,7 +1770,6 @@ struct drm_i915_gem_object { /** for phy allocated objects */ struct drm_i915_gem_phys_object *phys_obj; }; -#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base) #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) @@ -1743,6 +1815,7 @@ struct drm_i915_gem_request { struct drm_i915_file_private { struct drm_i915_private *dev_priv; + struct drm_file *file; struct { spinlock_t lock; @@ -1751,11 +1824,95 @@ struct drm_i915_file_private { } mm; struct idr context_idr; - struct i915_ctx_hang_stats hang_stats; + struct i915_hw_context *private_default_ctx; atomic_t rps_wait_boost; }; -#define INTEL_INFO(dev) (to_i915(dev)->info) +/* + * A command that requires special handling by the command parser. + */ +struct drm_i915_cmd_descriptor { + /* + * Flags describing how the command parser processes the command. + * + * CMD_DESC_FIXED: The command has a fixed length if this is set, + * a length mask if not set + * CMD_DESC_SKIP: The command is allowed but does not follow the + * standard length encoding for the opcode range in + * which it falls + * CMD_DESC_REJECT: The command is never allowed + * CMD_DESC_REGISTER: The command should be checked against the + * register whitelist for the appropriate ring + * CMD_DESC_MASTER: The command is allowed if the submitting process + * is the DRM master + */ + u32 flags; +#define CMD_DESC_FIXED (1<<0) +#define CMD_DESC_SKIP (1<<1) +#define CMD_DESC_REJECT (1<<2) +#define CMD_DESC_REGISTER (1<<3) +#define CMD_DESC_BITMASK (1<<4) +#define CMD_DESC_MASTER (1<<5) + + /* + * The command's unique identification bits and the bitmask to get them. + * This isn't strictly the opcode field as defined in the spec and may + * also include type, subtype, and/or subop fields. + */ + struct { + u32 value; + u32 mask; + } cmd; + + /* + * The command's length. The command is either fixed length (i.e. does + * not include a length field) or has a length field mask. The flag + * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has + * a length mask. All command entries in a command table must include + * length information. + */ + union { + u32 fixed; + u32 mask; + } length; + + /* + * Describes where to find a register address in the command to check + * against the ring's register whitelist. Only valid if flags has the + * CMD_DESC_REGISTER bit set. + */ + struct { + u32 offset; + u32 mask; + } reg; + +#define MAX_CMD_DESC_BITMASKS 3 + /* + * Describes command checks where a particular dword is masked and + * compared against an expected value. If the command does not match + * the expected value, the parser rejects it. Only valid if flags has + * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero + * are valid. 
+ */ + struct { + u32 offset; + u32 mask; + u32 expected; + } bits[MAX_CMD_DESC_BITMASKS]; +}; + +/* + * A table of commands requiring special handling by the command parser. + * + * Each ring has an array of tables. Each table consists of an array of command + * descriptors, which must be sorted with command opcodes in ascending order. + */ +struct drm_i915_cmd_table { + const struct drm_i915_cmd_descriptor *table; + int count; +}; + +#define INTEL_INFO(dev) (&to_i915(dev)->info) #define IS_I830(dev) ((dev)->pdev->device == 0x3577) #define IS_845G(dev) ((dev)->pdev->device == 0x2562) @@ -1824,7 +1981,11 @@ struct drm_i915_file_private { #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) -#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev)) +#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) +#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \ + && !IS_BROADWELL(dev)) +#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false) +#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true) #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) @@ -1887,32 +2048,40 @@ struct drm_i915_file_private { extern const struct drm_ioctl_desc i915_ioctls[]; extern int i915_max_ioctl; -extern unsigned int i915_fbpercrtc __always_unused; -extern int i915_panel_ignore_lid __read_mostly; -extern unsigned int i915_powersave __read_mostly; -extern int i915_semaphores __read_mostly; -extern unsigned int i915_lvds_downclock __read_mostly; -extern int i915_lvds_channel_mode __read_mostly; -extern int i915_panel_use_ssc __read_mostly; -extern int i915_vbt_sdvo_panel_type __read_mostly; -extern int i915_enable_rc6 __read_mostly; -extern int i915_enable_fbc __read_mostly; -extern bool i915_enable_hangcheck __read_mostly; -extern int i915_enable_ppgtt __read_mostly; -extern int i915_enable_psr __read_mostly; -extern unsigned int i915_preliminary_hw_support __read_mostly; -extern int i915_disable_power_well __read_mostly; -extern int i915_enable_ips __read_mostly; -extern bool i915_fastboot __read_mostly; -extern int i915_enable_pc8 __read_mostly; -extern int i915_pc8_timeout __read_mostly; -extern bool i915_prefault_disable __read_mostly; extern int i915_suspend(struct drm_device *dev, pm_message_t state); extern int i915_resume(struct drm_device *dev); extern int i915_master_create(struct drm_device *dev, struct drm_master *master); extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); +/* i915_params.c */ +struct i915_params { + int modeset; + int panel_ignore_lid; + unsigned int powersave; + int semaphores; + unsigned int lvds_downclock; + int lvds_channel_mode; + int panel_use_ssc; + int vbt_sdvo_panel_type; + int enable_rc6; + int enable_fbc; + int enable_ppgtt; + int enable_psr; + unsigned int preliminary_hw_support; + int disable_power_well; + int enable_ips; + int invert_brightness; + int enable_cmd_parser; + /* leave bools at the end to not create holes */ + bool enable_hangcheck; + bool fastboot; + bool prefault_disable; + bool reset; + bool disable_display; +}; +extern struct i915_params i915 __read_mostly; + /* i915_dma.c */ void i915_update_dri1_breadcrumb(struct drm_device *dev); extern void i915_kernel_lost_context(struct drm_device * dev); @@ -1943,8 +2112,12 @@ extern void intel_console_resume(struct work_struct *work); /* i915_irq.c */ void 
i915_queue_hangcheck(struct drm_device *dev); -void i915_handle_error(struct drm_device *dev, bool wedged); +__printf(3, 4) +void i915_handle_error(struct drm_device *dev, bool wedged, + const char *fmt, ...); +void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir, + int new_delay); extern void intel_irq_init(struct drm_device *dev); extern void intel_hpd_init(struct drm_device *dev); @@ -1955,10 +2128,15 @@ extern void intel_uncore_check_errors(struct drm_device *dev); extern void intel_uncore_fini(struct drm_device *dev); void -i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask); +i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, + u32 status_mask); void -i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask); +i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, + u32 status_mask); + +void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); +void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); /* i915_gem.c */ int i915_gem_init_ioctl(struct drm_device *dev, void *data, @@ -2014,22 +2192,27 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops); struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, size_t size); +void i915_init_vm(struct drm_i915_private *dev_priv, + struct i915_address_space *vm); void i915_gem_free_object(struct drm_gem_object *obj); void i915_gem_vma_destroy(struct i915_vma *vma); +#define PIN_MAPPABLE 0x1 +#define PIN_NONBLOCK 0x2 +#define PIN_GLOBAL 0x4 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm, uint32_t alignment, - bool map_and_fenceable, - bool nonblocking); -void i915_gem_object_unpin(struct drm_i915_gem_object *obj); + unsigned flags); int __must_check i915_vma_unbind(struct i915_vma *vma); -int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj); int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); void i915_gem_release_mmap(struct drm_i915_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); +int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, + int *needs_clflush); + int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) { @@ -2096,8 +2279,10 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) } } +struct drm_i915_gem_request * +i915_gem_find_active_request(struct intel_ring_buffer *ring); + bool i915_gem_retire_requests(struct drm_device *dev); -void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, bool interruptible); static inline bool i915_reset_in_progress(struct i915_gpu_error *error) @@ -2186,6 +2371,13 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, struct i915_address_space *vm); struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj); +static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) { + struct i915_vma *vma; + list_for_each_entry(vma, &obj->vma_list, vma_link) + if (vma->pin_count > 0) + return true; + return false; +} /* Some GGTT VM helpers */ #define obj_to_ggtt(obj) \ @@ -2217,54 +2409,69 @@ i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) static inline int __must_check 
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, uint32_t alignment, - bool map_and_fenceable, - bool nonblocking) + unsigned flags) { - return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, - map_and_fenceable, nonblocking); + return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL); } +static inline int +i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) +{ + return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); +} + +void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj); + /* i915_gem_context.c */ +#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base) int __must_check i915_gem_context_init(struct drm_device *dev); void i915_gem_context_fini(struct drm_device *dev); +void i915_gem_context_reset(struct drm_device *dev); +int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); +int i915_gem_context_enable(struct drm_i915_private *dev_priv); void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); int i915_switch_context(struct intel_ring_buffer *ring, - struct drm_file *file, int to_id); + struct drm_file *file, struct i915_hw_context *to); +struct i915_hw_context * +i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); void i915_gem_context_free(struct kref *ctx_ref); static inline void i915_gem_context_reference(struct i915_hw_context *ctx) { - kref_get(&ctx->ref); + if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev)) + kref_get(&ctx->ref); } static inline void i915_gem_context_unreference(struct i915_hw_context *ctx) { - kref_put(&ctx->ref, i915_gem_context_free); + if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev)) + kref_put(&ctx->ref, i915_gem_context_free); +} + +static inline bool i915_gem_context_is_default(const struct i915_hw_context *c) +{ + return c->id == DEFAULT_CONTEXT_ID; } -struct i915_ctx_hang_stats * __must_check -i915_gem_context_get_hang_stats(struct drm_device *dev, - struct drm_file *file, - u32 id); int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -/* i915_gem_gtt.c */ -void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); -void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, - struct drm_i915_gem_object *obj, - enum i915_cache_level cache_level); -void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, - struct drm_i915_gem_object *obj); +/* i915_gem_evict.c */ +int __must_check i915_gem_evict_something(struct drm_device *dev, + struct i915_address_space *vm, + int min_size, + unsigned alignment, + unsigned cache_level, + unsigned flags); +int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); +int i915_gem_evict_everything(struct drm_device *dev); +/* i915_gem_gtt.c */ void i915_check_and_clear_faults(struct drm_device *dev); void i915_gem_suspend_gtt_mappings(struct drm_device *dev); void i915_gem_restore_gtt_mappings(struct drm_device *dev); int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); -void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, - enum i915_cache_level cache_level); -void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); void i915_gem_init_global_gtt(struct drm_device *dev); void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, @@ -2275,18 +2482,8 @@ static inline void i915_gem_chipset_flush(struct drm_device 
*dev) if (INTEL_INFO(dev)->gen < 6) intel_gtt_chipset_flush(); } - - -/* i915_gem_evict.c */ -int __must_check i915_gem_evict_something(struct drm_device *dev, - struct i915_address_space *vm, - int min_size, - unsigned alignment, - unsigned cache_level, - bool mappable, - bool nonblock); -int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); -int i915_gem_evict_everything(struct drm_device *dev); +int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt); +bool intel_enable_ppgtt(struct drm_device *dev, bool full); /* i915_gem_stolen.c */ int i915_gem_init_stolen(struct drm_device *dev); @@ -2305,7 +2502,7 @@ void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj); /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) { - drm_i915_private_t *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && obj->tiling_mode != I915_TILING_NONE; @@ -2343,7 +2540,8 @@ static inline void i915_error_state_buf_release( { kfree(eb->buf); } -void i915_capture_error_state(struct drm_device *dev); +void i915_capture_error_state(struct drm_device *dev, bool wedge, + const char *error_msg); void i915_error_state_get(struct drm_device *dev, struct i915_error_state_file_priv *error_priv); void i915_error_state_put(struct i915_error_state_file_priv *error_priv); @@ -2352,6 +2550,14 @@ void i915_destroy_error_state(struct drm_device *dev); void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); const char *i915_cache_level_str(int type); +/* i915_cmd_parser.c */ +void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring); +bool i915_needs_cmd_parser(struct intel_ring_buffer *ring); +int i915_parse_cmds(struct intel_ring_buffer *ring, + struct drm_i915_gem_object *batch_obj, + u32 batch_start_offset, + bool is_master); + /* i915_suspend.c */ extern int i915_save_state(struct drm_device *dev); extern int i915_restore_state(struct drm_device *dev); @@ -2425,10 +2631,12 @@ extern void intel_modeset_suspend_hw(struct drm_device *dev); extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_gem_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); +extern void intel_connector_unregister(struct intel_connector *); extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); extern void intel_modeset_setup_hw_state(struct drm_device *dev, bool force_restore); extern void i915_redisable_vga(struct drm_device *dev); +extern void i915_redisable_vga_power_on(struct drm_device *dev); extern bool intel_fbc_enabled(struct drm_device *dev); extern void intel_disable_fbc(struct drm_device *dev); extern bool ironlake_set_drps(struct drm_device *dev, u8 val); @@ -2463,6 +2671,7 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, */ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine); void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); +void assert_force_wake_inactive(struct drm_i915_private *dev_priv); int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); @@ -2525,9 +2734,26 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); #define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, 
(reg), false) #define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) +/* Be very careful with read/write 64-bit values. On 32-bit machines, they + * will be implemented using 2 32-bit writes in an arbitrary order with + * an arbitrary delay between them. This can cause the hardware to + * act upon the intermediate value, possibly leading to corruption and + * machine death. You have been warned. + */ #define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true) #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) +#define I915_READ64_2x32(lower_reg, upper_reg) ({ \ + u32 upper = I915_READ(upper_reg); \ + u32 lower = I915_READ(lower_reg); \ + u32 tmp = I915_READ(upper_reg); \ + if (upper != tmp) { \ + upper = tmp; \ + lower = I915_READ(lower_reg); \ + WARN_ON(I915_READ(upper_reg) != upper); \ + } \ + (u64)upper << 32 | lower; }) + #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) @@ -2566,4 +2792,31 @@ timespec_to_jiffies_timeout(const struct timespec *value) return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); } +/* + * If you need to wait X milliseconds between events A and B, but event B + * doesn't happen exactly after event A, you record the timestamp (jiffies) of + * when event A happened, then just before event B you call this function and + * pass the timestamp as the first argument, and X as the second argument. + */ +static inline void +wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) +{ + unsigned long target_jiffies, tmp_jiffies, remaining_jiffies; + + /* + * Don't re-read the value of "jiffies" every time since it may change + * behind our back and break the math. 
+ */ + tmp_jiffies = jiffies; + target_jiffies = timestamp_jiffies + + msecs_to_jiffies_timeout(to_wait_ms); + + if (time_after(target_jiffies, tmp_jiffies)) { + remaining_jiffies = target_jiffies - tmp_jiffies; + while (remaining_jiffies) + remaining_jiffies = + schedule_timeout_uninterruptible(remaining_jiffies); + } +} + #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 00c83615472..6370a761d13 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -43,12 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o static __must_check int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, bool readonly); -static __must_check int -i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - unsigned alignment, - bool map_and_fenceable, - bool nonblocking); static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -67,6 +61,7 @@ static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker, static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target); static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); +static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); static bool cpu_cache_is_coherent(struct drm_device *dev, enum i915_cache_level level) @@ -204,7 +199,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, pinned = 0; mutex_lock(&dev->struct_mutex); list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) - if (obj->pin_count) + if (i915_gem_obj_is_pinned(obj)) pinned += i915_gem_obj_ggtt_size(obj); mutex_unlock(&dev->struct_mutex); @@ -332,6 +327,42 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset, return 0; } +/* + * Pins the specified object's pages and synchronizes the object with + * GPU accesses. Sets needs_clflush to non-zero if the caller should + * flush the object from the CPU cache. + */ +int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, + int *needs_clflush) +{ + int ret; + + *needs_clflush = 0; + + if (!obj->base.filp) + return -EINVAL; + + if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { + /* If we're not in the cpu read domain, set ourself into the gtt + * read domain and manually flush cachelines (if required). This + * optimizes for the case when the gpu will dirty the data + * anyway again before the next pread happens. */ + *needs_clflush = !cpu_cache_is_coherent(obj->base.dev, + obj->cache_level); + ret = i915_gem_object_wait_rendering(obj, true); + if (ret) + return ret; + } + + ret = i915_gem_object_get_pages(obj); + if (ret) + return ret; + + i915_gem_object_pin_pages(obj); + + return ret; +} + /* Per-page copy function for the shmem pread fastpath. * Flushes invalid cachelines before reading the target if * needs_clflush is set. */ @@ -429,23 +460,10 @@ i915_gem_shmem_pread(struct drm_device *dev, obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { - /* If we're not in the cpu read domain, set ourself into the gtt - * read domain and manually flush cachelines (if required). This - * optimizes for the case when the gpu will dirty the data - * anyway again before the next pread happens. 
*/ - needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level); - ret = i915_gem_object_wait_rendering(obj, true); - if (ret) - return ret; - } - - ret = i915_gem_object_get_pages(obj); + ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush); if (ret) return ret; - i915_gem_object_pin_pages(obj); - offset = args->offset; for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, @@ -476,7 +494,7 @@ i915_gem_shmem_pread(struct drm_device *dev, mutex_unlock(&dev->struct_mutex); - if (likely(!i915_prefault_disable) && !prefaulted) { + if (likely(!i915.prefault_disable) && !prefaulted) { ret = fault_in_multipages_writeable(user_data, remain); /* Userspace is tricking us, but we've already clobbered * its pages with the prefault and promised to write the @@ -492,12 +510,10 @@ i915_gem_shmem_pread(struct drm_device *dev, mutex_lock(&dev->struct_mutex); -next_page: - mark_page_accessed(page); - if (ret) goto out; +next_page: remain -= page_length; user_data += page_length; offset += page_length; @@ -599,13 +615,13 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_i915_gem_pwrite *args, struct drm_file *file) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; ssize_t remain; loff_t offset, page_base; char __user *user_data; int page_offset, page_length, ret; - ret = i915_gem_obj_ggtt_pin(obj, 0, true, true); + ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK); if (ret) goto out; @@ -651,7 +667,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, } out_unpin: - i915_gem_object_unpin(obj); + i915_gem_object_ggtt_unpin(obj); out: return ret; } @@ -677,9 +693,8 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length, if (needs_clflush_before) drm_clflush_virt_range(vaddr + shmem_page_offset, page_length); - ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset, - user_data, - page_length); + ret = __copy_from_user_inatomic(vaddr + shmem_page_offset, + user_data, page_length); if (needs_clflush_after) drm_clflush_virt_range(vaddr + shmem_page_offset, page_length); @@ -813,13 +828,10 @@ i915_gem_shmem_pwrite(struct drm_device *dev, mutex_lock(&dev->struct_mutex); -next_page: - set_page_dirty(page); - mark_page_accessed(page); - if (ret) goto out; +next_page: remain -= page_length; user_data += page_length; offset += page_length; @@ -868,7 +880,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, args->size)) return -EFAULT; - if (likely(!i915_prefault_disable)) { + if (likely(!i915.prefault_disable)) { ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), args->size); if (ret) @@ -1014,7 +1026,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, struct timespec *timeout, struct drm_i915_file_private *file_priv) { - drm_i915_private_t *dev_priv = ring->dev->dev_private; + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; const bool irq_test_in_progress = ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); struct timespec before, now; @@ -1022,14 +1035,14 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, unsigned long timeout_expire; int ret; - WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n"); + WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n"); if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) return 0; timeout_expire = timeout ? 
jiffies + timespec_to_jiffies_timeout(timeout) : 0; - if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) { + if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) { gen6_rps_boost(dev_priv); if (file_priv) mod_delayed_work(dev_priv->wq, @@ -1184,7 +1197,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, */ static __must_check int i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, - struct drm_file *file, + struct drm_i915_file_private *file_priv, bool readonly) { struct drm_device *dev = obj->base.dev; @@ -1211,7 +1224,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); mutex_unlock(&dev->struct_mutex); - ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv); + ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv); mutex_lock(&dev->struct_mutex); if (ret) return ret; @@ -1260,7 +1273,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, * We will repeat the flush holding the lock in the normal manner * to catch cases where we are gazumped. */ - ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain); + ret = i915_gem_object_wait_rendering__nonblocking(obj, + file->driver_priv, + !write_domain); if (ret) goto unref; @@ -1374,7 +1389,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); struct drm_device *dev = obj->base.dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; pgoff_t page_offset; unsigned long pfn; int ret = 0; @@ -1392,6 +1407,15 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) trace_i915_gem_object_fault(obj, page_offset, true, write); + /* Try to flush the object off the GPU first without holding the lock. + * Upon reacquiring the lock, we will perform our sanity checks and then + * repeat the flush holding the lock in the normal manner to catch cases + * where we are gazumped. + */ + ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write); + if (ret) + goto unlock; + /* Access to snoopable pages through the GTT is incoherent. 
*/ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) { ret = -EINVAL; @@ -1399,7 +1423,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } /* Now bind it into the GTT if needed */ - ret = i915_gem_obj_ggtt_pin(obj, 0, true, false); + ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE); if (ret) goto unlock; @@ -1420,7 +1444,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Finally, remap it using the new GTT offset */ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); unpin: - i915_gem_object_unpin(obj); + i915_gem_object_ggtt_unpin(obj); unlock: mutex_unlock(&dev->struct_mutex); out: @@ -1453,6 +1477,7 @@ out: ret = VM_FAULT_OOM; break; case -ENOSPC: + case -EFAULT: ret = VM_FAULT_SIGBUS; break; default: @@ -1501,7 +1526,8 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) if (!obj->fault_mappable) return; - drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping); + drm_vma_node_unmap(&obj->base.vma_node, + obj->base.dev->anon_inode->i_mapping); obj->fault_mappable = false; } @@ -1617,8 +1643,8 @@ i915_gem_mmap_gtt(struct drm_file *file, } if (obj->madv != I915_MADV_WILLNEED) { - DRM_ERROR("Attempting to mmap a purgeable buffer\n"); - ret = -EINVAL; + DRM_DEBUG("Attempting to mmap a purgeable buffer\n"); + ret = -EFAULT; goto out; } @@ -1971,8 +1997,8 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj) return 0; if (obj->madv != I915_MADV_WILLNEED) { - DRM_ERROR("Attempting to obtain a purgeable object\n"); - return -EINVAL; + DRM_DEBUG("Attempting to obtain a purgeable object\n"); + return -EFAULT; } BUG_ON(obj->pages_pin_count); @@ -2035,13 +2061,17 @@ static void i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - struct i915_address_space *ggtt_vm = &dev_priv->gtt.base; - struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); + struct i915_address_space *vm; + struct i915_vma *vma; BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); BUG_ON(!obj->active); - list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list); + list_for_each_entry(vm, &dev_priv->vm_list, global_link) { + vma = i915_gem_obj_to_vma(obj, vm); + if (vma && !list_empty(&vma->mm_list)) + list_move_tail(&vma->mm_list, &vm->inactive_list); + } list_del_init(&obj->ring_list); obj->ring = NULL; @@ -2134,10 +2164,9 @@ int __i915_add_request(struct intel_ring_buffer *ring, struct drm_i915_gem_object *obj, u32 *out_seqno) { - drm_i915_private_t *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = ring->dev->dev_private; struct drm_i915_gem_request *request; u32 request_ring_position, request_start; - int was_empty; int ret; request_start = intel_ring_get_tail(ring); @@ -2188,7 +2217,6 @@ int __i915_add_request(struct intel_ring_buffer *ring, i915_gem_context_reference(request->ctx); request->emitted_jiffies = jiffies; - was_empty = list_empty(&ring->request_list); list_add_tail(&request->list, &ring->request_list); request->file_priv = NULL; @@ -2209,13 +2237,11 @@ int __i915_add_request(struct intel_ring_buffer *ring, if (!dev_priv->ums.mm_suspended) { i915_queue_hangcheck(ring->dev); - if (was_empty) { - cancel_delayed_work_sync(&dev_priv->mm.idle_work); - queue_delayed_work(dev_priv->wq, - &dev_priv->mm.retire_work, - round_jiffies_up_relative(HZ)); - intel_mark_busy(dev_priv->dev); - } + cancel_delayed_work_sync(&dev_priv->mm.idle_work); + queue_delayed_work(dev_priv->wq, + &dev_priv->mm.retire_work, + 
round_jiffies_up_relative(HZ)); + intel_mark_busy(dev_priv->dev); } if (out_seqno) @@ -2237,125 +2263,46 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) spin_unlock(&file_priv->mm.lock); } -static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj, - struct i915_address_space *vm) +static bool i915_context_is_banned(struct drm_i915_private *dev_priv, + const struct i915_hw_context *ctx) { - if (acthd >= i915_gem_obj_offset(obj, vm) && - acthd < i915_gem_obj_offset(obj, vm) + obj->base.size) - return true; + unsigned long elapsed; - return false; -} + elapsed = get_seconds() - ctx->hang_stats.guilty_ts; -static bool i915_head_inside_request(const u32 acthd_unmasked, - const u32 request_start, - const u32 request_end) -{ - const u32 acthd = acthd_unmasked & HEAD_ADDR; + if (ctx->hang_stats.banned) + return true; - if (request_start < request_end) { - if (acthd >= request_start && acthd < request_end) - return true; - } else if (request_start > request_end) { - if (acthd >= request_start || acthd < request_end) + if (elapsed <= DRM_I915_CTX_BAN_PERIOD) { + if (!i915_gem_context_is_default(ctx)) { + DRM_DEBUG("context hanging too fast, banning!\n"); return true; - } - - return false; -} - -static struct i915_address_space * -request_to_vm(struct drm_i915_gem_request *request) -{ - struct drm_i915_private *dev_priv = request->ring->dev->dev_private; - struct i915_address_space *vm; - - vm = &dev_priv->gtt.base; - - return vm; -} - -static bool i915_request_guilty(struct drm_i915_gem_request *request, - const u32 acthd, bool *inside) -{ - /* There is a possibility that unmasked head address - * pointing inside the ring, matches the batch_obj address range. - * However this is extremely unlikely. - */ - if (request->batch_obj) { - if (i915_head_inside_object(acthd, request->batch_obj, - request_to_vm(request))) { - *inside = true; + } else if (dev_priv->gpu_error.stop_rings == 0) { + DRM_ERROR("gpu hanging too fast, banning!\n"); return true; } } - if (i915_head_inside_request(acthd, request->head, request->tail)) { - *inside = false; - return true; - } - return false; } -static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs) +static void i915_set_reset_status(struct drm_i915_private *dev_priv, + struct i915_hw_context *ctx, + const bool guilty) { - const unsigned long elapsed = get_seconds() - hs->guilty_ts; - - if (hs->banned) - return true; - - if (elapsed <= DRM_I915_CTX_BAN_PERIOD) { - DRM_ERROR("context hanging too fast, declaring banned!\n"); - return true; - } - - return false; -} + struct i915_ctx_hang_stats *hs; -static void i915_set_reset_status(struct intel_ring_buffer *ring, - struct drm_i915_gem_request *request, - u32 acthd) -{ - struct i915_ctx_hang_stats *hs = NULL; - bool inside, guilty; - unsigned long offset = 0; - - /* Innocent until proven guilty */ - guilty = false; - - if (request->batch_obj) - offset = i915_gem_obj_offset(request->batch_obj, - request_to_vm(request)); + if (WARN_ON(!ctx)) + return; - if (ring->hangcheck.action != HANGCHECK_WAIT && - i915_request_guilty(request, acthd, &inside)) { - DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n", - ring->name, - inside ? "inside" : "flushing", - offset, - request->ctx ? 
request->ctx->id : 0, - acthd); + hs = &ctx->hang_stats; - guilty = true; - } - - /* If contexts are disabled or this is the default context, use - * file_priv->reset_state - */ - if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID) - hs = &request->ctx->hang_stats; - else if (request->file_priv) - hs = &request->file_priv->hang_stats; - - if (hs) { - if (guilty) { - hs->banned = i915_context_is_banned(hs); - hs->batch_active++; - hs->guilty_ts = get_seconds(); - } else { - hs->batch_pending++; - } + if (guilty) { + hs->banned = i915_context_is_banned(dev_priv, ctx); + hs->batch_active++; + hs->guilty_ts = get_seconds(); + } else { + hs->batch_pending++; } } @@ -2370,19 +2317,41 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request) kfree(request); } -static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, - struct intel_ring_buffer *ring) +struct drm_i915_gem_request * +i915_gem_find_active_request(struct intel_ring_buffer *ring) { - u32 completed_seqno = ring->get_seqno(ring, false); - u32 acthd = intel_ring_get_active_head(ring); struct drm_i915_gem_request *request; + u32 completed_seqno; + + completed_seqno = ring->get_seqno(ring, false); list_for_each_entry(request, &ring->request_list, list) { if (i915_seqno_passed(completed_seqno, request->seqno)) continue; - i915_set_reset_status(ring, request, acthd); + return request; } + + return NULL; +} + +static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, + struct intel_ring_buffer *ring) +{ + struct drm_i915_gem_request *request; + bool ring_hung; + + request = i915_gem_find_active_request(ring); + + if (request == NULL) + return; + + ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; + + i915_set_reset_status(dev_priv, request->ctx, ring_hung); + + list_for_each_entry_continue(request, &ring->request_list, list) + i915_set_reset_status(dev_priv, request->ctx, false); } static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, @@ -2456,13 +2425,15 @@ void i915_gem_reset(struct drm_device *dev) i915_gem_cleanup_ringbuffer(dev); + i915_gem_context_reset(dev); + i915_gem_restore_fences(dev); } /** * This function clears the request list as sequence numbers are passed. */ -void +static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) { uint32_t seqno; @@ -2474,6 +2445,24 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) seqno = ring->get_seqno(ring, true); + /* Move any buffers on the active list that are no longer referenced + * by the ringbuffer to the flushing/inactive lists as appropriate, + * before we free the context associated with the requests. + */ + while (!list_empty(&ring->active_list)) { + struct drm_i915_gem_object *obj; + + obj = list_first_entry(&ring->active_list, + struct drm_i915_gem_object, + ring_list); + + if (!i915_seqno_passed(seqno, obj->last_read_seqno)) + break; + + i915_gem_object_move_to_inactive(obj); + } + + while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; @@ -2495,22 +2484,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) i915_gem_free_request(request); } - /* Move any buffers on the active list that are no longer referenced - * by the ringbuffer to the flushing/inactive lists as appropriate. 
- */ - while (!list_empty(&ring->active_list)) { - struct drm_i915_gem_object *obj; - - obj = list_first_entry(&ring->active_list, - struct drm_i915_gem_object, - ring_list); - - if (!i915_seqno_passed(seqno, obj->last_read_seqno)) - break; - - i915_gem_object_move_to_inactive(obj); - } - if (unlikely(ring->trace_irq_seqno && i915_seqno_passed(seqno, ring->trace_irq_seqno))) { ring->irq_put(ring); @@ -2523,7 +2496,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) bool i915_gem_retire_requests(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; bool idle = true; int i; @@ -2615,7 +2588,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj) int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_wait *args = data; struct drm_i915_gem_object *obj; struct intel_ring_buffer *ring = NULL; @@ -2750,22 +2723,18 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) int i915_vma_unbind(struct i915_vma *vma) { struct drm_i915_gem_object *obj = vma->obj; - drm_i915_private_t *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; int ret; - /* For now we only ever use 1 vma per object */ - WARN_ON(!list_is_singular(&obj->vma_list)); - if (list_empty(&vma->vma_link)) return 0; if (!drm_mm_node_allocated(&vma->node)) { i915_gem_vma_destroy(vma); - return 0; } - if (obj->pin_count) + if (vma->pin_count) return -EBUSY; BUG_ON(obj->pages == NULL); @@ -2787,15 +2756,11 @@ int i915_vma_unbind(struct i915_vma *vma) trace_i915_vma_unbind(vma); - if (obj->has_global_gtt_mapping) - i915_gem_gtt_unbind_object(obj); - if (obj->has_aliasing_ppgtt_mapping) { - i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj); - obj->has_aliasing_ppgtt_mapping = 0; - } + vma->unbind_vma(vma); + i915_gem_gtt_finish_object(obj); - list_del(&vma->mm_list); + list_del_init(&vma->mm_list); /* Avoid an unnecessary call to unbind on rebind. */ if (i915_is_ggtt(vma->vm)) obj->map_and_fenceable = true; @@ -2817,35 +2782,15 @@ int i915_vma_unbind(struct i915_vma *vma) return 0; } -/** - * Unbinds an object from the global GTT aperture. - */ -int -i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - struct i915_address_space *ggtt = &dev_priv->gtt.base; - - if (!i915_gem_obj_ggtt_bound(obj)) - return 0; - - if (obj->pin_count) - return -EBUSY; - - BUG_ON(obj->pages == NULL); - - return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt)); -} - int i915_gpu_idle(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; int ret, i; /* Flush everything onto the inactive list. 
*/ for_each_ring(ring, dev_priv, i) { - ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID); + ret = i915_switch_context(ring, NULL, ring->default_context); if (ret) return ret; @@ -2860,7 +2805,7 @@ int i915_gpu_idle(struct drm_device *dev) static void i965_write_fence_reg(struct drm_device *dev, int reg, struct drm_i915_gem_object *obj) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int fence_reg; int fence_pitch_shift; @@ -2912,7 +2857,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, static void i915_write_fence_reg(struct drm_device *dev, int reg, struct drm_i915_gem_object *obj) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 val; if (obj) { @@ -2956,7 +2901,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg, static void i830_write_fence_reg(struct drm_device *dev, int reg, struct drm_i915_gem_object *obj) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; uint32_t val; if (obj) { @@ -3259,18 +3204,17 @@ static void i915_gem_verify_gtt(struct drm_device *dev) /** * Finds free space in the GTT aperture and binds the object there. */ -static int +static struct i915_vma * i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, struct i915_address_space *vm, unsigned alignment, - bool map_and_fenceable, - bool nonblocking) + unsigned flags) { struct drm_device *dev = obj->base.dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 size, fence_size, fence_alignment, unfenced_alignment; size_t gtt_max = - map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total; + flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; struct i915_vma *vma; int ret; @@ -3282,57 +3226,49 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, obj->tiling_mode, true); unfenced_alignment = i915_gem_get_gtt_alignment(dev, - obj->base.size, - obj->tiling_mode, false); + obj->base.size, + obj->tiling_mode, false); if (alignment == 0) - alignment = map_and_fenceable ? fence_alignment : + alignment = flags & PIN_MAPPABLE ? fence_alignment : unfenced_alignment; - if (map_and_fenceable && alignment & (fence_alignment - 1)) { - DRM_ERROR("Invalid object alignment requested %u\n", alignment); - return -EINVAL; + if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) { + DRM_DEBUG("Invalid object alignment requested %u\n", alignment); + return ERR_PTR(-EINVAL); } - size = map_and_fenceable ? fence_size : obj->base.size; + size = flags & PIN_MAPPABLE ? fence_size : obj->base.size; /* If the object is bigger than the entire aperture, reject it early * before evicting everything in a vain attempt to find space. */ if (obj->base.size > gtt_max) { - DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n", + DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n", obj->base.size, - map_and_fenceable ? "mappable" : "total", + flags & PIN_MAPPABLE ? 
"mappable" : "total", gtt_max); - return -E2BIG; + return ERR_PTR(-E2BIG); } ret = i915_gem_object_get_pages(obj); if (ret) - return ret; + return ERR_PTR(ret); i915_gem_object_pin_pages(obj); - BUG_ON(!i915_is_ggtt(vm)); - vma = i915_gem_obj_lookup_or_create_vma(obj, vm); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); + if (IS_ERR(vma)) goto err_unpin; - } - - /* For now we only ever use 1 vma per object */ - WARN_ON(!list_is_singular(&obj->vma_list)); search_free: ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, size, alignment, obj->cache_level, 0, gtt_max, - DRM_MM_SEARCH_DEFAULT); + DRM_MM_SEARCH_DEFAULT, + DRM_MM_CREATE_DEFAULT); if (ret) { ret = i915_gem_evict_something(dev, vm, size, alignment, - obj->cache_level, - map_and_fenceable, - nonblocking); + obj->cache_level, flags); if (ret == 0) goto search_free; @@ -3363,19 +3299,23 @@ search_free: obj->map_and_fenceable = mappable && fenceable; } - WARN_ON(map_and_fenceable && !obj->map_and_fenceable); + WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); + + trace_i915_vma_bind(vma, flags); + vma->bind_vma(vma, obj->cache_level, + flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0); - trace_i915_vma_bind(vma, map_and_fenceable); i915_gem_verify_gtt(dev); - return 0; + return vma; err_remove_node: drm_mm_remove_node(&vma->node); err_free_vma: i915_gem_vma_destroy(vma); + vma = ERR_PTR(ret); err_unpin: i915_gem_object_unpin_pages(obj); - return ret; + return vma; } bool @@ -3470,7 +3410,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) { - drm_i915_private_t *dev_priv = obj->base.dev->dev_private; + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; uint32_t old_write_domain, old_read_domains; int ret; @@ -3528,25 +3468,22 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { struct drm_device *dev = obj->base.dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct i915_vma *vma; + struct i915_vma *vma, *next; int ret; if (obj->cache_level == cache_level) return 0; - if (obj->pin_count) { + if (i915_gem_obj_is_pinned(obj)) { DRM_DEBUG("can not change the cache level of pinned objects\n"); return -EBUSY; } - list_for_each_entry(vma, &obj->vma_list, vma_link) { + list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { ret = i915_vma_unbind(vma); if (ret) return ret; - - break; } } @@ -3567,11 +3504,10 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, return ret; } - if (obj->has_global_gtt_mapping) - i915_gem_gtt_bind_object(obj, cache_level); - if (obj->has_aliasing_ppgtt_mapping) - i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, - obj, cache_level); + list_for_each_entry(vma, &obj->vma_list, vma_link) + if (drm_mm_node_allocated(&vma->node)) + vma->bind_vma(vma, cache_level, + obj->has_global_gtt_mapping ? GLOBAL_BIND : 0); } list_for_each_entry(vma, &obj->vma_list, vma_link) @@ -3695,7 +3631,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj) * subtracting the potential reference by the user, any pin_count * remains, it must be due to another use by the display engine. */ - return obj->pin_count - !!obj->user_pin_count; + return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count; } /* @@ -3740,7 +3676,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, * (e.g. 
libkms for the bootup splash), we have to ensure that we * always use map_and_fenceable for all scanout buffers. */ - ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false); + ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE); if (ret) goto err_unpin_display; @@ -3769,7 +3705,7 @@ err_unpin_display: void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj) { - i915_gem_object_unpin(obj); + i915_gem_object_ggtt_unpin(obj); obj->pin_display = is_pin_display(obj); } @@ -3896,65 +3832,63 @@ int i915_gem_object_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm, uint32_t alignment, - bool map_and_fenceable, - bool nonblocking) + unsigned flags) { struct i915_vma *vma; int ret; - if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) - return -EBUSY; - - WARN_ON(map_and_fenceable && !i915_is_ggtt(vm)); + if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm))) + return -EINVAL; vma = i915_gem_obj_to_vma(obj, vm); - if (vma) { + if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) + return -EBUSY; + if ((alignment && vma->node.start & (alignment - 1)) || - (map_and_fenceable && !obj->map_and_fenceable)) { - WARN(obj->pin_count, + (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) { + WARN(vma->pin_count, "bo is already pinned with incorrect alignment:" " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," " obj->map_and_fenceable=%d\n", i915_gem_obj_offset(obj, vm), alignment, - map_and_fenceable, + flags & PIN_MAPPABLE, obj->map_and_fenceable); ret = i915_vma_unbind(vma); if (ret) return ret; + + vma = NULL; } } - if (!i915_gem_obj_bound(obj, vm)) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - - ret = i915_gem_object_bind_to_vm(obj, vm, alignment, - map_and_fenceable, - nonblocking); - if (ret) - return ret; - - if (!dev_priv->mm.aliasing_ppgtt) - i915_gem_gtt_bind_object(obj, obj->cache_level); + if (vma == NULL || !drm_mm_node_allocated(&vma->node)) { + vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags); + if (IS_ERR(vma)) + return PTR_ERR(vma); } - if (!obj->has_global_gtt_mapping && map_and_fenceable) - i915_gem_gtt_bind_object(obj, obj->cache_level); + if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping) + vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND); - obj->pin_count++; - obj->pin_mappable |= map_and_fenceable; + vma->pin_count++; + if (flags & PIN_MAPPABLE) + obj->pin_mappable |= true; return 0; } void -i915_gem_object_unpin(struct drm_i915_gem_object *obj) +i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) { - BUG_ON(obj->pin_count == 0); - BUG_ON(!i915_gem_obj_bound_any(obj)); + struct i915_vma *vma = i915_gem_obj_to_ggtt(obj); - if (--obj->pin_count == 0) + BUG_ON(!vma); + BUG_ON(vma->pin_count == 0); + BUG_ON(!i915_gem_obj_ggtt_bound(obj)); + + if (--vma->pin_count == 0) obj->pin_mappable = false; } @@ -3966,6 +3900,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj; int ret; + if (INTEL_INFO(dev)->gen >= 6) + return -ENODEV; + ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; @@ -3977,13 +3914,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, } if (obj->madv != I915_MADV_WILLNEED) { - DRM_ERROR("Attempting to pin a purgeable buffer\n"); - ret = -EINVAL; + DRM_DEBUG("Attempting to pin a purgeable buffer\n"); + ret = -EFAULT; goto out; } if (obj->pin_filp != NULL && obj->pin_filp != file) { - DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", + DRM_DEBUG("Already pinned in 
i915_gem_pin_ioctl(): %d\n", args->handle); ret = -EINVAL; goto out; @@ -3995,7 +3932,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, } if (obj->user_pin_count == 0) { - ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false); + ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE); if (ret) goto out; } @@ -4030,7 +3967,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, } if (obj->pin_filp != file) { - DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", + DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", args->handle); ret = -EINVAL; goto out; @@ -4038,7 +3975,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, obj->user_pin_count--; if (obj->user_pin_count == 0) { obj->pin_filp = NULL; - i915_gem_object_unpin(obj); + i915_gem_object_ggtt_unpin(obj); } out: @@ -4118,7 +4055,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, goto unlock; } - if (obj->pin_count) { + if (i915_gem_obj_is_pinned(obj)) { ret = -EINVAL; goto out; } @@ -4219,7 +4156,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) { struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); struct drm_device *dev = obj->base.dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct i915_vma *vma, *next; intel_runtime_pm_get(dev_priv); @@ -4229,12 +4166,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) if (obj->phys_obj) i915_gem_detach_phys_object(dev, obj); - obj->pin_count = 0; - /* NB: 0 or 1 elements */ - WARN_ON(!list_empty(&obj->vma_list) && - !list_is_singular(&obj->vma_list)); list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { - int ret = i915_vma_unbind(vma); + int ret; + + vma->pin_count = 0; + ret = i915_vma_unbind(vma); if (WARN_ON(ret == -ERESTARTSYS)) { bool was_interruptible; @@ -4283,41 +4219,6 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, return NULL; } -static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, - struct i915_address_space *vm) -{ - struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); - if (vma == NULL) - return ERR_PTR(-ENOMEM); - - INIT_LIST_HEAD(&vma->vma_link); - INIT_LIST_HEAD(&vma->mm_list); - INIT_LIST_HEAD(&vma->exec_list); - vma->vm = vm; - vma->obj = obj; - - /* Keep GGTT vmas first to make debug easier */ - if (i915_is_ggtt(vm)) - list_add(&vma->vma_link, &obj->vma_list); - else - list_add_tail(&vma->vma_link, &obj->vma_list); - - return vma; -} - -struct i915_vma * -i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm) -{ - struct i915_vma *vma; - - vma = i915_gem_obj_to_vma(obj, vm); - if (!vma) - vma = __i915_gem_vma_create(obj, vm); - - return vma; -} - void i915_gem_vma_destroy(struct i915_vma *vma) { WARN_ON(vma->node.allocated); @@ -4334,7 +4235,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma) int i915_gem_suspend(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int ret = 0; mutex_lock(&dev->struct_mutex); @@ -4376,7 +4277,7 @@ err: int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200); u32 *remap_info = dev_priv->l3_parity.remap_info[slice]; int i, ret; @@ -4406,7 +4307,7 @@ int i915_gem_l3_remap(struct 
intel_ring_buffer *ring, int slice) void i915_gem_init_swizzling(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_INFO(dev)->gen < 5 || dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) @@ -4494,7 +4395,7 @@ cleanup_render_ring: int i915_gem_init_hw(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int ret, i; if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) @@ -4508,9 +4409,15 @@ i915_gem_init_hw(struct drm_device *dev) LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); if (HAS_PCH_NOP(dev)) { - u32 temp = I915_READ(GEN7_MSG_CTL); - temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); - I915_WRITE(GEN7_MSG_CTL, temp); + if (IS_IVYBRIDGE(dev)) { + u32 temp = I915_READ(GEN7_MSG_CTL); + temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); + I915_WRITE(GEN7_MSG_CTL, temp); + } else if (INTEL_INFO(dev)->gen >= 7) { + u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT); + temp &= ~RESET_PCH_HANDSHAKE_ENABLE; + I915_WRITE(HSW_NDE_RSTWRN_OPT, temp); + } } i915_gem_init_swizzling(dev); @@ -4523,25 +4430,23 @@ i915_gem_init_hw(struct drm_device *dev) i915_gem_l3_remap(&dev_priv->ring[RCS], i); /* - * XXX: There was some w/a described somewhere suggesting loading - * contexts before PPGTT. + * XXX: Contexts should only be initialized once. Doing a switch to the + * default context however is something we'd like to do after + * reset or thaw (the latter may not actually be necessary for HW, but + * goes with our code better). Context switching requires rings (for + * the do_switch), but must happen before enabling PPGTT. So don't move this. */ - ret = i915_gem_context_init(dev); + ret = i915_gem_context_enable(dev_priv); if (ret) { - i915_gem_cleanup_ringbuffer(dev); - DRM_ERROR("Context initialization failed %d\n", ret); - return ret; - } - - if (dev_priv->mm.aliasing_ppgtt) { - ret = dev_priv->mm.aliasing_ppgtt->enable(dev); - if (ret) { - i915_gem_cleanup_aliasing_ppgtt(dev); - DRM_INFO("PPGTT enable failed. 
This is not fatal, but unexpected\n"); - } + DRM_ERROR("Context enable failed %d\n", ret); + goto err_out; } return 0; + +err_out: + i915_gem_cleanup_ringbuffer(dev); + return ret; } int i915_gem_init(struct drm_device *dev) @@ -4560,10 +4465,18 @@ int i915_gem_init(struct drm_device *dev) i915_gem_init_global_gtt(dev); + ret = i915_gem_context_init(dev); + if (ret) { + mutex_unlock(&dev->struct_mutex); + return ret; + } + ret = i915_gem_init_hw(dev); mutex_unlock(&dev->struct_mutex); if (ret) { - i915_gem_cleanup_aliasing_ppgtt(dev); + WARN_ON(dev_priv->mm.aliasing_ppgtt); + i915_gem_context_fini(dev); + drm_mm_takedown(&dev_priv->gtt.base.mm); return ret; } @@ -4576,7 +4489,7 @@ int i915_gem_init(struct drm_device *dev) void i915_gem_cleanup_ringbuffer(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; int i; @@ -4658,20 +4571,22 @@ init_ring_lists(struct intel_ring_buffer *ring) INIT_LIST_HEAD(&ring->request_list); } -static void i915_init_vm(struct drm_i915_private *dev_priv, - struct i915_address_space *vm) +void i915_init_vm(struct drm_i915_private *dev_priv, + struct i915_address_space *vm) { + if (!i915_is_ggtt(vm)) + drm_mm_init(&vm->mm, vm->start, vm->total); vm->dev = dev_priv->dev; INIT_LIST_HEAD(&vm->active_list); INIT_LIST_HEAD(&vm->inactive_list); INIT_LIST_HEAD(&vm->global_link); - list_add(&vm->global_link, &dev_priv->vm_list); + list_add_tail(&vm->global_link, &dev_priv->vm_list); } void i915_gem_load(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int i; dev_priv->slab = @@ -4738,7 +4653,7 @@ i915_gem_load(struct drm_device *dev) static int i915_gem_init_phys_object(struct drm_device *dev, int id, int size, int align) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_phys_object *phys_obj; int ret; @@ -4770,7 +4685,7 @@ kfree_obj: static void i915_gem_free_phys_object(struct drm_device *dev, int id) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_phys_object *phys_obj; if (!dev_priv->mm.phys_objs[id - 1]) @@ -4837,7 +4752,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, int align) { struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int ret = 0; int page_count; int i; @@ -4950,6 +4865,7 @@ i915_gem_file_idle_work_handler(struct work_struct *work) int i915_gem_open(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv; + int ret; DRM_DEBUG_DRIVER("\n"); @@ -4959,15 +4875,18 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file) file->driver_priv = file_priv; file_priv->dev_priv = dev->dev_private; + file_priv->file = file; spin_lock_init(&file_priv->mm.lock); INIT_LIST_HEAD(&file_priv->mm.request_list); INIT_DELAYED_WORK(&file_priv->mm.idle_work, i915_gem_file_idle_work_handler); - idr_init(&file_priv->context_idr); + ret = i915_gem_context_open(dev, file); + if (ret) + kfree(file_priv); - return 0; + return ret; } static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) @@ -5014,7 +4933,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc) if (obj->active) continue; - if (obj->pin_count == 
0 && obj->pages_pin_count == 0) + if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0) count += obj->base.size >> PAGE_SHIFT; } @@ -5031,7 +4950,8 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, struct drm_i915_private *dev_priv = o->base.dev->dev_private; struct i915_vma *vma; - if (vm == &dev_priv->mm.aliasing_ppgtt->base) + if (!dev_priv->mm.aliasing_ppgtt || + vm == &dev_priv->mm.aliasing_ppgtt->base) vm = &dev_priv->gtt.base; BUG_ON(list_empty(&o->vma_list)); @@ -5072,7 +4992,8 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, struct drm_i915_private *dev_priv = o->base.dev->dev_private; struct i915_vma *vma; - if (vm == &dev_priv->mm.aliasing_ppgtt->base) + if (!dev_priv->mm.aliasing_ppgtt || + vm == &dev_priv->mm.aliasing_ppgtt->base) vm = &dev_priv->gtt.base; BUG_ON(list_empty(&o->vma_list)); @@ -5127,7 +5048,7 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) return NULL; vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link); - if (WARN_ON(vma->vm != obj_to_ggtt(obj))) + if (vma->vm != obj_to_ggtt(obj)) return NULL; return vma; diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index e08acaba540..6043062ffce 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -93,11 +93,63 @@ * I've seen in a spec to date, and that was a workaround for a non-shipping * part. It should be safe to decrease this, but it's more future proof as is. */ -#define CONTEXT_ALIGN (64<<10) +#define GEN6_CONTEXT_ALIGN (64<<10) +#define GEN7_CONTEXT_ALIGN 4096 -static struct i915_hw_context * -i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); -static int do_switch(struct i915_hw_context *to); +static int do_switch(struct intel_ring_buffer *ring, + struct i915_hw_context *to); + +static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) +{ + struct drm_device *dev = ppgtt->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_address_space *vm = &ppgtt->base; + + if (ppgtt == dev_priv->mm.aliasing_ppgtt || + (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) { + ppgtt->base.cleanup(&ppgtt->base); + return; + } + + /* + * Make sure vmas are unbound before we take down the drm_mm. + * + * FIXME: Proper refcounting should take care of this; it shouldn't be + * needed at all. 
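 + *
 + * For orientation only (our sketch of standard kref usage, not lines
 + * from this patch): the ppgtt lifetime follows the usual kernel
 + * refcounting pattern, with ppgtt_release() below as the release
 + * callback:
 + *
 + *	kref_init(&ppgtt->ref);			one reference at creation
 + *	kref_get(&ppgtt->ref);			each context sharing the vm
 + *	kref_put(&ppgtt->ref, ppgtt_release);	last put tears everything down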
+ */ + if (!list_empty(&vm->active_list)) { + struct i915_vma *vma; + + list_for_each_entry(vma, &vm->active_list, mm_list) + if (WARN_ON(list_empty(&vma->vma_link) || + list_is_singular(&vma->vma_link))) + break; + + i915_gem_evict_vm(&ppgtt->base, true); + } else { + i915_gem_retire_requests(dev); + i915_gem_evict_vm(&ppgtt->base, false); + } + + ppgtt->base.cleanup(&ppgtt->base); +} + +static void ppgtt_release(struct kref *kref) +{ + struct i915_hw_ppgtt *ppgtt = + container_of(kref, struct i915_hw_ppgtt, ref); + + do_ppgtt_cleanup(ppgtt); + kfree(ppgtt); +} + +static size_t get_context_alignment(struct drm_device *dev) +{ + if (IS_GEN6(dev)) + return GEN6_CONTEXT_ALIGN; + + return GEN7_CONTEXT_ALIGN; +} static int get_context_size(struct drm_device *dev) { @@ -131,14 +183,44 @@ void i915_gem_context_free(struct kref *ctx_ref) { struct i915_hw_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); + struct i915_hw_ppgtt *ppgtt = NULL; - list_del(&ctx->link); + /* We refcount even the aliasing PPGTT to keep the code symmetric */ + if (USES_PPGTT(ctx->obj->base.dev)) + ppgtt = ctx_to_ppgtt(ctx); + + /* XXX: Free up the object before tearing down the address space, in + * case we're bound in the PPGTT */ drm_gem_object_unreference(&ctx->obj->base); + + if (ppgtt) + kref_put(&ppgtt->ref, ppgtt_release); + list_del(&ctx->link); kfree(ctx); } +static struct i915_hw_ppgtt * +create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx) +{ + struct i915_hw_ppgtt *ppgtt; + int ret; + + ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); + if (!ppgtt) + return ERR_PTR(-ENOMEM); + + ret = i915_gem_init_ppgtt(dev, ppgtt); + if (ret) { + kfree(ppgtt); + return ERR_PTR(ret); + } + + ppgtt->ctx = ctx; + return ppgtt; +} + static struct i915_hw_context * -create_hw_context(struct drm_device *dev, +__create_hw_context(struct drm_device *dev, struct drm_i915_file_private *file_priv) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -166,18 +248,13 @@ create_hw_context(struct drm_device *dev, goto err_out; } - /* The ring associated with the context object is handled by the normal - * object tracking code. We give an initial ring value simple to pass an - * assertion in the context switch code. - */ - ctx->ring = &dev_priv->ring[RCS]; list_add_tail(&ctx->link, &dev_priv->context_list); /* Default context will never have a file_priv */ if (file_priv == NULL) return ctx; - ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0, + ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID, 0, GFP_KERNEL); if (ret < 0) goto err_out; @@ -196,67 +273,136 @@ err_out: return ERR_PTR(ret); } -static inline bool is_default_context(struct i915_hw_context *ctx) -{ - return (ctx == ctx->ring->default_context); -} - /** * The default context needs to exist per ring that uses contexts. It stores the * context state of the GPU for applications that don't utilize HW contexts, as * well as an idle case. 
*/ -static int create_default_context(struct drm_i915_private *dev_priv) +static struct i915_hw_context * +i915_gem_create_context(struct drm_device *dev, + struct drm_i915_file_private *file_priv, + bool create_vm) { + const bool is_global_default_ctx = file_priv == NULL; + struct drm_i915_private *dev_priv = dev->dev_private; struct i915_hw_context *ctx; - int ret; + int ret = 0; - BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); - ctx = create_hw_context(dev_priv->dev, NULL); + ctx = __create_hw_context(dev, file_priv); if (IS_ERR(ctx)) - return PTR_ERR(ctx); - - /* We may need to do things with the shrinker which require us to - * immediately switch back to the default context. This can cause a - * problem as pinning the default context also requires GTT space which - * may not be available. To avoid this we always pin the - * default context. - */ - ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false); - if (ret) { - DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); - goto err_destroy; - } + return ctx; - ret = do_switch(ctx); - if (ret) { - DRM_DEBUG_DRIVER("Switch failed %d\n", ret); - goto err_unpin; + if (is_global_default_ctx) { + /* We may need to do things with the shrinker which + * require us to immediately switch back to the default + * context. This can cause a problem as pinning the + * default context also requires GTT space which may not + * be available. To avoid this we always pin the default + * context. + */ + ret = i915_gem_obj_ggtt_pin(ctx->obj, + get_context_alignment(dev), 0); + if (ret) { + DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); + goto err_destroy; + } } - dev_priv->ring[RCS].default_context = ctx; + if (create_vm) { + struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx); + + if (IS_ERR_OR_NULL(ppgtt)) { + DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", + PTR_ERR(ppgtt)); + ret = PTR_ERR(ppgtt); + goto err_unpin; + } else + ctx->vm = &ppgtt->base; + + /* This case is reserved for the global default context and + * should only happen once. */ + if (is_global_default_ctx) { + if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) { + ret = -EEXIST; + goto err_unpin; + } + + dev_priv->mm.aliasing_ppgtt = ppgtt; + } + } else if (USES_PPGTT(dev)) { + /* For platforms which only have aliasing PPGTT, we fake the + * address space and refcounting. 
*/ + ctx->vm = &dev_priv->mm.aliasing_ppgtt->base; + kref_get(&dev_priv->mm.aliasing_ppgtt->ref); + } else + ctx->vm = &dev_priv->gtt.base; - DRM_DEBUG_DRIVER("Default HW context loaded\n"); - return 0; + return ctx; err_unpin: - i915_gem_object_unpin(ctx->obj); + if (is_global_default_ctx) + i915_gem_object_ggtt_unpin(ctx->obj); err_destroy: i915_gem_context_unreference(ctx); - return ret; + return ERR_PTR(ret); +} + +void i915_gem_context_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + int i; + + if (!HAS_HW_CONTEXTS(dev)) + return; + + /* Prevent the hardware from restoring the last context (which hung) on + * the next switch */ + for (i = 0; i < I915_NUM_RINGS; i++) { + struct i915_hw_context *dctx; + if (!(INTEL_INFO(dev)->ring_mask & (1<<i))) + continue; + + /* Do a fake switch to the default context */ + ring = &dev_priv->ring[i]; + dctx = ring->default_context; + if (WARN_ON(!dctx)) + continue; + + if (!ring->last_context) + continue; + + if (ring->last_context == dctx) + continue; + + if (i == RCS) { + WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj, + get_context_alignment(dev), 0)); + /* Fake a finish/inactive */ + dctx->obj->base.write_domain = 0; + dctx->obj->active = 0; + } + + i915_gem_context_unreference(ring->last_context); + i915_gem_context_reference(dctx); + ring->last_context = dctx; + } } int i915_gem_context_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int ret; + struct intel_ring_buffer *ring; + int i; if (!HAS_HW_CONTEXTS(dev)) return 0; - /* If called from reset, or thaw... we've been here already */ - if (dev_priv->ring[RCS].default_context) + /* Init should only be called once per module load. Eventually the + * restriction on the context_disabled check can be loosened. 
*/ + if (WARN_ON(dev_priv->ring[RCS].default_context)) return 0; dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); @@ -266,11 +412,23 @@ int i915_gem_context_init(struct drm_device *dev) return -E2BIG; } - ret = create_default_context(dev_priv); - if (ret) { - DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n", - ret); - return ret; + dev_priv->ring[RCS].default_context = + i915_gem_create_context(dev, NULL, USES_PPGTT(dev)); + + if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) { + DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n", + PTR_ERR(dev_priv->ring[RCS].default_context)); + return PTR_ERR(dev_priv->ring[RCS].default_context); + } + + for (i = RCS + 1; i < I915_NUM_RINGS; i++) { + if (!(INTEL_INFO(dev)->ring_mask & (1<<i))) + continue; + + ring = &dev_priv->ring[i]; + + /* NB: RCS will hold a ref for all rings */ + ring->default_context = dev_priv->ring[RCS].default_context; } DRM_DEBUG_DRIVER("HW context support initialized\n"); @@ -281,6 +439,7 @@ void i915_gem_context_fini(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context; + int i; if (!HAS_HW_CONTEXTS(dev)) return; @@ -300,59 +459,129 @@ void i915_gem_context_fini(struct drm_device *dev) if (dev_priv->ring[RCS].last_context == dctx) { /* Fake switch to NULL context */ WARN_ON(dctx->obj->active); - i915_gem_object_unpin(dctx->obj); + i915_gem_object_ggtt_unpin(dctx->obj); i915_gem_context_unreference(dctx); + dev_priv->ring[RCS].last_context = NULL; } - i915_gem_object_unpin(dctx->obj); + for (i = 0; i < I915_NUM_RINGS; i++) { + struct intel_ring_buffer *ring = &dev_priv->ring[i]; + if (!(INTEL_INFO(dev)->ring_mask & (1<<i))) + continue; + + if (ring->last_context) + i915_gem_context_unreference(ring->last_context); + + ring->default_context = NULL; + ring->last_context = NULL; + } + + i915_gem_object_ggtt_unpin(dctx->obj); i915_gem_context_unreference(dctx); - dev_priv->ring[RCS].default_context = NULL; - dev_priv->ring[RCS].last_context = NULL; + dev_priv->mm.aliasing_ppgtt = NULL; +} + +int i915_gem_context_enable(struct drm_i915_private *dev_priv) +{ + struct intel_ring_buffer *ring; + int ret, i; + + if (!HAS_HW_CONTEXTS(dev_priv->dev)) + return 0; + + /* This is the only place the aliasing PPGTT gets enabled, which means + * it has to happen before we bail on reset */ + if (dev_priv->mm.aliasing_ppgtt) { + struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; + ppgtt->enable(ppgtt); + } + + /* FIXME: We should make this work, even in reset */ + if (i915_reset_in_progress(&dev_priv->gpu_error)) + return 0; + + BUG_ON(!dev_priv->ring[RCS].default_context); + + for_each_ring(ring, dev_priv, i) { + ret = do_switch(ring, ring->default_context); + if (ret) + return ret; + } + + return 0; } static int context_idr_cleanup(int id, void *p, void *data) { struct i915_hw_context *ctx = p; - BUG_ON(id == DEFAULT_CONTEXT_ID); + /* Ignore the default context because close will handle it */ + if (i915_gem_context_is_default(ctx)) + return 0; i915_gem_context_unreference(ctx); return 0; } -struct i915_ctx_hang_stats * -i915_gem_context_get_hang_stats(struct drm_device *dev, - struct drm_file *file, - u32 id) +int i915_gem_context_open(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; - struct i915_hw_context *ctx; + struct drm_i915_private *dev_priv = dev->dev_private; - if (id == DEFAULT_CONTEXT_ID) - return &file_priv->hang_stats; + 
if (!HAS_HW_CONTEXTS(dev)) { + /* Cheat for hang stats */ + file_priv->private_default_ctx = + kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL); - if (!HAS_HW_CONTEXTS(dev)) - return ERR_PTR(-ENOENT); + if (file_priv->private_default_ctx == NULL) + return -ENOMEM; - ctx = i915_gem_context_get(file->driver_priv, id); - if (ctx == NULL) - return ERR_PTR(-ENOENT); + file_priv->private_default_ctx->vm = &dev_priv->gtt.base; + return 0; + } + + idr_init(&file_priv->context_idr); - return &ctx->hang_stats; + mutex_lock(&dev->struct_mutex); + file_priv->private_default_ctx = + i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev)); + mutex_unlock(&dev->struct_mutex); + + if (IS_ERR(file_priv->private_default_ctx)) { + idr_destroy(&file_priv->context_idr); + return PTR_ERR(file_priv->private_default_ctx); + } + + return 0; } void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; + if (!HAS_HW_CONTEXTS(dev)) { + kfree(file_priv->private_default_ctx); + return; + } + idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); + i915_gem_context_unreference(file_priv->private_default_ctx); idr_destroy(&file_priv->context_idr); } -static struct i915_hw_context * +struct i915_hw_context * i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) { - return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id); + struct i915_hw_context *ctx; + + if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev)) + return file_priv->private_default_ctx; + + ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id); + if (!ctx) + return ERR_PTR(-ENOENT); + + return ctx; } static inline int @@ -390,7 +619,10 @@ mi_set_context(struct intel_ring_buffer *ring, MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN | hw_flags); - /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */ + /* + * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP + * WaMiSetContext_Hang:snb,ivb,vlv + */ intel_ring_emit(ring, MI_NOOP); if (IS_GEN7(ring->dev)) @@ -403,21 +635,30 @@ mi_set_context(struct intel_ring_buffer *ring, return ret; } -static int do_switch(struct i915_hw_context *to) +static int do_switch(struct intel_ring_buffer *ring, + struct i915_hw_context *to) { - struct intel_ring_buffer *ring = to->ring; + struct drm_i915_private *dev_priv = ring->dev->dev_private; struct i915_hw_context *from = ring->last_context; + struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to); u32 hw_flags = 0; int ret, i; - BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0); + if (from != NULL && ring == &dev_priv->ring[RCS]) { + BUG_ON(from->obj == NULL); + BUG_ON(!i915_gem_obj_is_pinned(from->obj)); + } - if (from == to && !to->remap_slice) + if (from == to && from->last_ring == ring && !to->remap_slice) return 0; - ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false); - if (ret) - return ret; + /* Trying to pin first makes error handling easier. 
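 + * A rough sketch of the unwind shape this buys (ours, abridged from
 + * do_switch() below):
 + *
 + *	ret = i915_gem_obj_ggtt_pin(to->obj,
 + *				    get_context_alignment(ring->dev), 0);
 + *	if (ret)
 + *		return ret;		nothing to undo yet
 + *
 + *	ret = mi_set_context(ring, to, hw_flags);
 + *	if (ret)
 + *		goto unpin_out;		a single label undoes the pin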
*/ + if (ring == &dev_priv->ring[RCS]) { + ret = i915_gem_obj_ggtt_pin(to->obj, + get_context_alignment(ring->dev), 0); + if (ret) + return ret; + } /* * Pin can switch back to the default context if we end up calling into @@ -426,6 +667,18 @@ static int do_switch(struct i915_hw_context *to) */ from = ring->last_context; + if (USES_FULL_PPGTT(ring->dev)) { + ret = ppgtt->switch_mm(ppgtt, ring, false); + if (ret) + goto unpin_out; + } + + if (ring != &dev_priv->ring[RCS]) { + if (from) + i915_gem_context_unreference(from); + goto done; + } + /* * Clear this page out of any CPU caches for coherent swap-in/out. Note * that thanks to write = false in this call and us not setting any gpu * XXX: We need a real interface to do this instead of trickery. */ ret = i915_gem_object_set_to_gtt_domain(to->obj, false); - if (ret) { - i915_gem_object_unpin(to->obj); - return ret; - } + if (ret) + goto unpin_out; - if (!to->obj->has_global_gtt_mapping) - i915_gem_gtt_bind_object(to->obj, to->obj->cache_level); + if (!to->obj->has_global_gtt_mapping) { + struct i915_vma *vma = i915_gem_obj_to_vma(to->obj, + &dev_priv->gtt.base); + vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND); + } - if (!to->is_initialized || is_default_context(to)) + if (!to->is_initialized || i915_gem_context_is_default(to)) hw_flags |= MI_RESTORE_INHIBIT; ret = mi_set_context(ring, to, hw_flags); - if (ret) { - i915_gem_object_unpin(to->obj); - return ret; - } + if (ret) + goto unpin_out; for (i = 0; i < MAX_L3_SLICES; i++) { if (!(to->remap_slice & (1<<i))) @@ -484,22 +736,30 @@ static int do_switch(struct i915_hw_context *to) BUG_ON(from->obj->ring != ring); /* obj is kept alive until the next request by its active ref */ - i915_gem_object_unpin(from->obj); + i915_gem_object_ggtt_unpin(from->obj); i915_gem_context_unreference(from); } + to->is_initialized = true; + +done: i915_gem_context_reference(to); ring->last_context = to; - to->is_initialized = true; + to->last_ring = ring; return 0; + +unpin_out: + if (ring->id == RCS) + i915_gem_object_ggtt_unpin(to->obj); + return ret; } /** * i915_switch_context() - perform a GPU context switch. * @ring: ring for which we'll execute the context switch * @file: file associated with the context, may be NULL - * @id: context id number + * @to: the context to switch to * * The context life cycle is simple. The context refcount is incremented and * decremented by 1 on create and destroy. 
If the context is in use by the GPU, @@ -508,31 +768,21 @@ static int do_switch(struct i915_hw_context *to) */ int i915_switch_context(struct intel_ring_buffer *ring, struct drm_file *file, - int to_id) + struct i915_hw_context *to) { struct drm_i915_private *dev_priv = ring->dev->dev_private; - struct i915_hw_context *to; - - if (!HAS_HW_CONTEXTS(ring->dev)) - return 0; WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); - if (ring != &dev_priv->ring[RCS]) - return 0; - - if (to_id == DEFAULT_CONTEXT_ID) { - to = ring->default_context; - } else { - if (file == NULL) - return -EINVAL; + BUG_ON(file && to == NULL); - to = i915_gem_context_get(file->driver_priv, to_id); - if (to == NULL) - return -ENOENT; + /* We have the fake context */ + if (!HAS_HW_CONTEXTS(ring->dev)) { + ring->last_context = to; + return 0; } - return do_switch(to); + return do_switch(ring, to); } int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, @@ -543,9 +793,6 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct i915_hw_context *ctx; int ret; - if (!(dev->driver->driver_features & DRIVER_GEM)) - return -ENODEV; - if (!HAS_HW_CONTEXTS(dev)) return -ENODEV; @@ -553,7 +800,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - ctx = create_hw_context(dev, file_priv); + ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev)); mutex_unlock(&dev->struct_mutex); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -572,17 +819,17 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, struct i915_hw_context *ctx; int ret; - if (!(dev->driver->driver_features & DRIVER_GEM)) - return -ENODEV; + if (args->ctx_id == DEFAULT_CONTEXT_ID) + return -ENOENT; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; ctx = i915_gem_context_get(file_priv, args->ctx_id); - if (!ctx) { + if (IS_ERR(ctx)) { mutex_unlock(&dev->struct_mutex); - return -ENOENT; + return PTR_ERR(ctx); } idr_remove(&ctx->file_priv->context_idr, ctx->id); diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 775d506b320..f462d1b51d9 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -34,7 +34,7 @@ int i915_verify_lists(struct drm_device *dev) { static int warned; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; int err = 0; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 2ca280f9ee5..75fca63dc8c 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -36,7 +36,7 @@ static bool mark_free(struct i915_vma *vma, struct list_head *unwind) { - if (vma->obj->pin_count) + if (vma->pin_count) return false; if (WARN_ON(!list_empty(&vma->exec_list))) @@ -46,18 +46,37 @@ mark_free(struct i915_vma *vma, struct list_head *unwind) return drm_mm_scan_add_block(&vma->node); } +/** + * i915_gem_evict_something - Evict vmas to make room for binding a new one + * @dev: drm_device + * @vm: address space to evict from + * @size: size of the desired free space + * @alignment: alignment constraint of the desired free space + * @cache_level: cache_level for the desired space + * @flags: PIN_MAPPABLE if the free space must be mappable, PIN_NONBLOCK if + * evicting active objects is not allowed + * + * This function will try to evict vmas until a free space satisfying the + * requirements is found. 
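 + * The expected caller shape (our sketch, abridged from the search_free
 + * loop in i915_gem_object_bind_to_vm()):
 + *
 + *	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 + *						  size, alignment,
 + *						  obj->cache_level,
 + *						  0, gtt_max,
 + *						  DRM_MM_SEARCH_DEFAULT,
 + *						  DRM_MM_CREATE_DEFAULT);
 + *	if (ret) {
 + *		ret = i915_gem_evict_something(dev, vm, size, alignment,
 + *					       obj->cache_level, flags);
 + *		if (ret == 0)
 + *			goto search_free;	retry the insertion
 + *	}
 + *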
Callers must check first whether any such hole exists + * already before calling this function. + * + * This function is used by the object/vma binding code. + * + * To clarify: This is for freeing up virtual address space, not for freeing + * memory in e.g. the shrinker. + */ int i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, int min_size, unsigned alignment, unsigned cache_level, - bool mappable, bool nonblocking) + unsigned flags) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; struct i915_vma *vma; int ret = 0; int pass = 0; - trace_i915_gem_evict(dev, min_size, alignment, mappable); + trace_i915_gem_evict(dev, min_size, alignment, flags); /* * The goal is to evict objects and amalgamate space in LRU order. @@ -83,7 +102,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, */ INIT_LIST_HEAD(&unwind_list); - if (mappable) { + if (flags & PIN_MAPPABLE) { BUG_ON(!i915_is_ggtt(vm)); drm_mm_init_scan_with_range(&vm->mm, min_size, alignment, cache_level, 0, @@ -98,7 +117,7 @@ search_again: goto found; } - if (nonblocking) + if (flags & PIN_NONBLOCK) goto none; /* Now merge in the soon-to-be-expired objects... */ @@ -122,7 +141,7 @@ none: /* Can we unpin some objects such as idle hw contents, * or pending flips? */ - if (nonblocking) + if (flags & PIN_NONBLOCK) return -ENOSPC; /* Only idle the GPU and repeat the search once */ @@ -177,19 +196,19 @@ found: } /** - * i915_gem_evict_vm - Try to free up VM space + * i915_gem_evict_vm - Evict all idle vmas from a vm * - * @vm: Address space to evict from + * @vm: Address space to cleanse * @do_idle: Boolean directing whether to idle first. * - * VM eviction is about freeing up virtual address space. If one wants fine - * grained eviction, they should see evict something for more details. In terms - * of freeing up actual system memory, this function may not accomplish the - * desired result. An object may be shared in multiple address space, and this - * function will not assert those objects be freed. + * This function evicts all idle vmas from a vm. If all unpinned vmas should be + * evicted, @do_idle needs to be set to true. * - * Using do_idle will result in a more complete eviction because it retires, and - * inactivates current BOs. + * This is used by the execbuf code as a last-ditch effort to defragment the + * address space. + * + * To clarify: This is for freeing up virtual address space, not for freeing + * memory in e.g. the shrinker. */ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) { @@ -207,16 +226,24 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) } list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) - if (vma->obj->pin_count == 0) + if (vma->pin_count == 0) WARN_ON(i915_vma_unbind(vma)); return 0; } +/** + * i915_gem_evict_everything - Try to evict all objects + * @dev: Device to evict objects for + * + * This function tries to evict all gem objects from all address spaces. Used + * by the shrinker as a last-ditch effort and for suspend, before releasing the + * backing storage of all unbound objects. 
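 + *
 + * Caller sketch (ours; cf. the shrinker and suspend paths):
 + *
 + *	ret = i915_gem_evict_everything(dev);
 + *	if (ret)
 + *		return ret;	propagate failure to idle and unbind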
+ */ int i915_gem_evict_everything(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct i915_address_space *vm; bool lists_empty = true; int ret; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index d269ecf46e2..7447160155a 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -91,6 +91,7 @@ eb_lookup_vmas(struct eb_vmas *eb, struct i915_address_space *vm, struct drm_file *file) { + struct drm_i915_private *dev_priv = vm->dev->dev_private; struct drm_i915_gem_object *obj; struct list_head objects; int i, ret; @@ -125,6 +126,20 @@ eb_lookup_vmas(struct eb_vmas *eb, i = 0; while (!list_empty(&objects)) { struct i915_vma *vma; + struct i915_address_space *bind_vm = vm; + + if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT && + USES_FULL_PPGTT(vm->dev)) { + ret = -EINVAL; + goto err; + } + + /* If we have secure dispatch, or the userspace assures us that + * they know what they're doing, use the GGTT VM. + */ + if (((args->flags & I915_EXEC_SECURE) && + (i == (args->buffer_count - 1)))) + bind_vm = &dev_priv->gtt.base; obj = list_first_entry(&objects, struct drm_i915_gem_object, @@ -138,7 +153,7 @@ eb_lookup_vmas(struct eb_vmas *eb, * from the (obj, vm) we don't run the risk of creating * duplicated vmas for the same vm. */ - vma = i915_gem_obj_lookup_or_create_vma(obj, vm); + vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm); if (IS_ERR(vma)) { DRM_DEBUG("Failed to lookup VMA\n"); ret = PTR_ERR(vma); @@ -217,7 +232,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma) i915_gem_object_unpin_fence(obj); if (entry->flags & __EXEC_OBJECT_HAS_PIN) - i915_gem_object_unpin(obj); + vma->pin_count--; entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); } @@ -327,8 +342,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj, static int i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, struct eb_vmas *eb, - struct drm_i915_gem_relocation_entry *reloc, - struct i915_address_space *vm) + struct drm_i915_gem_relocation_entry *reloc) { struct drm_device *dev = obj->base.dev; struct drm_gem_object *target_obj; @@ -352,8 +366,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, if (unlikely(IS_GEN6(dev) && reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && !target_i915_obj->has_global_gtt_mapping)) { - i915_gem_gtt_bind_object(target_i915_obj, - target_i915_obj->cache_level); + struct i915_vma *vma = + list_first_entry(&target_i915_obj->vma_list, + typeof(*vma), vma_link); + vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND); } /* Validate that the target is in a valid r/w GPU domain */ @@ -451,8 +467,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, do { u64 offset = r->presumed_offset; - ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, - vma->vm); + ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r); if (ret) return ret; @@ -481,8 +496,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma, int i, ret; for (i = 0; i < entry->relocation_count; i++) { - ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], - vma->vm); + ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]); if (ret) return ret; } @@ -527,21 +541,26 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, struct intel_ring_buffer *ring, bool *need_reloc) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + 
struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; - bool need_fence, need_mappable; - struct drm_i915_gem_object *obj = vma->obj; + bool need_fence; + unsigned flags; int ret; + flags = 0; + need_fence = has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; - need_mappable = need_fence || need_reloc_mappable(vma); + if (need_fence || need_reloc_mappable(vma)) + flags |= PIN_MAPPABLE; - ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable, - false); + if (entry->flags & EXEC_OBJECT_NEEDS_GTT) + flags |= PIN_GLOBAL; + + ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); if (ret) return ret; @@ -560,14 +579,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, } } - /* Ensure ppgtt mapping exists if needed */ - if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) { - i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, - obj, obj->cache_level); - - obj->has_aliasing_ppgtt_mapping = 1; - } - if (entry->offset != vma->node.start) { entry->offset = vma->node.start; *need_reloc = true; @@ -578,10 +589,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER; } - if (entry->flags & EXEC_OBJECT_NEEDS_GTT && - !obj->has_global_gtt_mapping) - i915_gem_gtt_bind_object(obj, obj->cache_level); - return 0; } @@ -891,7 +898,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, if (!access_ok(VERIFY_WRITE, ptr, length)) return -EFAULT; - if (likely(!i915_prefault_disable)) { + if (likely(!i915.prefault_disable)) { if (fault_in_multipages_readable(ptr, length)) return -EFAULT; } @@ -900,22 +907,27 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, return 0; } -static int +static struct i915_hw_context * i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, - const u32 ctx_id) + struct intel_ring_buffer *ring, const u32 ctx_id) { + struct i915_hw_context *ctx = NULL; struct i915_ctx_hang_stats *hs; - hs = i915_gem_context_get_hang_stats(dev, file, ctx_id); - if (IS_ERR(hs)) - return PTR_ERR(hs); + if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID) + return ERR_PTR(-EINVAL); + + ctx = i915_gem_context_get(file->driver_priv, ctx_id); + if (IS_ERR(ctx)) + return ctx; + hs = &ctx->hang_stats; if (hs->banned) { DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id); - return -EIO; + return ERR_PTR(-EIO); } - return 0; + return ctx; } static void @@ -939,7 +951,9 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, if (obj->base.write_domain) { obj->dirty = 1; obj->last_write_seqno = intel_ring_get_seqno(ring); - if (obj->pin_count) /* check for potential scanout */ + /* check for potential scanout */ + if (i915_gem_obj_ggtt_bound(obj) && + i915_gem_obj_to_ggtt(obj)->pin_count) intel_mark_fb_busy(obj, ring); } @@ -964,7 +978,7 @@ static int i915_reset_gen7_sol_offsets(struct drm_device *dev, struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int ret, i; if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) @@ -989,16 +1003,17 @@ static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_file *file, struct drm_i915_gem_execbuffer2 *args, - struct drm_i915_gem_exec_object2 *exec, - struct i915_address_space *vm) + struct drm_i915_gem_exec_object2 *exec) { - drm_i915_private_t 
*dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct eb_vmas *eb; struct drm_i915_gem_object *batch_obj; struct drm_clip_rect *cliprects = NULL; struct intel_ring_buffer *ring; + struct i915_hw_context *ctx; + struct i915_address_space *vm; const u32 ctx_id = i915_execbuffer2_get_context_id(*args); - u32 exec_start, exec_len; + u32 exec_start = args->batch_start_offset, exec_len; u32 mask, flags; int ret, mode, i; bool need_relocs; @@ -1020,41 +1035,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (args->flags & I915_EXEC_IS_PINNED) flags |= I915_DISPATCH_PINNED; - switch (args->flags & I915_EXEC_RING_MASK) { - case I915_EXEC_DEFAULT: - case I915_EXEC_RENDER: - ring = &dev_priv->ring[RCS]; - break; - case I915_EXEC_BSD: - ring = &dev_priv->ring[VCS]; - if (ctx_id != DEFAULT_CONTEXT_ID) { - DRM_DEBUG("Ring %s doesn't support contexts\n", - ring->name); - return -EPERM; - } - break; - case I915_EXEC_BLT: - ring = &dev_priv->ring[BCS]; - if (ctx_id != DEFAULT_CONTEXT_ID) { - DRM_DEBUG("Ring %s doesn't support contexts\n", - ring->name); - return -EPERM; - } - break; - case I915_EXEC_VEBOX: - ring = &dev_priv->ring[VECS]; - if (ctx_id != DEFAULT_CONTEXT_ID) { - DRM_DEBUG("Ring %s doesn't support contexts\n", - ring->name); - return -EPERM; - } - break; - - default: + if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) { DRM_DEBUG("execbuf with unknown ring: %d\n", (int)(args->flags & I915_EXEC_RING_MASK)); return -EINVAL; } + + if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT) + ring = &dev_priv->ring[RCS]; + else + ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1]; + if (!intel_ring_initialized(ring)) { DRM_DEBUG("execbuf with invalid ring: %d\n", (int)(args->flags & I915_EXEC_RING_MASK)); @@ -1136,11 +1127,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto pre_mutex_err; } - ret = i915_gem_validate_context(dev, file, ctx_id); - if (ret) { + ctx = i915_gem_validate_context(dev, file, ring, ctx_id); + if (IS_ERR(ctx)) { mutex_unlock(&dev->struct_mutex); + ret = PTR_ERR(ctx); goto pre_mutex_err; - } + } + + i915_gem_context_reference(ctx); + + vm = ctx->vm; + if (!USES_FULL_PPGTT(dev)) + vm = &dev_priv->gtt.base; eb = eb_create(args); if (eb == NULL) { @@ -1184,17 +1182,46 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; + if (i915_needs_cmd_parser(ring)) { + ret = i915_parse_cmds(ring, + batch_obj, + args->batch_start_offset, + file->is_master); + if (ret) + goto err; + + /* + * XXX: Actually do this when enabling batch copy... + * + * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit + * from MI_BATCH_BUFFER_START commands issued in the + * dispatch_execbuffer implementations. We specifically don't + * want that set when the command parser is enabled. + */ + } + /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure * batch" bit. Hence we need to pin secure batches into the global gtt. * hsw should have this fixed, but bdw mucks it up again. 
*/ - if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) - i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); + if (flags & I915_DISPATCH_SECURE && + !batch_obj->has_global_gtt_mapping) { + /* When we have multiple VMs, we'll need to make sure that we + * allocate space first */ + struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj); + BUG_ON(!vma); + vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND); + } + + if (flags & I915_DISPATCH_SECURE) + exec_start += i915_gem_obj_ggtt_offset(batch_obj); + else + exec_start += i915_gem_obj_offset(batch_obj, vm); ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas); if (ret) goto err; - ret = i915_switch_context(ring, file, ctx_id); + ret = i915_switch_context(ring, file, ctx); if (ret) goto err; @@ -1219,8 +1246,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - exec_start = i915_gem_obj_offset(batch_obj, vm) + - args->batch_start_offset; + exec_len = args->batch_len; if (cliprects) { for (i = 0; i < args->num_cliprects; i++) { @@ -1249,6 +1275,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); err: + /* the request owns the ref now */ + i915_gem_context_unreference(ctx); eb_destroy(eb); mutex_unlock(&dev->struct_mutex); @@ -1270,7 +1298,6 @@ int i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_execbuffer *args = data; struct drm_i915_gem_execbuffer2 exec2; struct drm_i915_gem_exec_object *exec_list = NULL; @@ -1326,8 +1353,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, exec2.flags = I915_EXEC_RENDER; i915_execbuffer2_set_context_id(exec2, 0); - ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list, - &dev_priv->gtt.base); + ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ for (i = 0; i < args->buffer_count; i++) @@ -1353,7 +1379,6 @@ int i915_gem_execbuffer2(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_execbuffer2 *args = data; struct drm_i915_gem_exec_object2 *exec2_list = NULL; int ret; @@ -1384,8 +1409,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, return -EFAULT; } - ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list, - &dev_priv->gtt.base); + ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. 
*/ ret = copy_to_user(to_user_ptr(args->buffers_ptr), diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index d278be11080..ab5e93c30aa 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1,5 +1,6 @@ /* * Copyright © 2010 Daniel Vetter + * Copyright © 2011-2014 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -22,12 +23,38 @@ * */ +#include <linux/seq_file.h> #include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_trace.h" #include "intel_drv.h" +static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv); + +bool intel_enable_ppgtt(struct drm_device *dev, bool full) +{ + if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) + return false; + + if (i915.enable_ppgtt == 1 && full) + return false; + +#ifdef CONFIG_INTEL_IOMMU + /* Disable ppgtt on SNB if VT-d is on. */ + if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { + DRM_INFO("Disabling PPGTT because VT-d is on\n"); + return false; + } +#endif + + /* Full ppgtt disabled by default for now due to issues. */ + if (full) + return false; /* HAS_PPGTT(dev) */ + else + return HAS_ALIASING_PPGTT(dev); +} + #define GEN6_PPGTT_PD_ENTRIES 512 #define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t)) typedef uint64_t gen8_gtt_pte_t; @@ -63,13 +90,31 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; #define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) #define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) -#define GEN8_LEGACY_PDPS 4 + +/* GEN8 legacy style address is defined as a 3 level page table: + * 31:30 | 29:21 | 20:12 | 11:0 + * PDPE | PDE | PTE | offset + * The difference as compared to normal x86 3 level page table is the PDPEs are + * programmed via register. 
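 + *
 + * Illustrative decode (our sketch using the shift/mask macros defined
 + * below; not lines from this patch):
 + *
 + *	pdpe = (addr >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;	which PDP entry
 + *	pde  = (addr >> GEN8_PDE_SHIFT) & GEN8_PDE_MASK;	which PD entry
 + *	pte  = (addr >> GEN8_PTE_SHIFT) & GEN8_PTE_MASK;	which PT entry
 + *	off  = addr & (PAGE_SIZE - 1);				byte within page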
+ */ +#define GEN8_PDPE_SHIFT 30 +#define GEN8_PDPE_MASK 0x3 +#define GEN8_PDE_SHIFT 21 +#define GEN8_PDE_MASK 0x1ff +#define GEN8_PTE_SHIFT 12 +#define GEN8_PTE_MASK 0x1ff #define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD) #define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */ #define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */ #define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */ +static void ppgtt_bind_vma(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags); +static void ppgtt_unbind_vma(struct i915_vma *vma); +static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt); + static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, enum i915_cache_level level, bool valid) @@ -199,12 +244,19 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, /* Broadwell Page Directory Pointer Descriptors */ static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry, - uint64_t val) + uint64_t val, bool synchronous) { + struct drm_i915_private *dev_priv = ring->dev->dev_private; int ret; BUG_ON(entry >= 4); + if (synchronous) { + I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32); + I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val); + return 0; + } + ret = intel_ring_begin(ring, 6); if (ret) return ret; @@ -220,216 +272,357 @@ static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry, return 0; } -static int gen8_ppgtt_enable(struct drm_device *dev) +static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt, + struct intel_ring_buffer *ring, + bool synchronous) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; - struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - int i, j, ret; + int i, ret; /* bit of a hack to find the actual last used pd */ int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE; - for_each_ring(ring, dev_priv, j) { - I915_WRITE(RING_MODE_GEN7(ring), - _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); - } - for (i = used_pd - 1; i >= 0; i--) { dma_addr_t addr = ppgtt->pd_dma_addr[i]; - for_each_ring(ring, dev_priv, j) { - ret = gen8_write_pdp(ring, i, addr); - if (ret) - goto err_out; - } + ret = gen8_write_pdp(ring, i, addr, synchronous); + if (ret) + return ret; } - return 0; -err_out: - for_each_ring(ring, dev_priv, j) - I915_WRITE(RING_MODE_GEN7(ring), - _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE)); - return ret; + return 0; } static void gen8_ppgtt_clear_range(struct i915_address_space *vm, - unsigned first_entry, - unsigned num_entries, + uint64_t start, + uint64_t length, bool use_scratch) { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); gen8_gtt_pte_t *pt_vaddr, scratch_pte; - unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE; - unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE; + unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; + unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; + unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; + unsigned num_entries = length >> PAGE_SHIFT; unsigned last_pte, i; scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr, I915_CACHE_LLC, use_scratch); while (num_entries) { - struct page *page_table = &ppgtt->gen8_pt_pages[act_pt]; + struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde]; - last_pte = first_pte + num_entries; + last_pte = pte + num_entries; if (last_pte > GEN8_PTES_PER_PAGE) last_pte = GEN8_PTES_PER_PAGE; pt_vaddr = kmap_atomic(page_table); - for (i = first_pte; i < last_pte; i++) + for (i = pte; i < last_pte; i++) { pt_vaddr[i] = scratch_pte; + num_entries--; + } 
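+		/* This page table is exhausted: drop the kmap and advance
+		 * (pte, pde, pdpe) like an odometer, wrapping into the next
+		 * page directory and, when that overflows, the next PDP. */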
kunmap_atomic(pt_vaddr); - num_entries -= last_pte - first_pte; - first_pte = 0; - act_pt++; + pte = 0; + if (++pde == GEN8_PDES_PER_PAGE) { + pdpe++; + pde = 0; + } } } static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, struct sg_table *pages, - unsigned first_entry, + uint64_t start, enum i915_cache_level cache_level) { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); gen8_gtt_pte_t *pt_vaddr; - unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE; - unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE; + unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; + unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; + unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; struct sg_page_iter sg_iter; pt_vaddr = NULL; + for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { + if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS)) + break; + if (pt_vaddr == NULL) - pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]); + pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]); - pt_vaddr[act_pte] = + pt_vaddr[pte] = gen8_pte_encode(sg_page_iter_dma_address(&sg_iter), cache_level, true); - if (++act_pte == GEN8_PTES_PER_PAGE) { + if (++pte == GEN8_PTES_PER_PAGE) { kunmap_atomic(pt_vaddr); pt_vaddr = NULL; - act_pt++; - act_pte = 0; + if (++pde == GEN8_PDES_PER_PAGE) { + pdpe++; + pde = 0; + } + pte = 0; } } if (pt_vaddr) kunmap_atomic(pt_vaddr); } +static void gen8_free_page_tables(struct page **pt_pages) +{ + int i; + + if (pt_pages == NULL) + return; + + for (i = 0; i < GEN8_PDES_PER_PAGE; i++) + if (pt_pages[i]) + __free_pages(pt_pages[i], 0); +} + +static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt) +{ + int i; + + for (i = 0; i < ppgtt->num_pd_pages; i++) { + gen8_free_page_tables(ppgtt->gen8_pt_pages[i]); + kfree(ppgtt->gen8_pt_pages[i]); + kfree(ppgtt->gen8_pt_dma_addr[i]); + } + + __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT)); +} + +static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt) +{ + struct pci_dev *hwdev = ppgtt->base.dev->pdev; + int i, j; + + for (i = 0; i < ppgtt->num_pd_pages; i++) { + /* TODO: In the future we'll support sparse mappings, so this + * will have to change. 
*/ + if (!ppgtt->pd_dma_addr[i]) + continue; + + pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + + for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { + dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j]; + if (addr) + pci_unmap_page(hwdev, addr, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + } + } +} + static void gen8_ppgtt_cleanup(struct i915_address_space *vm) { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); - int i, j; + list_del(&vm->global_link); drm_mm_takedown(&vm->mm); - for (i = 0; i < ppgtt->num_pd_pages ; i++) { - if (ppgtt->pd_dma_addr[i]) { - pci_unmap_page(ppgtt->base.dev->pdev, - ppgtt->pd_dma_addr[i], - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + gen8_ppgtt_unmap_pages(ppgtt); + gen8_ppgtt_free(ppgtt); +} - for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { - dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j]; - if (addr) - pci_unmap_page(ppgtt->base.dev->pdev, - addr, - PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); +static struct page **__gen8_alloc_page_tables(void) +{ + struct page **pt_pages; + int i; - } - } - kfree(ppgtt->gen8_pt_dma_addr[i]); + pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL); + if (!pt_pages) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < GEN8_PDES_PER_PAGE; i++) { + pt_pages[i] = alloc_page(GFP_KERNEL); + if (!pt_pages[i]) + goto bail; } - __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT)); - __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT)); + return pt_pages; + +bail: + gen8_free_page_tables(pt_pages); + kfree(pt_pages); + return ERR_PTR(-ENOMEM); } -/** - * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a - * net effect resembling a 2-level page table in normal x86 terms. Each PDP - * represents 1GB of memory - * 4 * 512 * 512 * 4096 = 4GB legacy 32b address space. - * - * TODO: Do something with the size parameter - **/ -static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) +static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt, + const int max_pdp) { - struct page *pt_pages; - int i, j, ret = -ENOMEM; - const int max_pdp = DIV_ROUND_UP(size, 1 << 30); - const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp; + struct page **pt_pages[GEN8_LEGACY_PDPS]; + int i, ret; - if (size % (1<<30)) - DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size); + for (i = 0; i < max_pdp; i++) { + pt_pages[i] = __gen8_alloc_page_tables(); + if (IS_ERR(pt_pages[i])) { + ret = PTR_ERR(pt_pages[i]); + goto unwind_out; + } + } - /* FIXME: split allocation into smaller pieces. For now we only ever do - * this once, but with full PPGTT, the multiple contiguous allocations - * will be bad. + /* NB: Avoid touching gen8_pt_pages until last to keep the allocation, + * "atomic" - for cleanup purposes. 
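 + * On failure the unwind_out label below uses the standard idiom: having
 + * failed at index i, while (i--) walks back and frees exactly the page
 + * tables [0, i) that were successfully allocated.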
*/ + for (i = 0; i < max_pdp; i++) + ppgtt->gen8_pt_pages[i] = pt_pages[i]; + + return 0; + +unwind_out: + while (i--) { + gen8_free_page_tables(pt_pages[i]); + kfree(pt_pages[i]); + } + + return ret; +} + +static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt) +{ + int i; + + for (i = 0; i < ppgtt->num_pd_pages; i++) { + ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE, + sizeof(dma_addr_t), + GFP_KERNEL); + if (!ppgtt->gen8_pt_dma_addr[i]) + return -ENOMEM; + } + + return 0; +} + +static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt, + const int max_pdp) +{ ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT)); if (!ppgtt->pd_pages) return -ENOMEM; - pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT)); - if (!pt_pages) { + ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT); + BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS); + + return 0; +} + +static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt, + const int max_pdp) +{ + int ret; + + ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp); + if (ret) + return ret; + + ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp); + if (ret) { __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT)); - return -ENOMEM; + return ret; } - ppgtt->gen8_pt_pages = pt_pages; - ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT); - ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT); ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE; - ppgtt->enable = gen8_ppgtt_enable; - ppgtt->base.clear_range = gen8_ppgtt_clear_range; - ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; - ppgtt->base.cleanup = gen8_ppgtt_cleanup; - ppgtt->base.start = 0; - ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE; - BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS); + ret = gen8_ppgtt_allocate_dma(ppgtt); + if (ret) + gen8_ppgtt_free(ppgtt); - /* - * - Create a mapping for the page directories. - * - For each page directory: - * allocate space for page table mappings. 
- * map each page table */ - for (i = 0; i < max_pdp; i++) { - dma_addr_t temp; - temp = pci_map_page(ppgtt->base.dev->pdev, - &ppgtt->pd_pages[i], 0, - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp)) - goto err_out; + return ret; +} - ppgtt->pd_dma_addr[i] = temp; +static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt, + const int pd) +{ + dma_addr_t pd_addr; + int ret; - ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL); - if (!ppgtt->gen8_pt_dma_addr[i]) - goto err_out; + pd_addr = pci_map_page(ppgtt->base.dev->pdev, + &ppgtt->pd_pages[pd], 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { - struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j]; - temp = pci_map_page(ppgtt->base.dev->pdev, - p, 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); + ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr); + if (ret) + return ret; - if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp)) - goto err_out; + ppgtt->pd_dma_addr[pd] = pd_addr; - ppgtt->gen8_pt_dma_addr[i][j] = temp; + return 0; +} + +static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt, + const int pd, + const int pt) +{ + dma_addr_t pt_addr; + struct page *p; + int ret; + + p = ppgtt->gen8_pt_pages[pd][pt]; + pt_addr = pci_map_page(ppgtt->base.dev->pdev, + p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr); + if (ret) + return ret; + + ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr; + + return 0; +} + +/** + * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP registers + * with a net effect resembling a 2-level page table in normal x86 terms. Each + * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of legacy 32b address + * space. + * + * FIXME: split allocation into smaller pieces. For now we only ever do this + * once, but with full PPGTT, the multiple contiguous allocations will be bad. + * TODO: Do something with the size parameter + */ +static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) +{ + const int max_pdp = DIV_ROUND_UP(size, 1 << 30); + const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp; + int i, j, ret; + + if (size % (1<<30)) + DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size); + + /* 1. Do all our allocations for page directories and page tables. */ + ret = gen8_ppgtt_alloc(ppgtt, max_pdp); + if (ret) + return ret; + + /* + * 2. Create DMA mappings for the page directories and page tables. + */ + for (i = 0; i < max_pdp; i++) { + ret = gen8_ppgtt_setup_page_directories(ppgtt, i); + if (ret) + goto bail; + + for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { + ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j); + if (ret) + goto bail; } } - /* For now, the PPGTT helper functions all require that the PDEs are + /* + * 3. Map all the page directory entries to point to the page tables + * we've allocated. + * + * For now, the PPGTT helper functions all require that the PDEs are * plugged in correctly. So we do that now/here. For aliasing PPGTT, we - * will never need to touch the PDEs again. 
+ */ for (i = 0; i < max_pdp; i++) { gen8_ppgtt_pde_t *pd_vaddr; pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]); @@ -441,23 +634,85 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) kunmap_atomic(pd_vaddr); } - ppgtt->base.clear_range(&ppgtt->base, 0, - ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE, - true); + ppgtt->enable = gen8_ppgtt_enable; + ppgtt->switch_mm = gen8_mm_switch; + ppgtt->base.clear_range = gen8_ppgtt_clear_range; + ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; + ppgtt->base.cleanup = gen8_ppgtt_cleanup; + ppgtt->base.start = 0; + ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE; + + ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n", ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp); DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n", - ppgtt->num_pt_pages, - (ppgtt->num_pt_pages - num_pt_pages) + - size % (1<<30)); + ppgtt->num_pd_entries, + (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30)); return 0; -err_out: - ppgtt->base.cleanup(&ppgtt->base); +bail: + gen8_ppgtt_unmap_pages(ppgtt); + gen8_ppgtt_free(ppgtt); return ret; } +static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) +{ + struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; + struct i915_address_space *vm = &ppgtt->base; + gen6_gtt_pte_t __iomem *pd_addr; + gen6_gtt_pte_t scratch_pte; + uint32_t pd_entry; + int pte, pde; + + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true); + + pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + + ppgtt->pd_offset / sizeof(gen6_gtt_pte_t); + + seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm, + ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries); + for (pde = 0; pde < ppgtt->num_pd_entries; pde++) { + u32 expected; + gen6_gtt_pte_t *pt_vaddr; + dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde]; + pd_entry = readl(pd_addr + pde); + expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID); + + if (pd_entry != expected) + seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n", + pde, + pd_entry, + expected); + seq_printf(m, "\tPDE: %x\n", pd_entry); + + pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]); + for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) { + unsigned long va = + (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) + + (pte * PAGE_SIZE); + int i; + bool found = false; + for (i = 0; i < 4; i++) + if (pt_vaddr[pte + i] != scratch_pte) + found = true; + if (!found) + continue; + + seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte); + for (i = 0; i < 4; i++) { + if (pt_vaddr[pte + i] != scratch_pte) + seq_printf(m, " %08x", pt_vaddr[pte + i]); + else + seq_puts(m, " SCRATCH "); + } + seq_puts(m, "\n"); + } + kunmap_atomic(pt_vaddr); + } +} + static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) { struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; @@ -480,73 +735,235 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) readl(pd_addr); } -static int gen6_ppgtt_enable(struct drm_device *dev) +static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) +{ + BUG_ON(ppgtt->pd_offset & 0x3f); + + return (ppgtt->pd_offset / 64) << 16; +} + +static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, + struct intel_ring_buffer *ring, + bool synchronous) +{ + struct drm_device *dev = ppgtt->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + /* If we're in reset, we can assume the GPU is 
sufficiently idle to + * manually frob these bits. Ideally we could use the ring functions, + * except our error handling makes it quite difficult (can't use + * intel_ring_begin, ring->flush, or intel_ring_advance) + * + * FIXME: We should try not to special case reset + */ + if (synchronous || + i915_reset_in_progress(&dev_priv->gpu_error)) { + WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt); + I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); + I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt)); + POSTING_READ(RING_PP_DIR_BASE(ring)); + return 0; + } + + /* NB: TLBs must be flushed and invalidated before a switch */ + ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + if (ret) + return ret; + + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; + + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); + intel_ring_emit(ring, RING_PP_DIR_DCLV(ring)); + intel_ring_emit(ring, PP_DIR_DCLV_2G); + intel_ring_emit(ring, RING_PP_DIR_BASE(ring)); + intel_ring_emit(ring, get_pd_offset(ppgtt)); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + + return 0; +} + +static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, + struct intel_ring_buffer *ring, + bool synchronous) +{ + struct drm_device *dev = ppgtt->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + /* If we're in reset, we can assume the GPU is sufficiently idle to + * manually frob these bits. Ideally we could use the ring functions, + * except our error handling makes it quite difficult (can't use + * intel_ring_begin, ring->flush, or intel_ring_advance) + * + * FIXME: We should try not to special case reset + */ + if (synchronous || + i915_reset_in_progress(&dev_priv->gpu_error)) { + WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt); + I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); + I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt)); + POSTING_READ(RING_PP_DIR_BASE(ring)); + return 0; + } + + /* NB: TLBs must be flushed and invalidated before a switch */ + ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + if (ret) + return ret; + + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; + + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); + intel_ring_emit(ring, RING_PP_DIR_DCLV(ring)); + intel_ring_emit(ring, PP_DIR_DCLV_2G); + intel_ring_emit(ring, RING_PP_DIR_BASE(ring)); + intel_ring_emit(ring, get_pd_offset(ppgtt)); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + + /* XXX: RCS is the only one to auto invalidate the TLBs? 
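 *
 * A note on get_pd_offset() used in the LRI payload above: PP_DIR_BASE
 * takes the page directory's GTT offset in cachelines, packed into the
 * upper half of the register -- hence (pd_offset / 64) << 16, i.e. 64
 * bytes per cacheline with the field starting at bit 16, and the
 * BUG_ON(pd_offset & 0x3f) guarding the required alignment. The removed
 * gen6_ppgtt_enable() below did the same conversion open-coded.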
*/ + if (ring->id != RCS) { + ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + if (ret) + return ret; + } + + return 0; +} + +static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, + struct intel_ring_buffer *ring, + bool synchronous) +{ + struct drm_device *dev = ppgtt->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!synchronous) + return 0; + + I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); + I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt)); + + POSTING_READ(RING_PP_DIR_DCLV(ring)); + + return 0; +} + +static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) { - drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t pd_offset; + struct drm_device *dev = ppgtt->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; - struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - int i; + int j, ret; - BUG_ON(ppgtt->pd_offset & 0x3f); + for_each_ring(ring, dev_priv, j) { + I915_WRITE(RING_MODE_GEN7(ring), + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); - gen6_write_pdes(ppgtt); + /* We promise to do a switch later with FULL PPGTT. If this is + * aliasing, this is the one and only switch we'll do */ + if (USES_FULL_PPGTT(dev)) + continue; - pd_offset = ppgtt->pd_offset; - pd_offset /= 64; /* in cachelines, */ - pd_offset <<= 16; + ret = ppgtt->switch_mm(ppgtt, ring, true); + if (ret) + goto err_out; + } - if (INTEL_INFO(dev)->gen == 6) { - uint32_t ecochk, gab_ctl, ecobits; + return 0; - ecobits = I915_READ(GAC_ECO_BITS); - I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | - ECOBITS_PPGTT_CACHE64B); +err_out: + for_each_ring(ring, dev_priv, j) + I915_WRITE(RING_MODE_GEN7(ring), + _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE)); + return ret; +} - gab_ctl = I915_READ(GAB_CTL); - I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); +static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) +{ + struct drm_device *dev = ppgtt->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + uint32_t ecochk, ecobits; + int i; - ecochk = I915_READ(GAM_ECOCHK); - I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | - ECOCHK_PPGTT_CACHE64B); - I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); - } else if (INTEL_INFO(dev)->gen >= 7) { - uint32_t ecochk, ecobits; + ecobits = I915_READ(GAC_ECO_BITS); + I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); - ecobits = I915_READ(GAC_ECO_BITS); - I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); + ecochk = I915_READ(GAM_ECOCHK); + if (IS_HASWELL(dev)) { + ecochk |= ECOCHK_PPGTT_WB_HSW; + } else { + ecochk |= ECOCHK_PPGTT_LLC_IVB; + ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; + } + I915_WRITE(GAM_ECOCHK, ecochk); - ecochk = I915_READ(GAM_ECOCHK); - if (IS_HASWELL(dev)) { - ecochk |= ECOCHK_PPGTT_WB_HSW; - } else { - ecochk |= ECOCHK_PPGTT_LLC_IVB; - ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; - } - I915_WRITE(GAM_ECOCHK, ecochk); + for_each_ring(ring, dev_priv, i) { + int ret; /* GFX_MODE is per-ring on gen7+ */ + I915_WRITE(RING_MODE_GEN7(ring), + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + + /* We promise to do a switch later with FULL PPGTT. 
If this is + * aliasing, this is the one and only switch we'll do */ + if (USES_FULL_PPGTT(dev)) + continue; + + ret = ppgtt->switch_mm(ppgtt, ring, true); + if (ret) + return ret; } - for_each_ring(ring, dev_priv, i) { - if (INTEL_INFO(dev)->gen >= 7) - I915_WRITE(RING_MODE_GEN7(ring), - _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + return 0; +} - I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); - I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); +static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) +{ + struct drm_device *dev = ppgtt->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + uint32_t ecochk, gab_ctl, ecobits; + int i; + + ecobits = I915_READ(GAC_ECO_BITS); + I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | + ECOBITS_PPGTT_CACHE64B); + + gab_ctl = I915_READ(GAB_CTL); + I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); + + ecochk = I915_READ(GAM_ECOCHK); + I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); + + I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + + for_each_ring(ring, dev_priv, i) { + int ret = ppgtt->switch_mm(ppgtt, ring, true); + if (ret) + return ret; } + return 0; } /* PPGTT support for Sandybridge/Gen6 and later */ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, - unsigned first_entry, - unsigned num_entries, + uint64_t start, + uint64_t length, bool use_scratch) { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); gen6_gtt_pte_t *pt_vaddr, scratch_pte; + unsigned first_entry = start >> PAGE_SHIFT; + unsigned num_entries = length >> PAGE_SHIFT; unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned last_pte, i; @@ -573,12 +990,13 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, struct sg_table *pages, - unsigned first_entry, + uint64_t start, enum i915_cache_level cache_level) { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); gen6_gtt_pte_t *pt_vaddr; + unsigned first_entry = start >> PAGE_SHIFT; unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; struct sg_page_iter sg_iter; @@ -602,65 +1020,130 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, kunmap_atomic(pt_vaddr); } -static void gen6_ppgtt_cleanup(struct i915_address_space *vm) +static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt) { - struct i915_hw_ppgtt *ppgtt = - container_of(vm, struct i915_hw_ppgtt, base); int i; - drm_mm_takedown(&ppgtt->base.mm); - if (ppgtt->pt_dma_addr) { for (i = 0; i < ppgtt->num_pd_entries; i++) pci_unmap_page(ppgtt->base.dev->pdev, ppgtt->pt_dma_addr[i], 4096, PCI_DMA_BIDIRECTIONAL); } +} + +static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt) +{ + int i; kfree(ppgtt->pt_dma_addr); for (i = 0; i < ppgtt->num_pd_entries; i++) __free_page(ppgtt->pt_pages[i]); kfree(ppgtt->pt_pages); - kfree(ppgtt); } -static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) +static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, base); + + list_del(&vm->global_link); + drm_mm_takedown(&ppgtt->base.mm); + drm_mm_remove_node(&ppgtt->node); + + gen6_ppgtt_unmap_pages(ppgtt); + gen6_ppgtt_free(ppgtt); +} + +static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) +{ +#define
GEN6_PD_ALIGN (PAGE_SIZE * 16) +#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE) struct drm_device *dev = ppgtt->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - unsigned first_pd_entry_in_global_pt; - int i; - int ret = -ENOMEM; + bool retried = false; + int ret; - /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 - * entries. For aliasing ppgtt support we just steal them at the end for - * now. */ - first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt); + /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The + * allocator works in address space sizes, so it's multiplied by page + * size. We allocate at the top of the GTT to avoid fragmentation. + */ + BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm)); +alloc: + ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, + &ppgtt->node, GEN6_PD_SIZE, + GEN6_PD_ALIGN, 0, + 0, dev_priv->gtt.base.total, + DRM_MM_SEARCH_DEFAULT, + DRM_MM_CREATE_DEFAULT); + if (ret == -ENOSPC && !retried) { + ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, + GEN6_PD_SIZE, GEN6_PD_ALIGN, + I915_CACHE_NONE, 0); + if (ret) + return ret; + + retried = true; + goto alloc; + } + + if (ppgtt->node.start < dev_priv->gtt.mappable_end) + DRM_DEBUG("Forced to use aperture for PDEs\n"); - ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; - ppgtt->enable = gen6_ppgtt_enable; - ppgtt->base.clear_range = gen6_ppgtt_clear_range; - ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; - ppgtt->base.cleanup = gen6_ppgtt_cleanup; - ppgtt->base.scratch = dev_priv->gtt.base.scratch; - ppgtt->base.start = 0; - ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE; + return ret; +} + +static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt) +{ + int i; + ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *), GFP_KERNEL); + if (!ppgtt->pt_pages) return -ENOMEM; for (i = 0; i < ppgtt->num_pd_entries; i++) { ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); - if (!ppgtt->pt_pages[i]) - goto err_pt_alloc; + if (!ppgtt->pt_pages[i]) { + gen6_ppgtt_free(ppgtt); + return -ENOMEM; + } + } + + return 0; +} + +static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt) +{ + int ret; + + ret = gen6_ppgtt_allocate_page_directories(ppgtt); + if (ret) + return ret; + + ret = gen6_ppgtt_allocate_page_tables(ppgtt); + if (ret) { + drm_mm_remove_node(&ppgtt->node); + return ret; } ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t), GFP_KERNEL); - if (!ppgtt->pt_dma_addr) - goto err_pt_alloc; + if (!ppgtt->pt_dma_addr) { + drm_mm_remove_node(&ppgtt->node); + gen6_ppgtt_free(ppgtt); + return -ENOMEM; + } + + return 0; +} + +static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt) +{ + struct drm_device *dev = ppgtt->base.dev; + int i; for (i = 0; i < ppgtt->num_pd_entries; i++) { dma_addr_t pt_addr; @@ -669,48 +1152,71 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) PCI_DMA_BIDIRECTIONAL); if (pci_dma_mapping_error(dev->pdev, pt_addr)) { - ret = -EIO; - goto err_pd_pin; - + gen6_ppgtt_unmap_pages(ppgtt); + return -EIO; } + ppgtt->pt_dma_addr[i] = pt_addr; } - ppgtt->base.clear_range(&ppgtt->base, 0, - ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true); + return 0; +} - ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); +static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) +{ + struct drm_device *dev = ppgtt->base.dev; + struct drm_i915_private 
*dev_priv = dev->dev_private; + int ret; - return 0; + ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; + if (IS_GEN6(dev)) { + ppgtt->enable = gen6_ppgtt_enable; + ppgtt->switch_mm = gen6_mm_switch; + } else if (IS_HASWELL(dev)) { + ppgtt->enable = gen7_ppgtt_enable; + ppgtt->switch_mm = hsw_mm_switch; + } else if (IS_GEN7(dev)) { + ppgtt->enable = gen7_ppgtt_enable; + ppgtt->switch_mm = gen7_mm_switch; + } else + BUG(); -err_pd_pin: - if (ppgtt->pt_dma_addr) { - for (i--; i >= 0; i--) - pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i], - 4096, PCI_DMA_BIDIRECTIONAL); - } -err_pt_alloc: - kfree(ppgtt->pt_dma_addr); - for (i = 0; i < ppgtt->num_pd_entries; i++) { - if (ppgtt->pt_pages[i]) - __free_page(ppgtt->pt_pages[i]); + ret = gen6_ppgtt_alloc(ppgtt); + if (ret) + return ret; + + ret = gen6_ppgtt_setup_page_tables(ppgtt); + if (ret) { + gen6_ppgtt_free(ppgtt); + return ret; } - kfree(ppgtt->pt_pages); - return ret; + ppgtt->base.clear_range = gen6_ppgtt_clear_range; + ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; + ppgtt->base.cleanup = gen6_ppgtt_cleanup; + ppgtt->base.start = 0; + ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE; + ppgtt->debug_dump = gen6_dump_ppgtt; + + ppgtt->pd_offset = + ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t); + + ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); + + DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n", + ppgtt->node.size >> 20, + ppgtt->node.start / PAGE_SIZE); + + return 0; } -static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) +int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) { struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_hw_ppgtt *ppgtt; - int ret; - - ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); - if (!ppgtt) - return -ENOMEM; + int ret = 0; ppgtt->base.dev = dev; + ppgtt->base.scratch = dev_priv->gtt.base.scratch; if (INTEL_INFO(dev)->gen < 8) ret = gen6_ppgtt_init(ppgtt); @@ -719,45 +1225,37 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) else BUG(); - if (ret) - kfree(ppgtt); - else { - dev_priv->mm.aliasing_ppgtt = ppgtt; + if (!ret) { + struct drm_i915_private *dev_priv = dev->dev_private; + kref_init(&ppgtt->ref); drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, ppgtt->base.total); + i915_init_vm(dev_priv, &ppgtt->base); + if (INTEL_INFO(dev)->gen < 8) { + gen6_write_pdes(ppgtt); + DRM_DEBUG("Adding PPGTT at offset %x\n", + ppgtt->pd_offset << 10); + } } return ret; } -void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) +static void +ppgtt_bind_vma(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - - if (!ppgtt) - return; - - ppgtt->base.cleanup(&ppgtt->base); - dev_priv->mm.aliasing_ppgtt = NULL; + vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, + cache_level); } -void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, - struct drm_i915_gem_object *obj, - enum i915_cache_level cache_level) +static void ppgtt_unbind_vma(struct i915_vma *vma) { - ppgtt->base.insert_entries(&ppgtt->base, obj->pages, - i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, - cache_level); -} - -void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, - struct drm_i915_gem_object *obj) -{ - ppgtt->base.clear_range(&ppgtt->base, - i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, - obj->base.size >> PAGE_SHIFT, - true); + 
vma->vm->clear_range(vma->vm, + vma->node.start, + vma->obj->base.size, + true); } extern int intel_iommu_gfx_mapped; @@ -840,8 +1338,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) i915_check_and_clear_faults(dev); dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, - dev_priv->gtt.base.start / PAGE_SIZE, - dev_priv->gtt.base.total / PAGE_SIZE, + dev_priv->gtt.base.start, + dev_priv->gtt.base.total, true); } @@ -849,18 +1347,46 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; + struct i915_address_space *vm; i915_check_and_clear_faults(dev); /* First fill our portion of the GTT with scratch pages */ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, - dev_priv->gtt.base.start / PAGE_SIZE, - dev_priv->gtt.base.total / PAGE_SIZE, + dev_priv->gtt.base.start, + dev_priv->gtt.base.total, true); list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + struct i915_vma *vma = i915_gem_obj_to_vma(obj, + &dev_priv->gtt.base); + if (!vma) + continue; + i915_gem_clflush_object(obj, obj->pin_display); - i915_gem_gtt_bind_object(obj, obj->cache_level); + /* The bind_vma code tries to be smart about tracking mappings. + * Unfortunately above, we've just wiped out the mappings + * without telling our object about it. So we need to fake it. + */ + obj->has_global_gtt_mapping = 0; + vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND); + } + + + if (INTEL_INFO(dev)->gen >= 8) { + gen8_setup_private_ppat(dev_priv); + return; + } + + list_for_each_entry(vm, &dev_priv->vm_list, global_link) { + /* TODO: Perhaps it shouldn't be gen6 specific */ + if (i915_is_ggtt(vm)) { + if (dev_priv->mm.aliasing_ppgtt) + gen6_write_pdes(dev_priv->mm.aliasing_ppgtt); + continue; + } + + gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base)); } i915_gem_chipset_flush(dev); @@ -891,10 +1417,11 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte) static void gen8_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *st, - unsigned int first_entry, + uint64_t start, enum i915_cache_level level) { struct drm_i915_private *dev_priv = vm->dev->dev_private; + unsigned first_entry = start >> PAGE_SHIFT; gen8_gtt_pte_t __iomem *gtt_entries = (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; int i = 0; @@ -936,10 +1463,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, */ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *st, - unsigned int first_entry, + uint64_t start, enum i915_cache_level level) { struct drm_i915_private *dev_priv = vm->dev->dev_private; + unsigned first_entry = start >> PAGE_SHIFT; gen6_gtt_pte_t __iomem *gtt_entries = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; int i = 0; @@ -971,11 +1499,13 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, } static void gen8_ggtt_clear_range(struct i915_address_space *vm, - unsigned int first_entry, - unsigned int num_entries, + uint64_t start, + uint64_t length, bool use_scratch) { struct drm_i915_private *dev_priv = vm->dev->dev_private; + unsigned first_entry = start >> PAGE_SHIFT; + unsigned num_entries = length >> PAGE_SHIFT; gen8_gtt_pte_t scratch_pte, __iomem *gtt_base = (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; @@ -995,11 +1525,13 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, } static void 
gen6_ggtt_clear_range(struct i915_address_space *vm, - unsigned int first_entry, - unsigned int num_entries, + uint64_t start, + uint64_t length, bool use_scratch) { struct drm_i915_private *dev_priv = vm->dev->dev_private; + unsigned first_entry = start >> PAGE_SHIFT; + unsigned num_entries = length >> PAGE_SHIFT; gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; @@ -1017,53 +1549,103 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, readl(gtt_base); } -static void i915_ggtt_insert_entries(struct i915_address_space *vm, - struct sg_table *st, - unsigned int pg_start, - enum i915_cache_level cache_level) + +static void i915_ggtt_bind_vma(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 unused) { + const unsigned long entry = vma->node.start >> PAGE_SHIFT; unsigned int flags = (cache_level == I915_CACHE_NONE) ? AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; - intel_gtt_insert_sg_entries(st, pg_start, flags); - + BUG_ON(!i915_is_ggtt(vma->vm)); + intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags); + vma->obj->has_global_gtt_mapping = 1; } static void i915_ggtt_clear_range(struct i915_address_space *vm, - unsigned int first_entry, - unsigned int num_entries, + uint64_t start, + uint64_t length, bool unused) { + unsigned first_entry = start >> PAGE_SHIFT; + unsigned num_entries = length >> PAGE_SHIFT; intel_gtt_clear_range(first_entry, num_entries); } +static void i915_ggtt_unbind_vma(struct i915_vma *vma) +{ + const unsigned int first = vma->node.start >> PAGE_SHIFT; + const unsigned int size = vma->obj->base.size >> PAGE_SHIFT; + + BUG_ON(!i915_is_ggtt(vma->vm)); + vma->obj->has_global_gtt_mapping = 0; + intel_gtt_clear_range(first, size); +} -void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, - enum i915_cache_level cache_level) +static void ggtt_bind_vma(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) { - struct drm_device *dev = obj->base.dev; + struct drm_device *dev = vma->vm->dev; struct drm_i915_private *dev_priv = dev->dev_private; - const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; + struct drm_i915_gem_object *obj = vma->obj; - dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages, - entry, - cache_level); + /* If there is no aliasing PPGTT, or the caller needs a global mapping, + * or we have a global mapping already but the cacheability flags have + * changed, set the global PTEs. + * + * If there is an aliasing PPGTT it is anecdotally faster, so use that + * instead if none of the above hold true. + * + * NB: A global mapping should only be needed for special regions like + * "gtt mappable", SNB errata, or if specified via special execbuf + * flags. At all other times, the GPU will use the aliasing PPGTT. 
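 *
 * In short, the policy implemented below is: with no aliasing PPGTT, or
 * when GLOBAL_BIND is requested, write the global GTT PTEs (if they are
 * missing or the cache level changed); when an aliasing PPGTT exists,
 * additionally keep its PTEs current. An object may therefore hold both
 * mappings at once, tracked separately by has_global_gtt_mapping and
 * has_aliasing_ppgtt_mapping.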
+ */ + if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) { + if (!obj->has_global_gtt_mapping || + (cache_level != obj->cache_level)) { + vma->vm->insert_entries(vma->vm, obj->pages, + vma->node.start, + cache_level); + obj->has_global_gtt_mapping = 1; + } + } - obj->has_global_gtt_mapping = 1; + if (dev_priv->mm.aliasing_ppgtt && + (!obj->has_aliasing_ppgtt_mapping || + (cache_level != obj->cache_level))) { + struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; + appgtt->base.insert_entries(&appgtt->base, + vma->obj->pages, + vma->node.start, + cache_level); + vma->obj->has_aliasing_ppgtt_mapping = 1; + } } -void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) +static void ggtt_unbind_vma(struct i915_vma *vma) { - struct drm_device *dev = obj->base.dev; + struct drm_device *dev = vma->vm->dev; struct drm_i915_private *dev_priv = dev->dev_private; - const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; - - dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, - entry, - obj->base.size >> PAGE_SHIFT, - true); + struct drm_i915_gem_object *obj = vma->obj; + + if (obj->has_global_gtt_mapping) { + vma->vm->clear_range(vma->vm, + vma->node.start, + obj->base.size, + true); + obj->has_global_gtt_mapping = 0; + } - obj->has_global_gtt_mapping = 0; + if (obj->has_aliasing_ppgtt_mapping) { + struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; + appgtt->base.clear_range(&appgtt->base, + vma->node.start, + obj->base.size, + true); + obj->has_aliasing_ppgtt_mapping = 0; + } } void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) @@ -1145,29 +1727,14 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, /* Clear any non-preallocated blocks */ drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) { - const unsigned long count = (hole_end - hole_start) / PAGE_SIZE; DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", hole_start, hole_end); - ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true); + ggtt_vm->clear_range(ggtt_vm, hole_start, + hole_end - hole_start, true); } /* And finally clear the reserved guard page */ - ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true); -} - -static bool -intel_enable_ppgtt(struct drm_device *dev) -{ - if (i915_enable_ppgtt >= 0) - return i915_enable_ppgtt; - -#ifdef CONFIG_INTEL_IOMMU - /* Disable ppgtt on SNB if VT-d is on. */ - if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) - return false; -#endif - - return true; + ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true); } void i915_gem_init_global_gtt(struct drm_device *dev) @@ -1178,26 +1745,6 @@ void i915_gem_init_global_gtt(struct drm_device *dev) gtt_size = dev_priv->gtt.base.total; mappable_size = dev_priv->gtt.mappable_end; - if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { - int ret; - - if (INTEL_INFO(dev)->gen <= 7) { - /* PPGTT pdes are stolen from global gtt ptes, so shrink the - * aperture accordingly when using aliasing ppgtt. 
*/ - gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE; - } - - i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); - - ret = i915_gem_init_aliasing_ppgtt(dev); - if (!ret) - return; - - DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); - drm_mm_takedown(&dev_priv->gtt.base.mm); - if (INTEL_INFO(dev)->gen < 8) - gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE; - } i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); } @@ -1252,11 +1799,6 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; if (bdw_gmch_ctl) bdw_gmch_ctl = 1 << bdw_gmch_ctl; - if (bdw_gmch_ctl > 4) { - WARN_ON(!i915_preliminary_hw_support); - return 4<<20; - } - return bdw_gmch_ctl << 20; } @@ -1438,7 +1980,6 @@ static int i915_gmch_probe(struct drm_device *dev, dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); dev_priv->gtt.base.clear_range = i915_ggtt_clear_range; - dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries; if (unlikely(dev_priv->gtt.do_idle_maps)) DRM_INFO("applying Ironlake quirks for intel_iommu\n"); @@ -1493,3 +2034,62 @@ int i915_gem_gtt_init(struct drm_device *dev) return 0; } + +static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm) +{ + struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); + if (vma == NULL) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&vma->vma_link); + INIT_LIST_HEAD(&vma->mm_list); + INIT_LIST_HEAD(&vma->exec_list); + vma->vm = vm; + vma->obj = obj; + + switch (INTEL_INFO(vm->dev)->gen) { + case 8: + case 7: + case 6: + if (i915_is_ggtt(vm)) { + vma->unbind_vma = ggtt_unbind_vma; + vma->bind_vma = ggtt_bind_vma; + } else { + vma->unbind_vma = ppgtt_unbind_vma; + vma->bind_vma = ppgtt_bind_vma; + } + break; + case 5: + case 4: + case 3: + case 2: + BUG_ON(!i915_is_ggtt(vm)); + vma->unbind_vma = i915_ggtt_unbind_vma; + vma->bind_vma = i915_ggtt_bind_vma; + break; + default: + BUG(); + } + + /* Keep GGTT vmas first to make debug easier */ + if (i915_is_ggtt(vm)) + list_add(&vma->vma_link, &obj->vma_list); + else + list_add_tail(&vma->vma_link, &obj->vma_list); + + return vma; +} + +struct i915_vma * +i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm) +{ + struct i915_vma *vma; + + vma = i915_gem_obj_to_vma(obj, vm); + if (!vma) + vma = __i915_gem_vma_create(obj, vm); + + return vma; +} diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 28d24caa49f..62ef55ba061 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -215,7 +215,7 @@ int i915_gem_init_stolen(struct drm_device *dev) int bios_reserved = 0; #ifdef CONFIG_INTEL_IOMMU - if (intel_iommu_gfx_mapped) { + if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) { DRM_INFO("DMAR active, disabling use of stolen memory\n"); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index b1390534804..cb150e8b433 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -87,7 +87,7 @@ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; @@ -294,7 +294,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, struct drm_file *file) { struct 
drm_i915_gem_set_tiling *args = data; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; int ret = 0; @@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, return -EINVAL; } - if (obj->pin_count || obj->framebuffer_references) { + if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) { drm_gem_object_unreference_unlocked(&obj->base); return -EBUSY; } @@ -415,7 +415,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_get_tiling *args = data; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 990cf8f43ef..12f1d43b2d6 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -238,50 +238,61 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a) static void i915_ring_error_state(struct drm_i915_error_state_buf *m, struct drm_device *dev, - struct drm_i915_error_state *error, - unsigned ring) + struct drm_i915_error_ring *ring) { - BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ - if (!error->ring[ring].valid) + if (!ring->valid) return; - err_printf(m, "%s command stream:\n", ring_str(ring)); - err_printf(m, " HEAD: 0x%08x\n", error->head[ring]); - err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); - err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); - err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); - err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); - err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); - err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); + err_printf(m, " HEAD: 0x%08x\n", ring->head); + err_printf(m, " TAIL: 0x%08x\n", ring->tail); + err_printf(m, " CTL: 0x%08x\n", ring->ctl); + err_printf(m, " HWS: 0x%08x\n", ring->hws); + err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd); + err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir); + err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr); + err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone); if (INTEL_INFO(dev)->gen >= 4) { - err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr[ring]); - err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]); - err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); + err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr); + err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate); + err_printf(m, " INSTPS: 0x%08x\n", ring->instps); } - err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); - err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); + err_printf(m, " INSTPM: 0x%08x\n", ring->instpm); + err_printf(m, " FADDR: 0x%08x\n", ring->faddr); if (INTEL_INFO(dev)->gen >= 6) { - err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); - err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); + err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi); + err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg); err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", - error->semaphore_mboxes[ring][0], - error->semaphore_seqno[ring][0]); + ring->semaphore_mboxes[0], + ring->semaphore_seqno[0]); err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", - error->semaphore_mboxes[ring][1], - error->semaphore_seqno[ring][1]); + 
ring->semaphore_mboxes[1], + ring->semaphore_seqno[1]); if (HAS_VEBOX(dev)) { err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n", - error->semaphore_mboxes[ring][2], - error->semaphore_seqno[ring][2]); + ring->semaphore_mboxes[2], + ring->semaphore_seqno[2]); } } - err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); - err_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); - err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); - err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); + if (USES_PPGTT(dev)) { + err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode); + + if (INTEL_INFO(dev)->gen >= 8) { + int i; + for (i = 0; i < 4; i++) + err_printf(m, " PDP%d: 0x%016llx\n", + i, ring->vm_info.pdp[i]); + } else { + err_printf(m, " PP_DIR_BASE: 0x%08x\n", + ring->vm_info.pp_dir_base); + } + } + err_printf(m, " seqno: 0x%08x\n", ring->seqno); + err_printf(m, " waiting: %s\n", yesno(ring->waiting)); + err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head); + err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail); err_printf(m, " hangcheck: %s [%d]\n", - hangcheck_action_to_str(error->hangcheck_action[ring]), - error->hangcheck_score[ring]); + hangcheck_action_to_str(ring->hangcheck_action), + ring->hangcheck_score); } void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) @@ -293,22 +304,54 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) va_end(args); } +static void print_error_obj(struct drm_i915_error_state_buf *m, + struct drm_i915_error_object *obj) +{ + int page, offset, elt; + + for (page = offset = 0; page < obj->page_count; page++) { + for (elt = 0; elt < PAGE_SIZE/4; elt++) { + err_printf(m, "%08x : %08x\n", offset, + obj->pages[page][elt]); + offset += 4; + } + } +} + int i915_error_state_to_str(struct drm_i915_error_state_buf *m, const struct i915_error_state_file_priv *error_priv) { struct drm_device *dev = error_priv->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_error_state *error = error_priv->error; - int i, j, page, offset, elt; + int i, j, offset, elt; + int max_hangcheck_score; if (!error) { err_printf(m, "no error state collected\n"); goto out; } + err_printf(m, "%s\n", error->error_msg); err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, error->time.tv_usec); err_printf(m, "Kernel: " UTS_RELEASE "\n"); + max_hangcheck_score = 0; + for (i = 0; i < ARRAY_SIZE(error->ring); i++) { + if (error->ring[i].hangcheck_score > max_hangcheck_score) + max_hangcheck_score = error->ring[i].hangcheck_score; + } + for (i = 0; i < ARRAY_SIZE(error->ring); i++) { + if (error->ring[i].hangcheck_score == max_hangcheck_score && + error->ring[i].pid != -1) { + err_printf(m, "Active process (on ring %s): %s [%d]\n", + ring_str(i), + error->ring[i].comm, + error->ring[i].pid); + } + } + err_printf(m, "Reset count: %u\n", error->reset_count); + err_printf(m, "Suspend count: %u\n", error->suspend_count); err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device); err_printf(m, "EIR: 0x%08x\n", error->eir); err_printf(m, "IER: 0x%08x\n", error->ier); @@ -333,8 +376,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, if (INTEL_INFO(dev)->gen == 7) err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); - for (i = 0; i < ARRAY_SIZE(error->ring); i++) - i915_ring_error_state(m, dev, error, i); + for (i = 0; i < ARRAY_SIZE(error->ring); i++) { + err_printf(m, "%s command stream:\n", ring_str(i)); 
+ i915_ring_error_state(m, dev, &error->ring[i]); + } if (error->active_bo) print_error_buffers(m, "Active", @@ -349,18 +394,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, for (i = 0; i < ARRAY_SIZE(error->ring); i++) { struct drm_i915_error_object *obj; - if ((obj = error->ring[i].batchbuffer)) { - err_printf(m, "%s --- gtt_offset = 0x%08x\n", - dev_priv->ring[i].name, + obj = error->ring[i].batchbuffer; + if (obj) { + err_puts(m, dev_priv->ring[i].name); + if (error->ring[i].pid != -1) + err_printf(m, " (submitted by %s [%d])", + error->ring[i].comm, + error->ring[i].pid); + err_printf(m, " --- gtt_offset = 0x%08x\n", obj->gtt_offset); - offset = 0; - for (page = 0; page < obj->page_count; page++) { - for (elt = 0; elt < PAGE_SIZE/4; elt++) { - err_printf(m, "%08x : %08x\n", offset, - obj->pages[page][elt]); - offset += 4; - } - } + print_error_obj(m, obj); + } + + obj = error->ring[i].wa_batchbuffer; + if (obj) { + err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n", + dev_priv->ring[i].name, obj->gtt_offset); + print_error_obj(m, obj); } if (error->ring[i].num_requests) { @@ -379,14 +429,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "%s --- ringbuffer = 0x%08x\n", dev_priv->ring[i].name, obj->gtt_offset); + print_error_obj(m, obj); + } + + if ((obj = error->ring[i].hws_page)) { + err_printf(m, "%s --- HW Status = 0x%08x\n", + dev_priv->ring[i].name, + obj->gtt_offset); offset = 0; - for (page = 0; page < obj->page_count; page++) { - for (elt = 0; elt < PAGE_SIZE/4; elt++) { - err_printf(m, "%08x : %08x\n", - offset, - obj->pages[page][elt]); - offset += 4; - } + for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { + err_printf(m, "[%04x] %08x %08x %08x %08x\n", + offset, + obj->pages[0][elt], + obj->pages[0][elt+1], + obj->pages[0][elt+2], + obj->pages[0][elt+3]); + offset += 16; } } @@ -472,6 +530,7 @@ static void i915_error_state_free(struct kref *error_ref) for (i = 0; i < ARRAY_SIZE(error->ring); i++) { i915_error_object_free(error->ring[i].batchbuffer); i915_error_object_free(error->ring[i].ringbuffer); + i915_error_object_free(error->ring[i].hws_page); i915_error_object_free(error->ring[i].ctx); kfree(error->ring[i].requests); } @@ -485,6 +544,7 @@ static void i915_error_state_free(struct kref *error_ref) static struct drm_i915_error_object * i915_error_object_create_sized(struct drm_i915_private *dev_priv, struct drm_i915_gem_object *src, + struct i915_address_space *vm, const int num_pages) { struct drm_i915_error_object *dst; @@ -498,7 +558,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, if (dst == NULL) return NULL; - reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src); + reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm); for (i = 0; i < num_pages; i++) { unsigned long flags; void *d; @@ -508,8 +568,10 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, goto unwind; local_irq_save(flags); - if (reloc_offset < dev_priv->gtt.mappable_end && - src->has_global_gtt_mapping) { + if (src->cache_level == I915_CACHE_NONE && + reloc_offset < dev_priv->gtt.mappable_end && + src->has_global_gtt_mapping && + i915_is_ggtt(vm)) { void __iomem *s; /* Simply ignore tiling or any overlapping fence. 
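The four-part test above decides whether the capture code may snapshot an object's pages straight through the CPU-visible GTT aperture instead of going through a CPU mapping. Restated as a standalone predicate for clarity (the helper name is hypothetical; this is only a sketch of the condition shown in the hunk above):

static bool can_read_via_aperture(struct drm_i915_private *dev_priv,
				  struct drm_i915_gem_object *src,
				  struct i915_address_space *vm,
				  unsigned long reloc_offset)
{
	/* Uncached, bound in the global GTT and inside the mappable
	 * window: the contents can be read through the aperture. */
	return src->cache_level == I915_CACHE_NONE &&
	       reloc_offset < dev_priv->gtt.mappable_end &&
	       src->has_global_gtt_mapping &&
	       i915_is_ggtt(vm);
}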
@@ -559,8 +621,12 @@ unwind: kfree(dst); return NULL; } -#define i915_error_object_create(dev_priv, src) \ - i915_error_object_create_sized((dev_priv), (src), \ +#define i915_error_object_create(dev_priv, src, vm) \ + i915_error_object_create_sized((dev_priv), (src), (vm), \ + (src)->base.size>>PAGE_SHIFT) + +#define i915_error_ggtt_object_create(dev_priv, src) \ + i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \ (src)->base.size>>PAGE_SHIFT) static void capture_bo(struct drm_i915_error_buffer *err, @@ -575,7 +641,7 @@ static void capture_bo(struct drm_i915_error_buffer *err, err->write_domain = obj->base.write_domain; err->fence_reg = obj->fence_reg; err->pinned = 0; - if (obj->pin_count > 0) + if (i915_gem_obj_is_pinned(obj)) err->pinned = 1; if (obj->user_pin_count > 0) err->pinned = -1; @@ -608,7 +674,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, int i = 0; list_for_each_entry(obj, head, global_list) { - if (obj->pin_count == 0) + if (!i915_gem_obj_is_pinned(obj)) continue; capture_bo(err++, obj); @@ -619,6 +685,39 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, return i; } +/* Generate a semi-unique error code. The code is not meant to have meaning; the + * code's only purpose is to try to prevent false duplicated bug reports by + * grossly estimating a GPU error state. + * + * TODO Ideally, hashing the batchbuffer would be a very nice way to determine + * the hang if we could strip the GTT offset information from it. + * + * It's only a small step better than a random number in its current form. + */ +static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv, + struct drm_i915_error_state *error, + int *ring_id) +{ + uint32_t error_code = 0; + int i; + + /* IPEHR would be an ideal way to detect errors, as it's the gross + * measure of "the command that hung." However, it contains some very common + * synchronization commands which almost always appear in cases that are + * strictly a client bug. Use instdone to help differentiate those. 
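 *
 * Concretely, the loop below returns ipehr ^ instdone for the first
 * ring whose hangcheck verdict is HANGCHECK_HUNG, so two hangs on the
 * same command with the same instdone signature produce the same ecode
 * and can be recognised as duplicate reports.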
+ */ + for (i = 0; i < I915_NUM_RINGS; i++) { + if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) { + if (ring_id) + *ring_id = i; + + return error->ring[i].ipehr ^ error->ring[i].instdone; + } + } + + return error_code; +} + static void i915_gem_record_fences(struct drm_device *dev, struct drm_i915_error_state *error) { @@ -652,107 +751,114 @@ static void i915_gem_record_fences(struct drm_device *dev, } } -static struct drm_i915_error_object * -i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, - struct intel_ring_buffer *ring) -{ - struct i915_address_space *vm; - struct i915_vma *vma; - struct drm_i915_gem_object *obj; - u32 seqno; - - if (!ring->get_seqno) - return NULL; - - if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { - u32 acthd = I915_READ(ACTHD); - - if (WARN_ON(ring->id != RCS)) - return NULL; - - obj = ring->scratch.obj; - if (obj != NULL && - acthd >= i915_gem_obj_ggtt_offset(obj) && - acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) - return i915_error_object_create(dev_priv, obj); - } - - seqno = ring->get_seqno(ring, false); - list_for_each_entry(vm, &dev_priv->vm_list, global_link) { - list_for_each_entry(vma, &vm->active_list, mm_list) { - obj = vma->obj; - if (obj->ring != ring) - continue; - - if (i915_seqno_passed(seqno, obj->last_read_seqno)) - continue; - - if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) - continue; - - /* We need to copy these to an anonymous buffer as the simplest - * method to avoid being overwritten by userspace. - */ - return i915_error_object_create(dev_priv, obj); - } - } - - return NULL; -} - static void i915_record_ring_state(struct drm_device *dev, - struct drm_i915_error_state *error, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring, + struct drm_i915_error_ring *ering) { struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_INFO(dev)->gen >= 6) { - error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); - error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); - error->semaphore_mboxes[ring->id][0] + ering->rc_psmi = I915_READ(ring->mmio_base + 0x50); + ering->fault_reg = I915_READ(RING_FAULT_REG(ring)); + ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base)); - error->semaphore_mboxes[ring->id][1] + ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base)); - error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; - error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; + ering->semaphore_seqno[0] = ring->sync_seqno[0]; + ering->semaphore_seqno[1] = ring->sync_seqno[1]; } if (HAS_VEBOX(dev)) { - error->semaphore_mboxes[ring->id][2] = + ering->semaphore_mboxes[2] = I915_READ(RING_SYNC_2(ring->mmio_base)); - error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2]; + ering->semaphore_seqno[2] = ring->sync_seqno[2]; } if (INTEL_INFO(dev)->gen >= 4) { - error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); - error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); - error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); - error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); - error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); - error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base)); + ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base)); + ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base)); + ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); + ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base)); + ering->instps = 
I915_READ(RING_INSTPS(ring->mmio_base)); + ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base)); if (INTEL_INFO(dev)->gen >= 8) - error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32; - error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base)); + ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32; + ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base)); } else { - error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); - error->ipeir[ring->id] = I915_READ(IPEIR); - error->ipehr[ring->id] = I915_READ(IPEHR); - error->instdone[ring->id] = I915_READ(INSTDONE); + ering->faddr = I915_READ(DMA_FADD_I8XX); + ering->ipeir = I915_READ(IPEIR); + ering->ipehr = I915_READ(IPEHR); + ering->instdone = I915_READ(INSTDONE); + } + + ering->waiting = waitqueue_active(&ring->irq_queue); + ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base)); + ering->seqno = ring->get_seqno(ring, false); + ering->acthd = intel_ring_get_active_head(ring); + ering->head = I915_READ_HEAD(ring); + ering->tail = I915_READ_TAIL(ring); + ering->ctl = I915_READ_CTL(ring); + + if (I915_NEED_GFX_HWS(dev)) { + int mmio; + + if (IS_GEN7(dev)) { + switch (ring->id) { + default: + case RCS: + mmio = RENDER_HWS_PGA_GEN7; + break; + case BCS: + mmio = BLT_HWS_PGA_GEN7; + break; + case VCS: + mmio = BSD_HWS_PGA_GEN7; + break; + case VECS: + mmio = VEBOX_HWS_PGA_GEN7; + break; + } + } else if (IS_GEN6(ring->dev)) { + mmio = RING_HWS_PGA_GEN6(ring->mmio_base); + } else { + /* XXX: gen8 returns to sanity */ + mmio = RING_HWS_PGA(ring->mmio_base); + } + + ering->hws = I915_READ(mmio); } - error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); - error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); - error->seqno[ring->id] = ring->get_seqno(ring, false); - error->acthd[ring->id] = intel_ring_get_active_head(ring); - error->head[ring->id] = I915_READ_HEAD(ring); - error->tail[ring->id] = I915_READ_TAIL(ring); - error->ctl[ring->id] = I915_READ_CTL(ring); + ering->cpu_ring_head = ring->head; + ering->cpu_ring_tail = ring->tail; + + ering->hangcheck_score = ring->hangcheck.score; + ering->hangcheck_action = ring->hangcheck.action; + + if (USES_PPGTT(dev)) { + int i; - error->cpu_ring_head[ring->id] = ring->head; - error->cpu_ring_tail[ring->id] = ring->tail; + ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring)); - error->hangcheck_score[ring->id] = ring->hangcheck.score; - error->hangcheck_action[ring->id] = ring->hangcheck.action; + switch (INTEL_INFO(dev)->gen) { + case 8: + for (i = 0; i < 4; i++) { + ering->vm_info.pdp[i] = + I915_READ(GEN8_RING_PDP_UDW(ring, i)); + ering->vm_info.pdp[i] <<= 32; + ering->vm_info.pdp[i] |= + I915_READ(GEN8_RING_PDP_LDW(ring, i)); + } + break; + case 7: + ering->vm_info.pp_dir_base = + I915_READ(RING_PP_DIR_BASE(ring)); + break; + case 6: + ering->vm_info.pp_dir_base = + I915_READ(RING_PP_DIR_BASE_READ(ring)); + break; + } + } } @@ -770,7 +876,9 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring, list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { ering->ctx = i915_error_object_create_sized(dev_priv, - obj, 1); + obj, + &dev_priv->gtt.base, + 1); break; } } @@ -791,14 +899,48 @@ static void i915_gem_record_rings(struct drm_device *dev, error->ring[i].valid = true; - i915_record_ring_state(dev, error, ring); + i915_record_ring_state(dev, ring, &error->ring[i]); - error->ring[i].batchbuffer = - 
i915_error_first_batchbuffer(dev_priv, ring); + error->ring[i].pid = -1; + request = i915_gem_find_active_request(ring); + if (request) { + /* We need to copy these to an anonymous buffer + * as the simplest method to avoid being overwritten + * by userspace. + */ + error->ring[i].batchbuffer = + i915_error_object_create(dev_priv, + request->batch_obj, + request->ctx ? + request->ctx->vm : + &dev_priv->gtt.base); + + if (HAS_BROKEN_CS_TLB(dev_priv->dev) && + ring->scratch.obj) + error->ring[i].wa_batchbuffer = + i915_error_ggtt_object_create(dev_priv, + ring->scratch.obj); + + if (request->file_priv) { + struct task_struct *task; + + rcu_read_lock(); + task = pid_task(request->file_priv->file->pid, + PIDTYPE_PID); + if (task) { + strcpy(error->ring[i].comm, task->comm); + error->ring[i].pid = task->pid; + } + rcu_read_unlock(); + } + } error->ring[i].ringbuffer = - i915_error_object_create(dev_priv, ring->obj); + i915_error_ggtt_object_create(dev_priv, ring->obj); + if (ring->status_page.obj) + error->ring[i].hws_page = + i915_error_ggtt_object_create(dev_priv, ring->status_page.obj); i915_gem_record_active_context(ring, error, &error->ring[i]); @@ -845,7 +987,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv, i++; error->active_bo_count[ndx] = i; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) - if (obj->pin_count) + if (i915_gem_obj_is_pinned(obj)) i++; error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; @@ -879,11 +1021,6 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, list_for_each_entry(vm, &dev_priv->vm_list, global_link) cnt++; - if (WARN(cnt > 1, "Multiple VMs not yet supported\n")) - cnt = 1; - - vm = &dev_priv->gtt.base; - error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC); error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC); error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count), @@ -895,6 +1032,108 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, i915_gem_capture_vm(dev_priv, error, vm, i++); } +/* Capture all registers which don't fit into another category. */ +static void i915_capture_reg_state(struct drm_i915_private *dev_priv, + struct drm_i915_error_state *error) +{ + struct drm_device *dev = dev_priv->dev; + int pipe; + + /* General organization + * 1. Registers specific to a single generation + * 2. Registers which belong to multiple generations + * 3. Feature specific registers. + * 4. Everything else + * Please try to follow the order. 
+ */
+
+ /* 1: Registers specific to a single generation */
+ if (IS_VALLEYVIEW(dev)) {
+ error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+ error->forcewake = I915_READ(FORCEWAKE_VLV);
+ }
+
+ if (IS_GEN7(dev))
+ error->err_int = I915_READ(GEN7_ERR_INT);
+
+ if (IS_GEN6(dev)) {
+ error->forcewake = I915_READ(FORCEWAKE);
+ error->gab_ctl = I915_READ(GAB_CTL);
+ error->gfx_mode = I915_READ(GFX_MODE);
+ }
+
+ if (IS_GEN2(dev))
+ error->ier = I915_READ16(IER);
+
+ /* 2: Registers which belong to multiple generations */
+ if (INTEL_INFO(dev)->gen >= 7)
+ error->forcewake = I915_READ(FORCEWAKE_MT);
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->derrmr = I915_READ(DERRMR);
+ error->error = I915_READ(ERROR_GEN6);
+ error->done_reg = I915_READ(DONE_REG);
+ }
+
+ /* 3: Feature specific registers */
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ error->gam_ecochk = I915_READ(GAM_ECOCHK);
+ error->gac_eco = I915_READ(GAC_ECO_BITS);
+ }
+
+ /* 4: Everything else */
+ if (HAS_HW_CONTEXTS(dev))
+ error->ccid = I915_READ(CCID);
+
+ if (HAS_PCH_SPLIT(dev))
+ error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else {
+ error->ier = I915_READ(IER);
+ for_each_pipe(pipe)
+ error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+ }
+
+ error->eir = I915_READ(EIR);
+ error->pgtbl_er = I915_READ(PGTBL_ER);
+
+ i915_get_extra_instdone(dev, error->extra_instdone);
+}
+
+static void i915_error_capture_msg(struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ bool wedged,
+ const char *error_msg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 ecode;
+ int ring_id = -1, len;
+
+ ecode = i915_error_generate_code(dev_priv, error, &ring_id);
+
+ len = scnprintf(error->error_msg, sizeof(error->error_msg),
+ "GPU HANG: ecode %d:0x%08x", ring_id, ecode);
+
+ if (ring_id != -1 && error->ring[ring_id].pid != -1)
+ len += scnprintf(error->error_msg + len,
+ sizeof(error->error_msg) - len,
+ ", in %s [%d]",
+ error->ring[ring_id].comm,
+ error->ring[ring_id].pid);
+
+ scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
+ ", reason: %s, action: %s",
+ error_msg,
+ wedged ? "reset" : "continue");
+}
+
+static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ error->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ error->suspend_count = dev_priv->suspend_count;
+}
+
 /**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
@@ -904,18 +1143,13 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
 * out a structure which becomes available in debugfs for user level tools
 * to pick up. 
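+ * Note: this can be called from interrupt context, which is why the
+ * error state itself is allocated with GFP_ATOMIC below.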
*/ -void i915_capture_error_state(struct drm_device *dev) +void i915_capture_error_state(struct drm_device *dev, bool wedged, + const char *error_msg) { + static bool warned; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_error_state *error; unsigned long flags; - int pipe; - - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); - error = dev_priv->gpu_error.first_error; - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); - if (error) - return; /* Account for pipe specific data like PIPE*STAT */ error = kzalloc(sizeof(*error), GFP_ATOMIC); @@ -924,52 +1158,10 @@ void i915_capture_error_state(struct drm_device *dev) return; } - DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", - dev->primary->index); - DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n"); - DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); - DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); - DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); - kref_init(&error->ref); - error->eir = I915_READ(EIR); - error->pgtbl_er = I915_READ(PGTBL_ER); - if (HAS_HW_CONTEXTS(dev)) - error->ccid = I915_READ(CCID); - - if (HAS_PCH_SPLIT(dev)) - error->ier = I915_READ(DEIER) | I915_READ(GTIER); - else if (IS_VALLEYVIEW(dev)) - error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); - else if (IS_GEN2(dev)) - error->ier = I915_READ16(IER); - else - error->ier = I915_READ(IER); - - if (INTEL_INFO(dev)->gen >= 6) - error->derrmr = I915_READ(DERRMR); - - if (IS_VALLEYVIEW(dev)) - error->forcewake = I915_READ(FORCEWAKE_VLV); - else if (INTEL_INFO(dev)->gen >= 7) - error->forcewake = I915_READ(FORCEWAKE_MT); - else if (INTEL_INFO(dev)->gen == 6) - error->forcewake = I915_READ(FORCEWAKE); - - if (!HAS_PCH_SPLIT(dev)) - for_each_pipe(pipe) - error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); - - if (INTEL_INFO(dev)->gen >= 6) { - error->error = I915_READ(ERROR_GEN6); - error->done_reg = I915_READ(DONE_REG); - } - - if (INTEL_INFO(dev)->gen == 7) - error->err_int = I915_READ(GEN7_ERR_INT); - - i915_get_extra_instdone(dev, error->extra_instdone); + i915_capture_gen_state(dev_priv, error); + i915_capture_reg_state(dev_priv, error); i915_gem_capture_buffers(dev_priv, error); i915_gem_record_fences(dev, error); i915_gem_record_rings(dev, error); @@ -979,6 +1171,9 @@ void i915_capture_error_state(struct drm_device *dev) error->overlay = intel_overlay_capture_error_state(dev); error->display = intel_display_capture_error_state(dev); + i915_error_capture_msg(dev, error, wedged, error_msg); + DRM_INFO("%s\n", error->error_msg); + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); if (dev_priv->gpu_error.first_error == NULL) { dev_priv->gpu_error.first_error = error; @@ -986,8 +1181,19 @@ void i915_capture_error_state(struct drm_device *dev) } spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); - if (error) + if (error) { i915_error_state_free(&error->ref); + return; + } + + if (!warned) { + DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n"); + DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); + DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); + DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); + DRM_INFO("GPU crash dump saved to 
/sys/class/drm/card%d/error\n", dev->primary->index); + warned = true; + } } void i915_error_state_get(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index d554169ac59..7753249b3a9 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -82,13 +82,13 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ /* For display hotplug interrupt */ static void -ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) +ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); - if (dev_priv->pc8.irqs_disabled) { + if (dev_priv->pm.irqs_disabled) { WARN(1, "IRQs disabled\n"); - dev_priv->pc8.regsave.deimr &= ~mask; + dev_priv->pm.regsave.deimr &= ~mask; return; } @@ -100,13 +100,13 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) } static void -ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) +ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); - if (dev_priv->pc8.irqs_disabled) { + if (dev_priv->pm.irqs_disabled) { WARN(1, "IRQs disabled\n"); - dev_priv->pc8.regsave.deimr |= mask; + dev_priv->pm.regsave.deimr |= mask; return; } @@ -129,10 +129,10 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, { assert_spin_locked(&dev_priv->irq_lock); - if (dev_priv->pc8.irqs_disabled) { + if (dev_priv->pm.irqs_disabled) { WARN(1, "IRQs disabled\n"); - dev_priv->pc8.regsave.gtimr &= ~interrupt_mask; - dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask & + dev_priv->pm.regsave.gtimr &= ~interrupt_mask; + dev_priv->pm.regsave.gtimr |= (~enabled_irq_mask & interrupt_mask); return; } @@ -167,10 +167,10 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, assert_spin_locked(&dev_priv->irq_lock); - if (dev_priv->pc8.irqs_disabled) { + if (dev_priv->pm.irqs_disabled) { WARN(1, "IRQs disabled\n"); - dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask; - dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask & + dev_priv->pm.regsave.gen6_pmimr &= ~interrupt_mask; + dev_priv->pm.regsave.gen6_pmimr |= (~enabled_irq_mask & interrupt_mask); return; } @@ -232,6 +232,18 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev) return true; } +static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg = PIPESTAT(pipe); + u32 pipestat = I915_READ(reg) & 0x7fff0000; + + assert_spin_locked(&dev_priv->irq_lock); + + I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); + POSTING_READ(reg); +} + static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { @@ -301,11 +313,11 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, assert_spin_locked(&dev_priv->irq_lock); - if (dev_priv->pc8.irqs_disabled && + if (dev_priv->pm.irqs_disabled && (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) { WARN(1, "IRQs disabled\n"); - dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask; - dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask & + dev_priv->pm.regsave.sdeimr &= ~interrupt_mask; + dev_priv->pm.regsave.sdeimr |= (~enabled_irq_mask & interrupt_mask); return; } @@ -375,16 +387,15 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, * * Returns the previous state of underrun reporting. 
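+ * The caller must already hold dev_priv->irq_lock; the plain
+ * intel_set_cpu_fifo_underrun_reporting() wrapper below acquires it.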
*/ -bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, - enum pipe pipe, bool enable) +bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, + enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - unsigned long flags; bool ret; - spin_lock_irqsave(&dev_priv->irq_lock, flags); + assert_spin_locked(&dev_priv->irq_lock); ret = !intel_crtc->cpu_fifo_underrun_disabled; @@ -393,7 +404,9 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, intel_crtc->cpu_fifo_underrun_disabled = !enable; - if (IS_GEN5(dev) || IS_GEN6(dev)) + if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))) + i9xx_clear_fifo_underrun(dev, pipe); + else if (IS_GEN5(dev) || IS_GEN6(dev)) ironlake_set_fifo_underrun_reporting(dev, pipe, enable); else if (IS_GEN7(dev)) ivybridge_set_fifo_underrun_reporting(dev, pipe, enable); @@ -401,10 +414,33 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, broadwell_set_fifo_underrun_reporting(dev, pipe, enable); done: + return ret; +} + +bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, + enum pipe pipe, bool enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long flags; + bool ret; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + return ret; } +static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev, + enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + return !intel_crtc->cpu_fifo_underrun_disabled; +} + /** * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages * @dev: drm device @@ -458,45 +494,109 @@ done: } -void -i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask) +static void +__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, + u32 enable_mask, u32 status_mask) { u32 reg = PIPESTAT(pipe); - u32 pipestat = I915_READ(reg) & 0x7fff0000; + u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; assert_spin_locked(&dev_priv->irq_lock); - if ((pipestat & mask) == mask) + if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || + status_mask & ~PIPESTAT_INT_STATUS_MASK)) return; + if ((pipestat & enable_mask) == enable_mask) + return; + + dev_priv->pipestat_irq_mask[pipe] |= status_mask; + /* Enable the interrupt, clear any pending status */ - pipestat |= mask | (mask >> 16); + pipestat |= enable_mask | status_mask; I915_WRITE(reg, pipestat); POSTING_READ(reg); } -void -i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask) +static void +__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, + u32 enable_mask, u32 status_mask) { u32 reg = PIPESTAT(pipe); - u32 pipestat = I915_READ(reg) & 0x7fff0000; + u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; assert_spin_locked(&dev_priv->irq_lock); - if ((pipestat & mask) == 0) + if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || + status_mask & ~PIPESTAT_INT_STATUS_MASK)) + return; + + if ((pipestat & enable_mask) == 0) return; - pipestat &= ~mask; + dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; + + pipestat &= ~enable_mask; I915_WRITE(reg, pipestat); 
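+ /* The posting read flushes the pipestat write out to the hardware. */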
POSTING_READ(reg); } +static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) +{ + u32 enable_mask = status_mask << 16; + + /* + * On pipe A we don't support the PSR interrupt yet, on pipe B the + * same bit MBZ. + */ + if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) + return 0; + + enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | + SPRITE0_FLIP_DONE_INT_EN_VLV | + SPRITE1_FLIP_DONE_INT_EN_VLV); + if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) + enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; + if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) + enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; + + return enable_mask; +} + +void +i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, + u32 status_mask) +{ + u32 enable_mask; + + if (IS_VALLEYVIEW(dev_priv->dev)) + enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, + status_mask); + else + enable_mask = status_mask << 16; + __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); +} + +void +i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, + u32 status_mask) +{ + u32 enable_mask; + + if (IS_VALLEYVIEW(dev_priv->dev)) + enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, + status_mask); + else + enable_mask = status_mask << 16; + __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); +} + /** * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion */ static void i915_enable_asle_pipestat(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) @@ -504,10 +604,10 @@ static void i915_enable_asle_pipestat(struct drm_device *dev) spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE); + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, PIPE_A, - PIPE_LEGACY_BLC_EVENT_ENABLE); + PIPE_LEGACY_BLC_EVENT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } @@ -524,7 +624,7 @@ static void i915_enable_asle_pipestat(struct drm_device *dev) static int i915_pipe_enabled(struct drm_device *dev, int pipe) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; if (drm_core_check_feature(dev, DRIVER_MODESET)) { /* Locking is horribly broken here, but whatever. 
*/ @@ -548,7 +648,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe) */ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long high_frame; unsigned long low_frame; u32 high1, high2, low, pixel, vbl_start; @@ -604,7 +704,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int reg = PIPE_FRMCOUNT_GM45(pipe); if (!i915_pipe_enabled(dev, pipe)) { @@ -859,8 +959,8 @@ static bool intel_hpd_irq_event(struct drm_device *dev, static void i915_hotplug_work_func(struct work_struct *work) { - drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, - hotplug_work); + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, hotplug_work); struct drm_device *dev = dev_priv->dev; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_connector *intel_connector; @@ -928,9 +1028,14 @@ static void i915_hotplug_work_func(struct work_struct *work) drm_kms_helper_hotplug_event(dev); } +static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv) +{ + del_timer_sync(&dev_priv->hotplug_reenable_timer); +} + static void ironlake_rps_change_irq_handler(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 busy_up, busy_down, max_avg, min_avg; u8 new_delay; @@ -981,8 +1086,8 @@ static void notify_ring(struct drm_device *dev, static void gen6_pm_rps_work(struct work_struct *work) { - drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, - rps.work); + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, rps.work); u32 pm_iir; int new_delay, adj; @@ -990,13 +1095,13 @@ static void gen6_pm_rps_work(struct work_struct *work) pm_iir = dev_priv->rps.pm_iir; dev_priv->rps.pm_iir = 0; /* Make sure not to corrupt PMIMR state used by ringbuffer code */ - snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); + snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); spin_unlock_irq(&dev_priv->irq_lock); /* Make sure we didn't queue anything we're not going to process. */ - WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS); + WARN_ON(pm_iir & ~dev_priv->pm_rps_events); - if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) + if ((pm_iir & dev_priv->pm_rps_events) == 0) return; mutex_lock(&dev_priv->rps.hw_lock); @@ -1007,36 +1112,38 @@ static void gen6_pm_rps_work(struct work_struct *work) adj *= 2; else adj = 1; - new_delay = dev_priv->rps.cur_delay + adj; + new_delay = dev_priv->rps.cur_freq + adj; /* * For better performance, jump directly * to RPe if we're below it. 
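+ * (RPe is the hardware's efficient frequency, tracked as
+ * dev_priv->rps.efficient_freq in the code below.)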
*/ - if (new_delay < dev_priv->rps.rpe_delay) - new_delay = dev_priv->rps.rpe_delay; + if (new_delay < dev_priv->rps.efficient_freq) + new_delay = dev_priv->rps.efficient_freq; } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { - if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay) - new_delay = dev_priv->rps.rpe_delay; + if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) + new_delay = dev_priv->rps.efficient_freq; else - new_delay = dev_priv->rps.min_delay; + new_delay = dev_priv->rps.min_freq_softlimit; adj = 0; } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { if (adj < 0) adj *= 2; else adj = -1; - new_delay = dev_priv->rps.cur_delay + adj; + new_delay = dev_priv->rps.cur_freq + adj; } else { /* unknown event */ - new_delay = dev_priv->rps.cur_delay; + new_delay = dev_priv->rps.cur_freq; } /* sysfs frequency interfaces may have snuck in while servicing the * interrupt */ new_delay = clamp_t(int, new_delay, - dev_priv->rps.min_delay, dev_priv->rps.max_delay); - dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay; + dev_priv->rps.min_freq_softlimit, + dev_priv->rps.max_freq_softlimit); + + dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq; if (IS_VALLEYVIEW(dev_priv->dev)) valleyview_set_rps(dev_priv->dev, new_delay); @@ -1058,8 +1165,8 @@ static void gen6_pm_rps_work(struct work_struct *work) */ static void ivybridge_parity_work(struct work_struct *work) { - drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, - l3_parity.error_work); + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, l3_parity.error_work); u32 error_status, row, bank, subbank; char *parity_event[6]; uint32_t misccpctl; @@ -1131,7 +1238,7 @@ out: static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; if (!HAS_L3_DPF(dev)) return; @@ -1177,8 +1284,8 @@ static void snb_gt_irq_handler(struct drm_device *dev, if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | GT_BSD_CS_ERROR_INTERRUPT | GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { - DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); - i915_handle_error(dev, false); + i915_handle_error(dev, false, "GT error interrupt 0x%08x", + gt_iir); } if (gt_iir & GT_PARITY_ERROR(dev)) @@ -1242,13 +1349,16 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, const u32 *hpd) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int i; bool storm_detected = false; if (!hotplug_trigger) return; + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", + hotplug_trigger); + spin_lock(&dev_priv->irq_lock); for (i = 1; i < HPD_NUM_PINS; i++) { @@ -1295,14 +1405,14 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, static void gmbus_irq_handler(struct drm_device *dev) { - struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; wake_up_all(&dev_priv->gmbus_wait_queue); } static void dp_aux_irq_handler(struct drm_device *dev) { - struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; wake_up_all(&dev_priv->gmbus_wait_queue); } @@ -1408,10 +1518,10 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) * the work queue. 
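+ * (The actual frequency change is performed by gen6_pm_rps_work() above.)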
*/ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) { - if (pm_iir & GEN6_PM_RPS_EVENTS) { + if (pm_iir & dev_priv->pm_rps_events) { spin_lock(&dev_priv->irq_lock); - dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; - snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS); + dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; + snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); spin_unlock(&dev_priv->irq_lock); queue_work(dev_priv->wq, &dev_priv->rps.work); @@ -1422,23 +1532,89 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { - DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); - i915_handle_error(dev_priv->dev, false); + i915_handle_error(dev_priv->dev, false, + "VEBOX CS error interrupt 0x%08x", + pm_iir); } } } +static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pipe_stats[I915_MAX_PIPES] = { }; + int pipe; + + spin_lock(&dev_priv->irq_lock); + for_each_pipe(pipe) { + int reg; + u32 mask, iir_bit = 0; + + /* + * PIPESTAT bits get signalled even when the interrupt is + * disabled with the mask bits, and some of the status bits do + * not generate interrupts at all (like the underrun bit). Hence + * we need to be careful that we only handle what we want to + * handle. + */ + mask = 0; + if (__cpu_fifo_underrun_reporting_enabled(dev, pipe)) + mask |= PIPE_FIFO_UNDERRUN_STATUS; + + switch (pipe) { + case PIPE_A: + iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; + break; + case PIPE_B: + iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; + break; + } + if (iir & iir_bit) + mask |= dev_priv->pipestat_irq_mask[pipe]; + + if (!mask) + continue; + + reg = PIPESTAT(pipe); + mask |= PIPESTAT_INT_ENABLE_MASK; + pipe_stats[pipe] = I915_READ(reg) & mask; + + /* + * Clear the PIPE*STAT regs before the IIR + */ + if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | + PIPESTAT_INT_STATUS_MASK)) + I915_WRITE(reg, pipe_stats[pipe]); + } + spin_unlock(&dev_priv->irq_lock); + + for_each_pipe(pipe) { + if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) + drm_handle_vblank(dev, pipe); + + if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip(dev, pipe); + } + + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev, pipe); + + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && + intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) + DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); + } + + if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) + gmbus_irq_handler(dev); +} + static irqreturn_t valleyview_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *) arg; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 iir, gt_iir, pm_iir; irqreturn_t ret = IRQ_NONE; - unsigned long irqflags; - int pipe; - u32 pipe_stats[I915_MAX_PIPES]; - - atomic_inc(&dev_priv->irq_received); while (true) { iir = I915_READ(VLV_IIR); @@ -1452,44 +1628,13 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) snb_gt_irq_handler(dev, dev_priv, gt_iir); - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - for_each_pipe(pipe) { - int reg = PIPESTAT(pipe); - pipe_stats[pipe] = I915_READ(reg); - - /* - * Clear the PIPE*STAT regs before the IIR - 
*/ - if (pipe_stats[pipe] & 0x8000ffff) { - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - DRM_DEBUG_DRIVER("pipe %c underrun\n", - pipe_name(pipe)); - I915_WRITE(reg, pipe_stats[pipe]); - } - } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - - for_each_pipe(pipe) { - if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) - drm_handle_vblank(dev, pipe); - - if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip(dev, pipe); - } - - if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev, pipe); - } + valleyview_pipestat_irq_handler(dev, iir); /* Consume port. Then clear IIR or we'll miss events */ if (iir & I915_DISPLAY_PORT_INTERRUPT) { u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; - DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", - hotplug_status); - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) @@ -1499,8 +1644,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) I915_READ(PORT_HOTPLUG_STAT); } - if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) - gmbus_irq_handler(dev); if (pm_iir) gen6_rps_irq_handler(dev_priv, pm_iir); @@ -1516,7 +1659,7 @@ out: static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; @@ -1559,12 +1702,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) if (pch_iir & SDE_TRANSA_FIFO_UNDER) if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false)) - DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); + DRM_ERROR("PCH transcoder A FIFO underrun\n"); if (pch_iir & SDE_TRANSB_FIFO_UNDER) if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, false)) - DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); + DRM_ERROR("PCH transcoder B FIFO underrun\n"); } static void ivb_err_int_handler(struct drm_device *dev) @@ -1580,8 +1723,8 @@ static void ivb_err_int_handler(struct drm_device *dev) if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) - DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", - pipe_name(pipe)); + DRM_ERROR("Pipe %c FIFO underrun\n", + pipe_name(pipe)); } if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { @@ -1606,24 +1749,24 @@ static void cpt_serr_int_handler(struct drm_device *dev) if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false)) - DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); + DRM_ERROR("PCH transcoder A FIFO underrun\n"); if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, false)) - DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); + DRM_ERROR("PCH transcoder B FIFO underrun\n"); if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, false)) - DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); + DRM_ERROR("PCH transcoder C FIFO underrun\n"); I915_WRITE(SERR_INT, serr_int); } static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & 
SDE_HOTPLUG_MASK_CPT; @@ -1678,8 +1821,8 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) - DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", - pipe_name(pipe)); + DRM_ERROR("Pipe %c FIFO underrun\n", + pipe_name(pipe)); if (de_iir & DE_PIPE_CRC_DONE(pipe)) i9xx_pipe_crc_irq_handler(dev, pipe); @@ -1711,7 +1854,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) { struct drm_i915_private *dev_priv = dev->dev_private; - enum pipe i; + enum pipe pipe; if (de_iir & DE_ERR_INT_IVB) ivb_err_int_handler(dev); @@ -1722,14 +1865,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) if (de_iir & DE_GSE_IVB) intel_opregion_asle_intr(dev); - for_each_pipe(i) { - if (de_iir & (DE_PIPE_VBLANK_IVB(i))) - drm_handle_vblank(dev, i); + for_each_pipe(pipe) { + if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) + drm_handle_vblank(dev, pipe); /* plane/pipes map 1:1 on ilk+ */ - if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) { - intel_prepare_page_flip(dev, i); - intel_finish_page_flip_plane(dev, i); + if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip_plane(dev, pipe); } } @@ -1747,12 +1890,10 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) static irqreturn_t ironlake_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *) arg; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 de_iir, gt_iir, de_ier, sde_ier = 0; irqreturn_t ret = IRQ_NONE; - atomic_inc(&dev_priv->irq_received); - /* We get interrupts on unclaimed registers, so check for this before we * do any I915_{READ,WRITE}. */ intel_uncore_check_errors(dev); @@ -1821,8 +1962,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) uint32_t tmp = 0; enum pipe pipe; - atomic_inc(&dev_priv->irq_received); - master_ctl = I915_READ(GEN8_MASTER_IRQ); master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; if (!master_ctl) @@ -1884,8 +2023,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) - DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", - pipe_name(pipe)); + DRM_ERROR("Pipe %c FIFO underrun\n", + pipe_name(pipe)); } if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { @@ -1962,8 +2101,8 @@ static void i915_error_work_func(struct work_struct *work) { struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, work); - drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, - gpu_error); + struct drm_i915_private *dev_priv = + container_of(error, struct drm_i915_private, gpu_error); struct drm_device *dev = dev_priv->dev; char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; @@ -2127,11 +2266,18 @@ static void i915_report_and_clear_eir(struct drm_device *dev) * so userspace knows something bad happened (should trigger collection * of a ring dump etc.). */ -void i915_handle_error(struct drm_device *dev, bool wedged) +void i915_handle_error(struct drm_device *dev, bool wedged, + const char *fmt, ...) 
{ struct drm_i915_private *dev_priv = dev->dev_private; + va_list args; + char error_msg[80]; - i915_capture_error_state(dev); + va_start(args, fmt); + vscnprintf(error_msg, sizeof(error_msg), fmt, args); + va_end(args); + + i915_capture_error_state(dev, wedged, error_msg); i915_report_and_clear_eir(dev); if (wedged) { @@ -2165,7 +2311,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged) static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_i915_gem_object *obj; @@ -2197,8 +2343,8 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in } else { int dspaddr = DSPADDR(intel_crtc->plane); stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + - crtc->y * crtc->fb->pitches[0] + - crtc->x * crtc->fb->bits_per_pixel/8); + crtc->y * crtc->primary->fb->pitches[0] + + crtc->x * crtc->primary->fb->bits_per_pixel/8); } spin_unlock_irqrestore(&dev->event_lock, flags); @@ -2214,7 +2360,7 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in */ static int i915_enable_vblank(struct drm_device *dev, int pipe) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; if (!i915_pipe_enabled(dev, pipe)) @@ -2223,13 +2369,13 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe) spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, pipe, - PIPE_START_VBLANK_INTERRUPT_ENABLE); + PIPE_START_VBLANK_INTERRUPT_STATUS); else i915_enable_pipestat(dev_priv, pipe, - PIPE_VBLANK_INTERRUPT_ENABLE); + PIPE_VBLANK_INTERRUPT_STATUS); /* maintain vblank delivery even in deep C-states */ - if (dev_priv->info->gen == 3) + if (INTEL_INFO(dev)->gen == 3) I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -2238,7 +2384,7 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe) static int ironlake_enable_vblank(struct drm_device *dev, int pipe) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
@@ -2255,22 +2401,15 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
 {
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
 unsigned long irqflags;
- u32 imr;
 if (!i915_pipe_enabled(dev, pipe))
 return -EINVAL;
 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- imr = I915_READ(VLV_IMR);
- if (pipe == PIPE_A)
- imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
- else
- imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- I915_WRITE(VLV_IMR, imr);
 i915_enable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 return 0;
@@ -2297,22 +2436,22 @@ static int gen8_enable_vblank(struct drm_device *dev, int pipe)
 */
 static void i915_disable_vblank(struct drm_device *dev, int pipe)
 {
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
 unsigned long irqflags;
 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (dev_priv->info->gen == 3)
+ if (INTEL_INFO(dev)->gen == 3)
 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
 i915_disable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_ENABLE |
- PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ PIPE_VBLANK_INTERRUPT_STATUS |
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
 {
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
 unsigned long irqflags;
 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ?
 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
@@ -2324,19 +2463,12 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
 {
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
 unsigned long irqflags;
- u32 imr;
 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 i915_disable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_ENABLE);
- imr = I915_READ(VLV_IMR);
- if (pipe == PIPE_A)
- imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
- else
- imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- I915_WRITE(VLV_IMR, imr);
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
@@ -2373,29 +2505,43 @@ static struct intel_ring_buffer *
 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
 {
 struct drm_i915_private *dev_priv = ring->dev->dev_private;
- u32 cmd, ipehr, acthd, acthd_min;
+ u32 cmd, ipehr, head;
+ int i;
 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
 if ((ipehr & ~(0x3 << 16)) !=
 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
 return NULL;
- /* ACTHD is likely pointing to the dword after the actual command,
- * so scan backwards until we find the MBOX.
+ /*
+ * HEAD is likely pointing to the dword after the actual command,
+ * so scan backwards until we find the MBOX. But limit it to just 3
+ * dwords. Note that we don't care about ACTHD here since that might
+ * point at a batch, and semaphores are always emitted into the
+ * ringbuffer itself. 
*/ - acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; - acthd_min = max((int)acthd - 3 * 4, 0); - do { - cmd = ioread32(ring->virtual_start + acthd); + head = I915_READ_HEAD(ring) & HEAD_ADDR; + + for (i = 4; i; --i) { + /* + * Be paranoid and presume the hw has gone off into the wild - + * our ring is smaller than what the hardware (and hence + * HEAD_ADDR) allows. Also handles wrap-around. + */ + head &= ring->size - 1; + + /* This here seems to blow up */ + cmd = ioread32(ring->virtual_start + head); if (cmd == ipehr) break; - acthd -= 4; - if (acthd < acthd_min) - return NULL; - } while (1); + head -= 4; + } - *seqno = ioread32(ring->virtual_start+acthd+4)+1; + if (!i) + return NULL; + + *seqno = ioread32(ring->virtual_start + head + 4) + 1; return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; } @@ -2429,7 +2575,7 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) } static enum intel_ring_hangcheck_action -ring_stuck(struct intel_ring_buffer *ring, u32 acthd) +ring_stuck(struct intel_ring_buffer *ring, u64 acthd) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -2448,9 +2594,9 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd) */ tmp = I915_READ_CTL(ring); if (tmp & RING_WAIT) { - DRM_ERROR("Kicking stuck wait on %s\n", - ring->name); - i915_handle_error(dev, false); + i915_handle_error(dev, false, + "Kicking stuck wait on %s", + ring->name); I915_WRITE_CTL(ring, tmp); return HANGCHECK_KICK; } @@ -2460,9 +2606,9 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd) default: return HANGCHECK_HUNG; case 1: - DRM_ERROR("Kicking stuck semaphore on %s\n", - ring->name); - i915_handle_error(dev, false); + i915_handle_error(dev, false, + "Kicking stuck semaphore on %s", + ring->name); I915_WRITE_CTL(ring, tmp); return HANGCHECK_KICK; case 0: @@ -2484,7 +2630,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd) static void i915_hangcheck_elapsed(unsigned long data) { struct drm_device *dev = (struct drm_device *)data; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; int i; int busy_count = 0, rings_hung = 0; @@ -2492,13 +2638,13 @@ static void i915_hangcheck_elapsed(unsigned long data) #define BUSY 1 #define KICK 5 #define HUNG 20 -#define FIRE 30 - if (!i915_enable_hangcheck) + if (!i915.enable_hangcheck) return; for_each_ring(ring, dev_priv, i) { - u32 seqno, acthd; + u64 acthd; + u32 seqno; bool busy = true; semaphore_clear_deadlocks(dev_priv); @@ -2576,7 +2722,7 @@ static void i915_hangcheck_elapsed(unsigned long data) } for_each_ring(ring, dev_priv, i) { - if (ring->hangcheck.score > FIRE) { + if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { DRM_INFO("%s on %s\n", stuck[i] ? 
"stuck" : "no progress", ring->name); @@ -2585,7 +2731,7 @@ static void i915_hangcheck_elapsed(unsigned long data) } if (rings_hung) - return i915_handle_error(dev, true); + return i915_handle_error(dev, true, "Ring hung"); if (busy_count) /* Reset timer case chip hangs without another request @@ -2596,7 +2742,7 @@ static void i915_hangcheck_elapsed(unsigned long data) void i915_queue_hangcheck(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (!i915_enable_hangcheck) + if (!i915.enable_hangcheck) return; mod_timer(&dev_priv->gpu_error.hangcheck_timer, @@ -2643,9 +2789,7 @@ static void gen5_gt_irq_preinstall(struct drm_device *dev) */ static void ironlake_irq_preinstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - - atomic_set(&dev_priv->irq_received, 0); + struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE(HWSTAM, 0xeffe); @@ -2660,11 +2804,9 @@ static void ironlake_irq_preinstall(struct drm_device *dev) static void valleyview_irq_preinstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - atomic_set(&dev_priv->irq_received, 0); - /* VLV magic */ I915_WRITE(VLV_IMR, 0); I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); @@ -2694,8 +2836,6 @@ static void gen8_irq_preinstall(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - atomic_set(&dev_priv->irq_received, 0); - I915_WRITE(GEN8_MASTER_IRQ, 0); POSTING_READ(GEN8_MASTER_IRQ); @@ -2740,7 +2880,7 @@ static void gen8_irq_preinstall(struct drm_device *dev) static void ibx_hpd_irq_setup(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *intel_encoder; u32 hotplug_irqs, hotplug, enabled_irqs = 0; @@ -2775,7 +2915,7 @@ static void ibx_hpd_irq_setup(struct drm_device *dev) static void ibx_irq_postinstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 mask; if (HAS_PCH_NOP(dev)) @@ -2821,7 +2961,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) POSTING_READ(GTIER); if (INTEL_INFO(dev)->gen >= 6) { - pm_irqs |= GEN6_PM_RPS_EVENTS; + pm_irqs |= dev_priv->pm_rps_events; if (HAS_VEBOX(dev)) pm_irqs |= PM_VEBOX_USER_INTERRUPT; @@ -2837,7 +2977,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) static int ironlake_irq_postinstall(struct drm_device *dev) { unsigned long irqflags; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 display_mask, extra_mask; if (INTEL_INFO(dev)->gen >= 7) { @@ -2885,44 +3025,113 @@ static int ironlake_irq_postinstall(struct drm_device *dev) return 0; } +static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) +{ + u32 pipestat_mask; + u32 iir_mask; + + pipestat_mask = PIPESTAT_INT_STATUS_MASK | + PIPE_FIFO_UNDERRUN_STATUS; + + I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); + I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); + POSTING_READ(PIPESTAT(PIPE_A)); + + pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | + PIPE_CRC_DONE_INTERRUPT_STATUS; + + i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask | + PIPE_GMBUS_INTERRUPT_STATUS); + 
i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask); + + iir_mask = I915_DISPLAY_PORT_INTERRUPT | + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; + dev_priv->irq_mask &= ~iir_mask; + + I915_WRITE(VLV_IIR, iir_mask); + I915_WRITE(VLV_IIR, iir_mask); + I915_WRITE(VLV_IMR, dev_priv->irq_mask); + I915_WRITE(VLV_IER, ~dev_priv->irq_mask); + POSTING_READ(VLV_IER); +} + +static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) +{ + u32 pipestat_mask; + u32 iir_mask; + + iir_mask = I915_DISPLAY_PORT_INTERRUPT | + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; + + dev_priv->irq_mask |= iir_mask; + I915_WRITE(VLV_IER, ~dev_priv->irq_mask); + I915_WRITE(VLV_IMR, dev_priv->irq_mask); + I915_WRITE(VLV_IIR, iir_mask); + I915_WRITE(VLV_IIR, iir_mask); + POSTING_READ(VLV_IIR); + + pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | + PIPE_CRC_DONE_INTERRUPT_STATUS; + + i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask | + PIPE_GMBUS_INTERRUPT_STATUS); + i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask); + + pipestat_mask = PIPESTAT_INT_STATUS_MASK | + PIPE_FIFO_UNDERRUN_STATUS; + I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); + I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); + POSTING_READ(PIPESTAT(PIPE_A)); +} + +void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) +{ + assert_spin_locked(&dev_priv->irq_lock); + + if (dev_priv->display_irqs_enabled) + return; + + dev_priv->display_irqs_enabled = true; + + if (dev_priv->dev->irq_enabled) + valleyview_display_irqs_install(dev_priv); +} + +void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) +{ + assert_spin_locked(&dev_priv->irq_lock); + + if (!dev_priv->display_irqs_enabled) + return; + + dev_priv->display_irqs_enabled = false; + + if (dev_priv->dev->irq_enabled) + valleyview_display_irqs_uninstall(dev_priv); +} + static int valleyview_irq_postinstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 enable_mask; - u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV | - PIPE_CRC_DONE_ENABLE; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; - enable_mask = I915_DISPLAY_PORT_INTERRUPT; - enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; - - /* - *Leave vblank interrupts masked initially. enable/disable will - * toggle them based on usage. - */ - dev_priv->irq_mask = (~enable_mask) | - I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | - I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + dev_priv->irq_mask = ~0; I915_WRITE(PORT_HOTPLUG_EN, 0); POSTING_READ(PORT_HOTPLUG_EN); I915_WRITE(VLV_IMR, dev_priv->irq_mask); - I915_WRITE(VLV_IER, enable_mask); + I915_WRITE(VLV_IER, ~dev_priv->irq_mask); I915_WRITE(VLV_IIR, 0xffffffff); - I915_WRITE(PIPESTAT(0), 0xffff); - I915_WRITE(PIPESTAT(1), 0xffff); POSTING_READ(VLV_IER); /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. 
*/ spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); - i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable); + if (dev_priv->display_irqs_enabled) + valleyview_display_irqs_install(dev_priv); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); I915_WRITE(VLV_IIR, 0xffffffff); @@ -3018,8 +3227,6 @@ static void gen8_irq_uninstall(struct drm_device *dev) if (!dev_priv) return; - atomic_set(&dev_priv->irq_received, 0); - I915_WRITE(GEN8_MASTER_IRQ, 0); #define GEN8_IRQ_FINI_NDX(type, which) do { \ @@ -3054,13 +3261,14 @@ static void gen8_irq_uninstall(struct drm_device *dev) static void valleyview_irq_uninstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long irqflags; int pipe; if (!dev_priv) return; - del_timer_sync(&dev_priv->hotplug_reenable_timer); + intel_hpd_irq_uninstall(dev_priv); for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), 0xffff); @@ -3068,8 +3276,14 @@ static void valleyview_irq_uninstall(struct drm_device *dev) I915_WRITE(HWSTAM, 0xffffffff); I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - for_each_pipe(pipe) - I915_WRITE(PIPESTAT(pipe), 0xffff); + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + if (dev_priv->display_irqs_enabled) + valleyview_display_irqs_uninstall(dev_priv); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + dev_priv->irq_mask = 0; + I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IMR, 0xffffffff); I915_WRITE(VLV_IER, 0x0); @@ -3078,12 +3292,12 @@ static void valleyview_irq_uninstall(struct drm_device *dev) static void ironlake_irq_uninstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; if (!dev_priv) return; - del_timer_sync(&dev_priv->hotplug_reenable_timer); + intel_hpd_irq_uninstall(dev_priv); I915_WRITE(HWSTAM, 0xffffffff); @@ -3109,11 +3323,9 @@ static void ironlake_irq_uninstall(struct drm_device *dev) static void i8xx_irq_preinstall(struct drm_device * dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - atomic_set(&dev_priv->irq_received, 0); - for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE16(IMR, 0xffff); @@ -3123,7 +3335,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev) static int i8xx_irq_postinstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; I915_WRITE16(EMR, @@ -3148,8 +3360,8 @@ static int i8xx_irq_postinstall(struct drm_device *dev) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. 
*/ spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; @@ -3161,7 +3373,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev) static bool i8xx_handle_vblank(struct drm_device *dev, int plane, int pipe, u32 iir) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); if (!drm_handle_vblank(dev, pipe)) @@ -3189,7 +3401,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev, static irqreturn_t i8xx_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *) arg; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u16 iir, new_iir; u32 pipe_stats[2]; unsigned long irqflags; @@ -3198,8 +3410,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; - atomic_inc(&dev_priv->irq_received); - iir = I915_READ16(IIR); if (iir == 0) return IRQ_NONE; @@ -3212,7 +3422,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) */ spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - i915_handle_error(dev, false); + i915_handle_error(dev, false, + "Command parser error, iir 0x%08x", + iir); for_each_pipe(pipe) { int reg = PIPESTAT(pipe); @@ -3221,12 +3433,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) /* * Clear the PIPE*STAT regs before the IIR */ - if (pipe_stats[pipe] & 0x8000ffff) { - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - DRM_DEBUG_DRIVER("pipe %c underrun\n", - pipe_name(pipe)); + if (pipe_stats[pipe] & 0x8000ffff) I915_WRITE(reg, pipe_stats[pipe]); - } } spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -3249,6 +3457,10 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev, pipe); + + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && + intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) + DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); } iir = new_iir; @@ -3259,7 +3471,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) static void i8xx_irq_uninstall(struct drm_device * dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; for_each_pipe(pipe) { @@ -3274,11 +3486,9 @@ static void i8xx_irq_uninstall(struct drm_device * dev) static void i915_irq_preinstall(struct drm_device * dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - atomic_set(&dev_priv->irq_received, 0); - if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); @@ -3294,7 +3504,7 @@ static void i915_irq_preinstall(struct drm_device * dev) static int i915_irq_postinstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 enable_mask; unsigned long 
irqflags; @@ -3335,8 +3545,8 @@ static int i915_irq_postinstall(struct drm_device *dev) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. */ spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; @@ -3348,7 +3558,7 @@ static int i915_irq_postinstall(struct drm_device *dev) static bool i915_handle_vblank(struct drm_device *dev, int plane, int pipe, u32 iir) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); if (!drm_handle_vblank(dev, pipe)) @@ -3376,7 +3586,7 @@ static bool i915_handle_vblank(struct drm_device *dev, static irqreturn_t i915_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *) arg; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; unsigned long irqflags; u32 flip_mask = @@ -3384,8 +3594,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; int pipe, ret = IRQ_NONE; - atomic_inc(&dev_priv->irq_received); - iir = I915_READ(IIR); do { bool irq_received = (iir & ~flip_mask) != 0; @@ -3398,7 +3606,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) */ spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - i915_handle_error(dev, false); + i915_handle_error(dev, false, + "Command parser error, iir 0x%08x", + iir); for_each_pipe(pipe) { int reg = PIPESTAT(pipe); @@ -3406,9 +3616,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) /* Clear the PIPE*STAT regs before the IIR */ if (pipe_stats[pipe] & 0x8000ffff) { - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - DRM_DEBUG_DRIVER("pipe %c underrun\n", - pipe_name(pipe)); I915_WRITE(reg, pipe_stats[pipe]); irq_received = true; } @@ -3424,9 +3631,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; - DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", - hotplug_status); - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); @@ -3453,6 +3657,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev, pipe); + + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && + intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) + DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); } if (blc_event || (iir & I915_ASLE_INTERRUPT)) @@ -3484,10 +3692,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) static void i915_irq_uninstall(struct drm_device * dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - del_timer_sync(&dev_priv->hotplug_reenable_timer); + intel_hpd_irq_uninstall(dev_priv); if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); @@ -3508,11 +3716,9 @@ static void 
i915_irq_uninstall(struct drm_device * dev) static void i965_irq_preinstall(struct drm_device * dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - atomic_set(&dev_priv->irq_received, 0); - I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); @@ -3526,7 +3732,7 @@ static void i965_irq_preinstall(struct drm_device * dev) static int i965_irq_postinstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 enable_mask; u32 error_mask; unsigned long irqflags; @@ -3551,9 +3757,9 @@ static int i965_irq_postinstall(struct drm_device *dev) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. */ spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); /* @@ -3585,7 +3791,7 @@ static int i965_irq_postinstall(struct drm_device *dev) static void i915_hpd_irq_setup(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *intel_encoder; u32 hotplug_en; @@ -3617,25 +3823,21 @@ static void i915_hpd_irq_setup(struct drm_device *dev) static irqreturn_t i965_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *) arg; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 iir, new_iir; u32 pipe_stats[I915_MAX_PIPES]; unsigned long irqflags; - int irq_received; int ret = IRQ_NONE, pipe; u32 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; - atomic_inc(&dev_priv->irq_received); - iir = I915_READ(IIR); for (;;) { + bool irq_received = (iir & ~flip_mask) != 0; bool blc_event = false; - irq_received = (iir & ~flip_mask) != 0; - /* Can't rely on pipestat interrupt bit in iir as it might * have been cleared after the pipestat interrupt was received. 
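The comment continuing below explains the ordering that all three legacy handlers (i8xx/i915/i965) share: PIPESTAT has to be latched and cleared before IIR is acked, because the pipestat bit in IIR may already be gone, and the new FIFO-underrun path then acts on the cached values. A condensed sketch of that shape, using the driver's own helpers (flip, vblank and hotplug handling elided; the real handlers also loop on IIR):

static irqreturn_t legacy_irq_handler_sketch(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int pipe;

	iir = I915_READ(IIR);
	if (iir == 0)
		return IRQ_NONE;

	/* Latch and clear PIPESTAT before acking IIR: IIR alone cannot be
	 * trusted for pipe events. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for_each_pipe(pipe) {
		pipe_stats[pipe] = I915_READ(PIPESTAT(pipe));
		if (pipe_stats[pipe] & 0x8000ffff)
			I915_WRITE(PIPESTAT(pipe), pipe_stats[pipe]);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	I915_WRITE(IIR, iir);

	/* Act on the cached stats, e.g. the new underrun path: report once,
	 * then disable further reporting for that pipe. */
	for_each_pipe(pipe)
		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
		    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));

	return IRQ_HANDLED;
}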
* It doesn't set the bit in iir again, but it still produces @@ -3643,7 +3845,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) */ spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - i915_handle_error(dev, false); + i915_handle_error(dev, false, + "Command parser error, iir 0x%08x", + iir); for_each_pipe(pipe) { int reg = PIPESTAT(pipe); @@ -3653,11 +3857,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) * Clear the PIPE*STAT regs before the IIR */ if (pipe_stats[pipe] & 0x8000ffff) { - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - DRM_DEBUG_DRIVER("pipe %c underrun\n", - pipe_name(pipe)); I915_WRITE(reg, pipe_stats[pipe]); - irq_received = 1; + irq_received = true; } } spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -3674,9 +3875,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) HOTPLUG_INT_STATUS_G4X : HOTPLUG_INT_STATUS_I915); - DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", - hotplug_status); - intel_hpd_irq_handler(dev, hotplug_trigger, IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915); @@ -3706,8 +3904,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev, pipe); - } + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && + intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) + DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); + } if (blc_event || (iir & I915_ASLE_INTERRUPT)) intel_opregion_asle_intr(dev); @@ -3740,13 +3941,13 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) static void i965_irq_uninstall(struct drm_device * dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; if (!dev_priv) return; - del_timer_sync(&dev_priv->hotplug_reenable_timer); + intel_hpd_irq_uninstall(dev_priv); I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); @@ -3763,9 +3964,9 @@ static void i965_irq_uninstall(struct drm_device * dev) I915_WRITE(IIR, I915_READ(IIR)); } -static void i915_reenable_hotplug_timer_func(unsigned long data) +static void intel_hpd_irq_reenable(unsigned long data) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; + struct drm_i915_private *dev_priv = (struct drm_i915_private *)data; struct drm_device *dev = dev_priv->dev; struct drm_mode_config *mode_config = &dev->mode_config; unsigned long irqflags; @@ -3807,10 +4008,13 @@ void intel_irq_init(struct drm_device *dev) INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); + /* Let's track the enabled rps events */ + dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; + setup_timer(&dev_priv->gpu_error.hangcheck_timer, i915_hangcheck_elapsed, (unsigned long) dev); - setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, + setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable, (unsigned long) dev_priv); pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); @@ -3906,32 +4110,32 @@ void intel_hpd_init(struct drm_device *dev) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -/* Disable interrupts so we can allow Package C8+. */ -void hsw_pc8_disable_interrupts(struct drm_device *dev) +/* Disable interrupts so we can allow runtime PM. 
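The renames that follow (hsw_pc8_* to hsw_runtime_pm_*, dev_priv->pc8 to dev_priv->pm) generalize the Haswell Package-C8 interrupt bookkeeping into a runtime-PM facility without changing its shape: save the live interrupt masks, mask everything, and set a flag that the restore path inverts. Distilled as a sketch from the hunks below (only two of the five saved masks shown):

static void runtime_pm_disable_irqs_sketch(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->pm.regsave.deimr = I915_READ(DEIMR);	/* display engine */
	dev_priv->pm.regsave.gtimr = I915_READ(GTIMR);	/* GT */
	/* ...sdeimr, gtier and gen6_pmimr are saved the same way... */

	ironlake_disable_display_irq(dev_priv, 0xffffffff);
	ilk_disable_gt_irq(dev_priv, 0xffffffff);

	/* the restore side re-enables with the complement of each saved mask */
	dev_priv->pm.irqs_disabled = true;
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}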
*/ +void hsw_runtime_pm_disable_interrupts(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - dev_priv->pc8.regsave.deimr = I915_READ(DEIMR); - dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR); - dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR); - dev_priv->pc8.regsave.gtier = I915_READ(GTIER); - dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); + dev_priv->pm.regsave.deimr = I915_READ(DEIMR); + dev_priv->pm.regsave.sdeimr = I915_READ(SDEIMR); + dev_priv->pm.regsave.gtimr = I915_READ(GTIMR); + dev_priv->pm.regsave.gtier = I915_READ(GTIER); + dev_priv->pm.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); ironlake_disable_display_irq(dev_priv, 0xffffffff); ibx_disable_display_interrupt(dev_priv, 0xffffffff); ilk_disable_gt_irq(dev_priv, 0xffffffff); snb_disable_pm_irq(dev_priv, 0xffffffff); - dev_priv->pc8.irqs_disabled = true; + dev_priv->pm.irqs_disabled = true; spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -/* Restore interrupts so we can recover from Package C8+. */ -void hsw_pc8_restore_interrupts(struct drm_device *dev) +/* Restore interrupts so we can recover from runtime PM. */ +void hsw_runtime_pm_restore_interrupts(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; @@ -3951,13 +4155,13 @@ void hsw_pc8_restore_interrupts(struct drm_device *dev) val = I915_READ(GEN6_PMIMR); WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val); - dev_priv->pc8.irqs_disabled = false; + dev_priv->pm.irqs_disabled = false; - ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr); - ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr); - ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr); - snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr); - I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier); + ironlake_enable_display_irq(dev_priv, ~dev_priv->pm.regsave.deimr); + ibx_enable_display_interrupt(dev_priv, ~dev_priv->pm.regsave.sdeimr); + ilk_enable_gt_irq(dev_priv, ~dev_priv->pm.regsave.gtimr); + snb_enable_pm_irq(dev_priv, ~dev_priv->pm.regsave.gen6_pmimr); + I915_WRITE(GTIER, dev_priv->pm.regsave.gtier); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c new file mode 100644 index 00000000000..d1d7980f0e0 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_params.c @@ -0,0 +1,154 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "i915_drv.h" + +struct i915_params i915 __read_mostly = { + .modeset = -1, + .panel_ignore_lid = 1, + .powersave = 1, + .semaphores = -1, + .lvds_downclock = 0, + .lvds_channel_mode = 0, + .panel_use_ssc = -1, + .vbt_sdvo_panel_type = -1, + .enable_rc6 = -1, + .enable_fbc = -1, + .enable_hangcheck = true, + .enable_ppgtt = -1, + .enable_psr = 0, + .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), + .disable_power_well = 1, + .enable_ips = 1, + .fastboot = 0, + .prefault_disable = 0, + .reset = true, + .invert_brightness = 0, + .disable_display = 0, + .enable_cmd_parser = 0, +}; + +module_param_named(modeset, i915.modeset, int, 0400); +MODULE_PARM_DESC(modeset, + "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, " + "1=on, -1=force vga console preference [default])"); + +module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600); +MODULE_PARM_DESC(panel_ignore_lid, + "Override lid status (0=autodetect, 1=autodetect disabled [default], " + "-1=force lid closed, -2=force lid open)"); + +module_param_named(powersave, i915.powersave, int, 0600); +MODULE_PARM_DESC(powersave, + "Enable powersavings, fbc, downclocking, etc. (default: true)"); + +module_param_named(semaphores, i915.semaphores, int, 0400); +MODULE_PARM_DESC(semaphores, + "Use semaphores for inter-ring sync " + "(default: -1 (use per-chip defaults))"); + +module_param_named(enable_rc6, i915.enable_rc6, int, 0400); +MODULE_PARM_DESC(enable_rc6, + "Enable power-saving render C-state 6. " + "Different stages can be selected via bitmask values " + "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). " + "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. " + "default: -1 (use per-chip default)"); + +module_param_named(enable_fbc, i915.enable_fbc, int, 0600); +MODULE_PARM_DESC(enable_fbc, + "Enable frame buffer compression for power savings " + "(default: -1 (use per-chip default))"); + +module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400); +MODULE_PARM_DESC(lvds_downclock, + "Use panel (LVDS/eDP) downclocking for power savings " + "(default: false)"); + +module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600); +MODULE_PARM_DESC(lvds_channel_mode, + "Specify LVDS channel mode " + "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); + +module_param_named(lvds_use_ssc, i915.panel_use_ssc, int, 0600); +MODULE_PARM_DESC(lvds_use_ssc, + "Use Spread Spectrum Clock with panels [LVDS/eDP] " + "(default: auto from VBT)"); + +module_param_named(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600); +MODULE_PARM_DESC(vbt_sdvo_panel_type, + "Override/Ignore selection of SDVO panel mode in the VBT " + "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); + +module_param_named(reset, i915.reset, bool, 0600); +MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); + +module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644); +MODULE_PARM_DESC(enable_hangcheck, + "Periodically check GPU activity for detecting hangs. " + "WARNING: Disabling this can cause system wide hangs. 
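The new i915_params.c gathers every module option into a single struct i915_params instance and binds each field with module_param_named(), instead of scattering file-scope variables across the driver. A hypothetical minimal module using the same idiom (the parameter names here are placeholders, not i915 options; 0400 makes a parameter read-only via sysfs, 0600 runtime-writable):

#include <linux/module.h>

static struct {
	int verbosity;
	bool enable_foo;
} params = {
	.verbosity = -1,	/* -1 = auto, mirroring i915's per-chip defaults */
	.enable_foo = true,
};

module_param_named(verbosity, params.verbosity, int, 0600);
MODULE_PARM_DESC(verbosity,
	"Verbosity (-1=auto [default], 0=quiet, 1=chatty)");

module_param_named(enable_foo, params.enable_foo, bool, 0400);
MODULE_PARM_DESC(enable_foo, "Enable the foo path (default: true)");

MODULE_LICENSE("GPL");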
" + "(default: true)"); + +module_param_named(enable_ppgtt, i915.enable_ppgtt, int, 0400); +MODULE_PARM_DESC(enable_ppgtt, + "Override PPGTT usage. " + "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)"); + +module_param_named(enable_psr, i915.enable_psr, int, 0600); +MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); + +module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600); +MODULE_PARM_DESC(preliminary_hw_support, + "Enable preliminary hardware support."); + +module_param_named(disable_power_well, i915.disable_power_well, int, 0600); +MODULE_PARM_DESC(disable_power_well, + "Disable the power well when possible (default: true)"); + +module_param_named(enable_ips, i915.enable_ips, int, 0600); +MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); + +module_param_named(fastboot, i915.fastboot, bool, 0600); +MODULE_PARM_DESC(fastboot, + "Try to skip unnecessary mode sets at boot time (default: false)"); + +module_param_named(prefault_disable, i915.prefault_disable, bool, 0600); +MODULE_PARM_DESC(prefault_disable, + "Disable page prefaulting for pread/pwrite/reloc (default:false). " + "For developers only."); + +module_param_named(invert_brightness, i915.invert_brightness, int, 0600); +MODULE_PARM_DESC(invert_brightness, + "Invert backlight brightness " + "(-1 force normal, 0 machine defaults, 1 force inversion), please " + "report PCI device ID, subsystem vendor and subsystem device ID " + "to dri-devel@lists.freedesktop.org, if your machine needs it. " + "It will then be included in an upcoming module version."); + +module_param_named(disable_display, i915.disable_display, bool, 0600); +MODULE_PARM_DESC(disable_display, "Disable display (default: false)"); + +module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600); +MODULE_PARM_DESC(enable_cmd_parser, + "Enable command parsing (1=enabled, 0=disabled [default])"); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a48b7cad6f1..9f5b18d9d88 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -26,7 +26,6 @@ #define _I915_REG_H_ #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) -#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc)) #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) @@ -73,7 +72,8 @@ #define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) -#define LBB 0xf4 +#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */ + /* Graphics reset regs */ #define I965_GDRST 0xc0 /* PCI config register */ @@ -175,6 +175,18 @@ #define VGA_CR_DATA_CGA 0x3d5 /* + * Instruction field definitions used by the command parser + */ +#define INSTR_CLIENT_SHIFT 29 +#define INSTR_CLIENT_MASK 0xE0000000 +#define INSTR_MI_CLIENT 0x0 +#define INSTR_BC_CLIENT 0x2 +#define INSTR_RC_CLIENT 0x3 +#define INSTR_SUBCLIENT_SHIFT 27 +#define INSTR_SUBCLIENT_MASK 0x18000000 +#define INSTR_MEDIA_SUBCLIENT 0x2 + +/* * Memory interface instructions used by the kernel */ #define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) @@ -377,14 +389,30 @@ #define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) #define DSPFREQGUAR_SHIFT 14 #define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) + +/* See the PUNIT HAS v0.8 for the below bits */ +enum punit_power_well { + PUNIT_POWER_WELL_RENDER = 0, + PUNIT_POWER_WELL_MEDIA = 1, + PUNIT_POWER_WELL_DISP2D = 3, + PUNIT_POWER_WELL_DPIO_CMN_BC 
= 5, + PUNIT_POWER_WELL_DPIO_TX_B_LANES_01 = 6, + PUNIT_POWER_WELL_DPIO_TX_B_LANES_23 = 7, + PUNIT_POWER_WELL_DPIO_TX_C_LANES_01 = 8, + PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9, + PUNIT_POWER_WELL_DPIO_RX0 = 10, + PUNIT_POWER_WELL_DPIO_RX1 = 11, + + PUNIT_POWER_WELL_NUM, +}; + #define PUNIT_REG_PWRGT_CTRL 0x60 #define PUNIT_REG_PWRGT_STATUS 0x61 -#define PUNIT_CLK_GATE 1 -#define PUNIT_PWR_RESET 2 -#define PUNIT_PWR_GATE 3 -#define RENDER_PWRGT (PUNIT_PWR_GATE << 0) -#define MEDIA_PWRGT (PUNIT_PWR_GATE << 2) -#define DISP2D_PWRGT (PUNIT_PWR_GATE << 6) +#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2)) +#define PUNIT_PWRGT_PWR_ON(power_well) (0 << ((power_well) * 2)) +#define PUNIT_PWRGT_CLK_GATE(power_well) (1 << ((power_well) * 2)) +#define PUNIT_PWRGT_RESET(power_well) (2 << ((power_well) * 2)) +#define PUNIT_PWRGT_PWR_GATE(power_well) (3 << ((power_well) * 2)) #define PUNIT_REG_GPU_LFM 0xd3 #define PUNIT_REG_GPU_FREQ_REQ 0xd4 @@ -678,6 +706,7 @@ #define BLT_HWS_PGA_GEN7 (0x04280) #define VEBOX_HWS_PGA_GEN7 (0x04380) #define RING_ACTHD(base) ((base)+0x74) +#define RING_ACTHD_UDW(base) ((base)+0x5c) #define RING_NOPID(base) ((base)+0x94) #define RING_IMR(base) ((base)+0xa8) #define RING_TIMESTAMP(base) ((base)+0x358) @@ -720,6 +749,7 @@ #define RING_INSTPS(base) ((base)+0x70) #define RING_DMA_FADD(base) ((base)+0x78) #define RING_INSTPM(base) ((base)+0xc0) +#define RING_MI_MODE(base) ((base)+0x9c) #define INSTPS 0x02070 /* 965+ only */ #define INSTDONE1 0x0207c /* 965+ only */ #define ACTHD_I965 0x02074 @@ -789,15 +819,22 @@ #define _3D_CHICKEN3 0x02090 #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) -#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) +#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */ +#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */ #define MI_MODE 0x0209c # define VS_TIMER_DISPATCH (1 << 6) # define MI_FLUSH_ENABLE (1 << 12) # define ASYNC_FLIP_PERF_DISABLE (1 << 14) +# define MODE_IDLE (1 << 9) #define GEN6_GT_MODE 0x20d0 -#define GEN6_GT_MODE_HI (1 << 9) +#define GEN7_GT_MODE 0x7008 +#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7)) +#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0) +#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1) +#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0) +#define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16) #define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) #define GFX_MODE 0x02520 @@ -934,13 +971,19 @@ #define ECO_GATING_CX_ONLY (1<<3) #define ECO_FLIP_DONE (1<<0) +#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */ +#define HIZ_RAW_STALL_OPT_DISABLE (1<<2) #define CACHE_MODE_1 0x7004 /* IVB+ */ -#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) +#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) +#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6) #define GEN6_BLITTER_ECOSKPD 0x221d0 #define GEN6_BLITTER_LOCK_SHIFT 16 #define GEN6_BLITTER_FBC_NOTIFY (1<<3) +#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050 +#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) + #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 #define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) #define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) @@ -1046,9 +1089,8 @@ #define FBC_CTL_IDLE_LINE (2<<2) #define FBC_CTL_IDLE_DEBUG (3<<2) #define FBC_CTL_CPU_FENCE (1<<1) -#define FBC_CTL_PLANEA (0<<0) -#define FBC_CTL_PLANEB (1<<0) -#define FBC_FENCE_OFF 0x0321b +#define FBC_CTL_PLANE(plane) ((plane)<<0) +#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */ #define FBC_TAG 
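The punit_power_well enum above pairs with the new PUNIT_PWRGT_* macros: each well occupies a two-bit field in PUNIT_REG_PWRGT_CTRL/STATUS, indexed by the enum value, which replaces the fixed RENDER/MEDIA/DISP2D shifts. A standalone demo of the encoding (macros copied from the diff, enum shown as a subset):

#include <stdio.h>

#define PUNIT_PWRGT_MASK(well)     (3 << ((well) * 2))
#define PUNIT_PWRGT_PWR_ON(well)   (0 << ((well) * 2))
#define PUNIT_PWRGT_PWR_GATE(well) (3 << ((well) * 2))

enum punit_power_well {
	PUNIT_POWER_WELL_RENDER = 0,
	PUNIT_POWER_WELL_MEDIA  = 1,
	PUNIT_POWER_WELL_DISP2D = 3,
};

int main(void)
{
	/* gate DISP2D while leaving every other well's field untouched */
	unsigned ctrl = 0;
	ctrl &= ~PUNIT_PWRGT_MASK(PUNIT_POWER_WELL_DISP2D);
	ctrl |= PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D);
	printf("ctrl = %#x (old DISP2D_PWRGT was 3 << 6 = 0xc0)\n", ctrl);
	return 0;
}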
0x03300 #define FBC_LL_SIZE (1536) @@ -1057,9 +1099,8 @@ #define DPFC_CB_BASE 0x3200 #define DPFC_CONTROL 0x3208 #define DPFC_CTL_EN (1<<31) -#define DPFC_CTL_PLANEA (0<<30) -#define DPFC_CTL_PLANEB (1<<30) -#define IVB_DPFC_CTL_PLANE_SHIFT (29) +#define DPFC_CTL_PLANE(plane) ((plane)<<30) +#define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29) #define DPFC_CTL_FENCE_EN (1<<29) #define IVB_DPFC_CTL_FENCE_EN (1<<28) #define DPFC_CTL_PERSISTENT_MODE (1<<25) @@ -1120,13 +1161,6 @@ #define FBC_REND_NUKE (1<<2) #define FBC_REND_CACHE_CLEAN (1<<1) -#define _HSW_PIPE_SLICE_CHICKEN_1_A 0x420B0 -#define _HSW_PIPE_SLICE_CHICKEN_1_B 0x420B4 -#define HSW_BYPASS_FBC_QUEUE (1<<22) -#define HSW_PIPE_SLICE_CHICKEN_1(pipe) _PIPE(pipe, + \ - _HSW_PIPE_SLICE_CHICKEN_1_A, + \ - _HSW_PIPE_SLICE_CHICKEN_1_B) - /* * GPIO regs */ @@ -1202,6 +1236,10 @@ /* * Clock control & power management */ +#define DPLL_A_OFFSET 0x6014 +#define DPLL_B_OFFSET 0x6018 +#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \ + dev_priv->info.display_mmio_offset) #define VGA0 0x6000 #define VGA1 0x6004 @@ -1214,9 +1252,6 @@ #define VGA1_PD_P1_DIV_2 (1 << 13) #define VGA1_PD_P1_SHIFT 8 #define VGA1_PD_P1_MASK (0x1f << 8) -#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014) -#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018) -#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) #define DPLL_VCO_ENABLE (1 << 31) #define DPLL_SDVO_HIGH_SPEED (1 << 30) #define DPLL_DVO_2X_MODE (1 << 30) @@ -1278,7 +1313,12 @@ #define SDVO_MULTIPLIER_MASK 0x000000ff #define SDVO_MULTIPLIER_SHIFT_HIRES 4 #define SDVO_MULTIPLIER_SHIFT_VGA 0 -#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */ + +#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */ +#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */ +#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \ + dev_priv->info.display_mmio_offset) + /* * UDI pixel divider, controlling how many pixels are stuffed into a packet. 
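Two hunks above make the same simplification for FBC plane selection: the FBC_CTL_PLANEA/PLANEB and DPFC_CTL_PLANEA/PLANEB define pairs collapse into macros that shift the plane index into the field, with Ivybridge using a different shift. A standalone demo of the arithmetic (macro values copied from the diff):

#include <stdio.h>

#define FBC_CTL_PLANE(plane)      ((plane) << 0)	/* gen2/3 FBC_CONTROL */
#define DPFC_CTL_PLANE(plane)     ((plane) << 30)	/* g4x/ilk DPFC_CONTROL */
#define IVB_DPFC_CTL_PLANE(plane) ((plane) << 29)	/* ivb moved the field */

int main(void)
{
	for (int plane = 0; plane < 2; plane++)
		printf("plane %c: fbc=%#x dpfc=%#x ivb=%#x\n", 'A' + plane,
		       (unsigned)FBC_CTL_PLANE(plane),
		       (unsigned)DPFC_CTL_PLANE(plane),
		       (unsigned)IVB_DPFC_CTL_PLANE(plane));
	return 0;
}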
* @@ -1315,8 +1355,6 @@ */ #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 -#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */ -#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) #define _FPA0 0x06040 #define _FPA1 0x06044 @@ -1348,7 +1386,7 @@ #define DSTATE_PLL_D3_OFF (1<<3) #define DSTATE_GFX_CLOCK_GATING (1<<1) #define DSTATE_DOT_CLOCK_GATING (1<<0) -#define DSPCLK_GATE_D (dev_priv->info->display_mmio_offset + 0x6200) +#define DSPCLK_GATE_D (dev_priv->info.display_mmio_offset + 0x6200) # define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ # define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ # define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ @@ -1472,10 +1510,10 @@ /* * Palette regs */ - -#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000) -#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800) -#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B) +#define PALETTE_A_OFFSET 0xa000 +#define PALETTE_B_OFFSET 0xa800 +#define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \ + dev_priv->info.display_mmio_offset) /* MCH MMIO space */ @@ -1862,7 +1900,7 @@ */ /* Pipe A CRC regs */ -#define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050) +#define _PIPE_CRC_CTL_A 0x60050 #define PIPE_CRC_ENABLE (1 << 31) /* ivb+ source selection */ #define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) @@ -1902,11 +1940,11 @@ #define _PIPE_CRC_RES_4_A_IVB 0x60070 #define _PIPE_CRC_RES_5_A_IVB 0x60074 -#define _PIPE_CRC_RES_RED_A (dev_priv->info->display_mmio_offset + 0x60060) -#define _PIPE_CRC_RES_GREEN_A (dev_priv->info->display_mmio_offset + 0x60064) -#define _PIPE_CRC_RES_BLUE_A (dev_priv->info->display_mmio_offset + 0x60068) -#define _PIPE_CRC_RES_RES1_A_I915 (dev_priv->info->display_mmio_offset + 0x6006c) -#define _PIPE_CRC_RES_RES2_A_G4X (dev_priv->info->display_mmio_offset + 0x60080) +#define _PIPE_CRC_RES_RED_A 0x60060 +#define _PIPE_CRC_RES_GREEN_A 0x60064 +#define _PIPE_CRC_RES_BLUE_A 0x60068 +#define _PIPE_CRC_RES_RES1_A_I915 0x6006c +#define _PIPE_CRC_RES_RES2_A_G4X 0x60080 /* Pipe B CRC regs */ #define _PIPE_CRC_RES_1_B_IVB 0x61064 @@ -1915,59 +1953,69 @@ #define _PIPE_CRC_RES_4_B_IVB 0x61070 #define _PIPE_CRC_RES_5_B_IVB 0x61074 -#define PIPE_CRC_CTL(pipe) _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000) +#define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A) #define PIPE_CRC_RES_1_IVB(pipe) \ - _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB) + _TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB) #define PIPE_CRC_RES_2_IVB(pipe) \ - _PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB) + _TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB) #define PIPE_CRC_RES_3_IVB(pipe) \ - _PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB) + _TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB) #define PIPE_CRC_RES_4_IVB(pipe) \ - _PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB) + _TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB) #define PIPE_CRC_RES_5_IVB(pipe) \ - _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB) + _TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB) #define PIPE_CRC_RES_RED(pipe) \ - _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000) + _TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A) #define PIPE_CRC_RES_GREEN(pipe) \ - _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000) + _TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A) #define PIPE_CRC_RES_BLUE(pipe) \ - _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000) + _TRANSCODER2(pipe, _PIPE_CRC_RES_BLUE_A) #define PIPE_CRC_RES_RES1_I915(pipe) \ - 
_PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000) + _TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915) #define PIPE_CRC_RES_RES2_G4X(pipe) \ - _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000) + _TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X) /* Pipe A timing regs */ -#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000) -#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004) -#define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008) -#define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c) -#define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010) -#define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014) -#define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c) -#define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020) -#define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028) +#define _HTOTAL_A 0x60000 +#define _HBLANK_A 0x60004 +#define _HSYNC_A 0x60008 +#define _VTOTAL_A 0x6000c +#define _VBLANK_A 0x60010 +#define _VSYNC_A 0x60014 +#define _PIPEASRC 0x6001c +#define _BCLRPAT_A 0x60020 +#define _VSYNCSHIFT_A 0x60028 /* Pipe B timing regs */ -#define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000) -#define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004) -#define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008) -#define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c) -#define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010) -#define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014) -#define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c) -#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020) -#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028) - -#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) -#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B) -#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B) -#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B) -#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B) -#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B) -#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) -#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) +#define _HTOTAL_B 0x61000 +#define _HBLANK_B 0x61004 +#define _HSYNC_B 0x61008 +#define _VTOTAL_B 0x6100c +#define _VBLANK_B 0x61010 +#define _VSYNC_B 0x61014 +#define _PIPEBSRC 0x6101c +#define _BCLRPAT_B 0x61020 +#define _VSYNCSHIFT_B 0x61028 + +#define TRANSCODER_A_OFFSET 0x60000 +#define TRANSCODER_B_OFFSET 0x61000 +#define TRANSCODER_C_OFFSET 0x62000 +#define TRANSCODER_EDP_OFFSET 0x6f000 + +#define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \ + dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \ + dev_priv->info.display_mmio_offset) + +#define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A) +#define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A) +#define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A) +#define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A) +#define VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A) +#define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A) +#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A) +#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A) +#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC) /* HSW+ eDP PSR registers */ #define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 
0x64800 : 0x6f800) @@ -2084,7 +2132,7 @@ /* Hotplug control (945+ only) */ -#define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110) +#define PORT_HOTPLUG_EN (dev_priv->info.display_mmio_offset + 0x61110) #define PORTB_HOTPLUG_INT_EN (1 << 29) #define PORTC_HOTPLUG_INT_EN (1 << 28) #define PORTD_HOTPLUG_INT_EN (1 << 27) @@ -2114,7 +2162,7 @@ #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) -#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114) +#define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114) /* * HDMI/DP bits are gen4+ * @@ -2332,9 +2380,7 @@ #define VIDEO_DIP_CTL 0x61170 /* Pre HSW: */ #define VIDEO_DIP_ENABLE (1 << 31) -#define VIDEO_DIP_PORT_B (1 << 29) -#define VIDEO_DIP_PORT_C (2 << 29) -#define VIDEO_DIP_PORT_D (3 << 29) +#define VIDEO_DIP_PORT(port) ((port) << 29) #define VIDEO_DIP_PORT_MASK (3 << 29) #define VIDEO_DIP_ENABLE_GCP (1 << 25) #define VIDEO_DIP_ENABLE_AVI (1 << 21) @@ -2391,7 +2437,7 @@ #define PP_DIVISOR 0x61210 /* Panel fitting */ -#define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230) +#define PFIT_CONTROL (dev_priv->info.display_mmio_offset + 0x61230) #define PFIT_ENABLE (1 << 31) #define PFIT_PIPE_MASK (3 << 29) #define PFIT_PIPE_SHIFT 29 @@ -2409,7 +2455,7 @@ #define PFIT_SCALING_PROGRAMMED (1 << 26) #define PFIT_SCALING_PILLAR (2 << 26) #define PFIT_SCALING_LETTER (3 << 26) -#define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234) +#define PFIT_PGM_RATIOS (dev_priv->info.display_mmio_offset + 0x61234) /* Pre-965 */ #define PFIT_VERT_SCALE_SHIFT 20 #define PFIT_VERT_SCALE_MASK 0xfff00000 @@ -2421,25 +2467,25 @@ #define PFIT_HORIZ_SCALE_SHIFT_965 0 #define PFIT_HORIZ_SCALE_MASK_965 0x00001fff -#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238) +#define PFIT_AUTO_RATIOS (dev_priv->info.display_mmio_offset + 0x61238) -#define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250) -#define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350) +#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250) +#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350) #define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \ _VLV_BLC_PWM_CTL2_B) -#define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254) -#define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354) +#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254) +#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354) #define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \ _VLV_BLC_PWM_CTL_B) -#define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260) -#define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360) +#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260) +#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360) #define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \ _VLV_BLC_HIST_CTL_B) /* Backlight control */ -#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */ +#define BLC_PWM_CTL2 (dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */ #define BLM_PWM_ENABLE (1 << 31) #define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */ #define BLM_PIPE_SELECT (1 << 29) @@ -2462,7 +2508,7 @@ #define BLM_PHASE_IN_COUNT_MASK (0xff << 8) #define BLM_PHASE_IN_INCR_SHIFT (0) #define BLM_PHASE_IN_INCR_MASK 
(0xff << 0) -#define BLC_PWM_CTL (dev_priv->info->display_mmio_offset + 0x61254) +#define BLC_PWM_CTL (dev_priv->info.display_mmio_offset + 0x61254) /* * This is the most significant 15 bits of the number of backlight cycles in a * complete cycle of the modulated backlight control. @@ -2484,7 +2530,7 @@ #define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) #define BLM_POLARITY_PNV (1 << 0) /* pnv only */ -#define BLC_HIST_CTL (dev_priv->info->display_mmio_offset + 0x61260) +#define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260) /* New registers for PCH-split platforms. Safe where new bits show up, the * register layout machtes with gen4 BLC_PWM_CTL[12]. */ @@ -3178,10 +3224,10 @@ /* Display & cursor control */ /* Pipe A */ -#define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000) +#define _PIPEADSL 0x70000 #define DSL_LINEMASK_GEN2 0x00000fff #define DSL_LINEMASK_GEN3 0x00001fff -#define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008) +#define _PIPEACONF 0x70008 #define PIPECONF_ENABLE (1<<31) #define PIPECONF_DISABLE 0 #define PIPECONF_DOUBLE_WIDE (1<<30) @@ -3224,9 +3270,9 @@ #define PIPECONF_DITHER_TYPE_ST1 (1<<2) #define PIPECONF_DITHER_TYPE_ST2 (2<<2) #define PIPECONF_DITHER_TYPE_TEMP (3<<2) -#define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024) +#define _PIPEASTAT 0x70024 #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) -#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30) +#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30) #define PIPE_CRC_ERROR_ENABLE (1UL<<29) #define PIPE_CRC_DONE_ENABLE (1UL<<28) #define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) @@ -3239,35 +3285,55 @@ #define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) #define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) #define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) +#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19) #define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ #define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ #define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) #define PIPEA_HBLANK_INT_EN_VLV (1UL<<16) #define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) -#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15) -#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14) +#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL<<15) +#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14) #define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) #define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) #define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) -#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10) +#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10) #define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) #define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) #define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) #define PIPE_DPST_EVENT_STATUS (1UL<<7) #define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) +#define PIPE_A_PSR_STATUS_VLV (1UL<<6) #define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) #define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) +#define PIPE_B_PSR_STATUS_VLV (1UL<<3) #define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) -#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) -#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF) -#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) -#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) -#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) -#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, 
_PIPEBSTAT) +#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000 +#define PIPESTAT_INT_STATUS_MASK 0x0000ffff + +#define PIPE_A_OFFSET 0x70000 +#define PIPE_B_OFFSET 0x71000 +#define PIPE_C_OFFSET 0x72000 +/* + * There's actually no pipe EDP. Some pipe registers have + * simply shifted from the pipe to the transcoder, while + * keeping their original offset. Thus we need PIPE_EDP_OFFSET + * to access such registers in transcoder EDP. + */ +#define PIPE_EDP_OFFSET 0x7f000 + +#define _PIPE2(pipe, reg) (dev_priv->info.pipe_offsets[pipe] - \ + dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \ + dev_priv->info.display_mmio_offset) + +#define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF) +#define PIPEDSL(pipe) _PIPE2(pipe, _PIPEADSL) +#define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH) +#define PIPEFRAMEPIXEL(pipe) _PIPE2(pipe, _PIPEAFRAMEPIXEL) +#define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT) #define _PIPE_MISC_A 0x70030 #define _PIPE_MISC_B 0x71030 @@ -3279,20 +3345,20 @@ #define PIPEMISC_DITHER_ENABLE (1<<4) #define PIPEMISC_DITHER_TYPE_MASK (3<<2) #define PIPEMISC_DITHER_TYPE_SP (0<<2) -#define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B) +#define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A) #define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028) #define PIPEB_LINE_COMPARE_INT_EN (1<<29) #define PIPEB_HLINE_INT_EN (1<<28) #define PIPEB_VBLANK_INT_EN (1<<27) -#define SPRITED_FLIPDONE_INT_EN (1<<26) -#define SPRITEC_FLIPDONE_INT_EN (1<<25) -#define PLANEB_FLIPDONE_INT_EN (1<<24) +#define SPRITED_FLIP_DONE_INT_EN (1<<26) +#define SPRITEC_FLIP_DONE_INT_EN (1<<25) +#define PLANEB_FLIP_DONE_INT_EN (1<<24) #define PIPEA_LINE_COMPARE_INT_EN (1<<21) #define PIPEA_HLINE_INT_EN (1<<20) #define PIPEA_VBLANK_INT_EN (1<<19) -#define SPRITEB_FLIPDONE_INT_EN (1<<18) -#define SPRITEA_FLIPDONE_INT_EN (1<<17) +#define SPRITEB_FLIP_DONE_INT_EN (1<<18) +#define SPRITEA_FLIP_DONE_INT_EN (1<<17) #define PLANEA_FLIPDONE_INT_EN (1<<16) #define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */ @@ -3323,7 +3389,7 @@ #define DSPARB_BEND_SHIFT 9 /* on 855 */ #define DSPARB_AEND_SHIFT 0 -#define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034) +#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034) #define DSPFW_SR_SHIFT 23 #define DSPFW_SR_MASK (0x1ff<<23) #define DSPFW_CURSORB_SHIFT 16 @@ -3331,11 +3397,11 @@ #define DSPFW_PLANEB_SHIFT 8 #define DSPFW_PLANEB_MASK (0x7f<<8) #define DSPFW_PLANEA_MASK (0x7f) -#define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038) +#define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038) #define DSPFW_CURSORA_MASK 0x00003f00 #define DSPFW_CURSORA_SHIFT 8 #define DSPFW_PLANEC_MASK (0x7f) -#define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c) +#define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c) #define DSPFW_HPLL_SR_EN (1<<31) #define DSPFW_CURSOR_SR_SHIFT 24 #define PINEVIEW_SELF_REFRESH_EN (1<<30) @@ -3343,8 +3409,8 @@ #define DSPFW_HPLL_CURSOR_SHIFT 16 #define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) #define DSPFW_HPLL_SR_MASK (0x1ff) -#define DSPFW4 (dev_priv->info->display_mmio_offset + 0x70070) -#define DSPFW7 (dev_priv->info->display_mmio_offset + 0x7007c) +#define DSPFW4 (dev_priv->info.display_mmio_offset + 0x70070) +#define DSPFW7 (dev_priv->info.display_mmio_offset + 0x7007c) /* drain latency register values*/ #define DRAIN_LATENCY_PRECISION_32 32 @@ -3468,12 +3534,12 @@ #define PIPE_PIXEL_MASK 0x00ffffff #define PIPE_PIXEL_SHIFT 0 /* GM45+ just has to be different */ -#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70040) 
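The _PIPE2() scheme just introduced replaces linear interpolation between two hard-coded registers with a per-device offset table, which is what makes the "pipe EDP" quirk described above expressible at all. A standalone userspace model of the address math (offsets copied from the diff; display_mmio_offset is assumed 0 here, and would be 0x180000 on Valleyview):

#include <stdio.h>
#include <stdint.h>

enum { PIPE_A, PIPE_B, PIPE_C, I915_MAX_PIPES };

static const uint32_t pipe_offsets[I915_MAX_PIPES] = {
	[PIPE_A] = 0x70000, [PIPE_B] = 0x71000, [PIPE_C] = 0x72000,
};
static const uint32_t display_mmio_offset = 0;	/* assumption, see above */

/* mirrors _PIPE2: rebase a pipe-A-relative register onto any pipe */
static uint32_t pipe2(int pipe, uint32_t reg)
{
	return pipe_offsets[pipe] - pipe_offsets[PIPE_A] + reg +
	       display_mmio_offset;
}

#define _PIPEACONF 0x70008

int main(void)
{
	for (int pipe = PIPE_A; pipe < I915_MAX_PIPES; pipe++)
		printf("PIPECONF(%c) = %#x\n",
		       'A' + pipe, (unsigned)pipe2(pipe, _PIPEACONF));
	return 0;	/* prints 0x70008, 0x71008, 0x72008 */
}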
-#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70044) +#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70040) +#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70044) #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) /* Cursor A & B regs */ -#define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080) +#define _CURACNTR (dev_priv->info.display_mmio_offset + 0x70080) /* Old style CUR*CNTR flags (desktop 8xx) */ #define CURSOR_ENABLE 0x80000000 #define CURSOR_GAMMA_ENABLE 0x40000000 @@ -3489,23 +3555,27 @@ /* New style CUR*CNTR flags */ #define CURSOR_MODE 0x27 #define CURSOR_MODE_DISABLE 0x00 +#define CURSOR_MODE_128_32B_AX 0x02 +#define CURSOR_MODE_256_32B_AX 0x03 #define CURSOR_MODE_64_32B_AX 0x07 +#define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX) +#define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX) #define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) #define MCURSOR_PIPE_SELECT (1 << 28) #define MCURSOR_PIPE_A 0x00 #define MCURSOR_PIPE_B (1 << 28) #define MCURSOR_GAMMA_ENABLE (1 << 26) #define CURSOR_TRICKLE_FEED_DISABLE (1 << 14) -#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084) -#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088) +#define _CURABASE (dev_priv->info.display_mmio_offset + 0x70084) +#define _CURAPOS (dev_priv->info.display_mmio_offset + 0x70088) #define CURSOR_POS_MASK 0x007FF #define CURSOR_POS_SIGN 0x8000 #define CURSOR_X_SHIFT 0 #define CURSOR_Y_SHIFT 16 #define CURSIZE 0x700a0 -#define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0) -#define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4) -#define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8) +#define _CURBCNTR (dev_priv->info.display_mmio_offset + 0x700c0) +#define _CURBBASE (dev_priv->info.display_mmio_offset + 0x700c4) +#define _CURBPOS (dev_priv->info.display_mmio_offset + 0x700c8) #define _CURBCNTR_IVB 0x71080 #define _CURBBASE_IVB 0x71084 @@ -3520,7 +3590,7 @@ #define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) /* Display A control */ -#define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180) +#define _DSPACNTR 0x70180 #define DISPLAY_PLANE_ENABLE (1<<31) #define DISPLAY_PLANE_DISABLE 0 #define DISPPLANE_GAMMA_ENABLE (1<<30) @@ -3554,25 +3624,25 @@ #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) #define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ #define DISPPLANE_TILED (1<<10) -#define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184) -#define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188) -#define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */ -#define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190) -#define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */ -#define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */ -#define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */ -#define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC) - -#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) -#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) -#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE) -#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS) -#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE) -#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) -#define DSPTILEOFF(plane) _PIPE(plane, 
_DSPATILEOFF, _DSPBTILEOFF) +#define _DSPAADDR 0x70184 +#define _DSPASTRIDE 0x70188 +#define _DSPAPOS 0x7018C /* reserved */ +#define _DSPASIZE 0x70190 +#define _DSPASURF 0x7019C /* 965+ only */ +#define _DSPATILEOFF 0x701A4 /* 965+ only */ +#define _DSPAOFFSET 0x701A4 /* HSW */ +#define _DSPASURFLIVE 0x701AC + +#define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR) +#define DSPADDR(plane) _PIPE2(plane, _DSPAADDR) +#define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE) +#define DSPPOS(plane) _PIPE2(plane, _DSPAPOS) +#define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE) +#define DSPSURF(plane) _PIPE2(plane, _DSPASURF) +#define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF) #define DSPLINOFF(plane) DSPADDR(plane) -#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET) -#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE) +#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET) +#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE) /* Display/Sprite base address macros */ #define DISP_BASEADDR_MASK (0xfffff000) @@ -3580,44 +3650,44 @@ #define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK) /* VBIOS flags */ -#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410) -#define SWF01 (dev_priv->info->display_mmio_offset + 0x71414) -#define SWF02 (dev_priv->info->display_mmio_offset + 0x71418) -#define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c) -#define SWF04 (dev_priv->info->display_mmio_offset + 0x71420) -#define SWF05 (dev_priv->info->display_mmio_offset + 0x71424) -#define SWF06 (dev_priv->info->display_mmio_offset + 0x71428) -#define SWF10 (dev_priv->info->display_mmio_offset + 0x70410) -#define SWF11 (dev_priv->info->display_mmio_offset + 0x70414) -#define SWF14 (dev_priv->info->display_mmio_offset + 0x71420) -#define SWF30 (dev_priv->info->display_mmio_offset + 0x72414) -#define SWF31 (dev_priv->info->display_mmio_offset + 0x72418) -#define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c) +#define SWF00 (dev_priv->info.display_mmio_offset + 0x71410) +#define SWF01 (dev_priv->info.display_mmio_offset + 0x71414) +#define SWF02 (dev_priv->info.display_mmio_offset + 0x71418) +#define SWF03 (dev_priv->info.display_mmio_offset + 0x7141c) +#define SWF04 (dev_priv->info.display_mmio_offset + 0x71420) +#define SWF05 (dev_priv->info.display_mmio_offset + 0x71424) +#define SWF06 (dev_priv->info.display_mmio_offset + 0x71428) +#define SWF10 (dev_priv->info.display_mmio_offset + 0x70410) +#define SWF11 (dev_priv->info.display_mmio_offset + 0x70414) +#define SWF14 (dev_priv->info.display_mmio_offset + 0x71420) +#define SWF30 (dev_priv->info.display_mmio_offset + 0x72414) +#define SWF31 (dev_priv->info.display_mmio_offset + 0x72418) +#define SWF32 (dev_priv->info.display_mmio_offset + 0x7241c) /* Pipe B */ -#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000) -#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008) -#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024) +#define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000) +#define _PIPEBCONF (dev_priv->info.display_mmio_offset + 0x71008) +#define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024) #define _PIPEBFRAMEHIGH 0x71040 #define _PIPEBFRAMEPIXEL 0x71044 -#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71040) -#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71044) +#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71040) +#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71044) /* 
Display B control */ -#define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180) +#define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180) #define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) #define DISPPLANE_ALPHA_TRANS_DISABLE 0 #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) -#define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184) -#define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188) -#define _DSPBPOS (dev_priv->info->display_mmio_offset + 0x7118C) -#define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190) -#define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C) -#define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4) -#define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4) -#define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC) +#define _DSPBADDR (dev_priv->info.display_mmio_offset + 0x71184) +#define _DSPBSTRIDE (dev_priv->info.display_mmio_offset + 0x71188) +#define _DSPBPOS (dev_priv->info.display_mmio_offset + 0x7118C) +#define _DSPBSIZE (dev_priv->info.display_mmio_offset + 0x71190) +#define _DSPBSURF (dev_priv->info.display_mmio_offset + 0x7119C) +#define _DSPBTILEOFF (dev_priv->info.display_mmio_offset + 0x711A4) +#define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4) +#define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC) /* Sprite A control */ #define _DVSACNTR 0x72180 @@ -3866,48 +3936,45 @@ #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff -#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030) +#define _PIPEA_DATA_M1 0x60030 #define PIPE_DATA_M1_OFFSET 0 -#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034) +#define _PIPEA_DATA_N1 0x60034 #define PIPE_DATA_N1_OFFSET 0 -#define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038) +#define _PIPEA_DATA_M2 0x60038 #define PIPE_DATA_M2_OFFSET 0 -#define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c) +#define _PIPEA_DATA_N2 0x6003c #define PIPE_DATA_N2_OFFSET 0 -#define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040) +#define _PIPEA_LINK_M1 0x60040 #define PIPE_LINK_M1_OFFSET 0 -#define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044) +#define _PIPEA_LINK_N1 0x60044 #define PIPE_LINK_N1_OFFSET 0 -#define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048) +#define _PIPEA_LINK_M2 0x60048 #define PIPE_LINK_M2_OFFSET 0 -#define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c) +#define _PIPEA_LINK_N2 0x6004c #define PIPE_LINK_N2_OFFSET 0 /* PIPEB timing regs are same start from 0x61000 */ -#define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030) -#define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034) - -#define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038) -#define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c) - -#define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040) -#define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044) - -#define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048) -#define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c) - -#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1) -#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1) -#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2) -#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2) 
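The M/N data and link registers below move to the same table-driven scheme via _TRANSCODER2(): the old _TRANSCODER(tran, a, b) interpolation works for the evenly spaced transcoders A/B/C but cannot express transcoder EDP at 0x6f000. A standalone model (offsets copied from the diff; display_mmio_offset omitted, assumed 0):

#include <stdio.h>
#include <stdint.h>

enum { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP, TRANS_NUM };

static const uint32_t trans_offsets[TRANS_NUM] = {
	[TRANSCODER_A] = 0x60000, [TRANSCODER_B] = 0x61000,
	[TRANSCODER_C] = 0x62000, [TRANSCODER_EDP] = 0x6f000,
};

/* mirrors _TRANSCODER2: rebase a transcoder-A-relative register */
static uint32_t transcoder2(int tran, uint32_t reg)
{
	return trans_offsets[tran] - trans_offsets[TRANSCODER_A] + reg;
}

#define _PIPEA_DATA_M1 0x60030

int main(void)
{
	printf("PIPE_DATA_M1(C)   = %#x\n",
	       (unsigned)transcoder2(TRANSCODER_C, _PIPEA_DATA_M1));
	printf("PIPE_DATA_M1(EDP) = %#x\n",
	       (unsigned)transcoder2(TRANSCODER_EDP, _PIPEA_DATA_M1));
	return 0;	/* prints 0x62030 and 0x6f030 */
}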
-#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1) -#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1) -#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2) -#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2) +#define _PIPEB_DATA_M1 0x61030 +#define _PIPEB_DATA_N1 0x61034 +#define _PIPEB_DATA_M2 0x61038 +#define _PIPEB_DATA_N2 0x6103c +#define _PIPEB_LINK_M1 0x61040 +#define _PIPEB_LINK_N1 0x61044 +#define _PIPEB_LINK_M2 0x61048 +#define _PIPEB_LINK_N2 0x6104c + +#define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1) +#define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1) +#define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2) +#define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2) +#define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1) +#define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1) +#define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2) +#define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2) /* CPU panel fitter */ /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ @@ -4084,13 +4151,14 @@ #define ILK_ELPIN_409_SELECT (1 << 25) #define ILK_DPARB_GATE (1<<22) #define ILK_VSDPFD_FULL (1<<21) -#define ILK_DISPLAY_CHICKEN_FUSES 0x42014 -#define ILK_INTERNAL_GRAPHICS_DISABLE (1<<31) -#define ILK_INTERNAL_DISPLAY_DISABLE (1<<30) -#define ILK_DISPLAY_DEBUG_DISABLE (1<<29) -#define ILK_HDCP_DISABLE (1<<25) -#define ILK_eDP_A_DISABLE (1<<24) -#define ILK_DESKTOP (1<<23) +#define FUSE_STRAP 0x42014 +#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31) +#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30) +#define ILK_DISPLAY_DEBUG_DISABLE (1 << 29) +#define ILK_HDCP_DISABLE (1 << 25) +#define ILK_eDP_A_DISABLE (1 << 24) +#define HSW_CDCLK_LIMIT (1 << 24) +#define ILK_DESKTOP (1 << 23) #define ILK_DSPCLK_GATE_D 0x42020 #define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) @@ -4109,7 +4177,8 @@ #define _CHICKEN_PIPESL_1_A 0x420b0 #define _CHICKEN_PIPESL_1_B 0x420b4 -#define DPRS_MASK_VBLANK_SRD (1 << 0) +#define HSW_FBCQ_DIS (1 << 22) +#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) #define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) #define DISP_ARB_CTL 0x45000 @@ -4120,6 +4189,8 @@ #define GEN7_MSG_CTL 0x45010 #define WAIT_FOR_PCH_RESET_ACK (1<<1) #define WAIT_FOR_PCH_FLR_ACK (1<<0) +#define HSW_NDE_RSTWRN_OPT 0x46408 +#define RESET_PCH_HANDSHAKE_ENABLE (1<<4) /* GEN7 chicken */ #define GEN7_COMMON_SLICE_CHICKEN1 0x7010 @@ -4127,8 +4198,11 @@ #define COMMON_SLICE_CHICKEN2 0x7014 # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) +#define GEN7_L3SQCREG1 0xB010 +#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 + #define GEN7_L3CNTLREG1 0xB01C -#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C +#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C #define GEN7_L3AGDIS (1<<19) #define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 @@ -4148,9 +4222,6 @@ #define HSW_SCRATCH1 0xb038 #define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27) -#define HSW_FUSE_STRAP 0x42014 -#define HSW_CDCLK_LIMIT (1 << 24) - /* PCH */ /* south display engine interrupt: IBX */ @@ -4436,24 +4507,24 @@ #define HSW_VIDEO_DIP_GCP_B 0x61210 #define HSW_TVIDEO_DIP_CTL(trans) \ - _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B) + _TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A) #define HSW_TVIDEO_DIP_AVI_DATA(trans) \ - _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B) + _TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A) #define 
HSW_TVIDEO_DIP_VS_DATA(trans) \ - _TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B) + _TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A) #define HSW_TVIDEO_DIP_SPD_DATA(trans) \ - _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B) + _TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A) #define HSW_TVIDEO_DIP_GCP(trans) \ - _TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B) + _TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A) #define HSW_TVIDEO_DIP_VSC_DATA(trans) \ - _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B) + _TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A) #define HSW_STEREO_3D_CTL_A 0x70020 #define S3D_ENABLE (1<<31) #define HSW_STEREO_3D_CTL_B 0x71020 #define HSW_STEREO_3D_CTL(trans) \ - _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A) + _PIPE2(trans, HSW_STEREO_3D_CTL_A) #define _PCH_TRANS_HTOTAL_B 0xe1000 #define _PCH_TRANS_HBLANK_B 0xe1004 @@ -4865,6 +4936,9 @@ #define GEN7_UCGCTL4 0x940c #define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) +#define GEN8_UCGCTL6 0x9430 +#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14) + #define GEN6_RPNSWREQ 0xA008 #define GEN6_TURBO_DISABLE (1<<31) #define GEN6_FREQUENCY(x) ((x)<<25) @@ -4945,6 +5019,10 @@ GEN6_PM_RP_DOWN_THRESHOLD | \ GEN6_PM_RP_DOWN_TIMEOUT) +#define VLV_GTLC_SURVIVABILITY_REG 0x130098 +#define VLV_GFX_CLK_STATUS_BIT (1<<3) +#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2) + #define GEN6_GT_GFX_RC6_LOCKED 0x138104 #define VLV_COUNTER_CONTROL 0x138104 #define VLV_COUNT_RANGE_HIGH (1<<15) @@ -5006,6 +5084,10 @@ #define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10) #define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) +#define GEN8_ROW_CHICKEN 0xe4f0 +#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8) +#define STALL_DOP_GATING_DISABLE (1<<5) + #define GEN7_ROW_CHICKEN2 0xe4f4 #define GEN7_ROW_CHICKEN2_GT2 0xf4f4 #define DOP_CLOCK_GATING_DISABLE (1<<0) @@ -5017,7 +5099,7 @@ #define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) #define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) -#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020) +#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020) #define INTEL_AUDIO_DEVCL 0x808629FB #define INTEL_AUDIO_DEVBLC 0x80862801 #define INTEL_AUDIO_DEVCTG 0x80862802 @@ -5178,8 +5260,8 @@ #define TRANS_DDI_FUNC_CTL_B 0x61400 #define TRANS_DDI_FUNC_CTL_C 0x62400 #define TRANS_DDI_FUNC_CTL_EDP 0x6F400 -#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \ - TRANS_DDI_FUNC_CTL_B) +#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A) + #define TRANS_DDI_FUNC_ENABLE (1<<31) /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ #define TRANS_DDI_PORT_MASK (7<<28) @@ -5311,8 +5393,12 @@ #define SPLL_PLL_ENABLE (1<<31) #define SPLL_PLL_SSC (1<<28) #define SPLL_PLL_NON_SSC (2<<28) +#define SPLL_PLL_LCPLL (3<<28) +#define SPLL_PLL_REF_MASK (3<<28) #define SPLL_PLL_FREQ_810MHz (0<<26) #define SPLL_PLL_FREQ_1350MHz (1<<26) +#define SPLL_PLL_FREQ_2700MHz (2<<26) +#define SPLL_PLL_FREQ_MASK (3<<26) /* WRPLL */ #define WRPLL_CTL1 0x46040 @@ -5323,8 +5409,13 @@ #define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) /* WRPLL divider programming */ #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) +#define WRPLL_DIVIDER_REF_MASK (0xff) #define WRPLL_DIVIDER_POST(x) ((x)<<8) +#define WRPLL_DIVIDER_POST_MASK (0x3f<<8) +#define WRPLL_DIVIDER_POST_SHIFT 8 #define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) +#define WRPLL_DIVIDER_FB_SHIFT 16 +#define WRPLL_DIVIDER_FB_MASK (0xff<<16) /* Port clock selection */ #define 
PORT_CLK_SEL_A 0x46100 @@ -5337,6 +5428,7 @@ #define PORT_CLK_SEL_WRPLL1 (4<<29) #define PORT_CLK_SEL_WRPLL2 (5<<29) #define PORT_CLK_SEL_NONE (7<<29) +#define PORT_CLK_SEL_MASK (7<<29) /* Transcoder clock selection */ #define TRANS_CLK_SEL_A 0x46140 @@ -5346,10 +5438,12 @@ #define TRANS_CLK_SEL_DISABLED (0x0<<29) #define TRANS_CLK_SEL_PORT(x) ((x+1)<<29) -#define _TRANSA_MSA_MISC 0x60410 -#define _TRANSB_MSA_MISC 0x61410 -#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \ - _TRANSB_MSA_MISC) +#define TRANSA_MSA_MISC 0x60410 +#define TRANSB_MSA_MISC 0x61410 +#define TRANSC_MSA_MISC 0x62410 +#define TRANS_EDP_MSA_MISC 0x6f410 +#define TRANS_MSA_MISC(tran) _TRANSCODER2(tran, TRANSA_MSA_MISC) + #define TRANS_MSA_SYNC_CLK (1<<0) #define TRANS_MSA_6_BPC (0<<5) #define TRANS_MSA_8_BPC (1<<5) @@ -5389,6 +5483,8 @@ /* SFUSE_STRAP */ #define SFUSE_STRAP 0xc2014 +#define SFUSE_STRAP_FUSE_LOCK (1<<13) +#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7) #define SFUSE_STRAP_DDIB_DETECTED (1<<2) #define SFUSE_STRAP_DDIC_DETECTED (1<<1) #define SFUSE_STRAP_DDID_DETECTED (1<<0) @@ -5857,4 +5953,12 @@ #define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID) #define READ_DATA_VALID(n) (1 << (n)) +/* For UMS only (deprecated): */ +#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000) +#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800) +#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014) +#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018) +#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c) +#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020) + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 8150fdc08d4..56785e8fb2e 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -236,19 +236,9 @@ static void i915_save_display(struct drm_device *dev) dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); } - /* Only regfile.save FBC state on the platform that supports FBC */ - if (HAS_FBC(dev)) { - if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); - } else if (IS_GM45(dev)) { - dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); - } else { - dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); - dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); - dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); - dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); - } - } + /* save FBC interval */ + if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) + dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_save_vga(dev); @@ -300,18 +290,10 @@ static void i915_restore_display(struct drm_device *dev) /* only restore FBC info on the platform that supports FBC*/ intel_disable_fbc(dev); - if (HAS_FBC(dev)) { - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); - } else if (IS_GM45(dev)) { - I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); - } else { - I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE); - I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE); - I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2); - I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); - } - } + + /* restore FBC interval */ + if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && 
!IS_G4X(dev)) + I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_restore_vga(dev); @@ -324,10 +306,6 @@ int i915_save_state(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int i; - if (INTEL_INFO(dev)->gen <= 4) - pci_read_config_byte(dev->pdev, LBB, - &dev_priv->regfile.saveLBB); - mutex_lock(&dev->struct_mutex); i915_save_display(dev); @@ -377,10 +355,6 @@ int i915_restore_state(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int i; - if (INTEL_INFO(dev)->gen <= 4) - pci_write_config_byte(dev->pdev, LBB, - dev_priv->regfile.saveLBB); - mutex_lock(&dev->struct_mutex); i915_gem_restore_fences(dev); diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 33bcae314bf..9c57029f6f4 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -269,7 +269,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff); } else { - ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; + ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER; } mutex_unlock(&dev_priv->rps.hw_lock); @@ -284,7 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, struct drm_i915_private *dev_priv = dev->dev_private; return snprintf(buf, PAGE_SIZE, "%d\n", - vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay)); + vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); } static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) @@ -298,9 +298,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute mutex_lock(&dev_priv->rps.hw_lock); if (IS_VALLEYVIEW(dev_priv->dev)) - ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay); + ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); else - ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; + ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER; mutex_unlock(&dev_priv->rps.hw_lock); return snprintf(buf, PAGE_SIZE, "%d\n", ret); @@ -313,7 +313,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 val, rp_state_cap, hw_max, hw_min, non_oc_max; + u32 val; ssize_t ret; ret = kstrtou32(buf, 0, &val); @@ -324,38 +324,34 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, mutex_lock(&dev_priv->rps.hw_lock); - if (IS_VALLEYVIEW(dev_priv->dev)) { + if (IS_VALLEYVIEW(dev_priv->dev)) val = vlv_freq_opcode(dev_priv, val); - - hw_max = valleyview_rps_max_freq(dev_priv); - hw_min = valleyview_rps_min_freq(dev_priv); - non_oc_max = hw_max; - } else { + else val /= GT_FREQUENCY_MULTIPLIER; - rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - hw_max = dev_priv->rps.hw_max; - non_oc_max = (rp_state_cap & 0xff); - hw_min = ((rp_state_cap & 0xff0000) >> 16); - } - - if (val < hw_min || val > hw_max || - val < dev_priv->rps.min_delay) { + if (val < dev_priv->rps.min_freq || + val > dev_priv->rps.max_freq || + val < dev_priv->rps.min_freq_softlimit) { mutex_unlock(&dev_priv->rps.hw_lock); return -EINVAL; } - if (val > non_oc_max) + if (val > dev_priv->rps.rp0_freq) DRM_DEBUG("User requested overclocking to %d\n", val * GT_FREQUENCY_MULTIPLIER); - dev_priv->rps.max_delay = val; + dev_priv->rps.max_freq_softlimit = val; - if 
(dev_priv->rps.cur_delay > val) { + if (dev_priv->rps.cur_freq > val) { if (IS_VALLEYVIEW(dev)) valleyview_set_rps(dev, val); else gen6_set_rps(dev, val); + } else if (!IS_VALLEYVIEW(dev)) { + /* We still need gen6_set_rps to process the new max_freq_softlimit and + * update the interrupt limits even though the frequency request is + * unchanged. */ + gen6_set_rps(dev, dev_priv->rps.cur_freq); } mutex_unlock(&dev_priv->rps.hw_lock); @@ -374,9 +370,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute mutex_lock(&dev_priv->rps.hw_lock); if (IS_VALLEYVIEW(dev_priv->dev)) - ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay); + ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); else - ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; + ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER; mutex_unlock(&dev_priv->rps.hw_lock); return snprintf(buf, PAGE_SIZE, "%d\n", ret); @@ -389,7 +385,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 val, rp_state_cap, hw_max, hw_min; + u32 val; ssize_t ret; ret = kstrtou32(buf, 0, &val); @@ -400,31 +396,30 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, mutex_lock(&dev_priv->rps.hw_lock); - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev)) val = vlv_freq_opcode(dev_priv, val); - - hw_max = valleyview_rps_max_freq(dev_priv); - hw_min = valleyview_rps_min_freq(dev_priv); - } else { + else val /= GT_FREQUENCY_MULTIPLIER; - rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - hw_max = dev_priv->rps.hw_max; - hw_min = ((rp_state_cap & 0xff0000) >> 16); - } - - if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { + if (val < dev_priv->rps.min_freq || + val > dev_priv->rps.max_freq || + val > dev_priv->rps.max_freq_softlimit) { mutex_unlock(&dev_priv->rps.hw_lock); return -EINVAL; } - dev_priv->rps.min_delay = val; + dev_priv->rps.min_freq_softlimit = val; - if (dev_priv->rps.cur_delay < val) { + if (dev_priv->rps.cur_freq < val) { if (IS_VALLEYVIEW(dev)) valleyview_set_rps(dev, val); else gen6_set_rps(dev, val); + } else if (!IS_VALLEYVIEW(dev)) { + /* We still need gen6_set_rps to process the new min_freq_softlimit and + * update the interrupt limits even though the frequency request is + * unchanged. */ + gen6_set_rps(dev, dev_priv->rps.cur_freq); } mutex_unlock(&dev_priv->rps.hw_lock); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 6e580c98ded..23c26f1f8b3 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -34,15 +34,15 @@ TRACE_EVENT(i915_gem_object_create, ); TRACE_EVENT(i915_vma_bind, - TP_PROTO(struct i915_vma *vma, bool mappable), - TP_ARGS(vma, mappable), + TP_PROTO(struct i915_vma *vma, unsigned flags), + TP_ARGS(vma, flags), TP_STRUCT__entry( __field(struct drm_i915_gem_object *, obj) __field(struct i915_address_space *, vm) __field(u32, offset) __field(u32, size) - __field(bool, mappable) + __field(unsigned, flags) ), TP_fast_assign( @@ -50,12 +50,12 @@ TRACE_EVENT(i915_vma_bind, __entry->vm = vma->vm; __entry->offset = vma->node.start; __entry->size = vma->node.size; - __entry->mappable = mappable; + __entry->flags = flags; ), TP_printk("obj=%p, offset=%08x size=%x%s vm=%p", __entry->obj, __entry->offset, __entry->size, - __entry->mappable ? ", mappable" : "", + __entry->flags & PIN_MAPPABLE ? 
", mappable" : "", __entry->vm) ); @@ -196,26 +196,26 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, ); TRACE_EVENT(i915_gem_evict, - TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable), - TP_ARGS(dev, size, align, mappable), + TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags), + TP_ARGS(dev, size, align, flags), TP_STRUCT__entry( __field(u32, dev) __field(u32, size) __field(u32, align) - __field(bool, mappable) + __field(unsigned, flags) ), TP_fast_assign( __entry->dev = dev->primary->index; __entry->size = size; __entry->align = align; - __entry->mappable = mappable; + __entry->flags = flags; ), TP_printk("dev=%d, size=%d, align=%d %s", __entry->dev, __entry->size, __entry->align, - __entry->mappable ? ", mappable" : "") + __entry->flags & PIN_MAPPABLE ? ", mappable" : "") ); TRACE_EVENT(i915_gem_evict_everything, @@ -238,14 +238,16 @@ TRACE_EVENT(i915_gem_evict_vm, TP_ARGS(vm), TP_STRUCT__entry( + __field(u32, dev) __field(struct i915_address_space *, vm) ), TP_fast_assign( + __entry->dev = vm->dev->primary->index; __entry->vm = vm; ), - TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm) + TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm) ); TRACE_EVENT(i915_gem_ring_sync_to, diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c index caa18e85581..480da593e6c 100644 --- a/drivers/gpu/drm/i915/i915_ums.c +++ b/drivers/gpu/drm/i915/i915_ums.c @@ -271,6 +271,10 @@ void i915_save_display_reg(struct drm_device *dev) /* FIXME: regfile.save TV & SDVO state */ /* Backlight */ + if (INTEL_INFO(dev)->gen <= 4) + pci_read_config_byte(dev->pdev, PCI_LBPC, + &dev_priv->regfile.saveLBB); + if (HAS_PCH_SPLIT(dev)) { dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); @@ -293,6 +297,10 @@ void i915_restore_display_reg(struct drm_device *dev) int i; /* Backlight */ + if (INTEL_INFO(dev)->gen <= 4) + pci_write_config_byte(dev->pdev, PCI_LBPC, + dev_priv->regfile.saveLBB); + if (HAS_PCH_SPLIT(dev)) { I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL); I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index f22041973f3..4867f4cc093 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -259,7 +259,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, downclock = dvo_timing->clock; } - if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) { + if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) { dev_priv->lvds_downclock_avail = 1; dev_priv->lvds_downclock = downclock * 10; DRM_DEBUG_KMS("LVDS downclock is found in VBT. 
" @@ -318,7 +318,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv, struct drm_display_mode *panel_fixed_mode; int index; - index = i915_vbt_sdvo_panel_type; + index = i915.vbt_sdvo_panel_type; if (index == -2) { DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n"); return; } @@ -599,14 +599,14 @@ parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_mipi *mipi; - mipi = find_section(bdb, BDB_MIPI); + mipi = find_section(bdb, BDB_MIPI_CONFIG); if (!mipi) { DRM_DEBUG_KMS("No MIPI BDB found"); return; } /* XXX: add more info */ - dev_priv->vbt.dsi.panel_id = mipi->panel_id; + dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; } static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 282de5e9f39..83b7629e436 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -104,7 +104,8 @@ struct vbios_data { #define BDB_LVDS_LFP_DATA 42 #define BDB_LVDS_BACKLIGHT 43 #define BDB_LVDS_POWER 44 -#define BDB_MIPI 50 +#define BDB_MIPI_CONFIG 52 +#define BDB_MIPI_SEQUENCE 53 #define BDB_SKIP 254 /* VBIOS private block, ignore */ struct bdb_general_features { @@ -711,44 +712,159 @@ int intel_parse_bios(struct drm_device *dev); #define DVO_PORT_DPD 9 #define DVO_PORT_DPA 10 -/* MIPI DSI panel info */ -struct bdb_mipi { - u16 panel_id; - u16 bridge_revision; - - /* General params */ - u32 dithering:1; - u32 bpp_pixel_format:1; - u32 rsvd1:1; - u32 dphy_valid:1; - u32 resvd2:28; +/* Block 52 contains MIPI Panel info * 6 such entries will be there. Index into the correct * entry is based on the panel_index in #40 LFP + */ +#define MAX_MIPI_CONFIGURATIONS 6 - u16 port_info; - u16 rsvd3:2; - u16 num_lanes:2; - u16 rsvd4:12; +#define MIPI_DSI_UNDEFINED_PANEL_ID 0 +#define MIPI_DSI_GENERIC_PANEL_ID 1 - /* DSI config */ - u16 virt_ch_num:2; - u16 vtm:2; - u16 rsvd5:12; +struct mipi_config { + u16 panel_id; - u32 dsi_clock; + /* General Params */ + u32 enable_dithering:1; + u32 rsvd1:1; + u32 is_bridge:1; + + u32 panel_arch_type:2; + u32 is_cmd_mode:1; + +#define NON_BURST_SYNC_PULSE 0x1 +#define NON_BURST_SYNC_EVENTS 0x2 +#define BURST_MODE 0x3 + u32 video_transfer_mode:2; + + u32 cabc_supported:1; + u32 pwm_blc:1; + + /* Bit 13:10 */ +#define PIXEL_FORMAT_RGB565 0x1 +#define PIXEL_FORMAT_RGB666 0x2 +#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3 +#define PIXEL_FORMAT_RGB888 0x4 + u32 videomode_color_format:4; + + /* Bit 15:14 */ +#define ENABLE_ROTATION_0 0x0 +#define ENABLE_ROTATION_90 0x1 +#define ENABLE_ROTATION_180 0x2 +#define ENABLE_ROTATION_270 0x3 + u32 rotation:2; + u32 bta_enabled:1; + u32 rsvd2:15; + + /* 2 byte Port Description */ +#define DUAL_LINK_NOT_SUPPORTED 0 +#define DUAL_LINK_FRONT_BACK 1 +#define DUAL_LINK_PIXEL_ALT 2 + u16 dual_link:2; + u16 lane_cnt:2; + u16 rsvd3:12; + + u16 rsvd4; + + u8 rsvd5[5]; + u32 dsi_ddr_clk; u32 bridge_ref_clk; - u16 rsvd_pwr; - /* Dphy Params */ - u32 prepare_cnt:5; - u32 rsvd6:3; +#define BYTE_CLK_SEL_20MHZ 0 +#define BYTE_CLK_SEL_10MHZ 1 +#define BYTE_CLK_SEL_5MHZ 2 + u8 byte_clk_sel:2; + + u8 rsvd6:6; + + /* DPHY Flags */ + u16 dphy_param_valid:1; + u16 eot_pkt_disabled:1; + u16 enable_clk_stop:1; + u16 rsvd7:13; + + u32 hs_tx_timeout; + u32 lp_rx_timeout; + u32 turn_around_timeout; + u32 device_reset_timer; + u32 master_init_timer; + u32 dbi_bw_timer; + u32 lp_byte_clk_val; + + /* 4 byte Dphy Params */ + u32 prepare_cnt:6; + u32 rsvd8:2; u32 clk_zero_cnt:8; u32 
trail_cnt:5; - u32 rsvd7:3; + u32 rsvd9:3; u32 exit_zero_cnt:6; - u32 rsvd8:2; + u32 rsvd10:2; - u32 hl_switch_cnt; - u32 lp_byte_clk; u32 clk_lane_switch_cnt; + u32 hl_switch_cnt; + + u32 rsvd11[6]; + + /* timings based on dphy spec */ + u8 tclk_miss; + u8 tclk_post; + u8 rsvd12; + u8 tclk_pre; + u8 tclk_prepare; + u8 tclk_settle; + u8 tclk_term_enable; + u8 tclk_trail; + u16 tclk_prepare_clkzero; + u8 rsvd13; + u8 td_term_enable; + u8 teot; + u8 ths_exit; + u8 ths_prepare; + u16 ths_prepare_hszero; + u8 rsvd14; + u8 ths_settle; + u8 ths_skip; + u8 ths_trail; + u8 tinit; + u8 tlpx; + u8 rsvd15[3]; + + /* GPIOs */ + u8 panel_enable; + u8 bl_enable; + u8 pwm_enable; + u8 reset_r_n; + u8 pwr_down_r; + u8 stdby_r_n; + } __packed; +/* Block 52 contains MIPI configuration block * 6 * bdb_mipi_config, followed by 6 pps data * block below * * all delays have a unit of 100us */ +struct mipi_pps_data { + u16 panel_on_delay; + u16 bl_enable_delay; + u16 bl_disable_delay; + u16 panel_off_delay; + u16 panel_power_cycle_delay; +}; + +struct bdb_mipi_config { + struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; + struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; +}; + +/* Block 53 contains MIPI sequences as needed by the panel * for enabling it. This block can be variable in size and * can be a maximum of 6 blocks */ +struct bdb_mipi_sequence { + u8 version; + u8 data[0]; +}; + #endif /* _I830_BIOS_H_ */ diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index e2e39e65f10..aa5a3dc4334 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -68,8 +68,13 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crt *crt = intel_encoder_to_crt(encoder); + enum intel_display_power_domain power_domain; u32 tmp; + power_domain = intel_display_port_power_domain(encoder); + if (!intel_display_power_enabled(dev_priv, power_domain)) + return false; + tmp = I915_READ(crt->adpa_reg); if (!(tmp & ADPA_DAC_ENABLE)) @@ -262,6 +267,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, if (HAS_PCH_LPT(dev)) pipe_config->pipe_bpp = 24; + /* FDI must always be 2.7 GHz */ + if (HAS_DDI(dev)) + pipe_config->port_clock = 135000 * 2; + return true; } @@ -630,14 +639,22 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crt *crt = intel_attached_crt(connector); + struct intel_encoder *intel_encoder = &crt->base; + enum intel_display_power_domain power_domain; enum drm_connector_status status; struct intel_load_detect_pipe tmp; + intel_runtime_pm_get(dev_priv); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", connector->base.id, drm_get_connector_name(connector), force); + power_domain = intel_display_port_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); + if (I915_HAS_HOTPLUG(dev)) { /* We can not rely on the HPD pin always being correctly wired * up, for example many KVM do not pass it through, and so @@ -645,23 +662,30 @@ intel_crt_detect(struct drm_connector *connector, bool force) */ if (intel_crt_detect_hotplug(connector)) { DRM_DEBUG_KMS("CRT detected via hotplug\n"); - return connector_status_connected; + status = connector_status_connected; + goto out; } else DRM_DEBUG_KMS("CRT not detected via hotplug\n"); } - if 
(intel_crt_detect_ddc(connector)) - return connector_status_connected; + if (intel_crt_detect_ddc(connector)) { + status = connector_status_connected; + goto out; + } /* Load detection is broken on HPD capable machines. Whoever wants a * broken monitor (without edid) to work behind a broken kvm (that fails * to have the right resistors for HP detection) needs to fix this up. * For now just bail out. */ - if (I915_HAS_HOTPLUG(dev)) - return connector_status_disconnected; + if (I915_HAS_HOTPLUG(dev)) { + status = connector_status_disconnected; + goto out; + } - if (!force) - return connector->status; + if (!force) { + status = connector->status; + goto out; + } /* for pre-945g platforms use load detect */ if (intel_get_load_detect_pipe(connector, NULL, &tmp)) { @@ -673,6 +697,10 @@ intel_crt_detect(struct drm_connector *connector, bool force) } else status = connector_status_unknown; +out: + intel_display_power_put(dev_priv, power_domain); + intel_runtime_pm_put(dev_priv); + return status; } @@ -686,17 +714,28 @@ static int intel_crt_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crt *crt = intel_attached_crt(connector); + struct intel_encoder *intel_encoder = &crt->base; + enum intel_display_power_domain power_domain; int ret; struct i2c_adapter *i2c; + power_domain = intel_display_port_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); + i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin); ret = intel_crt_ddc_get_modes(connector, i2c); if (ret || !IS_G4X(dev)) - return ret; + goto out; /* Try to probe digital port for output in DVI-I -> VGA mode. */ i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); - return intel_crt_ddc_get_modes(connector, i2c); + ret = intel_crt_ddc_get_modes(connector, i2c); + +out: + intel_display_power_put(dev_priv, power_domain); + + return ret; } static int intel_crt_set_property(struct drm_connector *connector, @@ -765,6 +804,14 @@ static const struct dmi_system_id intel_no_crt[] = { DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"), }, }, + { + .callback = intel_no_crt_dmi_callback, + .ident = "DELL XPS 8700", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"), + }, + }, { } }; @@ -800,7 +847,7 @@ void intel_crt_init(struct drm_device *dev) intel_connector_attach_encoder(intel_connector, &crt->base); crt->base.type = INTEL_OUTPUT_ANALOG; - crt->base.cloneable = true; + crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI); if (IS_I830(dev)) crt->base.crtc_mask = (1 << 0); else @@ -833,6 +880,7 @@ void intel_crt_init(struct drm_device *dev) crt->base.get_hw_state = intel_crt_get_hw_state; } intel_connector->get_hw_state = intel_connector_get_hw_state; + intel_connector->unregister = intel_connector_unregister; drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); @@ -857,4 +905,6 @@ void intel_crt_init(struct drm_device *dev) dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config; } + + intel_crt_reset(connector); } diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 234ac5f7bc5..0ad4e960006 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -633,6 +633,97 @@ static void wrpll_update_rnp(uint64_t freq2k, unsigned budget, /* Otherwise a < c && b >= d, do nothing */ } +static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, + int reg) +{ + int 
refclk = LC_FREQ; + int n, p, r; + u32 wrpll; + + wrpll = I915_READ(reg); + switch (wrpll & SPLL_PLL_REF_MASK) { + case SPLL_PLL_SSC: + case SPLL_PLL_NON_SSC: + /* + * We could calculate spread here, but our checking + * code only cares about 5% accuracy, and spread is a max of + * 0.5% downspread. + */ + refclk = 135; + break; + case SPLL_PLL_LCPLL: + refclk = LC_FREQ; + break; + default: + WARN(1, "bad wrpll refclk\n"); + return 0; + } + + r = wrpll & WRPLL_DIVIDER_REF_MASK; + p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT; + n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT; + + /* Convert to KHz, p & r have a fixed point portion */ + return (refclk * n * 100) / (p * r); +} + +static void intel_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + enum port port = intel_ddi_get_encoder_port(encoder); + int link_clock = 0; + u32 val, pll; + + val = I915_READ(PORT_CLK_SEL(port)); + switch (val & PORT_CLK_SEL_MASK) { + case PORT_CLK_SEL_LCPLL_810: + link_clock = 81000; + break; + case PORT_CLK_SEL_LCPLL_1350: + link_clock = 135000; + break; + case PORT_CLK_SEL_LCPLL_2700: + link_clock = 270000; + break; + case PORT_CLK_SEL_WRPLL1: + link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1); + break; + case PORT_CLK_SEL_WRPLL2: + link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2); + break; + case PORT_CLK_SEL_SPLL: + pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK; + if (pll == SPLL_PLL_FREQ_810MHz) + link_clock = 81000; + else if (pll == SPLL_PLL_FREQ_1350MHz) + link_clock = 135000; + else if (pll == SPLL_PLL_FREQ_2700MHz) + link_clock = 270000; + else { + WARN(1, "bad spll freq\n"); + return; + } + break; + default: + WARN(1, "bad port clock sel\n"); + return; + } + + pipe_config->port_clock = link_clock * 2; + + if (pipe_config->has_pch_encoder) + pipe_config->adjusted_mode.crtc_clock = + intel_dotclock_calculate(pipe_config->port_clock, + &pipe_config->fdi_m_n); + else if (pipe_config->has_dp_encoder) + pipe_config->adjusted_mode.crtc_clock = + intel_dotclock_calculate(pipe_config->port_clock, + &pipe_config->dp_m_n); + else + pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock; +} + static void intel_ddi_calculate_wrpll(int clock /* in Hz */, unsigned *r2_out, unsigned *n2_out, unsigned *p_out) @@ -1017,8 +1108,13 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) enum port port = intel_ddi_get_encoder_port(intel_encoder); enum pipe pipe = 0; enum transcoder cpu_transcoder; + enum intel_display_power_domain power_domain; uint32_t tmp; + power_domain = intel_display_port_power_domain(intel_encoder); + if (!intel_display_power_enabled(dev_priv, power_domain)) + return false; + if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) return false; @@ -1054,9 +1150,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; enum port port = intel_ddi_get_encoder_port(encoder); + enum intel_display_power_domain power_domain; u32 tmp; int i; + power_domain = intel_display_port_power_domain(encoder); + if (!intel_display_power_enabled(dev_priv, power_domain)) + return false; + tmp = I915_READ(DDI_BUF_CTL(port)); if (!(tmp & DDI_BUF_CTL_ENABLE)) @@ -1200,7 +1301,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) if (type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = 
enc_to_intel_dp(encoder); - ironlake_edp_panel_on(intel_dp); + intel_edp_panel_on(intel_dp); } WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE); @@ -1244,8 +1345,8 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); - ironlake_edp_panel_vdd_on(intel_dp); - ironlake_edp_panel_off(intel_dp); + intel_edp_panel_vdd_on(intel_dp); + intel_edp_panel_off(intel_dp); } I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); @@ -1280,7 +1381,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) if (port == PORT_A) intel_dp_stop_link_train(intel_dp); - ironlake_edp_backlight_on(intel_dp); + intel_edp_backlight_on(intel_dp); intel_edp_psr_enable(intel_dp); } @@ -1313,7 +1414,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_edp_psr_disable(intel_dp); - ironlake_edp_backlight_off(intel_dp); + intel_edp_backlight_off(intel_dp); } } @@ -1325,7 +1426,7 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) if (lcpll & LCPLL_CD_SOURCE_FCLK) { return 800000; - } else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) { + } else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) { return 450000; } else if (freq == LCPLL_CLK_FREQ_450) { return 450000; @@ -1510,6 +1611,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder, pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp); dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; } + + intel_ddi_clock_get(encoder, pipe_config); } static void intel_ddi_destroy(struct drm_encoder *encoder) @@ -1620,7 +1723,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) intel_encoder->type = INTEL_OUTPUT_UNKNOWN; intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); - intel_encoder->cloneable = false; + intel_encoder->cloneable = 0; intel_encoder->hot_plug = intel_ddi_hot_plug; if (init_dp) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9b8a7c7ea7f..dae976f51d8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -51,7 +51,10 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc, static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, int x, int y, struct drm_framebuffer *old_fb); - +static int intel_framebuffer_init(struct drm_device *dev, + struct intel_framebuffer *ifb, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_i915_gem_object *obj); typedef struct { int min, max; @@ -738,10 +741,10 @@ bool intel_crtc_active(struct drm_crtc *crtc) * We can ditch the adjusted_mode.crtc_clock check as soon * as Haswell has gained clock readout/fastboot support. * - * We can ditch the crtc->fb check as soon as we can + * We can ditch the crtc->primary->fb check as soon as we can * properly reconstruct framebuffers. 
*/ - return intel_crtc->active && crtc->fb && + return intel_crtc->active && crtc->primary->fb && intel_crtc->config.adjusted_mode.crtc_clock; } @@ -1030,7 +1033,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, u32 val; /* ILK FDI PLL is always enabled */ - if (dev_priv->info->gen == 5) + if (INTEL_INFO(dev_priv->dev)->gen == 5) return; /* On Haswell, DDI ports are responsible for the FDI PLL setup */ @@ -1119,7 +1122,7 @@ void assert_pipe(struct drm_i915_private *dev_priv, if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) state = true; - if (!intel_display_power_enabled(dev_priv->dev, + if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { cur_state = false; } else { @@ -1163,7 +1166,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv, if (INTEL_INFO(dev)->gen >= 4) { reg = DSPCNTR(pipe); val = I915_READ(reg); - WARN((val & DISPLAY_PLANE_ENABLE), + WARN(val & DISPLAY_PLANE_ENABLE, "plane %c assertion failure, should be disabled but not\n", plane_name(pipe)); return; @@ -1185,27 +1188,27 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { struct drm_device *dev = dev_priv->dev; - int reg, i; + int reg, sprite; u32 val; if (IS_VALLEYVIEW(dev)) { - for (i = 0; i < dev_priv->num_plane; i++) { - reg = SPCNTR(pipe, i); + for_each_sprite(pipe, sprite) { + reg = SPCNTR(pipe, sprite); val = I915_READ(reg); - WARN((val & SP_ENABLE), + WARN(val & SP_ENABLE, "sprite %c assertion failure, should be off on pipe %c but is still active\n", - sprite_name(pipe, i), pipe_name(pipe)); + sprite_name(pipe, sprite), pipe_name(pipe)); } } else if (INTEL_INFO(dev)->gen >= 7) { reg = SPRCTL(pipe); val = I915_READ(reg); - WARN((val & SPRITE_ENABLE), + WARN(val & SPRITE_ENABLE, "sprite %c assertion failure, should be off on pipe %c but is still active\n", plane_name(pipe), pipe_name(pipe)); } else if (INTEL_INFO(dev)->gen >= 5) { reg = DVSCNTR(pipe); val = I915_READ(reg); - WARN((val & DVS_ENABLE), + WARN(val & DVS_ENABLE, "sprite %c assertion failure, should be off on pipe %c but is still active\n", plane_name(pipe), pipe_name(pipe)); } @@ -1443,7 +1446,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) assert_pipe_disabled(dev_priv, crtc->pipe); /* No really, not for ILK+ */ - BUG_ON(dev_priv->info->gen >= 5); + BUG_ON(INTEL_INFO(dev)->gen >= 5); /* PLL is protected by panel, make sure we can write it */ if (IS_MOBILE(dev) && !IS_I830(dev)) @@ -1549,11 +1552,12 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, */ static void ironlake_enable_shared_dpll(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); /* PCH PLLs only available on ILK, SNB and IVB */ - BUG_ON(dev_priv->info->gen < 5); + BUG_ON(INTEL_INFO(dev)->gen < 5); if (WARN_ON(pll == NULL)) return; @@ -1578,11 +1582,12 @@ static void ironlake_enable_shared_dpll(struct intel_crtc *crtc) static void intel_disable_shared_dpll(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); /* PCH only available on ILK+ */ - BUG_ON(dev_priv->info->gen < 5); + BUG_ON(INTEL_INFO(dev)->gen < 5); if (WARN_ON(pll == NULL)) 
return; @@ -1617,7 +1622,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, uint32_t reg, val, pipeconf_val; /* PCH only available on ILK+ */ - BUG_ON(dev_priv->info->gen < 5); + BUG_ON(INTEL_INFO(dev)->gen < 5); /* Make sure PCH DPLL is enabled */ assert_shared_dpll_enabled(dev_priv, @@ -1670,7 +1675,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, u32 val, pipeconf_val; /* PCH only available on ILK+ */ - BUG_ON(dev_priv->info->gen < 5); + BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5); /* FDI must be feeding us bits for PCH ports */ assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); @@ -1744,21 +1749,16 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) /** * intel_enable_pipe - enable a pipe, asserting requirements - * @dev_priv: i915 private structure - * @pipe: pipe to enable - * @pch_port: on ILK+, is this pipe driving a PCH port or not + * @crtc: crtc responsible for the pipe * - * Enable @pipe, making sure that various hardware specific requirements + * Enable @crtc's pipe, making sure that various hardware specific requirements * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. - * - * @pipe should be %PIPE_A or %PIPE_B. - * - * Will wait until the pipe is actually running (i.e. first vblank) before - * returning. */ -static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, - bool pch_port, bool dsi) +static void intel_enable_pipe(struct intel_crtc *crtc) { + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, pipe); enum pipe pch_transcoder; @@ -1780,12 +1780,12 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, * need the check. */ if (!HAS_PCH_SPLIT(dev_priv->dev)) - if (dsi) + if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI)) assert_dsi_pll_enabled(dev_priv); else assert_pll_enabled(dev_priv, pipe); else { - if (pch_port) { + if (crtc->config.has_pch_encoder) { /* if driving the PCH, we need FDI enabled */ assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); assert_fdi_tx_pll_enabled(dev_priv, @@ -1796,11 +1796,24 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); - if (val & PIPECONF_ENABLE) + if (val & PIPECONF_ENABLE) { + WARN_ON(!(pipe == PIPE_A && + dev_priv->quirks & QUIRK_PIPEA_FORCE)); return; + } I915_WRITE(reg, val | PIPECONF_ENABLE); - intel_wait_for_vblank(dev_priv->dev, pipe); + POSTING_READ(reg); + + /* + * There's no guarantee the pipe will really start running now. It + * depends on the Gen, the output type and the relative order between + * pipe and plane enabling. Avoid waiting on HSW+ since it's not + * necessary. + * TODO: audit the previous gens. + */ + if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) + intel_wait_for_vblank(dev_priv->dev, pipe); } /** @@ -1851,22 +1864,23 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, void intel_flush_primary_plane(struct drm_i915_private *dev_priv, enum plane plane) { - u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane); + struct drm_device *dev = dev_priv->dev; + u32 reg = INTEL_INFO(dev)->gen >= 4 ? 
DSPSURF(plane) : DSPADDR(plane); I915_WRITE(reg, I915_READ(reg)); POSTING_READ(reg); } /** - * intel_enable_primary_plane - enable the primary plane on a given pipe + * intel_enable_primary_hw_plane - enable the primary plane on a given pipe * @dev_priv: i915 private structure * @plane: plane to enable * @pipe: pipe being fed * * Enable @plane on @pipe, making sure that @pipe is running first. */ -static void intel_enable_primary_plane(struct drm_i915_private *dev_priv, - enum plane plane, enum pipe pipe) +static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv, + enum plane plane, enum pipe pipe) { struct intel_crtc *intel_crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); @@ -1891,15 +1905,15 @@ static void intel_enable_primary_plane(struct drm_i915_private *dev_priv, } /** - * intel_disable_primary_plane - disable the primary plane + * intel_disable_primary_hw_plane - disable the primary hardware plane * @dev_priv: i915 private structure * @plane: plane to disable * @pipe: pipe consuming the data * * Disable @plane; should be an independent operation. */ -static void intel_disable_primary_plane(struct drm_i915_private *dev_priv, - enum plane plane, enum pipe pipe) +static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv, + enum plane plane, enum pipe pipe) { struct intel_crtc *intel_crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); @@ -1929,6 +1943,14 @@ static bool need_vtd_wa(struct drm_device *dev) return false; } +static int intel_align_height(struct drm_device *dev, int height, bool tiled) +{ + int tile_height; + + tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1; + return ALIGN(height, tile_height); +} + int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, @@ -2025,8 +2047,114 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y, } } -static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y) +int intel_format_to_fourcc(int format) +{ + switch (format) { + case DISPPLANE_8BPP: + return DRM_FORMAT_C8; + case DISPPLANE_BGRX555: + return DRM_FORMAT_XRGB1555; + case DISPPLANE_BGRX565: + return DRM_FORMAT_RGB565; + default: + case DISPPLANE_BGRX888: + return DRM_FORMAT_XRGB8888; + case DISPPLANE_RGBX888: + return DRM_FORMAT_XBGR8888; + case DISPPLANE_BGRX101010: + return DRM_FORMAT_XRGB2101010; + case DISPPLANE_RGBX101010: + return DRM_FORMAT_XBGR2101010; + } +} + +static bool intel_alloc_plane_obj(struct intel_crtc *crtc, + struct intel_plane_config *plane_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_gem_object *obj = NULL; + struct drm_mode_fb_cmd2 mode_cmd = { 0 }; + u32 base = plane_config->base; + + if (plane_config->size == 0) + return false; + + obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base, + plane_config->size); + if (!obj) + return false; + + if (plane_config->tiled) { + obj->tiling_mode = I915_TILING_X; + obj->stride = crtc->base.primary->fb->pitches[0]; + } + + mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format; + mode_cmd.width = crtc->base.primary->fb->width; + mode_cmd.height = crtc->base.primary->fb->height; + mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0]; + + mutex_lock(&dev->struct_mutex); + + if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb), + &mode_cmd, obj)) { + DRM_DEBUG_KMS("intel fb init failed\n"); + goto out_unref_obj; + } + + mutex_unlock(&dev->struct_mutex); + + DRM_DEBUG_KMS("plane fb obj %p\n", obj); + return true; + 
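/*
 * [Editor's aside: commentary, not part of the patch] The out_unref_obj
 * label just below is the usual kernel goto-unwind path: if
 * intel_framebuffer_init() fails, the preallocated stolen-memory object
 * is unreferenced and struct_mutex is dropped before returning false,
 * at which point the caller (intel_find_plane_obj, next in this hunk)
 * frees the placeholder fb, clears primary->fb, and falls back to
 * sharing another active CRTC's framebuffer when the GGTT offsets
 * match.
 */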
+out_unref_obj: + drm_gem_object_unreference(&obj->base); + mutex_unlock(&dev->struct_mutex); + return false; +} + +static void intel_find_plane_obj(struct intel_crtc *intel_crtc, + struct intel_plane_config *plane_config) +{ + struct drm_device *dev = intel_crtc->base.dev; + struct drm_crtc *c; + struct intel_crtc *i; + struct intel_framebuffer *fb; + + if (!intel_crtc->base.primary->fb) + return; + + if (intel_alloc_plane_obj(intel_crtc, plane_config)) + return; + + kfree(intel_crtc->base.primary->fb); + intel_crtc->base.primary->fb = NULL; + + /* + * Failed to alloc the obj, check to see if we should share + * an fb with another CRTC instead + */ + list_for_each_entry(c, &dev->mode_config.crtc_list, head) { + i = to_intel_crtc(c); + + if (c == &intel_crtc->base) + continue; + + if (!i->active || !c->primary->fb) + continue; + + fb = to_intel_framebuffer(c->primary->fb); + if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) { + drm_framebuffer_reference(c->primary->fb); + intel_crtc->base.primary->fb = c->primary->fb; + break; + } + } +} + +static int i9xx_update_primary_plane(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -2125,8 +2253,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, return 0; } -static int ironlake_update_plane(struct drm_crtc *crtc, - struct drm_framebuffer *fb, int x, int y) +static int ironlake_update_primary_plane(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -2230,7 +2359,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, dev_priv->display.disable_fbc(dev); intel_increase_pllclock(crtc); - return dev_priv->display.update_plane(crtc, fb, x, y); + return dev_priv->display.update_primary_plane(crtc, fb, x, y); } void intel_display_handle_reset(struct drm_device *dev) @@ -2267,11 +2396,13 @@ void intel_display_handle_reset(struct drm_device *dev) /* * FIXME: Once we have proper support for primary planes (and * disabling them without disabling the entire crtc) allow again - * a NULL crtc->fb. + * a NULL crtc->primary->fb. 
*/ - if (intel_crtc->active && crtc->fb) - dev_priv->display.update_plane(crtc, crtc->fb, - crtc->x, crtc->y); + if (intel_crtc->active && crtc->primary->fb) + dev_priv->display.update_primary_plane(crtc, + crtc->primary->fb, + crtc->x, + crtc->y); mutex_unlock(&crtc->mutex); } } @@ -2299,31 +2430,23 @@ intel_finish_fb(struct drm_framebuffer *old_fb) return ret; } -static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y) +static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_master_private *master_priv; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + unsigned long flags; + bool pending; - if (!dev->primary->master) - return; + if (i915_reset_in_progress(&dev_priv->gpu_error) || + intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) + return false; - master_priv = dev->primary->master->driver_priv; - if (!master_priv->sarea_priv) - return; + spin_lock_irqsave(&dev->event_lock, flags); + pending = to_intel_crtc(crtc)->unpin_work != NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); - switch (intel_crtc->pipe) { - case 0: - master_priv->sarea_priv->pipeA_x = x; - master_priv->sarea_priv->pipeA_y = y; - break; - case 1: - master_priv->sarea_priv->pipeB_x = x; - master_priv->sarea_priv->pipeB_y = y; - break; - default: - break; - } + return pending; } static int @@ -2336,6 +2459,11 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb; int ret; + if (intel_crtc_has_pending_flip(crtc)) { + DRM_ERROR("pipe is still busy with an old pageflip\n"); + return -EBUSY; + } + /* no fb bound */ if (!fb) { DRM_ERROR("No FB bound\n"); @@ -2353,8 +2481,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, ret = intel_pin_and_fence_fb_obj(dev, to_intel_framebuffer(fb)->obj, NULL); + mutex_unlock(&dev->struct_mutex); if (ret != 0) { - mutex_unlock(&dev->struct_mutex); DRM_ERROR("pin & fence failed\n"); return ret; } @@ -2372,7 +2500,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, * whether the platform allows pfit disable with pipe active, and only * then update the pipesrc and pfit state, even on the flip path. 
*/ - if (i915_fastboot) { + if (i915.fastboot) { const struct drm_display_mode *adjusted_mode = &intel_crtc->config.adjusted_mode; @@ -2390,31 +2518,33 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay; } - ret = dev_priv->display.update_plane(crtc, fb, x, y); + ret = dev_priv->display.update_primary_plane(crtc, fb, x, y); if (ret) { + mutex_lock(&dev->struct_mutex); intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj); mutex_unlock(&dev->struct_mutex); DRM_ERROR("failed to update base address\n"); return ret; } - old_fb = crtc->fb; - crtc->fb = fb; + old_fb = crtc->primary->fb; + crtc->primary->fb = fb; crtc->x = x; crtc->y = y; if (old_fb) { if (intel_crtc->active && old_fb != fb) intel_wait_for_vblank(dev, intel_crtc->pipe); + mutex_lock(&dev->struct_mutex); intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); + mutex_unlock(&dev->struct_mutex); } + mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); intel_edp_psr_update(dev); mutex_unlock(&dev->struct_mutex); - intel_crtc_update_sarea_pos(crtc, x, y); - return 0; } @@ -2963,25 +3093,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) udelay(100); } -static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - unsigned long flags; - bool pending; - - if (i915_reset_in_progress(&dev_priv->gpu_error) || - intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) - return false; - - spin_lock_irqsave(&dev->event_lock, flags); - pending = to_intel_crtc(crtc)->unpin_work != NULL; - spin_unlock_irqrestore(&dev->event_lock, flags); - - return pending; -} - bool intel_has_pending_fb_unpin(struct drm_device *dev) { struct intel_crtc *crtc; @@ -3011,7 +3122,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - if (crtc->fb == NULL) + if (crtc->primary->fb == NULL) return; WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); @@ -3020,7 +3131,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) !intel_crtc_has_pending_flip(crtc)); mutex_lock(&dev->struct_mutex); - intel_finish_fb(crtc->fb); + intel_finish_fb(crtc->primary->fb); mutex_unlock(&dev->struct_mutex); } @@ -3425,22 +3536,28 @@ static void intel_enable_planes(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; enum pipe pipe = to_intel_crtc(crtc)->pipe; + struct drm_plane *plane; struct intel_plane *intel_plane; - list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head) + drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { + intel_plane = to_intel_plane(plane); if (intel_plane->pipe == pipe) intel_plane_restore(&intel_plane->base); + } } static void intel_disable_planes(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; enum pipe pipe = to_intel_crtc(crtc)->pipe; + struct drm_plane *plane; struct intel_plane *intel_plane; - list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head) + drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { + intel_plane = to_intel_plane(plane); if (intel_plane->pipe == pipe) intel_plane_disable(&intel_plane->base); + } } void hsw_enable_ips(struct intel_crtc *crtc) @@ -3587,9 +3704,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_crtc_load_lut(crtc); 
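/*
 * [Editor's aside: commentary, not part of the patch] In the hunks
 * below, intel_enable_pipe() loses its pch_port/dsi boolean parameters
 * and takes the crtc instead; as the refactored helper earlier in this
 * patch shows, those flags are now derived inside it from
 * intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI) and
 * crtc->config.has_pch_encoder, shrinking every call site to
 * intel_enable_pipe(intel_crtc).
 */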
intel_update_watermarks(crtc); - intel_enable_pipe(dev_priv, pipe, - intel_crtc->config.has_pch_encoder, false); - intel_enable_primary_plane(dev_priv, plane, pipe); + intel_enable_pipe(intel_crtc); + intel_enable_primary_hw_plane(dev_priv, plane, pipe); intel_enable_planes(crtc); intel_crtc_update_cursor(crtc, true); @@ -3631,7 +3747,7 @@ static void haswell_crtc_enable_planes(struct drm_crtc *crtc) int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - intel_enable_primary_plane(dev_priv, plane, pipe); + intel_enable_primary_hw_plane(dev_priv, plane, pipe); intel_enable_planes(crtc); intel_crtc_update_cursor(crtc, true); @@ -3661,7 +3777,7 @@ static void haswell_crtc_disable_planes(struct drm_crtc *crtc) intel_crtc_update_cursor(crtc, false); intel_disable_planes(crtc); - intel_disable_primary_plane(dev_priv, plane, pipe); + intel_disable_primary_hw_plane(dev_priv, plane, pipe); } /* @@ -3733,8 +3849,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_ddi_enable_transcoder_func(crtc); intel_update_watermarks(crtc); - intel_enable_pipe(dev_priv, pipe, - intel_crtc->config.has_pch_encoder, false); + intel_enable_pipe(intel_crtc); if (intel_crtc->config.has_pch_encoder) lpt_pch_enable(crtc); @@ -3748,16 +3863,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) * to change the workaround. */ haswell_mode_set_planes_workaround(intel_crtc); haswell_crtc_enable_planes(crtc); - - /* - * There seems to be a race in PCH platform hw (at least on some - * outputs) where an enabled pipe still completes any pageflip right - * away (as if the pipe is off) instead of waiting for vblank. As soon - * as the first vblank happend, everything works as expected. Hence just - * wait for one vblank before returning to avoid strange things - * happening. 
- */ - intel_wait_for_vblank(dev, intel_crtc->pipe); } static void ironlake_pfit_disable(struct intel_crtc *crtc) @@ -3800,7 +3905,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) intel_crtc_update_cursor(crtc, false); intel_disable_planes(crtc); - intel_disable_primary_plane(dev_priv, plane, pipe); + intel_disable_primary_hw_plane(dev_priv, plane, pipe); if (intel_crtc->config.has_pch_encoder) intel_set_pch_fifo_underrun_reporting(dev, pipe, false); @@ -3972,6 +4077,117 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc) I915_WRITE(BCLRPAT(crtc->pipe), 0); } +#define for_each_power_domain(domain, mask) \ + for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ + if ((1 << (domain)) & (mask)) + +enum intel_display_power_domain +intel_display_port_power_domain(struct intel_encoder *intel_encoder) +{ + struct drm_device *dev = intel_encoder->base.dev; + struct intel_digital_port *intel_dig_port; + + switch (intel_encoder->type) { + case INTEL_OUTPUT_UNKNOWN: + /* Only DDI platforms should ever use this output type */ + WARN_ON_ONCE(!HAS_DDI(dev)); + case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_HDMI: + case INTEL_OUTPUT_EDP: + intel_dig_port = enc_to_dig_port(&intel_encoder->base); + switch (intel_dig_port->port) { + case PORT_A: + return POWER_DOMAIN_PORT_DDI_A_4_LANES; + case PORT_B: + return POWER_DOMAIN_PORT_DDI_B_4_LANES; + case PORT_C: + return POWER_DOMAIN_PORT_DDI_C_4_LANES; + case PORT_D: + return POWER_DOMAIN_PORT_DDI_D_4_LANES; + default: + WARN_ON_ONCE(1); + return POWER_DOMAIN_PORT_OTHER; + } + case INTEL_OUTPUT_ANALOG: + return POWER_DOMAIN_PORT_CRT; + case INTEL_OUTPUT_DSI: + return POWER_DOMAIN_PORT_DSI; + default: + return POWER_DOMAIN_PORT_OTHER; + } +} + +static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct intel_encoder *intel_encoder; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum pipe pipe = intel_crtc->pipe; + bool pfit_enabled = intel_crtc->config.pch_pfit.enabled; + unsigned long mask; + enum transcoder transcoder; + + transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe); + + mask = BIT(POWER_DOMAIN_PIPE(pipe)); + mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); + if (pfit_enabled) + mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); + + for_each_encoder_on_crtc(dev, crtc, intel_encoder) + mask |= BIT(intel_display_port_power_domain(intel_encoder)); + + return mask; +} + +void intel_display_set_init_power(struct drm_i915_private *dev_priv, + bool enable) +{ + if (dev_priv->power_domains.init_power_on == enable) + return; + + if (enable) + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + else + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + + dev_priv->power_domains.init_power_on = enable; +} + +static void modeset_update_crtc_power_domains(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long pipe_domains[I915_MAX_PIPES] = { 0, }; + struct intel_crtc *crtc; + + /* + * First get all needed power domains, then put all unneeded, to avoid + * any unnecessary toggling of the power wells. 
+ */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { + enum intel_display_power_domain domain; + + if (!crtc->base.enabled) + continue; + + pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base); + + for_each_power_domain(domain, pipe_domains[crtc->pipe]) + intel_display_power_get(dev_priv, domain); + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { + enum intel_display_power_domain domain; + + for_each_power_domain(domain, crtc->enabled_power_domains) + intel_display_power_put(dev_priv, domain); + + crtc->enabled_power_domains = pipe_domains[crtc->pipe]; + } + + intel_display_set_init_power(dev_priv, false); +} + int valleyview_get_vco(struct drm_i915_private *dev_priv) { int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; @@ -4088,9 +4304,8 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, /* Looks like the 200MHz CDclk freq doesn't work on some configs */ } -static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv, - unsigned modeset_pipes, - struct intel_crtc_config *pipe_config) +/* compute the max pixel clock for new configuration */ +static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; struct intel_crtc *intel_crtc; @@ -4098,31 +4313,26 @@ static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv, list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) { - if (modeset_pipes & (1 << intel_crtc->pipe)) - max_pixclk = max(max_pixclk, - pipe_config->adjusted_mode.crtc_clock); - else if (intel_crtc->base.enabled) + if (intel_crtc->new_enabled) max_pixclk = max(max_pixclk, - intel_crtc->config.adjusted_mode.crtc_clock); + intel_crtc->new_config->adjusted_mode.crtc_clock); } return max_pixclk; } static void valleyview_modeset_global_pipes(struct drm_device *dev, - unsigned *prepare_pipes, - unsigned modeset_pipes, - struct intel_crtc_config *pipe_config) + unsigned *prepare_pipes) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc; - int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes, - pipe_config); + int max_pixclk = intel_mode_max_pixclk(dev_priv); int cur_cdclk = valleyview_cur_cdclk(dev_priv); if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk) return; + /* disable/enable all currently active pipes while we change cdclk */ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) if (intel_crtc->base.enabled) @@ -4132,12 +4342,13 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev, static void valleyview_modeset_global_resources(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL); + int max_pixclk = intel_mode_max_pixclk(dev_priv); int cur_cdclk = valleyview_cur_cdclk(dev_priv); int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); if (req_cdclk != cur_cdclk) valleyview_set_cdclk(dev, req_cdclk); + modeset_update_crtc_power_domains(dev); } static void valleyview_crtc_enable(struct drm_crtc *crtc) @@ -4175,8 +4386,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) intel_crtc_load_lut(crtc); intel_update_watermarks(crtc); - intel_enable_pipe(dev_priv, pipe, false, is_dsi); - intel_enable_primary_plane(dev_priv, plane, pipe); + intel_enable_pipe(intel_crtc); + intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); + intel_enable_primary_hw_plane(dev_priv, plane, pipe); intel_enable_planes(crtc); intel_crtc_update_cursor(crtc, 
true); @@ -4213,8 +4425,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) intel_crtc_load_lut(crtc); intel_update_watermarks(crtc); - intel_enable_pipe(dev_priv, pipe, false, false); - intel_enable_primary_plane(dev_priv, plane, pipe); + intel_enable_pipe(intel_crtc); + intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); + intel_enable_primary_hw_plane(dev_priv, plane, pipe); intel_enable_planes(crtc); /* The fixup needs to happen before cursor is enabled */ if (IS_G4X(dev)) @@ -4270,8 +4483,9 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) intel_crtc_dpms_overlay(intel_crtc, false); intel_crtc_update_cursor(crtc, false); intel_disable_planes(crtc); - intel_disable_primary_plane(dev_priv, plane, pipe); + intel_disable_primary_hw_plane(dev_priv, plane, pipe); + intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); intel_disable_pipe(dev_priv, pipe); i9xx_pfit_disable(intel_crtc); @@ -4365,11 +4579,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc) assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe); assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); - if (crtc->fb) { + if (crtc->primary->fb) { mutex_lock(&dev->struct_mutex); - intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); + intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj); mutex_unlock(&dev->struct_mutex); - crtc->fb = NULL; + crtc->primary->fb = NULL; } /* Update computed state. */ @@ -4583,7 +4797,7 @@ retry: static void hsw_compute_ips_config(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config) { - pipe_config->ips_enabled = i915_enable_ips && + pipe_config->ips_enabled = i915.enable_ips && hsw_crtc_supports_ips(crtc) && pipe_config->pipe_bpp <= 24; } @@ -4784,8 +4998,8 @@ intel_link_compute_m_n(int bits_per_pixel, int nlanes, static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) { - if (i915_panel_use_ssc >= 0) - return i915_panel_use_ssc != 0; + if (i915.panel_use_ssc >= 0) + return i915.panel_use_ssc != 0; return dev_priv->vbt.lvds_use_ssc && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); } @@ -4844,7 +5058,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc, crtc->lowfreq_avail = false; if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && - reduced_clock && i915.powersave) { I915_WRITE(FP1(pipe), fp2); crtc->config.dpll_hw_state.fp1 = fp2; crtc->lowfreq_avail = true; @@ -5161,21 +5375,26 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; struct drm_display_mode *adjusted_mode = &intel_crtc->config.adjusted_mode; - uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end; + uint32_t crtc_vtotal, crtc_vblank_end; + int vsyncshift = 0; /* We need to be careful not to change the adjusted mode, for otherwise * the hw state checker will get angry at the mismatch.
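 *
 * Worked interlace example (editorial, using the new code below): with
 * crtc_htotal = 2200 and crtc_hsync_start = 2008, a non-SDVO output gets
 * vsyncshift = 2008 - 2200 / 2 = 908, an SDVO output gets
 * (2200 - 1) / 2 = 1099, and a negative intermediate result is wrapped
 * back into range by adding one full crtc_htotal.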
*/ crtc_vtotal = adjusted_mode->crtc_vtotal; crtc_vblank_end = adjusted_mode->crtc_vblank_end; - if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { /* the chip adds 2 halflines automatically */ crtc_vtotal -= 1; crtc_vblank_end -= 1; - vsyncshift = adjusted_mode->crtc_hsync_start - - adjusted_mode->crtc_htotal / 2; - } else { - vsyncshift = 0; + + if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) + vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; + else + vsyncshift = adjusted_mode->crtc_hsync_start - + adjusted_mode->crtc_htotal / 2; + if (vsyncshift < 0) + vsyncshift += adjusted_mode->crtc_htotal; } if (INTEL_INFO(dev)->gen > 3) @@ -5259,25 +5478,23 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc, pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w; } -static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc, - struct intel_crtc_config *pipe_config) +void intel_mode_from_pipe_config(struct drm_display_mode *mode, + struct intel_crtc_config *pipe_config) { - struct drm_crtc *crtc = &intel_crtc->base; - - crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay; - crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal; - crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start; - crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end; + mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay; + mode->htotal = pipe_config->adjusted_mode.crtc_htotal; + mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start; + mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end; - crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay; - crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal; - crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start; - crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end; + mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay; + mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal; + mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start; + mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end; - crtc->mode.flags = pipe_config->adjusted_mode.flags; + mode->flags = pipe_config->adjusted_mode.flags; - crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock; - crtc->mode.flags |= pipe_config->adjusted_mode.flags; + mode->clock = pipe_config->adjusted_mode.crtc_clock; + mode->flags |= pipe_config->adjusted_mode.flags; } static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) @@ -5327,10 +5544,13 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) } } - if (!IS_GEN2(dev) && - intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) - pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; - else + if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { + if (INTEL_INFO(dev)->gen < 4 || + intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) + pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; + else + pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; + } else pipeconf |= PIPECONF_PROGRESSIVE; if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range) @@ -5512,6 +5732,67 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, pipe_config->port_clock = clock.dot / 5; } +static void i9xx_get_plane_config(struct intel_crtc *crtc, + struct intel_plane_config *plane_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val, base, offset; + 
int pipe = crtc->pipe, plane = crtc->plane; + int fourcc, pixel_format; + int aligned_height; + + crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL); + if (!crtc->base.primary->fb) { + DRM_DEBUG_KMS("failed to alloc fb\n"); + return; + } + + val = I915_READ(DSPCNTR(plane)); + + if (INTEL_INFO(dev)->gen >= 4) + if (val & DISPPLANE_TILED) + plane_config->tiled = true; + + pixel_format = val & DISPPLANE_PIXFORMAT_MASK; + fourcc = intel_format_to_fourcc(pixel_format); + crtc->base.primary->fb->pixel_format = fourcc; + crtc->base.primary->fb->bits_per_pixel = + drm_format_plane_cpp(fourcc, 0) * 8; + + if (INTEL_INFO(dev)->gen >= 4) { + if (plane_config->tiled) + offset = I915_READ(DSPTILEOFF(plane)); + else + offset = I915_READ(DSPLINOFF(plane)); + base = I915_READ(DSPSURF(plane)) & 0xfffff000; + } else { + base = I915_READ(DSPADDR(plane)); + } + plane_config->base = base; + + val = I915_READ(PIPESRC(pipe)); + crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1; + crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1; + + val = I915_READ(DSPSTRIDE(pipe)); + crtc->base.primary->fb->pitches[0] = val & 0xffffff80; + + aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, + plane_config->tiled); + + plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] * + aligned_height, PAGE_SIZE); + + DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", + pipe, plane, crtc->base.primary->fb->width, + crtc->base.primary->fb->height, + crtc->base.primary->fb->bits_per_pixel, base, + crtc->base.primary->fb->pitches[0], + plane_config->size); + +} + static bool i9xx_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config) { @@ -5519,6 +5800,10 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; uint32_t tmp; + if (!intel_display_power_enabled(dev_priv, + POWER_DOMAIN_PIPE(crtc->pipe))) + return false; + pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; @@ -6180,7 +6465,7 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) * is 2.5%; use 5% for safety's sake. 
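 *
 * Worked example (editorial): target_clock = 148500, bpp = 24 and
 * link_bw = 270000 give bps = 148500 * 24 * 21 / 20 = 3742200, so
 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes. The two forms only
 * differ when bps divides link_bw * 8 exactly: the old
 * "bps / (link_bw * 8) + 1" then asked for one lane too many, while
 * DIV_ROUND_UP() rounds up only on a non-zero remainder.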
*/ u32 bps = target_clock * bpp * 21 / 20; - return bps / (link_bw * 8) + 1; + return DIV_ROUND_UP(bps, link_bw * 8); } static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) @@ -6348,7 +6633,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, if (intel_crtc->config.has_dp_encoder) intel_dp_set_m_n(intel_crtc); - if (is_lvds && has_reduced_clock && i915_powersave) + if (is_lvds && has_reduced_clock && i915.powersave) intel_crtc->lowfreq_avail = true; else intel_crtc->lowfreq_avail = false; @@ -6455,6 +6740,66 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc, } } +static void ironlake_get_plane_config(struct intel_crtc *crtc, + struct intel_plane_config *plane_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val, base, offset; + int pipe = crtc->pipe, plane = crtc->plane; + int fourcc, pixel_format; + int aligned_height; + + crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL); + if (!crtc->base.primary->fb) { + DRM_DEBUG_KMS("failed to alloc fb\n"); + return; + } + + val = I915_READ(DSPCNTR(plane)); + + if (INTEL_INFO(dev)->gen >= 4) + if (val & DISPPLANE_TILED) + plane_config->tiled = true; + + pixel_format = val & DISPPLANE_PIXFORMAT_MASK; + fourcc = intel_format_to_fourcc(pixel_format); + crtc->base.primary->fb->pixel_format = fourcc; + crtc->base.primary->fb->bits_per_pixel = + drm_format_plane_cpp(fourcc, 0) * 8; + + base = I915_READ(DSPSURF(plane)) & 0xfffff000; + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { + offset = I915_READ(DSPOFFSET(plane)); + } else { + if (plane_config->tiled) + offset = I915_READ(DSPTILEOFF(plane)); + else + offset = I915_READ(DSPLINOFF(plane)); + } + plane_config->base = base; + + val = I915_READ(PIPESRC(pipe)); + crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1; + crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1; + + val = I915_READ(DSPSTRIDE(pipe)); + crtc->base.primary->fb->pitches[0] = val & 0xffffff80; + + aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, + plane_config->tiled); + + plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] * + aligned_height, PAGE_SIZE); + + DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", + pipe, plane, crtc->base.primary->fb->width, + crtc->base.primary->fb->height, + crtc->base.primary->fb->bits_per_pixel, base, + crtc->base.primary->fb->pitches[0], + plane_config->size); +} + static bool ironlake_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config) { @@ -6629,6 +6974,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) { uint32_t val; + unsigned long irqflags; val = I915_READ(LCPLL_CTL); @@ -6636,9 +6982,22 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) return; - /* Make sure we're not on PC8 state before disabling PC8, otherwise - * we'll hang the machine! */ - gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); + /* + * Make sure we're not on PC8 state before disabling PC8, otherwise + * we'll hang the machine. To prevent PC8 state, just enable force_wake. 
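+ * (Editorial note: holding a forcewake reference keeps the GT out of
+ * RC6, and PC8+ is only entered from RC6 (see the package C8 comment
+ * further down), so forcewake alone is enough to keep PC8 away here.)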
+ * + * The other problem is that hsw_restore_lcpll() is called as part of + * the runtime PM resume sequence, so we can't just call + * gen6_gt_force_wake_get() because that function calls + * intel_runtime_pm_get(), and we can't change the runtime PM refcount + * while we are in the resume sequence. So to solve this problem we have + * to call special forcewake code that doesn't touch runtime PM and + * doesn't enable the forcewake delayed work. + */ + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + if (dev_priv->uncore.forcewake_count++ == 0) + dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); if (val & LCPLL_POWER_DOWN_ALLOW) { val &= ~LCPLL_POWER_DOWN_ALLOW; @@ -6672,26 +7031,45 @@ DRM_ERROR("Switching back to LCPLL failed\n"); } - gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); + /* See the big comment above. */ + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + if (--dev_priv->uncore.forcewake_count == 0) + dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -void hsw_enable_pc8_work(struct work_struct *__work) +/* + * Package states C8 and deeper are really deep PC states that can only be + * reached when all the devices on the system allow it, so even if the graphics + * device allows PC8+, it doesn't mean the system will actually get to these + * states. Our driver only allows PC8+ when going into runtime PM. + * + * The requirements for PC8+ are that all the outputs are disabled, the power + * well is disabled and most interrupts are disabled, and these are also + * requirements for runtime PM. When these conditions are met, we manually do + * the other conditions: disable the interrupts, clocks and switch LCPLL refclk + * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard + * hang the machine. + * + * When we really reach PC8 or deeper states (not just when we allow it) we lose + * the state of some registers, so when we come back from PC8+ we need to + * restore this state. We don't get into PC8+ if we're not in RC6, so we don't + * need to take care of the registers kept by RC6. Notice that this happens even + * if we don't put the device in PCI D3 state (which is what currently happens + * because of the runtime PM support). + * + * For more, read "Display Sequences for Package C8" in the hardware + * documentation.
+ */ +void hsw_enable_pc8(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = - container_of(to_delayed_work(__work), struct drm_i915_private, - pc8.enable_work); struct drm_device *dev = dev_priv->dev; uint32_t val; WARN_ON(!HAS_PC8(dev)); - if (dev_priv->pc8.enabled) - return; - DRM_DEBUG_KMS("Enabling package C8+\n"); - dev_priv->pc8.enabled = true; - if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { val = I915_READ(SOUTH_DSPCLK_GATE_D); val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; @@ -6699,51 +7077,21 @@ void hsw_enable_pc8_work(struct work_struct *__work) } lpt_disable_clkout_dp(dev); - hsw_pc8_disable_interrupts(dev); + hsw_runtime_pm_disable_interrupts(dev); hsw_disable_lcpll(dev_priv, true, true); - - intel_runtime_pm_put(dev_priv); -} - -static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv) -{ - WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock)); - WARN(dev_priv->pc8.disable_count < 1, - "pc8.disable_count: %d\n", dev_priv->pc8.disable_count); - - dev_priv->pc8.disable_count--; - if (dev_priv->pc8.disable_count != 0) - return; - - schedule_delayed_work(&dev_priv->pc8.enable_work, - msecs_to_jiffies(i915_pc8_timeout)); } -static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv) +void hsw_disable_pc8(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; uint32_t val; - WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock)); - WARN(dev_priv->pc8.disable_count < 0, - "pc8.disable_count: %d\n", dev_priv->pc8.disable_count); - - dev_priv->pc8.disable_count++; - if (dev_priv->pc8.disable_count != 1) - return; - WARN_ON(!HAS_PC8(dev)); - cancel_delayed_work_sync(&dev_priv->pc8.enable_work); - if (!dev_priv->pc8.enabled) - return; - DRM_DEBUG_KMS("Disabling package C8+\n"); - intel_runtime_pm_get(dev_priv); - hsw_restore_lcpll(dev_priv); - hsw_pc8_restore_interrupts(dev); + hsw_runtime_pm_restore_interrupts(dev); lpt_init_pch_refclk(dev); if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { @@ -6757,185 +7105,11 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->rps.hw_lock); gen6_update_ring_freq(dev); mutex_unlock(&dev_priv->rps.hw_lock); - dev_priv->pc8.enabled = false; -} - -void hsw_enable_package_c8(struct drm_i915_private *dev_priv) -{ - if (!HAS_PC8(dev_priv->dev)) - return; - - mutex_lock(&dev_priv->pc8.lock); - __hsw_enable_package_c8(dev_priv); - mutex_unlock(&dev_priv->pc8.lock); -} - -void hsw_disable_package_c8(struct drm_i915_private *dev_priv) -{ - if (!HAS_PC8(dev_priv->dev)) - return; - - mutex_lock(&dev_priv->pc8.lock); - __hsw_disable_package_c8(dev_priv); - mutex_unlock(&dev_priv->pc8.lock); -} - -static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - struct intel_crtc *crtc; - uint32_t val; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) - if (crtc->base.enabled) - return false; - - /* This case is still possible since we have the i915.disable_power_well - * parameter and also the KVMr or something else might be requesting the - * power well. */ - val = I915_READ(HSW_PWR_WELL_DRIVER); - if (val != 0) { - DRM_DEBUG_KMS("Not enabling PC8: power well on\n"); - return false; - } - - return true; -} - -/* Since we're called from modeset_global_resources there's no way to - * symmetrically increase and decrease the refcount, so we use - * dev_priv->pc8.requirements_met to track whether we already have the refcount - * or not. 
- */ -static void hsw_update_package_c8(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - bool allow; - - if (!HAS_PC8(dev_priv->dev)) - return; - - if (!i915_enable_pc8) - return; - - mutex_lock(&dev_priv->pc8.lock); - - allow = hsw_can_enable_package_c8(dev_priv); - - if (allow == dev_priv->pc8.requirements_met) - goto done; - - dev_priv->pc8.requirements_met = allow; - - if (allow) - __hsw_enable_package_c8(dev_priv); - else - __hsw_disable_package_c8(dev_priv); - -done: - mutex_unlock(&dev_priv->pc8.lock); -} - -static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv) -{ - if (!HAS_PC8(dev_priv->dev)) - return; - - mutex_lock(&dev_priv->pc8.lock); - if (!dev_priv->pc8.gpu_idle) { - dev_priv->pc8.gpu_idle = true; - __hsw_enable_package_c8(dev_priv); - } - mutex_unlock(&dev_priv->pc8.lock); -} - -static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv) -{ - if (!HAS_PC8(dev_priv->dev)) - return; - - mutex_lock(&dev_priv->pc8.lock); - if (dev_priv->pc8.gpu_idle) { - dev_priv->pc8.gpu_idle = false; - __hsw_disable_package_c8(dev_priv); - } - mutex_unlock(&dev_priv->pc8.lock); -} - -#define for_each_power_domain(domain, mask) \ - for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ - if ((1 << (domain)) & (mask)) - -static unsigned long get_pipe_power_domains(struct drm_device *dev, - enum pipe pipe, bool pfit_enabled) -{ - unsigned long mask; - enum transcoder transcoder; - - transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe); - - mask = BIT(POWER_DOMAIN_PIPE(pipe)); - mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); - if (pfit_enabled) - mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); - - return mask; -} - -void intel_display_set_init_power(struct drm_device *dev, bool enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->power_domains.init_power_on == enable) - return; - - if (enable) - intel_display_power_get(dev, POWER_DOMAIN_INIT); - else - intel_display_power_put(dev, POWER_DOMAIN_INIT); - - dev_priv->power_domains.init_power_on = enable; -} - -static void modeset_update_power_wells(struct drm_device *dev) -{ - unsigned long pipe_domains[I915_MAX_PIPES] = { 0, }; - struct intel_crtc *crtc; - - /* - * First get all needed power domains, then put all unneeded, to avoid - * any unnecessary toggling of the power wells. 
- */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { - enum intel_display_power_domain domain; - - if (!crtc->base.enabled) - continue; - - pipe_domains[crtc->pipe] = get_pipe_power_domains(dev, - crtc->pipe, - crtc->config.pch_pfit.enabled); - - for_each_power_domain(domain, pipe_domains[crtc->pipe]) - intel_display_power_get(dev, domain); - } - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { - enum intel_display_power_domain domain; - - for_each_power_domain(domain, crtc->enabled_power_domains) - intel_display_power_put(dev, domain); - - crtc->enabled_power_domains = pipe_domains[crtc->pipe]; - } - - intel_display_set_init_power(dev, false); } static void haswell_modeset_global_resources(struct drm_device *dev) { - modeset_update_power_wells(dev); - hsw_update_package_c8(dev); + modeset_update_crtc_power_domains(dev); } static int haswell_crtc_mode_set(struct drm_crtc *crtc, @@ -6985,6 +7159,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, enum intel_display_power_domain pfit_domain; uint32_t tmp; + if (!intel_display_power_enabled(dev_priv, + POWER_DOMAIN_PIPE(crtc->pipe))) + return false; + pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; @@ -7010,7 +7188,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, pipe_config->cpu_transcoder = TRANSCODER_EDP; } - if (!intel_display_power_enabled(dev, + if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) return false; @@ -7038,7 +7216,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, intel_get_pipe_timings(crtc, pipe_config); pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); - if (intel_display_power_enabled(dev, pfit_domain)) + if (intel_display_power_enabled(dev_priv, pfit_domain)) ironlake_get_pfit_config(crtc, pipe_config); if (IS_HASWELL(dev)) @@ -7435,10 +7613,26 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) bool visible = base != 0; if (intel_crtc->cursor_visible != visible) { + int16_t width = intel_crtc->cursor_width; uint32_t cntl = I915_READ(CURCNTR(pipe)); if (base) { cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); - cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; + cntl |= MCURSOR_GAMMA_ENABLE; + + switch (width) { + case 64: + cntl |= CURSOR_MODE_64_ARGB_AX; + break; + case 128: + cntl |= CURSOR_MODE_128_ARGB_AX; + break; + case 256: + cntl |= CURSOR_MODE_256_ARGB_AX; + break; + default: + WARN_ON(1); + return; + } cntl |= pipe << 28; /* Connect to correct pipe */ } else { cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); @@ -7463,10 +7657,25 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) bool visible = base != 0; if (intel_crtc->cursor_visible != visible) { + int16_t width = intel_crtc->cursor_width; uint32_t cntl = I915_READ(CURCNTR_IVB(pipe)); if (base) { cntl &= ~CURSOR_MODE; - cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; + cntl |= MCURSOR_GAMMA_ENABLE; + switch (width) { + case 64: + cntl |= CURSOR_MODE_64_ARGB_AX; + break; + case 128: + cntl |= CURSOR_MODE_128_ARGB_AX; + break; + case 256: + cntl |= CURSOR_MODE_256_ARGB_AX; + break; + default: + WARN_ON(1); + return; + } } else { cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); cntl |= CURSOR_MODE_DISABLE; @@ -7550,6 +7759,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_i915_gem_object *obj; + 
unsigned old_width; uint32_t addr; int ret; @@ -7562,9 +7772,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, goto finish; } - /* Currently we only support 64x64 cursors */ - if (width != 64 || height != 64) { - DRM_ERROR("we currently only support 64x64 cursors\n"); + /* Check for which cursor types we support */ + if (!((width == 64 && height == 64) || + (width == 128 && height == 128 && !IS_GEN2(dev)) || + (width == 256 && height == 256 && !IS_GEN2(dev)))) { + DRM_DEBUG("Cursor dimension not supported\n"); return -EINVAL; } @@ -7573,18 +7785,18 @@ return -ENOENT; if (obj->base.size < width * height * 4) { - DRM_ERROR("buffer is to small\n"); + DRM_DEBUG_KMS("buffer is too small\n"); ret = -ENOMEM; goto fail; } /* we only need to pin inside GTT if cursor is non-phy */ mutex_lock(&dev->struct_mutex); - if (!dev_priv->info->cursor_needs_physical) { + if (!INTEL_INFO(dev)->cursor_needs_physical) { unsigned alignment; if (obj->tiling_mode) { - DRM_ERROR("cursor cannot be tiled\n"); + DRM_DEBUG_KMS("cursor cannot be tiled\n"); ret = -EINVAL; goto fail_locked; } @@ -7600,13 +7812,13 @@ ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL); if (ret) { - DRM_ERROR("failed to move cursor bo into the GTT\n"); + DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n"); goto fail_locked; } ret = i915_gem_object_put_fence(obj); if (ret) { - DRM_ERROR("failed to release fence for cursor"); + DRM_DEBUG_KMS("failed to release fence for cursor"); goto fail_unpin; } @@ -7617,7 +7829,7 @@ (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, align); if (ret) { - DRM_ERROR("failed to attach phys object\n"); + DRM_DEBUG_KMS("failed to attach phys object\n"); goto fail_locked; } addr = obj->phys_obj->handle->busaddr; @@ -7628,7 +7840,7 @@ finish: if (intel_crtc->cursor_bo) { - if (dev_priv->info->cursor_needs_physical) { + if (INTEL_INFO(dev)->cursor_needs_physical) { if (intel_crtc->cursor_bo != obj) i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); } else @@ -7638,13 +7850,18 @@ mutex_unlock(&dev->struct_mutex); + old_width = intel_crtc->cursor_width; + intel_crtc->cursor_addr = addr; intel_crtc->cursor_bo = obj; intel_crtc->cursor_width = width; intel_crtc->cursor_height = height; - if (intel_crtc->active) + if (intel_crtc->active) { + if (old_width != width) + intel_update_watermarks(crtc); intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); + } return 0; fail_unpin: @@ -7690,10 +7907,10 @@ static struct drm_display_mode load_detect_mode = { 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), }; -static struct drm_framebuffer * -intel_framebuffer_create(struct drm_device *dev, - struct drm_mode_fb_cmd2 *mode_cmd, - struct drm_i915_gem_object *obj) +struct drm_framebuffer * +__intel_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_i915_gem_object *obj) { struct intel_framebuffer *intel_fb; int ret; @@ -7704,12 +7921,7 @@ intel_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOMEM); } - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto err; - ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); - mutex_unlock(&dev->struct_mutex); if (ret) goto err; @@ -7721,6 +7933,23 @@
err: return ERR_PTR(ret); } +static struct drm_framebuffer * +intel_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_i915_gem_object *obj) +{ + struct drm_framebuffer *fb; + int ret; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ERR_PTR(ret); + fb = __intel_framebuffer_create(dev, mode_cmd, obj); + mutex_unlock(&dev->struct_mutex); + + return fb; +} + static u32 intel_framebuffer_pitch_for_width(int width, int bpp) { @@ -7766,14 +7995,16 @@ mode_fits_in_fbdev(struct drm_device *dev, struct drm_i915_gem_object *obj; struct drm_framebuffer *fb; - if (dev_priv->fbdev == NULL) + if (!dev_priv->fbdev) return NULL; - obj = dev_priv->fbdev->ifb.obj; - if (obj == NULL) + if (!dev_priv->fbdev->fb) return NULL; - fb = &dev_priv->fbdev->ifb.base; + obj = dev_priv->fbdev->fb->obj; + BUG_ON(!obj); + + fb = &dev_priv->fbdev->fb->base; if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, fb->bits_per_pixel)) return NULL; @@ -7855,6 +8086,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, to_intel_connector(connector)->new_encoder = intel_encoder; intel_crtc = to_intel_crtc(crtc); + intel_crtc->new_enabled = true; + intel_crtc->new_config = &intel_crtc->config; old->dpms_mode = connector->dpms; old->load_detect_temp = true; old->release_fb = NULL; @@ -7878,21 +8111,28 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); if (IS_ERR(fb)) { DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); - mutex_unlock(&crtc->mutex); - return false; + goto fail; } if (intel_set_mode(crtc, mode, 0, 0, fb)) { DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); if (old->release_fb) old->release_fb->funcs->destroy(old->release_fb); - mutex_unlock(&crtc->mutex); - return false; + goto fail; } /* let the connector get through one full cycle before testing */ intel_wait_for_vblank(dev, intel_crtc->pipe); return true; + + fail: + intel_crtc->new_enabled = crtc->enabled; + if (intel_crtc->new_enabled) + intel_crtc->new_config = &intel_crtc->config; + else + intel_crtc->new_config = NULL; + mutex_unlock(&crtc->mutex); + return false; } void intel_release_load_detect_pipe(struct drm_connector *connector, @@ -7902,6 +8142,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, intel_attached_encoder(connector); struct drm_encoder *encoder = &intel_encoder->base; struct drm_crtc *crtc = encoder->crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, drm_get_connector_name(connector), @@ -7910,6 +8151,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, if (old->load_detect_temp) { to_intel_connector(connector)->new_encoder = NULL; intel_encoder->new_crtc = NULL; + intel_crtc->new_enabled = false; + intel_crtc->new_config = NULL; intel_set_mode(crtc, NULL, 0, 0, NULL); if (old->release_fb) { @@ -8122,7 +8365,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, static void intel_increase_pllclock(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int dpll_reg = DPLL(pipe); @@ -8153,7 +8396,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc) static void intel_decrease_pllclock(struct 
drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); if (HAS_PCH_SPLIT(dev)) @@ -8190,8 +8433,12 @@ void intel_mark_busy(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - hsw_package_c8_gpu_busy(dev_priv); + if (dev_priv->mm.busy) + return; + + intel_runtime_pm_get(dev_priv); i915_update_gfx_val(dev_priv); + dev_priv->mm.busy = true; } void intel_mark_idle(struct drm_device *dev) @@ -8199,20 +8446,26 @@ void intel_mark_idle(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; - hsw_package_c8_gpu_idle(dev_priv); - - if (!i915_powersave) + if (!dev_priv->mm.busy) return; + dev_priv->mm.busy = false; + + if (!i915.powersave) + goto out; + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - if (!crtc->fb) + if (!crtc->primary->fb) continue; intel_decrease_pllclock(crtc); } - if (dev_priv->info->gen >= 6) + if (INTEL_INFO(dev)->gen >= 6) gen6_rps_idle(dev->dev_private); + +out: + intel_runtime_pm_put(dev_priv); } void intel_mark_fb_busy(struct drm_i915_gem_object *obj, @@ -8221,14 +8474,14 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj, struct drm_device *dev = obj->base.dev; struct drm_crtc *crtc; - if (!i915_powersave) + if (!i915.powersave) return; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - if (!crtc->fb) + if (!crtc->primary->fb) continue; - if (to_intel_framebuffer(crtc->fb)->obj != obj) + if (to_intel_framebuffer(crtc->primary->fb)->obj != obj) continue; intel_increase_pllclock(crtc); @@ -8284,7 +8537,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) static void do_intel_finish_page_flip(struct drm_device *dev, struct drm_crtc *crtc) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; unsigned long flags; @@ -8325,7 +8578,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev, void intel_finish_page_flip(struct drm_device *dev, int pipe) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; do_intel_finish_page_flip(dev, crtc); @@ -8333,7 +8586,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) void intel_finish_page_flip_plane(struct drm_device *dev, int plane) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; do_intel_finish_page_flip(dev, crtc); @@ -8341,7 +8594,7 @@ void intel_finish_page_flip_plane(struct drm_device *dev, int plane) void intel_prepare_page_flip(struct drm_device *dev, int plane) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); unsigned long flags; @@ -8656,7 +8909,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_framebuffer *old_fb = crtc->fb; + struct drm_framebuffer *old_fb = crtc->primary->fb; struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj; struct intel_crtc *intel_crtc = 
to_intel_crtc(crtc); struct intel_unpin_work *work; @@ -8664,7 +8917,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, int ret; /* Can't change pixel format via MI display flips. */ - if (fb->pixel_format != crtc->fb->pixel_format) + if (fb->pixel_format != crtc->primary->fb->pixel_format) return -EINVAL; /* @@ -8672,10 +8925,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, * Note that pitch changes could also affect these register. */ if (INTEL_INFO(dev)->gen > 3 && - (fb->offsets[0] != crtc->fb->offsets[0] || - fb->pitches[0] != crtc->fb->pitches[0])) + (fb->offsets[0] != crtc->primary->fb->offsets[0] || + fb->pitches[0] != crtc->primary->fb->pitches[0])) return -EINVAL; + if (i915_terminally_wedged(&dev_priv->gpu_error)) + goto out_hang; + work = kzalloc(sizeof(*work), GFP_KERNEL); if (work == NULL) return -ENOMEM; @@ -8713,7 +8969,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, drm_gem_object_reference(&work->old_fb_obj->base); drm_gem_object_reference(&obj->base); - crtc->fb = fb; + crtc->primary->fb = fb; work->pending_flip_obj = obj; @@ -8736,7 +8992,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, cleanup_pending: atomic_dec(&intel_crtc->unpin_work_count); - crtc->fb = old_fb; + crtc->primary->fb = old_fb; drm_gem_object_unreference(&work->old_fb_obj->base); drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); @@ -8750,6 +9006,13 @@ cleanup: free_work: kfree(work); + if (ret == -EIO) { +out_hang: + intel_crtc_wait_for_pending_flips(crtc); + ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb); + if (ret == 0 && event) + drm_send_vblank_event(dev, intel_crtc->pipe, event); + } return ret; } @@ -8766,6 +9029,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = { */ static void intel_modeset_update_staged_output_state(struct drm_device *dev) { + struct intel_crtc *crtc; struct intel_encoder *encoder; struct intel_connector *connector; @@ -8780,6 +9044,16 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev) encoder->new_crtc = to_intel_crtc(encoder->base.crtc); } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + base.head) { + crtc->new_enabled = crtc->base.enabled; + + if (crtc->new_enabled) + crtc->new_config = &crtc->config; + else + crtc->new_config = NULL; + } } /** @@ -8789,6 +9063,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev) */ static void intel_modeset_commit_output_state(struct drm_device *dev) { + struct intel_crtc *crtc; struct intel_encoder *encoder; struct intel_connector *connector; @@ -8801,6 +9076,11 @@ static void intel_modeset_commit_output_state(struct drm_device *dev) base.head) { encoder->base.crtc = &encoder->new_crtc->base; } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + base.head) { + crtc->base.enabled = crtc->new_enabled; + } } static void @@ -8941,23 +9221,47 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide); } -static bool check_encoder_cloning(struct drm_crtc *crtc) +static bool encoders_cloneable(const struct intel_encoder *a, + const struct intel_encoder *b) { - int num_encoders = 0; - bool uncloneable_encoders = false; + /* masks could be asymmetric, so check both ways */ + return a == b || (a->cloneable & (1 << b->type) && + b->cloneable & (1 << a->type)); +} + +static bool check_single_encoder_cloning(struct intel_crtc *crtc, + struct intel_encoder *encoder) +{ + struct drm_device *dev = crtc->base.dev; + struct 
intel_encoder *source_encoder; + + list_for_each_entry(source_encoder, + &dev->mode_config.encoder_list, base.head) { + if (source_encoder->new_crtc != crtc) + continue; + + if (!encoders_cloneable(encoder, source_encoder)) + return false; + } + + return true; +} + +static bool check_encoder_cloning(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; struct intel_encoder *encoder; - list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, - base.head) { - if (&encoder->new_crtc->base != crtc) + list_for_each_entry(encoder, + &dev->mode_config.encoder_list, base.head) { + if (encoder->new_crtc != crtc) continue; - num_encoders++; - if (!encoder->cloneable) - uncloneable_encoders = true; + if (!check_single_encoder_cloning(crtc, encoder)) + return false; } - return !(num_encoders > 1 && uncloneable_encoders); + return true; } static struct intel_crtc_config * @@ -8971,7 +9275,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, int plane_bpp, ret = -EINVAL; bool retry = true; - if (!check_encoder_cloning(crtc)) { + if (!check_encoder_cloning(to_intel_crtc(crtc))) { DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); return ERR_PTR(-EINVAL); } @@ -9127,29 +9431,22 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes, *prepare_pipes |= 1 << encoder->new_crtc->pipe; } - /* Check for any pipes that will be fully disabled ... */ + /* Check for pipes that will be enabled/disabled ... */ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) { - bool used = false; - - /* Don't try to disable disabled crtcs. */ - if (!intel_crtc->base.enabled) + if (intel_crtc->base.enabled == intel_crtc->new_enabled) continue; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { - if (encoder->new_crtc == intel_crtc) - used = true; - } - - if (!used) + if (!intel_crtc->new_enabled) *disable_pipes |= 1 << intel_crtc->pipe; + else + *prepare_pipes |= 1 << intel_crtc->pipe; } /* set_mode is also used to update properties on life display pipes. */ intel_crtc = to_intel_crtc(crtc); - if (crtc->enabled) + if (intel_crtc->new_enabled) *prepare_pipes |= 1 << intel_crtc->pipe; /* @@ -9208,10 +9505,13 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes) intel_modeset_commit_output_state(dev); - /* Update computed state. */ + /* Double check state. 
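+ * (Editorial gloss: by this point crtc->base.enabled, the staged
+ * new_enabled flag and the presence of a staged new_config must all
+ * agree, which is exactly what the WARN_ONs below assert.)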
*/ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) { - intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base); + WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base)); + WARN_ON(intel_crtc->new_config && + intel_crtc->new_config != &intel_crtc->config); + WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config); } list_for_each_entry(connector, &dev->mode_config.connector_list, head) { @@ -9380,10 +9680,8 @@ intel_pipe_config_compare(struct drm_device *dev, if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) PIPE_CONF_CHECK_I(pipe_bpp); - if (!HAS_DDI(dev)) { - PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); - PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); - } + PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); + PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I @@ -9471,7 +9769,7 @@ check_encoder_state(struct drm_device *dev) static void check_crtc_state(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *crtc; struct intel_encoder *encoder; struct intel_crtc_config pipe_config; @@ -9539,7 +9837,7 @@ check_crtc_state(struct drm_device *dev) static void check_shared_dpll_state(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *crtc; struct intel_dpll_hw_state dpll_hw_state; int i; @@ -9612,7 +9910,7 @@ static int __intel_set_mode(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *fb) { struct drm_device *dev = crtc->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_display_mode *saved_mode; struct intel_crtc_config *pipe_config = NULL; struct intel_crtc *intel_crtc; @@ -9643,6 +9941,7 @@ static int __intel_set_mode(struct drm_crtc *crtc, } intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, "[modeset]"); + to_intel_crtc(crtc)->new_config = pipe_config; } /* @@ -9653,8 +9952,7 @@ static int __intel_set_mode(struct drm_crtc *crtc, * adjusted_mode bits in the crtc directly. */ if (IS_VALLEYVIEW(dev)) { - valleyview_modeset_global_pipes(dev, &prepare_pipes, - modeset_pipes, pipe_config); + valleyview_modeset_global_pipes(dev, &prepare_pipes); /* may have added more to prepare_pipes than we should */ prepare_pipes &= ~disable_pipes; @@ -9676,6 +9974,7 @@ static int __intel_set_mode(struct drm_crtc *crtc, /* mode_set/enable/disable functions rely on a correct pipe * config. 
*/ to_intel_crtc(crtc)->config = *pipe_config; + to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config; /* * Calculate and store various constants which @@ -9734,7 +10033,7 @@ static int intel_set_mode(struct drm_crtc *crtc, void intel_crtc_restore_mode(struct drm_crtc *crtc) { - intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); + intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb); } #undef for_each_intel_crtc_masked @@ -9746,16 +10045,24 @@ static void intel_set_config_free(struct intel_set_config *config) kfree(config->save_connector_encoders); kfree(config->save_encoder_crtcs); + kfree(config->save_crtc_enabled); kfree(config); } static int intel_set_config_save_state(struct drm_device *dev, struct intel_set_config *config) { + struct drm_crtc *crtc; struct drm_encoder *encoder; struct drm_connector *connector; int count; + config->save_crtc_enabled = + kcalloc(dev->mode_config.num_crtc, + sizeof(bool), GFP_KERNEL); + if (!config->save_crtc_enabled) + return -ENOMEM; + config->save_encoder_crtcs = kcalloc(dev->mode_config.num_encoder, sizeof(struct drm_crtc *), GFP_KERNEL); @@ -9773,6 +10080,11 @@ static int intel_set_config_save_state(struct drm_device *dev, * restored, not the drivers personal bookkeeping. */ count = 0; + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + config->save_crtc_enabled[count++] = crtc->enabled; + } + + count = 0; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { config->save_encoder_crtcs[count++] = encoder->crtc; } @@ -9788,11 +10100,22 @@ static int intel_set_config_save_state(struct drm_device *dev, static void intel_set_config_restore_state(struct drm_device *dev, struct intel_set_config *config) { + struct intel_crtc *crtc; struct intel_encoder *encoder; struct intel_connector *connector; int count; count = 0; + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { + crtc->new_enabled = config->save_crtc_enabled[count++]; + + if (crtc->new_enabled) + crtc->new_config = &crtc->config; + else + crtc->new_config = NULL; + } + + count = 0; list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { encoder->new_crtc = to_intel_crtc(config->save_encoder_crtcs[count++]); @@ -9834,13 +10157,13 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, * and then just flip_or_move it */ if (is_crtc_connector_off(set)) { config->mode_changed = true; - } else if (set->crtc->fb != set->fb) { + } else if (set->crtc->primary->fb != set->fb) { /* If we have no fb then treat it as a full mode set */ - if (set->crtc->fb == NULL) { + if (set->crtc->primary->fb == NULL) { struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); - if (intel_crtc->active && i915_fastboot) { + if (intel_crtc->active && i915.fastboot) { DRM_DEBUG_KMS("crtc has no fb, will flip\n"); config->fb_changed = true; } else { @@ -9850,7 +10173,7 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, } else if (set->fb == NULL) { config->mode_changed = true; } else if (set->fb->pixel_format != - set->crtc->fb->pixel_format) { + set->crtc->primary->fb->pixel_format) { config->mode_changed = true; } else { config->fb_changed = true; @@ -9876,9 +10199,9 @@ intel_modeset_stage_output_state(struct drm_device *dev, struct drm_mode_set *set, struct intel_set_config *config) { - struct drm_crtc *new_crtc; struct intel_connector *connector; struct intel_encoder *encoder; + struct intel_crtc *crtc; int ro; /* The upper layers ensure that we either disable a crtc or have a list @@ -9921,6 
+10244,8 @@ intel_modeset_stage_output_state(struct drm_device *dev, /* Update crtc of enabled connectors. */ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { + struct drm_crtc *new_crtc; + if (!connector->new_encoder) continue; @@ -9971,9 +10296,58 @@ intel_modeset_stage_output_state(struct drm_device *dev, } /* Now we've also updated encoder->new_crtc for all encoders. */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + base.head) { + crtc->new_enabled = false; + + list_for_each_entry(encoder, + &dev->mode_config.encoder_list, + base.head) { + if (encoder->new_crtc == crtc) { + crtc->new_enabled = true; + break; + } + } + + if (crtc->new_enabled != crtc->base.enabled) { + DRM_DEBUG_KMS("crtc %sabled, full mode switch\n", + crtc->new_enabled ? "en" : "dis"); + config->mode_changed = true; + } + + if (crtc->new_enabled) + crtc->new_config = &crtc->config; + else + crtc->new_config = NULL; + } + return 0; } +static void disable_crtc_nofb(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct intel_encoder *encoder; + struct intel_connector *connector; + + DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n", + pipe_name(crtc->pipe)); + + list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { + if (connector->new_encoder && + connector->new_encoder->new_crtc == crtc) + connector->new_encoder = NULL; + } + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + if (encoder->new_crtc == crtc) + encoder->new_crtc = NULL; + } + + crtc->new_enabled = false; + crtc->new_config = NULL; +} + static int intel_crtc_set_config(struct drm_mode_set *set) { struct drm_device *dev; @@ -10012,7 +10386,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set) save_set.mode = &set->crtc->mode; save_set.x = set->crtc->x; save_set.y = set->crtc->y; - save_set.fb = set->crtc->fb; + save_set.fb = set->crtc->primary->fb; /* Compute whether we need a full modeset, only an fb base update or no * change at all. In the future we might also check whether only the @@ -10040,7 +10414,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set) * flipping, so increasing its cost here shouldn't be a big * deal). */ - if (i915_fastboot && ret == 0) + if (i915.fastboot && ret == 0) intel_modeset_check_state(set->crtc->dev); } @@ -10050,6 +10424,15 @@ static int intel_crtc_set_config(struct drm_mode_set *set) fail: intel_set_config_restore_state(dev, config); + /* + * HACK: if the pipe was on, but we didn't have a framebuffer, + * force the pipe off to avoid oopsing in the modeset code + * due to fb==NULL. This should only happen during boot since + * we don't yet reconstruct the FB from the hardware state. 
+ */ + if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb) + disable_crtc_nofb(to_intel_crtc(save_set.crtc)); + /* Try to restore the config */ if (config->mode_changed && intel_set_mode(save_set.crtc, save_set.mode, @@ -10174,7 +10557,7 @@ static void intel_shared_dpll_init(struct drm_device *dev) static void intel_crtc_init(struct drm_device *dev, int pipe) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc; int i; @@ -10184,6 +10567,16 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); + if (IS_GEN2(dev)) { + intel_crtc->max_cursor_width = GEN2_CURSOR_WIDTH; + intel_crtc->max_cursor_height = GEN2_CURSOR_HEIGHT; + } else { + intel_crtc->max_cursor_width = CURSOR_WIDTH; + intel_crtc->max_cursor_height = CURSOR_HEIGHT; + } + dev->mode_config.cursor_width = intel_crtc->max_cursor_width; + dev->mode_config.cursor_height = intel_crtc->max_cursor_height; + drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); for (i = 0; i < 256; i++) { intel_crtc->lut_r[i] = i; @@ -10255,12 +10648,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder) list_for_each_entry(source_encoder, &dev->mode_config.encoder_list, base.head) { - - if (encoder == source_encoder) - index_mask |= (1 << entry); - - /* Intel hw has only one MUX where enocoders could be cloned. */ - if (encoder->cloneable && source_encoder->cloneable) + if (encoders_cloneable(encoder, source_encoder)) index_mask |= (1 << entry); entry++; @@ -10279,8 +10667,7 @@ static bool has_edp_a(struct drm_device *dev) if ((I915_READ(DP_A) & DP_DETECTED) == 0) return false; - if (IS_GEN5(dev) && - (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) + if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) return false; return true; @@ -10433,18 +10820,13 @@ static void intel_setup_outputs(struct drm_device *dev) drm_helper_move_panel_connectors_to_head(dev); } -void intel_framebuffer_fini(struct intel_framebuffer *fb) -{ - drm_framebuffer_cleanup(&fb->base); - WARN_ON(!fb->obj->framebuffer_references--); - drm_gem_object_unreference_unlocked(&fb->obj->base); -} - static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - intel_framebuffer_fini(intel_fb); + drm_framebuffer_cleanup(fb); + WARN_ON(!intel_fb->obj->framebuffer_references--); + drm_gem_object_unreference_unlocked(&intel_fb->obj->base); kfree(intel_fb); } @@ -10463,12 +10845,12 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { .create_handle = intel_user_framebuffer_create_handle, }; -int intel_framebuffer_init(struct drm_device *dev, - struct intel_framebuffer *intel_fb, - struct drm_mode_fb_cmd2 *mode_cmd, - struct drm_i915_gem_object *obj) +static int intel_framebuffer_init(struct drm_device *dev, + struct intel_framebuffer *intel_fb, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_i915_gem_object *obj) { - int aligned_height, tile_height; + int aligned_height; int pitch_limit; int ret; @@ -10562,9 +10944,8 @@ int intel_framebuffer_init(struct drm_device *dev, if (mode_cmd->offsets[0] != 0) return -EINVAL; - tile_height = IS_GEN2(dev) ? 16 : 8; - aligned_height = ALIGN(mode_cmd->height, - obj->tiling_mode ? tile_height : 1); + aligned_height = intel_align_height(dev, mode_cmd->height, + obj->tiling_mode); /* FIXME drm helper for size checks (especially planar formats)? 
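 *
 * Worked example (editorial): a 1920x1080 XRGB8888 framebuffer on an
 * X-tiled object needs aligned_height = ALIGN(1080, 8) = 1080 rows,
 * assuming the 8-row tile height the replaced code used for non-gen2,
 * so with a 7680-byte pitch (1920 * 4) the backing object must be at
 * least 1080 * 7680 = 8294400 bytes.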
*/ if (obj->base.size < aligned_height * mode_cmd->pitches[0]) return -EINVAL; @@ -10624,32 +11005,40 @@ static void intel_init_display(struct drm_device *dev) if (HAS_DDI(dev)) { dev_priv->display.get_pipe_config = haswell_get_pipe_config; + dev_priv->display.get_plane_config = ironlake_get_plane_config; dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; dev_priv->display.crtc_enable = haswell_crtc_enable; dev_priv->display.crtc_disable = haswell_crtc_disable; dev_priv->display.off = haswell_crtc_off; - dev_priv->display.update_plane = ironlake_update_plane; + dev_priv->display.update_primary_plane = + ironlake_update_primary_plane; } else if (HAS_PCH_SPLIT(dev)) { dev_priv->display.get_pipe_config = ironlake_get_pipe_config; + dev_priv->display.get_plane_config = ironlake_get_plane_config; dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; dev_priv->display.crtc_enable = ironlake_crtc_enable; dev_priv->display.crtc_disable = ironlake_crtc_disable; dev_priv->display.off = ironlake_crtc_off; - dev_priv->display.update_plane = ironlake_update_plane; + dev_priv->display.update_primary_plane = + ironlake_update_primary_plane; } else if (IS_VALLEYVIEW(dev)) { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; + dev_priv->display.get_plane_config = i9xx_get_plane_config; dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; dev_priv->display.crtc_enable = valleyview_crtc_enable; dev_priv->display.crtc_disable = i9xx_crtc_disable; dev_priv->display.off = i9xx_crtc_off; - dev_priv->display.update_plane = i9xx_update_plane; + dev_priv->display.update_primary_plane = + i9xx_update_primary_plane; } else { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; + dev_priv->display.get_plane_config = i9xx_get_plane_config; dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; dev_priv->display.crtc_enable = i9xx_crtc_enable; dev_priv->display.crtc_disable = i9xx_crtc_disable; dev_priv->display.off = i9xx_crtc_off; - dev_priv->display.update_plane = i9xx_update_plane; + dev_priv->display.update_primary_plane = + i9xx_update_primary_plane; } /* Returns the core display clock speed */ @@ -10839,6 +11228,9 @@ static struct intel_quirk intel_quirks[] = { /* Acer Aspire 4736Z */ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, + + /* Acer Aspire 5336 */ + { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, }; static void intel_init_quirks(struct drm_device *dev) @@ -10869,6 +11261,7 @@ static void i915_disable_vga(struct drm_device *dev) u8 sr1; u32 vga_reg = i915_vgacntrl_reg(dev); + /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); outb(SR01, VGA_SR_INDEX); sr1 = inb(VGA_SR_DATA); @@ -10901,7 +11294,9 @@ void intel_modeset_suspend_hw(struct drm_device *dev) void intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int i, j, ret; + int sprite, ret; + enum pipe pipe; + struct intel_crtc *crtc; drm_mode_config_init(dev); @@ -10938,13 +11333,13 @@ void intel_modeset_init(struct drm_device *dev) INTEL_INFO(dev)->num_pipes, INTEL_INFO(dev)->num_pipes > 1 ? 
"s" : ""); - for_each_pipe(i) { - intel_crtc_init(dev, i); - for (j = 0; j < dev_priv->num_plane; j++) { - ret = intel_plane_init(dev, i, j); + for_each_pipe(pipe) { + intel_crtc_init(dev, pipe); + for_each_sprite(pipe, sprite) { + ret = intel_plane_init(dev, pipe, sprite); if (ret) DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n", - pipe_name(i), sprite_name(i, j), ret); + pipe_name(pipe), sprite_name(pipe, sprite), ret); } } @@ -10960,6 +11355,33 @@ void intel_modeset_init(struct drm_device *dev) /* Just in case the BIOS is doing something questionable. */ intel_disable_fbc(dev); + + mutex_lock(&dev->mode_config.mutex); + intel_modeset_setup_hw_state(dev, false); + mutex_unlock(&dev->mode_config.mutex); + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + base.head) { + if (!crtc->active) + continue; + + /* + * Note that reserving the BIOS fb up front prevents us + * from stuffing other stolen allocations like the ring + * on top. This prevents some ugliness at boot time, and + * can even allow for smooth boot transitions if the BIOS + * fb is large enough for the active pipe configuration. + */ + if (dev_priv->display.get_plane_config) { + dev_priv->display.get_plane_config(crtc, + &crtc->plane_config); + /* + * If the fb is shared between multiple heads, we'll + * just get the first one. + */ + intel_find_plane_obj(crtc, &crtc->plane_config); + } + } } static void @@ -11097,6 +11519,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) encoder->base.crtc = NULL; } } + if (crtc->active) { + /* + * We start out with underrun reporting disabled to avoid races. + * For correct bookkeeping mark this on active crtcs. + * + * No protection against concurrent access is required - at + * worst a fifo underrun happens which also sets this to false. + */ + crtc->cpu_fifo_underrun_disabled = true; + crtc->pch_fifo_underrun_disabled = true; + } } static void intel_sanitize_encoder(struct intel_encoder *encoder) @@ -11142,11 +11575,21 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) * the crtc fixup. */ } -void i915_redisable_vga(struct drm_device *dev) +void i915_redisable_vga_power_on(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 vga_reg = i915_vgacntrl_reg(dev); + if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { + DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); + i915_disable_vga(dev); + } +} + +void i915_redisable_vga(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + /* This function can be called both from intel_modeset_setup_hw_state or * at a very early point in our resume sequence, where the power well * structures are not yet restored. Since this function is at a very @@ -11154,14 +11597,10 @@ void i915_redisable_vga(struct drm_device *dev) * level, just check if the power well is enabled instead of trying to * follow the "don't touch the power well if we don't need it" policy * the rest of the driver uses. 
*/ - if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && - (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0) + if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA)) return; - if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { - DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); - i915_disable_vga(dev); - } + i915_redisable_vga_power_on(dev); } static void intel_modeset_readout_hw_state(struct drm_device *dev) @@ -11265,9 +11704,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { - if (crtc->active && i915_fastboot) { - intel_crtc_mode_from_pipe_config(crtc, &crtc->config); - + if (crtc->active && i915.fastboot) { + intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config); DRM_DEBUG_KMS("[CRTC:%d] found active mode: ", crtc->base.base.id); drm_mode_debug_printmodeline(&crtc->base.mode); @@ -11313,7 +11751,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, dev_priv->pipe_to_crtc_mapping[pipe]; __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, - crtc->fb); + crtc->primary->fb); } } else { intel_modeset_update_staged_output_state(dev); @@ -11324,14 +11762,44 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, void intel_modeset_gem_init(struct drm_device *dev) { + struct drm_crtc *c; + struct intel_framebuffer *fb; + + mutex_lock(&dev->struct_mutex); + intel_init_gt_powersave(dev); + mutex_unlock(&dev->struct_mutex); + intel_modeset_init_hw(dev); intel_setup_overlay(dev); - mutex_lock(&dev->mode_config.mutex); - drm_mode_config_reset(dev); - intel_modeset_setup_hw_state(dev, false); - mutex_unlock(&dev->mode_config.mutex); + /* + * Make sure any fbs we allocated at startup are properly + * pinned & fenced. When we do the allocation it's too early + * for this. + */ + mutex_lock(&dev->struct_mutex); + list_for_each_entry(c, &dev->mode_config.crtc_list, head) { + if (!c->primary->fb) + continue; + + fb = to_intel_framebuffer(c->primary->fb); + if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) { + DRM_ERROR("failed to pin boot fb on pipe %d\n", + to_intel_crtc(c)->pipe); + drm_framebuffer_unreference(c->primary->fb); + c->primary->fb = NULL; + } + } + mutex_unlock(&dev->struct_mutex); +} + +void intel_connector_unregister(struct intel_connector *intel_connector) +{ + struct drm_connector *connector = &intel_connector->base; + + intel_panel_destroy_backlight(connector); + drm_sysfs_connector_remove(connector); } void intel_modeset_cleanup(struct drm_device *dev) @@ -11359,7 +11827,7 @@ void intel_modeset_cleanup(struct drm_device *dev) list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { /* Skip inactive CRTCs */ - if (!crtc->fb) + if (!crtc->primary->fb) continue; intel_increase_pllclock(crtc); @@ -11378,13 +11846,19 @@ void intel_modeset_cleanup(struct drm_device *dev) /* destroy the backlight and sysfs files before encoders/connectors */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - intel_panel_destroy_backlight(connector); - drm_sysfs_connector_remove(connector); + struct intel_connector *intel_connector; + + intel_connector = to_intel_connector(connector); + intel_connector->unregister(intel_connector); } drm_mode_config_cleanup(dev); intel_cleanup_overlay(dev); + + mutex_lock(&dev->struct_mutex); + intel_cleanup_gt_powersave(dev); + mutex_unlock(&dev->struct_mutex); } /* @@ -11412,12 +11886,24 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state) unsigned reg = INTEL_INFO(dev)->gen >= 6 ? 
SNB_GMCH_CTRL : INTEL_GMCH_CTRL; u16 gmch_ctrl; - pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl); + if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) { + DRM_ERROR("failed to read control word\n"); + return -EIO; + } + + if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state) + return 0; + if (state) gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; else gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; - pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl); + + if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) { + DRM_ERROR("failed to write control word\n"); + return -EIO; + } + return 0; } @@ -11467,7 +11953,7 @@ struct intel_display_error_state { struct intel_display_error_state * intel_display_capture_error_state(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_display_error_state *error; int transcoders[] = { TRANSCODER_A, @@ -11489,7 +11975,8 @@ intel_display_capture_error_state(struct drm_device *dev) for_each_pipe(i) { error->pipe[i].power_domain_on = - intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i)); + intel_display_power_enabled_sw(dev_priv, + POWER_DOMAIN_PIPE(i)); if (!error->pipe[i].power_domain_on) continue; @@ -11527,7 +12014,7 @@ intel_display_capture_error_state(struct drm_device *dev) enum transcoder cpu_transcoder = transcoders[i]; error->transcoder[i].power_domain_on = - intel_display_power_enabled_sw(dev, + intel_display_power_enabled_sw(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder)); if (!error->transcoder[i].power_domain_on) continue; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 2688f6d64bb..a0dad1a2f81 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -91,18 +91,25 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector) } static void intel_dp_link_down(struct intel_dp *intel_dp); +static bool _edp_panel_vdd_on(struct intel_dp *intel_dp); +static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); static int intel_dp_max_link_bw(struct intel_dp *intel_dp) { int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; + struct drm_device *dev = intel_dp->attached_connector->base.dev; switch (max_link_bw) { case DP_LINK_BW_1_62: case DP_LINK_BW_2_7: break; case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ - max_link_bw = DP_LINK_BW_2_7; + if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) && + intel_dp->dpcd[DP_DPCD_REV] >= 0x12) + max_link_bw = DP_LINK_BW_5_4; + else + max_link_bw = DP_LINK_BW_2_7; break; default: WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n", @@ -294,7 +301,7 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp) return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); } -static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) +static bool edp_have_panel_power(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; @@ -302,12 +309,13 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; } -static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) +static bool edp_have_panel_vdd(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; - return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0; + return !dev_priv->pm.suspended && 
+ (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0; } static void @@ -319,7 +327,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp) if (!is_edp(intel_dp)) return; - if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { + if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) { WARN(1, "eDP powered off while attempting aux channel communication.\n"); DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", I915_READ(_pp_stat_reg(intel_dp)), @@ -351,31 +359,46 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) return status; } -static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp, - int index) +static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - /* The clock divider is based off the hrawclk, - * and would like to run at 2MHz. So, take the - * hrawclk value and divide by 2 and use that - * - * Note that PCH attached eDP panels should use a 125MHz input - * clock divider. + /* + * The clock divider is based off the hrawclk, and would like to run at + * 2MHz. So, take the hrawclk value and divide by 2 and use that */ - if (IS_VALLEYVIEW(dev)) { - return index ? 0 : 100; - } else if (intel_dig_port->port == PORT_A) { - if (index) - return 0; - if (HAS_DDI(dev)) - return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000); - else if (IS_GEN6(dev) || IS_GEN7(dev)) + return index ? 0 : intel_hrawclk(dev) / 2; +} + +static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + + if (index) + return 0; + + if (intel_dig_port->port == PORT_A) { + if (IS_GEN6(dev) || IS_GEN7(dev)) return 200; /* SNB & IVB eDP input clock at 400Mhz */ else return 225; /* eDP input clock at 450Mhz */ + } else { + return DIV_ROUND_UP(intel_pch_rawclk(dev), 2); + } +} + +static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (intel_dig_port->port == PORT_A) { + if (index) + return 0; + return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000); } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { /* Workaround for non-ULT HSW */ switch (index) { @@ -383,13 +406,46 @@ static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp, case 1: return 72; default: return 0; } - } else if (HAS_PCH_SPLIT(dev)) { + } else { return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2); - } else { - return index ? 0 :intel_hrawclk(dev) / 2; } } +static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +{ + return index ? 
0 : 100; +} + +static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp, + bool has_aux_irq, + int send_bytes, + uint32_t aux_clock_divider) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + uint32_t precharge, timeout; + + if (IS_GEN6(dev)) + precharge = 3; + else + precharge = 5; + + if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL) + timeout = DP_AUX_CH_CTL_TIME_OUT_600us; + else + timeout = DP_AUX_CH_CTL_TIME_OUT_400us; + + return DP_AUX_CH_CTL_SEND_BUSY | + DP_AUX_CH_CTL_DONE | + (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + timeout | + DP_AUX_CH_CTL_RECEIVE_ERROR | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); +} + static int intel_dp_aux_ch(struct intel_dp *intel_dp, uint8_t *send, int send_bytes, @@ -403,9 +459,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, uint32_t aux_clock_divider; int i, ret, recv_bytes; uint32_t status; - int try, precharge, clock = 0; + int try, clock = 0; bool has_aux_irq = HAS_AUX_IRQ(dev); - uint32_t timeout; + bool vdd; + + vdd = _edp_panel_vdd_on(intel_dp); /* dp aux is extremely sensitive to irq latency, hence request the * lowest possible wakeup latency and so prevent the cpu from going into @@ -415,16 +473,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, intel_dp_check_edp(intel_dp); - if (IS_GEN6(dev)) - precharge = 3; - else - precharge = 5; - - if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL) - timeout = DP_AUX_CH_CTL_TIME_OUT_600us; - else - timeout = DP_AUX_CH_CTL_TIME_OUT_400us; - intel_aux_display_runtime_get(dev_priv); /* Try to wait for any previous AUX channel activity */ @@ -448,7 +496,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, goto out; } - while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) { + while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) { + u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp, + has_aux_irq, + send_bytes, + aux_clock_divider); + /* Must try at least 3 times according to DP spec */ for (try = 0; try < 5; try++) { /* Load the send data into the aux channel data registers */ @@ -457,16 +510,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, pack_aux(send + i, send_bytes - i)); /* Send the command and wait for it to complete */ - I915_WRITE(ch_ctl, - DP_AUX_CH_CTL_SEND_BUSY | - (has_aux_irq ? 
DP_AUX_CH_CTL_INTERRUPT : 0) | - timeout | - (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | - (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_RECEIVE_ERROR); + I915_WRITE(ch_ctl, send_ctl); status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); @@ -525,246 +569,140 @@ out: pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); intel_aux_display_runtime_put(dev_priv); + if (vdd) + edp_panel_vdd_off(intel_dp, false); + return ret; } -/* Write data to the aux channel in native mode */ -static int -intel_dp_aux_native_write(struct intel_dp *intel_dp, - uint16_t address, uint8_t *send, int send_bytes) +#define HEADER_SIZE 4 +static ssize_t +intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { + struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux); + uint8_t txbuf[20], rxbuf[20]; + size_t txsize, rxsize; int ret; - uint8_t msg[20]; - int msg_bytes; - uint8_t ack; - int retry; - if (WARN_ON(send_bytes > 16)) - return -E2BIG; + txbuf[0] = msg->request << 4; + txbuf[1] = msg->address >> 8; + txbuf[2] = msg->address & 0xff; + txbuf[3] = msg->size - 1; - intel_dp_check_edp(intel_dp); - msg[0] = DP_AUX_NATIVE_WRITE << 4; - msg[1] = address >> 8; - msg[2] = address & 0xff; - msg[3] = send_bytes - 1; - memcpy(&msg[4], send, send_bytes); - msg_bytes = send_bytes + 4; - for (retry = 0; retry < 7; retry++) { - ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); - if (ret < 0) - return ret; - ack >>= 4; - if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) - return send_bytes; - else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) - usleep_range(400, 500); - else - return -EIO; - } + switch (msg->request & ~DP_AUX_I2C_MOT) { + case DP_AUX_NATIVE_WRITE: + case DP_AUX_I2C_WRITE: + txsize = HEADER_SIZE + msg->size; + rxsize = 1; - DRM_ERROR("too many retries, giving up\n"); - return -EIO; -} + if (WARN_ON(txsize > 20)) + return -E2BIG; -/* Write a single byte to the aux channel in native mode */ -static int -intel_dp_aux_native_write_1(struct intel_dp *intel_dp, - uint16_t address, uint8_t byte) -{ - return intel_dp_aux_native_write(intel_dp, address, &byte, 1); -} + memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); -/* read bytes from a native aux channel */ -static int -intel_dp_aux_native_read(struct intel_dp *intel_dp, - uint16_t address, uint8_t *recv, int recv_bytes) -{ - uint8_t msg[4]; - int msg_bytes; - uint8_t reply[20]; - int reply_bytes; - uint8_t ack; - int ret; - int retry; + ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); + if (ret > 0) { + msg->reply = rxbuf[0] >> 4; + + /* Return payload size. 
*/ + ret = msg->size; + } + break; - if (WARN_ON(recv_bytes > 19)) - return -E2BIG; + case DP_AUX_NATIVE_READ: + case DP_AUX_I2C_READ: + txsize = HEADER_SIZE; + rxsize = msg->size + 1; - intel_dp_check_edp(intel_dp); - msg[0] = DP_AUX_NATIVE_READ << 4; - msg[1] = address >> 8; - msg[2] = address & 0xff; - msg[3] = recv_bytes - 1; - - msg_bytes = 4; - reply_bytes = recv_bytes + 1; - - for (retry = 0; retry < 7; retry++) { - ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, - reply, reply_bytes); - if (ret == 0) - return -EPROTO; - if (ret < 0) - return ret; - ack = reply[0] >> 4; - if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) { - memcpy(recv, reply + 1, ret - 1); - return ret - 1; + if (WARN_ON(rxsize > 20)) + return -E2BIG; + + ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); + if (ret > 0) { + msg->reply = rxbuf[0] >> 4; + /* + * Assume happy day, and copy the data. The caller is + * expected to check msg->reply before touching it. + * + * Return payload size. + */ + ret--; + memcpy(msg->buffer, rxbuf + 1, ret); } - else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) - usleep_range(400, 500); - else - return -EIO; + break; + + default: + ret = -EINVAL; + break; } - DRM_ERROR("too many retries, giving up\n"); - return -EIO; + return ret; } -static int -intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, - uint8_t write_byte, uint8_t *read_byte) -{ - struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; - struct intel_dp *intel_dp = container_of(adapter, - struct intel_dp, - adapter); - uint16_t address = algo_data->address; - uint8_t msg[5]; - uint8_t reply[2]; - unsigned retry; - int msg_bytes; - int reply_bytes; +static void +intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + enum port port = intel_dig_port->port; + const char *name = NULL; int ret; - ironlake_edp_panel_vdd_on(intel_dp); - intel_dp_check_edp(intel_dp); - /* Set up the command byte */ - if (mode & MODE_I2C_READ) - msg[0] = DP_AUX_I2C_READ << 4; - else - msg[0] = DP_AUX_I2C_WRITE << 4; - - if (!(mode & MODE_I2C_STOP)) - msg[0] |= DP_AUX_I2C_MOT << 4; - - msg[1] = address >> 8; - msg[2] = address; - - switch (mode) { - case MODE_I2C_WRITE: - msg[3] = 0; - msg[4] = write_byte; - msg_bytes = 5; - reply_bytes = 1; + switch (port) { + case PORT_A: + intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL; + name = "DPDDC-A"; + break; + case PORT_B: + intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL; + name = "DPDDC-B"; + break; + case PORT_C: + intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL; + name = "DPDDC-C"; break; - case MODE_I2C_READ: - msg[3] = 0; - msg_bytes = 4; - reply_bytes = 2; + case PORT_D: + intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL; + name = "DPDDC-D"; break; default: - msg_bytes = 3; - reply_bytes = 1; - break; + BUG(); } - /* - * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is - * required to retry at least seven times upon receiving AUX_DEFER - * before giving up the AUX transaction. 
- */ - for (retry = 0; retry < 7; retry++) { - ret = intel_dp_aux_ch(intel_dp, - msg, msg_bytes, - reply, reply_bytes); - if (ret < 0) { - DRM_DEBUG_KMS("aux_ch failed %d\n", ret); - goto out; - } + if (!HAS_DDI(dev)) + intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; - switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) { - case DP_AUX_NATIVE_REPLY_ACK: - /* I2C-over-AUX Reply field is only valid - * when paired with AUX ACK. - */ - break; - case DP_AUX_NATIVE_REPLY_NACK: - DRM_DEBUG_KMS("aux_ch native nack\n"); - ret = -EREMOTEIO; - goto out; - case DP_AUX_NATIVE_REPLY_DEFER: - /* - * For now, just give more slack to branch devices. We - * could check the DPCD for I2C bit rate capabilities, - * and if available, adjust the interval. We could also - * be more careful with DP-to-Legacy adapters where a - * long legacy cable may force very low I2C bit rates. - */ - if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & - DP_DWN_STRM_PORT_PRESENT) - usleep_range(500, 600); - else - usleep_range(300, 400); - continue; - default: - DRM_ERROR("aux_ch invalid native reply 0x%02x\n", - reply[0]); - ret = -EREMOTEIO; - goto out; - } + intel_dp->aux.name = name; + intel_dp->aux.dev = dev->dev; + intel_dp->aux.transfer = intel_dp_aux_transfer; - switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) { - case DP_AUX_I2C_REPLY_ACK: - if (mode == MODE_I2C_READ) { - *read_byte = reply[1]; - } - ret = reply_bytes - 1; - goto out; - case DP_AUX_I2C_REPLY_NACK: - DRM_DEBUG_KMS("aux_i2c nack\n"); - ret = -EREMOTEIO; - goto out; - case DP_AUX_I2C_REPLY_DEFER: - DRM_DEBUG_KMS("aux_i2c defer\n"); - udelay(100); - break; - default: - DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); - ret = -EREMOTEIO; - goto out; - } - } + DRM_DEBUG_KMS("registering %s bus for %s\n", name, + connector->base.kdev->kobj.name); - DRM_ERROR("too many retries, giving up\n"); - ret = -EREMOTEIO; + ret = drm_dp_aux_register_i2c_bus(&intel_dp->aux); + if (ret < 0) { + DRM_ERROR("drm_dp_aux_register_i2c_bus() for %s failed (%d)\n", + name, ret); + return; + } -out: - ironlake_edp_panel_vdd_off(intel_dp, false); - return ret; + ret = sysfs_create_link(&connector->base.kdev->kobj, + &intel_dp->aux.ddc.dev.kobj, + intel_dp->aux.ddc.dev.kobj.name); + if (ret < 0) { + DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret); + drm_dp_aux_unregister_i2c_bus(&intel_dp->aux); + } } -static int -intel_dp_i2c_init(struct intel_dp *intel_dp, - struct intel_connector *intel_connector, const char *name) +static void +intel_dp_connector_unregister(struct intel_connector *intel_connector) { - int ret; - - DRM_DEBUG_KMS("i2c_init %s\n", name); - intel_dp->algo.running = false; - intel_dp->algo.address = 0; - intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; - - memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter)); - intel_dp->adapter.owner = THIS_MODULE; - intel_dp->adapter.class = I2C_CLASS_DDC; - strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); - intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; - intel_dp->adapter.algo_data = &intel_dp->algo; - intel_dp->adapter.dev.parent = intel_connector->base.kdev; + struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); - ret = i2c_dp_aux_add_bus(&intel_dp->adapter); - return ret; + sysfs_remove_link(&intel_connector->base.kdev->kobj, + intel_dp->aux.ddc.dev.kobj.name); + intel_connector_unregister(intel_connector); } static void @@ -812,9 +750,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct intel_connector *intel_connector 
= intel_dp->attached_connector; int lane_count, clock; int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); - int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; + /* Conveniently, the link BW constants become indices with a shift...*/ + int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; int bpp, mode_rate; - static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; + static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; int link_avail, link_clock; if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) @@ -855,8 +794,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, bpp); - for (clock = 0; clock <= max_clock; clock++) { - for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { + for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { + for (clock = 0; clock <= max_clock; clock++) { link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); link_avail = intel_dp_max_data_rate(link_clock, lane_count); @@ -1015,16 +954,16 @@ static void intel_dp_mode_set(struct intel_encoder *encoder) ironlake_set_pll_cpu_edp(intel_dp); } -#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) -#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) +#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) +#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) -#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) -#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) +#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0) +#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0) -#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) -#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) +#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) +#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) -static void ironlake_wait_panel_status(struct intel_dp *intel_dp, +static void wait_panel_status(struct intel_dp *intel_dp, u32 mask, u32 value) { @@ -1049,24 +988,41 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp, DRM_DEBUG_KMS("Wait complete\n"); } -static void ironlake_wait_panel_on(struct intel_dp *intel_dp) +static void wait_panel_on(struct intel_dp *intel_dp) { DRM_DEBUG_KMS("Wait for panel power on\n"); - ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); + wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); } -static void ironlake_wait_panel_off(struct intel_dp *intel_dp) +static void wait_panel_off(struct intel_dp *intel_dp) { DRM_DEBUG_KMS("Wait for panel power off time\n"); - ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); + wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); } -static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) +static void wait_panel_power_cycle(struct intel_dp *intel_dp) { DRM_DEBUG_KMS("Wait for panel power cycle\n"); - ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); + + /* When we disable the VDD override bit last we have to do the manual + * wait. 
*/ + wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle, + intel_dp->panel_power_cycle_delay); + + wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); +} + +static void wait_backlight_on(struct intel_dp *intel_dp) +{ + wait_remaining_ms_from_jiffies(intel_dp->last_power_on, + intel_dp->backlight_on_delay); } +static void edp_wait_backlight_off(struct intel_dp *intel_dp) +{ + wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off, + intel_dp->backlight_off_delay); +} /* Read the current pp_control value, unlocking the register if it * is locked @@ -1084,30 +1040,28 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) return control; } -void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) +static bool _edp_panel_vdd_on(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; u32 pp_stat_reg, pp_ctrl_reg; + bool need_to_disable = !intel_dp->want_panel_vdd; if (!is_edp(intel_dp)) - return; - - WARN(intel_dp->want_panel_vdd, - "eDP VDD already requested on\n"); + return false; intel_dp->want_panel_vdd = true; - if (ironlake_edp_have_panel_vdd(intel_dp)) - return; + if (edp_have_panel_vdd(intel_dp)) + return need_to_disable; intel_runtime_pm_get(dev_priv); DRM_DEBUG_KMS("Turning eDP VDD on\n"); - if (!ironlake_edp_have_panel_power(intel_dp)) - ironlake_wait_panel_power_cycle(intel_dp); + if (!edp_have_panel_power(intel_dp)) + wait_panel_power_cycle(intel_dp); pp = ironlake_get_pp_control(intel_dp); pp |= EDP_FORCE_VDD; @@ -1122,13 +1076,24 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) /* * If the panel wasn't on, delay before accessing aux channel */ - if (!ironlake_edp_have_panel_power(intel_dp)) { + if (!edp_have_panel_power(intel_dp)) { DRM_DEBUG_KMS("eDP was not running\n"); msleep(intel_dp->panel_power_up_delay); } + + return need_to_disable; +} + +void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) +{ + if (is_edp(intel_dp)) { + bool vdd = _edp_panel_vdd_on(intel_dp); + + WARN(!vdd, "eDP VDD already requested on\n"); + } } -static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) +static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; @@ -1137,7 +1102,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); - if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { + if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) { DRM_DEBUG_KMS("Turning eDP VDD off\n"); pp = ironlake_get_pp_control(intel_dp); @@ -1154,24 +1119,24 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); if ((pp & POWER_TARGET_ON) == 0) - msleep(intel_dp->panel_power_cycle_delay); + intel_dp->last_power_cycle = jiffies; intel_runtime_pm_put(dev_priv); } } -static void ironlake_panel_vdd_work(struct work_struct *__work) +static void edp_panel_vdd_work(struct work_struct *__work) { struct intel_dp *intel_dp = container_of(to_delayed_work(__work), struct intel_dp, panel_vdd_work); struct drm_device *dev = intel_dp_to_dev(intel_dp); mutex_lock(&dev->mode_config.mutex); - ironlake_panel_vdd_off_sync(intel_dp); + edp_panel_vdd_off_sync(intel_dp); mutex_unlock(&dev->mode_config.mutex); } -void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) +static void edp_panel_vdd_off(struct intel_dp 
*intel_dp, bool sync) { if (!is_edp(intel_dp)) return; @@ -1181,7 +1146,7 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) intel_dp->want_panel_vdd = false; if (sync) { - ironlake_panel_vdd_off_sync(intel_dp); + edp_panel_vdd_off_sync(intel_dp); } else { /* * Queue the timer to fire a long @@ -1193,7 +1158,7 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) } } -void ironlake_edp_panel_on(struct intel_dp *intel_dp) +void intel_edp_panel_on(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; @@ -1205,12 +1170,12 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp) DRM_DEBUG_KMS("Turn eDP power on\n"); - if (ironlake_edp_have_panel_power(intel_dp)) { + if (edp_have_panel_power(intel_dp)) { DRM_DEBUG_KMS("eDP power already on\n"); return; } - ironlake_wait_panel_power_cycle(intel_dp); + wait_panel_power_cycle(intel_dp); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); pp = ironlake_get_pp_control(intel_dp); @@ -1228,7 +1193,8 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp) I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); - ironlake_wait_panel_on(intel_dp); + wait_panel_on(intel_dp); + intel_dp->last_power_on = jiffies; if (IS_GEN5(dev)) { pp |= PANEL_POWER_RESET; /* restore panel reset bit */ @@ -1237,7 +1203,7 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp) } } -void ironlake_edp_panel_off(struct intel_dp *intel_dp) +void intel_edp_panel_off(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; @@ -1249,27 +1215,31 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp) DRM_DEBUG_KMS("Turn eDP power off\n"); + edp_wait_backlight_off(intel_dp); + WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); pp = ironlake_get_pp_control(intel_dp); /* We need to switch off panel power _and_ force vdd, for otherwise some * panels get very unhappy and cease to work. */ - pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); + pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | + EDP_BLC_ENABLE); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); + intel_dp->want_panel_vdd = false; + I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); - intel_dp->want_panel_vdd = false; - - ironlake_wait_panel_off(intel_dp); + intel_dp->last_power_cycle = jiffies; + wait_panel_off(intel_dp); /* We got a reference when we enabled the VDD. */ intel_runtime_pm_put(dev_priv); } -void ironlake_edp_backlight_on(struct intel_dp *intel_dp) +void intel_edp_backlight_on(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; @@ -1287,7 +1257,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp) * link. So delay a bit to make sure the image is solid before * allowing it to appear. 
*/ - msleep(intel_dp->backlight_on_delay); + wait_backlight_on(intel_dp); pp = ironlake_get_pp_control(intel_dp); pp |= EDP_BLC_ENABLE; @@ -1299,7 +1269,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp) intel_panel_enable_backlight(intel_dp->attached_connector); } -void ironlake_edp_backlight_off(struct intel_dp *intel_dp) +void intel_edp_backlight_off(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; @@ -1319,7 +1289,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp) I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); - msleep(intel_dp->backlight_off_delay); + intel_dp->last_backlight_off = jiffies; } static void ironlake_edp_pll_on(struct intel_dp *intel_dp) @@ -1383,8 +1353,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) return; if (mode != DRM_MODE_DPMS_ON) { - ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, - DP_SET_POWER_D3); + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, + DP_SET_POWER_D3); if (ret != 1) DRM_DEBUG_DRIVER("failed to write sink power state\n"); } else { @@ -1393,9 +1363,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) * time to wake up. */ for (i = 0; i < 3; i++) { - ret = intel_dp_aux_native_write_1(intel_dp, - DP_SET_POWER, - DP_SET_POWER_D0); + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, + DP_SET_POWER_D0); if (ret == 1) break; msleep(1); @@ -1410,7 +1379,14 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, enum port port = dp_to_dig_port(intel_dp)->port; struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 tmp = I915_READ(intel_dp->output_reg); + enum intel_display_power_domain power_domain; + u32 tmp; + + power_domain = intel_display_port_power_domain(encoder); + if (!intel_display_power_enabled(dev_priv, power_domain)) + return false; + + tmp = I915_READ(intel_dp->output_reg); if (!(tmp & DP_PORT_EN)) return false; @@ -1604,19 +1580,19 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0); + uint32_t aux_clock_divider; int precharge = 0x3; int msg_size = 5; /* Header(4) + Message(1) */ + aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); + /* Enable PSR in sink */ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) - intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, - DP_PSR_ENABLE & - ~DP_PSR_MAIN_LINK_ACTIVE); + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, + DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE); else - intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, - DP_PSR_ENABLE | - DP_PSR_MAIN_LINK_ACTIVE); + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, + DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); /* Setup AUX registers */ I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND); @@ -1659,7 +1635,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = dig_port->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj; + struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj; struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; dev_priv->psr.source_ok = false; @@ -1675,7 +1651,7 @@ static 
bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) return false; } - if (!i915_enable_psr) { + if (!i915.enable_psr) { DRM_DEBUG_KMS("PSR disable by flag\n"); return false; } @@ -1692,7 +1668,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) return false; } - obj = to_intel_framebuffer(crtc->fb)->obj; + obj = to_intel_framebuffer(crtc->primary->fb)->obj; if (obj->tiling_mode != I915_TILING_X || obj->fence_reg == I915_FENCE_REG_NONE) { DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); @@ -1791,10 +1767,10 @@ static void intel_disable_dp(struct intel_encoder *encoder) /* Make sure the panel is off before trying to change the mode. But also * ensure that we have vdd while we switch off the panel. */ - ironlake_edp_panel_vdd_on(intel_dp); - ironlake_edp_backlight_off(intel_dp); + intel_edp_panel_vdd_on(intel_dp); + intel_edp_backlight_off(intel_dp); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); - ironlake_edp_panel_off(intel_dp); + intel_edp_panel_off(intel_dp); /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */ if (!(port == PORT_A || IS_VALLEYVIEW(dev))) @@ -1824,11 +1800,11 @@ static void intel_enable_dp(struct intel_encoder *encoder) if (WARN_ON(dp_reg & DP_PORT_EN)) return; - ironlake_edp_panel_vdd_on(intel_dp); + intel_edp_panel_vdd_on(intel_dp); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); intel_dp_start_link_train(intel_dp); - ironlake_edp_panel_on(intel_dp); - ironlake_edp_panel_vdd_off(intel_dp, true); + intel_edp_panel_on(intel_dp); + edp_panel_vdd_off(intel_dp, true); intel_dp_complete_link_train(intel_dp); intel_dp_stop_link_train(intel_dp); } @@ -1838,14 +1814,14 @@ static void g4x_enable_dp(struct intel_encoder *encoder) struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); intel_enable_dp(encoder); - ironlake_edp_backlight_on(intel_dp); + intel_edp_backlight_on(intel_dp); } static void vlv_enable_dp(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - ironlake_edp_backlight_on(intel_dp); + intel_edp_backlight_on(intel_dp); } static void g4x_pre_enable_dp(struct intel_encoder *encoder) @@ -1927,26 +1903,25 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) /* * Native read with retry for link status and receiver capability reads for * cases where the sink may still be asleep. + * + * Sinks are *supposed* to come up within 1ms from an off state, but we're also + * supposed to retry 3 times per the spec. */ -static bool -intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, - uint8_t *recv, int recv_bytes) +static ssize_t +intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset, + void *buffer, size_t size) { - int ret, i; + ssize_t ret; + int i; - /* - * Sinks are *supposed* to come up within 1ms from an off state, - * but we're also supposed to retry 3 times per the spec. 
- */ for (i = 0; i < 3; i++) { - ret = intel_dp_aux_native_read(intel_dp, address, recv, - recv_bytes); - if (ret == recv_bytes) - return true; + ret = drm_dp_dpcd_read(aux, offset, buffer, size); + if (ret == size) + return ret; msleep(1); } - return false; + return ret; } /* @@ -1956,10 +1931,10 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, static bool intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) { - return intel_dp_aux_native_read_retry(intel_dp, - DP_LANE0_1_STATUS, - link_status, - DP_LINK_STATUS_SIZE); + return intel_dp_dpcd_read_wake(&intel_dp->aux, + DP_LANE0_1_STATUS, + link_status, + DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; } /* @@ -2473,8 +2448,8 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, len = intel_dp->lane_count + 1; } - ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET, - buf, len); + ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET, + buf, len); return ret == len; } @@ -2503,9 +2478,8 @@ intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP, I915_WRITE(intel_dp->output_reg, *DP); POSTING_READ(intel_dp->output_reg); - ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET, - intel_dp->train_set, - intel_dp->lane_count); + ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, + intel_dp->train_set, intel_dp->lane_count); return ret == intel_dp->lane_count; } @@ -2561,11 +2535,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) link_config[1] = intel_dp->lane_count; if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2); + drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); link_config[0] = 0; link_config[1] = DP_SET_ANSI_8B10B; - intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2); + drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2); DP |= DP_PORT_EN; @@ -2638,10 +2612,15 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) bool channel_eq = false; int tries, cr_tries; uint32_t DP = intel_dp->DP; + uint32_t training_pattern = DP_TRAINING_PATTERN_2; + + /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/ + if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3) + training_pattern = DP_TRAINING_PATTERN_3; /* channel equalization */ if (!intel_dp_set_link_train(intel_dp, &DP, - DP_TRAINING_PATTERN_2 | + training_pattern | DP_LINK_SCRAMBLING_DISABLE)) { DRM_ERROR("failed to start channel equalization\n"); return; @@ -2668,7 +2647,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { intel_dp_start_link_train(intel_dp); intel_dp_set_link_train(intel_dp, &DP, - DP_TRAINING_PATTERN_2 | + training_pattern | DP_LINK_SCRAMBLING_DISABLE); cr_tries++; continue; @@ -2684,7 +2663,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) intel_dp_link_down(intel_dp); intel_dp_start_link_train(intel_dp); intel_dp_set_link_train(intel_dp, &DP, - DP_TRAINING_PATTERN_2 | + training_pattern | DP_LINK_SCRAMBLING_DISABLE); tries = 0; cr_tries++; @@ -2803,8 +2782,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; - if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, - sizeof(intel_dp->dpcd)) == 0) + if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd, + sizeof(intel_dp->dpcd)) < 0) return 
false; /* aux transfer failed */ hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), @@ -2817,15 +2796,23 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) /* Check if the panel supports PSR */ memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); if (is_edp(intel_dp)) { - intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT, - intel_dp->psr_dpcd, - sizeof(intel_dp->psr_dpcd)); + intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT, + intel_dp->psr_dpcd, + sizeof(intel_dp->psr_dpcd)); if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) { dev_priv->psr.sink_support = true; DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); } } + /* Training Pattern 3 support */ + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 && + intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) { + intel_dp->use_tps3 = true; + DRM_DEBUG_KMS("Displayport TPS3 supported"); + } else + intel_dp->use_tps3 = false; + if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) return true; /* native DP sink */ @@ -2833,9 +2820,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) return true; /* no per-port downstream info */ - if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0, - intel_dp->downstream_ports, - DP_MAX_DOWNSTREAM_PORTS) == 0) + if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0, + intel_dp->downstream_ports, + DP_MAX_DOWNSTREAM_PORTS) < 0) return false; /* downstream port status fetch failed */ return true; @@ -2849,38 +2836,61 @@ intel_dp_probe_oui(struct intel_dp *intel_dp) if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) return; - ironlake_edp_panel_vdd_on(intel_dp); + intel_edp_panel_vdd_on(intel_dp); - if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) + if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3) DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", buf[0], buf[1], buf[2]); - if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) + if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3) DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", buf[0], buf[1], buf[2]); - ironlake_edp_panel_vdd_off(intel_dp, false); + edp_panel_vdd_off(intel_dp, false); } -static bool -intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) +int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc) { - int ret; + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct intel_crtc *intel_crtc = + to_intel_crtc(intel_dig_port->base.base.crtc); + u8 buf[1]; - ret = intel_dp_aux_native_read_retry(intel_dp, - DP_DEVICE_SERVICE_IRQ_VECTOR, - sink_irq_vector, 1); - if (!ret) - return false; + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0) + return -EAGAIN; - return true; + if (!(buf[0] & DP_TEST_CRC_SUPPORTED)) + return -ENOTTY; + + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, + DP_TEST_SINK_START) < 0) + return -EAGAIN; + + /* Wait 2 vblanks to be sure we will have the correct CRC value */ + intel_wait_for_vblank(dev, intel_crtc->pipe); + intel_wait_for_vblank(dev, intel_crtc->pipe); + + if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) + return -EAGAIN; + + drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0); + return 0; +} + +static bool +intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) +{ + return intel_dp_dpcd_read_wake(&intel_dp->aux, + DP_DEVICE_SERVICE_IRQ_VECTOR, + sink_irq_vector, 1) == 1; } static void 
intel_dp_handle_test_request(struct intel_dp *intel_dp) { /* NAK by default */ - intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK); + drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK); } /* @@ -2919,9 +2929,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { /* Clear interrupt source */ - intel_dp_aux_native_write_1(intel_dp, - DP_DEVICE_SERVICE_IRQ_VECTOR, - sink_irq_vector); + drm_dp_dpcd_writeb(&intel_dp->aux, + DP_DEVICE_SERVICE_IRQ_VECTOR, + sink_irq_vector); if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) intel_dp_handle_test_request(intel_dp); @@ -2956,15 +2966,17 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { uint8_t reg; - if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, - ®, 1)) + + if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT, + ®, 1) < 0) return connector_status_unknown; + return DP_GET_SINK_COUNT(reg) ? connector_status_connected : connector_status_disconnected; } /* If no HPD, poke DDC gently */ - if (drm_probe_ddc(&intel_dp->adapter)) + if (drm_probe_ddc(&intel_dp->aux.ddc)) return connector_status_connected; /* Well we tried, say unknown for unreliable port types */ @@ -3106,10 +3118,14 @@ intel_dp_detect(struct drm_connector *connector, bool force) struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; enum drm_connector_status status; + enum intel_display_power_domain power_domain; struct edid *edid = NULL; intel_runtime_pm_get(dev_priv); + power_domain = intel_display_port_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, drm_get_connector_name(connector)); @@ -3128,7 +3144,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); } else { - edid = intel_dp_get_edid(connector, &intel_dp->adapter); + edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc); if (edid) { intel_dp->has_audio = drm_detect_monitor_audio(edid); kfree(edid); @@ -3140,21 +3156,32 @@ intel_dp_detect(struct drm_connector *connector, bool force) status = connector_status_connected; out: + intel_display_power_put(dev_priv, power_domain); + intel_runtime_pm_put(dev_priv); + return status; } static int intel_dp_get_modes(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *intel_encoder = &intel_dig_port->base; struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum intel_display_power_domain power_domain; int ret; /* We should parse the EDID data and find out if it has an audio sink */ - ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); + power_domain = intel_display_port_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); + + ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc); + intel_display_power_put(dev_priv, power_domain); if (ret) return ret; @@ -3175,15 +3202,25 @@ static bool intel_dp_detect_audio(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); + 
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *intel_encoder = &intel_dig_port->base; + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum intel_display_power_domain power_domain; struct edid *edid; bool has_audio = false; - edid = intel_dp_get_edid(connector, &intel_dp->adapter); + power_domain = intel_display_port_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); + + edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc); if (edid) { has_audio = drm_detect_monitor_audio(edid); kfree(edid); } + intel_display_power_put(dev_priv, power_domain); + return has_audio; } @@ -3298,12 +3335,12 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_device *dev = intel_dp_to_dev(intel_dp); - i2c_del_adapter(&intel_dp->adapter); + drm_dp_aux_unregister_i2c_bus(&intel_dp->aux); drm_encoder_cleanup(encoder); if (is_edp(intel_dp)) { cancel_delayed_work_sync(&intel_dp->panel_vdd_work); mutex_lock(&dev->mode_config.mutex); - ironlake_panel_vdd_off_sync(intel_dp); + edp_panel_vdd_off_sync(intel_dp); mutex_unlock(&dev->mode_config.mutex); } kfree(intel_dig_port); @@ -3402,6 +3439,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect } } +static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) +{ + intel_dp->last_power_cycle = jiffies; + intel_dp->last_power_on = jiffies; + intel_dp->last_backlight_off = jiffies; +} + static void intel_dp_init_panel_power_sequencer(struct drm_device *dev, struct intel_dp *intel_dp, @@ -3524,10 +3568,17 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); } - /* And finally store the new values in the power sequencer. */ + /* + * And finally store the new values in the power sequencer. The + * backlight delays are set to 1 because we do manual waits on them. For + * T8, even BSpec recommends doing it. For T9, if we don't do this, + * we'll end up waiting for the backlight off delay twice: once when we + * do the manual sleep, and once when we disable the panel and wait for + * the PP_STATUS bit to become zero. + */ pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | - (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); - pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | + (1 << PANEL_LIGHT_ON_DELAY_SHIFT); + pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) | (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); /* Compute the divisor for the pp clock, simply match the Bspec * formula. */ @@ -3562,14 +3613,14 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, } static bool intel_edp_init_connector(struct intel_dp *intel_dp, - struct intel_connector *intel_connector) + struct intel_connector *intel_connector, + struct edp_power_seq *power_seq) { struct drm_connector *connector = &intel_connector->base; struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_display_mode *fixed_mode = NULL; - struct edp_power_seq power_seq = { 0 }; bool has_dpcd; struct drm_display_mode *scan; struct edid *edid; @@ -3577,12 +3628,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (!is_edp(intel_dp)) return true; - intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); - /* Cache DPCD and EDID for edp. 
*/ - ironlake_edp_panel_vdd_on(intel_dp); + intel_edp_panel_vdd_on(intel_dp); has_dpcd = intel_dp_get_dpcd(intel_dp); - ironlake_edp_panel_vdd_off(intel_dp, false); + edp_panel_vdd_off(intel_dp, false); if (has_dpcd) { if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) @@ -3596,10 +3645,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, } /* We now know it's not a ghost, init power sequence regs. */ - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, - &power_seq); + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq); - edid = drm_get_edid(connector, &intel_dp->adapter); + mutex_lock(&dev->mode_config.mutex); + edid = drm_get_edid(connector, &intel_dp->aux.ddc); if (edid) { if (drm_add_edid_modes(connector, edid)) { drm_mode_connector_update_edid_property(connector, @@ -3629,8 +3678,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (fixed_mode) fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; } + mutex_unlock(&dev->mode_config.mutex); - intel_panel_init(&intel_connector->panel, fixed_mode); + intel_panel_init(&intel_connector->panel, fixed_mode, NULL); intel_panel_setup_backlight(connector); return true; @@ -3646,8 +3696,20 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; enum port port = intel_dig_port->port; - const char *name = NULL; - int type, error; + struct edp_power_seq power_seq = { 0 }; + int type; + + /* intel_dp vfuncs */ + if (IS_VALLEYVIEW(dev)) + intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider; + else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; + else if (HAS_PCH_SPLIT(dev)) + intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider; + else + intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider; + + intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl; /* Preserve the current hw state. */ intel_dp->DP = I915_READ(intel_dp->output_reg); @@ -3677,7 +3739,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, connector->doublescan_allowed = 0; INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, - ironlake_panel_vdd_work); + edp_panel_vdd_work); intel_connector_attach_encoder(intel_connector, intel_encoder); drm_sysfs_connector_add(connector); @@ -3686,61 +3748,41 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; else intel_connector->get_hw_state = intel_connector_get_hw_state; + intel_connector->unregister = intel_dp_connector_unregister; - intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; - if (HAS_DDI(dev)) { - switch (intel_dig_port->port) { - case PORT_A: - intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL; - break; - case PORT_B: - intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL; - break; - case PORT_C: - intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL; - break; - case PORT_D: - intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL; - break; - default: - BUG(); - } - } - - /* Set up the DDC bus. */ + /* Set up the hotplug pin. 
*/ switch (port) { case PORT_A: intel_encoder->hpd_pin = HPD_PORT_A; - name = "DPDDC-A"; break; case PORT_B: intel_encoder->hpd_pin = HPD_PORT_B; - name = "DPDDC-B"; break; case PORT_C: intel_encoder->hpd_pin = HPD_PORT_C; - name = "DPDDC-C"; break; case PORT_D: intel_encoder->hpd_pin = HPD_PORT_D; - name = "DPDDC-D"; break; default: BUG(); } - error = intel_dp_i2c_init(intel_dp, intel_connector, name); - WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n", - error, port_name(port)); + if (is_edp(intel_dp)) { + intel_dp_init_panel_power_timestamps(intel_dp); + intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); + } + + intel_dp_aux_init(intel_dp, intel_connector); intel_dp->psr_setup_done = false; - if (!intel_edp_init_connector(intel_dp, intel_connector)) { - i2c_del_adapter(&intel_dp->adapter); + if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) { + drm_dp_aux_unregister_i2c_bus(&intel_dp->aux); if (is_edp(intel_dp)) { cancel_delayed_work_sync(&intel_dp->panel_vdd_work); mutex_lock(&dev->mode_config.mutex); - ironlake_panel_vdd_off_sync(intel_dp); + edp_panel_vdd_off_sync(intel_dp); mutex_unlock(&dev->mode_config.mutex); } drm_sysfs_connector_remove(connector); @@ -3806,7 +3848,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); - intel_encoder->cloneable = false; + intel_encoder->cloneable = 0; intel_encoder->hot_plug = intel_dp_hot_plug; if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index fbfaaba5cc3..0542de98226 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -78,6 +78,12 @@ #define MAX_OUTPUTS 6 /* maximum connectors per crtcs in the mode set */ +/* Maximum cursor sizes */ +#define GEN2_CURSOR_WIDTH 64 +#define GEN2_CURSOR_HEIGHT 64 +#define CURSOR_WIDTH 256 +#define CURSOR_HEIGHT 256 + #define INTEL_I2C_BUS_DVO 1 #define INTEL_I2C_BUS_SDVO 2 @@ -110,9 +116,10 @@ struct intel_framebuffer { struct intel_fbdev { struct drm_fb_helper helper; - struct intel_framebuffer ifb; + struct intel_framebuffer *fb; struct list_head fbdev_list; struct drm_display_mode *our_mode; + int preferred_bpp; }; struct intel_encoder { @@ -124,11 +131,7 @@ struct intel_encoder { struct intel_crtc *new_crtc; int type; - /* - * Intel hw has only one MUX where encoders could be clone, hence a - * simple flag is enough to compute the possible_clones mask. - */ - bool cloneable; + unsigned int cloneable; bool connectors_active; void (*hot_plug)(struct intel_encoder *); bool (*compute_config)(struct intel_encoder *, @@ -187,6 +190,14 @@ struct intel_connector { * and active (i.e. dpms ON state). */ bool (*get_hw_state)(struct intel_connector *); + /* + * Removes all interfaces through which the connector is accessible + * - like sysfs, debugfs entries -, so that no new operations can be + * started on the connector. Also makes sure all currently pending + * operations finish before returning.
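+ * For example, this patch points DP connectors at + * intel_dp_connector_unregister() and the remaining output types at the + * generic intel_connector_unregister().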
+ */ + void (*unregister)(struct intel_connector *); + /* Panel info for eDP and LVDS */ struct intel_panel panel; @@ -210,6 +221,12 @@ typedef struct dpll { int p; } intel_clock_t; +struct intel_plane_config { + bool tiled; + int size; + u32 base; +}; + struct intel_crtc_config { /** * quirks - bitfield with hw state readout quirks @@ -356,9 +373,13 @@ struct intel_crtc { uint32_t cursor_addr; int16_t cursor_x, cursor_y; int16_t cursor_width, cursor_height; + int16_t max_cursor_width, max_cursor_height; bool cursor_visible; + struct intel_plane_config plane_config; struct intel_crtc_config config; + struct intel_crtc_config *new_config; + bool new_enabled; uint32_t ddi_pll_sel; @@ -475,8 +496,7 @@ struct intel_dp { uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; - struct i2c_adapter adapter; - struct i2c_algo_dp_aux_data algo; + struct drm_dp_aux aux; uint8_t train_set[4]; int panel_power_up_delay; int panel_power_down_delay; @@ -485,8 +505,22 @@ struct intel_dp { int backlight_off_delay; struct delayed_work panel_vdd_work; bool want_panel_vdd; + unsigned long last_power_cycle; + unsigned long last_power_on; + unsigned long last_backlight_off; bool psr_setup_done; + bool use_tps3; struct intel_connector *attached_connector; + + uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index); + /* + * This function returns the value we have to program the AUX_CTL + * register with to kick off an AUX transaction. + */ + uint32_t (*get_aux_send_ctl)(struct intel_dp *dp, + bool has_aux_irq, + int send_bytes, + uint32_t aux_clock_divider); }; struct intel_digital_port { @@ -540,6 +574,7 @@ struct intel_unpin_work { struct intel_set_config { struct drm_encoder **save_connector_encoders; struct drm_crtc **save_encoder_crtcs; + bool *save_crtc_enabled; bool fb_changed; bool mode_changed; @@ -584,6 +619,8 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) /* i915_irq.c */ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable); +bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, + enum pipe pipe, bool enable); bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, enum transcoder pch_transcoder, bool enable); @@ -591,8 +628,8 @@ void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void hsw_pc8_disable_interrupts(struct drm_device *dev); -void hsw_pc8_restore_interrupts(struct drm_device *dev); +void hsw_runtime_pm_disable_interrupts(struct drm_device *dev); +void hsw_runtime_pm_restore_interrupts(struct drm_device *dev); /* intel_crt.c */ @@ -664,11 +701,10 @@ int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, struct intel_ring_buffer *pipelined); void intel_unpin_fb_obj(struct drm_i915_gem_object *obj); -int intel_framebuffer_init(struct drm_device *dev, - struct intel_framebuffer *ifb, +struct drm_framebuffer * +__intel_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj); -void intel_framebuffer_fini(struct intel_framebuffer *fb); void intel_prepare_page_flip(struct drm_device *dev, int plane); void intel_finish_page_flip(struct drm_device *dev, int pipe); void intel_finish_page_flip_plane(struct drm_device 
*dev, int plane); @@ -696,9 +732,8 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y, unsigned int bpp, unsigned int pitch); void intel_display_handle_reset(struct drm_device *dev); -void hsw_enable_pc8_work(struct work_struct *__work); -void hsw_enable_package_c8(struct drm_i915_private *dev_priv); -void hsw_disable_package_c8(struct drm_i915_private *dev_priv); +void hsw_enable_pc8(struct drm_i915_private *dev_priv); +void hsw_disable_pc8(struct drm_i915_private *dev_priv); void intel_dp_get_m_n(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); @@ -708,8 +743,13 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config, bool intel_crtc_active(struct drm_crtc *crtc); void hsw_enable_ips(struct intel_crtc *crtc); void hsw_disable_ips(struct intel_crtc *crtc); -void intel_display_set_init_power(struct drm_device *dev, bool enable); +void intel_display_set_init_power(struct drm_i915_private *dev, bool enable); +enum intel_display_power_domain +intel_display_port_power_domain(struct intel_encoder *intel_encoder); int valleyview_get_vco(struct drm_i915_private *dev_priv); +void intel_mode_from_pipe_config(struct drm_display_mode *mode, + struct intel_crtc_config *pipe_config); +int intel_format_to_fourcc(int format); /* intel_dp.c */ void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); @@ -721,15 +761,15 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp); void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); void intel_dp_encoder_destroy(struct drm_encoder *encoder); void intel_dp_check_link_status(struct intel_dp *intel_dp); +int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); bool intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config); bool intel_dp_is_edp(struct drm_device *dev, enum port port); -void ironlake_edp_backlight_on(struct intel_dp *intel_dp); -void ironlake_edp_backlight_off(struct intel_dp *intel_dp); -void ironlake_edp_panel_on(struct intel_dp *intel_dp); -void ironlake_edp_panel_off(struct intel_dp *intel_dp); -void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); -void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); +void intel_edp_backlight_on(struct intel_dp *intel_dp); +void intel_edp_backlight_off(struct intel_dp *intel_dp); +void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); +void intel_edp_panel_on(struct intel_dp *intel_dp); +void intel_edp_panel_off(struct intel_dp *intel_dp); void intel_edp_psr_enable(struct intel_dp *intel_dp); void intel_edp_psr_disable(struct intel_dp *intel_dp); void intel_edp_psr_update(struct drm_device *dev); @@ -808,7 +848,8 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, /* intel_panel.c */ int intel_panel_init(struct intel_panel *panel, - struct drm_display_mode *fixed_mode); + struct drm_display_mode *fixed_mode, + struct drm_display_mode *downclock_mode); void intel_panel_fini(struct intel_panel *panel); void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); @@ -845,18 +886,19 @@ bool intel_fbc_enabled(struct drm_device *dev); void intel_update_fbc(struct drm_device *dev); void intel_gpu_ips_init(struct drm_i915_private *dev_priv); void intel_gpu_ips_teardown(void); -int intel_power_domains_init(struct drm_device *dev); -void intel_power_domains_remove(struct drm_device *dev); -bool intel_display_power_enabled(struct drm_device 
*dev, +int intel_power_domains_init(struct drm_i915_private *); +void intel_power_domains_remove(struct drm_i915_private *); +bool intel_display_power_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); -bool intel_display_power_enabled_sw(struct drm_device *dev, +bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); -void intel_display_power_get(struct drm_device *dev, +void intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); -void intel_display_power_put(struct drm_device *dev, +void intel_display_power_put(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); -void intel_power_domains_init_hw(struct drm_device *dev); -void intel_set_power_well(struct drm_device *dev, bool enable); +void intel_power_domains_init_hw(struct drm_i915_private *dev_priv); +void intel_init_gt_powersave(struct drm_device *dev); +void intel_cleanup_gt_powersave(struct drm_device *dev); void intel_enable_gt_powersave(struct drm_device *dev); void intel_disable_gt_powersave(struct drm_device *dev); void ironlake_teardown_rc6(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index fabbf0d895c..33656647f8b 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -243,11 +243,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + enum intel_display_power_domain power_domain; u32 port, func; enum pipe p; DRM_DEBUG_KMS("\n"); + power_domain = intel_display_port_power_domain(encoder); + if (!intel_display_power_enabled(dev_priv, power_domain)) + return false; + /* XXX: this only works for one DSI output */ for (p = PIPE_A; p <= PIPE_B; p++) { port = I915_READ(MIPI_PORT_CTRL(p)); @@ -488,8 +493,19 @@ static enum drm_connector_status intel_dsi_detect(struct drm_connector *connector, bool force) { struct intel_dsi *intel_dsi = intel_attached_dsi(connector); + struct intel_encoder *intel_encoder = &intel_dsi->base; + enum intel_display_power_domain power_domain; + enum drm_connector_status connector_status; + struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private; + DRM_DEBUG_KMS("\n"); - return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev); + power_domain = intel_display_port_power_domain(intel_encoder); + + intel_display_power_get(dev_priv, power_domain); + connector_status = intel_dsi->dev.dev_ops->detect(&intel_dsi->dev); + intel_display_power_put(dev_priv, power_domain); + + return connector_status; } static int intel_dsi_get_modes(struct drm_connector *connector) @@ -586,6 +602,7 @@ bool intel_dsi_init(struct drm_device *dev) intel_encoder->get_config = intel_dsi_get_config; intel_connector->get_hw_state = intel_connector_get_hw_state; + intel_connector->unregister = intel_connector_unregister; for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) { dsi = &intel_dsi_devices[i]; @@ -603,7 +620,7 @@ bool intel_dsi_init(struct drm_device *dev) intel_encoder->type = INTEL_OUTPUT_DSI; intel_encoder->crtc_mask = (1 << 0); /* XXX */ - intel_encoder->cloneable = false; + intel_encoder->cloneable = 0; drm_connector_init(dev, connector, &intel_dsi_connector_funcs, DRM_MODE_CONNECTOR_DSI); @@ -624,7 +641,7 @@ bool intel_dsi_init(struct drm_device *dev) } fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; - intel_panel_init(&intel_connector->panel, fixed_mode); + 
intel_panel_init(&intel_connector->panel, fixed_mode, NULL); return true; diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index eeff998e52e..7fe3feedfe0 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -477,6 +477,7 @@ void intel_dvo_init(struct drm_device *dev) intel_encoder->compute_config = intel_dvo_compute_config; intel_encoder->mode_set = intel_dvo_mode_set; intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; + intel_connector->unregister = intel_connector_unregister; /* Now, try to find a controller */ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { @@ -521,14 +522,15 @@ void intel_dvo_init(struct drm_device *dev) intel_encoder->crtc_mask = (1 << 0) | (1 << 1); switch (dvo->type) { case INTEL_DVO_CHIP_TMDS: - intel_encoder->cloneable = true; + intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) | + (1 << INTEL_OUTPUT_DVO); drm_connector_init(dev, connector, &intel_dvo_connector_funcs, DRM_MODE_CONNECTOR_DVII); encoder_type = DRM_MODE_ENCODER_TMDS; break; case INTEL_DVO_CHIP_LVDS: - intel_encoder->cloneable = false; + intel_encoder->cloneable = 0; drm_connector_init(dev, connector, &intel_dvo_connector_funcs, DRM_MODE_CONNECTOR_LVDS); diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 39eac9937a4..b4d44e62f0c 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -62,6 +62,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, { struct intel_fbdev *ifbdev = container_of(helper, struct intel_fbdev, helper); + struct drm_framebuffer *fb; struct drm_device *dev = helper->dev; struct drm_mode_fb_cmd2 mode_cmd = {}; struct drm_i915_gem_object *obj; @@ -93,18 +94,22 @@ static int intelfb_alloc(struct drm_fb_helper *helper, /* Flush everything out, we'll be doing GTT only from now on */ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); if (ret) { - DRM_ERROR("failed to pin fb: %d\n", ret); + DRM_ERROR("failed to pin obj: %d\n", ret); goto out_unref; } - ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); - if (ret) + fb = __intel_framebuffer_create(dev, &mode_cmd, obj); + if (IS_ERR(fb)) { + ret = PTR_ERR(fb); goto out_unpin; + } + + ifbdev->fb = to_intel_framebuffer(fb); return 0; out_unpin: - i915_gem_object_unpin(obj); + i915_gem_object_ggtt_unpin(obj); out_unref: drm_gem_object_unreference(&obj->base); out: @@ -116,23 +121,26 @@ static int intelfb_create(struct drm_fb_helper *helper, { struct intel_fbdev *ifbdev = container_of(helper, struct intel_fbdev, helper); - struct intel_framebuffer *intel_fb = &ifbdev->ifb; + struct intel_framebuffer *intel_fb = ifbdev->fb; struct drm_device *dev = helper->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct fb_info *info; struct drm_framebuffer *fb; struct drm_i915_gem_object *obj; int size, ret; + bool prealloc = false; mutex_lock(&dev->struct_mutex); - if (!intel_fb->obj) { + if (!intel_fb || WARN_ON(!intel_fb->obj)) { DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); ret = intelfb_alloc(helper, sizes); if (ret) goto out_unlock; + intel_fb = ifbdev->fb; } else { DRM_DEBUG_KMS("re-using BIOS fb\n"); + prealloc = true; sizes->fb_width = intel_fb->base.width; sizes->fb_height = intel_fb->base.height; } @@ -148,7 +156,7 @@ static int intelfb_create(struct drm_fb_helper *helper, info->par = helper; - fb = &ifbdev->ifb.base; + fb = &ifbdev->fb->base; ifbdev->helper.fb = fb; ifbdev->helper.fbdev = info; @@ -194,7 +202,7 @@ static int 
intelfb_create(struct drm_fb_helper *helper, * If the object is stolen however, it will be full of whatever * garbage was left in there. */ - if (ifbdev->ifb.obj->stolen) + if (ifbdev->fb->obj->stolen && !prealloc) memset_io(info->screen_base, 0, info->screen_size); /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ @@ -208,7 +216,7 @@ static int intelfb_create(struct drm_fb_helper *helper, return 0; out_unpin: - i915_gem_object_unpin(obj); + i915_gem_object_ggtt_unpin(obj); drm_gem_object_unreference(&obj->base); out_unlock: mutex_unlock(&dev->struct_mutex); @@ -236,7 +244,193 @@ static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, *blue = intel_crtc->lut_b[regno] << 8; } +static struct drm_fb_helper_crtc * +intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc) +{ + int i; + + for (i = 0; i < fb_helper->crtc_count; i++) + if (fb_helper->crtc_info[i].mode_set.crtc == crtc) + return &fb_helper->crtc_info[i]; + + return NULL; +} + +/* + * Try to read the BIOS display configuration and use it for the initial + * fb configuration. + * + * The BIOS or boot loader will generally create an initial display + * configuration for us that includes some set of active pipes and displays. + * This routine tries to figure out which pipes and connectors are active + * and stuffs them into the crtcs and modes array given to us by the + * drm_fb_helper code. + * + * The overall sequence is: + * intel_fbdev_init - from driver load + * intel_fbdev_init_bios - initialize the intel_fbdev using BIOS data + * drm_fb_helper_init - build fb helper structs + * drm_fb_helper_single_add_all_connectors - more fb helper structs + * intel_fbdev_initial_config - apply the config + * drm_fb_helper_initial_config - call ->probe then register_framebuffer() + * drm_setup_crtcs - build crtc config for fbdev + * intel_fb_initial_config - find active connectors etc + * drm_fb_helper_single_fb_probe - set up fbdev + * intelfb_create - re-use or alloc fb, build out fbdev structs + * + * Note that we don't make special consideration whether we could actually + * switch to the selected modes without a full modeset. E.g. when the display + * is in VGA mode we need to recalculate watermarks and set a new high-res + * framebuffer anyway. + */ +static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_crtc **crtcs, + struct drm_display_mode **modes, + bool *enabled, int width, int height) +{ + struct drm_device *dev = fb_helper->dev; + int i, j; + bool *save_enabled; + bool fallback = true; + int num_connectors_enabled = 0; + int num_connectors_detected = 0; + + /* + * If the user specified any force options, just bail here + * and use that config. 
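+ * (A connector counts as forced when connector->force is anything other + * than DRM_FORCE_UNSPECIFIED, e.g. when set through the video= kernel + * parameter, as in video=HDMI-A-1:e.)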
+ */ + for (i = 0; i < fb_helper->connector_count; i++) { + struct drm_fb_helper_connector *fb_conn; + struct drm_connector *connector; + + fb_conn = fb_helper->connector_info[i]; + connector = fb_conn->connector; + + if (!enabled[i]) + continue; + + if (connector->force != DRM_FORCE_UNSPECIFIED) + return false; + } + + save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool), + GFP_KERNEL); + if (!save_enabled) + return false; + + memcpy(save_enabled, enabled, dev->mode_config.num_connector); + + for (i = 0; i < fb_helper->connector_count; i++) { + struct drm_fb_helper_connector *fb_conn; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct drm_fb_helper_crtc *new_crtc; + + fb_conn = fb_helper->connector_info[i]; + connector = fb_conn->connector; + + if (connector->status == connector_status_connected) + num_connectors_detected++; + + if (!enabled[i]) { + DRM_DEBUG_KMS("connector %d not enabled, skipping\n", + connector->base.id); + continue; + } + + encoder = connector->encoder; + if (!encoder || WARN_ON(!encoder->crtc)) { + DRM_DEBUG_KMS("connector %d has no encoder or crtc, skipping\n", + connector->base.id); + enabled[i] = false; + continue; + } + + num_connectors_enabled++; + + new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc); + + /* + * Make sure we're not trying to drive multiple connectors + * with a single CRTC, since our cloning support may not + * match the BIOS. + */ + for (j = 0; j < fb_helper->connector_count; j++) { + if (crtcs[j] == new_crtc) { + DRM_DEBUG_KMS("fallback: cloned configuration\n"); + fallback = true; + goto out; + } + } + + DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", + fb_conn->connector->base.id); + + /* go for command line mode first */ + modes[i] = drm_pick_cmdline_mode(fb_conn, width, height); + + /* try for preferred next */ + if (!modes[i]) { + DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", + fb_conn->connector->base.id); + modes[i] = drm_has_preferred_mode(fb_conn, width, + height); + } + + /* last resort: use current mode */ + if (!modes[i]) { + /* + * IMPORTANT: We want to use the adjusted mode (i.e. + * after the panel fitter upscaling) as the initial + * config, not the input mode, which is what crtc->mode + * usually contains. But since our current fastboot + * code puts a mode derived from the post-pfit timings + * into crtc->mode this works out correctly. We don't + * use hwmode anywhere right now, so use it for this + * since the fb helper layer wants a pointer to + * something we own. + */ + intel_mode_from_pipe_config(&encoder->crtc->hwmode, + &to_intel_crtc(encoder->crtc)->config); + modes[i] = &encoder->crtc->hwmode; + } + crtcs[i] = new_crtc; + + DRM_DEBUG_KMS("connector %s on crtc %d: %s\n", + drm_get_connector_name(connector), + encoder->crtc->base.id, + modes[i]->name); + + fallback = false; + } + + /* + * If the BIOS didn't enable everything it could, fall back to having the + * same user experience of lighting up as much as possible, like the + * fbdev helper library does.
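+ * Concretely, the check below rejects the firmware config when fewer + * connectors were enabled than were detected, and fewer than the + * hardware has pipes for.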
+ */ + if (num_connectors_enabled != num_connectors_detected && + num_connectors_enabled < INTEL_INFO(dev)->num_pipes) { + DRM_DEBUG_KMS("fallback: Not all outputs enabled\n"); + DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled, + num_connectors_detected); + fallback = true; + } + +out: + if (fallback) { + DRM_DEBUG_KMS("Not using firmware configuration\n"); + memcpy(enabled, save_enabled, dev->mode_config.num_connector); + kfree(save_enabled); + return false; + } + + kfree(save_enabled); + return true; +} + static struct drm_fb_helper_funcs intel_fb_helper_funcs = { + .initial_config = intel_fb_initial_config, .gamma_set = intel_crtc_fb_gamma_set, .gamma_get = intel_crtc_fb_gamma_get, .fb_probe = intelfb_create, @@ -258,8 +452,139 @@ static void intel_fbdev_destroy(struct drm_device *dev, drm_fb_helper_fini(&ifbdev->helper); - drm_framebuffer_unregister_private(&ifbdev->ifb.base); - intel_framebuffer_fini(&ifbdev->ifb); + drm_framebuffer_unregister_private(&ifbdev->fb->base); + drm_framebuffer_remove(&ifbdev->fb->base); +} + +/* + * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible. + * The core display code will have read out the current plane configuration, + * so we use that to figure out if there's an object for us to use as the + * fb, and if so, we re-use it for the fbdev configuration. + * + * Note we only support a single fb shared across pipes for boot (mostly for + * fbcon), so we just find the biggest and use that. + */ +static bool intel_fbdev_init_bios(struct drm_device *dev, + struct intel_fbdev *ifbdev) +{ + struct intel_framebuffer *fb = NULL; + struct drm_crtc *crtc; + struct intel_crtc *intel_crtc; + struct intel_plane_config *plane_config = NULL; + unsigned int max_size = 0; + + if (!i915.fastboot) + return false; + + /* Find the largest fb */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + intel_crtc = to_intel_crtc(crtc); + + if (!intel_crtc->active || !crtc->primary->fb) { + DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n", + pipe_name(intel_crtc->pipe)); + continue; + } + + if (intel_crtc->plane_config.size > max_size) { + DRM_DEBUG_KMS("found possible fb from plane %c\n", + pipe_name(intel_crtc->pipe)); + plane_config = &intel_crtc->plane_config; + fb = to_intel_framebuffer(crtc->primary->fb); + max_size = plane_config->size; + } + } + + if (!fb) { + DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n"); + goto out; + } + + /* Now make sure all the pipes will fit into it */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + unsigned int cur_size; + + intel_crtc = to_intel_crtc(crtc); + + if (!intel_crtc->active) { + DRM_DEBUG_KMS("pipe %c not active, skipping\n", + pipe_name(intel_crtc->pipe)); + continue; + } + + DRM_DEBUG_KMS("checking plane %c for BIOS fb\n", + pipe_name(intel_crtc->pipe)); + + /* + * See if the plane fb we found above will fit on this + * pipe. Note we need to use the selected fb's pitch and bpp + * rather than the current pipe's, since they differ. + */ + cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay; + cur_size = cur_size * fb->base.bits_per_pixel / 8; + if (fb->base.pitches[0] < cur_size) { + DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n", + pipe_name(intel_crtc->pipe), + cur_size, fb->base.pitches[0]); + plane_config = NULL; + fb = NULL; + break; + } + + cur_size = intel_crtc->config.adjusted_mode.crtc_vdisplay; + cur_size = ALIGN(cur_size, plane_config->tiled ? (IS_GEN2(dev) ? 
16 : 8) : 1); + cur_size *= fb->base.pitches[0]; + DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n", + pipe_name(intel_crtc->pipe), + intel_crtc->config.adjusted_mode.crtc_hdisplay, + intel_crtc->config.adjusted_mode.crtc_vdisplay, + fb->base.bits_per_pixel, + cur_size); + + if (cur_size > max_size) { + DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n", + pipe_name(intel_crtc->pipe), + cur_size, max_size); + plane_config = NULL; + fb = NULL; + break; + } + + DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n", + pipe_name(intel_crtc->pipe), + max_size, cur_size); + } + + if (!fb) { + DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n"); + goto out; + } + + ifbdev->preferred_bpp = fb->base.bits_per_pixel; + ifbdev->fb = fb; + + drm_framebuffer_reference(&ifbdev->fb->base); + + /* Final pass to check if any active pipes don't have fbs */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + intel_crtc = to_intel_crtc(crtc); + + if (!intel_crtc->active) + continue; + + WARN(!crtc->primary->fb, + "re-used BIOS config but lost an fb on crtc %d\n", + crtc->base.id); + } + + + DRM_DEBUG_KMS("using BIOS fb for initial console\n"); + return true; + +out: + + return false; } int intel_fbdev_init(struct drm_device *dev) @@ -268,21 +593,25 @@ int intel_fbdev_init(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL); - if (!ifbdev) + if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0)) + return -ENODEV; + + ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); + if (ifbdev == NULL) return -ENOMEM; - dev_priv->fbdev = ifbdev; ifbdev->helper.funcs = &intel_fb_helper_funcs; + if (!intel_fbdev_init_bios(dev, ifbdev)) + ifbdev->preferred_bpp = 32; ret = drm_fb_helper_init(dev, &ifbdev->helper, - INTEL_INFO(dev)->num_pipes, - 4); + INTEL_INFO(dev)->num_pipes, 4); if (ret) { kfree(ifbdev); return ret; } + dev_priv->fbdev = ifbdev; drm_fb_helper_single_add_all_connectors(&ifbdev->helper); return 0; @@ -291,9 +620,10 @@ int intel_fbdev_init(struct drm_device *dev) void intel_fbdev_initial_config(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_fbdev *ifbdev = dev_priv->fbdev; /* Due to peculiar init order wrt to hpd handling this is separate. */ - drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32); + drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp); } void intel_fbdev_fini(struct drm_device *dev) @@ -322,7 +652,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state) * been restored from swap. If the object is stolen however, it will be * full of whatever garbage was left in there. 
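* (Stolen memory is a chunk of system RAM reserved by the BIOS; it is never * swapped out, so there is no saved content to restore and the buffer has to * be cleared by hand.)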
*/ - if (state == FBINFO_STATE_RUNNING && ifbdev->ifb.obj->stolen) + if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen) memset_io(info->screen_base, 0, info->screen_size); fb_set_suspend(info, state); @@ -331,7 +661,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state) void intel_fbdev_output_poll_changed(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); + if (dev_priv->fbdev) + drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); } void intel_fbdev_restore_mode(struct drm_device *dev) @@ -339,7 +670,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev) int ret; struct drm_i915_private *dev_priv = dev->dev_private; - if (INTEL_INFO(dev)->num_pipes == 0) + if (!dev_priv->fbdev) return; drm_modeset_lock_all(dev); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index ee3181ebcc9..b0413e19062 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -113,7 +113,8 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type) } static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type, - enum transcoder cpu_transcoder) + enum transcoder cpu_transcoder, + struct drm_i915_private *dev_priv) { switch (type) { case HDMI_INFOFRAME_TYPE_AVI: @@ -296,7 +297,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, u32 val = I915_READ(ctl_reg); data_reg = hsw_infoframe_data_reg(type, - intel_crtc->config.cpu_transcoder); + intel_crtc->config.cpu_transcoder, + dev_priv); if (data_reg == 0) return; @@ -423,7 +425,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; u32 reg = VIDEO_DIP_CTL; u32 val = I915_READ(reg); - u32 port; + u32 port = VIDEO_DIP_PORT(intel_dig_port->port); assert_hdmi_port_disabled(intel_hdmi); @@ -447,18 +449,6 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, return; } - switch (intel_dig_port->port) { - case PORT_B: - port = VIDEO_DIP_PORT_B; - break; - case PORT_C: - port = VIDEO_DIP_PORT_C; - break; - default: - BUG(); - return; - } - if (port != (val & VIDEO_DIP_PORT_MASK)) { if (val & VIDEO_DIP_ENABLE) { val &= ~VIDEO_DIP_ENABLE; @@ -489,7 +479,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); - u32 port; + u32 port = VIDEO_DIP_PORT(intel_dig_port->port); assert_hdmi_port_disabled(intel_hdmi); @@ -505,21 +495,6 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, return; } - switch (intel_dig_port->port) { - case PORT_B: - port = VIDEO_DIP_PORT_B; - break; - case PORT_C: - port = VIDEO_DIP_PORT_C; - break; - case PORT_D: - port = VIDEO_DIP_PORT_D; - break; - default: - BUG(); - return; - } - if (port != (val & VIDEO_DIP_PORT_MASK)) { if (val & VIDEO_DIP_ENABLE) { val &= ~VIDEO_DIP_ENABLE; @@ -692,8 +667,13 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + enum intel_display_power_domain power_domain; u32 tmp; + power_domain = intel_display_port_power_domain(encoder); + if (!intel_display_power_enabled(dev_priv, power_domain)) + return false; + tmp = I915_READ(intel_hdmi->hdmi_reg); if (!(tmp & SDVO_ENABLE)) @@ -868,6 +848,30 @@ intel_hdmi_mode_valid(struct 
drm_connector *connector, return MODE_OK; } +static bool hdmi_12bpc_possible(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct intel_encoder *encoder; + int count = 0, count_hdmi = 0; + + if (!HAS_PCH_SPLIT(dev)) + return false; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + if (encoder->new_crtc != crtc) + continue; + + count_hdmi += encoder->type == INTEL_OUTPUT_HDMI; + count++; + } + + /* + * HDMI 12bpc affects the clocks, so it's only possible + * when not cloning with other encoder types. + */ + return count_hdmi > 0 && count_hdmi == count; +} + bool intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) { @@ -900,7 +904,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, * within limits. */ if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink && - clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) { + clock_12bpc <= portclock_limit && + hdmi_12bpc_possible(encoder->new_crtc)) { DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); desired_bpp = 12*3; @@ -934,11 +939,15 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_i915_private *dev_priv = dev->dev_private; struct edid *edid; + enum intel_display_power_domain power_domain; enum drm_connector_status status = connector_status_disconnected; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, drm_get_connector_name(connector)); + power_domain = intel_display_port_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); + intel_hdmi->has_hdmi_sink = false; intel_hdmi->has_audio = false; intel_hdmi->rgb_quant_range_selectable = false; @@ -966,31 +975,48 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) intel_encoder->type = INTEL_OUTPUT_HDMI; } + intel_display_power_put(dev_priv, power_domain); + return status; } static int intel_hdmi_get_modes(struct drm_connector *connector) { - struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct intel_encoder *intel_encoder = intel_attached_encoder(connector); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); struct drm_i915_private *dev_priv = connector->dev->dev_private; + enum intel_display_power_domain power_domain; + int ret; /* We should parse the EDID data and find out if it's an HDMI sink so * we can send audio to it. 
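* The DDC read below now follows the usual power domain bracket: * * intel_display_power_get(dev_priv, power_domain); * ...EDID/DDC access... * intel_display_power_put(dev_priv, power_domain);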
*/ - return intel_ddc_get_modes(connector, + power_domain = intel_display_port_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); + + ret = intel_ddc_get_modes(connector, intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus)); + + intel_display_power_put(dev_priv, power_domain); + + return ret; } static bool intel_hdmi_detect_audio(struct drm_connector *connector) { - struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct intel_encoder *intel_encoder = intel_attached_encoder(connector); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); struct drm_i915_private *dev_priv = connector->dev->dev_private; + enum intel_display_power_domain power_domain; struct edid *edid; bool has_audio = false; + power_domain = intel_display_port_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); + edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus)); @@ -1000,6 +1026,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector) kfree(edid); } + intel_display_power_put(dev_priv, power_domain); + return has_audio; } @@ -1261,6 +1289,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; else intel_connector->get_hw_state = intel_connector_get_hw_state; + intel_connector->unregister = intel_connector_unregister; intel_hdmi_add_properties(intel_hdmi, connector); @@ -1314,7 +1343,14 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) intel_encoder->type = INTEL_OUTPUT_HDMI; intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); - intel_encoder->cloneable = false; + intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG; + /* + * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems + * to work on real hardware. And since g4x can send infoframes to + * only one port anyway, nothing is lost by allowing it. 
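+ * (With cloneable now a bitmask of INTEL_OUTPUT_* types instead of a + * bool, HDMI always advertises cloning with analog, and additionally + * with other HDMI encoders on g4x.)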
+ */ + if (IS_G4X(dev)) + intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI; intel_dig_port->port = port; intel_dig_port->hdmi.hdmi_reg = hdmi_reg; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 8bcb93a2a9f..f1ecf916474 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -848,8 +848,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) struct drm_i915_private *dev_priv = dev->dev_private; /* use the module option value if specified */ - if (i915_lvds_channel_mode > 0) - return i915_lvds_channel_mode == 2; + if (i915.lvds_channel_mode > 0) + return i915.lvds_channel_mode == 2; if (dmi_check_system(intel_dual_link_lvds)) return true; @@ -899,6 +899,7 @@ void intel_lvds_init(struct drm_device *dev) struct drm_encoder *encoder; struct drm_display_mode *scan; /* *modes, *bios_mode; */ struct drm_display_mode *fixed_mode = NULL; + struct drm_display_mode *downclock_mode = NULL; struct edid *edid; struct drm_crtc *crtc; u32 lvds; @@ -957,11 +958,12 @@ void intel_lvds_init(struct drm_device *dev) intel_encoder->get_hw_state = intel_lvds_get_hw_state; intel_encoder->get_config = intel_lvds_get_config; intel_connector->get_hw_state = intel_connector_get_hw_state; + intel_connector->unregister = intel_connector_unregister; intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_LVDS; - intel_encoder->cloneable = false; + intel_encoder->cloneable = 0; if (HAS_PCH_SPLIT(dev)) intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); else if (IS_GEN4(dev)) @@ -1000,6 +1002,7 @@ void intel_lvds_init(struct drm_device *dev) * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. */ + mutex_lock(&dev->mode_config.mutex); edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin)); if (edid) { if (drm_add_edid_modes(connector, edid)) { @@ -1032,15 +1035,14 @@ void intel_lvds_init(struct drm_device *dev) fixed_mode = drm_mode_duplicate(dev, scan); if (fixed_mode) { - intel_connector->panel.downclock_mode = + downclock_mode = intel_find_panel_downclock(dev, fixed_mode, connector); - if (intel_connector->panel.downclock_mode != - NULL && i915_lvds_downclock) { + if (downclock_mode != NULL && + i915.lvds_downclock) { /* We found the downclock for LVDS. */ dev_priv->lvds_downclock_avail = true; dev_priv->lvds_downclock = - intel_connector->panel. downclock_mode->clock; DRM_DEBUG_KMS("LVDS downclock is found" " in EDID. Normal clock %dKhz, " @@ -1094,6 +1096,8 @@ void intel_lvds_init(struct drm_device *dev) goto failed; out: + mutex_unlock(&dev->mode_config.mutex); + lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); DRM_DEBUG_KMS("detected %s-link lvds configuration\n", lvds_encoder->is_dual_link ? 
"dual" : "single"); @@ -1116,17 +1120,17 @@ out: } drm_sysfs_connector_add(connector); - intel_panel_init(&intel_connector->panel, fixed_mode); + intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); intel_panel_setup_backlight(connector); return; failed: + mutex_unlock(&dev->mode_config.mutex); + DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); drm_connector_cleanup(connector); drm_encoder_cleanup(encoder); - if (fixed_mode) - drm_mode_destroy(dev, fixed_mode); kfree(lvds_encoder); kfree(lvds_connector); return; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index a759ecdb7a6..d8adc9104dc 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -189,7 +189,7 @@ struct intel_overlay { static struct overlay_registers __iomem * intel_overlay_map_regs(struct intel_overlay *overlay) { - drm_i915_private_t *dev_priv = overlay->dev->dev_private; + struct drm_i915_private *dev_priv = overlay->dev->dev_private; struct overlay_registers __iomem *regs; if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) @@ -212,7 +212,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, void (*tail)(struct intel_overlay *)) { struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; int ret; @@ -262,7 +262,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, bool load_polyphase_filter) { struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; u32 flip_addr = overlay->flip_addr; u32 tmp; @@ -293,7 +293,7 @@ static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) { struct drm_i915_gem_object *obj = overlay->old_vid_bo; - i915_gem_object_unpin(obj); + i915_gem_object_ggtt_unpin(obj); drm_gem_object_unreference(&obj->base); overlay->old_vid_bo = NULL; @@ -306,7 +306,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay) /* never have the overlay hw on without showing a frame */ BUG_ON(!overlay->vid_bo); - i915_gem_object_unpin(obj); + i915_gem_object_ggtt_unpin(obj); drm_gem_object_unreference(&obj->base); overlay->vid_bo = NULL; @@ -362,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; int ret; @@ -388,7 +388,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) static int intel_overlay_release_old_vid(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; int ret; @@ -606,14 +606,14 @@ static void update_colorkey(struct intel_overlay *overlay, { u32 key = overlay->color_key; - switch (overlay->crtc->base.fb->bits_per_pixel) { + switch (overlay->crtc->base.primary->fb->bits_per_pixel) { case 8: iowrite32(0, ®s->DCLRKV); iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, ®s->DCLRKM); break; case 16: - if (overlay->crtc->base.fb->depth == 15) { + if 
(overlay->crtc->base.primary->fb->depth == 15) { iowrite32(RGB15_TO_COLORKEY(key), ®s->DCLRKV); iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE, ®s->DCLRKM); @@ -782,7 +782,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, return 0; out_unpin: - i915_gem_object_unpin(new_bo); + i915_gem_object_ggtt_unpin(new_bo); return ret; } @@ -834,7 +834,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, static void update_pfit_vscale_ratio(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 pfit_control = I915_READ(PFIT_CONTROL); u32 ratio; @@ -1026,7 +1026,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_intel_overlay_put_image *put_image_rec = data; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_overlay *overlay; struct drm_mode_object *drmmode_obj; struct intel_crtc *crtc; @@ -1076,7 +1076,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); if (new_bo->tiling_mode) { - DRM_ERROR("buffer used for overlay image can not be tiled\n"); + DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n"); ret = -EINVAL; goto out_unlock; } @@ -1226,7 +1226,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_intel_overlay_attrs *attrs = data; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_overlay *overlay; struct overlay_registers __iomem *regs; int ret; @@ -1311,7 +1311,7 @@ out_unlock: void intel_setup_overlay(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_overlay *overlay; struct drm_i915_gem_object *reg_bo; struct overlay_registers __iomem *regs; @@ -1349,7 +1349,7 @@ void intel_setup_overlay(struct drm_device *dev) } overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; } else { - ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false); + ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE); if (ret) { DRM_ERROR("failed to pin overlay register bo\n"); goto out_free_bo; @@ -1386,7 +1386,7 @@ void intel_setup_overlay(struct drm_device *dev) out_unpin_bo: if (!OVERLAY_NEEDS_PHYSICAL(dev)) - i915_gem_object_unpin(reg_bo); + i915_gem_object_ggtt_unpin(reg_bo); out_free_bo: drm_gem_object_unreference(®_bo->base); out_free: @@ -1397,7 +1397,7 @@ out_free: void intel_cleanup_overlay(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; if (!dev_priv->overlay) return; @@ -1421,7 +1421,7 @@ struct intel_overlay_error_state { static struct overlay_registers __iomem * intel_overlay_map_regs_atomic(struct intel_overlay *overlay) { - drm_i915_private_t *dev_priv = overlay->dev->dev_private; + struct drm_i915_private *dev_priv = overlay->dev->dev_private; struct overlay_registers __iomem *regs; if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) @@ -1447,7 +1447,7 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, struct intel_overlay_error_state * intel_overlay_capture_error_state(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_overlay 
*overlay = dev_priv->overlay; struct intel_overlay_error_state *error; struct overlay_registers __iomem *regs; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 079ea38f14d..cb058408c70 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -33,8 +33,6 @@ #include <linux/moduleparam.h> #include "intel_drv.h" -#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ - void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode) @@ -325,13 +323,6 @@ out: pipe_config->gmch_pfit.lvds_border_bits = border; } -static int i915_panel_invert_brightness; -MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness " - "(-1 force normal, 0 machine defaults, 1 force inversion), please " - "report PCI device ID, subsystem vendor and subsystem device ID " - "to dri-devel@lists.freedesktop.org, if your machine needs it. " - "It will then be included in an upcoming module version."); -module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600); static u32 intel_panel_compute_brightness(struct intel_connector *connector, u32 val) { @@ -341,10 +332,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector, WARN_ON(panel->backlight.max == 0); - if (i915_panel_invert_brightness < 0) + if (i915.invert_brightness < 0) return val; - if (i915_panel_invert_brightness > 0 || + if (i915.invert_brightness > 0 || dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { return panel->backlight.max - val; } @@ -810,13 +801,13 @@ intel_panel_detect(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; /* Assume that the BIOS does not lie through the OpRegion... */ - if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) { + if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) { return ioread32(dev_priv->opregion.lid_state) & 0x1 ? connector_status_connected : connector_status_disconnected; } - switch (i915_panel_ignore_lid) { + switch (i915.panel_ignore_lid) { case -2: return connector_status_connected; case -1: @@ -1199,9 +1190,11 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev) } int intel_panel_init(struct intel_panel *panel, - struct drm_display_mode *fixed_mode) + struct drm_display_mode *fixed_mode, + struct drm_display_mode *downclock_mode) { panel->fixed_mode = fixed_mode; + panel->downclock_mode = downclock_mode; return 0; } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index e1fc35a7265..5874716774a 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -92,12 +92,12 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_framebuffer *fb = crtc->fb; + struct drm_framebuffer *fb = crtc->primary->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int cfb_pitch; - int plane, i; + int i; u32 fbc_ctl; cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE; @@ -109,7 +109,6 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc) cfb_pitch = (cfb_pitch / 32) - 1; else cfb_pitch = (cfb_pitch / 64) - 1; - plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; /* Clear old tags */ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) @@ -120,7 +119,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc) /* Set it up... 
*/ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; - fbc_ctl2 |= plane; + fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane); I915_WRITE(FBC_CONTROL2, fbc_ctl2); I915_WRITE(FBC_FENCE_OFF, crtc->y); } @@ -135,7 +134,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc) fbc_ctl |= obj->fence_reg; I915_WRITE(FBC_CONTROL, fbc_ctl); - DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c, ", + DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n", cfb_pitch, crtc->y, plane_name(intel_crtc->plane)); } @@ -150,21 +149,23 @@ static void g4x_enable_fbc(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_framebuffer *fb = crtc->fb; + struct drm_framebuffer *fb = crtc->primary->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; u32 dpfc_ctl; - dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; + dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN; + if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + else + dpfc_ctl |= DPFC_CTL_LIMIT_1X; dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; - I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); I915_WRITE(DPFC_FENCE_YOFF, crtc->y); /* enable it... */ - I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); + I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); } @@ -220,22 +221,20 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_framebuffer *fb = crtc->fb; + struct drm_framebuffer *fb = crtc->primary->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; u32 dpfc_ctl; - dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); - dpfc_ctl &= DPFC_RESERVED; - dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); - /* Set persistent mode for front-buffer rendering, ala X. 
*/ - dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE; + dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane); + if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + else + dpfc_ctl |= DPFC_CTL_LIMIT_1X; dpfc_ctl |= DPFC_CTL_FENCE_EN; if (IS_GEN5(dev)) dpfc_ctl |= obj->fence_reg; - I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); @@ -278,24 +277,31 @@ static void gen7_enable_fbc(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_framebuffer *fb = crtc->fb; + struct drm_framebuffer *fb = crtc->primary->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + u32 dpfc_ctl; - I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj)); + dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane); + if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + else + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; - I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X | - IVB_DPFC_CTL_FENCE_EN | - intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT); + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); if (IS_IVYBRIDGE(dev)) { /* WaFbcAsynchFlipDisableFbcQueue:ivb */ - I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS); + I915_WRITE(ILK_DISPLAY_CHICKEN1, + I915_READ(ILK_DISPLAY_CHICKEN1) | + ILK_FBCQ_DIS); } else { - /* WaFbcAsynchFlipDisableFbcQueue:hsw */ - I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe), - HSW_BYPASS_FBC_QUEUE); + /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ + I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe), + I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) | + HSW_FBCQ_DIS); } I915_WRITE(SNB_DPFC_CTL_SA, @@ -330,11 +336,11 @@ static void intel_fbc_work_fn(struct work_struct *__work) /* Double check that we haven't switched fb without cancelling * the prior work. 
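* (The enable is deferred to a delayed work item, so by the time it runs * the crtc may already be scanning out a different framebuffer; a stale * work item must then do nothing.)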
*/ - if (work->crtc->fb == work->fb) { + if (work->crtc->primary->fb == work->fb) { dev_priv->display.enable_fbc(work->crtc); dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane; - dev_priv->fbc.fb_id = work->crtc->fb->base.id; + dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id; dev_priv->fbc.y = work->crtc->y; } @@ -387,7 +393,7 @@ static void intel_enable_fbc(struct drm_crtc *crtc) } work->crtc = crtc; - work->fb = crtc->fb; + work->fb = crtc->primary->fb; INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); dev_priv->fbc.fbc_work = work; @@ -466,7 +472,7 @@ void intel_update_fbc(struct drm_device *dev) return; } - if (!i915_powersave) { + if (!i915.powersave) { if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) DRM_DEBUG_KMS("fbc disabled per module param\n"); return; @@ -493,25 +499,25 @@ void intel_update_fbc(struct drm_device *dev) } } - if (!crtc || crtc->fb == NULL) { + if (!crtc || crtc->primary->fb == NULL) { if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT)) DRM_DEBUG_KMS("no output, disabling\n"); goto out_disable; } intel_crtc = to_intel_crtc(crtc); - fb = crtc->fb; + fb = crtc->primary->fb; intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; adjusted_mode = &intel_crtc->config.adjusted_mode; - if (i915_enable_fbc < 0 && + if (i915.enable_fbc < 0 && INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) { if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT)) DRM_DEBUG_KMS("disabled per chip default\n"); goto out_disable; } - if (!i915_enable_fbc) { + if (!i915.enable_fbc) { if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) DRM_DEBUG_KMS("fbc disabled per module param\n"); goto out_disable; @@ -537,7 +543,7 @@ void intel_update_fbc(struct drm_device *dev) DRM_DEBUG_KMS("mode too large for compression, disabling\n"); goto out_disable; } - if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) && + if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) && intel_crtc->plane != PLANE_A) { if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE)) DRM_DEBUG_KMS("plane not A, disabling compression\n"); @@ -617,7 +623,7 @@ out_disable: static void i915_pineview_get_mem_freq(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 tmp; tmp = I915_READ(CLKCFG); @@ -656,7 +662,7 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev) static void i915_ironlake_get_mem_freq(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u16 ddrpll, csipll; ddrpll = I915_READ16(DDRMPLL1); @@ -1035,7 +1041,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) crtc = single_enabled_crtc(dev); if (crtc) { const struct drm_display_mode *adjusted_mode; - int pixel_size = crtc->fb->bits_per_pixel / 8; + int pixel_size = crtc->primary->fb->bits_per_pixel / 8; int clock; adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; @@ -1115,7 +1121,7 @@ static bool g4x_compute_wm0(struct drm_device *dev, clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; - pixel_size = crtc->fb->bits_per_pixel / 8; + pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* Use the small buffer method to calculate plane watermark */ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; @@ -1128,9 +1134,9 @@ static bool g4x_compute_wm0(struct drm_device *dev, *plane_wm = display->max_wm; /* Use the large buffer method to calculate cursor watermark */ - line_time_us = ((htotal * 1000) / 
clock); + line_time_us = max(htotal * 1000 / clock, 1); line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; - entries = line_count * 64 * pixel_size; + entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size; tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; if (tlb_miss > 0) entries += tlb_miss; @@ -1202,9 +1208,9 @@ static bool g4x_compute_srwm(struct drm_device *dev, clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; - pixel_size = crtc->fb->bits_per_pixel / 8; + pixel_size = crtc->primary->fb->bits_per_pixel / 8; - line_time_us = (htotal * 1000) / clock; + line_time_us = max(htotal * 1000 / clock, 1); line_count = (latency_ns / line_time_us + 1000) / 1000; line_size = hdisplay * pixel_size; @@ -1216,7 +1222,7 @@ static bool g4x_compute_srwm(struct drm_device *dev, *display_wm = entries + display->guard_size; /* calculate the self-refresh watermark for display cursor */ - entries = line_count * pixel_size * 64; + entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width; entries = DIV_ROUND_UP(entries, cursor->cacheline_size); *cursor_wm = entries + cursor->guard_size; @@ -1241,7 +1247,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev, return false; clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock; - pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */ + pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */ entries = (clock / 1000) * pixel_size; *plane_prec_mult = (entries > 256) ? @@ -1433,11 +1439,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) int clock = adjusted_mode->crtc_clock; int htotal = adjusted_mode->crtc_htotal; int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; - int pixel_size = crtc->fb->bits_per_pixel / 8; + int pixel_size = crtc->primary->fb->bits_per_pixel / 8; unsigned long line_time_us; int entries; - line_time_us = ((htotal * 1000) / clock); + line_time_us = max(htotal * 1000 / clock, 1); /* Use ns/us then divide to preserve precision */ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * @@ -1451,7 +1457,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) entries, srwm); entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * 64; + pixel_size * to_intel_crtc(crtc)->cursor_width; entries = DIV_ROUND_UP(entries, i965_cursor_wm_info.cacheline_size); cursor_sr = i965_cursor_wm_info.fifo_size - @@ -1506,7 +1512,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) crtc = intel_get_crtc_for_plane(dev, 0); if (intel_crtc_active(crtc)) { const struct drm_display_mode *adjusted_mode; - int cpp = crtc->fb->bits_per_pixel / 8; + int cpp = crtc->primary->fb->bits_per_pixel / 8; if (IS_GEN2(dev)) cpp = 4; @@ -1522,7 +1528,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) crtc = intel_get_crtc_for_plane(dev, 1); if (intel_crtc_active(crtc)) { const struct drm_display_mode *adjusted_mode; - int cpp = crtc->fb->bits_per_pixel / 8; + int cpp = crtc->primary->fb->bits_per_pixel / 8; if (IS_GEN2(dev)) cpp = 4; @@ -1559,11 +1565,11 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) int clock = adjusted_mode->crtc_clock; int htotal = adjusted_mode->crtc_htotal; int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w; - int pixel_size = enabled->fb->bits_per_pixel / 8; + int pixel_size = enabled->primary->fb->bits_per_pixel / 8; unsigned long line_time_us; int entries; - line_time_us = (htotal * 1000) / clock; + line_time_us = max(htotal * 
1000 / clock, 1); /* Use ns/us then divide to preserve precision */ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * @@ -1886,7 +1892,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, } /* Calculate the maximum FBC watermark */ -static unsigned int ilk_fbc_wm_max(struct drm_device *dev) +static unsigned int ilk_fbc_wm_max(const struct drm_device *dev) { /* max that registers can hold */ if (INTEL_INFO(dev)->gen >= 8) @@ -1895,7 +1901,7 @@ static unsigned int ilk_fbc_wm_max(struct drm_device *dev) return 15; } -static void ilk_compute_wm_maximums(struct drm_device *dev, +static void ilk_compute_wm_maximums(const struct drm_device *dev, int level, const struct intel_wm_config *config, enum intel_ddb_partitioning ddb_partitioning, @@ -1948,7 +1954,7 @@ static bool ilk_validate_wm_level(int level, return ret; } -static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, +static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, int level, const struct ilk_pipe_wm_parameters *p, struct intel_wm_level *result) @@ -2079,7 +2085,7 @@ static void intel_print_wm_latency(struct drm_device *dev, } } -static void intel_setup_wm_latency(struct drm_device *dev) +static void ilk_setup_wm_latency(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2111,10 +2117,10 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc, if (p->active) { p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal; p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); - p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8; + p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8; p->cur.bytes_per_pixel = 4; p->pri.horiz_pixels = intel_crtc->config.pipe_src_w; - p->cur.horiz_pixels = 64; + p->cur.horiz_pixels = intel_crtc->cursor_width; /* TODO: for now, assume primary and cursor planes are always enabled. */ p->pri.enabled = true; p->cur.enabled = true; @@ -2123,7 +2129,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc, list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) config->num_pipes_active += intel_crtc_active(crtc); - list_for_each_entry(plane, &dev->mode_config.plane_list, head) { + drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { struct intel_plane *intel_plane = to_intel_plane(plane); if (intel_plane->pipe == pipe) @@ -2140,7 +2146,7 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc, struct intel_pipe_wm *pipe_wm) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + const struct drm_i915_private *dev_priv = dev->dev_private; int level, max_level = ilk_wm_max_level(dev); /* LP0 watermark maximums depend on this pipe alone */ struct intel_wm_config config = { @@ -2738,7 +2744,7 @@ intel_alloc_context_page(struct drm_device *dev) return NULL; } - ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false); + ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0); if (ret) { DRM_ERROR("failed to pin power context: %d\n", ret); goto err_unref; @@ -2753,7 +2759,7 @@ intel_alloc_context_page(struct drm_device *dev) return ctx; err_unpin: - i915_gem_object_unpin(ctx); + i915_gem_object_ggtt_unpin(ctx); err_unref: drm_gem_object_unreference(&ctx->base); return NULL; @@ -2901,9 +2907,9 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val) * the hw runs at the minimal clock before selecting the desired * frequency, if the down threshold expires in that window we will not * receive a down interrupt. 
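For readers following the register layout, a minimal user-space sketch of the packing gen6_rps_limits() performs just below: bits 31:24 hold the down-clock limit (soft max) and bits 23:16 the up-clock limit (soft min), with the min limit only latched once the request is at or below it, per the race described above. The helper name and test values are illustrative.

#include <stdint.h>
#include <stdio.h>

static uint32_t rps_limits(uint8_t max_soft, uint8_t min_soft, uint8_t val)
{
	uint32_t limits = (uint32_t)max_soft << 24;

	/* Only program the floor once we have reached it, so the
	 * "down" interrupt keeps firing until then. */
	if (val <= min_soft)
		limits |= (uint32_t)min_soft << 16;

	return limits;
}

int main(void)
{
	printf("%08x\n", rps_limits(22, 7, 10));	/* 16000000: min not latched */
	printf("%08x\n", rps_limits(22, 7, 7));		/* 16070000: at the floor */
	return 0;
}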
*/ - limits = dev_priv->rps.max_delay << 24; - if (val <= dev_priv->rps.min_delay) - limits |= dev_priv->rps.min_delay << 16; + limits = dev_priv->rps.max_freq_softlimit << 24; + if (val <= dev_priv->rps.min_freq_softlimit) + limits |= dev_priv->rps.min_freq_softlimit << 16; return limits; } @@ -2915,26 +2921,26 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) new_power = dev_priv->rps.power; switch (dev_priv->rps.power) { case LOW_POWER: - if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay) + if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq) new_power = BETWEEN; break; case BETWEEN: - if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay) + if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq) new_power = LOW_POWER; - else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay) + else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq) new_power = HIGH_POWER; break; case HIGH_POWER: - if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay) + if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq) new_power = BETWEEN; break; } /* Max/min bins are special */ - if (val == dev_priv->rps.min_delay) + if (val == dev_priv->rps.min_freq_softlimit) new_power = LOW_POWER; - if (val == dev_priv->rps.max_delay) + if (val == dev_priv->rps.max_freq_softlimit) new_power = HIGH_POWER; if (new_power == dev_priv->rps.power) return; @@ -3000,41 +3006,113 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) dev_priv->rps.last_adj = 0; } +static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) +{ + u32 mask = 0; + + if (val > dev_priv->rps.min_freq_softlimit) + mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; + if (val < dev_priv->rps.max_freq_softlimit) + mask |= GEN6_PM_RP_UP_THRESHOLD; + + /* IVB and SNB hard hangs on looping batchbuffer + * if GEN6_PM_UP_EI_EXPIRED is masked. + */ + if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev)) + mask |= GEN6_PM_RP_UP_EI_EXPIRED; + + return ~mask; +} + +/* gen6_set_rps is called to update the frequency request, but should also be + * called when the range (min_delay and max_delay) is modified so that we can + * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ void gen6_set_rps(struct drm_device *dev, u8 val) { struct drm_i915_private *dev_priv = dev->dev_private; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - WARN_ON(val > dev_priv->rps.max_delay); - WARN_ON(val < dev_priv->rps.min_delay); - - if (val == dev_priv->rps.cur_delay) - return; + WARN_ON(val > dev_priv->rps.max_freq_softlimit); + WARN_ON(val < dev_priv->rps.min_freq_softlimit); - gen6_set_rps_thresholds(dev_priv, val); + /* min/max delay may still have been modified so be sure to + * write the limits value. + */ + if (val != dev_priv->rps.cur_freq) { + gen6_set_rps_thresholds(dev_priv, val); - if (IS_HASWELL(dev)) - I915_WRITE(GEN6_RPNSWREQ, - HSW_FREQUENCY(val)); - else - I915_WRITE(GEN6_RPNSWREQ, - GEN6_FREQUENCY(val) | - GEN6_OFFSET(0) | - GEN6_AGGRESSIVE_TURBO); + if (IS_HASWELL(dev)) + I915_WRITE(GEN6_RPNSWREQ, + HSW_FREQUENCY(val)); + else + I915_WRITE(GEN6_RPNSWREQ, + GEN6_FREQUENCY(val) | + GEN6_OFFSET(0) | + GEN6_AGGRESSIVE_TURBO); + } /* Make sure we continue to get interrupts * until we hit the minimum or maximum frequencies. 
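The new gen6_rps_pm_mask() above derives the PM interrupt mask from where the requested frequency sits in the software range: only interrupts that can still do something are left unmasked. A self-contained sketch of that derivation, with made-up bit values standing in for the GEN6_PM_* definitions and the SNB/IVB EI_EXPIRED quirk omitted:

#include <stdint.h>
#include <stdio.h>

#define PM_RP_UP_THRESHOLD	(1u << 5)
#define PM_RP_DOWN_THRESHOLD	(1u << 6)
#define PM_RP_DOWN_TIMEOUT	(1u << 7)

static uint32_t rps_pm_mask(uint8_t val, uint8_t min_soft, uint8_t max_soft)
{
	uint32_t wanted = 0;

	if (val > min_soft)	/* can still clock down */
		wanted |= PM_RP_DOWN_THRESHOLD | PM_RP_DOWN_TIMEOUT;
	if (val < max_soft)	/* can still clock up */
		wanted |= PM_RP_UP_THRESHOLD;

	return ~wanted;		/* PMINTRMSK: a set bit means masked */
}

int main(void)
{
	printf("%08x\n", rps_pm_mask(7, 7, 22));	/* at floor: up events only */
	printf("%08x\n", rps_pm_mask(22, 7, 22));	/* at ceiling: down events only */
	printf("%08x\n", rps_pm_mask(12, 7, 22));	/* in between: both unmasked */
	return 0;
}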
*/ - I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, - gen6_rps_limits(dev_priv, val)); + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val)); + I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); POSTING_READ(GEN6_RPNSWREQ); - dev_priv->rps.cur_delay = val; - + dev_priv->rps.cur_freq = val; trace_intel_gpu_freq_change(val * 50); } +/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down + * + * * If Gfx is Idle, then + * 1. Mask Turbo interrupts + * 2. Bring up Gfx clock + * 3. Change the freq to Rpn and wait till P-Unit updates freq + * 4. Clear the Force GFX CLK ON bit so that Gfx can clock down + * 5. Unmask Turbo interrupts +*/ +static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) +{ + /* + * When we are idle, drop to min voltage state. + */ + + if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit) + return; + + /* Mask turbo interrupts so that they will not come in between */ + I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); + + /* Bring up the Gfx clock */ + I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, + I915_READ(VLV_GTLC_SURVIVABILITY_REG) | + VLV_GFX_CLK_FORCE_ON_BIT); + + if (wait_for(((VLV_GFX_CLK_STATUS_BIT & + I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) { + DRM_ERROR("GFX_CLK_ON request timed out\n"); + return; + } + + dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit; + + vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, + dev_priv->rps.min_freq_softlimit); + + if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) + & GENFREQSTATUS) == 0, 5)) + DRM_ERROR("timed out waiting for Punit\n"); + + /* Release the Gfx clock */ + I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, + I915_READ(VLV_GTLC_SURVIVABILITY_REG) & + ~VLV_GFX_CLK_FORCE_ON_BIT); + + I915_WRITE(GEN6_PMINTRMSK, + gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); +} + void gen6_rps_idle(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; @@ -3042,9 +3120,9 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->rps.hw_lock); if (dev_priv->rps.enabled) { if (IS_VALLEYVIEW(dev)) - valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay); + vlv_set_rps_idle(dev_priv); else - gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); + gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); dev_priv->rps.last_adj = 0; } mutex_unlock(&dev_priv->rps.hw_lock); @@ -3057,9 +3135,9 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->rps.hw_lock); if (dev_priv->rps.enabled) { if (IS_VALLEYVIEW(dev)) - valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay); + valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit); else - gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay); + gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit); dev_priv->rps.last_adj = 0; } mutex_unlock(&dev_priv->rps.hw_lock); @@ -3070,21 +3148,20 @@ void valleyview_set_rps(struct drm_device *dev, u8 val) struct drm_i915_private *dev_priv = dev->dev_private; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - WARN_ON(val > dev_priv->rps.max_delay); - WARN_ON(val < dev_priv->rps.min_delay); + WARN_ON(val > dev_priv->rps.max_freq_softlimit); + WARN_ON(val < dev_priv->rps.min_freq_softlimit); DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n", - vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay), - dev_priv->rps.cur_delay, + vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq), + dev_priv->rps.cur_freq, vlv_gpu_freq(dev_priv, val), val); - if (val == dev_priv->rps.cur_delay) - return; -
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); + if (val != dev_priv->rps.cur_freq) + vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); - dev_priv->rps.cur_delay = val; + I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); + dev_priv->rps.cur_freq = val; trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val)); } @@ -3093,7 +3170,8 @@ static void gen6_disable_rps_interrupts(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); - I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS); + I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & + ~dev_priv->pm_rps_events); /* Complete PM interrupt masking here doesn't race with the rps work * item again unmasking PM interrupts because that is using a different * register (PMIMR) to mask PM interrupts. The only risk is in leaving @@ -3103,7 +3181,7 @@ static void gen6_disable_rps_interrupts(struct drm_device *dev) dev_priv->rps.pm_iir = 0; spin_unlock_irq(&dev_priv->irq_lock); - I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); + I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events); } static void gen6_disable_rps(struct drm_device *dev) @@ -3123,25 +3201,14 @@ static void valleyview_disable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_CONTROL, 0); gen6_disable_rps_interrupts(dev); - - if (dev_priv->vlv_pctx) { - drm_gem_object_unreference(&dev_priv->vlv_pctx->base); - dev_priv->vlv_pctx = NULL; - } } static void intel_print_rc6_info(struct drm_device *dev, u32 mode) { - if (IS_GEN6(dev)) - DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); - - if (IS_HASWELL(dev)) - DRM_DEBUG_DRIVER("Haswell: only RC6 available\n"); - DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", - (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", - (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", - (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); + (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", + (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", + (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); } int intel_enable_rc6(const struct drm_device *dev) @@ -3151,44 +3218,28 @@ int intel_enable_rc6(const struct drm_device *dev) return 0; /* Respect the kernel parameter if it is set */ - if (i915_enable_rc6 >= 0) - return i915_enable_rc6; + if (i915.enable_rc6 >= 0) + return i915.enable_rc6; /* Disable RC6 on Ironlake */ if (INTEL_INFO(dev)->gen == 5) return 0; - if (IS_HASWELL(dev)) - return INTEL_RC6_ENABLE; - - /* snb/ivb have more than one rc6 state. */ - if (INTEL_INFO(dev)->gen == 6) - return INTEL_RC6_ENABLE; + if (IS_IVYBRIDGE(dev)) + return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); - return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); + return INTEL_RC6_ENABLE; } static void gen6_enable_rps_interrupts(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 enabled_intrs; spin_lock_irq(&dev_priv->irq_lock); WARN_ON(dev_priv->rps.pm_iir); - snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); - I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); + snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); + I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events); spin_unlock_irq(&dev_priv->irq_lock); - - /* only unmask PM interrupts we need. Mask all others. */ - enabled_intrs = GEN6_PM_RPS_EVENTS; - - /* IVB and SNB hard hangs on looping batchbuffer - * if GEN6_PM_UP_EI_EXPIRED is masked. 
- */ - if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) - enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED; - - I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs); } static void gen8_enable_rps(struct drm_device *dev) @@ -3222,10 +3273,10 @@ static void gen8_enable_rps(struct drm_device *dev) /* 3: Enable RC6 */ if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; - DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off"); + intel_print_rc6_info(dev, rc6_mask); I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | - GEN6_RC_CTL_EI_MODE(1) | - rc6_mask); + GEN6_RC_CTL_EI_MODE(1) | + rc6_mask); /* 4 Program defaults and thresholds for RPS*/ I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */ @@ -3235,8 +3286,8 @@ static void gen8_enable_rps(struct drm_device *dev) /* Docs recommend 900MHz, and 300 MHz respectively */ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, - dev_priv->rps.max_delay << 24 | - dev_priv->rps.min_delay << 16); + dev_priv->rps.max_freq_softlimit << 24 | + dev_priv->rps.min_freq_softlimit << 16); I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/ @@ -3269,7 +3320,7 @@ static void gen6_enable_rps(struct drm_device *dev) struct intel_ring_buffer *ring; u32 rp_state_cap; u32 gt_perf_status; - u32 rc6vids, pcu_mbox, rc6_mask = 0; + u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; u32 gtfifodbg; int rc6_mode; int i, ret; @@ -3295,13 +3346,23 @@ static void gen6_enable_rps(struct drm_device *dev) rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); - /* In units of 50MHz */ - dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff; - dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff; - dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff; - dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff; - dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay; - dev_priv->rps.cur_delay = 0; + /* All of these values are in units of 50MHz */ + dev_priv->rps.cur_freq = 0; + /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */ + dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; + dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; + dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; + /* XXX: only BYT has a special efficient freq */ + dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; + /* hw_max = RP0 until we check for overclocking */ + dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; + + /* Preserve min/max settings in case of re-init */ + if (dev_priv->rps.max_freq_softlimit == 0) + dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; + + if (dev_priv->rps.min_freq_softlimit == 0) + dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -3350,21 +3411,19 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); - if (!ret) { - pcu_mbox = 0; - ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); - if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */ - DRM_DEBUG_DRIVER("Overclocking supported. 
Max: %dMHz, Overclock max: %dMHz\n", - (dev_priv->rps.max_delay & 0xff) * 50, - (pcu_mbox & 0xff) * 50); - dev_priv->rps.hw_max = pcu_mbox & 0xff; - } - } else { + if (ret) DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); + + ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); + if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */ + DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n", + (dev_priv->rps.max_freq_softlimit & 0xff) * 50, + (pcu_mbox & 0xff) * 50); + dev_priv->rps.max_freq = pcu_mbox & 0xff; } dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); + gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); gen6_enable_rps_interrupts(dev); @@ -3420,9 +3479,9 @@ void gen6_update_ring_freq(struct drm_device *dev) * to use for memory access. We do this by specifying the IA frequency * the PCU should use as a reference to determine the ring frequency. */ - for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay; + for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit; gpu_freq--) { - int diff = dev_priv->rps.max_delay - gpu_freq; + int diff = dev_priv->rps.max_freq_softlimit - gpu_freq; unsigned int ia_freq = 0, ring_freq = 0; if (INTEL_INFO(dev)->gen >= 8) { @@ -3485,6 +3544,15 @@ int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; } +/* Check that the pctx buffer wasn't moved under us. */ +static void valleyview_check_pctx(struct drm_i915_private *dev_priv) +{ + unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; + + WARN_ON(pctx_addr != dev_priv->mm.stolen_base + + dev_priv->vlv_pctx->stolen->start); +} + static void valleyview_setup_pctx(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3529,6 +3597,17 @@ out: dev_priv->vlv_pctx = pctx; } +static void valleyview_cleanup_pctx(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (WARN_ON(!dev_priv->vlv_pctx)) + return; + + drm_gem_object_unreference(&dev_priv->vlv_pctx->base); + dev_priv->vlv_pctx = NULL; +} + static void valleyview_enable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3538,6 +3617,8 @@ static void valleyview_enable_rps(struct drm_device *dev) WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + valleyview_check_pctx(dev_priv); + if ((gtfifodbg = I915_READ(GTFIFODBG))) { DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", gtfifodbg); @@ -3588,32 +3669,39 @@ static void valleyview_enable_rps(struct drm_device *dev) DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ?
"yes" : "no"); DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); - dev_priv->rps.cur_delay = (val >> 8) & 0xff; + dev_priv->rps.cur_freq = (val >> 8) & 0xff; DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", - vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay), - dev_priv->rps.cur_delay); + vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq), + dev_priv->rps.cur_freq); - dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv); - dev_priv->rps.hw_max = dev_priv->rps.max_delay; + dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv); + dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", - vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay), - dev_priv->rps.max_delay); + vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq), + dev_priv->rps.max_freq); - dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv); + dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv); DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", - vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay), - dev_priv->rps.rpe_delay); + vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), + dev_priv->rps.efficient_freq); - dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv); + dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv); DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", - vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay), - dev_priv->rps.min_delay); + vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq), + dev_priv->rps.min_freq); + + /* Preserve min/max settings in case of re-init */ + if (dev_priv->rps.max_freq_softlimit == 0) + dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; + + if (dev_priv->rps.min_freq_softlimit == 0) + dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", - vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay), - dev_priv->rps.rpe_delay); + vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), + dev_priv->rps.efficient_freq); - valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); + valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); gen6_enable_rps_interrupts(dev); @@ -3625,13 +3713,13 @@ void ironlake_teardown_rc6(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; if (dev_priv->ips.renderctx) { - i915_gem_object_unpin(dev_priv->ips.renderctx); + i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx); drm_gem_object_unreference(&dev_priv->ips.renderctx->base); dev_priv->ips.renderctx = NULL; } if (dev_priv->ips.pwrctx) { - i915_gem_object_unpin(dev_priv->ips.pwrctx); + i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx); drm_gem_object_unreference(&dev_priv->ips.pwrctx->base); dev_priv->ips.pwrctx = NULL; } @@ -3823,9 +3911,10 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) { + struct drm_device *dev = dev_priv->dev; unsigned long val; - if (dev_priv->info->gen != 5) + if (INTEL_INFO(dev)->gen != 5) return 0; spin_lock_irq(&mchdev_lock); @@ -3854,6 +3943,7 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv) static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) { + struct drm_device *dev = dev_priv->dev; static const struct v_table { u16 vd; /* in .1 mil */ u16 vm; /* in .1 mil */ @@ -3987,7 +4077,7 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) { 16000, 14875, }, { 16125, 15000, }, }; - if (dev_priv->info->is_mobile) + if (INTEL_INFO(dev)->is_mobile) return v_table[pxvid].vm; else return 
v_table[pxvid].vd; @@ -4030,7 +4120,9 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) void i915_update_gfx_val(struct drm_i915_private *dev_priv) { - if (dev_priv->info->gen != 5) + struct drm_device *dev = dev_priv->dev; + + if (INTEL_INFO(dev)->gen != 5) return; spin_lock_irq(&mchdev_lock); @@ -4047,7 +4139,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) assert_spin_locked(&mchdev_lock); - pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4)); + pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4)); pxvid = (pxvid >> 24) & 0x7f; ext_v = pvid_to_extvid(dev_priv, pxvid); @@ -4079,9 +4171,10 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) { + struct drm_device *dev = dev_priv->dev; unsigned long val; - if (dev_priv->info->gen != 5) + if (INTEL_INFO(dev)->gen != 5) return 0; spin_lock_irq(&mchdev_lock); @@ -4270,6 +4363,7 @@ void intel_gpu_ips_teardown(void) i915_mch_dev = NULL; spin_unlock_irq(&mchdev_lock); } + static void intel_init_emon(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -4341,6 +4435,18 @@ static void intel_init_emon(struct drm_device *dev) dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); } +void intel_init_gt_powersave(struct drm_device *dev) +{ + if (IS_VALLEYVIEW(dev)) + valleyview_setup_pctx(dev); +} + +void intel_cleanup_gt_powersave(struct drm_device *dev) +{ + if (IS_VALLEYVIEW(dev)) + valleyview_cleanup_pctx(dev); +} + void intel_disable_gt_powersave(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -4395,8 +4501,6 @@ void intel_enable_gt_powersave(struct drm_device *dev) ironlake_enable_rc6(dev); intel_init_emon(dev); } else if (IS_GEN6(dev) || IS_GEN7(dev)) { - if (IS_VALLEYVIEW(dev)) - valleyview_setup_pctx(dev); /* * PCU communication is slow and this doesn't need to be * done at any specific time, so do this out of our fast path @@ -4587,6 +4691,17 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); + /* + * BSpec recommends 8x4 when MSAA is used, + * however in practice 16x4 seems fastest. + * + * Note that PS/WM thread counts depend on the WIZ hashing + * disable bit, which we don't touch here, but it's good + * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). + */ + I915_WRITE(GEN6_GT_MODE, + GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); + ilk_init_lp_watermarks(dev); I915_WRITE(CACHE_MODE_0, @@ -4607,17 +4722,24 @@ static void gen6_init_clock_gating(struct drm_device *dev) * According to the spec, bit 11 (RCCUNIT) must also be set, * but we didn't debug actual testcases to find it out. * - * Also apply WaDisableVDSUnitClockGating:snb and - * WaDisableRCPBUnitClockGating:snb. + * WaDisableRCCUnitClockGating:snb + * WaDisableRCPBUnitClockGating:snb */ I915_WRITE(GEN6_UCGCTL2, - GEN7_VDSUNIT_CLOCK_GATE_DISABLE | GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | GEN6_RCCUNIT_CLOCK_GATE_DISABLE); - /* Bspec says we need to always set all mask bits. */ - I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) | - _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL); + /* WaStripsFansDisableFastClipPerformanceFix:snb */ + I915_WRITE(_3D_CHICKEN3, + _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL)); + + /* + * Bspec says: + * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and + * 3DSTATE_SF number of SF output attributes is more than 16."
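The _MASKED_BIT_ENABLE() writes in these hunks rely on the hardware's masked-bit register convention. A small sketch of how such a write behaves, assuming the usual layout where the top 16 bits select which of the low 16 bits change; the helper names mirror the driver's macros but are redefined locally:

#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a)	((a) << 16)

/* Model of the hardware's behaviour on a masked-bit register write:
 * untouched fields keep their old value, no read-modify-write needed. */
static uint16_t reg_write_masked(uint16_t old, uint32_t val)
{
	uint16_t mask = val >> 16;	/* which bits to touch */
	uint16_t bits = val & 0xffff;	/* their new values */

	return (old & ~mask) | (bits & mask);
}

int main(void)
{
	uint16_t reg = 0x0003;

	reg = reg_write_masked(reg, MASKED_BIT_ENABLE(0x0100));
	reg = reg_write_masked(reg, MASKED_BIT_DISABLE(0x0002));
	printf("%04x\n", reg);	/* 0101: bit 8 set, bit 1 cleared, bit 0 kept */
	return 0;
}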
+ */ + I915_WRITE(_3D_CHICKEN3, + _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH)); /* * According to the spec the following bits should be @@ -4643,11 +4765,6 @@ static void gen6_init_clock_gating(struct drm_device *dev) g4x_disable_trickle_feed(dev); - /* The default value should be 0x200 according to docs, but the two - * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ - I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff)); - I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); - cpt_init_clock_gating(dev); gen6_check_mch_setup(dev); @@ -4657,14 +4774,17 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) { uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); + /* + * WaVSThreadDispatchOverride:ivb,vlv + * + * This actually overrides the dispatch + * mode for all thread types. + */ reg &= ~GEN7_FF_SCHED_MASK; reg |= GEN7_FF_TS_SCHED_HW; reg |= GEN7_FF_VS_SCHED_HW; reg |= GEN7_FF_DS_SCHED_HW; - if (IS_HASWELL(dev_priv->dev)) - reg &= ~GEN7_FF_VS_REF_CNT_FFME; - I915_WRITE(GEN7_FF_THREAD_MODE, reg); } @@ -4702,7 +4822,7 @@ static void lpt_suspend_hw(struct drm_device *dev) static void gen8_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - enum pipe i; + enum pipe pipe; I915_WRITE(WM3_LP_ILK, 0); I915_WRITE(WM2_LP_ILK, 0); @@ -4711,8 +4831,19 @@ static void gen8_init_clock_gating(struct drm_device *dev) /* FIXME(BDW): Check all the w/a, some might only apply to * pre-production hw. */ - WARN(!i915_preliminary_hw_support, - "GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n"); + /* WaDisablePartialInstShootdown:bdw */ + I915_WRITE(GEN8_ROW_CHICKEN, + _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE)); + + /* WaDisableThreadStallDopClockGating:bdw */ + /* FIXME: Unclear whether we really need this on production bdw. */ + I915_WRITE(GEN8_ROW_CHICKEN, + _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE)); + + /* + * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for + * pre-production hardware + */ I915_WRITE(HALF_SLICE_CHICKEN3, _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS)); I915_WRITE(HALF_SLICE_CHICKEN3, @@ -4736,10 +4867,10 @@ static void gen8_init_clock_gating(struct drm_device *dev) I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ - for_each_pipe(i) { - I915_WRITE(CHICKEN_PIPESL_1(i), - I915_READ(CHICKEN_PIPESL_1(i) | - DPRS_MASK_VBLANK_SRD)); + for_each_pipe(pipe) { + I915_WRITE(CHICKEN_PIPESL_1(pipe), + I915_READ(CHICKEN_PIPESL_1(pipe)) | + BDW_DPRS_MASK_VBLANK_SRD); } /* Use Force Non-Coherent whenever executing a 3D context. This is a @@ -4755,6 +4886,28 @@ static void gen8_init_clock_gating(struct drm_device *dev) I915_WRITE(GEN7_FF_THREAD_MODE, I915_READ(GEN7_FF_THREAD_MODE) & ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); + + /* + * BSpec recommends 8x4 when MSAA is used, + * however in practice 16x4 seems fastest. + * + * Note that PS/WM thread counts depend on the WIZ hashing + * disable bit, which we don't touch here, but it's good + * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 
+ */ + I915_WRITE(GEN7_GT_MODE, + GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); + + I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, + _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); + + /* WaDisableSDEUnitClockGating:bdw */ + I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | + GEN8_SDEUNIT_CLOCK_GATE_DISABLE); + + /* Wa4x4STCOptimizationDisable:bdw */ + I915_WRITE(CACHE_MODE_1, + _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE)); } static void haswell_init_clock_gating(struct drm_device *dev) @@ -4763,21 +4916,6 @@ static void haswell_init_clock_gating(struct drm_device *dev) ilk_init_lp_watermarks(dev); - /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. - * This implements the WaDisableRCZUnitClockGating:hsw workaround. - */ - I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); - - /* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */ - I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, - GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); - - /* WaApplyL3ControlAndL3ChickenMode:hsw */ - I915_WRITE(GEN7_L3CNTLREG1, - GEN7_WA_FOR_GEN7_L3_CONTROL); - I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, - GEN7_WA_L3_CHICKEN_MODE); - /* L3 caching of data atomics doesn't work -- disable it. */ I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); I915_WRITE(HSW_ROW_CHICKEN3, @@ -4789,12 +4927,28 @@ static void haswell_init_clock_gating(struct drm_device *dev) GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); /* WaVSRefCountFullforceMissDisable:hsw */ - gen7_setup_fixed_func_scheduler(dev_priv); + I915_WRITE(GEN7_FF_THREAD_MODE, + I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME); + + /* enable HiZ Raw Stall Optimization */ + I915_WRITE(CACHE_MODE_0_GEN7, + _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); /* WaDisable4x2SubspanOptimization:hsw */ I915_WRITE(CACHE_MODE_1, _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); + /* + * BSpec recommends 8x4 when MSAA is used, + * however in practice 16x4 seems fastest. + * + * Note that PS/WM thread counts depend on the WIZ hashing + * disable bit, which we don't touch here, but it's good + * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). + */ + I915_WRITE(GEN7_GT_MODE, + GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); + /* WaSwitchSolVfFArbitrationPriority:hsw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); @@ -4827,9 +4981,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) if (IS_IVB_GT1(dev)) I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); - else - I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2, - _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, @@ -4843,31 +4994,24 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) if (IS_IVB_GT1(dev)) I915_WRITE(GEN7_ROW_CHICKEN2, _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); - else + else { + /* must write both registers */ + I915_WRITE(GEN7_ROW_CHICKEN2, + _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); I915_WRITE(GEN7_ROW_CHICKEN2_GT2, _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); - + } /* WaForceL3Serialization:ivb */ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & ~L3SQ_URB_READ_CAM_MATCH_DISABLE); - /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock - * gating disable must be set. 
Failure to set it results in - * flickering pixels due to Z write ordering failures after - * some amount of runtime in the Mesa "fire" demo, and Unigine - * Sanctuary and Tropics, and apparently anything else with - * alpha test or pixel discard. - * - * According to the spec, bit 11 (RCCUNIT) must also be set, - * but we didn't debug actual testcases to find it out. - * + /* * According to the spec, bit 13 (RCZUNIT) must be set on IVB. * This implements the WaDisableRCZUnitClockGating:ivb workaround. */ I915_WRITE(GEN6_UCGCTL2, - GEN6_RCZUNIT_CLOCK_GATE_DISABLE | - GEN6_RCCUNIT_CLOCK_GATE_DISABLE); + GEN6_RCZUNIT_CLOCK_GATE_DISABLE); /* This is required by WaCatErrorRejectionIssue:ivb */ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, @@ -4876,13 +5020,29 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) g4x_disable_trickle_feed(dev); - /* WaVSRefCountFullforceMissDisable:ivb */ gen7_setup_fixed_func_scheduler(dev_priv); + if (0) { /* causes HiZ corruption on ivb:gt1 */ + /* enable HiZ Raw Stall Optimization */ + I915_WRITE(CACHE_MODE_0_GEN7, + _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); + } + /* WaDisable4x2SubspanOptimization:ivb */ I915_WRITE(CACHE_MODE_1, _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); + /* + * BSpec recommends 8x4 when MSAA is used, + * however in practice 16x4 seems fastest. + * + * Note that PS/WM thread counts depend on the WIZ hashing + * disable bit, which we don't touch here, but it's good + * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). + */ + I915_WRITE(GEN7_GT_MODE, + GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); + snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); snpcr &= ~GEN6_MBC_SNPCR_MASK; snpcr |= GEN6_MBC_SNPCR_MED; @@ -4904,13 +5064,11 @@ static void valleyview_init_clock_gating(struct drm_device *dev) mutex_unlock(&dev_priv->rps.hw_lock); switch ((val >> 6) & 3) { case 0: - dev_priv->mem_freq = 800; - break; case 1: - dev_priv->mem_freq = 1066; + dev_priv->mem_freq = 800; break; case 2: - dev_priv->mem_freq = 1333; + dev_priv->mem_freq = 1066; break; case 3: dev_priv->mem_freq = 1333; @@ -4929,19 +5087,12 @@ static void valleyview_init_clock_gating(struct drm_device *dev) CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | CHICKEN3_DGMG_DONE_FIX_DISABLE); + /* WaPsdDispatchEnable:vlv */ /* WaDisablePSDDualDispatchEnable:vlv */ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP | GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); - /* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */ - I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, - GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); - - /* WaApplyL3ControlAndL3ChickenMode:vlv */ - I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS); - I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); - /* WaForceL3Serialization:vlv */ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & ~L3SQ_URB_READ_CAM_MATCH_DISABLE); @@ -4955,51 +5106,39 @@ static void valleyview_init_clock_gating(struct drm_device *dev) I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); - /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock - * gating disable must be set. Failure to set it results in - * flickering pixels due to Z write ordering failures after - * some amount of runtime in the Mesa "fire" demo, and Unigine - * Sanctuary and Tropics, and apparently anything else with - * alpha test or pixel discard. - * - * According to the spec, bit 11 (RCCUNIT) must also be set, - * but we didn't debug actual testcases to find it out. 
- * + gen7_setup_fixed_func_scheduler(dev_priv); + + /* * According to the spec, bit 13 (RCZUNIT) must be set on IVB. * This implements the WaDisableRCZUnitClockGating:vlv workaround. - * - * Also apply WaDisableVDSUnitClockGating:vlv and - * WaDisableRCPBUnitClockGating:vlv. */ I915_WRITE(GEN6_UCGCTL2, - GEN7_VDSUNIT_CLOCK_GATE_DISABLE | - GEN7_TDLUNIT_CLOCK_GATE_DISABLE | - GEN6_RCZUNIT_CLOCK_GATE_DISABLE | - GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | - GEN6_RCCUNIT_CLOCK_GATE_DISABLE); + GEN6_RCZUNIT_CLOCK_GATE_DISABLE); + /* WaDisableL3Bank2xClockGate:vlv */ I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE); I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); + /* + * BSpec says this must be set, even though + * WaDisable4x2SubspanOptimization isn't listed for VLV. + */ I915_WRITE(CACHE_MODE_1, _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); /* + * WaIncreaseL3CreditsForVLVB0:vlv + * This is the hardware default actually. + */ + I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE); + + /* * WaDisableVLVClockGating_VBIIssue:vlv * Disable clock gating on the GCFG unit to prevent a delay * in the reporting of vblank events. */ - I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff); - - /* Conservative clock gating settings for now */ - I915_WRITE(0x9400, 0xffffffff); - I915_WRITE(0x9404, 0xffffffff); - I915_WRITE(0x9408, 0xffffffff); - I915_WRITE(0x940c, 0xffffffff); - I915_WRITE(0x9410, 0xffffffff); - I915_WRITE(0x9414, 0xffffffff); - I915_WRITE(0x9418, 0xffffffff); + I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); } static void g4x_init_clock_gating(struct drm_device *dev) @@ -5114,19 +5253,16 @@ void intel_suspend_hw(struct drm_device *dev) * enable it, so check if it's enabled and also check if we've requested it to * be enabled. */ -static bool hsw_power_well_enabled(struct drm_device *dev, +static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - struct drm_i915_private *dev_priv = dev->dev_private; - return I915_READ(HSW_PWR_WELL_DRIVER) == (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); } -bool intel_display_power_enabled_sw(struct drm_device *dev, +bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { - struct drm_i915_private *dev_priv = dev->dev_private; struct i915_power_domains *power_domains; power_domains = &dev_priv->power_domains; @@ -5134,15 +5270,17 @@ bool intel_display_power_enabled_sw(struct drm_device *dev, return power_domains->domain_use_count[domain]; } -bool intel_display_power_enabled(struct drm_device *dev, +bool intel_display_power_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { - struct drm_i915_private *dev_priv = dev->dev_private; struct i915_power_domains *power_domains; struct i915_power_well *power_well; bool is_enabled; int i; + if (dev_priv->pm.suspended) + return false; + power_domains = &dev_priv->power_domains; is_enabled = true; @@ -5152,7 +5290,7 @@ bool intel_display_power_enabled(struct drm_device *dev, if (power_well->always_on) continue; - if (!power_well->is_enabled(dev, power_well)) { + if (!power_well->ops->is_enabled(dev_priv, power_well)) { is_enabled = false; break; } @@ -5162,6 +5300,12 @@ bool intel_display_power_enabled(struct drm_device *dev, return is_enabled; } +/* + * Starting with Haswell, we have a "Power Down Well" that can be turned off + * when not needed anymore.
We have 4 registers that can request the power well + * to be enabled, and it will only be disabled if none of the registers is + * requesting it to be enabled. + */ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; @@ -5198,10 +5342,17 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) } } +static void reset_vblank_counter(struct drm_device *dev, enum pipe pipe) +{ + assert_spin_locked(&dev->vbl_lock); + + dev->vblank[pipe].last = 0; +} + static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; - enum pipe p; + enum pipe pipe; unsigned long irqflags; /* @@ -5212,21 +5363,18 @@ static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv) * FIXME: Should we do this in general in drm_vblank_post_modeset? */ spin_lock_irqsave(&dev->vbl_lock, irqflags); - for_each_pipe(p) - if (p != PIPE_A) - dev->vblank[p].last = 0; + for_each_pipe(pipe) + if (pipe != PIPE_A) + reset_vblank_counter(dev, pipe); spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } -static void hsw_set_power_well(struct drm_device *dev, +static void hsw_set_power_well(struct drm_i915_private *dev_priv, struct i915_power_well *power_well, bool enable) { - struct drm_i915_private *dev_priv = dev->dev_private; bool is_enabled, enable_requested; uint32_t tmp; - WARN_ON(dev_priv->pc8.enabled); - tmp = I915_READ(HSW_PWR_WELL_DRIVER); is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; @@ -5255,55 +5403,229 @@ static void hsw_set_power_well(struct drm_device *dev, } } -static void __intel_power_well_get(struct drm_device *dev, +static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - struct drm_i915_private *dev_priv = dev->dev_private; + hsw_set_power_well(dev_priv, power_well, power_well->count > 0); - if (!power_well->count++ && power_well->set) { - hsw_disable_package_c8(dev_priv); - power_well->set(dev, power_well, true); - } + /* + * We're taking over the BIOS, so clear any requests made by it since + * the driver is in charge now. + */ + if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) + I915_WRITE(HSW_PWR_WELL_BIOS, 0); +} + +static void hsw_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + hsw_set_power_well(dev_priv, power_well, true); } -static void __intel_power_well_put(struct drm_device *dev, +static void hsw_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - struct drm_i915_private *dev_priv = dev->dev_private; + hsw_set_power_well(dev_priv, power_well, false); +} + +static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ +} + +static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return true; +} + +static void vlv_set_power_well(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, bool enable) +{ + enum punit_power_well power_well_id = power_well->data; + u32 mask; + u32 state; + u32 ctrl; + + mask = PUNIT_PWRGT_MASK(power_well_id); + state = enable ? 
PUNIT_PWRGT_PWR_ON(power_well_id) : + PUNIT_PWRGT_PWR_GATE(power_well_id); + + mutex_lock(&dev_priv->rps.hw_lock); + +#define COND \ + ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) + + if (COND) + goto out; + + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); + ctrl &= ~mask; + ctrl |= state; + vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); + + if (wait_for(COND, 100)) + DRM_ERROR("timeout setting power well state %08x (%08x)\n", + state, + vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); + +#undef COND + +out: + mutex_unlock(&dev_priv->rps.hw_lock); +} + +static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_set_power_well(dev_priv, power_well, power_well->count > 0); +} + +static void vlv_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_set_power_well(dev_priv, power_well, true); +} + +static void vlv_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_set_power_well(dev_priv, power_well, false); +} + +static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + int power_well_id = power_well->data; + bool enabled = false; + u32 mask; + u32 state; + u32 ctrl; + + mask = PUNIT_PWRGT_MASK(power_well_id); + ctrl = PUNIT_PWRGT_PWR_ON(power_well_id); + + mutex_lock(&dev_priv->rps.hw_lock); + + state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; + /* + * We only ever set the power-on and power-gate states, anything + * else is unexpected. + */ + WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) && + state != PUNIT_PWRGT_PWR_GATE(power_well_id)); + if (state == ctrl) + enabled = true; + + /* + * A transient state at this point would mean some unexpected party + * is poking at the power controls too. + */ + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; + WARN_ON(ctrl != state); + + mutex_unlock(&dev_priv->rps.hw_lock); + + return enabled; +} - WARN_ON(!power_well->count); +static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D); + + vlv_set_power_well(dev_priv, power_well, true); + + spin_lock_irq(&dev_priv->irq_lock); + valleyview_enable_display_irqs(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + + /* + * During driver initialization we need to defer enabling hotplug + * processing until fbdev is set up.
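The vlv_set_power_well() code above follows a request-then-poll protocol against the Punit. A rough user-space sketch of that pattern, with fake variables in place of the real Punit CTRL/STATUS registers and an invented wait helper:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static unsigned int fake_ctrl, fake_status;

static void punit_write(unsigned int v)
{
	fake_ctrl = v;
	fake_status = v;	/* real hardware echoes the request later */
}

/* Poll the status "register" until it matches or the timeout expires. */
static bool wait_for_state(unsigned int mask, unsigned int state,
			   int timeout_ms)
{
	struct timespec ts = { 0, 1000000 };	/* poll every 1 ms */

	while (timeout_ms-- > 0) {
		if ((fake_status & mask) == state)
			return true;
		nanosleep(&ts, NULL);
	}
	return (fake_status & mask) == state;
}

int main(void)
{
	unsigned int mask = 0x3, on = 0x1;

	punit_write(on);	/* in the driver this completes asynchronously */
	if (!wait_for_state(mask, on, 100))
		fprintf(stderr, "timeout setting power well state %08x (%08x)\n",
			on, fake_ctrl);
	else
		printf("power well on\n");
	return 0;
}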
+ */ + if (dev_priv->enable_hotplug_processing) + intel_hpd_init(dev_priv->dev); + + i915_redisable_vga_power_on(dev_priv->dev); +} + +static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + struct drm_device *dev = dev_priv->dev; + enum pipe pipe; + + WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D); + + spin_lock_irq(&dev_priv->irq_lock); + for_each_pipe(pipe) + __intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); + + valleyview_disable_display_irqs(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + + spin_lock_irq(&dev->vbl_lock); + for_each_pipe(pipe) + reset_vblank_counter(dev, pipe); + spin_unlock_irq(&dev->vbl_lock); + + vlv_set_power_well(dev_priv, power_well, false); +} - if (!--power_well->count && power_well->set && - i915_disable_power_well) { - power_well->set(dev, power_well, false); - hsw_enable_package_c8(dev_priv); +static void check_power_well_state(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + bool enabled = power_well->ops->is_enabled(dev_priv, power_well); + + if (power_well->always_on || !i915.disable_power_well) { + if (!enabled) + goto mismatch; + + return; } + + if (enabled != (power_well->count > 0)) + goto mismatch; + + return; + +mismatch: + WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n", + power_well->name, power_well->always_on, enabled, + power_well->count, i915.disable_power_well); } -void intel_display_power_get(struct drm_device *dev, +void intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { - struct drm_i915_private *dev_priv = dev->dev_private; struct i915_power_domains *power_domains; struct i915_power_well *power_well; int i; + intel_runtime_pm_get(dev_priv); + power_domains = &dev_priv->power_domains; mutex_lock(&power_domains->lock); - for_each_power_well(i, power_well, BIT(domain), power_domains) - __intel_power_well_get(dev, power_well); + for_each_power_well(i, power_well, BIT(domain), power_domains) { + if (!power_well->count++) { + DRM_DEBUG_KMS("enabling %s\n", power_well->name); + power_well->ops->enable(dev_priv, power_well); + } + + check_power_well_state(dev_priv, power_well); + } power_domains->domain_use_count[domain]++; mutex_unlock(&power_domains->lock); } -void intel_display_power_put(struct drm_device *dev, +void intel_display_power_put(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { - struct drm_i915_private *dev_priv = dev->dev_private; struct i915_power_domains *power_domains; struct i915_power_well *power_well; int i; @@ -5315,10 +5637,20 @@ void intel_display_power_put(struct drm_device *dev, WARN_ON(!power_domains->domain_use_count[domain]); power_domains->domain_use_count[domain]--; - for_each_power_well_rev(i, power_well, BIT(domain), power_domains) - __intel_power_well_put(dev, power_well); + for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { + WARN_ON(!power_well->count); + + if (!--power_well->count && i915.disable_power_well) { + DRM_DEBUG_KMS("disabling %s\n", power_well->name); + power_well->ops->disable(dev_priv, power_well); + } + + check_power_well_state(dev_priv, power_well); + } mutex_unlock(&power_domains->lock); + + intel_runtime_pm_put(dev_priv); } static struct i915_power_domains *hsw_pwr; @@ -5333,7 +5665,7 @@ void i915_request_power_well(void) dev_priv = container_of(hsw_pwr, struct drm_i915_private, power_domains); - 
intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO); + intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); } EXPORT_SYMBOL_GPL(i915_request_power_well); @@ -5347,29 +5679,99 @@ void i915_release_power_well(void) dev_priv = container_of(hsw_pwr, struct drm_i915_private, power_domains); - intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO); + intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO); } EXPORT_SYMBOL_GPL(i915_release_power_well); +#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1) + +#define HSW_ALWAYS_ON_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PIPE_A) | \ + BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ + BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_CRT) | \ + BIT(POWER_DOMAIN_INIT)) +#define HSW_DISPLAY_POWER_DOMAINS ( \ + (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ + BIT(POWER_DOMAIN_INIT)) + +#define BDW_ALWAYS_ON_POWER_DOMAINS ( \ + HSW_ALWAYS_ON_POWER_DOMAINS | \ + BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER)) +#define BDW_DISPLAY_POWER_DOMAINS ( \ + (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \ + BIT(POWER_DOMAIN_INIT)) + +#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT) +#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK + +#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_CRT) | \ + BIT(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + +static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { + .sync_hw = i9xx_always_on_power_well_noop, + .enable = i9xx_always_on_power_well_noop, + .disable = i9xx_always_on_power_well_noop, + .is_enabled = i9xx_always_on_power_well_enabled, +}; + static struct i915_power_well i9xx_always_on_power_well[] = { { .name = "always-on", .always_on = 1, .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, }, }; +static const struct i915_power_well_ops hsw_power_well_ops = { + .sync_hw = hsw_power_well_sync_hw, + .enable = hsw_power_well_enable, + .disable = hsw_power_well_disable, + .is_enabled = hsw_power_well_enabled, +}; + static struct i915_power_well hsw_power_wells[] = { { .name = "always-on", .always_on = 1, .domains = HSW_ALWAYS_ON_POWER_DOMAINS, + .ops = &i9xx_always_on_power_well_ops, }, { .name = "display", - .domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS, - .is_enabled = hsw_power_well_enabled, - .set = hsw_set_power_well, + .domains = HSW_DISPLAY_POWER_DOMAINS, + .ops = &hsw_power_well_ops, }, }; @@ -5378,12 +5780,83 @@ static struct i915_power_well bdw_power_wells[] = { .name = "always-on", .always_on = 1, .domains = 
BDW_ALWAYS_ON_POWER_DOMAINS, + .ops = &i9xx_always_on_power_well_ops, }, { .name = "display", - .domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS, - .is_enabled = hsw_power_well_enabled, - .set = hsw_set_power_well, + .domains = BDW_DISPLAY_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + }, +}; + +static const struct i915_power_well_ops vlv_display_power_well_ops = { + .sync_hw = vlv_power_well_sync_hw, + .enable = vlv_display_power_well_enable, + .disable = vlv_display_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +static const struct i915_power_well_ops vlv_dpio_power_well_ops = { + .sync_hw = vlv_power_well_sync_hw, + .enable = vlv_power_well_enable, + .disable = vlv_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +static struct i915_power_well vlv_power_wells[] = { + { + .name = "always-on", + .always_on = 1, + .domains = VLV_ALWAYS_ON_POWER_DOMAINS, + .ops = &i9xx_always_on_power_well_ops, + }, + { + .name = "display", + .domains = VLV_DISPLAY_POWER_DOMAINS, + .data = PUNIT_POWER_WELL_DISP2D, + .ops = &vlv_display_power_well_ops, + }, + { + .name = "dpio-common", + .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, + .data = PUNIT_POWER_WELL_DPIO_CMN_BC, + .ops = &vlv_dpio_power_well_ops, + }, + { + .name = "dpio-tx-b-01", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01, + }, + { + .name = "dpio-tx-b-23", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23, + }, + { + .name = "dpio-tx-c-01", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01, + }, + { + .name = "dpio-tx-c-23", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23, }, }; @@ -5392,9 +5865,8 @@ static struct i915_power_well bdw_power_wells[] = { (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ }) -int intel_power_domains_init(struct drm_device *dev) +int intel_power_domains_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct i915_power_domains *power_domains = &dev_priv->power_domains; mutex_init(&power_domains->lock); @@ -5403,12 +5875,14 @@ int intel_power_domains_init(struct drm_device *dev) * The enabling order will be from lower to higher indexed wells, * the disabling order is reversed. 
*/ - if (IS_HASWELL(dev)) { + if (IS_HASWELL(dev_priv->dev)) { set_power_wells(power_domains, hsw_power_wells); hsw_pwr = power_domains; - } else if (IS_BROADWELL(dev)) { + } else if (IS_BROADWELL(dev_priv->dev)) { set_power_wells(power_domains, bdw_power_wells); hsw_pwr = power_domains; + } else if (IS_VALLEYVIEW(dev_priv->dev)) { + set_power_wells(power_domains, vlv_power_wells); } else { set_power_wells(power_domains, i9xx_always_on_power_well); } @@ -5416,58 +5890,38 @@ int intel_power_domains_init(struct drm_device *dev) return 0; } -void intel_power_domains_remove(struct drm_device *dev) +void intel_power_domains_remove(struct drm_i915_private *dev_priv) { hsw_pwr = NULL; } -static void intel_power_domains_resume(struct drm_device *dev) +static void intel_power_domains_resume(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct i915_power_domains *power_domains = &dev_priv->power_domains; struct i915_power_well *power_well; int i; mutex_lock(&power_domains->lock); - for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { - if (power_well->set) - power_well->set(dev, power_well, power_well->count > 0); - } + for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) + power_well->ops->sync_hw(dev_priv, power_well); mutex_unlock(&power_domains->lock); } -/* - * Starting with Haswell, we have a "Power Down Well" that can be turned off - * when not needed anymore. We have 4 registers that can request the power well - * to be enabled, and it will only be disabled if none of the registers is - * requesting it to be enabled. - */ -void intel_power_domains_init_hw(struct drm_device *dev) +void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* For now, we need the power well to be always enabled. */ - intel_display_set_init_power(dev, true); - intel_power_domains_resume(dev); - - if (!(IS_HASWELL(dev) || IS_BROADWELL(dev))) - return; - - /* We're taking over the BIOS, so clear any requests made by it since - * the driver is in charge now. */ - if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) - I915_WRITE(HSW_PWR_WELL_BIOS, 0); + intel_display_set_init_power(dev_priv, true); + intel_power_domains_resume(dev_priv); } -/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. 
*/ void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv) { - hsw_disable_package_c8(dev_priv); + intel_runtime_pm_get(dev_priv); } void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv) { - hsw_enable_package_c8(dev_priv); + intel_runtime_pm_put(dev_priv); } void intel_runtime_pm_get(struct drm_i915_private *dev_priv) @@ -5499,8 +5953,6 @@ void intel_init_runtime_pm(struct drm_i915_private *dev_priv) struct drm_device *dev = dev_priv->dev; struct device *device = &dev->pdev->dev; - dev_priv->pm.suspended = false; - if (!HAS_RUNTIME_PM(dev)) return; @@ -5509,6 +5961,8 @@ void intel_init_runtime_pm(struct drm_i915_private *dev_priv) pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ pm_runtime_mark_last_busy(device); pm_runtime_use_autosuspend(device); + + pm_runtime_put_autosuspend(device); } void intel_fini_runtime_pm(struct drm_i915_private *dev_priv) @@ -5560,7 +6014,7 @@ void intel_init_pm(struct drm_device *dev) /* For FIFO watermark updates */ if (HAS_PCH_SPLIT(dev)) { - intel_setup_wm_latency(dev); + ilk_setup_wm_latency(dev); if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] && dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || @@ -5731,13 +6185,9 @@ void intel_pm_setup(struct drm_device *dev) mutex_init(&dev_priv->rps.hw_lock); - mutex_init(&dev_priv->pc8.lock); - dev_priv->pc8.requirements_met = false; - dev_priv->pc8.gpu_idle = false; - dev_priv->pc8.irqs_disabled = false; - dev_priv->pc8.enabled = false; - dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */ - INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work); INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, intel_gen6_powersave_work); + + dev_priv->pm.suspended = false; + dev_priv->pm.irqs_disabled = false; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 31b36c5ac89..6bc68bdcf43 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -406,17 +406,24 @@ gen8_render_ring_flush(struct intel_ring_buffer *ring, static void ring_write_tail(struct intel_ring_buffer *ring, u32 value) { - drm_i915_private_t *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = ring->dev->dev_private; I915_WRITE_TAIL(ring, value); } -u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) +u64 intel_ring_get_active_head(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = ring->dev->dev_private; - u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ? 
- RING_ACTHD(ring->mmio_base) : ACTHD; + struct drm_i915_private *dev_priv = ring->dev->dev_private; + u64 acthd; + + if (INTEL_INFO(ring->dev)->gen >= 8) + acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base), + RING_ACTHD_UDW(ring->mmio_base)); + else if (INTEL_INFO(ring->dev)->gen >= 4) + acthd = I915_READ(RING_ACTHD(ring->mmio_base)); + else + acthd = I915_READ(ACTHD); - return I915_READ(acthd_reg); + return acthd; } static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) @@ -433,22 +440,24 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) static int init_ring_common(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj = ring->obj; int ret = 0; u32 head; gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); - if (I915_NEED_GFX_HWS(dev)) - intel_ring_setup_status_page(ring); - else - ring_setup_phys_status_page(ring); - /* Stop the ring if it's running. */ I915_WRITE_CTL(ring, 0); I915_WRITE_HEAD(ring, 0); ring->write_tail(ring, 0); + if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) + DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); + + if (I915_NEED_GFX_HWS(dev)) + intel_ring_setup_status_page(ring); + else + ring_setup_phys_status_page(ring); head = I915_READ_HEAD(ring) & HEAD_ADDR; @@ -531,9 +540,11 @@ init_pipe_control(struct intel_ring_buffer *ring) goto err; } - i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); + ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); + if (ret) + goto err_unref; - ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false); + ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0); if (ret) goto err_unref; @@ -549,7 +560,7 @@ init_pipe_control(struct intel_ring_buffer *ring) return 0; err_unpin: - i915_gem_object_unpin(ring->scratch.obj); + i915_gem_object_ggtt_unpin(ring->scratch.obj); err_unref: drm_gem_object_unreference(&ring->scratch.obj->base); err: @@ -562,14 +573,15 @@ static int init_render_ring(struct intel_ring_buffer *ring) struct drm_i915_private *dev_priv = dev->dev_private; int ret = init_ring_common(ring); - if (INTEL_INFO(dev)->gen > 3) + /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ + if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); /* We need to disable the AsyncFlip performance optimisations in order * to use MI_WAIT_FOR_EVENT within the CS. It should already be * programmed to '1' on all products. * - * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv + * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw */ if (INTEL_INFO(dev)->gen >= 6) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); @@ -625,7 +637,7 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring) if (INTEL_INFO(dev)->gen >= 5) { kunmap(sg_page(ring->scratch.obj->pages->sgl)); - i915_gem_object_unpin(ring->scratch.obj); + i915_gem_object_ggtt_unpin(ring->scratch.obj); } drm_gem_object_unreference(&ring->scratch.obj->base); @@ -809,8 +821,11 @@ gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) /* Workaround to force correct ordering between irq and seqno writes on * ivb (and maybe also on snb) by reading from a CS register (like * ACTHD) before reading the status page. 
*/ - if (!lazy_coherency) - intel_ring_get_active_head(ring); + if (!lazy_coherency) { + struct drm_i915_private *dev_priv = ring->dev->dev_private; + POSTING_READ(RING_ACTHD(ring->mmio_base)); + } + return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } @@ -842,7 +857,7 @@ static bool gen5_ring_get_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; if (!dev->irq_enabled) @@ -860,7 +875,7 @@ static void gen5_ring_put_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -873,7 +888,7 @@ static bool i9xx_ring_get_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; if (!dev->irq_enabled) @@ -894,7 +909,7 @@ static void i9xx_ring_put_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -910,7 +925,7 @@ static bool i8xx_ring_get_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; if (!dev->irq_enabled) @@ -931,7 +946,7 @@ static void i8xx_ring_put_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -946,7 +961,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring) void intel_ring_setup_status_page(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = ring->dev->dev_private; u32 mmio = 0; /* The ring status page addresses are no longer next to the rest of @@ -977,9 +992,19 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); POSTING_READ(mmio); - /* Flush the TLB for this page */ - if (INTEL_INFO(dev)->gen >= 6) { + /* + * Flush the TLB for this page + * + * FIXME: These two bits have disappeared on gen8, so a question + * arises: do we still need this and if so how should we go about + * invalidating the TLB? 
+ */ + if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { u32 reg = RING_INSTPM(ring->mmio_base); + + /* ring should be idle before issuing a sync flush*/ + WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); + I915_WRITE(reg, _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | INSTPM_SYNC_FLUSH)); @@ -1029,7 +1054,7 @@ static bool gen6_ring_get_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; if (!dev->irq_enabled) @@ -1054,7 +1079,7 @@ static void gen6_ring_put_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1253,7 +1278,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring) return; kunmap(sg_page(obj->pages->sgl)); - i915_gem_object_unpin(obj); + i915_gem_object_ggtt_unpin(obj); drm_gem_object_unreference(&obj->base); ring->status_page.obj = NULL; } @@ -1271,12 +1296,13 @@ static int init_status_page(struct intel_ring_buffer *ring) goto err; } - i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + if (ret) + goto err_unref; - ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false); - if (ret != 0) { + ret = i915_gem_obj_ggtt_pin(obj, 4096, 0); + if (ret) goto err_unref; - } ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); @@ -1293,7 +1319,7 @@ static int init_status_page(struct intel_ring_buffer *ring) return 0; err_unpin: - i915_gem_object_unpin(obj); + i915_gem_object_ggtt_unpin(obj); err_unref: drm_gem_object_unreference(&obj->base); err: @@ -1356,7 +1382,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, ring->obj = obj; - ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false); + ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); if (ret) goto err_unref; @@ -1385,12 +1411,14 @@ static int intel_init_ring_buffer(struct drm_device *dev, if (IS_I830(ring->dev) || IS_845G(ring->dev)) ring->effective_size -= 128; + i915_cmd_parser_init_ring(ring); + return 0; err_unmap: iounmap(ring->virtual_start); err_unpin: - i915_gem_object_unpin(obj); + i915_gem_object_ggtt_unpin(obj); err_unref: drm_gem_object_unreference(&obj->base); ring->obj = NULL; @@ -1418,7 +1446,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) iounmap(ring->virtual_start); - i915_gem_object_unpin(ring->obj); + i915_gem_object_ggtt_unpin(ring->obj); drm_gem_object_unreference(&ring->obj->base); ring->obj = NULL; ring->preallocated_lazy_request = NULL; @@ -1430,28 +1458,16 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) cleanup_status_page(ring); } -static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) -{ - int ret; - - ret = i915_wait_seqno(ring, seqno); - if (!ret) - i915_gem_retire_requests_ring(ring); - - return ret; -} - static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) { struct drm_i915_gem_request *request; - u32 seqno = 0; + u32 seqno = 0, tail; int ret; - i915_gem_retire_requests_ring(ring); - if (ring->last_retired_head != -1) { ring->head = ring->last_retired_head; ring->last_retired_head = -1; + ring->space = ring_space(ring); if (ring->space >= n) return 0; @@ -1468,6 +1484,7 @@ static int 
intel_ring_wait_request(struct intel_ring_buffer *ring, int n) space += ring->size; if (space >= n) { seqno = request->seqno; + tail = request->tail; break; } @@ -1482,15 +1499,11 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) if (seqno == 0) return -ENOSPC; - ret = intel_ring_wait_seqno(ring, seqno); + ret = i915_wait_seqno(ring, seqno); if (ret) return ret; - if (WARN_ON(ring->last_retired_head == -1)) - return -ENOSPC; - - ring->head = ring->last_retired_head; - ring->last_retired_head = -1; + ring->head = tail; ring->space = ring_space(ring); if (WARN_ON(ring->space < n)) return -ENOSPC; @@ -1528,7 +1541,8 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n) return 0; } - if (dev->primary->master) { + if (!drm_core_check_feature(dev, DRIVER_MODESET) && + dev->primary->master) { struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; if (master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; @@ -1632,7 +1646,7 @@ static int __intel_ring_prepare(struct intel_ring_buffer *ring, int intel_ring_begin(struct intel_ring_buffer *ring, int num_dwords) { - drm_i915_private_t *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = ring->dev->dev_private; int ret; ret = i915_gem_check_wedge(&dev_priv->gpu_error, @@ -1694,7 +1708,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, u32 value) { - drm_i915_private_t *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = ring->dev->dev_private; /* Every tail move must follow the sequence below */ @@ -1869,7 +1883,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring, int intel_init_render_ring_buffer(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; ring->name = "render ring"; @@ -1954,7 +1968,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) return -ENOMEM; } - ret = i915_gem_obj_ggtt_pin(obj, 0, true, false); + ret = i915_gem_obj_ggtt_pin(obj, 0, 0); if (ret != 0) { drm_gem_object_unreference(&obj->base); DRM_ERROR("Failed to ping batch bo\n"); @@ -1970,7 +1984,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; int ret; @@ -2038,7 +2052,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) int intel_init_bsd_ring_buffer(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; ring->name = "bsd ring"; @@ -2101,7 +2115,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) int intel_init_blt_ring_buffer(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; ring->name = "blitter ring"; @@ -2141,7 +2155,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) int intel_init_vebox_ring_buffer(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct 
intel_ring_buffer *ring = &dev_priv->ring[VECS]; ring->name = "video enhancement ring"; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 0b243ce3371..270a6a97343 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -33,6 +33,8 @@ struct intel_hw_status_page { #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) +#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) + enum intel_ring_hangcheck_action { HANGCHECK_IDLE = 0, HANGCHECK_WAIT, @@ -41,12 +43,14 @@ enum intel_ring_hangcheck_action { HANGCHECK_HUNG, }; +#define HANGCHECK_SCORE_RING_HUNG 31 + struct intel_ring_hangcheck { - bool deadlock; + u64 acthd; u32 seqno; - u32 acthd; int score; enum intel_ring_hangcheck_action action; + bool deadlock; }; struct intel_ring_buffer { @@ -162,6 +166,38 @@ struct intel_ring_buffer { u32 gtt_offset; volatile u32 *cpu_page; } scratch; + + /* + * Tables of commands the command parser needs to know about + * for this ring. + */ + const struct drm_i915_cmd_table *cmd_tables; + int cmd_table_count; + + /* + * Table of registers allowed in commands that read/write registers. + */ + const u32 *reg_table; + int reg_count; + + /* + * Table of registers allowed in commands that read/write registers, but + * only from the DRM master. + */ + const u32 *master_reg_table; + int master_reg_count; + + /* + * Returns the bitmask for the length field of the specified command. + * Return 0 for an unrecognized/invalid command. + * + * If the command parser finds an entry for a command in the ring's + * cmd_tables, it gets the command's length based on the table entry. + * If not, it calls this function to determine the per-ring length field + * encoding for the command (i.e. certain opcode ranges use certain bits + * to encode the command length in the header). + */ + u32 (*get_cmd_length_mask)(u32 cmd_header); }; static inline bool @@ -256,7 +292,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev); int intel_init_blt_ring_buffer(struct drm_device *dev); int intel_init_vebox_ring_buffer(struct drm_device *dev); -u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); +u64 intel_ring_get_active_head(struct intel_ring_buffer *ring); void intel_ring_setup_status_page(struct intel_ring_buffer *ring); static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 95bdfb3c431..d27155adf5d 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1461,7 +1461,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder) u32 temp; bool input1, input2; int i; - u8 status; + bool success; temp = I915_READ(intel_sdvo->sdvo_reg); if ((temp & SDVO_ENABLE) == 0) { @@ -1475,12 +1475,12 @@ static void intel_enable_sdvo(struct intel_encoder *encoder) for (i = 0; i < 2; i++) intel_wait_for_vblank(dev, intel_crtc->pipe); - status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); + success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); /* Warn if the device reported failure to sync. * A lot of SDVO devices fail to notify of sync, but it's * a given it the status is a success, we succeeded. 
*/ - if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { + if (success && !input1) { DRM_DEBUG_KMS("First %s output reported failure to " "sync\n", SDVO_NAME(intel_sdvo)); } @@ -2382,24 +2382,62 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo) } static void +intel_sdvo_connector_unregister(struct intel_connector *intel_connector) +{ + struct drm_connector *drm_connector; + struct intel_sdvo *sdvo_encoder; + + drm_connector = &intel_connector->base; + sdvo_encoder = intel_attached_sdvo(&intel_connector->base); + + sysfs_remove_link(&drm_connector->kdev->kobj, + sdvo_encoder->ddc.dev.kobj.name); + intel_connector_unregister(intel_connector); +} + +static int intel_sdvo_connector_init(struct intel_sdvo_connector *connector, struct intel_sdvo *encoder) { - drm_connector_init(encoder->base.base.dev, - &connector->base.base, + struct drm_connector *drm_connector; + int ret; + + drm_connector = &connector->base.base; + ret = drm_connector_init(encoder->base.base.dev, + drm_connector, &intel_sdvo_connector_funcs, connector->base.base.connector_type); + if (ret < 0) + return ret; - drm_connector_helper_add(&connector->base.base, + drm_connector_helper_add(drm_connector, &intel_sdvo_connector_helper_funcs); connector->base.base.interlace_allowed = 1; connector->base.base.doublescan_allowed = 0; connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; connector->base.get_hw_state = intel_sdvo_connector_get_hw_state; + connector->base.unregister = intel_sdvo_connector_unregister; intel_connector_attach_encoder(&connector->base, &encoder->base); - drm_sysfs_connector_add(&connector->base.base); + ret = drm_sysfs_connector_add(drm_connector); + if (ret < 0) + goto err1; + + ret = sysfs_create_link(&encoder->ddc.dev.kobj, + &drm_connector->kdev->kobj, + encoder->ddc.dev.kobj.name); + if (ret < 0) + goto err2; + + return 0; + +err2: + drm_sysfs_connector_remove(drm_connector); +err1: + drm_connector_cleanup(drm_connector); + + return ret; } static void @@ -2459,7 +2497,11 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo->is_hdmi = true; } - intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); + if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { + kfree(intel_sdvo_connector); + return false; + } + if (intel_sdvo->is_hdmi) intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector); @@ -2490,7 +2532,10 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) intel_sdvo->is_tv = true; - intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); + if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { + kfree(intel_sdvo_connector); + return false; + } if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) goto err; @@ -2534,8 +2579,11 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; } - intel_sdvo_connector_init(intel_sdvo_connector, - intel_sdvo); + if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { + kfree(intel_sdvo_connector); + return false; + } + return true; } @@ -2566,7 +2614,11 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; } - intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); + if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { + kfree(intel_sdvo_connector); + return false; + } + if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) 
goto err; @@ -2980,7 +3032,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) * simplistic anyway to express such constraints, so just give up on * cloning for SDVO encoders. */ - intel_sdvo->base.cloneable = false; + intel_sdvo->base.cloneable = 0; intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 716a3c9c075..336ae6c602f 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -124,9 +124,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, crtc_w--; crtc_h--; - I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); - I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); - linear_offset = y * fb->pitches[0] + x * pixel_size; sprsurf_offset = intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, @@ -134,6 +131,9 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, fb->pitches[0]); linear_offset -= sprsurf_offset; + I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); + I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); + if (obj->tiling_mode != I915_TILING_NONE) I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x); else @@ -293,15 +293,15 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (crtc_w != src_w || crtc_h != src_h) sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; - I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); - I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); - linear_offset = y * fb->pitches[0] + x * pixel_size; sprsurf_offset = intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, pixel_size, fb->pitches[0]); linear_offset -= sprsurf_offset; + I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); + I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); + /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET * register */ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) @@ -472,15 +472,15 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (crtc_w != src_w || crtc_h != src_h) dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; - I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); - I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); - linear_offset = y * fb->pitches[0] + x * pixel_size; dvssurf_offset = intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, pixel_size, fb->pitches[0]); linear_offset -= dvssurf_offset; + I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); + I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); + if (obj->tiling_mode != I915_TILING_NONE) I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x); else diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 22cf0f4ba24..bafe92e317d 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1189,8 +1189,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv, if (connector->polled & DRM_CONNECTOR_POLL_HPD) { spin_lock_irqsave(&dev_priv->irq_lock, irqflags); i915_disable_pipestat(dev_priv, 0, - PIPE_HOTPLUG_INTERRUPT_ENABLE | - PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); + PIPE_HOTPLUG_INTERRUPT_STATUS | + PIPE_HOTPLUG_TV_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } @@ -1266,8 +1266,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv, if (connector->polled & DRM_CONNECTOR_POLL_HPD) { spin_lock_irqsave(&dev_priv->irq_lock, irqflags); i915_enable_pipestat(dev_priv, 0, - PIPE_HOTPLUG_INTERRUPT_ENABLE | - PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); + PIPE_HOTPLUG_INTERRUPT_STATUS | + PIPE_HOTPLUG_TV_INTERRUPT_STATUS); 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } @@ -1536,9 +1536,14 @@ static int tv_is_present_in_vbt(struct drm_device *dev) /* * If the device type is not TV, continue. */ - if (p_child->old.device_type != DEVICE_TYPE_INT_TV && - p_child->old.device_type != DEVICE_TYPE_TV) + switch (p_child->old.device_type) { + case DEVICE_TYPE_INT_TV: + case DEVICE_TYPE_TV: + case DEVICE_TYPE_TV_SVIDEO_COMPOSITE: + break; + default: continue; + } /* Only when the addin_offset is non-zero, it is regarded * as present. */ @@ -1634,13 +1639,13 @@ intel_tv_init(struct drm_device *dev) intel_encoder->disable = intel_disable_tv; intel_encoder->get_hw_state = intel_tv_get_hw_state; intel_connector->get_hw_state = intel_connector_get_hw_state; + intel_connector->unregister = intel_connector_unregister; intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_TVOUT; intel_encoder->crtc_mask = (1 << 0) | (1 << 1); - intel_encoder->cloneable = false; + intel_encoder->cloneable = 0; intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); - intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT); intel_tv->type = DRM_MODE_CONNECTOR_Unknown; /* BIOS margin values */ diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 87df68f5f50..f729dc71d5b 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -40,6 +40,12 @@ #define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__) +static void +assert_device_not_suspended(struct drm_i915_private *dev_priv) +{ + WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, + "Device suspended\n"); +} static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) { @@ -83,14 +89,14 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, __gen6_gt_wait_for_thread_c0(dev_priv); } -static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) +static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) { __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); /* something from same cacheline, but !FORCEWAKE_MT */ __raw_posting_read(dev_priv, ECOBUS); } -static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv, +static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv, int fw_engine) { u32 forcewake_ack; @@ -136,14 +142,16 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, gen6_gt_check_fifodbg(dev_priv); } -static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv, +static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv, int fw_engine) { __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); /* something from same cacheline, but !FORCEWAKE_MT */ __raw_posting_read(dev_priv, ECOBUS); - gen6_gt_check_fifodbg(dev_priv); + + if (IS_GEN7(dev_priv->dev)) + gen6_gt_check_fifodbg(dev_priv); } static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) @@ -251,16 +259,16 @@ void vlv_force_wake_get(struct drm_i915_private *dev_priv, unsigned long irqflags; spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - if (FORCEWAKE_RENDER & fw_engine) { - if (dev_priv->uncore.fw_rendercount++ == 0) - dev_priv->uncore.funcs.force_wake_get(dev_priv, - FORCEWAKE_RENDER); - } - if (FORCEWAKE_MEDIA & fw_engine) { - if (dev_priv->uncore.fw_mediacount++ == 0) - dev_priv->uncore.funcs.force_wake_get(dev_priv, - FORCEWAKE_MEDIA); 
- } + + if (fw_engine & FORCEWAKE_RENDER && + dev_priv->uncore.fw_rendercount++ != 0) + fw_engine &= ~FORCEWAKE_RENDER; + if (fw_engine & FORCEWAKE_MEDIA && + dev_priv->uncore.fw_mediacount++ != 0) + fw_engine &= ~FORCEWAKE_MEDIA; + + if (fw_engine) + dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -272,46 +280,89 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - if (FORCEWAKE_RENDER & fw_engine) { - WARN_ON(dev_priv->uncore.fw_rendercount == 0); - if (--dev_priv->uncore.fw_rendercount == 0) - dev_priv->uncore.funcs.force_wake_put(dev_priv, - FORCEWAKE_RENDER); + if (fw_engine & FORCEWAKE_RENDER) { + WARN_ON(!dev_priv->uncore.fw_rendercount); + if (--dev_priv->uncore.fw_rendercount != 0) + fw_engine &= ~FORCEWAKE_RENDER; } - if (FORCEWAKE_MEDIA & fw_engine) { - WARN_ON(dev_priv->uncore.fw_mediacount == 0); - if (--dev_priv->uncore.fw_mediacount == 0) - dev_priv->uncore.funcs.force_wake_put(dev_priv, - FORCEWAKE_MEDIA); + if (fw_engine & FORCEWAKE_MEDIA) { + WARN_ON(!dev_priv->uncore.fw_mediacount); + if (--dev_priv->uncore.fw_mediacount != 0) + fw_engine &= ~FORCEWAKE_MEDIA; } + if (fw_engine) + dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static void gen6_force_wake_work(struct work_struct *work) +static void gen6_force_wake_timer(unsigned long arg) { - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), uncore.force_wake_work.work); + struct drm_i915_private *dev_priv = (void *)arg; unsigned long irqflags; + assert_device_not_suspended(dev_priv); + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + WARN_ON(!dev_priv->uncore.forcewake_count); + if (--dev_priv->uncore.forcewake_count == 0) dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + + intel_runtime_pm_put(dev_priv); } -static void intel_uncore_forcewake_reset(struct drm_device *dev) +static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) { struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long irqflags; - if (IS_VALLEYVIEW(dev)) { + del_timer_sync(&dev_priv->uncore.force_wake_timer); + + /* Hold uncore.lock across reset to prevent any register access + * with forcewake not set correctly + */ + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + if (IS_VALLEYVIEW(dev)) vlv_force_wake_reset(dev_priv); - } else if (INTEL_INFO(dev)->gen >= 6) { + else if (IS_GEN6(dev) || IS_GEN7(dev)) __gen6_gt_force_wake_reset(dev_priv); - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) - __gen6_gt_force_wake_mt_reset(dev_priv); + + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev)) + __gen7_gt_force_wake_mt_reset(dev_priv); + + if (restore) { /* If reset with a user forcewake, try to restore */ + unsigned fw = 0; + + if (IS_VALLEYVIEW(dev)) { + if (dev_priv->uncore.fw_rendercount) + fw |= FORCEWAKE_RENDER; + + if (dev_priv->uncore.fw_mediacount) + fw |= FORCEWAKE_MEDIA; + } else { + if (dev_priv->uncore.forcewake_count) + fw = FORCEWAKE_ALL; + } + + if (fw) + dev_priv->uncore.funcs.force_wake_get(dev_priv, fw); + + if (IS_GEN6(dev) || IS_GEN7(dev)) + dev_priv->uncore.fifo_count = + __raw_i915_read32(dev_priv, GTFIFOCTL) & + GT_FIFO_FREE_ENTRIES_MASK; + } else { + dev_priv->uncore.forcewake_count = 0; + dev_priv->uncore.fw_rendercount = 0; + dev_priv->uncore.fw_mediacount = 0; } + + 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } void intel_uncore_early_sanitize(struct drm_device *dev) @@ -337,7 +388,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev) __raw_i915_write32(dev_priv, GTFIFODBG, __raw_i915_read32(dev_priv, GTFIFODBG)); - intel_uncore_forcewake_reset(dev); + intel_uncore_forcewake_reset(dev, false); } void intel_uncore_sanitize(struct drm_device *dev) @@ -354,7 +405,9 @@ void intel_uncore_sanitize(struct drm_device *dev) mutex_lock(&dev_priv->rps.hw_lock); reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS); - if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT)) + if (reg_val & (PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_RENDER) | + PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_MEDIA) | + PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D))) vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0); mutex_unlock(&dev_priv->rps.hw_lock); @@ -393,25 +446,40 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine) void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine) { unsigned long irqflags; + bool delayed = false; if (!dev_priv->uncore.funcs.force_wake_put) return; /* Redirect to VLV specific routine */ - if (IS_VALLEYVIEW(dev_priv->dev)) - return vlv_force_wake_put(dev_priv, fw_engine); + if (IS_VALLEYVIEW(dev_priv->dev)) { + vlv_force_wake_put(dev_priv, fw_engine); + goto out; + } spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + WARN_ON(!dev_priv->uncore.forcewake_count); + if (--dev_priv->uncore.forcewake_count == 0) { dev_priv->uncore.forcewake_count++; - mod_delayed_work(dev_priv->wq, - &dev_priv->uncore.force_wake_work, - 1); + delayed = true; + mod_timer_pinned(&dev_priv->uncore.force_wake_timer, + jiffies + 1); } spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); - intel_runtime_pm_put(dev_priv); +out: + if (!delayed) + intel_runtime_pm_put(dev_priv); +} + +void assert_force_wake_inactive(struct drm_i915_private *dev_priv) +{ + if (!dev_priv->uncore.funcs.force_wake_get) + return; + + WARN_ON(dev_priv->uncore.forcewake_count > 0); } /* We give fast paths for the really cool registers */ @@ -446,16 +514,10 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) } } -static void -assert_device_not_suspended(struct drm_i915_private *dev_priv) -{ - WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, - "Device suspended\n"); -} - #define REG_READ_HEADER(x) \ unsigned long irqflags; \ u##x val = 0; \ + assert_device_not_suspended(dev_priv); \ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) #define REG_READ_FOOTER \ @@ -484,14 +546,13 @@ gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ static u##x \ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ REG_READ_HEADER(x); \ - if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ - if (dev_priv->uncore.forcewake_count == 0) \ - dev_priv->uncore.funcs.force_wake_get(dev_priv, \ - FORCEWAKE_ALL); \ + if (dev_priv->uncore.forcewake_count == 0 && \ + NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ + dev_priv->uncore.funcs.force_wake_get(dev_priv, \ + FORCEWAKE_ALL); \ val = __raw_i915_read##x(dev_priv, reg); \ - if (dev_priv->uncore.forcewake_count == 0) \ - dev_priv->uncore.funcs.force_wake_put(dev_priv, \ - FORCEWAKE_ALL); \ + dev_priv->uncore.funcs.force_wake_put(dev_priv, \ + FORCEWAKE_ALL); \ } else { \ val = __raw_i915_read##x(dev_priv, reg); \ } \ @@ -502,27 +563,19 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ static u##x \ vlv_read##x(struct 
drm_i915_private *dev_priv, off_t reg, bool trace) { \ unsigned fwengine = 0; \ - unsigned *fwcount; \ REG_READ_HEADER(x); \ - if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \ - fwengine = FORCEWAKE_RENDER; \ - fwcount = &dev_priv->uncore.fw_rendercount; \ - } \ - else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \ - fwengine = FORCEWAKE_MEDIA; \ - fwcount = &dev_priv->uncore.fw_mediacount; \ + if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_rendercount == 0) \ + fwengine = FORCEWAKE_RENDER; \ + } else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_mediacount == 0) \ + fwengine = FORCEWAKE_MEDIA; \ } \ - if (fwengine != 0) { \ - if ((*fwcount)++ == 0) \ - (dev_priv)->uncore.funcs.force_wake_get(dev_priv, \ - fwengine); \ - val = __raw_i915_read##x(dev_priv, reg); \ - if (--(*fwcount) == 0) \ - (dev_priv)->uncore.funcs.force_wake_put(dev_priv, \ - fwengine); \ - } else { \ - val = __raw_i915_read##x(dev_priv, reg); \ - } \ + if (fwengine) \ + dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \ + val = __raw_i915_read##x(dev_priv, reg); \ + if (fwengine) \ + dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \ REG_READ_FOOTER; \ } @@ -554,6 +607,7 @@ __gen4_read(64) #define REG_WRITE_HEADER \ unsigned long irqflags; \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ + assert_device_not_suspended(dev_priv); \ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) #define REG_WRITE_FOOTER \ @@ -584,7 +638,6 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ - assert_device_not_suspended(dev_priv); \ __raw_i915_write##x(dev_priv, reg, val); \ if (unlikely(__fifo_ret)) { \ gen6_gt_check_fifodbg(dev_priv); \ @@ -600,7 +653,6 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ - assert_device_not_suspended(dev_priv); \ hsw_unclaimed_reg_clear(dev_priv, reg); \ __raw_i915_write##x(dev_priv, reg, val); \ if (unlikely(__fifo_ret)) { \ @@ -634,16 +686,17 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg) #define __gen8_write(x) \ static void \ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ - bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \ REG_WRITE_HEADER; \ - if (__needs_put) { \ - dev_priv->uncore.funcs.force_wake_get(dev_priv, \ - FORCEWAKE_ALL); \ - } \ - __raw_i915_write##x(dev_priv, reg, val); \ - if (__needs_put) { \ - dev_priv->uncore.funcs.force_wake_put(dev_priv, \ - FORCEWAKE_ALL); \ + if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \ + if (dev_priv->uncore.forcewake_count == 0) \ + dev_priv->uncore.funcs.force_wake_get(dev_priv, \ + FORCEWAKE_ALL); \ + __raw_i915_write##x(dev_priv, reg, val); \ + if (dev_priv->uncore.forcewake_count == 0) \ + dev_priv->uncore.funcs.force_wake_put(dev_priv, \ + FORCEWAKE_ALL); \ + } else { \ + __raw_i915_write##x(dev_priv, reg, val); \ } \ REG_WRITE_FOOTER; \ } @@ -681,15 +734,17 @@ void intel_uncore_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work, - gen6_force_wake_work); + setup_timer(&dev_priv->uncore.force_wake_timer, + gen6_force_wake_timer, (unsigned long)dev_priv); + + intel_uncore_early_sanitize(dev); if (IS_VALLEYVIEW(dev)) { 
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put; } else if (IS_HASWELL(dev) || IS_GEN8(dev)) { - dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get; - dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put; + dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get; + dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put; } else if (IS_IVYBRIDGE(dev)) { u32 ecobus; @@ -703,16 +758,16 @@ void intel_uncore_init(struct drm_device *dev) * forcewake being disabled. */ mutex_lock(&dev->struct_mutex); - __gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL); + __gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL); ecobus = __raw_i915_read32(dev_priv, ECOBUS); - __gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL); + __gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL); mutex_unlock(&dev->struct_mutex); if (ecobus & FORCEWAKE_MT_ENABLE) { dev_priv->uncore.funcs.force_wake_get = - __gen6_gt_force_wake_mt_get; + __gen7_gt_force_wake_mt_get; dev_priv->uncore.funcs.force_wake_put = - __gen6_gt_force_wake_mt_put; + __gen7_gt_force_wake_mt_put; } else { DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); DRM_INFO("when using vblank-synced partial screen updates.\n"); @@ -792,12 +847,9 @@ void intel_uncore_init(struct drm_device *dev) void intel_uncore_fini(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - - flush_delayed_work(&dev_priv->uncore.force_wake_work); - /* Paranoia: make sure we have disabled everything before we exit. */ intel_uncore_sanitize(dev); + intel_uncore_forcewake_reset(dev, false); } static const struct register_whitelist { @@ -814,7 +866,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_reg_read *reg = data; struct register_whitelist const *entry = whitelist; - int i; + int i, ret = 0; for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { if (entry->offset == reg->offset && @@ -825,6 +877,8 @@ int i915_reg_read_ioctl(struct drm_device *dev, if (i == ARRAY_SIZE(whitelist)) return -EINVAL; + intel_runtime_pm_get(dev_priv); + switch (entry->size) { case 8: reg->val = I915_READ64(reg->offset); @@ -840,10 +894,13 @@ int i915_reg_read_ioctl(struct drm_device *dev, break; default: WARN_ON(1); - return -EINVAL; + ret = -EINVAL; + goto out; } - return 0; +out: + intel_runtime_pm_put(dev_priv); + return ret; } int i915_get_reset_stats_ioctl(struct drm_device *dev, @@ -852,6 +909,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_reset_stats *args = data; struct i915_ctx_hang_stats *hs; + struct i915_hw_context *ctx; int ret; if (args->flags || args->pad) @@ -864,11 +922,12 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev, if (ret) return ret; - hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id); - if (IS_ERR(hs)) { + ctx = i915_gem_context_get(file->driver_priv, args->ctx_id); + if (IS_ERR(ctx)) { mutex_unlock(&dev->struct_mutex); - return PTR_ERR(hs); + return PTR_ERR(ctx); } + hs = &ctx->hang_stats; if (capable(CAP_SYS_ADMIN)) args->reset_count = i915_reset_count(&dev_priv->gpu_error); @@ -944,12 +1003,6 @@ static int gen6_do_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int ret; - unsigned long irqflags; - - /* Hold uncore.lock across reset to prevent any register access - * with forcewake not 
set correctly - */ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); /* Reset the chip */ @@ -962,18 +1015,8 @@ static int gen6_do_reset(struct drm_device *dev) /* Spin waiting for the device to ack the reset request */ ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); - intel_uncore_forcewake_reset(dev); + intel_uncore_forcewake_reset(dev, true); - /* If reset with a user forcewake, try to restore, otherwise turn it off */ - if (dev_priv->uncore.forcewake_count) - dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL); - else - dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); - - /* Restore fifo count */ - dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK; - - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); return ret; }
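The change that recurs throughout the power-domain rework above is a simple discipline: every well keeps a software reference count, the hardware is touched only on the 0->1 and 1->0 transitions, and the final disable is additionally gated by the i915.disable_power_well parameter (with check_power_well_state() warning whenever the counts and the hardware disagree). A minimal standalone sketch of that pattern in plain C; the names (power_well_get/put, disable_power_well) and the printf/assert scaffolding are invented stand-ins, not driver code:

	/*
	 * Refcounted power-well sketch: hardware is enabled with the first
	 * user and disabled with the last one, mirroring the transitions
	 * intel_display_power_get/put drive through power_well->ops.
	 */
	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct power_well {
		const char *name;
		int count;      /* software reference count */
		bool enabled;   /* models the hardware state */
	};

	/* mirrors the i915.disable_power_well module parameter */
	static bool disable_power_well = true;

	static void power_well_get(struct power_well *well)
	{
		if (well->count++ == 0) {
			printf("enabling %s\n", well->name);
			well->enabled = true;   /* hw poked on 0->1 only */
		}
	}

	static void power_well_put(struct power_well *well)
	{
		assert(well->count > 0);        /* the driver WARNs here */
		if (--well->count == 0 && disable_power_well) {
			printf("disabling %s\n", well->name);
			well->enabled = false;  /* hw poked on 1->0 only */
		}
	}

	int main(void)
	{
		struct power_well display = { .name = "display" };

		power_well_get(&display);  /* first user: well comes up */
		power_well_get(&display);  /* nested user: count only */
		power_well_put(&display);  /* one user still holds it */
		power_well_put(&display);  /* last user: well goes down */
		printf("%s enabled=%d\n", display.name, display.enabled);
		return 0;
	}

Because the hardware is only touched on those two transitions, intel_display_power_get() can also take a runtime PM reference up front and intel_display_power_put() can drop it, without any risk of double-enabling a well.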
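The reworked VLV forcewake entry points apply the same counting per engine, with one refinement: the caller passes a mask of engines, and any engine whose count was already nonzero (on get) or stays nonzero (on put) is stripped from the mask, so funcs.force_wake_get/put is invoked at most once, with only the engines that actually transition. A sketch under the same made-up-name caveat (FW_RENDER/FW_MEDIA and hw_force_wake() are illustrative, not the driver's symbols):

	/*
	 * Per-engine forcewake counts with mask trimming, as in the
	 * vlv_force_wake_get/put rewrite: only transitioning engines
	 * reach the (stubbed) hardware call.
	 */
	#include <assert.h>
	#include <stdio.h>

	#define FW_RENDER (1 << 0)
	#define FW_MEDIA  (1 << 1)

	static int render_count, media_count;

	/* stand-in for the register writes behind funcs.force_wake_* */
	static void hw_force_wake(unsigned mask, int get)
	{
		printf("%s hw forcewake 0x%x\n", get ? "get" : "put", mask);
	}

	static void force_wake_get(unsigned fw_engine)
	{
		/* engines already held just gain a reference */
		if ((fw_engine & FW_RENDER) && render_count++ != 0)
			fw_engine &= ~FW_RENDER;
		if ((fw_engine & FW_MEDIA) && media_count++ != 0)
			fw_engine &= ~FW_MEDIA;

		if (fw_engine)
			hw_force_wake(fw_engine, 1);
	}

	static void force_wake_put(unsigned fw_engine)
	{
		if (fw_engine & FW_RENDER) {
			assert(render_count);   /* the driver WARNs here */
			if (--render_count != 0)
				fw_engine &= ~FW_RENDER;
		}
		if (fw_engine & FW_MEDIA) {
			assert(media_count);
			if (--media_count != 0)
				fw_engine &= ~FW_MEDIA;
		}

		if (fw_engine)
			hw_force_wake(fw_engine, 0);
	}

	int main(void)
	{
		force_wake_get(FW_RENDER | FW_MEDIA); /* wakes both engines */
		force_wake_get(FW_RENDER);            /* refcount only */
		force_wake_put(FW_RENDER);            /* render still held */
		force_wake_put(FW_RENDER | FW_MEDIA); /* both released */
		return 0;
	}

The intel_uncore_forcewake_reset(dev, restore) path shown above then only has to consult these counts to know which engines to re-assert after a reset.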